tarantool_1.9.1.26.g63eb81e3c/0000775000000000000000000000000013306565107014214 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/apk/0000775000000000000000000000000013306560010014753 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/apk/tarantool.pre-install0000775000000000000000000000014513306560010021135 0ustar rootroot#!/bin/sh addgroup -S tarantool adduser -S -G tarantool tarantool apk add --no-cache 'su-exec>=0.2'tarantool_1.9.1.26.g63eb81e3c/apk/tarantool.post-install0000775000000000000000000000043613306560010021337 0ustar rootroot#!/bin/sh mkdir -p /var/lib/tarantool chown tarantool:tarantool /var/lib/tarantool mkdir -p /opt/tarantool chown tarantool:tarantool /opt/tarantool mkdir -p /var/run/tarantool chown tarantool:tarantool /var/run/tarantool mkdir /etc/tarantool chown tarantool:tarantool /etc/tarantooltarantool_1.9.1.26.g63eb81e3c/apk/APKBUILD0000664000000000000000000000300513306560010016067 0ustar rootrootpkgname="tarantool" pkgver="1.7.6" pkgrel="0" pkgdesc="Tarantool is an in-memory database and application server" maintainer="Ilya Konyukhov " license="BSD-2-Clause" arch="all" source="" giturl="https://github.com/tarantool/tarantool.git" url="https://github.com/tarantool/tarantool" depends="libstdc++ readline libressl yaml lz4 binutils ncurses libgomp lua curl tar zip libunwind libcurl icu" makedepends="perl gcc cmake readline-dev libressl-dev yaml-dev lz4-dev binutils-dev ncurses-dev lua-dev musl-dev make git libunwind-dev autoconf automake libtool linux-headers go curl-dev icu-dev" subpackages="$pkgname-dev $pkgname-dbg $pkgname-doc" builddir="$srcdir"/"$pkgname-$pkgver" prepare() { default_prepare } build() { cd "$builddir" cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DENABLE_BUNDLED_LIBYAML:BOOL=OFF \ -DENABLE_BACKTRACE:BOOL=ON \ -DENABLE_DIST:BOOL=ON \ -DENABLE_BUNDLED_LIBYAML:BOOL=OFF \ -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_SYSCONFDIR=/etc \ -DCMAKE_INSTALL_LOCALSTATEDIR=/var \ . 
make -C "$builddir" -j || return 1 return 0 } package() { mkdir -p "$pkgdir" make -C "$builddir" DESTDIR="$pkgdir" install make -C "$builddir"/src/lib/small DESTDIR="$pkgdir" install make -C "$builddir"/src/lib/msgpuck DESTDIR="$pkgdir" install make -C "$builddir/src/lib/msgpuck" clean make -C "$builddir/src/lib/small" clean make -C "$builddir" clean return 0 } tarantool_1.9.1.26.g63eb81e3c/README.FreeBSD0000664000000000000000000000122213306560010016266 0ustar rootrootTarget OS: FreeBSD 10.1 (RELEASE) 1. Install necessary packages: ------------- pkg install sudo git cmake gmake readline 2. Download & build tarantool source code: ------------- git clone git://github.com/tarantool/tarantool.git cd tarantool mkdir build && cd build git submodule update --init --recursive cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo gmake 3. Set up python 2.7 ------------- From packages: pkg install python27 py27-yaml py27-daemon py27-msgpack From pip: pkg install py27-virtualenv virtualenv .venv source .venv/bin/activate pip install -r ../test-run/requirements.txt 4. Run tarantool test suite ------------- gmake test -- EOF tarantool_1.9.1.26.g63eb81e3c/test/0000775000000000000000000000000013306565107015173 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog-py/0000775000000000000000000000000013306560010016556 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog-py/misc.test.py0000664000000000000000000000465413306560010021052 0ustar rootrootimport os import yaml from os.path import abspath # cleanup server.vardir server.stop() server.deploy() lsn = int(yaml.load(server.admin("box.info.lsn", silent=True))[0]) server.stop() data_path = os.path.join(server.vardir, server.name) print """ # xlog file must exist after inserts. 
""" filename = str(lsn).zfill(20) + ".xlog" wal = os.path.join(data_path, filename) server.start() server.admin("space = box.schema.space.create('tweedledum')") if os.access(wal, os.F_OK): print ".xlog exists" server.admin("index = space:create_index('primary', { type = 'hash' })") server.stop() lsn += 2 print """ # a new xlog must be opened after regular termination. """ filename = str(lsn).zfill(20) + ".xlog" server.start() wal = os.path.join(data_path, filename) server.admin("box.space.tweedledum:insert{3, 'third tuple'}") if os.access(wal, os.F_OK): print "a new .xlog exists" server.stop() if os.access(wal, os.F_OK): print ".xlog stays around after sutdown" lsn += 1 print """ # An xlog file with one record during recovery. """ server.start() filename = str(lsn).zfill(20) + ".xlog" wal = os.path.join(data_path, filename) server.admin("box.space.tweedledum:insert{4, 'fourth tuple'}") server.admin("box.space.tweedledum:insert{5, 'Unfinished record'}") pid = int(yaml.load(server.admin("require('tarantool').pid()", silent=True))[0]) from signal import SIGKILL if pid > 0: os.kill(pid, SIGKILL) server.stop() if os.access(wal, os.F_OK): print ".xlog exists after kill -9" # Remove last byte from xlog f = open(wal, "a") size = f.tell() f.truncate(size - 1) f.close() server.start() if os.access(wal, os.F_OK): print "corrupt .xlog exists after start" server.stop() lsn += 1 server.start() orig_lsn = int(yaml.load(admin("box.info.lsn", silent=True))[0]) # create .snap.inprogress admin("box.snapshot()") admin("box.space._schema:insert({'test', 'test'})") admin("box.snapshot()") lsn = int(yaml.load(admin("box.info.lsn", silent=True))[0]) snapshot = str(lsn).zfill(20) + ".snap" snapshot = os.path.join(data_path, snapshot) server.stop() os.rename(snapshot, snapshot + ".inprogress") # remove .xlogs and .vylog for f in os.listdir(data_path): if f.endswith((".xlog", ".vylog")): os.remove(os.path.join(data_path, f)) # check that .snap.inprogress is ignored during scan server.start() 
lsn = int(yaml.load(admin("box.info.lsn", silent=True))[0]) if lsn == orig_lsn: print ".snap.inprogress is ignored" tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/dup_key.result0000664000000000000000000000151513306560010021460 0ustar rootrootspace = box.schema.space.create('test') --- ... index = box.space.test:create_index('primary') --- ... box.space.test:insert{1, 'first tuple'} --- - [1, 'first tuple'] ... box.space.test:insert{2, 'second tuple'} --- - [2, 'second tuple'] ... .xlog exists space = box.schema.space.create('test') --- ... index = box.space.test:create_index('primary') --- ... box.space.test:insert{1, 'first tuple'} --- - [1, 'first tuple'] ... box.space.test:delete{1} --- - [1, 'first tuple'] ... box.space.test:insert{1, 'third tuple'} --- - [1, 'third tuple'] ... box.space.test:insert{2, 'fourth tuple'} --- - [2, 'fourth tuple'] ... .xlog exists check log line for 'Duplicate key' 'Duplicate key' exists in server log box.space.test:get{1} --- - [1, 'first tuple'] ... box.space.test:get{2} --- - [2, 'second tuple'] ... box.space.test:len() --- - 2 ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/suite.ini0000664000000000000000000000024013306560010020404 0ustar rootroot[default] core = tarantool description = legacy python tests script = box.lua lua_libs = lua/fiber.lua lua/fifo.lua use_unix_sockets = True is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/lsn_gap.test.py0000664000000000000000000000202413306560010021527 0ustar rootrootimport os import yaml # # gh-167: Replica can't find next xlog file if there is a gap in LSN # server.stop() server.deploy() # Create wal#1 server.admin("space = box.schema.space.create('test')") server.admin("index = box.space.test:create_index('primary')") server.admin("box.space.test:insert{1, 'first tuple'}") server.admin("box.space.test:insert{2, 'second tuple'}") lsn = int(yaml.load(server.admin("box.info.lsn", silent=True))[0]) path = os.path.join(server.vardir, server.name) wal = os.path.join(path, str(lsn).zfill(20) + ".xlog") server.stop() server.start() server.admin("box.space.test:insert{3, 'third tuple'}") server.stop() server.start() server.admin("box.space.test:insert{4, 'fourth tuple'}") server.stop() # Remove xlog with {3, 'third tuple'} os.unlink(wal) server.start() line="ignoring a gap in LSN" print "check log line for '%s'" % line print if server.logfile_pos.seek_once(line) >= 0: print "'%s' exists in server log" % line print # missing tuple from removed xlog server.admin("box.space.test:select{}") tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/empty.result0000664000000000000000000000025013306560010021151 0ustar rootroot.xlog exists _ = box.schema.space.create('test') --- ... _ = box.schema.space.create('test') --- - error: Space 'test' already exists ... box.space.test:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/missing.test.py0000664000000000000000000000250013306560010021554 0ustar rootrootimport yaml import os # # gh-716: infinite loop at start if missing xlog # server.stop() server.deploy() # Create wal#1 server.admin("space = box.schema.space.create('test')") server.admin("index = box.space.test:create_index('primary')") server.stop() server.start() # these inserts will be in their own xlog, which then will # get "lost" lsn = int(yaml.load(server.admin("box.info.lsn", silent=True))[0]) data_path = os.path.join(server.vardir, server.name) wal = os.path.join(data_path, str(lsn).zfill(20) + ".xlog") server.admin("box.space.test:insert{1, 'first tuple'}") server.admin("box.space.test:insert{2, 'second tuple'}") server.admin("box.space.test:insert{3, 'third tuple'}") server.stop() server.start() # put deletes in their own xlog server.admin("box.space.test:delete{1}") server.admin("box.space.test:delete{2}") server.admin("box.space.test:delete{3}") server.stop() # Remove xlog with inserts os.unlink(wal) # tarantool doesn't issue an LSN for deletes which delete nothing # this may lead to infinite recursion at start server.start() line="ignoring a gap in LSN" print "check log line for '%s'" % line print if server.logfile_pos.seek_once(line) >= 0: print "'%s' exists in server log" % line print # missing tuples from removed xlog server.admin("box.space.test:select{}") server.admin("box.space.test:drop()") tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/box.lua0000664000000000000000000000044313306560010020052 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", force_recovery = true, rows_per_wal = 10 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/dup_key.test.py0000664000000000000000000000320213306560010021543 0ustar rootrootimport os import yaml #print """ #A test case for 
https://bugs.launchpad.net/tarantool/+bug/1052018 #panic_on_wal_error doesn't work for duplicate key errors #""" server.stop() server.deploy() lsn = int(yaml.load(server.admin("box.info.lsn", silent=True))[0]) filename = str(lsn).zfill(20) + ".xlog" vardir = os.path.join(server.vardir, server.name) wal_old = os.path.join(vardir, "old_" + filename) wal = os.path.join(vardir, filename) # Create wal#1 server.admin("space = box.schema.space.create('test')") server.admin("index = box.space.test:create_index('primary')") server.admin("box.space.test:insert{1, 'first tuple'}") server.admin("box.space.test:insert{2, 'second tuple'}") server.stop() # Save wal #1 if os.access(wal, os.F_OK): print ".xlog exists" os.rename(wal, wal_old) lsn += 4 # Create another wal#1 server.start() server.admin("space = box.schema.space.create('test')") server.admin("index = box.space.test:create_index('primary')") server.admin("box.space.test:insert{1, 'first tuple'}") server.admin("box.space.test:delete{1}") server.stop() # Create wal#2 server.start() server.admin("box.space.test:insert{1, 'third tuple'}") server.admin("box.space.test:insert{2, 'fourth tuple'}") server.stop() if os.access(wal, os.F_OK): print ".xlog exists" # Replace wal#1 with saved copy os.unlink(wal) os.rename(wal_old, wal) server.start() line = 'Duplicate key' print "check log line for '%s'" % line print if server.logfile_pos.seek_once(line) >= 0: print "'%s' exists in server log" % line print server.admin("box.space.test:get{1}") server.admin("box.space.test:get{2}") server.admin("box.space.test:len()") tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/lsn_gap.result0000664000000000000000000000110013306560010021431 0ustar rootrootspace = box.schema.space.create('test') --- ... index = box.space.test:create_index('primary') --- ... box.space.test:insert{1, 'first tuple'} --- - [1, 'first tuple'] ... box.space.test:insert{2, 'second tuple'} --- - [2, 'second tuple'] ... 
box.space.test:insert{3, 'third tuple'} --- - [3, 'third tuple'] ... box.space.test:insert{4, 'fourth tuple'} --- - [4, 'fourth tuple'] ... check log line for 'ignoring a gap in LSN' 'ignoring a gap in LSN' exists in server log box.space.test:select{} --- - - [1, 'first tuple'] - [2, 'second tuple'] - [4, 'fourth tuple'] ... tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/missing.result0000664000000000000000000000117013306560010021466 0ustar rootrootspace = box.schema.space.create('test') --- ... index = box.space.test:create_index('primary') --- ... box.space.test:insert{1, 'first tuple'} --- - [1, 'first tuple'] ... box.space.test:insert{2, 'second tuple'} --- - [2, 'second tuple'] ... box.space.test:insert{3, 'third tuple'} --- - [3, 'third tuple'] ... box.space.test:delete{1} --- - [1, 'first tuple'] ... box.space.test:delete{2} --- - [2, 'second tuple'] ... box.space.test:delete{3} --- - [3, 'third tuple'] ... check log line for 'ignoring a gap in LSN' 'ignoring a gap in LSN' exists in server log box.space.test:select{} --- - [] ... box.space.test:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/misc.result0000664000000000000000000000133413306560010020752 0ustar rootroot # xlog file must exist after inserts. space = box.schema.space.create('tweedledum') --- ... .xlog exists index = space:create_index('primary', { type = 'hash' }) --- ... # a new xlog must be opened after regular termination. box.space.tweedledum:insert{3, 'third tuple'} --- - [3, 'third tuple'] ... # An xlog file with one record during recovery. box.space.tweedledum:insert{4, 'fourth tuple'} --- - [4, 'fourth tuple'] ... box.space.tweedledum:insert{5, 'Unfinished record'} --- - [5, 'Unfinished record'] ... .xlog exists after kill -9 corrupt .xlog exists after start box.snapshot() --- - ok ... box.space._schema:insert({'test', 'test'}) --- - ['test', 'test'] ... box.snapshot() --- - ok ... 
.snap.inprogress is ignored tarantool_1.9.1.26.g63eb81e3c/test/xlog-py/empty.test.py0000664000000000000000000000151713306560010021250 0ustar rootrootimport os import yaml from os.path import abspath # # This test used to pass: # # Empty xlog.inprogress must be deleted during recovery # # it doesn't pass any more since an xlog with missing header # can't be parsed by xdir_scan, thus we do nothing about it. # server.stop() server.deploy() lsn = str(yaml.load(server.admin("box.info.lsn", silent=True))[0]) path = os.path.join(server.vardir, server.name) filename = os.path.join(path, lsn.zfill(20) + ".xlog") f = open(filename, "w+") f.close() server.start() server.stop() if os.access(filename, os.F_OK): print ".xlog exists" # the server has started but is crippled since it # can't override an existing file server.start() server.admin("_ = box.schema.space.create('test')") os.unlink(filename) server.admin("_ = box.schema.space.create('test')") server.admin("box.space.test:drop()") tarantool_1.9.1.26.g63eb81e3c/test/box-tap/0000775000000000000000000000000013306565107016545 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/box-tap/trigger_atexit.test.lua0000775000000000000000000000132413306560010023236 0ustar rootroot#!/usr/bin/env tarantool -- vim: set ft=lua : -- see https://github.com/tarantool/tarantool/issues/583 tap = require 'tap' fio = require 'fio' log = require 'log' tempdir = fio.tempdir() box.cfg { wal_dir = tempdir, memtx_dir = tempdir, vinyl_dir = tempdir, log = fio.pathjoin(tempdir, 'tarantool.log'), memtx_memory = 104857600 -- for small systems } local function test_replace(old_tuple, new_tuple) end box.schema.space.create('abc') box.space.abc:create_index('pk', { type = 'tree' }) box.space.abc:on_replace(test_replace) cleanup_list = fio.glob(fio.pathjoin(tempdir, '*')) for _, file in pairs(cleanup_list) do fio.unlink(file) end fio.rmdir(tempdir) print("done") os.exit(0) 
tarantool_1.9.1.26.g63eb81e3c/test/box-tap/suite.ini0000664000000000000000000000012713306560010020363 0ustar rootroot[default] core = app description = Database tests with #! using TAP is_parallel = True tarantool_1.9.1.26.g63eb81e3c/test/box-tap/trigger_atexit.result0000664000000000000000000000000513306560010023005 0ustar rootrootdone tarantool_1.9.1.26.g63eb81e3c/test/box-tap/cfg.test.lua0000775000000000000000000003472413306565107021002 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local test = tap.test('cfg') local socket = require('socket') local fio = require('fio') local uuid = require('uuid') local msgpack = require('msgpack') test:plan(89) -------------------------------------------------------------------------------- -- Invalid values -------------------------------------------------------------------------------- test:is(type(box.cfg), 'function', 'box is not started') local function invalid(name, val) local status, result = pcall(box.cfg, {[name]=val}) test:ok(not status and result:match('Incorrect'), 'invalid '..name) end invalid('memtx_min_tuple_size', 7) invalid('memtx_min_tuple_size', 0) invalid('memtx_min_tuple_size', -1) invalid('memtx_min_tuple_size', 1048281) invalid('memtx_min_tuple_size', 1000000000) invalid('replication', '//guest@localhost:3301') invalid('replication_timeout', -1) invalid('replication_timeout', 0) invalid('replication_sync_lag', -1) invalid('replication_sync_lag', 0) invalid('replication_connect_timeout', -1) invalid('replication_connect_timeout', 0) invalid('replication_connect_quorum', -1) invalid('wal_mode', 'invalid') invalid('rows_per_wal', -1) invalid('listen', '//!') invalid('log', ':') invalid('log', 'syslog:xxx=') invalid('log_level', 'unknown') invalid('vinyl_read_threads', 0) invalid('vinyl_write_threads', 1) invalid('vinyl_range_size', 0) invalid('vinyl_page_size', 0) invalid('vinyl_run_count_per_level', 0) invalid('vinyl_run_size_ratio', 1) invalid('vinyl_bloom_fpr', 0) 
invalid('vinyl_bloom_fpr', 1.1) test:is(type(box.cfg), 'function', 'box is not started') -------------------------------------------------------------------------------- -- All box members must raise an exception on access if box.cfg{} wasn't called -------------------------------------------------------------------------------- local box = require('box') local function testfun() return type(box.space) end local status, result = pcall(testfun) test:ok(not status and result:match('Please call box.cfg{}'), 'exception on unconfigured box') status, result = pcall(box.error, box.error.ILLEGAL_PARAMS, 'xx') test:ok(result.code == box.error.ILLEGAL_PARAMS, "box.error without box.cfg") status, result = pcall(function() return box.runtime.info() end) test:ok(status and type(result) == 'table', "box.runtime without box.cfg") status, result = pcall(function() return box.index.EQ end) test:ok(status and type(result) == 'number', "box.index without box.cfg") status, result = pcall(function() return box.NULL end) test:ok(status and result == msgpack.NULL, "box.NULL without box.cfg") status, result = pcall(box.session.id) test:ok(status, "box.session without box.cfg") status, result = pcall(box.tuple.new, {1, 2, 3}) test:ok(status and result[1] == 1, "box.tuple without box.cfg") os.execute("rm -rf vinyl") box.cfg{ log="tarantool.log", memtx_memory=104857600, wal_mode = "", -- "" means default value } -- gh-678: vinyl engine creates vinyl dir with empty 'snapshot' file test:isnil(io.open("vinyl", 'r'), 'vinyl_dir is not auto-created') status, result = pcall(testfun) test:ok(status and result == 'table', 'configured box') -------------------------------------------------------------------------------- -- Dynamic configuration -------------------------------------------------------------------------------- invalid('log_level', 'unknown') -------------------------------------------------------------------------------- -- gh-534: Segmentation fault after two bad wal_mode settings 
-------------------------------------------------------------------------------- test:is(box.cfg.wal_mode, "write", "wal_mode default value") -- box.cfg{wal_mode = ""} -- test:is(box.cfg.wal_mode, "write", "wal_mode default value") -- box.cfg{wal_mode = "none"} -- test:is(box.cfg.wal_mode, "none", "wal_mode change") -- "" or NULL resets option to default value -- box.cfg{wal_mode = ""} -- test:is(box.cfg.wal_mode, "write", "wal_mode default value") -- box.cfg{wal_mode = "none"} -- test:is(box.cfg.wal_mode, "none", "wal_mode change") -- box.cfg{wal_mode = require('msgpack').NULL} -- test:is(box.cfg.wal_mode, "write", "wal_mode default value") test:is(box.cfg.force_recovery, false, "force_recovery default value") box.cfg{force_recovery=true} test:is(box.cfg.force_recovery, true, "force_recovery new value") test:is(box.cfg.wal_dir_rescan_delay, 2, "wal_dir_rescan_delay default value") box.cfg{wal_dir_rescan_delay=0.2} test:is(box.cfg.wal_dir_rescan_delay, 0.2, "wal_dir_rescan_delay new value") test:is(box.cfg.too_long_threshold, 0.5, "too_long_threshold default value") box.cfg{too_long_threshold=0.1} test:is(box.cfg.too_long_threshold , 0.1, "too_long_threshold new value") -------------------------------------------------------------------------------- -- gh-246: Read only mode -------------------------------------------------------------------------------- test:is(box.cfg.read_only, false, "read_only default value") box.cfg{read_only = true} test:is(box.cfg.read_only, true, "read_only new value") local status, reason = pcall(function() box.space._schema:insert({'read_only', 'test'}) end) test:ok(not status and box.error.last().code == box.error.READONLY, "read_only = true") box.cfg{read_only = false} local status, reason = pcall(function() box.space._schema:insert({'read_only', 'test'}) end) test:ok(status, "read_only = false") -- gh-2663: box.cfg() parameter to set the number of coio threads box.cfg({ worker_pool_threads = 1}) test:is(box.cfg.worker_pool_threads, 1, 
'worker_pool_threads') local tarantool_bin = arg[-1] local PANIC = 256 function run_script(code) local dir = fio.tempdir() local script_path = fio.pathjoin(dir, 'script.lua') local script = fio.open(script_path, {'O_CREAT', 'O_WRONLY', 'O_APPEND'}, tonumber('0777', 8)) script:write(code) script:write("\nos.exit(0)") script:close() local cmd = [[/bin/sh -c 'cd "%s" && "%s" ./script.lua 2> /dev/null']] local res = os.execute(string.format(cmd, dir, tarantool_bin)) fio.rmdir(dir) return res end -- gh-715: Cannot switch to/from 'fsync' code = [[ box.cfg{ log="tarantool.log", wal_mode = 'fsync' }; ]] test:is(run_script(code), 0, 'wal_mode fsync') code = [[ box.cfg{ wal_mode = 'fsync' }; box.cfg { wal_mode = 'fsync' }; ]] test:is(run_script(code), 0, 'wal_mode fsync -> fsync') code = [[ box.cfg{ wal_mode = 'fsync' }; box.cfg { wal_mode = 'none'} ]] test:is(run_script(code), PANIC, 'wal_mode fsync -> write is not supported') code = [[ box.cfg{ wal_mode = 'write' }; box.cfg { wal_mode = 'fsync'} ]] test:is(run_script(code), PANIC, 'wal_mode write -> fsync is not supported') -- gh-684: Inconsistency with box.cfg and directories local code; code = [[ box.cfg{ work_dir='invalid' } ]] test:is(run_script(code), PANIC, 'work_dir is invalid') -- gh-2664: vinyl_dir is checked on the first use code = [[ box.cfg{ vinyl_dir='invalid' } ]] test:is(run_script(code), 0, 'vinyl_dir is invalid') code = [[ box.cfg{ memtx_dir='invalid' } ]] test:is(run_script(code), PANIC, 'snap_dir is invalid') code = [[ box.cfg{ wal_dir='invalid' } ]] test:is(run_script(code), PANIC, 'wal_dir is invalid') test:is(box.cfg.log_nonblock, true, "log_nonblock default value") code = [[ box.cfg{log_nonblock = false } os.exit(box.cfg.log_nonblock == false and 0 or 1) ]] test:is(run_script(code), 0, "log_nonblock new value") -- gh-3048: box.cfg must not crash on invalid log configuration code = [[ box.cfg{ log = '/' } ]] test:is(run_script(code), PANIC, 'log is invalid') -- box.cfg { listen = xx } local path = 
'./tarantool.sock' os.remove(path) box.cfg{ listen = 'unix/:'..path } local s = socket.tcp_connect('unix/', path) test:isnt(s, nil, "dynamic listen") if s then s:close() end box.cfg{ listen = '' } s = socket.tcp_connect('unix/', path) test:isnil(s, 'dynamic listen') if s then s:close() end os.remove(path) path = './tarantool.sock' local path2 = './tarantool2.sock' local s = socket.tcp_server('unix/', path, function () end) os.execute('ln ' .. path .. ' ' .. path2) s:close() box.cfg{ listen = 'unix/:'.. path2} s = socket.tcp_connect('unix/', path2) test:isnt(s, nil, "reuse unix socket") if s then s:close() end box.cfg{ listen = '' } os.remove(path2) code = " box.cfg{ listen='unix/:'" .. path .. "' } " run_script(code) test:isnil(fio.stat(path), "delete socket at exit") -- -- gh-1499: AUTH raises ER_LOADING if wal_mode is 'none' -- code = [[ box.cfg{wal_mode = 'none', listen='unix/:./tarantool.sock' } box.once("bootstrap", function() box.schema.user.create("test", { password = '123' }) end) local conn = require('net.box').connect('unix/:./tarantool.sock', { user = 'test', password = '123' }) if not conn:ping() then os.exit(1) end os.exit(0) ]] test:is(run_script(code), 0, "wal_mode none and ER_LOADING") -- -- gh-1962: incorrect replication source -- status, reason = pcall(box.cfg, {replication="3303,3304"}) test:ok(not status and reason:match("Incorrect"), "invalid replication") -- -- gh-1778 vinyl page can't be greather than range -- code = [[ box.cfg{vinyl_page_size = 4 * 1024 * 1024, vinyl_range_size = 2 * 1024 * 1024} os.exit(0) ]] test:is(run_script(code), PANIC, "page size greather than range") code = [[ box.cfg{vinyl_page_size = 1 * 1024 * 1024, vinyl_range_size = 2 * 1024 * 1024} os.exit(0) ]] test:is(run_script(code), 0, "page size less than range") code = [[ box.cfg{vinyl_page_size = 2 * 1024 * 1024, vinyl_range_size = 2 * 1024 * 1024} os.exit(0) ]] test:is(run_script(code), 0, "page size equal with range") -- test memtx options upgrade code = [[ 
box.cfg{slab_alloc_arena = 0.2, slab_alloc_minimal = 16, slab_alloc_maximal = 64 * 1024} os.exit(box.cfg.memtx_memory == 214748364 and box.cfg.memtx_min_tuple_size == 16 and box.cfg.memtx_max_tuple_size == 64 * 1024 and 0 or 1) ]] test:is(run_script(code), 0, "upgrade memtx memory options") code = [[ box.cfg{slab_alloc_arena = 0.2, slab_alloc_minimal = 16, slab_alloc_maximal = 64 * 1024, memtx_memory = 214748364, memtx_min_tuple_size = 16, memtx_max_tuple_size = 64 * 1024} os.exit(0) ]] test:is(run_script(code), 0, "equal new and old memtx options") code = [[ box.cfg{slab_alloc_arena = 0.2, slab_alloc_minimal = 16, slab_alloc_maximal = 64 * 1024, memtx_memory = 107374182, memtx_min_tuple_size = 16, memtx_max_tuple_size = 64 * 1024} os.exit(0) ]] test:is(run_script(code), PANIC, "different new and old memtx_memory") code = [[ box.cfg{slab_alloc_arena = 0.2, slab_alloc_minimal = 16, slab_alloc_maximal = 64 * 1024, memtx_memory = 214748364, memtx_min_tuple_size = 32, memtx_max_tuple_size = 64 * 1024} os.exit(0) ]] test:is(run_script(code), PANIC, "different new and old min_tuple_size") code = [[ box.cfg{snap_dir = 'tmp1', memtx_dir = 'tmp2'} os.exit(0) ]] test:is(run_script(code), PANIC, "different memtx_dir") code = [[ box.cfg{panic_on_wal_error = true} os.exit(box.cfg.force_recovery == false and 0 or 1) ]] test:is(run_script(code), 0, "panic_on_wal_error") code = [[ box.cfg{panic_on_snap_error = false} os.exit(box.cfg.force_recovery == true and 0 or 1) ]] test:is(run_script(code), 0, "panic_on_snap_error") code = [[ box.cfg{snapshot_period = 100, snapshot_count = 4} os.exit(box.cfg.checkpoint_interval == 100 and box.cfg.checkpoint_count == 4 and 0 or 1) ]] test:is(run_script(code), 0, "setup checkpoint params") code = [[ box.cfg{snapshot_period = 100, snapshot_count = 4} box.cfg{snapshot_period = 150, snapshot_count = 8} os.exit(box.cfg.checkpoint_interval == 150 and box.cfg.checkpoint_count == 8 and 0 or 1) ]] test:is(run_script(code), 0, "update checkpoint 
params") -- -- test wal_max_size option -- code = [[ digest = require'digest' fio = require'fio' box.cfg{wal_max_size = 1024} _ = box.schema.space.create('test'):create_index('pk') data = digest.urandom(1024) cnt1 = #fio.glob(fio.pathjoin(box.cfg.wal_dir, '*.xlog')) for i = 0, 9 do box.space.test:replace({1, data}) end cnt2 = #fio.glob(fio.pathjoin(box.cfg.wal_dir, '*.xlog')) os.exit(cnt1 < cnt2 - 8 and 0 or 1) ]] test:is(run_script(code), 0, "wal_max_size xlog rotation") -- -- gh-2872 bootstrap is aborted if vinyl_dir contains vylog files -- left from previous runs -- vinyl_dir = fio.tempdir() run_script(string.format([[ box.cfg{vinyl_dir = '%s'} s = box.schema.space.create('test', {engine = 'vinyl'}) s:create_index('pk') os.exit(0) ]], vinyl_dir)) code = string.format([[ box.cfg{vinyl_dir = '%s'} os.exit(0) ]], vinyl_dir) test:is(run_script(code), PANIC, "bootstrap from non-empty vinyl_dir") fio.rmdir(vinyl_dir) -- -- gh-2278 vinyl does not support DDL/DML if wal_mode = 'none' -- dir = fio.tempdir() cfg = string.format("wal_dir = '%s', memtx_dir = '%s', vinyl_dir = '%s'", dir, dir, dir) run_script(string.format([[ box.cfg{%s} s = box.schema.space.create('test', {engine = 'vinyl'}) s:create_index('primary') os.exit(0) ]], cfg)) code = string.format([[ box.cfg{wal_mode = 'none', %s} s = box.space.test ok = true ok = ok and not pcall(s.create_index, s, 'secondary') ok = ok and not pcall(s.index.primary.drop, s.index.primary) ok = ok and not pcall(s.drop, s) ok = ok and not pcall(s.truncate, s) ok = ok and not pcall(s.insert, s, {1}) ok = ok and pcall(s.select, s) os.exit(ok and 0 or 1) ]], cfg) test:is(run_script(code), 0, "wal_mode none -> vinyl DDL/DML is not supported") fio.rmdir(dir) -- -- Invalid values of instance_uuid or replicaset_uuid. 
-- code = [[ box.cfg{instance_uuid = 'uuid'} ]] test:is(run_script(code), PANIC, 'invalid instance_uuid') code = [[ box.cfg{replicaset_uuid = 'uuid'} ]] test:is(run_script(code), PANIC, 'invalid replicaset_uuid') -- -- Instance and replica set UUID are set to the configured values. -- code = [[ instance_uuid = tostring(require('uuid').new()) box.cfg{instance_uuid = instance_uuid} os.exit(instance_uuid == box.info.uuid and 0 or 1) ]] test:is(run_script(code), 0, "check instance_uuid") code = [[ replicaset_uuid = tostring(require('uuid').new()) box.cfg{replicaset_uuid = replicaset_uuid} os.exit(replicaset_uuid == box.info.cluster.uuid and 0 or 1) ]] test:is(run_script(code), 0, "check replicaset_uuid") -- -- Configuration fails on instance or replica set UUID mismatch. -- dir = fio.tempdir() instance_uuid = uuid.new() replicaset_uuid = uuid.new() code_fmt = [[ box.cfg{memtx_dir = '%s', instance_uuid = '%s', replicaset_uuid = '%s'} os.exit(0) ]] code = string.format(code_fmt, dir, instance_uuid, replicaset_uuid) run_script(code) code = string.format(code_fmt, dir, uuid.new(), replicaset_uuid) test:is(run_script(code), PANIC, "instance_uuid mismatch") code = string.format(code_fmt, dir, instance_uuid, uuid.new()) test:is(run_script(code), PANIC, "replicaset_uuid mismatch") fio.rmdir(dir) test:check() os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/box-tap/session.test.lua0000775000000000000000000001755113306560010021711 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local test = tap.test('session') local fiber = require('fiber') box.cfg{ listen = os.getenv('LISTEN'); log="tarantool.log"; } local uri = require('uri').parse(box.cfg.listen) local HOST, PORT = uri.host or 'localhost', uri.service session = box.session space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) test:plan(53) --- --- Check that Tarantool creates ADMIN session for #! 
script --- test:ok(session.exists(session.id()), "session is created") test:isnil(session.peer(session.id()), "session.peer") local ok, err = pcall(session.exists) test:is(err, "session.exists(sid): bad arguments", "exists bad args #1") ok, err = pcall(session.exists, 1, 2, 3) test:is(err, "session.exists(sid): bad arguments", "exists bad args #2") test:ok(not session.exists(1234567890), "session doesn't exist") -- check session.id() test:ok(session.id() > 0, "id > 0") failed = false local f = fiber.create(function() failed = session.id() == 0 end) while f:status() ~= 'dead' do fiber.sleep(0) end test:ok(not failed, "session not broken") test:is(session.peer(), session.peer(session.id()), "peer() == peer(id())") -- check on_connect/on_disconnect triggers function noop() end test:is(type(session.on_connect(noop)), "function", "type of trigger noop on_connect") test:is(type(session.on_disconnect(noop)), "function", "type of trigger noop on_disconnect") -- check it's possible to reset these triggers function fail() error('hear') end test:is(type(session.on_connect(fail, noop)), "function", "type of trigger fail, noop on_connect") test:is(type(session.on_disconnect(fail, noop)), "function", "type of trigger fail, noop on_disconnect") -- check on_connect/on_disconnect argument count and type test:is(type(session.on_connect()), "table", "type of trigger on_connect, no args") test:is(type(session.on_disconnect()), "table", "type of trigger on_disconnect, no args") ok, err = pcall(session.on_connect, function() end, function() end) test:is(err,"trigger reset: Trigger is not found", "on_connect trigger not found") ok, err = pcall(session.on_disconnect, function() end, function() end) test:is(err,"trigger reset: Trigger is not found", "on_disconnect trigger not found") ok, err = pcall(session.on_connect, 1, 2) test:is(err, "trigger reset: incorrect arguments", "on_connect bad args #1") ok, err = pcall(session.on_disconnect, 1, 2) test:is(err, "trigger reset: incorrect 
arguments", "on_disconnect bad args #1") ok, err = pcall(session.on_connect, 1) test:is(err, "trigger reset: incorrect arguments", "on_connect bad args #2") ok, err = pcall(session.on_disconnect, 1) test:is(err, "trigger reset: incorrect arguments", "on_disconnect bad args #2") -- use of nil to clear the trigger session.on_connect(nil, fail) session.on_disconnect(nil, fail) -- check how connect/disconnect triggers work local peer_name = "peer_name" function inc() active_connections = active_connections + 1 end function dec() active_connections = active_connections - 1 end function peer() peer_name = box.session.peer() end net = { box = require('net.box') } test:is(type(session.on_connect(inc)), "function", "type of trigger inc on_connect") test:is(type(session.on_disconnect(dec)), "function", "type of trigger dec on_disconnect") test:is(type(session.on_disconnect(peer)), "function", "type of trigger peer on_disconnect") active_connections = 0 c = net.box.connect(HOST, PORT) while active_connections < 1 do fiber.sleep(0.001) end test:is(active_connections, 1, "active_connections after 1 connection") c1 = net.box.connect(HOST, PORT) while active_connections < 2 do fiber.sleep(0.001) end test:is(active_connections, 2, "active_connections after 2 connection") c:close() c1:close() while active_connections > 0 do fiber.sleep(0.001) end test:is(active_connections, 0, "active_connections after closing") test:isnil(peer_name, "peer_name after closing") session.on_connect(nil, inc) session.on_disconnect(nil, dec) session.on_disconnect(nil, peer) -- write audit trail of connect/disconnect into a space function audit_connect() box.space['tweedledum']:insert{session.id()} end function audit_disconnect() box.space['tweedledum']:delete{session.id()} end test:is(type(session.on_connect(audit_connect)), "function", "type of trigger audit_connect on_connect") test:is(type(session.on_disconnect(audit_disconnect)), "function", "type of trigger audit_connect on_disconnect") 
box.schema.user.grant('guest', 'read,write,execute', 'universe') a = net.box.connect(HOST, PORT) test:ok(a:eval('return space:get{box.session.id()}[1] == session.id()'), "eval get_id") test:ok(a:eval('return session.sync() ~= 0'), "eval sync") a:close() -- cleanup session.on_connect(nil, audit_connect) session.on_disconnect(nil, audit_disconnect) test:is(active_connections, 0, "active connections after other triggers") space:drop() test:is(session.uid(), 1, "uid == 1") test:is(session.user(), "admin", "user is admin") test:is(session.sync(), 0, "sync constant") box.schema.user.revoke('guest', 'read,write,execute', 'universe') -- audit permission in on_connect/on_disconnect triggers box.schema.user.create('tester', { password = 'tester' }) on_connect_user = nil on_disconnect_user = nil function on_connect() on_connect_user = box.session.effective_user() end function on_disconnect() on_disconnect_user = box.session.effective_user() end _ = box.session.on_connect(on_connect) _ = box.session.on_disconnect(on_disconnect) local conn = require('net.box').connect("tester:tester@" ..HOST..':'..PORT) -- Triggers must not lead to privilege escalation ok, err = pcall(function () conn:eval('box.space._user:select()') end) test:ok(not ok, "check access") conn:close() conn = nil while not on_disconnect_user do fiber.sleep(0.001) end -- Triggers are executed with admin permissions test:is(on_connect_user, 'admin', "check trigger permissions, on_connect") test:is(on_disconnect_user, 'admin', "check trigger permissions, on_disconnect") box.session.on_connect(nil, on_connect) box.session.on_disconnect(nil, on_disconnect) -- check Session privilege ok, err = pcall(function() net.box.connect("tester:tester@" ..HOST..':'..PORT) end) test:ok(ok, "session privilege") box.schema.user.revoke('tester', 'session', 'universe') conn = net.box.connect("tester:tester@" ..HOST..':'..PORT) test:is(conn.state, "error", "session privilege state") test:ok(conn.error:match("Session"), "sesssion 
privilege errmsg") ok, err = pcall(box.session.su, "user1") test:ok(not ok, "session.su on revoked") box.schema.user.drop('tester') local test_run = require('test_run') local inspector = test_run.new() test:is( inspector:cmd("create server session with script='box/tiny.lua'\n"), true, 'instance created' ) test:is( inspector:cmd('start server session'), true, 'instance started' ) local uri = inspector:eval('session', 'box.cfg.listen')[1] conn = net.box.connect(uri) test:ok(conn:eval("return box.session.exists(box.session.id())"), "remote session exist check") test:isnt(conn:eval("return box.session.peer(box.session.id())"), nil, "remote session peer check") test:ok(conn:eval("return box.session.peer() == box.session.peer(box.session.id())"), "remote session peer check") -- gh-2994 session uid vs session effective uid test:is(session.euid(), 1, "session.uid") test:is(session.su("guest", session.uid), 1, "session.uid from su is admin") test:is(session.su("guest", session.euid), 0, "session.euid from su is guest") local id = conn:eval("return box.session.uid()") test:is(id, 0, "session.uid from netbox") id = conn:eval("return box.session.euid()") test:is(id, 0, "session.euid from netbox") --box.session.su("admin") conn:eval("box.session.su(\"admin\", box.schema.create_space, \"sp1\")") local sp = conn:eval("return box.space._space.index.name:get{\"sp1\"}[2]") test:is(sp, 1, "effective ddl owner") conn:close() inspector:cmd('stop server session with cleanup=1') session = nil os.exit(test:check() == true and 0 or -1) tarantool_1.9.1.26.g63eb81e3c/test/box-tap/net.box.test.lua0000775000000000000000000000163513306560010021577 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local test = tap.test('netbox') local net_box = require('net.box') local test_run = require('test_run') local inspector = test_run.new() test:plan(5) -- create tarantool instance test:is( inspector:cmd("create server second with script='box/box.lua'\n"), true, 'instance created' ) 
test:is( inspector:cmd('start server second'), true, 'instance started' ) -- check that net.box is correct without box.cfg{} local uri = inspector:eval('second', 'box.cfg.listen')[1] local conn = net_box.connect(uri) test:is(conn:is_connected(), true, 'connected to instance') test:is(conn.space ~= nil, true, 'space exists') -- gh-1814: Segfault if using `net.box` before `box.cfg` start test:ok(not pcall(function() conn.space._vspace:insert() end), "error handling") -- cleanup conn:close() inspector:cmd('stop server second with cleanup=1') test:check() os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/box-tap/trigger_yield.result0000664000000000000000000000001013306560010022611 0ustar rootrootdone: 0 tarantool_1.9.1.26.g63eb81e3c/test/box-tap/cfgup.test.lua0000775000000000000000000000141113306560010021316 0ustar rootroot#!/usr/bin/env tarantool -- Testing configuration updates local tap = require('tap') local test = tap.test('cfg') test:plan(3) config = { pid_file = '1.pid', log="tarantool.log" } local status = pcall(box.cfg, config) test:ok(status, 'initial config') -- Assigning the same value to immutable key which is effectively a NOP, -- expecting success status = pcall(box.cfg, {pid_file = config.pid_file}) test:ok(status, 'assign the same value to immutable key (pid_file)') -- Now change it to a different value, must fail local result status, result = pcall(box.cfg, {pid_file = 'Z'..config.pid_file}) test:ok(not status and result:match("Can't set option 'pid_file' dynamically"), 'attempt to change immutable key (pid_file)') test:check() os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/box-tap/session.storage.test.lua0000775000000000000000000000401713306560010023345 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local test = tap.test('session') local net_box = require("net.box") local test_run = require('test_run') local inspector = test_run.new() test:plan(15) test:is( inspector:cmd("create server session_storage with script='box/tiny.lua'\n"), 
true, 'instance created' ) test:is( inspector:cmd('start server session_storage'), true, 'instance started' ) local uri = inspector:eval('session_storage', 'box.cfg.listen')[1] conn1 = net_box.connect(uri) conn1:eval("session = box.session") test:is(conn1:eval("return type(session.id())"), "number", "session.id()") test:ok(conn1:eval("return session.unknown_field == nil"), "no field") test:is(conn1:eval("return type(session.storage)"), "table", "storage") conn1:eval("session.storage.abc = 'cde'") test:is(conn1:eval("return session.storage.abc"), "cde", "written to storage") conn1:eval("all = getmetatable(session).aggregate_storage") test:ok(conn1:eval("return all[session.id()].abc == 'cde'"), "check meta table") conn2 = net_box.connect(uri) test:is(conn2:eval("return type(session.storage)"), "table", "storage") test:isnil(conn2:eval("return type(session.storage.abc)"), "empty storage") conn2:eval("session.storage.abc = 'def'") test:ok(conn2:eval("return session.storage.abc == 'def'"), "written to storage") test:ok(conn1:eval("return session.storage.abc == 'cde'"), "first conn storage") test:ok(conn1:eval("return all[session.id()].abc == 'cde'"), "check first conn metatable") test:ok(conn2:eval("return all[session.id()].abc == 'def'"), "check second conn metatable") tres1 = conn1:eval("t1 = {} for k, v in pairs(all) do table.insert(t1, v.abc) end return t1") conn1:close() conn2:close() conn3 = net_box.connect(uri) tres2 = conn3:eval("t2 = {} for k, v in pairs(all) do table.insert(t2, v.abc) end return t2") table.sort(tres1) table.sort(tres2) test:is(tres1[1], "cde", "check after closing") test:is(#tres2, 0, "check after closing") conn3:close() inspector:cmd('stop server session_storage with cleanup=1') os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/box-tap/auth.test.lua0000775000000000000000000001310013306560010021151 0ustar rootroot#!/usr/bin/env tarantool local session = box.session local fiber = require('fiber') local tap = require('tap') local netbox = 
require('net.box') local urilib = require('uri') box.cfg { listen = os.getenv('LISTEN'); log="tarantool.log"; memtx_memory=100*1024*1024; } local uri = urilib.parse(box.cfg.listen) local HOST, PORT = uri.host or 'localhost', uri.service local test = tap.test("auth") test:plan(42) local space = box.schema.space.create('tweedledum') local index = space:create_index('primary', { type = 'hash' }) box.schema.user.create('test', {password='pass'}) box.schema.user.grant('test', 'read,write,execute', 'universe') box.schema.user.create('test2', {password=''}) box.schema.user.grant('test2', 'read,write,execute', 'universe') box.schema.user.grant('guest', 'read,write,execute', 'universe') -- check how authentication trigger work local msg, counter, succeeded function auth_trigger(user_name) counter = counter + 1 end -- get user name as argument function auth_trigger2(user_name) msg = 'user ' .. user_name .. ' is there' end -- get user name and result of authentication as arguments function auth_trigger3(user_name, success) succeeded = success end -- set trigger local handle = session.on_auth(auth_trigger) -- check handle test:is(type(handle), "function", "handle is a function") -- check triggers list test:is(#session.on_auth(), 1, "the number of triggers") local handle2 = session.on_auth(auth_trigger2) test:is(type(handle2), "function", "handle is a function") test:is(#session.on_auth(), 2, "the number of triggers") local handle3 = session.on_auth(auth_trigger3) test:is(type(handle3), "function", "handle is a function") test:is(#session.on_auth(), 3, "the number of triggers") -- check connection with authentication(counter must be incremented) counter = 0 succeeded = false local conn = netbox.connect('test:pass@' .. HOST .. ':' .. 
PORT) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user test is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil -- check failing authentication counter = 0 succeeded = true local conn = netbox.connect('test:pas@' .. HOST .. ':' .. PORT) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user test is there", "on_auth username param") test:ok(not succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false local conn = netbox.connect('test2:@' .. HOST .. ':' .. PORT) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user test2 is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false local conn = netbox.connect('test2@' .. HOST .. ':' .. PORT) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user test2 is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false local conn = netbox.connect(HOST, PORT, {user='test2'}) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user test2 is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false local conn = netbox.connect('guest@' .. HOST .. ':' .. PORT) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user guest is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false local conn = netbox.connect('guest:@' .. HOST .. ':' .. 
PORT) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user guest is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false conn = netbox.connect(HOST, PORT, {user='guest', password=''}) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user guest is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil counter = 0 succeeded = false local conn = netbox.connect(HOST, PORT, {user='guest'}) while counter < 1 do fiber.sleep(0.001) end test:is(counter, 1, "on_auth has been fired once") test:is(msg, "user guest is there", "on_auth username param") test:ok(succeeded, "on_auth success param") conn:close() conn = nil -- check guest connection without authentication(no increment) counter = 0 succeeded = false conn = netbox.connect(HOST, PORT) conn:ping() test:is(counter, 0, "on_auth hasn't been fired") test:ok(not succeeded, "on_auth not successed param") conn:close() conn = nil test:isnil(session.on_auth(nil, auth_trigger), "removal returns nil") test:isnil(session.on_auth(nil, auth_trigger2), "removal returns nil") test:isnil(session.on_auth(nil, auth_trigger3), "removal returns nil") test:is(#session.on_auth(), 0, "the number of triggers"); test:is(session.uid(), 1, "box.session.uid()") test:is(session.user(), "admin", "box.session.user()") test:is(session.sync(), 0, "box.session.sync()") -- cleanup space:drop() box.schema.user.revoke('guest', 'read,write,execute', 'universe') box.schema.user.revoke('test', 'read,write,execute', 'universe') box.schema.user.drop('test', { if_exists = true}) box.schema.user.drop("test2", { if_exists = true}) os.exit(test:check() == true and 0 or -1) tarantool_1.9.1.26.g63eb81e3c/test/box-tap/trigger_yield.test.lua0000775000000000000000000000122413306560010023045 0ustar rootroot#!/usr/bin/env tarantool 
box.cfg{ pid_file = "box.pid", memtx_memory = 104857600, log = "tarantool.log" } fiber = require('fiber') box.schema.space.create('test', {if_not_exists = true}) box.space.test:create_index('pk', {if_not_exists = true}) box.space.test:truncate() function fail() fiber.sleep(0.0001) error("fail") end box.space.test:on_replace(fail) function insert() box.space.test:auto_increment{fiber.id()} end fibers = {} for i = 1, 100 do table.insert(fibers, fiber.create(insert)) end for _,f in pairs(fibers) do while f:status() ~= 'dead' do fiber.sleep(0.0001) end end print('done: '..box.space.test:len()) os.exit() tarantool_1.9.1.26.g63eb81e3c/test/app-tap/0000775000000000000000000000000013306565107016535 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/app-tap/inspector.test.lua0000775000000000000000000000054413306560010022216 0ustar rootroot#!/usr/bin/env tarantool local socket = require('socket') test_run = require('test_run') inspector = test_run.new() print('create instance') print(inspector:cmd("create server replica with rpl_master=default, script='box/box.lua'\n")) print('start instance') print(inspector:cmd('start server replica')) inspector:cmd('stop server replica') os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/snapshot.test.lua0000775000000000000000000001000013306560010022033 0ustar rootroot#!/usr/bin/env tarantool local math = require('math') local fiber = require('fiber') local tap = require('tap') local ffi = require('ffi') local fio = require('fio') box.cfg{ log="tarantool.log", memtx_memory=107374182, rows_per_wal=5000} local test = tap.test("snapshot") test:plan(5) ------------------------------------------------------------------------------- -- gh-695: Avoid overwriting tuple data with information necessary for smfree() ------------------------------------------------------------------------------- local continue_snapshoting = true local snap_chan = fiber.channel() local function noise() fiber.name('noise-'..fiber.id()) while continue_snapshoting do 
if box.space.test:len() < 300000 then local value = string.rep('a', math.random(255)+1) box.space.test:auto_increment{fiber.time64(), value} end fiber.sleep(0) end end local function purge() fiber.name('purge-'..fiber.id()) while continue_snapshoting do local min = box.space.test.index.primary:min() if min ~= nil then box.space.test:delete{min[1]} end fiber.sleep(0) end end local function snapshot(lsn) fiber.name('snapshot') while continue_snapshoting do local new_lsn = box.info.lsn if new_lsn ~= lsn then lsn = new_lsn; pcall(box.snapshot) end fiber.sleep(0.001) end snap_chan:put("!") end box.once("snapshot.test", function() box.schema.space.create('test') box.space.test:create_index('primary') end) fiber.create(noise) fiber.create(purge) fiber.create(noise) fiber.create(purge) fiber.create(noise) fiber.create(purge) fiber.create(noise) fiber.create(purge) fiber.create(snapshot, box.info.lsn) fiber.sleep(0.3) continue_snapshoting = false snap_chan:get() test:ok(true, 'gh-695: avoid overwriting tuple data necessary for smfree()') ------------------------------------------------------------------------------- -- gh-1185: Crash in matras_touch in snapshot_daemon.test ------------------------------------------------------------------------------- local s1 = box.schema.create_space('test1', { engine = 'memtx'}) local i1 = s1:create_index('test', { type = 'tree', parts = {1, 'unsigned'} }) local s2 = box.schema.create_space('test2', { engine = 'memtx'}) local i2 = s2:create_index('test', { type = 'tree', parts = {1, 'unsigned'} }) for i = 1,1000 do s1:insert{i, i, i} end fiber.create(function () box.snapshot() end) fiber.sleep(0) s2:insert{1, 2, 3} s2:update({1}, {{'+', 2, 2}}) s1:drop() s2:drop() test:ok(true, "gh-1185: no crash in matras_touch") ------------------------------------------------------------------------------- -- gh-1084: box.snapshot() aborts if the server is out of file descriptors 
------------------------------------------------------------------------------- local function gh1094() local msg = "gh-1094: box.snapshot() doesn't abort if out of file descriptors" local nfile local ulimit = io.popen('ulimit -n') if ulimit then nfile = tonumber(ulimit:read()) ulimit:close() end if not nfile or nfile > 1024 then -- descriptors limit is to high, just skip test test:ok(true, msg) return end local files = {} for i = 1,nfile do files[i] = fio.open('/dev/null') if files[i] == nil then break end end local sf, mf = pcall(box.snapshot) for i, f in pairs(files) do f:close() end local ss, ms = pcall(box.snapshot) test:ok(not sf and ss, msg) end gh1094() -- gh-2045 - test snapshot if nothing changed -- we wan't check snapshot update time because it may take long time to wait box.snapshot() box.snapshot() box.snapshot() test:ok(true, 'No crash for second snapshot w/o any changes') files = fio.glob(box.cfg.memtx_dir .. '/*.snap') table.sort(files) fio.unlink(files[#files]) box.snapshot() test:ok(fio.stat(files[#files]) ~= nil, "Snapshot was recreated") box.space.test:drop() test:check() os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/http_client.skipcond0000664000000000000000000000022013306560010022564 0ustar rootrootimport os # Travis CI fails to start httpd.py for unknown reason if os.environ.get('TRAVIS', False): self.skip = 1 # vim: set ft=python : tarantool_1.9.1.26.g63eb81e3c/test/app-tap/pwd.test.lua0000775000000000000000000000120313306560010020773 0ustar rootroot#!/usr/bin/env tarantool local pwd = require("pwd") local test = require("tap").test("pwd") test:plan(6) local base_group = pwd.getgr() local base_user = pwd.getpw() test:is_deeply(pwd.getpw(base_user.id), base_user, "checking user by id") test:is_deeply(pwd.getpw(base_user.name), base_user, "checking user by name") test:is_deeply(pwd.getgr(base_group.id), base_group, "checking group by id") test:is_deeply(pwd.getgr(base_group.name), base_group, "checking group by name") 
test:ok(#pwd.getpwall() > 0, "check output of getpwall") test:ok(#pwd.getgrall() > 0, "check output of getgrall") os.exit(test:check() == true and 0 or 1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/suite.ini0000664000000000000000000000021413306560010020350 0ustar rootroot[default] core = app description = application server tests (TAP) lua_libs = lua/require_mod.lua lua/serializer_test.lua is_parallel = True tarantool_1.9.1.26.g63eb81e3c/test/app-tap/http_client.test.lua0000775000000000000000000003532013306560010022525 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local client = require('http.client') local json = require('json') local test = tap.test("curl") local fiber = require('fiber') local socketlib = require('socket') local os = require('os') local TARANTOOL_SRC_DIR = os.getenv("TARANTOOL_SRC_DIR") or "../.." test:diag("TARANTOOL_SRC_DIR=%s", TARANTOOL_SRC_DIR) local function merge(...) local res = {} for i = 1, select('#', ...) do local t = select(i, ...) for k, v in pairs(t) do res[k] = v end end return res end local function start_server(test, sock_family, sock_addr) test:diag("starting HTTP server on %s...", sock_addr) local arg, url, opts if sock_family == 'AF_INET' then arg = string.format("--inet %s", sock_addr) url = string.format("http://%s/", sock_addr) opts = {} elseif sock_family == 'AF_UNIX' then arg = string.format("--unix %s", sock_addr) url = "http://localhost/" opts = {unix_socket = sock_addr} else error(string.format('invalid socket family: %s', sock_family)) end local cmd = string.format("%s/test/app-tap/httpd.py %s", TARANTOOL_SRC_DIR, arg) local server = io.popen(cmd) test:is(server:read("*l"), "heartbeat", "server started") test:diag("trying to connect to %s", url) local r for i=1,10 do r = client.get(url, merge(opts, {timeout = 0.01})) if r.status == 200 then break end fiber.sleep(0.01) end test:is(r.status, 200, "connection is ok") if r.status ~= 200 then os.exit(1) end return server, url, opts end local function 
stop_server(test, server) test:diag("stopping HTTP server") server:close() end local function test_http_client(test, url, opts) test:plan(9) test:isnil(rawget(_G, 'http'), "global namespace is not polluted"); test:isnil(rawget(_G, 'http.client'), "global namespace is not polluted"); local r = client.get(url, opts) test:is(r.status, 200, 'simple 200') test:is(r.proto[1], 1, 'proto major http 1.1') test:is(r.proto[2], 1, 'proto major http 1.1') test:ok(r.body:match("hello") ~= nil, "body") test:ok(tonumber(r.headers["content-length"]) > 0, "content-length > 0") test:is(client.get("http://localhost:0/").status, 595, 'bad url') local r = client.request('GET', url, nil, opts) test:is(r.status, 200, 'request') end local function test_cancel_and_timeout(test, url, opts) test:plan(2) local ch = fiber.channel(1) local http = client:new() local f = fiber.create(function() ch:put(http:get(url, opts)) end) f:cancel() local r = ch:get() test:ok(r.status == 408 and string.find(r.reason, "Timeout"), "After cancel fiber timeout is returned") local r = http:get(url, merge(opts, {timeout = 0.0001})) test:ok(r.status == 408 and string.find(r.reason, "Timeout"), "Timeout check") end local function test_post_and_get(test, url, opts) test:plan(19) local http = client.new() test:ok(http ~= nil, "client is created") local headers = { header1 = "1", header2 = "2" } local my_body = { key = "value" } local json_body = json.encode(my_body) local responses = {} local data = {a = 'b'} headers['Content-Type'] = 'application/json' local fibers = 7 local ch = fiber.channel(fibers) opts = merge(opts, {headers = headers}) fiber.create(function() responses.good_get = http:get(url, opts) ch:put(1) end) fiber.create(function() responses.get2 = http:get(url .. 
"abc", opts) ch:put(1) end) fiber.create(function() responses.good_post = http:post(url, json_body, opts) ch:put(1) end) fiber.create(function() responses.empty_post = http:post(url, nil, opts) ch:put(1) end) fiber.create(function() responses.good_put = http:put(url, json_body, opts) ch:put(1) end) fiber.create(function() responses.bad_get = http:get(url .. 'this/page/not/exists', opts) ch:put(1) end) fiber.create(function() responses.absent_get = http:get(url .. 'absent', opts) ch:put(1) end) for i=1,fibers do ch:get() end local r = responses.good_get test:is(r.status, 200, "GET: default http code page exists") test:is(r.body, "hello world", "GET: default right body") r = responses.get2 test:is(r.status, 200, "GET: http code page exists") test:is(r.body, "abc", "GET: right body") r = responses.absent_get test:is(r.status, 500, "GET: absent method http code page exists") test:is(r.body, "No such method", "GET: absent method right body") r = responses.empty_post test:is(r.status, 200, "POST: good status") test:ok(r.headers['header1'] == headers.header1 and r.headers['header2'] == headers.header2, "POST: good headers") test:isnil(r.body, "POST: empty body") r = responses.good_post test:is(r.status, 200, "POST: good status") test:ok(r.headers['header1'] == headers.header1 and r.headers['header2'] == headers.header2, "POST: good headers") test:is(r.body, json_body, "POST: body") r = responses.good_put test:is(r.status, 200, "PUT: good status") test:ok(r.headers['header'] == headers.header and r.headers['header2'] == headers.header2, "PUT: good headers") r = responses.bad_get test:is(r.status, 404, "GET: http page not exists") test:isnt(r.body:len(), 0, "GET: not empty body page not exists") test:ok(string.find(r.body, "Not Found"), "GET: right body page not exists") local st = http:stat() test:ok(st.sockets_added == st.sockets_deleted and st.active_requests == 0, "stats checking") end local function test_errors(test) test:plan(3) local http = client:new() local status, 
err = pcall(http.get, http, "htp://mail.ru") test:ok(not status and string.find(json.encode(err), "Unsupported protocol"), "GET: exception on bad protocol") status, err = pcall(http.post, http, "htp://mail.ru", "") test:ok(not status and string.find(json.encode(err), "Unsupported protocol"), "POST: exception on bad protocol") local r = http:get("http://do_not_exist_8ffad33e0cb01e6a01a03d00089e71e5b2b7e9930dfcba.ru") test:is(r.status, 595, "GET: response on bad url") end local function test_headers(test, url, opts) test:plan(15) local http = client:new() local r = http:get(url .. 'headers', opts) test:is(type(r.headers["set-cookie"]), 'string', "set-cookie check") test:ok(r.headers["set-cookie"]:match("likes=cheese"), "set-cookie check") test:ok(r.headers["set-cookie"]:match("age = 17"), "set-cookie check") test:is(r.headers["content-type"], "application/json", "content-type check") test:is(r.headers["my_header"], "value1,value2", "other header check") test:is(r.cookies["likes"][1], "cheese", "cookie value check") test:ok(r.cookies["likes"][2][1]:match("Expires"), "cookie option check") test:ok(r.cookies["likes"][2][3]:match("HttpOnly"), "cookie option check") test:is(r.cookies["age"][1], "17", "cookie value check") test:is(#r.cookies["age"][2], 1, "cookie option check") test:is(r.cookies["age"][2][1], "Secure", "cookie option check") test:ok(r.cookies["good_name"] ~= nil , "cookie name check") test:ok(r.cookies["bad@name"] == nil , "cookie name check") test:ok(r.cookies["badname"] == nil , "cookie name check") test:ok(r.cookies["badcookie"] == nil , "cookie name check") end local function test_special_methods(test, url, opts) test:plan(14) local http = client.new() local responses = {} local fibers = 7 local ch = fiber.channel(fibers) local _ fiber.create(function() responses.patch_data = http:patch(url, "{\"key\":\"val\"}", opts) ch:put(1) end) fiber.create(function() responses.delete_data = http:delete(url, opts) ch:put(1) end) fiber.create(function() 
responses.options_data = http:options(url, opts) ch:put(1) end) fiber.create(function() responses.head_data = http:head(url, opts) ch:put(1) end) fiber.create(function() responses.trace_data = http:trace(url, opts) ch:put(1) end) fiber.create(function() responses.connect_data = http:connect(url, opts) ch:put(1) end) fiber.create(function() responses.custom_data = http:request("CUSTOM", url, nil, opts) ch:put(1) end) for i = 1, fibers do ch:get() end test:is(responses.patch_data.status, 200, "HTTP:PATCH request") test:ok(json.decode(responses.patch_data.body).key == "val", "HTTP:PATCH request content") test:is(responses.delete_data.status, 200, "HTTP:DELETE request") test:ok(responses.delete_data.headers.method == "DELETE", "HTTP:DELETE request content") test:is(responses.options_data.status, 200, "HTTP:OPTIONS request") test:ok(responses.options_data.headers.method == "OPTIONS", "HTTP:OPTIONS request content") test:is(responses.head_data.status, 200, "HTTP:HEAD request code") test:ok(responses.head_data.headers.method == "HEAD", "HTTP:HEAD request content") test:is(responses.connect_data.status, 200, "HTTP:CONNECT request") test:ok(responses.connect_data.headers.method == "CONNECT", "HTTP:OPTIONS request content") test:is(responses.trace_data.status, 200, "HTTP:TRACE request") test:ok(responses.trace_data.headers.method == "TRACE", "HTTP:TRACE request content") test:is(responses.custom_data.status, 400, "HTTP:CUSTOM request") test:ok(responses.custom_data.headers.method == "CUSTOM", "HTTP:CUSTOM request content") end local function test_concurrent(test, url, opts) test:plan(3) local http = client.new() local headers = { my_header = "1", my_header2 = "2" } local my_body = { key = "value" } local json_body = json.encode(my_body) local num_test = 10 local num_load = 10 local curls = { } local headers = { } -- Init [[ for i = 1, num_test do headers["My-header" .. 
i] = "my-value" end for i = 1, num_test do table.insert(curls, { url = url, http = client.new(), body = json.encode({stat = {"ok"}, info = {"ok"} }), headers = headers, connect_timeout = 5, timeout = 5 }) end -- ]] local ch = fiber.channel(num_test * 2 * num_load) -- Start test -- Creating concurrent clients for i=1,num_test do local obj = curls[i] for j=1,num_load do fiber.create(function() local r = obj.http:post(obj.url, obj.body, merge(opts, { headers = obj.headers, keepalive_idle = 30, keepalive_interval = 60, connect_timeout = obj.connect_timeout, timeout = obj.timeout, })) ch:put(r.status) end) fiber.create(function() local r = obj.http:get(obj.url, merge(opts, { headers = obj.headers, keepalive_idle = 30, keepalive_interval = 60, connect_timeout = obj.connect_timeout, timeout = obj.timeout, })) ch:put(r.status) end) end end local ok_sockets_added = true local ok_active = true local ok_timeout = true local ok_req = true -- Join test local rest = num_test while true do local ticks = 0 for i = 1, num_load do local obj = curls[i] -- checking that stats in concurrent are ok if obj.http ~= nil and obj.http:stat().active_requests == 0 then local st = obj.http:stat() if st.sockets_added ~= st.sockets_deleted then ok_sockets_added = false rest = 0 end if st.active_requests ~= 0 then ok_active = false rest = 0 end -- waiting requests to finish before kill the client local r = ch:get() if r ~= 200 then ok_req = false end r = ch:get() if r ~= 200 then print(r) end end curls[i].http = nil end rest = rest - 1 if rest <= 0 then break end end test:is(ok_req, true, "All requests are ok") test:ok(ok_sockets_added, "free sockets") test:ok(ok_active, "no active requests") end function run_tests(test, sock_family, sock_addr) test:plan(9) local server, url, opts = start_server(test, sock_family, sock_addr) test:test("http.client", test_http_client, url, opts) test:test("cancel and timeout", test_cancel_and_timeout, url, opts) test:test("basic http post/get", test_post_and_get, 
url, opts) test:test("errors", test_errors) test:test("headers", test_headers, url, opts) test:test("special methods", test_special_methods, url, opts) if sock_family == 'AF_UNIX' and jit.os ~= "Linux" then -- -- BSD-based operating systems (including OS X) will fail -- connect() to a Unix domain socket with ECONNREFUSED -- if the queue of pending connections is full. Hence the -- "concurrent" test, which opens a lot of connections -- simultaneously, cannot run on those platforms. Linux, -- however, is fine - instead of returning ECONNEREFUSED -- it will suspend connect() until backlog is processed. -- test:skip("concurrent") else test:test("concurrent", test_concurrent, url, opts) end stop_server(test, server) end test:plan(2) test:test("http over AF_INET", function(test) local s = socketlib('AF_INET', 'SOCK_STREAM', 0) s:bind('127.0.0.1', 0) local host = s:name().host local port = s:name().port s:close() run_tests(test, 'AF_INET', string.format("%s:%d", host, port)) end) test:test("http over AF_UNIX", function(test) local path = os.tmpname() os.remove(path) local status = pcall(client.get, 'http://localhost/', {unix_socket = path}) if not status then -- Unix domain sockets are not supported, skip the test. return end run_tests(test, 'AF_UNIX', path) os.remove(path) end) os.exit(test:check() == true and 0 or -1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/init_script.result0000664000000000000000000000232013306565107022321 0ustar rootroot-- -- Access to box.cfg from init script -- box.cfg 1 background:false 2 checkpoint_count:2 3 checkpoint_interval:3600 4 coredump:false 5 force_recovery:false 6 hot_standby:false 7 listen:port 8 log:tarantool.log 9 log_format:plain 10 log_level:5 11 log_nonblock:true 12 memtx_dir:. 
13 memtx_max_tuple_size:1048576 14 memtx_memory:107374182 15 memtx_min_tuple_size:16 16 pid_file:box.pid 17 read_only:false 18 readahead:16320 19 replication_connect_timeout:30 20 replication_sync_lag:10 21 replication_timeout:1 22 rows_per_wal:500000 23 slab_alloc_factor:1.05 24 too_long_threshold:0.5 25 vinyl_bloom_fpr:0.05 26 vinyl_cache:134217728 27 vinyl_dir:. 28 vinyl_max_tuple_size:1048576 29 vinyl_memory:134217728 30 vinyl_page_size:8192 31 vinyl_range_size:1073741824 32 vinyl_read_threads:1 33 vinyl_run_count_per_level:2 34 vinyl_run_size_ratio:3.5 35 vinyl_timeout:60 36 vinyl_write_threads:2 37 wal_dir:. 38 wal_dir_rescan_delay:2 39 wal_max_size:268435456 40 wal_mode:write 41 worker_pool_threads:4 -- -- Test insert from detached fiber -- --- - [1, 2, 4, 8] ... -- -- Test insert from init script -- [1, 2, 4, 8] [2, 4, 8, 16] [4, 8, 16] -- -- Check that require function(math.floor) reachable in the init script -- 0 0 1 25 tarantool_1.9.1.26.g63eb81e3c/test/app-tap/logger_pipe.test.lua0000775000000000000000000000016613306560010022504 0ustar rootroot#!/usr/bin/env tarantool os.setenv('TEST_VAR', '48') box.cfg { log = '|echo $TEST_VAR; cat > /dev/null' } os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/inspector.result0000664000000000000000000000005113306560010021763 0ustar rootrootcreate instance true start instance true tarantool_1.9.1.26.g63eb81e3c/test/app-tap/tap.result0000664000000000000000000000502713306560010020551 0ustar rootrootTAP version 13 1..32 ok - true ok - extra information is not printed on success not ok - extra printed using yaml only on failure --- state: some userful information to debug on failure details: a table argument formatted using yaml.encode() ... not ok - failed ok - test marked as ok and skipped # skip ok - tonumber(48) is 48 ok - 0xff is not 64 not ok - 1 is not 1 --- unexpected: 1 got: 1 ... ok - nil is nil not ok - 48 is nil --- expected: nil got: 48 ... 
ok - 10 is a number ok - 0 is also a number ok - "blabla" is string not ok - 48 is string --- expected: string got: number ... not ok - nil is string --- expected: string got: nil ... ok - true is boolean not ok - 1 is boolean --- expected: boolean got: number ... ok - {} is a table not ok - udata --- expected: userdata got: nil ... not ok - udata --- expected: userdata got: userdata ... ok - udata not ok - cdata type --- expected: ctype got: string ... not ok - cdata type --- expected: ctype got: number ... ok - cdata type not ok - cdata type --- expected: ctype got: ctype ... # subtest 1 1..2 ok - true ok - true # subtest 1: end ok - subtest 1 1..1 ok - true in subtest # hello from subtest ok - subtest 2 # 1 level 1..1 # 2 level 1..1 # 3 level 1..1 # 4 level 1..1 # 5 level 1..1 ok - ok # 5 level: end ok - 5 level # 4 level: end ok - 4 level # 3 level: end ok - 3 level # 2 level: end ok - 2 level # 1 level: end ok - 1 level # bad plan 1..3 ok - true # bad plan: end not ok - bad plan --- planned: 3 run: 1 ... # failed subtest 1..1 not ok - failed subtest # failed subtest: end not ok - failed subtests --- planned: 1 failed: 1 ... # is_deeply 1..6 ok - 1 and 1 ok - abc and abc ok - empty tables ok - {1} and {1} not ok - {1} and {2} --- path: //1 expected: 2 got: 1 ... not ok - {1,2,{3,4}} and {1,2,{3,5}} --- path: //3/2 expected: 5 got: 4 ... # is_deeply: end not ok - failed subtests --- planned: 6 failed: 2 ... 
# like 1..2 ok - like(abcde, cd) ok - unlike(abcde, acd) # like: end ok - like # failed subtest: 15 tarantool_1.9.1.26.g63eb81e3c/test/app-tap/string.test.lua0000775000000000000000000001314313306565107021531 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local test = tap.test("string extensions") test:plan(5) test:test("split", function(test) test:plan(10) -- testing basic split (works over gsplit) test:ok(not pcall(string.split, "", ""), "empty separator") test:ok(not pcall(string.split, "a", ""), "empty separator") test:is_deeply((""):split("z"), {""}, "empty split") test:is_deeply(("a"):split("a"), {"", ""}, "split self") test:is_deeply( (" 1 2 3 "):split(), {"1", "2", "3"}, "complex split on empty separator" ) test:is_deeply( (" 1 2 3 "):split(" "), {"", "1", "2", "", "3", "", ""}, "complex split on space separator" ) test:is_deeply( (" 1 2 \n\n\n\r\t\n3 "):split(), {"1", "2", "3"}, "complex split on empty separator" ) test:is_deeply( ("a*bb*c*ddd"):split("*"), {"a", "bb", "c", "ddd"}, "another * separator" ) test:is_deeply( ("dog:fred:bonzo:alice"):split(":", 2), {"dog", "fred", "bonzo:alice"}, "testing max separator" ) test:is_deeply( ("///"):split("/"), {"", "", "", ""}, "testing splitting on one char" ) end) -- gh-2214 - string.ljust()/string.rjust() Lua API test:test("ljust/rjust/center", function(test) test:plan(18) test:is(("help"):ljust(0), "help", "ljust, length 0, do nothing") test:is(("help"):rjust(0), "help", "rjust, length 0, do nothing") test:is(("help"):center(0), "help", "center, length 0, do nothing") test:is(("help"):ljust(3), "help", "ljust, length 3, do nothing") test:is(("help"):rjust(3), "help", "rjust, length 3, do nothing") test:is(("help"):center(3), "help", "center, length 3, do nothing") test:is(("help"):ljust(5), "help ", "ljust, length 5, one extra charachter") test:is(("help"):rjust(5), " help", "rjust, length 5, one extra charachter") test:is(("help"):center(5), "help ", "center, length 5, one extra 
charachter") test:is(("help"):ljust(6), "help ", "ljust, length 6, two extra charachters") test:is(("help"):rjust(6), " help", "rjust, length 6, two extra charachters") test:is(("help"):center(6), " help ", "center, length 6, two extra charachters") test:is(("help"):ljust(6, '.'), "help..", "ljust, length 6, two extra charachters, custom fill char") test:is(("help"):rjust(6, '.'), "..help", "rjust, length 6, two extra charachters, custom fill char") test:is(("help"):center(6, '.'), ".help.", "center, length 6, two extra charachters, custom fill char") local errmsg = "%(char expected, got string%)" local _, err = pcall(function() ("help"):ljust(6, "XX") end) test:ok(err and err:match(errmsg), "wrong params") _, err = pcall(function() ("help"):rjust(6, "XX") end) test:ok(err and err:match(errmsg), "wrong params") _, err = pcall(function() ("help"):center(6, "XX") end) test:ok(err and err:match(errmsg), "wrong params") end) -- gh-2215 - string.startswith()/string.endswith() Lua API test:test("startswith/endswith", function(test) test:plan(21) test:ok((""):startswith(""), "empty+empty startswith") test:ok((""):endswith(""), "empty+empty endswith") test:ok(not (""):startswith("a"), "empty+non-empty startswith") test:ok(not (""):endswith("a"), "empty+non-empty endswith") test:ok(("a"):startswith(""), "non-empty+empty startswith") test:ok(("a"):endswith(""), "non-empty+empty endswith") test:ok(("12345"):startswith("123") , "simple startswith") test:ok(("12345"):startswith("123", 1, 5) , "startswith with good begin/end") test:ok(("12345"):startswith("123", 1, 3) , "startswith with good begin/end") test:ok(("12345"):startswith("123", -5, 3) , "startswith with good negative begin/end") test:ok(("12345"):startswith("123", -5, -3) , "startswith with good negative begin/end") test:ok(not ("12345"):startswith("123", 2, 5) , "bad startswith with good begin/end") test:ok(not ("12345"):startswith("123", 1, 2) , "bad startswith with good begin/end") test:ok(("12345"):endswith("345") 
, "simple endswith") test:ok(("12345"):endswith("345", 1, 5) , "endswith with good begin/end") test:ok(("12345"):endswith("345", 3, 5) , "endswith with good begin/end") test:ok(("12345"):endswith("345", -3, 5) , "endswith with good begin/end") test:ok(("12345"):endswith("345", -3, -1) , "endswith with good begin/end") test:ok(not ("12345"):endswith("345", 1, 4) , "bad endswith with good begin/end") test:ok(not ("12345"):endswith("345", 4, 5) , "bad endswith with good begin/end") local _, err = pcall(function() ("help"):startswith({'n', 1}) end) test:ok(err and err:match("%(string expected, got table%)"), "wrong params") end) test:test("hex", function(test) test:plan(2) test:is(string.hex("hello"), "68656c6c6f", "hex non-empty string") test:is(string.hex(""), "", "hex empty string") end) test:test("strip", function(test) test:plan(6) local str = " hello hello " test:is(string.len(string.strip(str)), 11, "strip") test:is(string.len(string.lstrip(str)), 12, "lstrip") test:is(string.len(string.rstrip(str)), 13, "rstrip") local _, err = pcall(string.strip, 12) test:ok(err and err:match("%(string expected, got number%)")) _, err = pcall(string.lstrip, 12) test:ok(err and err:match("%(string expected, got number%)")) _, err = pcall(string.rstrip, 12) test:ok(err and err:match("%(string expected, got number%)")) end ) os.exit(test:check() == true and 0 or -1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/clock.test.lua0000775000000000000000000000110513306560010021275 0ustar rootroot#!/usr/bin/env tarantool clock = require("clock") test = require("tap").test("csv") test:plan(10) test:ok(clock.realtime() > 0, "realtime") test:ok(clock.thread() > 0, "thread") test:ok(clock.monotonic() > 0, "monotonic") test:ok(clock.proc() > 0, "proc") test:ok(clock.realtime64() > 0, "realtime64") test:ok(clock.thread64() > 0, "thread64") test:ok(clock.monotonic64() > 0, "monotonic64") test:ok(clock.proc64() > 0, "proc64") test:ok(clock.monotonic() <= clock.monotonic(), "time is monotonic") 
test:ok(math.abs(clock.realtime() - os.time()) < 2, "clock.realtime ~ os.time") tarantool_1.9.1.26.g63eb81e3c/test/app-tap/tarantoolctl.test.lua0000775000000000000000000003527513306565107022743 0ustar rootroot#!/usr/bin/env tarantool local ffi = require('ffi') local fio = require('fio') local tap = require('tap') local uuid = require('uuid') local yaml = require('yaml') local errno = require('errno') local fiber = require('fiber') local test_run = require('test_run').new() local function recursive_rmdir(path) path = fio.abspath(path) local path_content = fio.glob(fio.pathjoin(path, '*')) for _, val in ipairs(fio.glob(fio.pathjoin(path, '.*'))) do if fio.basename(val) ~= '.' and fio.basename(val) ~= '..' then table.insert(path_content, val) end end for _, file in ipairs(path_content) do local stat = fio.stat(file) if stat:is_dir() then recursive_rmdir(file) else if fio.unlink(file) == false then print(string.format('!!! failed to unlink file "%s"', file)) print(string.format('!!! [errno %s]: %s', errno(), errno.strerror())) end end end if fio.rmdir(path) == false then print(string.format('!!! failed to rmdir path "%s"', file)) print(string.format('!!! [errno %s]: %s', errno(), errno.strerror())) end end ffi.cdef[[ typedef int32_t pid_t; int kill(pid_t pid, int sig); /* For execution with background == false */ pid_t fork(void); int open(const char *pathname, int flags, int mode); int close(int fd); int dup2(int oldfd, int newfd); int execvp(const char *file, char *const argv[]); ]] -- background checks tctlcfg_code = [[default_cfg = { pid_file = '.', wal_dir = '.', memtx_dir = '.' , vinyl_dir = '.', log = '.', background = true, } instance_dir = require('fio').abspath('.')]] local function cleanup_instance(dir, name) local pid = io.open(fio.pathjoin(dir, name .. 
".pid")) if pid ~= nil then pid = tonumber(pid:read("*a")) end if pid ~= nil then ffi.C.kill(pid, 9) end end local function create_script(dir, name, code) local path = fio.pathjoin(dir, name) local script = fio.open(path, {'O_CREAT', 'O_WRONLY'}, tonumber('0777', 8)) assert(script ~= nil, ("assertion: Failed to open '%s' for writing"):format(path)) script:write(code) script:close() return path end local function run_command(dir, command) local suffix = uuid.str():sub(1, 8) local fstdout = fio.pathjoin(dir, 'stdout-' .. suffix) local fstderr = fio.pathjoin(dir, 'stderr-' .. suffix) local line = [[/bin/sh -c 'cd "%s" && %s >"%s" 2>"%s"']] line = line:format(dir, command, fstdout, fstderr) local res = os.execute(line) local fstdout_e, fstderr_e = io.open(fstdout):read('*a'), io.open(fstderr):read('*a') fio.unlink(fstdout); fio.unlink(fstderr); return res/256, fstdout_e, fstderr_e end local function tctl_wait(dir, name) if name then local path = fio.pathjoin(dir, name .. '.control') while not fio.stat(path) do fiber.sleep(0.01) end ::again:: while true do local stat, nb = pcall(require('net.box').new, path, { wait_connected = true, console = true }) if stat == false then fiber.sleep(0.01) goto again else break end local stat, msg = pcall(nb.eval, nb, 'require("fiber").time()') if stat == false then fiber.sleep(0.01) else break end end end end local function tctl_command(dir, cmd, args, name) local pid = nil if not fio.stat(fio.pathjoin(dir, '.tarantoolctl')) then create_script(dir, '.tarantoolctl', tctlcfg_code) end local command = ('tarantoolctl %s %s'):format(cmd, args) return run_command(dir, command) end local function check_ok(test, dir, cmd, args, e_res, e_stdout, e_stderr) local res, stdout, stderr = tctl_command(dir, cmd, args) stdout, stderr = stdout or '', stderr or '' local ares = true if (e_res ~= nil) then local val = test:is(res, e_res, ("check '%s' command status for '%s'"):format(cmd,args)) ares = ares and val end if e_stdout ~= nil then local val = 
test:is(res, e_res, ("check '%s' stdout for '%s'"):format(cmd,args)) ares = ares and val if not val then print(("Expected to find '%s' in '%s'"):format(e_stdout, stdout)) end end if e_stderr ~= nil then local val = test:ok(stderr:find(e_stderr), ("check '%s' stderr for '%s'"):format(cmd,args)) ares = ares and val if not val then print(("Expected to find '%s' in '%s'"):format(e_stderr, stderr)) end end if not ares then print(res, stdout, stderr) end end local test = tap.test('tarantoolctl') test:plan(6) -- basic start/stop test -- must be stopped afterwards do local dir = fio.tempdir() local code = [[ box.cfg{memtx_memory = 104857600} ]] create_script(dir, 'script.lua', code) local status, err = pcall(function() test:test("basic test", function(test_i) test_i:plan(16) check_ok(test_i, dir, 'start', 'script', 0, nil, "Starting instance") tctl_wait(dir, 'script') check_ok(test_i, dir, 'status', 'script', 0, nil, "is running") check_ok(test_i, dir, 'start', 'script', 1, nil, "is already running") check_ok(test_i, dir, 'status', 'script', 0, nil, "is running") check_ok(test_i, dir, 'stop', 'script', 0, nil, "Stopping") check_ok(test_i, dir, 'status', 'script', 1, nil, "is stopped") check_ok(test_i, dir, 'stop', 'script', 0, nil, "is not running") check_ok(test_i, dir, 'status', 'script', 1, nil, "is stopped" ) end) end) cleanup_instance(dir, 'script') recursive_rmdir(dir) if status == false then print(("Error: %s"):format(err)) os.exit() end end -- check sandboxes do local dir = fio.tempdir() -- bad code local code = [[ box.cfg{ ]] create_script(dir, 'bad_script.lua', code) local code = [[ box.cfg{memtx_memory = 104857600} ]] create_script(dir, 'good_script.lua', code) local status, err = pcall(function() test:test("basic test for bad script", function(test_i) test_i:plan(8) check_ok(test_i, dir, 'start', 'script', 1, nil, 'Instance script is not found') check_ok(test_i, dir, 'start', 'bad_script', 1, nil, 'unexpected symbol near') check_ok(test_i, dir, 'start', 
'good_script', 0) tctl_wait(dir, 'good_script') -- wait here check_ok(test_i, dir, 'eval', 'good_script bad_script.lua', 3, nil, 'Error while reloading config:') check_ok(test_i, dir, 'stop', 'good_script', 0) end) end) cleanup_instance(dir, 'good_script') recursive_rmdir(dir) if status == false then print(("Error: %s"):format(err)) os.exit() end end -- check answers in case of eval do local dir = fio.tempdir() -- bad code local code = [[ error('help'); return 1]] create_script(dir, 'bad_script.lua', code) local code = [[ return 1]] create_script(dir, 'ok_script.lua', code) local code = [[ box.cfg{memtx_memory = 104857600} box.once('help', function() end)]] create_script(dir, 'good_script.lua', code) local status, err = pcall(function() test:test("check answers in case of call", function(test_i) test_i:plan(6) check_ok(test_i, dir, 'start', 'good_script', 0) tctl_wait(dir, 'good_script') check_ok(test_i, dir, 'eval', 'good_script bad_script.lua', 3, nil, 'Error while reloading config') check_ok(test_i, dir, 'eval', 'good_script ok_script.lua', 0, '---\n- 1\n...', nil) check_ok(test_i, dir, 'stop', 'good_script', 0) end) end) cleanup_instance(dir, 'good_script') recursive_rmdir(dir) if status == false then print(("Error: %s"):format(err)) os.exit() end end -- check basic help do local dir = fio.tempdir() local function test_help(test, dir, cmd, e_stderr) local desc = dir and 'with config' or 'without config' dir = dir or './' local res, stdout, stderr = run_command(dir, cmd) if e_stderr ~= nil then if not test:ok(stderr:find(e_stderr), ("check stderr of '%s' %s"):format(cmd, desc)) then print(("Expected to find '%s' in '%s'"):format(e_stderr, stderr)) end end end create_script(dir, '.tarantoolctl', tctlcfg_code) local status, err = pcall(function() test:test("check basic help", function(test_i) test_i:plan(4) test_help(test_i, nil, "tarantoolctl", "Usage:") test_help(test_i, nil, "tarantoolctl help", "Usage:") test_help(test_i, nil, "tarantoolctl --help", "Usage:") 
test_help(test_i, dir, "tarantoolctl", "Usage:") end) end) recursive_rmdir(dir) if status == false then print(("Error: %s"):format(err)) os.exit() end end -- check cat do local dir = fio.tempdir() local filler_code = [[ box.cfg{memtx_memory = 104857600, background=false} local space = box.schema.create_space("test") space:create_index("primary") space:insert({[1] = 1, [2] = 2, [3] = 3, [4] = 4}) space:replace({[1] = 2, [2] = 2, [3] = 3, [4] = 4}) space:delete({[1] = 1}) space:update({[1] = 2}, {[1] = {[1] = '\x3d', [2] = 3, [3] = 4}}) space:upsert({[1] = 3, [2] = 4, [3] = 5, [4] = 6}, {[1] = {[1] = '\x3d', [2] = 3, [3] = 4}}) space:upsert({[1] = 3, [2] = 4, [3] = 5, [4] = 6}, {[1] = {[1] = '\x3d', [2] = 3, [3] = 4}}) os.exit(0) ]] create_script(dir, 'filler.lua', filler_code) local function check_ctlcat_xlog(test, dir, args, delim, lc) local command_base = 'tarantoolctl cat filler/00000000000000000000.xlog' local desc = args and "cat + " .. args or "cat" args = args and " " .. args or "" local res, stdout, stderr = run_command(dir, command_base .. args) test:is(res, 0, desc .. " result") test:is(select(2, stdout:gsub(delim, delim)), lc, desc .. " line count") end local function check_ctlcat_snap(test, dir, args, delim, lc) local command_base = 'tarantoolctl cat filler/00000000000000000000.snap' local desc = args and "cat + " .. args or "cat" args = args and " " .. args or "" local res, stdout, stderr = run_command(dir, command_base .. args) test:is(res, 0, desc .. " result") test:is(select(2, stdout:gsub(delim, delim)), lc, desc .. 
" line count") end local status, err = pcall(function() test:test("fill and test cat output", function(test_i) test_i:plan(29) check_ok(test_i, dir, 'start', 'filler', 0) check_ctlcat_xlog(test_i, dir, nil, "---\n", 6) check_ctlcat_xlog(test_i, dir, "--space=512", "---\n", 6) check_ctlcat_xlog(test_i, dir, "--space=666", "---\n", 0) check_ctlcat_xlog(test_i, dir, "--show-system", "---\n", 9) check_ctlcat_xlog(test_i, dir, "--format=json", "\n", 6) check_ctlcat_xlog(test_i, dir, "--format=lua", "\n", 6) check_ctlcat_xlog(test_i, dir, "--from=3 --to=6 --format=json", "\n", 2) check_ctlcat_xlog(test_i, dir, "--from=3 --to=6 --format=json --show-system", "\n", 3) check_ctlcat_xlog(test_i, dir, "--from=6 --to=3 --format=json --show-system", "\n", 0) check_ctlcat_xlog(test_i, dir, "--from=3 --to=6 --format=json --show-system --replica 1", "\n", 3) check_ctlcat_xlog(test_i, dir, "--from=3 --to=6 --format=json --show-system --replica 1 --replica 2", "\n", 3) check_ctlcat_xlog(test_i, dir, "--from=3 --to=6 --format=json --show-system --replica 2", "\n", 0) check_ctlcat_snap(test_i, dir, "--space=280", "---\n", 17) check_ctlcat_snap(test_i, dir, "--space=288", "---\n", 40) end) end) recursive_rmdir(dir) if status == false then print(("Error: %s"):format(err)) os.exit() end end -- check play do local dir = fio.tempdir() local filler_code = [[ box.cfg{memtx_memory = 104857600, background=false} local space = box.schema.create_space("test") space:create_index("primary") space:insert({[1] = 1, [2] = 2, [3] = 3, [4] = 4}) space:replace({[1] = 2, [2] = 2, [3] = 3, [4] = 4}) space:delete({[1] = 1}) space:update({[1] = 2}, {[1] = {[1] = '\x3d', [2] = 3, [3] = 4}}) space:upsert({[1] = 3, [2] = 4, [3] = 5, [4] = 6}, {[1] = {[1] = '\x3d', [2] = 3, [3] = 4}}) space:upsert({[1] = 3, [2] = 4, [3] = 5, [4] = 6}, {[1] = {[1] = '\x3d', [2] = 3, [3] = 4}}) os.exit(0) ]] create_script(dir, 'filler.lua', filler_code) local remote_code = [[ box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 
104857600 } local space = box.schema.create_space("test") space:create_index("primary") box.schema.user.grant("guest", "read,write", "space", "test") require('console').listen(os.getenv("ADMIN")) ]] local remote_path = create_script(dir, 'remote.lua', remote_code) test_run:cmd(("create server remote with script='%s'"):format(remote_path)) test_run:cmd("start server remote") local port = tonumber( test_run:eval("remote", "return require('uri').parse(box.cfg.listen).service")[1] ) local command_base = ('tarantoolctl play localhost:%d filler/00000000000000000000.xlog'):format(port) local status, err = pcall(function() test:test("fill and test play output", function(test_i) test_i:plan(6) check_ok(test_i, dir, 'start', 'filler', 0) local lsn_before = test_run:get_lsn("remote", 1) test_i:is(lsn_before, 4, "check lsn before") local res, stdout, stderr = run_command(dir, command_base) test_i:is(res, 0, "execution result") test_i:is(test_run:get_lsn("remote", 1), 10, "check lsn after") local res, stdout, stderr = run_command(dir, command_base) test_i:is(res, 0, "execution result") test_i:is(test_run:get_lsn("remote", 1), 16, "check lsn after") end) end) test_run:cmd("stop server remote") test_run:cmd("cleanup server remote") recursive_rmdir(dir) if status == false then print(("Error: %s"):format(err)) os.exit() end end os.exit(test:check() == true and 0 or -1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/minimal.test.lua0000775000000000000000000000322213306560010021632 0ustar rootroot#!/usr/bin/env tarantool print('Hello, World!') -- -- Command-line argument handling -- local script = io.open('script-args.lua', 'w') script:write([[ -- Tarantool binary print('arg[-1]', arg[-1]:match('tarantool')) -- Script name print('arg[0] ', arg[0]) -- Command-line arguments print('arg', arg[1], arg[2], arg[3]) print('...', ...) 
]]) script:close() io.flush() os.execute("tarantool ./script-args.lua 1 2 3") -- -- LUA_PATH and LUA_CPATH argument handling -- local script = io.open('script-path.lua', 'w') script:write([[ print(package.path) os.exit(0) ]]) script:close() local script = io.open('script-cpath.lua', 'w') script:write([[ print(package.cpath) os.exit(0) ]]) script:close() io.flush() -- gh-1428: Ensure that LUA_PATH/LUA_CPATH have the same behaviour, as in -- LuaJIT/Lua local tap = require('tap').test('lua_path/lua_cpath') tap:plan(8) for _, env in ipairs({ {'LUA_PATH', 'script-path.lua', package.path}, {'LUA_CPATH', 'script-cpath.lua', package.cpath} }) do for _, res in ipairs({ {' is empty', '', ''}, {' isn\'t empty (without ";;")', 'bla-bla.lua', 'bla-bla.lua' }, {' isn\'t empty (without ";;")', 'bla-bla.lua;', 'bla-bla.lua;' }, {' isn\'t empty (with ";;")', 'bla-bla.lua;.*;;', 'bla-bla.lua;' .. env[3] }, }) do local cmd = table.concat({ ("%s='%s'"):format(env[1], res[2]), ('tarantool %s'):format(env[2]), }, ' ') local fh = io.popen(cmd) local rv = fh:read():gsub('-', '%%-'):gsub('+', '%%+'):gsub('?', '%%?') tap:like(res[3], rv, env[1] .. res[1]) fh:close() end end tap:check() tarantool_1.9.1.26.g63eb81e3c/test/app-tap/uri.test.lua0000775000000000000000000000456213306560010021013 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local uri = require('uri') local function test_parse(test) -- Tests for uri.parse() Lua bindings. -- Parser itself is tested by test/unit/uri unit test. test:plan(28) local u u = uri.parse("scheme://login:password@host:service".. 
"/path1/path2/path3?q1=v1&q2=v2&q3=v3:1|v3:2#fragment") test:is(u.scheme, "scheme", "scheme") test:is(u.login, "login", "login") test:is(u.password, "password", "password") test:is(u.host, "host", "host") test:is(u.service, "service", "service") test:is(u.path, "/path1/path2/path3", "path") test:is(u.query, "q1=v1&q2=v2&q3=v3:1|v3:2", "query") test:is(u.fragment, "fragment", "fragment") u = uri.parse("scheme://login:@host:service".. "/path1/path2/path3?q1=v1&q2=v2&q3=v3:1|v3:2#fragment") test:is(u.scheme, "scheme", "scheme") test:is(u.login, "login", "login") test:is(u.password, "", "password") test:is(u.host, "host", "host") test:is(u.service, "service", "service") test:is(u.path, "/path1/path2/path3", "path") test:is(u.query, "q1=v1&q2=v2&q3=v3:1|v3:2", "query") test:is(u.fragment, "fragment", "fragment") u = uri.parse('login@host') test:is(u.login, "login", "login") test:is(u.password, nil, "password") test:is(u.host, "host", "host") u = uri.parse('127.0.0.1') test:is(u.host, '127.0.0.1', 'ipv4') test:is(u.ipv4, '127.0.0.1', 'ipv4') u = uri.parse('[2a00:1148:b0ba:2016:12bf:48ff:fe78:fd10]') test:is(u.host, '2a00:1148:b0ba:2016:12bf:48ff:fe78:fd10', 'ipv6') test:is(u.ipv6, '2a00:1148:b0ba:2016:12bf:48ff:fe78:fd10', 'ipv6') u = uri.parse('/tmp/unix.sock') test:is(u.host, 'unix/', 'unix') test:is(u.service, '/tmp/unix.sock', 'unix') test:is(u.unix, '/tmp/unix.sock', 'unix') u = uri.parse("") test:isnil(u, "invalid uri", u) u = uri.parse("://") test:isnil(u, "invalid uri", u) end local function test_format(test) test:plan(3) local u = uri.parse("user:password@localhost") test:is(uri.format(u), "user@localhost", "password removed") test:is(uri.format(u, false), "user@localhost", "password removed") test:is(uri.format(u, true), "user:password@localhost", "password kept") end tap.test("uri", function(test) test:plan(2) test:test("parse", test_parse) test:test("format", test_format) end) 
tarantool_1.9.1.26.g63eb81e3c/test/app-tap/info.test.lua0000775000000000000000000000106313306560010021140 0ustar rootroot#!/usr/bin/env tarantool local tarantool = require('tarantool') require('tap').test("info", function(test) test:plan(8) test:like(tarantool.version, '^[1-9]', "version") test:ok(_TARANTOOL == tarantool.version, "version") test:isstring(tarantool.build.target, "build.target") test:isstring(tarantool.build.compiler, "build.compiler") test:isstring(tarantool.build.flags, "build.flags") test:isstring(tarantool.build.options, "build.options") test:ok(tarantool.uptime() > 0, "uptime") test:ok(tarantool.pid() > 0, "pid") end) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/msgpack.test.lua0000775000000000000000000000422413306560010021634 0ustar rootroot#!/usr/bin/env tarantool package.path = "lua/?.lua;"..package.path local tap = require('tap') local common = require('serializer_test') local function is_map(s) local b = string.byte(string.sub(s, 1, 1)) return b >= 0x80 and b <= 0x8f or b == 0xde or b == 0xdf end local function is_array(s) local b = string.byte(string.sub(s, 1, 1)) return b >= 0x90 and b <= 0x9f or b == 0xdc or b == 0xdd end local function test_offsets(test, s) test:plan(6) local arr1 = {1, 2, 3} local arr2 = {4, 5, 6} local dump = s.encode(arr1)..s.encode(arr2) test:is(dump:len(), 8, "length of part1 + part2") local a local offset = 1 a, offset = s.decode(dump, offset) test:is_deeply(a, arr1, "decoded part1") test:is(offset, 5, "offset of part2") a, offset = s.decode(dump, offset) test:is_deeply(a, arr2, "decoded part2") test:is(offset, 9, "offset of end") test:ok(not pcall(s.decode, dump, offset), "invalid offset") end local function test_misc(test, s) test:plan(4) local ffi = require('ffi') local buffer = require('buffer') local buf = ffi.cast("const char *", "\x91\x01") local bufcopy = ffi.cast('const char *', buf) local bufend, result = s.ibuf_decode(buf) local st,e = pcall(s.ibuf_decode, buffer.ibuf().rpos) test:is(buf, bufcopy, 
"ibuf_decode argument is constant") test:is(buf + 2, bufend, 'ibuf_decode position') test:is_deeply(result, {1}, "ibuf_decode result") test:ok(not st and e:match("null"), "null ibuf") end tap.test("msgpack", function(test) local serializer = require('msgpack') test:plan(10) test:test("unsigned", common.test_unsigned, serializer) test:test("signed", common.test_signed, serializer) test:test("double", common.test_double, serializer) test:test("boolean", common.test_boolean, serializer) test:test("string", common.test_string, serializer) test:test("nil", common.test_nil, serializer) test:test("table", common.test_table, serializer, is_array, is_map) test:test("ucdata", common.test_ucdata, serializer) test:test("offsets", test_offsets, serializer) test:test("misc", test_misc, serializer) end) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/errno.test.lua0000775000000000000000000000151413306560010021333 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local errno = require('errno') local test = tap.test("errno") test:plan(1) test:test("primary", function(test) test:plan(10) test:is(type(errno), "table", "type of table") test:ok(errno.EINVAL ~= nil, "errno.EINVAL is available") test:ok(errno.EBADF ~= nil , "errno.EBADF is available" ) test:ok(errno(0) ~= nil, "errno set to 0") test:is(errno(errno.EBADF), 0, "setting errno.EBADF") test:is(errno(), errno.EBADF, "checking errno.EBADF") test:is(errno(errno.EINVAL), errno.EBADF, "setting errno.EINVAL") test:is(errno(), errno.EINVAL, "checking errno.EINVAL") test:is(errno.strerror(), "Invalid argument", "checking strerror without argument") test:is(errno.strerror(errno.EBADF), "Bad file descriptor", "checking strerror with argument") end) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/httpd.py0000775000000000000000000000672513306560010020233 0ustar rootroot#!/usr/bin/env python2 import sys from gevent.pywsgi import WSGIServer from gevent import spawn, sleep, socket def absent(): code = "500 Server Error" headers = 
[('Content-Type', 'application/json')] body = ["No such method"] return code, body, headers def hello(): code = "200 OK" body = ["hello world"] headers = [('Content-Type', 'application/json')] return code, body, headers def hello1(): code = "200 OK" body = [b"abc"] headers = [('Content-Type', 'application/json')] return code, body, headers def headers(): code = "200 OK" body = [b"cookies"] headers = [('Content-Type', 'application/json'), ('Content-Type', 'application/yaml'), ('Set-Cookie', 'likes=cheese; Expires=Wed, 21 Oct 2015 07:28:00 GMT; Secure; HttpOnly'), ('Set-Cookie', 'bad@name=no;'), ('Set-Cookie', 'badcookie'), ('Set-Cookie', 'good_name=yes;'), ('Set-Cookie', 'age = 17; NOSuchOption; EmptyOption=Value;Secure'), ('my_header', 'value1'), ('my_header', 'value2'), ] return code, body, headers paths = { "/": hello, "/abc": hello1, "/absent": absent, "/headers": headers, } def read_handle(env, response): code = "404 Not Found" headers = [] body = ['Not Found'] if env["PATH_INFO"] in paths: code, body, headers = paths[env["PATH_INFO"]]() for key,value in env.iteritems(): if "HTTP_" in key: headers.append((key[5:].lower(), value)) response(code, headers) return body def post_handle(env, response): code = "200 OK" body = [env['wsgi.input'].read()] headers = [] for key,value in env.iteritems(): if "HTTP_" in key: headers.append((key[5:].lower(), value)) response(code, headers) return body def other_handle(env, response, method, code): headers = [('Content-Type', 'text/plain'), ("method", method)] body = [method] for key,value in env.iteritems(): if "HTTP_" in key: headers.append((key[5:].lower(), value)) response(code, headers) return body OTHER_METHODS = { "TRACE": True, "CONNECT": True, "OPTIONS": True, "DELETE": True , "HEAD": True } def handle(env, response) : method = env["REQUEST_METHOD"].upper() if method == "GET": return read_handle(env, response) elif method == "PUT" or method == "POST" or method == "PATCH": return post_handle(env, response) elif method 
in OTHER_METHODS: return other_handle(env, response, method, "200 Ok") return other_handle(env, response, method, "400 Bad Request") def heartbeat(): try: while True: sys.stdout.write("heartbeat\n") sys.stdout.flush() sleep(1e-1) except IOError: sys.exit(1) def usage(): sys.stderr.write("Usage: %s { --inet HOST:PORT | --unix PATH }\n" % sys.argv[0]) sys.exit(1) if len(sys.argv) != 3: usage() if sys.argv[1] == "--inet": host, port = sys.argv[2].split(':') sock_family = socket.AF_INET sock_addr = (host, int(port)) elif sys.argv[1] == "--unix": path = sys.argv[2] sock_family = socket.AF_UNIX sock_addr = path else: usage() sock = socket.socket(sock_family, socket.SOCK_STREAM) sock.bind(sock_addr) sock.listen(10) server = WSGIServer(sock, handle, log=None) spawn(heartbeat) server.serve_forever() tarantool_1.9.1.26.g63eb81e3c/test/app-tap/module_api.test.lua0000775000000000000000000000327613306560010022333 0ustar rootroot#!/usr/bin/env tarantool box.cfg{log = "tarantool.log"} build_path = os.getenv("BUILDDIR") package.cpath = build_path .. '/test/app-tap/?.so;' .. build_path .. 
'/test/app-tap/?.dylib;' local function test_pushcdata(test, module) test:plan(6) local ffi = require('ffi') ffi.cdef('struct module_api_test { int a; };') local gc_counter = 0; local ct = ffi.typeof('struct module_api_test') ffi.metatype(ct, { __tostring = function(obj) return 'ok' end; __gc = function(obj) gc_counter = gc_counter + 1; end }) local ctid = tonumber(ct) local obj, ptr = module.pushcdata(ctid) test:is(ffi.typeof(obj), ct, 'pushcdata typeof') test:is(tostring(obj), 'ok', 'pushcdata metatable') local ctid2, ptr2 = module.checkcdata(obj) test:is(ctid, ctid2, 'checkcdata type') test:is(ptr, ptr2, 'checkcdata value') test:is(gc_counter, 0, 'pushcdata gc') obj = nil collectgarbage('collect') test:is(gc_counter, 1, 'pushcdata gc') end local test = require('tap').test("module_api", function(test) test:plan(22) local status, module = pcall(require, 'module_api') test:is(status, true, "module") test:ok(status, "module is loaded") if not status then return end local space = box.schema.space.create("test") space:create_index('primary') for name, fun in pairs(module) do if string.sub(name,1, 5) == 'test_' then test:ok(fun(), name .. 
" is ok") end end local status, msg = pcall(module.check_error) test:like(msg, 'luaT_error', 'luaT_error') test:test("pushcdata", test_pushcdata, module) space:drop() end) os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/CMakeLists.txt0000664000000000000000000000004613306560010021261 0ustar rootrootbuild_module(module_api module_api.c) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/yaml.test.lua0000775000000000000000000000665113306565107021173 0ustar rootroot#!/usr/bin/env tarantool package.path = "lua/?.lua;"..package.path local tap = require('tap') local common = require('serializer_test') local function is_map(s) return s:match("---[\n ]%w+%:") or s:match("---[\n ]{%w+%:") end local function is_array(s) return s:match("---[\n ]%[") or s:match("---[\n ]- "); end local function test_compact(test, s) test:plan(9) local ss = s.new() ss.cfg{encode_load_metatables = true, decode_save_metatables = true} test:is(ss.encode({10, 15, 20}), "---\n- 10\n- 15\n- 20\n...\n", "block array") test:is(ss.encode(setmetatable({10, 15, 20}, { __serialize="array"})), "---\n- 10\n- 15\n- 20\n...\n", "block array") test:is(ss.encode(setmetatable({10, 15, 20}, { __serialize="sequence"})), "---\n- 10\n- 15\n- 20\n...\n", "block array") test:is(ss.encode({setmetatable({10, 15, 20}, { __serialize="seq"})}), "---\n- [10, 15, 20]\n...\n", "flow array") test:is(getmetatable(ss.decode(ss.encode({10, 15, 20}))).__serialize, "seq", "decoded __serialize is seq") test:is(ss.encode({k = 'v'}), "---\nk: v\n...\n", "block map") test:is(ss.encode(setmetatable({k = 'v'}, { __serialize="mapping"})), "---\nk: v\n...\n", "block map") test:is(ss.encode({setmetatable({k = 'v'}, { __serialize="map"})}), "---\n- {'k': 'v'}\n...\n", "flow map") test:is(getmetatable(ss.decode(ss.encode({k = 'v'}))).__serialize, "map", "decoded __serialize is map") ss = nil end local function test_output(test, s) test:plan(12) test:is(s.encode({true}), '---\n- true\n...\n', "encode for true") test:is(s.decode("---\nyes\n..."), 
true, "decode for 'yes'") test:is(s.encode({false}), '---\n- false\n...\n', "encode for false") test:is(s.decode("---\nno\n..."), false, "decode for 'no'") test:is(s.encode({s.NULL}), '---\n- null\n...\n', "encode for nil") test:is(s.decode("---\n~\n..."), s.NULL, "decode for ~") test:is(s.encode("\x80\x92\xe8s\x16"), '--- !!binary gJLocxY=\n...\n', "encode for binary") test:is(s.encode("\x08\x5c\xc2\x80\x12\x2f"), '--- !!binary CFzCgBIv\n...\n', "encode for binary (2) - gh-354") test:is(s.encode("\xe0\x82\x85\x00"), '--- !!binary 4IKFAA==\n...\n', "encode for binary (3) - gh-1302") -- gh-883: console can hang tarantool process local t = {} for i=0x8000,0xffff,1 do table.insert(t, require('pickle').pack( 'i', i )); end local _, count = string.gsub(s.encode(t), "!!binary", "") test:is(count, 30880, "encode for binary (4) - gh-883") test:is(s.encode("фЫр!"), '--- фЫр!\n...\n', "encode for utf-8") test:is(s.encode("Tutorial -- Header\n====\n\nText"), "--- |-\n Tutorial -- Header\n ====\n\n Text\n...\n", "tutorial string"); end tap.test("yaml", function(test) local serializer = require('yaml') test:plan(10) test:test("unsigned", common.test_unsigned, serializer) test:test("signed", common.test_signed, serializer) test:test("double", common.test_double, serializer) test:test("boolean", common.test_boolean, serializer) test:test("string", common.test_string, serializer) test:test("nil", common.test_nil, serializer) test:test("table", common.test_table, serializer, is_array, is_map) test:test("ucdata", common.test_ucdata, serializer) test:test("compact", test_compact, serializer) test:test("output", test_output, serializer) end) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/console.test.lua0000775000000000000000000002054613306565107021672 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local console = require('console') local socket = require('socket') local yaml = require('yaml') local fiber = require('fiber') local ffi = require('ffi') local log = 
require('log') local fio = require('fio') -- Suppress console log messages log.level(4) local CONSOLE_SOCKET = fio.pathjoin(fio.cwd(), 'tarantool-test-console.sock') local IPROTO_SOCKET = fio.pathjoin(fio.cwd(), 'tarantool-test-iproto.sock') os.remove(CONSOLE_SOCKET) os.remove(IPROTO_SOCKET) -- local EOL = "\n...\n" test = tap.test("console") test:plan(59) -- Start console and connect to it local server = console.listen(CONSOLE_SOCKET) test:ok(server ~= nil, "console.listen started") local client = socket.tcp_connect("unix/", CONSOLE_SOCKET) local handshake = client:read{chunk = 128} test:ok(string.match(handshake, '^Tarantool .*console') ~= nil, 'Handshake') test:ok(client ~= nil, "connect to console") -- Execute some command client:write("1\n") test:is(yaml.decode(client:read(EOL))[1], 1, "eval") -- doesn't crash and doesn't hang client:write("_G\n") test:is(#client:read(EOL) > 0, true, "_G") -- Check internal state of `console` module client:write("require('fiber').id()\n") local fid1 = yaml.decode(client:read(EOL))[1] local state = fiber.find(fid1).storage.console local server_info = state.client:peer() local client_info = state.client:name() test:is(client_info.host, client_info.host, "state.socker:peer().host") test:is(client_info.port, client_info.port, "state.socker:peer().port") server_info = nil client_info = nil -- Check console.delimiter() client:write("require('console').delimiter(';')\n") test:is(yaml.decode(client:read(EOL)), '', "set delimiter to ';'") test:is(state.delimiter, ';', "state.delimiter is ';'") client:write("require('console').delimiter();\n") test:is(yaml.decode(client:read(EOL))[1], ';', "get delimiter is ';'") client:write("require('console').delimiter('');\n") test:is(yaml.decode(client:read(EOL)), '', "clear delimiter") box.cfg{ listen=IPROTO_SOCKET; memtx_memory = 107374182, log="tarantool.log", } -- Connect to iproto console (CALL) client:write(string.format("require('console').connect('/')\n")) -- error: Connection is not 
established test:ok(yaml.decode(client:read(EOL))[1].error:find('not established'), 'remote network error') client:write(string.format("require('console').connect('%s')\n", IPROTO_SOCKET)) -- error: Execute access is denied for user 'guest' to function 'dostring test:ok(yaml.decode(client:read(EOL))[1].error:find('denied'), 'remote access denied') -- create user box.schema.user.create('test', { password = 'pass' }) client:write(string.format("require('console').connect('test:pass@%s')\n", IPROTO_SOCKET)) -- error: Execute access denied for user 'test' to function 'dostring test:ok(yaml.decode(client:read(EOL))[1].error:find('denied'), 'remote access denied') -- Add permissions to execute for `test` box.schema.user.grant('test', 'execute', 'universe') client:write(string.format("require('console').connect('test:pass@%s')\n", IPROTO_SOCKET)) test:ok(yaml.decode(client:read(EOL)), "remote connect") -- Log in with an empty password box.schema.user.create('test2', { password = '' }) box.schema.user.grant('test2', 'execute', 'universe') client:write(string.format("require('console').connect('test2@%s')\n", IPROTO_SOCKET)) test:ok(yaml.decode(client:read(EOL)), "remote connect") client:write(string.format("require('console').connect('test2:@%s')\n", IPROTO_SOCKET)) test:ok(yaml.decode(client:read(EOL)), "remote connect") -- Execute some command client:write("require('fiber').id()\n") local fid2 = yaml.decode(client:read(EOL))[1] test:isnt(fid1, fid2, "remote eval") test:is(state.remote.host, "unix/", "remote state.remote.host") test:is(state.remote.port, IPROTO_SOCKET, "remote state.remote.port") test:is(state.prompt, string.format("%s:%s", "unix/", IPROTO_SOCKET), "remote state.prompt") -- Check exception handling (gh-643) client:write("error('test')\n") test:ok(yaml.decode(client:read(EOL))[1].error:match('test') ~= nil, "exception handling") client:write("setmetatable({}, { __serialize = function() error('test') end})\n") 
test:ok(yaml.decode(client:read(EOL))[1].error:match('test') ~= nil, "exception handling") -- Disconnect from iproto client:write("~.\n") -- Check that iproto console has been disconnected client:write("require('fiber').id()\n") local fid1x = yaml.decode(client:read(EOL))[1] test:is(fid1, fid1x, "remote disconnect") -- Connect to admin port client:write(string.format("require('console').connect('%s')\n", CONSOLE_SOCKET)) test:ok(yaml.decode(client:read(EOL))[1], 'admin connect') client:write("2 + 2\n") test:ok(yaml.decode(client:read(EOL))[1] == 4, "admin eval") -- gh-1177: Error message for display of a net.box result client:write("require('net.box').connect('unix/', '"..IPROTO_SOCKET.."')\n") test:isnil(yaml.decode(client:read(EOL))[1].error, "gh-1177 __serialize") -- there is no way to disconnect here -- Disconect from console client:shutdown() client:write('') client:close() -- Stop console server:shutdown() server:close() fiber.sleep(0) -- workaround for gh-712: console.test.lua fails in Fedora -- Check that admin console has been stopped test:isnil(socket.tcp_connect("unix/", CONSOLE_SOCKET), "console.listen stopped") -- Stop iproto box.cfg{listen = ''} os.remove(IPROTO_SOCKET) local s = console.listen('127.0.0.1:0') addr = s:name() test:is(addr.family, 'AF_INET', 'console.listen uri support') test:is(addr.host, '127.0.0.1', 'console.listen uri support') test:isnt(addr.port, 0, 'console.listen uri support') s:close() local s = console.listen('console://unix/:'..CONSOLE_SOCKET) addr = s:name() test:is(addr.family, 'AF_UNIX', 'console.listen uri support') test:is(addr.host, 'unix/', 'console.listen uri support') test:is(addr.port, CONSOLE_SOCKET, 'console.listen uri support') s:close() -- -- gh-1938: on_connect/on_disconnect/on_auth triggers -- local session_id = box.session.id() local triggers_ran = 0 local function console_on_connect() test:is(box.session.user(), "admin", "on_connect session.user()") test:like(box.session.peer(), "unix", "on_connect 
session.peer()") test:isnt(box.session.id(), session_id, "on_connect session.id()") triggers_ran = triggers_ran + 1 end local function console_on_disconnect() test:is(box.session.user(), "admin", "on_disconnect session.user()") test:isnt(box.session.id(), session_id, "on_disconnect session.id()") triggers_ran = triggers_ran + 1 end local function console_on_auth(username, success) test:is(box.session.user(), "admin", "on_auth session.user()") test:like(box.session.peer(), "unix", "on_auth session.peer()") test:isnt(box.session.id(), session_id, "on_auth session.id()") test:is(username, "admin", "on_auth argument") test:is(success, true, "on_auth argument 2") triggers_ran = triggers_ran + 1 end box.session.on_connect(console_on_connect) box.session.on_disconnect(console_on_disconnect) box.session.on_auth(console_on_auth) -- check on_connect/on_disconnect/on_auth triggers local server = console.listen('console://unix/:'..CONSOLE_SOCKET) client = socket.tcp_connect("unix/", CONSOLE_SOCKET) _ = client:read(128) client:write("1\n") test:is(yaml.decode(client:read(EOL))[1], 1, "eval with triggers") client:shutdown() client:close() while triggers_ran < 3 do fiber.yield() end -- check on_auth with error() local function console_on_auth_error() error("Authorization error") triggers_ran = triggers_ran + 1 end box.session.on_auth(console_on_auth_error) client = socket.tcp_connect("unix/", CONSOLE_SOCKET) _ = client:read(128) test:is(client:read(1024), "", "on_auth aborts connection") client:close() while triggers_ran < 4 do fiber.yield() end test:is(triggers_ran, 4, "on_connect -> on_auth_error order") box.session.on_auth(nil, console_on_auth_error) -- -- gh-2642 "box.session.type()" -- client = socket.tcp_connect("unix/", CONSOLE_SOCKET) _ = client:read(128) client:write("box.session.type();\n") test:is(yaml.decode(client:read(EOL))[1], "console", "session type") client:close() server:close() box.session.on_connect(nil, console_on_connect) box.session.on_disconnect(nil, 
console_on_disconnect) box.session.on_auth(nil, console_on_auth) box.schema.user.drop('test') box.schema.user.drop('test2') session_id = nil triggers_ran = nil os.remove(CONSOLE_SOCKET) os.remove(IPROTO_SOCKET) test:check() os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/pcall.result0000664000000000000000000000117313306560010021056 0ustar rootroot-------------------------------------------------------------------------------- -- #267: Bad exception catching -------------------------------------------------------------------------------- pcall inside xpcall: true pcall is ok pcall with Lua error(): false some message pcall with box.error(): false Illegal parameters, some message pcall with box.error(): typeof ctype pcall with box.error(): .type ClientError pcall with box.error(): .code 1 pcall with box.error(): .message Illegal parameters, some message pcall with box.error(): .match() some pcall with no return: 1 pcall with multireturn: true 1 2 3 tarantool_1.9.1.26.g63eb81e3c/test/app-tap/init_script.test.lua0000775000000000000000000000300513306560010022532 0ustar rootroot#!/usr/bin/env tarantool -- -- Testing init script -- box.cfg{ listen = os.getenv("LISTEN"), pid_file = "box.pid", memtx_memory=107374182, log="tarantool.log" } yaml = require('yaml') fiber = require('fiber') if box.space.tweedledum ~= nil then box.space.space1:drop() end space = box.schema.space.create('tweedledum') space:create_index('primary', { type = 'hash' }) print[[ -- -- Access to box.cfg from init script -- ]] t = {} for k,v in pairs(box.cfg) do if k == 'listen' then v = 'port' end if type(v) ~= 'table' and type(v) ~= 'function' then table.insert(t,k..':'..tostring(v)) end end table.sort(t) print('box.cfg') for k,v in pairs(t) do print(k, v) end -- -- Insert tests -- local function do_insert() space:insert{1, 2, 4, 8} end fiber1 = fiber.create(do_insert) print[[ -- -- Test insert from detached fiber -- ]] print(yaml.encode(space:select())) print[[ -- -- Test insert from init script 
-- ]] space:insert{2, 4, 8, 16} print(space:get(1)) print(space:get(2)) -- -- Run a dummy insert to avoid race conditions under valgrind -- space:insert{4, 8, 16} print(space:get(4)) print[[ -- -- Check that require function(math.floor) reachable in the init script -- ]] floor = require("math").floor print(floor(0.5)) print(floor(0.9)) print(floor(1.1)) mod = require('require_mod') print(mod.test(10, 15)) -- -- A test case for https://github.com/tarantool/tarantool/issues/53 -- assert (require ~= nil) fiber.sleep(0.0) assert (require ~= nil) space:drop() os.exit() tarantool_1.9.1.26.g63eb81e3c/test/app-tap/iconv.test.lua0000775000000000000000000000322313306560010021323 0ustar rootroot#!/usr/bin/env tarantool local tap = require('tap') local iconv = require('iconv') test = tap.test("iconv") test:plan(11) local simple_str = 'ascii string' local cyrillic_str = 'русский текст' local c_ascii_8 = iconv.new('ASCII', 'UTF-8') local c_8_ascii = iconv.new('UTF-8', 'ASCII') test:is(c_ascii_8(simple_str), simple_str, 'check ascii->utf8 on simple string') test:is(c_8_ascii(simple_str), simple_str, 'check utf8->ascii on simple string') local c16be_8 = iconv.new('UTF-16BE', 'UTF-8') local c8_16be = iconv.new('UTF-8', 'UTF-16BE') test:is(c16be_8(c8_16be(simple_str)), simple_str, 'UTF conversion with ascii string') test:is(c8_16be(c16be_8(cyrillic_str)), cyrillic_str, 'UTF conversion with non-ascii symbols') local c16_16be = iconv.new('UTF-16', 'UTF-16BE') local c1251_16 = iconv.new('WINDOWS-1251', 'UTF-16') local c8_1251 = iconv.new('UTF-8', 'WINDOWS-1251') test:is(c8_16be(c16be_8(cyrillic_str)), cyrillic_str, 'UTF conversion with non-ascii symbols') -- test complex converting path test:is(c8_1251(c1251_16(c16_16be(c16be_8(cyrillic_str)))), cyrillic_str, 'complex multi-format conversion') -- test huge string huge_str = string.rep(cyrillic_str, 50) test:is(c16be_8(c8_16be(huge_str)), huge_str, "huge string") local stat, err = pcall(iconv.new, 'NOT EXISTS', 'UTF-8') test:is(stat, 
false, 'error was thrown on bad encoding') test:ok(err:match('Invalid') ~= nil, 'correct error') local stat, err = pcall(c_ascii_8, cyrillic_str) test:is(stat, false, 'error was thrown on sequence') test:ok(err:match('Incomplete multibyte sequence') ~= nil, 'correct error') os.exit(test:check() == true and 0 or 1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/pcall.test.lua0000775000000000000000000000272713306560010021310 0ustar rootroot#!/usr/bin/env tarantool local ffi = require('ffi') print[[ -------------------------------------------------------------------------------- -- #267: Bad exception catching -------------------------------------------------------------------------------- ]] box.cfg{ log="tarantool.log", memtx_memory=107374182, } function pcalltest() local ERRMSG = "module 'some_invalid_module' not found" local status, msg = pcall(require, 'some_invalid_module') if status == false and msg ~= nil and msg:match(ERRMSG) ~= nil then return 'pcall is ok' else return 'pcall is broken' end end local status, msg = xpcall(pcalltest, function(msg) print('error handler:', msg) end) print('pcall inside xpcall:', status, msg) local status, msg = pcall(function() error('some message') end) print('pcall with Lua error():', status, msg:match('some message')) local status, msg = pcall(function() box.error(box.error.ILLEGAL_PARAMS, 'some message') end) print('pcall with box.error():', status, msg) print('pcall with box.error(): typeof', ffi.typeof(msg)) print('pcall with box.error(): .type', msg.type) print('pcall with box.error(): .code', msg.code) print('pcall with box.error(): .message', msg.message) -- Tarantool 1.6 backward compatibility print('pcall with box.error(): .match()', msg:match('some')) print('pcall with no return:', select('#', pcall(function() end))) print('pcall with multireturn:', pcall(function() return 1, 2, 3 end)) os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/trigger.test.lua0000775000000000000000000000514113306560010021651 0ustar 
rootroot#!/usr/bin/env tarantool local table_clear = require('table.clear') box.cfg{ log = "tarantool.log" } local trigger = require('internal.trigger') local test = require('tap').test('trigger') test:plan(3) local trigger_list = trigger.new("sweet trigger") test:ok(trigger_list ~= nil, "test that trigger list is created") test:test("simple trigger test", function(test) test:plan(10) local cnt = 0 local function trigger_cnt() cnt = cnt + 1 end -- Append first trigger trigger_list(trigger_cnt) trigger_list:run() test:is(cnt, 1, "check first run") -- Append second trigger trigger_list(trigger_cnt) trigger_list:run() test:is(cnt, 3, "check first run") -- Check listing local list_copy = trigger_list() test:is(#list_copy, 2, "trigger() count") table.remove(list_copy) test:is(#trigger_list(), 2, "check that we've returned copy") -- Delete both triggers test:is(trigger_list(nil, trigger_cnt), trigger_cnt, "pop trigger") trigger_list:run() test:is(#trigger_list(), 1, "check trigger count after delete") test:is(cnt, 4, "check third run") test:is(trigger_list(nil, trigger_cnt), trigger_cnt, "pop trigger") trigger_list:run() test:is(#trigger_list(), 0, "check trigger count after delete") -- Check that we've failed to delete trigger local stat, err = pcall(getmetatable(trigger_list).__call, trigger_list, nil, trigger_cnt) test:ok(string.find(err, "is not found"), "check error") end) test:test("errored trigger test", function(test) test:plan(6) -- -- Check that trigger:run() fails on the first error -- local cnt = 0 local function trigger_cnt() cnt = cnt + 1 end local function trigger_errored() error("test error") end test:is(#trigger_list(), 0, "check for empty triggers") -- Append first trigger trigger_list(trigger_cnt) trigger_list:run() test:is(cnt, 1, "check simple trigger") -- Append errored trigger trigger_list(trigger_errored) local status = pcall(function() trigger_list:run() end) test:is(cnt, 2, "check simple+error trigger") -- Flush triggers 
table_clear(trigger_list) test:is(#trigger_list(), 0, "successfull flush") -- Append first trigger trigger_list(trigger_errored) local status = pcall(function() trigger_list:run() end) test:is(cnt, 2, "check error trigger") -- Append errored trigger trigger_list(trigger_cnt) local status = pcall(function() trigger_list:run() end) test:is(cnt, 2, "check error+simple trigger") end) os.exit(test:check() == true and 0 or -1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/table.test.lua0000775000000000000000000001342413306560010021300 0ustar rootroot#!/usr/bin/env tarantool local yaml = require('yaml').new() yaml.cfg{ encode_invalid_numbers = true, encode_load_metatables = true, encode_use_tostring = true, encode_invalid_as_nil = true, } local test = require('tap').test('table') test:plan(31) do -- check basic table.copy (deepcopy) local example_table = { {1, 2, 3}, {"help, I'm very nested", {{{ }}} } } local copy_table = table.deepcopy(example_table) test:is_deeply( example_table, copy_table, "checking, that deepcopy behaves ok" ) test:isnt( example_table, copy_table, "checking, that tables are different" ) test:isnt( example_table[1], copy_table[1], "checking, that tables are different" ) test:isnt( example_table[2], copy_table[2], "checking, that tables are different" ) test:isnt( example_table[2][2], copy_table[2][2], "checking, that tables are different" ) test:isnt( example_table[2][2][1], copy_table[2][2][1], "checking, that tables are different" ) end do -- check basic table.copy (deepcopy) local example_table = { {1, 2, 3}, {"help, I'm very nested", {{{ }}} } } local copy_table = table.copy(example_table, true) test:is_deeply( example_table, copy_table, "checking, that deepcopy behaves ok + shallow" ) test:isnt( example_table, copy_table, "checking, that tables are different + shallow" ) test:is( example_table[1], copy_table[1], "checking, that tables are the same + shallow" ) test:is( example_table[2], copy_table[2], "checking, that tables are the same + shallow" 
) test:is( example_table[2][2], copy_table[2][2], "checking, that tables are the same + shallow" ) test:is( example_table[2][2][1], copy_table[2][2][1], "checking, that tables are the same + shallow" ) end do -- check cycle resolution for table.copy (deepcopy) local recursive_table_1 = {} local recursive_table_2 = {} recursive_table_1[1] = recursive_table_2 recursive_table_2[1] = recursive_table_1 local copy_table_1 = table.deepcopy(recursive_table_1) local copy_table_2 = table.deepcopy(recursive_table_2) test:isnt( copy_table_1, recursive_table_1, "table 1. checking, that tables are different" ) test:isnt( copy_table_1[1], recursive_table_1[1], "table 1. checking, that tables are different" ) test:isnt( copy_table_1[1][1], recursive_table_1[1][1], "table 1. checking, that tables are different" ) test:is( copy_table_1, copy_table_1[1][1], "table 1. checking, that cyclic reference is ok" ) test:isnt( copy_table_2, recursive_table_2, "table 2. checking, that tables are different" ) test:isnt( copy_table_2[1], recursive_table_2[1], "table 2. checking, that tables are different" ) test:isnt( copy_table_2[1][1], recursive_table_2[1][1], "table 2. checking, that tables are different" ) test:is( copy_table_2, copy_table_2[1][1], "table 2. 
checking, that cyclic reference is ok" ) end do -- check usage of __copy metamethod local copy_mt = nil; copy_mt = { __copy = function(self) local new_self = { a = 1} return setmetatable(new_self, copy_mt) end } local one_self = setmetatable({ a = 2 }, copy_mt) local another_self = table.deepcopy(one_self) test:isnt(one_self, another_self, "checking that output tables differs") test:is( getmetatable(one_self), getmetatable(another_self), "checking that we've called __copy" ) test:isnt(one_self.a, another_self.a, "checking that we've called __copy") end do -- check usage of __copy metamethod + shallow local copy_mt = nil; copy_mt = { __copy = function(self) local new_self = { a = 1} return setmetatable(new_self, copy_mt) end } local one_self = setmetatable({ a = 2 }, copy_mt) local another_self = table.copy(one_self, true) test:isnt( one_self, another_self, "checking that output objects differs + shallow" ) test:is( getmetatable(one_self), getmetatable(another_self), "checking that we've called __copy + shallow (same obj types)" ) test:isnt( one_self.a, another_self.a, "checking that we've called __copy + shallow (diff obj values)" ) end do -- check usage of not __copy metamethod on second level + shallow local copy_mt = nil; copy_mt = { __copy = function(self) local new_self = { a = 1 } return setmetatable(new_self, copy_mt) end } local one_self = { setmetatable({ a = 2 }, copy_mt) } local another_self = table.copy(one_self, true) test:isnt( one_self, another_self, "checking that output tables differs + shallow" ) test:isnil( getmetatable(one_self), "checking that we've called __copy + shallow and no mt" ) test:isnil( getmetatable(another_self), "checking that we've called __copy + shallow and no mt" ) test:is( one_self[1], another_self[1], "checking that we've called __copy + shallow and object is the same" ) test:is( one_self[1].a, another_self[1].a, "checking that we've called __copy + shallow and object val is the same" ) end os.exit(test:check() == true and 0 
or 1) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/lua/0000775000000000000000000000000013306560010017302 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/app-tap/lua/serializer_test.lua0000664000000000000000000002775213306560010023232 0ustar rootrootlocal ffi = require('ffi') local function rt(test, s, x, t) local buf1 = s.encode(x) local x1, offset1 = s.decode(buf1) local xstr if type(x) == "table" then xstr = "table" elseif ffi.istype('float', x) then xstr = string.format('%0.2f (ffi float)', tonumber(x)) elseif ffi.istype('double', x) then xstr = string.format('%0.2f (ffi double)', tonumber(x)) elseif ffi.istype("bool", x) then xstr = string.format("%s (ffi bool)", x == 1 and "true" or "false") elseif type(x) == "cdata" then xstr = tostring(x) xstr = xstr:match("cdata<.+>:") or xstr else xstr = tostring(x) end test:is_deeply(x, x1, "encode/decode for "..xstr) if t ~= nil then test:is(type(x1), t, "encode/decode type for "..xstr) end end local function test_unsigned(test, s) test:plan(104) rt(test, s, 0, "number") rt(test, s, 0LL, "number") rt(test, s, 0ULL, "number") rt(test, s, 1, "number") rt(test, s, 1LL, "number") rt(test, s, 1ULL, "number") rt(test, s, 127, "number") rt(test, s, 127LL, "number") rt(test, s, 127ULL, "number") rt(test, s, 128, "number") rt(test, s, 128LL, "number") rt(test, s, 128ULL, "number") rt(test, s, 255, "number") rt(test, s, 255LL, "number") rt(test, s, 255ULL, "number") rt(test, s, 256, "number") rt(test, s, 256LL, "number") rt(test, s, 256ULL, "number") rt(test, s, 65535, "number") rt(test, s, 65535LL, "number") rt(test, s, 65535ULL, "number") rt(test, s, 65536, "number") rt(test, s, 65536LL, "number") rt(test, s, 65536ULL, "number") rt(test, s, 4294967294, "number") rt(test, s, 4294967294LL, "number") rt(test, s, 4294967294ULL, "number") rt(test, s, 4294967295, "number") rt(test, s, 4294967295LL, "number") rt(test, s, 4294967295ULL, "number") rt(test, s, 4294967296, "number") rt(test, s, 4294967296LL, "number") rt(test, s, 
4294967296ULL, "number") rt(test, s, 4294967297, "number") rt(test, s, 4294967297LL, "number") rt(test, s, 4294967297ULL, "number") -- 1e52 - maximum int that can be stored to double without losing precision -- decrease double capacity to fit .14g output format rt(test, s, 99999999999999, "number") rt(test, s, 99999999999999LL, "number") rt(test, s, 99999999999999ULL, "number") rt(test, s, 100000000000000, "cdata") rt(test, s, 100000000000000LL, "cdata") rt(test, s, 100000000000000ULL, "cdata") rt(test, s, 9223372036854775807LL, "cdata") rt(test, s, 9223372036854775807ULL, "cdata") rt(test, s, 9223372036854775808ULL, "cdata") rt(test, s, 9223372036854775809ULL, "cdata") rt(test, s, 18446744073709551614ULL, "cdata") rt(test, s, 18446744073709551615ULL, "cdata") rt(test, s, -1ULL, "cdata") -- don't use 'unsigned char' or 'signed char' because output -- depends -fsigned-char flag. rt(test, s, ffi.new('char', 128), 'number') rt(test, s, ffi.new('unsigned short', 128), 'number') rt(test, s, ffi.new('unsigned int', 128), 'number') end local function test_signed(test, s) test:plan(52) rt(test, s, -1, 'number') rt(test, s, -1LL, 'number') rt(test, s, -31, 'number') rt(test, s, -31LL, 'number') rt(test, s, -32, 'number') rt(test, s, -32LL, 'number') rt(test, s, -127, 'number') rt(test, s, -127LL, 'number') rt(test, s, -128, 'number') rt(test, s, -128LL, 'number') rt(test, s, -32767, 'number') rt(test, s, -32767LL, 'number') rt(test, s, -32768, 'number') rt(test, s, -32768LL, 'number') rt(test, s, -2147483647, 'number') rt(test, s, -2147483647LL, 'number') rt(test, s, -2147483648, 'number') rt(test, s, -2147483648LL, 'number') -- 1e52 - maximum int that can be stored to double without losing precision -- decrease double capacity to fit .14g output format rt(test, s, -99999999999999, "number") rt(test, s, -99999999999999LL, "number") rt(test, s, -100000000000000, "cdata") rt(test, s, -100000000000000LL, "cdata") rt(test, s, -9223372036854775806LL, 'cdata') rt(test, s, 
-9223372036854775807LL, 'cdata') rt(test, s, ffi.new('short', -128), 'number') rt(test, s, ffi.new('int', -128), 'number') end local function test_double(test, s) test:plan(s.cfg and 15 or 9) rt(test, s, -1.1) rt(test, s, 3.1415926535898) rt(test, s, -3.1415926535898) rt(test, s, -1e100) rt(test, s, 1e100) rt(test, s, ffi.new('float', 123456)) rt(test, s, ffi.new('double', 123456)) rt(test, s, ffi.new('float', 12.121)) rt(test, s, ffi.new('double', 12.121)) if not s.cfg then return end -- -- cfg: encode_invalid_numbers / decode_invalid_numbers -- local nan = 0/0 local inf = 1/0 local ss = s.new() ss.cfg{encode_invalid_numbers = false} test:ok(not pcall(ss.encode, nan), "encode exception on nan") test:ok(not pcall(ss.encode, inf), "encode exception on inf") ss.cfg{encode_invalid_numbers = true} local xnan = ss.encode(nan) local xinf = ss.encode(inf) ss.cfg{decode_invalid_numbers = false} test:ok(not pcall(ss.decode, xnan), "decode exception on nan") test:ok(not pcall(ss.decode, xinf), "decode exception on inf") ss.cfg{decode_invalid_numbers = true} rt(test, s, nan) rt(test, s, inf) ss = nil end local function test_boolean(test, s) test:plan(4) rt(test, s, false) rt(test, s, true) rt(test, s, ffi.new('bool', true)) rt(test, s, ffi.new('bool', false)) end local function test_string(test, s) test:plan(8) rt(test, s, "") rt(test, s, "abcde") rt(test, s, "Кудыкины горы") -- utf-8 rt(test, s, string.rep("x", 33)) rt(test, s, '$a\t $') rt(test, s, '$a\t $') rt(test, s, [[$a\t $]]) rt(test, s, [[$a\\t $]]) end local function test_nil(test, s) test:plan(6) rt(test, s, nil) rt(test, s, s.NULL) test:iscdata(s.NULL, 'void *', '.NULL is cdata') test:ok(s.NULL == nil, '.NULL == nil') rt(test, s, {1, 2, 3, s.NULL, 5}) local t = s.decode(s.encode({1, 2, 3, [5] = 5})) test:is(t[4], s.NULL, "sparse array with NULL") end local function test_table(test, s, is_array, is_map) test:plan(s.cfg and 31 or 13) rt(test, s, {}) test:ok(is_array(s.encode({})), "empty table is array") rt(test, s, 
{1, 2, 3}) test:ok(is_array(s.encode({1, 2, 3})), "array is array") rt(test, s, {k1 = 'v1', k2 = 'v2', k3 = 'v3'}) test:ok(is_map(s.encode({k1 = 'v1', k2 = 'v2', k3 = 'v3'})), "map is map") -- utf-8 pairs rt(test, s, {Метапеременная = { 'Метазначение' }}) rt(test, s, {test = { 'Результат' }}) local arr = setmetatable({1, 2, 3, k1 = 'v1', k2 = 'v2', 4, 5}, { __serialize = 'seq'}) local map = setmetatable({1, 2, 3, 4, 5}, { __serialize = 'map'}) local obj = setmetatable({}, { __serialize = function(x) return 'serialize' end }) -- __serialize on encode test:ok(is_array(s.encode(arr)), "array load __serialize") -- map test:ok(is_map(s.encode(map)), "map load __serialize") -- string (from __serialize hook) test:is(s.decode(s.encode(obj)), "serialize", "object load __serialize") -- __serialize on decode test:is(getmetatable(s.decode(s.encode(arr))).__serialize, "seq", "array save __serialize") test:is(getmetatable(s.decode(s.encode(map))).__serialize, "map", "map save __serialize") if not s.cfg then return end -- -- encode_load_metatables -- local ss = s.new() ss.cfg{encode_load_metatables = false} -- map test:ok(is_map(ss.encode(arr)), "array ignore __serialize") -- array test:ok(is_array(ss.encode(map)), "map ignore __serialize") -- array test:ok(is_array(ss.encode(obj)), "object ignore __serialize") ss.cfg{encode_load_metatables = true} -- array test:ok(is_array(ss.encode(arr)), "array load __serialize") -- map test:ok(is_map(ss.encode(map)), "map load __serialize") -- string (from __serialize hook) test:is(ss.decode(ss.encode(obj)), "serialize", "object load __serialize") ss = nil -- -- decode_save_metatables -- local arr = {1, 2, 3} local map = {k1 = 'v1', k2 = 'v2', k3 = 'v3'} ss = s.new() ss.cfg{decode_save_metatables = false} test:isnil(getmetatable(ss.decode(ss.encode(arr))), "array __serialize") test:isnil(getmetatable(ss.decode(ss.encode(map))), "map __serialize") ss.cfg{decode_save_metatables = true} 
test:is(getmetatable(ss.decode(ss.encode(arr))).__serialize, "seq", "array save __serialize") test:is(getmetatable(ss.decode(ss.encode(map))).__serialize, "map", "map save __serialize") ss = nil -- -- encode_sparse_convert / encode_sparse_ratio / encode_sparse_safe -- local ss = s.new() ss.cfg{encode_sparse_ratio = 2, encode_sparse_safe = 10} ss.cfg{encode_sparse_convert = false} test:ok(is_array(ss.encode({[1] = 1, [3] = 3, [4] = 4, [6] = 6, [9] = 9, [12] = 12})), "sparse convert off") test:ok(is_array(ss.encode({[1] = 1, [3] = 3, [4] = 4, [6] = 6, [10] = 10})), "sparse convert off") test:ok(not pcall(ss.encode, {[1] = 1, [3] = 3, [4] = 4, [6] = 6, [12] = 12}), "excessively sparse array") ss.cfg{encode_sparse_convert = true} test:ok(is_array(ss.encode({[1] = 1, [3] = 3, [4] = 4, [6] = 6, [9] = 9, [12] = 12})), "sparse convert on") test:ok(is_array(ss.encode({[1] = 1, [3] = 3, [4] = 4, [6] = 6, [10] = 10})), "sparse convert on") test:ok(is_map(ss.encode({[1] = 1, [3] = 3, [4] = 4, [6] = 6, [12] = 12})), "sparse convert on") -- map test:ok(is_map(ss.encode({1, 2, 3, 4, 5, [100] = 100})), "sparse safe 1") ss.cfg{encode_sparse_safe = 100} -- array test:ok(is_array(ss.encode({1, 2, 3, 4, 5, [100] = 100})), "sparse safe 2") ss = nil end local function test_ucdata(test, s) test:plan(11) -- -- encode_use_unpack / encode_use_tostring -- ffi.cdef[[struct serializer_cdata_test {}]] local ctype = ffi.typeof('struct serializer_cdata_test') ffi.metatype(ctype, { __index = { __serialize = function(obj) return 'unpack' end, }, __tostring = function(obj) return 'tostring' end }); local cdata = ffi.new(ctype) -- use fiber's userdata for test (supports both __serialize and __tostring) local udata = require('fiber').self() local ss = s.new() ss.cfg{ encode_load_metatables = false, encode_use_tostring = false, encode_invalid_as_nil = false } test:ok(not pcall(ss.encode, cdata), "encode exception on cdata") test:ok(not pcall(ss.encode, udata), "encode exception on udata") 
ss.cfg{encode_invalid_as_nil = true} test:ok(ss.decode(ss.encode(cdata)) == nil, "encode_invalid_as_nil") test:ok(ss.decode(ss.encode(udata)) == nil, "encode_invalid_as_nil") ss.cfg{encode_load_metatables = true, encode_use_tostring = false} test:is(ss.decode(ss.encode(cdata)), 'unpack', 'cdata __serialize') test:istable(ss.decode(ss.encode(udata)), 'udata __serialize') ss.cfg{encode_load_metatables = false, encode_use_tostring = true} test:is(ss.decode(ss.encode(cdata)), 'tostring', 'cdata __tostring') test:isstring(ss.decode(ss.encode(udata)), 'udata __tostring') ss.cfg{encode_load_metatables = true, encode_use_tostring = true} test:is(ss.decode(ss.encode(cdata)), 'unpack', 'cdata hook priority') test:istable(ss.decode(ss.encode(udata)), 'udata hook priority') -- gh-1226: luaL_convertfield should ignore __serialize hook for ctypes test:like(ss.decode(ss.encode(ctype)), 'ctype= 0x80 and b <= 0x8f or b == 0xde or b == 0xdf end local function is_array(s) local b = string.byte(string.sub(s, 1, 1)) return b >= 0x90 and b <= 0x9f or b == 0xdc or b == 0xdd end local function test_offsets(test, s) test:plan(6) local arr1 = {1, 2, 3} local arr2 = {4, 5, 6} local dump = s.encode(arr1)..s.encode(arr2) test:is(dump:len(), 8, "length of part1 + part2") local a local offset = 1 a, offset = s.decode(dump, offset) test:is_deeply(a, arr1, "decoded part1") test:is(offset, 5, "offset of part2") a, offset = s.decode(dump, offset) test:is_deeply(a, arr2, "decoded part2") test:is(offset, 9, "offset of end") test:ok(not pcall(s.decode, dump, offset), "invalid offset") end local function test_other(test, s) test:plan(19) local buf = string.char(0x93, 0x6e, 0xcb, 0x42, 0x2b, 0xed, 0x30, 0x47, 0x6f, 0xff, 0xff, 0xac, 0x77, 0x6b, 0x61, 0x71, 0x66, 0x7a, 0x73, 0x7a, 0x75, 0x71, 0x71, 0x78) local num = s.decode(buf)[2] test:ok(num < 59971740600 and num > 59971740599, "gh-633 double decode") -- gh-596: msgpack and msgpackffi have different behaviour local arr = {1, 2, 3} local map = {k1 = 
'v1', k2 = 'v2', k3 = 'v3'} test:is(getmetatable(s.decode(s.encode(arr))).__serialize, "seq", "array save __serialize") test:is(getmetatable(s.decode(s.encode(map))).__serialize, "map", "map save __serialize") -- gh-1095: `-128` is packed as `d1ff80` instead of `d080` test:is(#s.encode(0x7f), 1, "len(encode(0x7f))") test:is(#s.encode(0x80), 2, "len(encode(0x80))") test:is(#s.encode(0xff), 2, "len(encode(0xff))") test:is(#s.encode(0x100), 3, "len(encode(0x100))") test:is(#s.encode(0xffff), 3, "len(encode(0xffff))") test:is(#s.encode(0x10000), 5, "len(encode(0x10000))") test:is(#s.encode(0xffffffff), 5, "len(encode(0xffffffff))") test:is(#s.encode(0x100000000), 9, "len(encode(0x100000000))") test:is(#s.encode(-0x20), 1, "len(encode(-0x20))") test:is(#s.encode(-0x21), 2, "len(encode(-0x21))") test:is(#s.encode(-0x80), 2, "len(encode(-0x80))") test:is(#s.encode(-0x81), 3, "len(encode(-0x81))") test:is(#s.encode(-0x8000), 3, "len(encode(-0x8000))") test:is(#s.encode(-0x8001), 5, "len(encode(-0x8001))") test:is(#s.encode(-0x80000000), 5, "len(encode(-0x80000000))") test:is(#s.encode(-0x80000001), 9, "len(encode(-0x80000001))") end tap.test("msgpackffi", function(test) local serializer = require('msgpackffi') test:plan(9) test:test("unsigned", common.test_unsigned, serializer) test:test("signed", common.test_signed, serializer) test:test("double", common.test_double, serializer) test:test("boolean", common.test_boolean, serializer) test:test("string", common.test_string, serializer) test:test("nil", common.test_nil, serializer) test:test("table", common.test_table, serializer, is_array, is_map) -- udata/cdata hooks are not implemented --test:test("ucdata", common.test_ucdata, serializer) test:test("offsets", test_offsets, serializer) test:test("other", test_other, serializer) end) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/logger_pipe.result0000664000000000000000000000000313306560010022246 0ustar rootroot48 
tarantool_1.9.1.26.g63eb81e3c/test/app-tap/module_api.c0000664000000000000000000002347513306560010021016 0ustar rootroot#include #include #include #include #include #include #include #include #include #include #define STR2(x) #x #define STR(x) STR2(x) /* Test for constants */ static const char *consts[] = { PACKAGE_VERSION, STR(PACKAGE_VERSION_MINOR), STR(PACKAGE_VERSION_MAJOR), STR(PACKAGE_VERSION_PATCH), TARANTOOL_C_FLAGS, TARANTOOL_CXX_FLAGS, MODULE_LIBDIR, MODULE_LUADIR, MODULE_INCLUDEDIR }; static int test_say(lua_State *L) { say_debug("test debug"); say_info("test info"); say_verbose("test verbose"); say_warn("test warn"); say_crit("test crit"); say_error("test error"); errno = 0; say_syserror("test sysserror"); lua_pushboolean(L, 1); return 1; } static ssize_t coio_call_func(va_list ap) { return va_arg(ap, int); } static int test_coio_call(lua_State *L) { ssize_t rc = coio_call(coio_call_func, 48); lua_pushboolean(L, rc == 48); return 1; } static int test_coio_getaddrinfo(lua_State *L) { struct addrinfo hints; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_ADDRCONFIG|AI_PASSIVE; hints.ai_protocol = 0; struct addrinfo *ai = NULL; if (coio_getaddrinfo("localhost", "80", &hints, &ai, 0.1) == 0) freeaddrinfo(ai); lua_pushboolean(L, 1); return 1; } static int test_pushcheck_cdata(lua_State *L) { uint32_t uint64_ctypeid = luaL_ctypeid(L, "uint64_t"); *(uint64_t *) luaL_pushcdata(L, uint64_ctypeid) = 48; uint32_t test_ctypeid = 0; luaL_checkcdata(L, -1, &test_ctypeid); lua_pushboolean(L, test_ctypeid != 0 && uint64_ctypeid == test_ctypeid); return 1; } static int test_pushuint64(lua_State *L) { uint32_t ctypeid = 0; uint64_t num = 18446744073709551615ULL; luaL_pushuint64(L, num); uint64_t r = *(uint64_t *) luaL_checkcdata(L, -1, &ctypeid); lua_pushboolean(L, r == num && ctypeid == luaL_ctypeid(L, "uint64_t")); return 1; } static int test_pushint64(lua_State *L) { 
uint32_t ctypeid = 0; int64_t num = 9223372036854775807LL; luaL_pushint64(L, num); int64_t r = *(int64_t *) luaL_checkcdata(L, -1, &ctypeid); lua_pushboolean(L, r == num && ctypeid == luaL_ctypeid(L, "int64_t")); return 1; } static int test_checkuint64(lua_State *L) { lua_pushnumber(L, 12345678); if (luaL_checkuint64(L, -1) != 12345678) return 0; lua_pop(L, 1); lua_pushliteral(L, "18446744073709551615"); if (luaL_checkuint64(L, -1) != 18446744073709551615ULL) return 0; lua_pop(L, 1); luaL_pushuint64(L, 18446744073709551615ULL); if (luaL_checkuint64(L, -1) != 18446744073709551615ULL) return 0; lua_pop(L, 1); lua_pushboolean(L, 1); return 1; } static int test_checkint64(lua_State *L) { lua_pushnumber(L, 12345678); if (luaL_checkint64(L, -1) != 12345678) return 0; lua_pop(L, 1); lua_pushliteral(L, "9223372036854775807"); if (luaL_checkint64(L, -1) != 9223372036854775807LL) return 0; lua_pop(L, 1); luaL_pushint64(L, 9223372036854775807LL); if (luaL_checkint64(L, -1) != 9223372036854775807LL) return 0; lua_pop(L, 1); lua_pushboolean(L, 1); return 1; } static int test_touint64(lua_State *L) { lua_pushliteral(L, "xxx"); if (luaL_touint64(L, -1) != 0) return 0; lua_pop(L, 1); luaL_pushuint64(L, 18446744073709551615ULL); if (luaL_touint64(L, -1) != 18446744073709551615ULL) return 0; lua_pop(L, 1); lua_pushliteral(L, "not a cdata"); luaL_pushuint64(L, 18446744073709551615ULL); if (luaL_touint64(L, -1) != 18446744073709551615ULL) return 0; lua_pop(L, 2); lua_pushboolean(L, 1); return 1; } static int test_toint64(lua_State *L) { lua_pushliteral(L, "xxx"); if (luaL_toint64(L, -1) != 0) return 0; lua_pop(L, 1); luaL_pushint64(L, 9223372036854775807); if (luaL_toint64(L, -1) != 9223372036854775807) return 0; lua_pop(L, 1); lua_pushliteral(L, "not a cdata"); luaL_pushuint64(L, 18446744073709551615ULL); if (luaL_touint64(L, -1) != 18446744073709551615ULL) return 0; lua_pop(L, 2); lua_pushboolean(L, 1); return 1; } int fiber_test_func(va_list va) { do { fiber_set_cancellable(true); 
fiber_sleep(0.01); if (fiber_is_cancelled()) { box_error_set(__FILE__, __LINE__, 10, "test error"); return -1; } fiber_set_cancellable(false); } while (1); return 0; } static int test_fiber(lua_State *L) { struct fiber *fiber = fiber_new("test fiber", fiber_test_func); fiber_set_joinable(fiber, true); fiber_start(fiber); fiber_cancel(fiber); int ret = fiber_join(fiber); box_error_t *err = box_error_last(); lua_pushboolean(L, (int)(ret != 0 && box_error_code(err) == 10)); return 1; } static int test_cord(lua_State *L) { struct slab_cache *slabc = cord_slab_cache(); assert(slabc != NULL); struct ibuf ibuf; ibuf_create(&ibuf, slabc, 16320); ibuf_destroy(&ibuf); lua_pushboolean(L, 1); return 1; } static int test_pushcdata(lua_State *L) { if (lua_gettop(L) < 1) luaL_error(L, "invalid arguments"); uint32_t ctypeid = lua_tointeger(L, 1); void *data = luaL_pushcdata(L, ctypeid); lua_pushlightuserdata(L, data); return 2; } static int test_checkcdata(lua_State *L) { if (lua_gettop(L) < 1) luaL_error(L, "invalid arguments"); uint32_t ctypeid = 0; void *data = luaL_checkcdata(L, 1, &ctypeid); lua_pushinteger(L, ctypeid); lua_pushlightuserdata(L, data); return 2; } static int test_clock(lua_State *L) { /* Test compilation */ clock_realtime(); clock_monotonic(); clock_process(); clock_thread(); clock_realtime64(); clock_monotonic64(); clock_process64(); clock_thread64(); lua_pushboolean(L, 1); return 1; } static int test_pushtuple(lua_State *L) { char tuple_buf[64]; char *tuple_end = tuple_buf; tuple_end = mp_encode_array(tuple_end, 3); tuple_end = mp_encode_uint(tuple_end, 456734643353); tuple_end = mp_encode_str(tuple_end, "abcddcba", 8); tuple_end = mp_encode_array(tuple_end, 2); tuple_end = mp_encode_map(tuple_end, 2); tuple_end = mp_encode_uint(tuple_end, 8); tuple_end = mp_encode_uint(tuple_end, 4); tuple_end = mp_encode_array(tuple_end, 1); tuple_end = mp_encode_str(tuple_end, "a", 1); tuple_end = mp_encode_str(tuple_end, "b", 1); tuple_end = mp_encode_nil(tuple_end); 
assert(tuple_end <= tuple_buf + sizeof(tuple_buf)); box_tuple_format_t *fmt = box_tuple_format_default(); luaT_pushtuple(L, box_tuple_new(fmt, tuple_buf, tuple_end)); struct tuple *tuple = luaT_istuple(L, -1); if (tuple == NULL) goto error; char lua_buf[sizeof(tuple_buf)]; int lua_buf_size = box_tuple_to_buf(tuple, lua_buf, sizeof(lua_buf)); if (lua_buf_size != tuple_end - tuple_buf) goto error; if (memcmp(tuple_buf, lua_buf, lua_buf_size) != 0) goto error; lua_pushboolean(L, true); return 1; error: lua_pushboolean(L, false); return 1; } static int test_key_def_api(lua_State *L) { uint32_t fieldno1[] = {3, 0}; uint32_t type1[] = {FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING}; uint32_t fieldno2[] = {1}; uint32_t type2[] = {FIELD_TYPE_UNSIGNED}; box_key_def_t *key_defs[] = { box_key_def_new(fieldno1, type1, 2), box_key_def_new(fieldno2, type2, 1)}; box_tuple_format_t *format = box_tuple_format_new(key_defs, 2); char buf[64], *buf_end; buf_end = buf; buf_end = mp_encode_array(buf_end, 4); buf_end = mp_encode_str(buf_end, "bb", 2); buf_end = mp_encode_uint(buf_end, 1); buf_end = mp_encode_str(buf_end, "abcd", 4); buf_end = mp_encode_uint(buf_end, 6); box_tuple_t *tuple1 = box_tuple_new(format, buf, buf_end); box_tuple_ref(tuple1); buf_end = buf; buf_end = mp_encode_array(buf_end, 4); buf_end = mp_encode_str(buf_end, "aa", 2); buf_end = mp_encode_uint(buf_end, 8); buf_end = mp_encode_nil(buf_end); buf_end = mp_encode_uint(buf_end, 6); box_tuple_t *tuple2 = box_tuple_new(format, buf, buf_end); /* Enocode key */ buf_end = buf; buf_end = mp_encode_array(buf_end, 2); buf_end = mp_encode_uint(buf_end, 6); buf_end = mp_encode_str(buf_end, "aa", 2); bool cmp1 = box_tuple_compare(tuple1, tuple2, key_defs[0]) > 0; bool cmp2 = box_tuple_compare(tuple1, tuple2, key_defs[1]) < 0; bool cmp3 = box_tuple_compare_with_key(tuple1, buf, key_defs[0]) > 0; bool cmp4 = box_tuple_compare_with_key(tuple2, buf, key_defs[0]) == 0; box_tuple_unref(tuple1); lua_pushboolean(L, cmp1 && cmp2 && cmp3 && 
cmp4); box_tuple_format_unref(format); box_key_def_delete(key_defs[0]); box_key_def_delete(key_defs[1]); return 1; } static int check_error(lua_State *L) { box_error_raise(ER_UNSUPPORTED, "test for luaT_error"); luaT_error(L); return 1; } static int test_call(lua_State *L) { assert(luaL_loadbuffer(L, "", 0, "=eval") == 0); assert(luaT_call(L, 0, LUA_MULTRET) == 0); lua_pushboolean(L, true); return 1; } static int cpcall_handler(lua_State *L) { return 0; } static int test_cpcall(lua_State *L) { assert(luaT_cpcall(L, cpcall_handler, 0) == 0); (void)cpcall_handler; lua_pushboolean(L, true); return 1; } static int test_state(lua_State *L) { lua_State *tarantool_L = luaT_state(); assert(lua_newthread(tarantool_L) != 0); (void)tarantool_L; lua_pushboolean(L, true); return 1; } LUA_API int luaopen_module_api(lua_State *L) { (void) consts; static const struct luaL_Reg lib[] = { {"test_say", test_say }, {"test_coio_call", test_coio_call }, {"test_coio_getaddrinfo", test_coio_getaddrinfo }, {"test_pushcheck_cdata", test_pushcheck_cdata }, {"test_pushuint64", test_pushuint64 }, {"test_pushint64", test_pushint64 }, {"test_checkuint64", test_checkuint64 }, {"test_checkint64", test_checkint64 }, {"test_touint64", test_touint64 }, {"test_toint64", test_toint64 }, {"test_fiber", test_fiber }, {"test_cord", test_cord }, {"pushcdata", test_pushcdata }, {"checkcdata", test_checkcdata }, {"test_clock", test_clock }, {"test_pushtuple", test_pushtuple}, {"test_key_def_api", test_key_def_api}, {"check_error", check_error}, {"test_call", test_call}, {"test_cpcall", test_cpcall}, {"test_state", test_state}, {NULL, NULL} }; luaL_register(L, "module_api", lib); return 1; } tarantool_1.9.1.26.g63eb81e3c/test/app-tap/tap.test.lua0000775000000000000000000000665113306560010021001 0ustar rootroot#!/usr/bin/env tarantool -- -- Test suite for The Test Anything Protocol module implemented -- using module itself. 
-- -- Load 'tap' module local tap = require "tap" -- -- Create a root test -- test = tap.test("root test") -- Disable stack traces for this test because Tarantool test system also -- checks test output. test.trace = false -- -- ok, fail and skip predicates -- test:plan(32) -- plan to run 3 test test:ok(true, 'true') -- basic function local extra = { state = 'some userful information to debug on failure', details = 'a table argument formatted using yaml.encode()' } test:ok(true, "extra information is not printed on success", extra) test:ok(false, "extra printed using yaml only on failure", extra) test:fail('failed') -- always fail the test test:skip('test marked as ok and skipped') -- -- is and isnt predicates -- test:is(tonumber("48"), 48, "tonumber(48) is 48") test:isnt(0xff, 64, "0xff is not 64") test:isnt(1, 1, "1 is not 1") -- -- type predicates -- test:isnil(nil, 'nil is nil') test:isnil(48, '48 is nil') test:isnumber(10, '10 is a number') test:isnumber(0, '0 is also a number') test:isstring("blabla", '"blabla" is string') test:isstring(48, '48 is string') test:isstring(nil, 'nil is string') test:isboolean(true, 'true is boolean') test:isboolean(1, '1 is boolean') test:istable({}, '{} is a table') local udata = require('fiber').self() test:isudata(nil, 'fiber', 'udata') test:isudata(udata, 'some utype', 'udata') test:isudata(udata, 'fiber', 'udata') local ffi = require('ffi') test:iscdata('xx', 'int', 'cdata type') test:iscdata(10, 'int', 'cdata type') test:iscdata(ffi.new('int', 10), 'int', 'cdata type') test:iscdata(ffi.new('unsigned int', 10), 'int', 'cdata type') -- -- Any test also can create unlimited number of sub tests. -- Subtest with callbacks (preferred). -- test:test("subtest 1", function(t) t:plan(2) t:ok(true, 'true') t:ok(true, 'true') -- test:check() is called automatically end) -- each subtest is counted in parent -- -- Subtest without callbacks. 
-- sub2 = test:test("subtest 2") sub2:plan(1) sub2:ok(true, 'true in subtest') sub2:diag('hello from subtest') sub2:check() -- please call check() explicitly -- -- Multisubtest -- test:test("1 level", function(t) t:plan(1) t:test("2 level", function(t) t:plan(1) t:test("3 level", function(t) t:plan(1) t:test("4 level", function(t) t:plan(1) t:test("5 level", function(t) t:plan(1) t:ok(true, 'ok') end) end) end) end) end) --- --- Subtest with bad plan() --- test:test("bad plan", function(t) t:plan(3) t:ok(true, 'true') end) test:test("failed subtest", function(t) t:plan(1) t:fail("failed subtest") end) test:test('is_deeply', function(t) t:plan(6) t:is_deeply(1, 1, '1 and 1') t:is_deeply('abc', 'abc', 'abc and abc') t:is_deeply({}, {}, 'empty tables') t:is_deeply({1}, {1}, '{1} and {1}') t:is_deeply({1}, {2}, '{1} and {2}') t:is_deeply({1, 2, { 3, 4 }}, {1, 2, { 3, 5 }}, '{1,2,{3,4}} and {1,2,{3,5}}') end) test:test('like', function(t) t:plan(2) t:like('abcde', 'cd', 'like(abcde, cd)') t:unlike('abcde', 'acd', 'unlike(abcde, acd)') end) -- -- Finish root test. Since we used non-callback variant, we have to -- call check explicitly. -- test:check() -- call check() explicitly os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/app-tap/csv.test.lua0000775000000000000000000001020013306560010020771 0ustar rootroot#!/usr/bin/env tarantool local function table2str(t) local res = "" for k, line in pairs(t) do local s = "" for k2, field in pairs(line) do s = s .. '|' .. field .. '|\t' end res = res .. s .. '\n' end return res end local function myread(self, bytes) self.i = self.i + bytes return self.v:sub(self.i - bytes + 1, self.i) end local csv = require('csv') local fio = require('fio') local tap = require('tap') local test1_ans = '|a|\t|b|\t\n|1|\t|ha\n"ha"\nha|\t\n|3|\t|4|\t\n' local test2_ans = '||\t||\t||\t\n||\t||\t\n||\t\n' local test3_ans = '||\t||\t\n|kp"v|\t\n' local test4_ans = '|123|\t|5|\t|92|\t|0|\t|0|\t\n|1|\t|12 34|\t|56|\t' .. 
'|quote , |\t|66|\t\n|ok|\t\n' local test5_ans = "|1|\t\n|23|\t|456|\t|abcac|\t|'multiword field 4'|\t\n" .. "|none|\t|none|\t|0|\t\n||\t||\t||\t\n|aba|\t|adda|\t|f" .. "3|\t|0|\t\n|local res = internal.pwrite(self.fh|\t|dat" .. "a|\t|len|\t|offset)|\t\n|iflag = bit.bor(iflag|\t|fio." .. "c.flag[ flag ])|\t\n||\t||\t||\t\n" local test6_ans = "|23|\t|456|\t|abcac|\t|'multiword field 4'|\t\n|none|" .. "\t|none|\t|0|\t\n||\t||\t||\t\n|aba|\t|adda|\t|f3|\t|" .. "0|\t\n|local res = internal.pwrite(self.fh|\t|data|\t" .. "|len|\t|offset)|\t\n|iflag = bit.bor(iflag|\t|fio.c.f" .. "lag[ flag ])|\t\n||\t||\t||\t\n" test = tap.test("csv") test:plan(11) readable = {} readable.read = myread readable.v = "a,b\n1,\"ha\n\"\"ha\"\"\nha\"\n3,4\n" readable.i = 0 test:is(table2str(csv.load(readable)), test1_ans, "obj test1") readable.v = ", ,\n , \n\n" readable.i = 0 test:is(table2str(csv.load(readable, {chunk_size = 1} )), test2_ans, "obj test2") readable.v = ", \r\nkp\"\"v" readable.i = 0 test:is(table2str(csv.load(readable, {chunk_size = 3})), test3_ans, "obj test3") tmpdir = fio.tempdir() file1 = fio.pathjoin(tmpdir, 'file.1') file2 = fio.pathjoin(tmpdir, 'file.2') file3 = fio.pathjoin(tmpdir, 'file.3') file4 = fio.pathjoin(tmpdir, 'file.4') local f = fio.open(file1, { 'O_WRONLY', 'O_TRUNC', 'O_CREAT' }, 0777) f:write("123 , 5 , 92 , 0, 0\n" .. "1, 12 34, 56, \"quote , \", 66\nok") f:close() f = fio.open(file1, {'O_RDONLY'}) test:is(table2str(csv.load(f, {chunk_size = 10})), test4_ans, "fio test1") f:close() f = fio.open(file2, { 'O_WRONLY', 'O_TRUNC', 'O_CREAT' }, 0777) f:write("1\n23,456,abcac,\'multiword field 4\'\n" .. "none,none,0\n" .. ",,\n" .. "aba,adda,f3,0\n" .. "local res = internal.pwrite(self.fh, data, len, offset)\n" .. "iflag = bit.bor(iflag, fio.c.flag[ flag ])\n" .. 
",," ) f:close() f = fio.open(file2, {'O_RDONLY'}) --symbol by symbol reading test:is(table2str(csv.load(f, {chunk_size = 1})), test5_ans, "fio test2") f:close() f = fio.open(file2, {'O_RDONLY'}) opts = {chunk_size = 7, skip_head_lines = 1} --7 symbols per chunk test:is(table2str(csv.load(f, opts)), test6_ans, "fio test3") f:close() t = { {'quote" d', ',and, comma', 'both " of " t,h,e,m'}, {'"""', ',","'}, {'mul\nti\nli\r\nne\n\n', 'field'}, {""}, {'"'}, {"\n"} } f = require("fio").open(file3, { "O_WRONLY", "O_TRUNC" , "O_CREAT"}, 0x1FF) csv.dump(t, {}, f) f:close() f = fio.open(file3, {'O_RDONLY'}) t2 = csv.load(f, {chunk_size = 5}) f:close() test:is(table2str(t), table2str(t2), "test roundtrip") test:is(table2str(t), table2str(csv.load(csv.dump(t))), "test load(dump(t))") test:is(table2str(csv.load('a,b,c,')), '|a|\t|b|\t|c|\t||\t\n', "final comma") local str = "ячсмитьб-Pincall;79031111111\r\n" str = str .. str .. str .. str .. str .. str str = "Vendor;Prefix\r\n" .. str f = fio.open(file4, { "O_WRONLY", "O_TRUNC" , "O_CREAT"}, 0x1FF) f:write(str) f:close() test:is(#csv.load(fio.open(file4, {'O_RDONLY'}), {separator = ';', chunk_size = 3}), 7, "gh-1210 (1)") test:is(#csv.load(fio.open(file4, {'O_RDONLY'}), {separator = ';', chunk_size = 4}), 7, "gh-1210 (2)") fio.unlink(file1) fio.unlink(file2) fio.unlink(file3) fio.unlink(file4) fio.rmdir(tmpdir) test:check() tarantool_1.9.1.26.g63eb81e3c/test/app-tap/logger.test.lua0000775000000000000000000000670613306560010021475 0ustar rootroot#!/usr/bin/env tarantool local test = require('tap').test('log') test:plan(24) -- -- Check that Tarantool creates ADMIN session for #! script -- local filename = "1.log" local message = "Hello, World!" 
box.cfg{ log=filename, memtx_memory=107374182, } local log = require('log') local fio = require('fio') local json = require('json') local fiber = require('fiber') local file = io.open(filename) while file:read() do end log.info(message) local line = file:read() test:is(line:sub(-message:len()), message, "message") s, err = pcall(json.decode, line) test:ok(not s, "plain") -- -- gh-700: Crash on calling log.info() with formatting characters -- log.info("gh-700: %%s %%f %%d") test:is(file:read():match('I>%s+(.*)'), "gh-700: %%s %%f %%d", "formatting") log.info("gh-2340: %s %D") test:is(file:read():match('I>%s+(.*)'), "gh-2340: %s %D", "formatting without arguments") log.info({key="value"}) test:is(file:read():match('I>%s+(.*)'), '{"key":"value"}', "table is handled as json") -- --gh-2923 dropping message field -- log.info({message="value"}) test:is(file:read():match('I>%s+(.*)'), '{"message":"value"}', "table is handled as json") function help() log.info("gh-2340: %s %s", 'help') end xpcall(help, function(err) test:ok(err:match("bad argument #3"), "found error string") test:ok(err:match("logger.test.lua:"), "found error place") end) file:close() test:ok(log.pid() >= 0, "pid()") -- logger uses 'debug', try to set it to nil debug = nil log.info("debug is nil") debug = require('debug') test:ok(log.info(true) == nil, 'check tarantool crash (gh-2516)') s, err = pcall(box.cfg, {log_format='json', log="syslog:identity:tarantool"}) test:ok(not s, "check json not in syslog") box.cfg{log=filename, memtx_memory=107374182, log_format = "json"} local file = io.open(filename) while file:read() do end log.error("error") local line = file:read() message = json.decode(line) test:is(type(message), 'table', "json valid in log.error") test:is(message.level, "ERROR", "check type error") test:is(message.message, "error", "check error message") log.info({key="value", level=48}) local line = file:read() message = json.decode(line) test:is(type(message), 'table', "json valid in log.info") 
test:is(message.level, "INFO", "check type info") test:is(message.message, nil, "check message is nil") test:is(message.key, "value", "custom table encoded") log.info('this is "') local line = file:read() message = json.decode(line) test:is(message.message, "this is \"", "check message with escaped character") -- gh-3248 trash in log file with logging large objects log.info(string.rep('a', 32000)) line = file:read() test:ok(line:len() < 20000, "big line truncated") log.info("json") local line = file:read() message = json.decode(line) test:is(message.message, "json", "check message with internal key word") log.log_format("plain") log.info("hello") line = file:read() test:ok(not line:match("{"), "log change format") s, e = pcall(log.log_format, "non_format") test:ok(not s, "bad format") file:close() log.log_format("json") fio.rename(filename, filename .. "2") log.rotate() file = fio.open(filename) while file == nil do file = fio.open(filename) fiber.sleep(0.0001) end line = file:read() while line == nil or line == "" do line = file:read() fiber.sleep(0.0001) end index = line:find('\n') line = line:sub(1, index) message = json.decode(line) test:is(message.message, "log file has been reopened", "check message after log.rotate()") file:close() test:check() os.exit() tarantool_1.9.1.26.g63eb81e3c/test/luajit-tap/0000775000000000000000000000000013306560010017231 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/luajit-tap/suite.ini0000664000000000000000000000010413306560010021056 0ustar rootroot[default] core = app description = Luajit tests is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/luajit-tap/gh.test.lua0000775000000000000000000000064013306560010021313 0ustar rootroot#!/usr/bin/env tarantool -- Miscellaneous test for LuaJIT bugs tap = require('tap') test = tap.test("gh") test:plan(2) -- -- gh-3196: incorrect string length if Lua hash returns 0 -- h = "\x1F\x93\xE2\x1C\xCA\xDE\x28\x08\x26\x01\xED\x0A\x2F\xE4\x21\x02\x97\x77\xD9\x3E" test:is(h:len(), 20) h = 
"\x0F\x93\xE2\x1C\xCA\xDE\x28\x08\x26\x01\xED\x0A\x2F\xE4\x21\x02\x97\x77\xD9\x3E" test:is(h:len(), 20) test:check() tarantool_1.9.1.26.g63eb81e3c/test/replication/0000775000000000000000000000000013306565107017504 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/prune.test.lua0000664000000000000000000000506013306560010022303 0ustar rootrootprint '-------------------------------------------------------------' print 'gh-806: cant prune old replicas by deleting their server ids' print '-------------------------------------------------------------' env = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') replica_set = require('fast_replica') fiber = require('fiber') test_run:cleanup_cluster() box.space._cluster:len() == 1 box.schema.user.grant('guest', 'read,write,execute', 'universe') -- Create space and fill it space = box.schema.create_space('test', {engine = engine}) index = box.space.test:create_index('primary') for i=1,10 do space:insert{i, 'test'} end -- create max number of replicas and check replica_set.join(test_run, box.schema.REPLICA_MAX - 2) while box.space._cluster:len() ~= box.schema.REPLICA_MAX - 1 do fiber.sleep(0.001) end box.space._cluster:len() == box.schema.REPLICA_MAX - 1 -- try to add one more replica uuid = require('uuid') box.space._cluster:insert{box.schema.REPLICA_MAX, uuid.str()} -- Delete all replication nodes replica_set.drop_all(test_run) box.space._cluster:len() == 1 -- Save a snapshot without removed replicas in vclock box.snapshot() -- Master is not crashed then recovering xlog with {replica_id: 0} in header test_run:cmd('restart server default') replica_set = require('fast_replica') fiber = require('fiber') -- Rejoin replica and check replica_set.join(test_run, 1) while box.space._cluster:len() ~= 2 do fiber.sleep(0.001) end -- Check server ids test_run:cmd('eval replica1 "return box.info.id"') box.space._cluster:len() == 2 -- Cleanup replica_set.drop_all(test_run) box.space._cluster:len() == 1 
-- delete replica from master replica_set.join(test_run, 1) while box.space._cluster:len() ~= 2 do fiber.sleep(0.001) end -- Check server ids test_run:cmd('eval replica1 "return box.info.id"') box.space._cluster:len() == 2 replica_set.unregister(test_run, 2) while test_run:cmd('eval replica1 "box.info.replication[1].upstream.status"')[1] ~= 'stopped' do fiber.sleep(0.001) end test_run:cmd('eval replica1 "box.info.replication[1].upstream.message"') -- restart replica and check that replica isn't able to join to cluster test_run:cmd('restart server replica1') test_run:cmd('switch default') box.space._cluster:len() == 1 test_run:cmd('eval replica1 "box.info.replication[1].upstream.status"') test_run:cmd('eval replica1 "box.info.replication[1].upstream.message"')[1]:match("is not registered with replica set") ~= nil replica_set.delete(test_run, 2) box.space.test:drop() box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/replication/replica_timeout.lua0000664000000000000000000000051513306560010023361 0ustar rootroot#!/usr/bin/env tarantool local TIMEOUT = tonumber(arg[1]) box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, replication_timeout = TIMEOUT, replication_connect_timeout = TIMEOUT * 3, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication/ddl.lua0000664000000000000000000000214213306560010020735 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (autobootstrap1.lua => autobootstrap1) local INSTANCE_ID = string.match(arg[0], "%d") local USER = 'cluster' local PASSWORD = 'somepassword' local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/autobootstrap'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID); -- log_level = 7; 
replication = { USER..':'..PASSWORD..'@'..instance_uri(1); USER..':'..PASSWORD..'@'..instance_uri(2); USER..':'..PASSWORD..'@'..instance_uri(3); USER..':'..PASSWORD..'@'..instance_uri(4); }; replication_connect_timeout = 0.5, }) box.once("bootstrap", function() local test_run = require('test_run').new() box.schema.user.create(USER, { password = PASSWORD }) box.schema.user.grant(USER, 'replication') box.schema.space.create('test', {engine = test_run:get_cfg('engine')}) box.space.test:create_index('primary') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/join_vclock.result0000664000000000000000000000251513306560010023233 0ustar rootrootfiber = require('fiber') --- ... env = require('test_run') --- ... replica_set = require('fast_replica') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... errinj = box.error.injection --- ... errinj.set("ERRINJ_RELAY_FINAL_SLEEP", true) --- - ok ... box.schema.user.grant('guest', 'replication') --- ... s = box.schema.space.create('test', {engine = engine}); --- ... index = s:create_index('primary') --- ... ch = fiber.channel(1) --- ... done = false --- ... function repl_f() local i = 0 while not done do s:replace({i, i}) fiber.sleep(0.001) i = i + 1 end ch:put(true) end --- ... _ = fiber.create(repl_f) --- ... replica_set.join(test_run, 1) --- ... test_run:cmd("switch replica1") --- - true ... test_run:cmd("switch default") --- - true ... done = true --- ... ch:get() --- - true ... errinj.set("ERRINJ_RELAY_FINAL_SLEEP", false) --- - ok ... test_run:cmd("switch replica1") --- - true ... cnt = box.space.test.index[0]:count() --- ... box.space.test.index.primary:max()[1] == cnt - 1 --- - true ... test_run:cmd("switch default") --- - true ... replica_set.drop_all(test_run) --- ... box.space.test:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... 
box.schema.user.revoke('guest', 'replication') --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/replicaset_ro_mostly.result0000664000000000000000000000267213306560010025201 0ustar rootroot-- gh-3257 check bootstrap with read-only replica in cluster. -- Old behaviour: failed, since read-only is chosen by uuid. test_run = require('test_run').new() --- ... SERVERS = {'replica_uuid_ro1', 'replica_uuid_ro2'} --- ... uuid = require('uuid') --- ... uuid1 = uuid.new() --- ... uuid2 = uuid.new() --- ... function sort_cmp(a, b) return a.time_low > b.time_low and true or false end --- ... function sort(t) table.sort(t, sort_cmp) return t end --- ... UUID = sort({uuid1, uuid2}, sort_cmp) --- ... create_cluster_cmd1 = 'create server %s with script="replication/%s.lua"' --- ... create_cluster_cmd2 = 'start server %s with args="%s", wait_load=False, wait=False' --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function create_cluster_uuid(servers, uuids) for i, name in ipairs(servers) do test_run:cmd(create_cluster_cmd1:format(name, name)) test_run:cmd(create_cluster_cmd2:format(name, uuids[i])) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- Deploy a cluster. create_cluster_uuid(SERVERS, UUID) --- ... test_run:wait_fullmesh(SERVERS) --- ... -- Add third replica name = 'replica_uuid_ro3' --- ... test_run:cmd(create_cluster_cmd1:format(name, name)) --- - true ... test_run:cmd(create_cluster_cmd2:format(name, uuid.new())) --- - true ... test_run:cmd('switch replica_uuid_ro3') --- - true ... test_run:cmd('switch default') --- - true ... -- Cleanup. test_run:drop_cluster(SERVERS) --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap_guest1.lua0000777000000000000000000000000013306560010031204 2autobootstrap_guest.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/suite.ini0000664000000000000000000000053513306560010021325 0ustar rootroot[default] core = tarantool script = master.lua description = tarantool/box, replication disabled = consistent.test.lua release_disabled = catch.test.lua errinj.test.lua gc.test.lua before_replace.test.lua quorum.test.lua recover_missing_xlog.test.lua config = suite.cfg lua_libs = lua/fast_replica.lua long_run = prune.test.lua is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/replication/errinj.test.lua0000664000000000000000000001635613306560010022455 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') box.schema.user.grant('guest', 'read,write,execute', 'universe') errinj = box.error.injection box.schema.user.grant('guest', 'replication') s = box.schema.space.create('test', {engine = engine}); index = s:create_index('primary') test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") test_run:cmd("start server replica") test_run:cmd("switch replica") fiber = require('fiber') s = box.space.test test_run:cmd("setopt delimiter ';'") -- vinyl does not support index.len() so we use index.count() instead function wait_repl(cnt) for i = 1, 20 do if s.index[0]:count() >= cnt then return true end fiber.sleep(0.01) end return false end; test_run:cmd("setopt delimiter ''"); test_run:cmd("switch default") d = require('digest') test_run:cmd("setopt delimiter ';'") function test_f(st, tx) if tx then box.begin() end for i = st, st + 9 do local _ = s:insert({i, d.urandom(8192)}) end if tx then box.commit() end end; test_run:cmd("setopt delimiter ''"); test_f(1) errinj.set("ERRINJ_WAL_WRITE_PARTIAL", 16384) test_f(11, true) errinj.set("ERRINJ_WAL_WRITE_PARTIAL", -1) test_f(11, true) test_f(21, true) test_run:cmd("switch 
replica") wait_repl(30) test_run:cmd("switch default") box.space.test.index[0]:count() errinj.set("ERRINJ_WAL_WRITE_DISK", true) test_f(31, true) errinj.set("ERRINJ_WAL_WRITE_DISK", false) test_f(31, true) test_f(41, true) test_run:cmd("switch replica") wait_repl(50) test_run:cmd("switch default") box.space.test.index[0]:count() -- Check that master doesn't stall on WALs without EOF (gh-2294). errinj.set("ERRINJ_WAL_WRITE_EOF", true) box.snapshot() test_f(51, true) test_run:cmd("switch replica") wait_repl(60) test_run:cmd("switch default") errinj.set("ERRINJ_WAL_WRITE_EOF", false) box.snapshot() -- Check that replication doesn't freeze if slave bumps LSN -- while master is down (gh-3038). To do this, -- 1. Stop replication by injecting an error on the slave. -- 2. Bump LSN on the slave while replication is inactive. -- 3. Restore replication. -- 4. Generate some records on the master. -- 5. Make sure they'll make it to the slave. test_run:cmd("switch replica") box.error.injection.set("ERRINJ_WAL_WRITE", true) test_run:cmd("switch default") s:replace{9000, "won't make it"} test_run:cmd("switch replica") while box.info.replication[1].upstream.status == 'follow' do fiber.sleep(0.0001) end box.error.injection.set("ERRINJ_WAL_WRITE", false) s:replace{9001, "bump lsn"} box.cfg{replication={}} box.cfg{replication = os.getenv('MASTER')} test_run:cmd("switch default") test_f(61, true) test_run:cmd("switch replica") wait_repl(70) test_run:cmd("switch default") test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") -- Set minuscule timeout to make replication stop -- immediately after join. box.cfg{replication_timeout = 0.0001} test_run:cmd("start server replica") test_run:cmd("switch replica") fiber = require'fiber' while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end test_run:cmd("switch default") -- Disable heartbeat messages on the master so as not -- to trigger acks on the replica. 
errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 5) box.cfg{replication_timeout = 0.05} test_run:cmd("switch replica") -- wait for reconnect while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end box.info.replication[1].upstream.status box.info.replication[1].upstream.lag > 0 box.info.replication[1].upstream.lag < 1 -- wait for ack timeout while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end test_run:cmd("switch default") errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 0) box.cfg{replication_timeout = 5} test_run:cmd("switch replica") -- wait for reconnect while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end -- wait for ack timeout again, should be ok fiber.sleep(0.01) {box.info.replication[1].upstream.status, box.info.replication[1].upstream.message} test_run:cmd("switch default") test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") errinj = box.error.injection errinj.set("ERRINJ_RELAY_EXIT_DELAY", 0.01) test_run:cmd("start server replica") test_run:cmd("switch replica") fiber = require('fiber') old_repl = box.cfg.replication -- shutdown applier box.cfg{replication = {}, replication_timeout = 0.1} while box.info.replication[1].upstream ~= nil do fiber.sleep(0.0001) end -- reconnect box.cfg{replication = {old_repl}} while box.info.replication[1].upstream.status ~= 'disconnected' do fiber.sleep(0.0001) end while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end test_run:cmd("switch default") test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") errinj.set("ERRINJ_RELAY_EXIT_DELAY", 0) box.cfg{replication_timeout = 0.01} test_run:cmd("create server replica_timeout with rpl_master=default, script='replication/replica_timeout.lua'") test_run:cmd("start server replica_timeout with args='0.01'") test_run:cmd("switch replica_timeout") fiber = require('fiber') while box.info.replication[1].upstream.status ~= 'follow' 
do fiber.sleep(0.0001) end box.info.replication[1].upstream.status test_run:cmd("switch default") errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 5) test_run:cmd("switch replica_timeout") -- Check replica's disconnection on timeout (gh-3025). -- If master stops send heartbeat messages to replica, -- due to infinite read timeout connection never breaks, -- replica shows state 'follow' so old behaviour hangs -- here in infinite loop. while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end test_run:cmd("switch default") test_run:cmd("stop server replica_timeout") test_run:cmd("cleanup server replica_timeout") errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 0) -- Check replica's ACKs don't prevent the master from sending -- heartbeat messages (gh-3160). test_run:cmd("start server replica_timeout with args='0.009'") test_run:cmd("switch replica_timeout") fiber = require('fiber') while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end box.info.replication[1].upstream.status -- follow for i = 0, 15 do fiber.sleep(0.01) if box.info.replication[1].upstream.status ~= 'follow' then break end end box.info.replication[1].upstream.status -- follow test_run:cmd("switch default") test_run:cmd("stop server replica_timeout") test_run:cmd("cleanup server replica_timeout") box.snapshot() for i = 0, 9999 do box.space.test:replace({i, 4, 5, 'test'}) end -- Check that replication_timeout is not taken into account -- during the join stage, i.e. a replica with a minuscule -- timeout successfully bootstraps and breaks connection only -- after subscribe. 
test_run:cmd("start server replica_timeout with args='0.00001'") test_run:cmd("switch replica_timeout") fiber = require('fiber') while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end test_run:cmd("stop server default") test_run:cmd("deploy server default") test_run:cmd("start server default") test_run:cmd("switch default") test_run:cmd("stop server replica_timeout") test_run:cmd("cleanup server replica_timeout") tarantool_1.9.1.26.g63eb81e3c/test/replication/rebootstrap.lua0000664000000000000000000000131213306560010022534 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (quorum1.lua => quorum1) local INSTANCE_ID = string.match(arg[0], "%d") local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) return SOCKET_DIR..'/rebootstrap'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID), instance_uuid = '12345678-abcd-1234-abcd-123456789ef' .. 
INSTANCE_ID, replication_timeout = 0.1, replication_connect_timeout = 0.5, replication = { instance_uri(1); instance_uri(2); }; }) box.once("bootstrap", function() box.schema.user.grant('guest', 'replication') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/on_replace1.lua0000777000000000000000000000000013306560010025170 2on_replace.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/consistent.test.lua0000664000000000000000000001015313306560010023342 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') --# create server replica with rpl_master=default, script='replication/replica.lua' --# start server replica --# set connection default box.schema.user.grant('guest', 'replication') -- Wait until the grant reaches the replica --# set connection replica while box.space['_priv']:len() < 1 do box.fiber.sleep(0.01) end --# setopt delimiter ';' --# set connection default, replica do begin_lsn = -1 function _set_pri_lsn(_lsn) begin_lsn = _lsn end function _print_lsn() return (box.info.lsn - begin_lsn + 1) end function _insert(_begin, _end, msg) local a = {} for i = _begin, _end do table.insert(a, box.space[0]:insert{i, msg..' - '..i}) end return unpack(a) end function _select(_begin, _end) local a = {} while box.info.lsn < begin_lsn + _end + 2 do box.fiber.sleep(0.001) end for i = _begin, _end do table.insert(a, box.space[0]:get{i}) end return unpack(a) end end; --# setopt delimiter '' --# set connection default --# set variable replica_port to 'replica.listen' -- set begin lsn on master and replica. 
begin_lsn = box.info.lsn a = box.net.box.new('127.0.0.1', replica_port) a:call('_set_pri_lsn', box.info.lsn) a:close() s = box.schema.space.create('tweedledum', {id = 0, engine = engine}); -- vinyl does not support hash index index = s:create_index('primary', {type = (engine == 'vinyl' and 'tree' or 'hash') }) _insert(1, 10, 'master') _select(1, 10) --# set connection replica _select(1, 10) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() ----------------------------- -- Master LSN > Replica LSN ----------------------------- -------------------- -- Replica to Master -------------------- old_replication = box.cfg.replication box.cfg{replication=""} --# set connection default _insert(11, 20, 'master') _select(11, 20) --# set connection replica _insert (11, 15, 'replica') _select (11, 15) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() ------------------- -- rollback Replica ------------------- box.cfg{replication=old_replication} _select(11, 20) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() ------------------------------ -- Master LSN == Replica LSN ------------------------------ -------------------- -- Replica to Master -------------------- box.cfg{replication=""} --# set connection default _insert(21, 30, 'master') _select(21, 30) --# set connection replica _insert(21, 30, 'replica') _select(21, 30) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() ------------------- -- rollback Replica ------------------- box.cfg{replication=old_replication} _select(21, 30) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() ----------------------------- -- Master LSN < Replica LSN ----------------------------- -------------------- -- Replica to Master -------------------- 
box.cfg{replication=""} --# set connection default _insert(31, 40, 'master') _select(31, 40) --# set connection replica _insert(31, 50, 'replica') _select(31, 50) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() ------------------- -- rollback Replica ------------------- box.cfg{replication=old_replication} _select(31, 50) --# set connection default _insert(41, 60, 'master') --# set connection replica _select(41, 60) --# set connection default -- Master LSN: _print_lsn() --# set connection replica -- Replica LSN: _print_lsn() -- Test that a replica replies with master connection URL on update request --# push filter '127.0.0.1:.*' to '127.0.0.1:' box.space[0]:insert{0, 'replica is RO'} --# clear filter --# stop server replica --# cleanup server replica --# set connection default box.space[0]:drop() box.schema.user.revoke('guest', 'replication') tarantool_1.9.1.26.g63eb81e3c/test/replication/ddl1.lua0000777000000000000000000000000013306560010022260 2ddl.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/replica_uuid_ro3.lua0000777000000000000000000000000013306560010027266 2replica_uuid_ro.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap.result0000664000000000000000000000700613306560010023641 0ustar rootrootenv = require('test_run') --- ... vclock_diff = require('fast_replica').vclock_diff --- ... test_run = env.new() --- ... SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } --- ... -- -- Start servers -- test_run:create_cluster(SERVERS) --- ... -- -- Wait for full mesh -- test_run:wait_fullmesh(SERVERS) --- ... -- -- Check vclock -- vclock1 = test_run:get_vclock('autobootstrap1') --- ... vclock_diff(vclock1, test_run:get_vclock('autobootstrap2')) --- - 0 ... vclock_diff(vclock1, test_run:get_vclock('autobootstrap3')) --- - 0 ... -- -- Insert rows on each server -- _ = test_run:cmd("switch autobootstrap1") --- ... 
_ = box.space.test:insert({box.info.id}) --- ... _ = test_run:cmd("switch autobootstrap2") --- ... _ = box.space.test:insert({box.info.id}) --- ... _ = test_run:cmd("switch autobootstrap3") --- ... _ = box.space.test:insert({box.info.id}) --- ... _ = test_run:cmd("switch default") --- ... -- -- Synchronize -- vclock = test_run:get_cluster_vclock(SERVERS) --- ... vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock) --- ... vclock_diff(vclock1, vclock2) --- - 3 ... -- -- Check result -- _ = test_run:cmd("switch autobootstrap1") --- ... box.space.test:select() --- - - [1] - [2] - [3] ... _ = test_run:cmd("switch autobootstrap2") --- ... box.space.test:select() --- - - [1] - [2] - [3] ... _ = test_run:cmd("switch autobootstrap3") --- ... box.space.test:select() --- - - [1] - [2] - [3] ... _ = test_run:cmd("switch default") --- ... _ = test_run:cmd("switch autobootstrap1") --- ... u1 = box.schema.user.create('test_u') --- ... box.schema.user.grant('test_u', 'read,write,create', 'universe') --- ... box.session.su('test_u') --- ... _ = box.schema.space.create('test_u'):create_index('pk') --- ... box.session.su('admin') --- ... _ = box.space.test_u:replace({1, 2, 3, 4}) --- ... box.space.test_u:select() --- - - [1, 2, 3, 4] ... -- Synchronize vclock = test_run:get_vclock('autobootstrap1') --- ... _ = test_run:wait_vclock("autobootstrap2", vclock) --- ... _ = test_run:wait_vclock("autobootstrap3", vclock) --- ... _ = test_run:cmd("switch autobootstrap2") --- ... box.space.test_u:select() --- - - [1, 2, 3, 4] ... _ = test_run:cmd("switch autobootstrap3") --- ... box.space.test_u:select() --- - - [1, 2, 3, 4] ... -- -- Rebootstrap one node and check that others follow. -- _ = test_run:cmd("switch autobootstrap1") --- ... _ = test_run:cmd("restart server autobootstrap1 with cleanup=1") _ = box.space.test_u:replace({5, 6, 7, 8}) --- ... box.space.test_u:select() --- - - [1, 2, 3, 4] - [5, 6, 7, 8] ... _ = test_run:cmd("switch default") --- ... 
test_run:wait_fullmesh(SERVERS) --- ... vclock = test_run:get_vclock("autobootstrap1") --- ... _ = test_run:wait_vclock("autobootstrap2", vclock) --- ... _ = test_run:wait_vclock("autobootstrap3", vclock) --- ... _ = test_run:cmd("switch autobootstrap2") --- ... box.space.test_u:select() --- - - [1, 2, 3, 4] - [5, 6, 7, 8] ... _ = test_run:cmd("switch autobootstrap3") --- ... box.space.test_u:select() --- - - [1, 2, 3, 4] - [5, 6, 7, 8] ... _ = test_run:cmd("switch default") --- ... _ = test_run:cmd("switch autobootstrap1") --- ... for i = 0, 99 do box.schema.space.create('space' .. tostring(i)):format({{'id', 'unsigned'}}) end --- ... _ = test_run:cmd("switch autobootstrap2") --- ... _ = test_run:cmd("switch autobootstrap3") --- ... _ = test_run:cmd("switch autobootstrap1") --- ... for i = 0, 99 do box.space['space' .. tostring(i)]:drop() end --- ... _ = test_run:cmd("switch default") --- ... -- -- Stop servers -- test_run:drop_cluster(SERVERS) --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/rebootstrap1.lua0000777000000000000000000000000013306560010025662 2rebootstrap.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/on_replace.result0000664000000000000000000000466413306560010023051 0ustar rootroot-- -- Check that replication applier invokes on_replace triggers -- env = require('test_run') --- ... test_run = env.new() --- ... fiber = require('fiber') --- ... _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary') --- ... box.schema.user.grant('guest', 'replication') --- ... test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... session_type = nil --- ... -- -- gh-2642: box.session.type() in replication applier -- _ = box.space.test:on_replace(function() session_type = box.session.type() end) --- ... box.space.test:insert{1} --- - [1] ... 
-- -- console -- session_type --- - console ... test_run:cmd("switch default") --- - true ... box.space.test:insert{2} --- - [2] ... test_run:cmd("switch replica") --- - true ... while box.space.test:count() < 2 do fiber.sleep(0.01) end --- ... -- -- applier -- session_type --- - applier ... test_run:cmd("switch default") --- - true ... -- -- cleanup -- test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... box.space.test:drop() --- ... box.schema.user.revoke('guest', 'replication') --- ... -- gh-2682 on_replace on slave server with data change SERVERS = { 'on_replace1', 'on_replace2' } --- ... test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... test_run:cmd('switch on_replace1') --- - true ... fiber = require'fiber' --- ... s1 = box.schema.space.create('s1') --- ... _ = s1:create_index('pk') --- ... s2 = box.schema.space.create('s2') --- ... _ = s2:create_index('pk') --- ... test_run:cmd('switch on_replace2') --- - true ... fiber = require'fiber' --- ... while box.space.s2 == nil do fiber.sleep(0.00001) end --- ... _ = box.space.s1:on_replace(function (old, new) box.space.s2:replace(new) end) --- ... test_run:cmd('switch on_replace1') --- - true ... box.space.s1:replace({1, 2, 3, 4}) --- - [1, 2, 3, 4] ... while #(box.space.s2:select()) == 0 do fiber.sleep(0.00001) end --- ... test_run:cmd('switch on_replace2') --- - true ... box.space.s1:select() --- - - [1, 2, 3, 4] ... box.space.s2:select() --- - - [1, 2, 3, 4] ... test_run:cmd('switch on_replace1') --- - true ... box.space.s1:select() --- - - [1, 2, 3, 4] ... box.space.s2:select() --- - - [1, 2, 3, 4] ... _ = test_run:cmd('switch default') --- ... test_run:drop_cluster(SERVERS) --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/replica.lua0000664000000000000000000000040013306560010021604 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, replication_connect_timeout = 0.5, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication/master_quorum.lua0000664000000000000000000000160613306560010023101 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (master_quorum1.lua => master_quorum1) local INSTANCE_ID = string.match(arg[0], "%d") local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/master_quorum'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID); -- log_level = 7; replication = { instance_uri(1); instance_uri(2); }; replication_connect_quorum = 0; replication_connect_timeout = 0.1; }) test_run = require('test_run').new() engine = test_run:get_cfg('engine') box.once("bootstrap", function() box.schema.user.grant("guest", 'replication') box.schema.space.create('test', {engine = engine}) box.space.test:create_index('primary') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap1.lua0000777000000000000000000000000013306560010026566 2autobootstrap.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/once.test.lua0000664000000000000000000000131413306560010022074 0ustar rootrootfiber = require('fiber') box.once() box.once("key") box.once("key", "key") box.once("key", nil) box.once("key", function() end) once = nil function f(arg) if once ~= nil then once = once + arg else once = arg end end box.once("test", f, 1) once box.once("test", f, 1) once -- Check that box.once() does not fail if the instance is read-only, -- instead it waits until the instance enters read-write mode. 
once = nil box.cfg{read_only = true} ch = fiber.channel(1) _ = fiber.create(function() box.once("ro", f, 1) ch:put(true) end) fiber.sleep(0.001) once -- nil box.cfg{read_only = false} ch:get() once -- 1 box.cfg{read_only = true} box.once("ro", f, 1) -- ok, already done once -- 1 box.cfg{read_only = false} tarantool_1.9.1.26.g63eb81e3c/test/replication/rebootstrap.result0000664000000000000000000000144013306560010023273 0ustar rootroottest_run = require('test_run').new() --- ... SERVERS = {'rebootstrap1', 'rebootstrap2'} --- ... test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... -- -- gh-3422: If quorum can't be formed, because some replicas are -- re-bootstrapping, box.cfg{} must wait for bootstrap to complete -- instead of stopping synchronization and leaving the instance -- in 'orphan' mode. -- test_run:cmd('stop server rebootstrap1') --- - true ... test_run:cmd('restart server rebootstrap2 with cleanup=True, wait=False, wait_load=False') --- - true ... test_run:cmd('start server rebootstrap1') --- - true ... test_run:cmd('switch rebootstrap1') --- - true ... box.info.status -- running --- - running ... test_run:cmd('switch default') --- - true ... test_run:drop_cluster(SERVERS) --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/recover_missing_xlog.test.lua0000664000000000000000000000253113306565107025415 0ustar rootrootenv = require('test_run') test_run = env.new() SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } -- Start servers test_run:create_cluster(SERVERS) -- Wait for full mesh test_run:wait_fullmesh(SERVERS) test_run:cmd("switch autobootstrap1") for i = 0, 9 do box.space.test:insert{i, 'test' .. 
i} end box.space.test:count() test_run:cmd('switch default') vclock1 = test_run:get_vclock('autobootstrap1') vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock1) test_run:cmd("switch autobootstrap2") box.space.test:count() box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.01) test_run:cmd("stop server autobootstrap1") fio = require('fio') -- This test checks ability to recover missing local data -- from remote replica. See #3210. -- Delete data on first master and test that after restart, -- due to difference in vclock it will be able to recover -- all missing data from replica. -- Also check that there is no concurrency, i.e. master is -- in 'read-only' mode unless it receives all data. fio.unlink(fio.pathjoin(fio.abspath("."), string.format('autobootstrap1/%020d.xlog', 8))) test_run:cmd("start server autobootstrap1") test_run:cmd("switch autobootstrap1") for i = 10, 19 do box.space.test:insert{i, 'test' .. i} end fiber = require('fiber') box.space.test:select() -- Cleanup. test_run:cmd('switch default') test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/join_vclock.test.lua0000664000000000000000000000207113306560010023451 0ustar rootrootfiber = require('fiber') env = require('test_run') replica_set = require('fast_replica') test_run = env.new() engine = test_run:get_cfg('engine') box.schema.user.grant('guest', 'read,write,execute', 'universe') errinj = box.error.injection errinj.set("ERRINJ_RELAY_FINAL_SLEEP", true) box.schema.user.grant('guest', 'replication') s = box.schema.space.create('test', {engine = engine}); index = s:create_index('primary') ch = fiber.channel(1) done = false function repl_f() local i = 0 while not done do s:replace({i, i}) fiber.sleep(0.001) i = i + 1 end ch:put(true) end _ = fiber.create(repl_f) replica_set.join(test_run, 1) test_run:cmd("switch replica1") test_run:cmd("switch default") done = true ch:get() errinj.set("ERRINJ_RELAY_FINAL_SLEEP", false) test_run:cmd("switch replica1") cnt = 
box.space.test.index[0]:count() box.space.test.index.primary:max()[1] == cnt - 1 test_run:cmd("switch default") replica_set.drop_all(test_run) box.space.test:drop() box.schema.user.revoke('guest', 'read,write,execute', 'universe') box.schema.user.revoke('guest', 'replication') tarantool_1.9.1.26.g63eb81e3c/test/replication/misc.test.lua0000664000000000000000000000646413306560010022116 0ustar rootrootuuid = require('uuid') test_run = require('test_run').new() box.schema.user.grant('guest', 'replication') -- gh-2991 - Tarantool asserts on box.cfg.replication update if one of -- servers is dead replication_timeout = box.cfg.replication_timeout replication_connect_timeout = box.cfg.replication_connect_timeout box.cfg{replication_timeout=0.05, replication_connect_timeout=0.05, replication={}} box.cfg{replication = {'127.0.0.1:12345', box.cfg.listen}} box.cfg{replication_timeout = replication_timeout, replication_connect_timeout = replication_connect_timeout} -- gh-3111 - Allow to rebootstrap a replica from a read-only master replica_uuid = uuid.new() test_run:cmd('create server test with rpl_master=default, script="replication/replica_uuid.lua"') test_run:cmd(string.format('start server test with args="%s"', replica_uuid)) test_run:cmd('stop server test') test_run:cmd('cleanup server test') box.cfg{read_only = true} test_run:cmd(string.format('start server test with args="%s"', replica_uuid)) test_run:cmd('stop server test') test_run:cmd('cleanup server test') box.cfg{read_only = false} -- gh-3160 - Send heartbeats if there are changes from a remote master only SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } -- Deploy a cluster. 
test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) test_run:cmd("switch autobootstrap1") test_run = require('test_run').new() box.cfg{replication_timeout = 0.01, replication_connect_timeout=0.01} test_run:cmd("switch autobootstrap2") test_run = require('test_run').new() box.cfg{replication_timeout = 0.01, replication_connect_timeout=0.01} test_run:cmd("switch autobootstrap3") test_run = require('test_run').new() fiber=require('fiber') box.cfg{replication_timeout = 0.01, replication_connect_timeout=0.01} _ = box.schema.space.create('test_timeout'):create_index('pk') test_run:cmd("setopt delimiter ';'") function test_timeout() for i = 0, 99 do box.space.test_timeout:replace({1}) fiber.sleep(0.005) local rinfo = box.info.replication if rinfo[1].upstream and rinfo[1].upstream.status ~= 'follow' or rinfo[2].upstream and rinfo[2].upstream.status ~= 'follow' or rinfo[3].upstream and rinfo[3].upstream.status ~= 'follow' then return error('Replication broken') end end return true end ; test_run:cmd("setopt delimiter ''"); test_timeout() -- gh-3247 - Sequence-generated value is not replicated in case -- the request was sent via iproto. 
test_run:cmd("switch autobootstrap1") net_box = require('net.box') _ = box.schema.space.create('space1') _ = box.schema.sequence.create('seq') _ = box.space.space1:create_index('primary', {sequence = true} ) _ = box.space.space1:create_index('secondary', {parts = {2, 'unsigned'}}) box.schema.user.grant('guest', 'read,write', 'space', 'space1') c = net_box.connect(box.cfg.listen) c.space.space1:insert{box.NULL, "data"} -- fails, but bumps sequence value c.space.space1:insert{box.NULL, 1, "data"} box.space.space1:select{} vclock = test_run:get_vclock("autobootstrap1") _ = test_run:wait_vclock("autobootstrap2", vclock) test_run:cmd("switch autobootstrap2") box.space.space1:select{} test_run:cmd("switch autobootstrap1") box.space.space1:drop() test_run:cmd("switch default") test_run:drop_cluster(SERVERS) box.schema.user.revoke('guest', 'replication') tarantool_1.9.1.26.g63eb81e3c/test/replication/on_replace.lua0000664000000000000000000000200413306560010022276 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (on_replace1.lua => on_replace1) local INSTANCE_ID = string.match(arg[0], "%d") local USER = 'cluster' local PASSWORD = 'somepassword' local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/on_replace'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID); -- log_level = 7; replication = { USER..':'..PASSWORD..'@'..instance_uri(1); USER..':'..PASSWORD..'@'..instance_uri(2); }; replication_connect_timeout = 0.5, }) env = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') box.once("bootstrap", function() box.schema.user.create(USER, { password = PASSWORD }) box.schema.user.grant(USER, 'replication') box.schema.space.create('test', {engine = engine}) box.space.test:create_index('primary') end) 
tarantool_1.9.1.26.g63eb81e3c/test/replication/ddl3.lua0000777000000000000000000000000013306560010022262 2ddl.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap_guest2.lua0000777000000000000000000000000013306560010031205 2autobootstrap_guest.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/replica_uuid_ro.lua0000664000000000000000000000212613306560010023341 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (replica_uuid_ro1.lua => replica_uuid_ro1) local INSTANCE_ID = string.match(arg[0], "%d") local USER = 'cluster' local PASSWORD = 'somepassword' local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/replica_uuid_ro'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ instance_uuid = arg[1]; listen = instance_uri(INSTANCE_ID); -- log_level = 7; replication = { USER..':'..PASSWORD..'@'..instance_uri(1); USER..':'..PASSWORD..'@'..instance_uri(2); }; read_only = (INSTANCE_ID ~= '1' and true or false); replication_connect_timeout = 0.5, }) box.once("bootstrap", function() local test_run = require('test_run').new() box.schema.user.create(USER, { password = PASSWORD }) box.schema.user.grant(USER, 'replication') box.schema.space.create('test', {engine = test_run:get_cfg('engine')}) box.space.test:create_index('primary') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/quorum.result0000664000000000000000000001463413306560010022270 0ustar rootroottest_run = require('test_run').new() --- ... SERVERS = {'quorum1', 'quorum2', 'quorum3'} --- ... -- Deploy a cluster. test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... -- Stop one replica and try to restart another one. -- It should successfully restart, but stay in the -- 'orphan' mode, which disables write accesses. 
-- There are three ways for the replica to leave the -- 'orphan' mode: -- * reconfigure replication -- * reset box.cfg.replication_connect_quorum -- * wait until a quorum is formed asynchronously test_run:cmd('stop server quorum1') --- - true ... test_run:cmd('switch quorum2') --- - true ... test_run:cmd('restart server quorum2') box.info.status -- orphan --- - orphan ... box.ctl.wait_rw(0.001) -- timeout --- - error: timed out ... box.info.ro -- true --- - true ... box.space.test:replace{100} -- error --- - error: Can't modify data because this instance is in read-only mode. ... box.cfg{replication={}} --- ... box.info.status -- running --- - running ... test_run:cmd('restart server quorum2') box.info.status -- orphan --- - orphan ... box.ctl.wait_rw(0.001) -- timeout --- - error: timed out ... box.info.ro -- true --- - true ... box.space.test:replace{100} -- error --- - error: Can't modify data because this instance is in read-only mode. ... box.cfg{replication_connect_quorum = 2} --- ... box.ctl.wait_rw() --- ... box.info.ro -- false --- - false ... box.info.status -- running --- - running ... test_run:cmd('restart server quorum2') box.info.status -- orphan --- - orphan ... box.ctl.wait_rw(0.001) -- timeout --- - error: timed out ... box.info.ro -- true --- - true ... box.space.test:replace{100} -- error --- - error: Can't modify data because this instance is in read-only mode. ... test_run:cmd('start server quorum1') --- - true ... box.ctl.wait_rw() --- ... box.info.ro -- false --- - false ... box.info.status -- running --- - running ... -- Check that the replica follows all masters. box.info.id == 1 or box.info.replication[1].upstream.status == 'follow' --- - true ... box.info.id == 2 or box.info.replication[2].upstream.status == 'follow' --- - true ... box.info.id == 3 or box.info.replication[3].upstream.status == 'follow' --- - true ... -- Check that box.cfg() doesn't return until the instance -- catches up with all configured replicas. 
test_run:cmd('switch quorum3') --- - true ... box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.001) --- - ok ... test_run:cmd('switch quorum2') --- - true ... box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.001) --- - ok ... test_run:cmd('stop server quorum1') --- - true ... for i = 1, 100 do box.space.test:insert{i} end --- ... fiber = require('fiber') --- ... fiber.sleep(0.1) --- ... test_run:cmd('start server quorum1') --- - true ... test_run:cmd('switch quorum1') --- - true ... box.space.test:count() -- 100 --- - 100 ... -- Rebootstrap one node of the cluster and check that others follow. -- Note, due to ERRINJ_RELAY_TIMEOUT there is a substantial delay -- between the moment the node starts listening and the moment it -- completes bootstrap and subscribes. Other nodes will try and -- fail to subscribe to the restarted node during this period. -- This is OK - they have to retry until the bootstrap is complete. test_run:cmd('switch quorum3') --- - true ... box.snapshot() --- - ok ... test_run:cmd('switch quorum2') --- - true ... box.snapshot() --- - ok ... test_run:cmd('switch quorum1') --- - true ... test_run:cmd('restart server quorum1 with cleanup=1') box.space.test:count() -- 100 --- - 100 ... -- The rebootstrapped replica will be assigned id = 4, -- because ids 1..3 are busy. test_run:cmd('switch quorum2') --- - true ... fiber = require('fiber') --- ... while box.info.replication[4].upstream.status ~= 'follow' do fiber.sleep(0.001) end --- ... box.info.replication[4].upstream.status --- - follow ... test_run:cmd('switch quorum3') --- - true ... fiber = require('fiber') --- ... while box.info.replication[4].upstream.status ~= 'follow' do fiber.sleep(0.001) end --- ... box.info.replication[4].upstream.status --- - follow ... -- Cleanup. test_run:cmd('switch default') --- - true ... test_run:drop_cluster(SERVERS) --- ... -- -- gh-3278: test different replication and replication_connect_quorum configs. -- box.schema.user.grant('guest', 'replication') --- ... 
space = box.schema.space.create('test', {engine = test_run:get_cfg('engine')}); --- ... index = box.space.test:create_index('primary') --- ... -- Insert something just to check that replica with quorum = 0 works as expected. space:insert{1} --- - [1] ... test_run:cmd("create server replica with rpl_master=default, script='replication/replica_no_quorum.lua'") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... box.info.status -- running --- - running ... box.space.test:select() --- - - [1] ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... listen = box.cfg.listen --- ... box.cfg{listen = ''} --- ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... box.info.status -- running --- - running ... test_run:cmd("switch default") --- - true ... -- Check that replica is able to reconnect, case was broken with earlier quorum "fix". box.cfg{listen = listen} --- ... space:insert{2} --- - [2] ... vclock = test_run:get_vclock("default") --- ... _ = test_run:wait_vclock("replica", vclock) --- ... test_run:cmd("switch replica") --- - true ... box.info.status -- running --- - running ... box.space.test:select() --- - - [1] - [2] ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... space:drop() --- ... box.schema.user.revoke('guest', 'replication') --- ... -- Second case, check that master-master works. SERVERS = {'master_quorum1', 'master_quorum2'} --- ... -- Deploy a cluster. test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... test_run:cmd("switch master_quorum1") --- - true ... repl = box.cfg.replication --- ... box.cfg{replication = ""} --- ... box.space.test:insert{1} --- - [1] ... box.cfg{replication = repl} --- ... vclock = test_run:get_vclock("master_quorum1") --- ... 
_ = test_run:wait_vclock("master_quorum2", vclock) --- ... test_run:cmd("switch master_quorum2") --- - true ... box.space.test:select() --- - - [1] ... test_run:cmd("switch default") --- - true ... -- Cleanup. test_run:drop_cluster(SERVERS) --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/on_replace2.lua0000777000000000000000000000000013306560010025171 2on_replace.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/gc.test.lua0000664000000000000000000001324513306565107021563 0ustar rootroottest_run = require('test_run').new() engine = test_run:get_cfg('engine') replica_set = require('fast_replica') fiber = require('fiber') test_run:cleanup_cluster() -- Make each snapshot trigger garbage collection. default_checkpoint_count = box.cfg.checkpoint_count box.cfg{checkpoint_count = 1} function wait_gc(n) while #box.internal.gc.info().checkpoints > n do fiber.sleep(0.01) end end -- Grant permissions needed for replication. box.schema.user.grant('guest', 'read,write,execute', 'universe') box.schema.user.grant('guest', 'replication') -- By default, relay thread reports status to tx once a second. -- To reduce the test execute time, let's set it to 50 ms. box.error.injection.set("ERRINJ_RELAY_REPORT_INTERVAL", 0.05) -- Create and populate the space we will replicate. s = box.schema.space.create('test', {engine = engine}); _ = s:create_index('pk') for i = 1, 100 do s:auto_increment{} end box.snapshot() for i = 1, 100 do s:auto_increment{} end -- Make sure replica join will take long enough for us to -- invoke garbage collection. box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.05) -- While the replica is receiving the initial data set, -- make a snapshot and invoke garbage collection, then -- remove the timeout injection so that we don't have to -- wait too long for the replica to start. 
test_run:cmd("setopt delimiter ';'") fiber.create(function() fiber.sleep(0.1) box.snapshot() box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0) end) test_run:cmd("setopt delimiter ''"); -- Start the replica. test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") test_run:cmd("start server replica") -- Despite the fact that we invoked garbage collection that -- would have normally removed the snapshot the replica was -- bootstrapped from, the replica should still receive all -- data from the master. Check it. test_run:cmd("switch replica") fiber = require('fiber') while box.space.test:count() < 200 do fiber.sleep(0.01) end box.space.test:count() test_run:cmd("switch default") -- Check that garbage collection removed the snapshot once -- the replica released the corresponding checkpoint. wait_gc(1) #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() -- Make sure the replica will receive data it is subscribed -- to long enough for us to invoke garbage collection. box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.05) -- Send more data to the replica. for i = 1, 100 do s:auto_increment{} end -- Invoke garbage collection. Check that it doesn't remove -- xlogs needed by the replica. box.snapshot() #box.internal.gc.info().checkpoints == 2 or box.internal.gc.info() -- Remove the timeout injection so that the replica catches -- up quickly. box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0) -- Check that the replica received all data from the master. test_run:cmd("switch replica") while box.space.test:count() < 300 do fiber.sleep(0.01) end box.space.test:count() test_run:cmd("switch default") -- Now garbage collection should resume and delete files left -- from the old checkpoint. wait_gc(1) #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() -- -- Check that the master doesn't delete xlog files sent to the -- replica until it receives a confirmation that the data has -- been applied (gh-2825). 
-- test_run:cmd("switch replica") -- Prevent the replica from applying any rows. box.error.injection.set("ERRINJ_WAL_DELAY", true) test_run:cmd("switch default") -- Generate some data on the master. for i = 1, 5 do s:auto_increment{} end box.snapshot() -- rotate xlog for i = 1, 5 do s:auto_increment{} end fiber.sleep(0.1) -- wait for master to relay data -- Garbage collection must not delete the old xlog file -- (and the corresponding snapshot), because it is still -- needed by the replica. #box.internal.gc.info().checkpoints == 2 or box.internal.gc.info() test_run:cmd("switch replica") -- Unblock the replica and make it fail to apply a row. box.info.replication[1].upstream.message == nil box.error.injection.set("ERRINJ_WAL_WRITE", true) box.error.injection.set("ERRINJ_WAL_DELAY", false) while box.info.replication[1].upstream.message == nil do fiber.sleep(0.01) end box.info.replication[1].upstream.message test_run:cmd("switch default") -- Restart the replica to reestablish replication. test_run:cmd("restart server replica") -- Wait for the replica to catch up. test_run:cmd("switch replica") fiber = require('fiber') while box.space.test:count() < 310 do fiber.sleep(0.01) end box.space.test:count() test_run:cmd("switch default") -- Now it's safe to drop the old xlog. wait_gc(1) #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() -- Stop the replica. test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") -- Invoke garbage collection. Check that it doesn't remove -- the checkpoint last used by the replica. _ = s:auto_increment{} box.snapshot() #box.internal.gc.info().checkpoints == 2 or box.internal.gc.info() -- The checkpoint should only be deleted after the replica -- is unregistered. test_run:cleanup_cluster() #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() -- -- Test that concurrent invocation of the garbage collector works fine. 
-- s:truncate() for i = 1, 10 do s:replace{i} end box.snapshot() replica_set.join(test_run, 3) replica_set.stop_all(test_run) for i = 11, 50 do s:replace{i} if i % 10 == 0 then box.snapshot() end end replica_set.start_all(test_run) replica_set.wait_all(test_run) replica_set.drop_all(test_run) -- Cleanup. s:drop() box.error.injection.set("ERRINJ_RELAY_REPORT_INTERVAL", 0) box.schema.user.revoke('guest', 'replication') box.schema.user.revoke('guest', 'read,write,execute', 'universe') box.cfg{checkpoint_count = default_checkpoint_count} tarantool_1.9.1.26.g63eb81e3c/test/replication/hot_standby.test.lua0000664000000000000000000000653313306560010023476 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') box.schema.user.grant('guest', 'replication') box.schema.func.create('_set_pri_lsn') box.schema.user.grant('guest', 'execute', 'function', '_set_pri_lsn') test_run:cmd("create server hot_standby with script='replication/hot_standby.lua', rpl_master=default") test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") test_run:cmd("start server hot_standby") test_run:cmd("start server replica") test_run:cmd("setopt delimiter ';'") test_run:cmd("set connection default, hot_standby, replica") fiber = require('fiber'); while box.info.id == 0 do fiber.sleep(0.01) end; while box.space['_priv']:len() < 1 do fiber.sleep(0.001) end; do local pri_id = '' local begin_lsn = 0 function _set_pri_lsn(_id, _lsn) pri_id = _id begin_lsn = _lsn end function _get_pri_lsn() return box.info.vclock[pri_id] end function _print_lsn() return (_get_pri_lsn() - begin_lsn + 1) end function _insert(_begin, _end) local a = {} for i = _begin, _end do table.insert(a, box.space.tweedledum:insert{i, 'the tuple '..i}) end return a end function _select(_begin, _end) local a = {} for i = _begin, _end do local tuple = box.space.tweedledum:get{i} if tuple ~= nil then table.insert(a, tuple) end end return a end function 
_wait_lsn(_lsnd) while _get_pri_lsn() < _lsnd + begin_lsn do fiber.sleep(0.001) end begin_lsn = begin_lsn + _lsnd end end; test_run:cmd("setopt delimiter ''"); test_run:cmd("switch replica") fiber = require('fiber') test_run:cmd("switch hot_standby") fiber = require('fiber') box.info.status test_run:cmd("switch default") fiber = require('fiber') box.info.status space = box.schema.space.create('tweedledum', {engine = engine}) index = space:create_index('primary', {type = 'tree'}) -- set begin lsn on master, replica and hot_standby. test_run:cmd("set variable replica_port to 'replica.listen'") REPLICA = require('uri').parse(tostring(replica_port)) REPLICA ~= nil a = (require 'net.box').connect(REPLICA.host, REPLICA.service) a:call('_set_pri_lsn', {box.info.id, box.info.lsn}) a:close() _insert(1, 10) _select(1, 10) test_run:cmd("switch replica") _wait_lsn(10) _select(1, 10) test_run:cmd("stop server default") test_run:cmd("switch hot_standby") while box.info.status ~= 'running' do fiber.sleep(0.001) end test_run:cmd("switch replica") -- hot_standby.listen is garbage, since hot_standby.lua -- uses MASTER environment variable for its listen test_run:cmd("set variable hot_standby_port to 'hot_standby.master'") HOT_STANDBY = require('uri').parse(tostring(hot_standby_port)) HOT_STANDBY ~= nil a = (require 'net.box').connect(HOT_STANDBY.host, HOT_STANDBY.service) a:call('_set_pri_lsn', {box.info.id, box.info.lsn}) a:close() test_run:cmd("switch hot_standby") _insert(11, 20) _select(11, 20) test_run:cmd("switch replica") _wait_lsn(10) _select(11, 20) test_run:cmd("deploy server default") test_run:cmd("start server default") test_run:cmd("switch default") test_run:cmd("stop server hot_standby") test_run:cmd("stop server replica") test_run:cmd("cleanup server hot_standby") test_run:cmd("cleanup server replica") tarantool_1.9.1.26.g63eb81e3c/test/replication/hot_standby.lua0000664000000000000000000000060613306560010022513 0ustar rootroot#!/usr/bin/env tarantool 
require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = os.getenv("MASTER"), memtx_memory = 107374182, custom_proc_title = "hot_standby", wal_dir = "master", memtx_dir = "master", vinyl_dir = "master", hot_standby = true, replication_connect_timeout = 0.5, }) tarantool_1.9.1.26.g63eb81e3c/test/replication/quorum2.lua0000777000000000000000000000000013306560010023633 2quorum.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/quorum1.lua0000777000000000000000000000000013306560010023632 2quorum.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/on_replace.test.lua0000664000000000000000000000357213306560010023267 0ustar rootroot-- -- Check that replication applier invokes on_replace triggers -- env = require('test_run') test_run = env.new() fiber = require('fiber') _ = box.schema.space.create('test') _ = box.space.test:create_index('primary') box.schema.user.grant('guest', 'replication') test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") test_run:cmd("start server replica") test_run:cmd("switch replica") session_type = nil -- -- gh-2642: box.session.type() in replication applier -- _ = box.space.test:on_replace(function() session_type = box.session.type() end) box.space.test:insert{1} -- -- console -- session_type test_run:cmd("switch default") box.space.test:insert{2} test_run:cmd("switch replica") while box.space.test:count() < 2 do fiber.sleep(0.01) end -- -- applier -- session_type test_run:cmd("switch default") -- -- cleanup -- test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") box.space.test:drop() box.schema.user.revoke('guest', 'replication') -- gh-2682 on_replace on slave server with data change SERVERS = { 'on_replace1', 'on_replace2' } test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) test_run:cmd('switch on_replace1') fiber = require'fiber' s1 = box.schema.space.create('s1') _ = s1:create_index('pk') s2 = box.schema.space.create('s2') _ = 
s2:create_index('pk') test_run:cmd('switch on_replace2') fiber = require'fiber' while box.space.s2 == nil do fiber.sleep(0.00001) end _ = box.space.s1:on_replace(function (old, new) box.space.s2:replace(new) end) test_run:cmd('switch on_replace1') box.space.s1:replace({1, 2, 3, 4}) while #(box.space.s2:select()) == 0 do fiber.sleep(0.00001) end test_run:cmd('switch on_replace2') box.space.s1:select() box.space.s2:select() test_run:cmd('switch on_replace1') box.space.s1:select() box.space.s2:select() _ = test_run:cmd('switch default') test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/quorum.test.lua0000664000000000000000000001167013306560010022506 0ustar rootroottest_run = require('test_run').new() SERVERS = {'quorum1', 'quorum2', 'quorum3'} -- Deploy a cluster. test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) -- Stop one replica and try to restart another one. -- It should successfully restart, but stay in the -- 'orphan' mode, which disables write accesses. 
-- There are three ways for the replica to leave the -- 'orphan' mode: -- * reconfigure replication -- * reset box.cfg.replication_connect_quorum -- * wait until a quorum is formed asynchronously test_run:cmd('stop server quorum1') test_run:cmd('switch quorum2') test_run:cmd('restart server quorum2') box.info.status -- orphan box.ctl.wait_rw(0.001) -- timeout box.info.ro -- true box.space.test:replace{100} -- error box.cfg{replication={}} box.info.status -- running test_run:cmd('restart server quorum2') box.info.status -- orphan box.ctl.wait_rw(0.001) -- timeout box.info.ro -- true box.space.test:replace{100} -- error box.cfg{replication_connect_quorum = 2} box.ctl.wait_rw() box.info.ro -- false box.info.status -- running test_run:cmd('restart server quorum2') box.info.status -- orphan box.ctl.wait_rw(0.001) -- timeout box.info.ro -- true box.space.test:replace{100} -- error test_run:cmd('start server quorum1') box.ctl.wait_rw() box.info.ro -- false box.info.status -- running -- Check that the replica follows all masters. box.info.id == 1 or box.info.replication[1].upstream.status == 'follow' box.info.id == 2 or box.info.replication[2].upstream.status == 'follow' box.info.id == 3 or box.info.replication[3].upstream.status == 'follow' -- Check that box.cfg() doesn't return until the instance -- catches up with all configured replicas. test_run:cmd('switch quorum3') box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.001) test_run:cmd('switch quorum2') box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.001) test_run:cmd('stop server quorum1') for i = 1, 100 do box.space.test:insert{i} end fiber = require('fiber') fiber.sleep(0.1) test_run:cmd('start server quorum1') test_run:cmd('switch quorum1') box.space.test:count() -- 100 -- Rebootstrap one node of the cluster and check that others follow. -- Note, due to ERRINJ_RELAY_TIMEOUT there is a substantial delay -- between the moment the node starts listening and the moment it -- completes bootstrap and subscribes. 
Other nodes will try and -- fail to subscribe to the restarted node during this period. -- This is OK - they have to retry until the bootstrap is complete. test_run:cmd('switch quorum3') box.snapshot() test_run:cmd('switch quorum2') box.snapshot() test_run:cmd('switch quorum1') test_run:cmd('restart server quorum1 with cleanup=1') box.space.test:count() -- 100 -- The rebootstrapped replica will be assigned id = 4, -- because ids 1..3 are busy. test_run:cmd('switch quorum2') fiber = require('fiber') while box.info.replication[4].upstream.status ~= 'follow' do fiber.sleep(0.001) end box.info.replication[4].upstream.status test_run:cmd('switch quorum3') fiber = require('fiber') while box.info.replication[4].upstream.status ~= 'follow' do fiber.sleep(0.001) end box.info.replication[4].upstream.status -- Cleanup. test_run:cmd('switch default') test_run:drop_cluster(SERVERS) -- -- gh-3278: test different replication and replication_connect_quorum configs. -- box.schema.user.grant('guest', 'replication') space = box.schema.space.create('test', {engine = test_run:get_cfg('engine')}); index = box.space.test:create_index('primary') -- Insert something just to check that replica with quorum = 0 works as expected. space:insert{1} test_run:cmd("create server replica with rpl_master=default, script='replication/replica_no_quorum.lua'") test_run:cmd("start server replica") test_run:cmd("switch replica") box.info.status -- running box.space.test:select() test_run:cmd("switch default") test_run:cmd("stop server replica") listen = box.cfg.listen box.cfg{listen = ''} test_run:cmd("start server replica") test_run:cmd("switch replica") box.info.status -- running test_run:cmd("switch default") -- Check that replica is able to reconnect, case was broken with earlier quorum "fix". 
box.cfg{listen = listen} space:insert{2} vclock = test_run:get_vclock("default") _ = test_run:wait_vclock("replica", vclock) test_run:cmd("switch replica") box.info.status -- running box.space.test:select() test_run:cmd("switch default") test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") space:drop() box.schema.user.revoke('guest', 'replication') -- Second case, check that master-master works. SERVERS = {'master_quorum1', 'master_quorum2'} -- Deploy a cluster. test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) test_run:cmd("switch master_quorum1") repl = box.cfg.replication box.cfg{replication = ""} box.space.test:insert{1} box.cfg{replication = repl} vclock = test_run:get_vclock("master_quorum1") _ = test_run:wait_vclock("master_quorum2", vclock) test_run:cmd("switch master_quorum2") box.space.test:select() test_run:cmd("switch default") -- Cleanup. test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/wal_off.test.lua0000664000000000000000000000256113306560010022572 0ustar rootroot-- -- gh-1233: JOIN/SUBSCRIBE must fail if master has wal_mode = "none" -- env = require('test_run') test_run = env.new() test_run:cmd('switch default') fiber = require('fiber') box.schema.user.grant('guest', 'replication') test_run:cmd("create server wal_off with rpl_master=default, script='replication/wal_off.lua'") test_run:cmd("start server wal_off") test_run:cmd('switch default') wal_off_uri = test_run:eval('wal_off', 'return box.cfg.listen')[1] wal_off_uri ~= nil wal_off_id = test_run:eval('wal_off', 'return box.info.id')[1] box.cfg { replication = wal_off_uri } check = "Replication does not support wal_mode = 'none'" while box.info.replication[wal_off_id].upstream.message ~= check do fiber.sleep(0) end box.info.replication[wal_off_id].upstream ~= nil box.info.replication[wal_off_id].downstream ~= nil box.info.replication[wal_off_id].upstream.status box.info.replication[wal_off_id].upstream.message box.cfg { replication 
= "" } test_run:cmd('switch wal_off') box.schema.user.revoke('guest', 'replication') test_run:cmd('switch default') box.cfg { replication = wal_off_uri } check = "Read access to universe" while string.find(box.info.replication[wal_off_id].upstream.message, check) == nil do fiber.sleep(0.01) end box.cfg { replication = "" } test_run:cmd("stop server wal_off") test_run:cmd("cleanup server wal_off") box.schema.user.revoke('guest', 'replication') tarantool_1.9.1.26.g63eb81e3c/test/replication/ddl4.lua0000777000000000000000000000000013306560010022263 2ddl.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/status.result0000664000000000000000000001257113306560010022261 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default with cleanup=1') test_run:cmd('switch default') --- - true ... -- -- No replication -- master_id = box.info.id --- ... #box.info.vclock == 0 --- - true ... #box.info.replication == 1 --- - true ... box.space._cluster:count() == 1 --- - true ... box.info.uuid == box.space._cluster:get(master_id)[2] --- - true ... -- LSN is nil until a first request is made box.info.vclock[master_id] == nil --- - true ... --- box.info.lsn == box.info.vclock[master_id] box.info.lsn == 0 --- - true ... -- Make the first request box.schema.user.grant('guest', 'replication') --- ... -- LSN is 1 after the first request #box.info.vclock == 1 --- - true ... box.info.vclock[master_id] == 1 --- - true ... box.info.lsn == box.info.vclock[master_id] --- - true ... master = box.info.replication[master_id] --- ... master.id == master_id --- - true ... master.uuid == box.space._cluster:get(master_id)[2] --- - true ... master.lsn == box.info.vclock[master_id] --- - true ... master.upstream == nil --- - true ... master.downstream == nil --- - true ... -- Start Master -> Slave replication test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") --- - true ... 
test_run:cmd("start server replica") --- - true ... -- -- Master -- test_run:cmd('switch default') --- - true ... #box.info.vclock == 1 -- box.info.vclock[replica_id] is nil --- - true ... #box.info.replication == 2 --- - true ... box.space._cluster:count() == 2 --- - true ... -- master's status master_id = box.info.id --- ... box.info.vclock[master_id] == 2 -- grant + registration == 2 --- - true ... box.info.lsn == box.info.vclock[master_id] --- - true ... master = box.info.replication[master_id] --- ... master.id == master_id --- - true ... master.uuid == box.space._cluster:get(master_id)[2] --- - true ... master.lsn == box.info.vclock[master_id] --- - true ... master.upstream == nil --- - true ... master.downstream == nil --- - true ... -- replica's status replica_id = test_run:get_server_id('replica') --- ... box.info.vclock[replica_id] == nil --- - true ... replica = box.info.replication[replica_id] --- ... replica.id == replica_id --- - true ... replica.uuid == box.space._cluster:get(replica_id)[2] --- - true ... -- replica.lsn == box.info.vclock[replica_id] replica.lsn == 0 --- - true ... replica.upstream == nil --- - true ... replica.downstream.vclock[master_id] == box.info.vclock[master_id] --- - true ... replica.downstream.vclock[replica_id] == box.info.vclock[replica_id] --- - true ... -- -- Replica -- test_run:cmd('switch replica') --- - true ... #box.info.vclock == 1 -- box.info.vclock[replica_id] is nil --- - true ... #box.info.replication == 2 --- - true ... box.space._cluster:count() == 2 --- - true ... -- master's status master_id = test_run:get_server_id('default') --- ... box.info.vclock[master_id] == 2 --- - true ... master = box.info.replication[master_id] --- ... master.id == master_id --- - true ... master.uuid == box.space._cluster:get(master_id)[2] --- - true ... master.upstream.status == "follow" --- - true ... master.upstream.lag < 1 --- - true ... master.upstream.idle < 1 --- - true ... 
master.upstream.peer:match("localhost") --- - localhost ... master.downstream == nil --- - true ... -- replica's status replica_id = box.info.id --- ... box.info.vclock[replica_id] == nil --- - true ... -- box.info.lsn == box.info.vclock[replica_id] box.info.lsn == 0 --- - true ... replica = box.info.replication[replica_id] --- ... replica.id == replica_id --- - true ... replica.uuid == box.space._cluster:get(replica_id)[2] --- - true ... -- replica.lsn == box.info.vclock[replica_id] replica.lsn == 0 --- - true ... replica.upstream == nil --- - true ... replica.downstream == nil --- - true ... -- -- ClientError during replication -- test_run:cmd('switch replica') --- - true ... box.space._schema:insert({'dup'}) --- - ['dup'] ... test_run:cmd('switch default') --- - true ... box.space._schema:insert({'dup'}) --- - ['dup'] ... test_run:cmd('switch replica') --- - true ... r = box.info.replication[1] --- ... r.upstream.status == "stopped" --- - true ... r.upstream.message:match('Duplicate') ~= nil --- - true ... test_run:cmd('switch default') --- - true ... box.space._schema:delete({'dup'}) --- - ['dup'] ... test_run:cmd("push filter ', lsn: [0-9]+' to ', lsn: '") --- - true ... test_run:grep_log('replica', 'error applying row: .*') --- - 'error applying row: {type: ''INSERT'', lsn: , space_id: 272, index_id: 0, tuple: ["dup"]}' ... test_run:cmd("clear filter") --- - true ... -- -- Check box.info.replication login -- test_run:cmd('switch replica') --- - true ... test_run:cmd("set variable master_port to 'replica.master'") --- - true ... replica_uri = os.getenv("LISTEN") --- ... box.cfg{replication = {"guest@localhost:" .. master_port, replica_uri}} --- ... master_id = test_run:get_server_id('default') --- ... master = box.info.replication[master_id] --- ... master.id == master_id --- - true ... master.upstream.status == "follow" --- - false ... master.upstream.peer:match("guest") --- - guest ... master.upstream.peer:match("localhost") --- - localhost ... 
master.downstream == nil --- - true ... test_run:cmd('switch default') --- - true ... -- -- Cleanup -- box.schema.user.revoke('guest', 'replication') --- ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/replication/rebootstrap.test.lua0000664000000000000000000000123713306560010023520 0ustar rootroottest_run = require('test_run').new() SERVERS = {'rebootstrap1', 'rebootstrap2'} test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) -- -- gh-3422: If quorum can't be formed, because some replicas are -- re-bootstrapping, box.cfg{} must wait for bootstrap to complete -- instead of stopping synchronization and leaving the instance -- in 'orphan' mode. -- test_run:cmd('stop server rebootstrap1') test_run:cmd('restart server rebootstrap2 with cleanup=True, wait=False, wait_load=False') test_run:cmd('start server rebootstrap1') test_run:cmd('switch rebootstrap1') box.info.status -- running test_run:cmd('switch default') test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/prune.result0000664000000000000000000000607713306560010022073 0ustar rootrootprint '-------------------------------------------------------------' --- ... print 'gh-806: cant prune old replicas by deleting their server ids' --- ... print '-------------------------------------------------------------' --- ... env = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... replica_set = require('fast_replica') --- ... fiber = require('fiber') --- ... test_run:cleanup_cluster() --- ... box.space._cluster:len() == 1 --- - true ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... -- Create space and fill it space = box.schema.create_space('test', {engine = engine}) --- ... index = box.space.test:create_index('primary') --- ... for i=1,10 do space:insert{i, 'test'} end --- ... 
-- create max number of replicas and check replica_set.join(test_run, box.schema.REPLICA_MAX - 2) --- ... while box.space._cluster:len() ~= box.schema.REPLICA_MAX - 1 do fiber.sleep(0.001) end --- ... box.space._cluster:len() == box.schema.REPLICA_MAX - 1 --- - true ... -- try to add one more replica uuid = require('uuid') --- ... box.space._cluster:insert{box.schema.REPLICA_MAX, uuid.str()} --- - error: 'Replica count limit reached: 32' ... -- Delete all replication nodes replica_set.drop_all(test_run) --- ... box.space._cluster:len() == 1 --- - true ... -- Save a snapshot without removed replicas in vclock box.snapshot() --- - ok ... -- Master is not crashed then recovering xlog with {replica_id: 0} in header test_run:cmd('restart server default') replica_set = require('fast_replica') --- ... fiber = require('fiber') --- ... -- Rejoin replica and check replica_set.join(test_run, 1) --- ... while box.space._cluster:len() ~= 2 do fiber.sleep(0.001) end --- ... -- Check server ids test_run:cmd('eval replica1 "return box.info.id"') --- - [2] ... box.space._cluster:len() == 2 --- - true ... -- Cleanup replica_set.drop_all(test_run) --- ... box.space._cluster:len() == 1 --- - true ... -- delete replica from master replica_set.join(test_run, 1) --- ... while box.space._cluster:len() ~= 2 do fiber.sleep(0.001) end --- ... -- Check server ids test_run:cmd('eval replica1 "return box.info.id"') --- - [2] ... box.space._cluster:len() == 2 --- - true ... replica_set.unregister(test_run, 2) --- ... while test_run:cmd('eval replica1 "box.info.replication[1].upstream.status"')[1] ~= 'stopped' do fiber.sleep(0.001) end --- ... test_run:cmd('eval replica1 "box.info.replication[1].upstream.message"') --- - ['The local instance id 2 is read-only'] ... -- restart replica and check that replica isn't able to join to cluster test_run:cmd('restart server replica1') --- - true ... test_run:cmd('switch default') --- - true ... box.space._cluster:len() == 1 --- - true ... 
test_run:cmd('eval replica1 "box.info.replication[1].upstream.status"') --- - ['stopped'] ... test_run:cmd('eval replica1 "box.info.replication[1].upstream.message"')[1]:match("is not registered with replica set") ~= nil --- - true ... replica_set.delete(test_run, 2) --- ... box.space.test:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/master.lua0000664000000000000000000000034313306560010021466 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg({ listen = os.getenv("LISTEN"), memtx_memory = 107374182, replication_connect_timeout = 0.5, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication/before_replace.test.lua0000664000000000000000000000441313306565107024124 0ustar rootroot-- -- Using space:before_replace to resolve replication conflicts. -- env = require('test_run') test_run = env.new() SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } -- Deploy a cluster. test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) -- Setup space:before_replace trigger on all replicas. -- The trigger favors tuples with a greater value. test_run:cmd("setopt delimiter ';'") test_run:cmd("switch autobootstrap1"); _ = box.space.test:before_replace(function(old, new) if old ~= nil and new ~= nil then return new[2] > old[2] and new or old end end); test_run:cmd("switch autobootstrap2"); _ = box.space.test:before_replace(function(old, new) if old ~= nil and new ~= nil then return new[2] > old[2] and new or old end end); test_run:cmd("switch autobootstrap3"); _ = box.space.test:before_replace(function(old, new) if old ~= nil and new ~= nil then return new[2] > old[2] and new or old end end); test_run:cmd("setopt delimiter ''"); -- Stall replication and generate incompatible data -- on the replicas. 
test_run:cmd("switch autobootstrap1") box.error.injection.set('ERRINJ_RELAY_TIMEOUT', 0.01) for i = 1, 10 do box.space.test:replace{i, i % 3 == 1 and i * 10 or i} end test_run:cmd("switch autobootstrap2") box.error.injection.set('ERRINJ_RELAY_TIMEOUT', 0.01) for i = 1, 10 do box.space.test:replace{i, i % 3 == 2 and i * 10 or i} end test_run:cmd("switch autobootstrap3") box.error.injection.set('ERRINJ_RELAY_TIMEOUT', 0.01) for i = 1, 10 do box.space.test:replace{i, i % 3 == 0 and i * 10 or i} end -- Synchronize. test_run:cmd("switch default") vclock = test_run:get_cluster_vclock(SERVERS) vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock) -- Check that all replicas converged to the same data -- and the state persists after restart. test_run:cmd("switch autobootstrap1") box.space.test:select() test_run:cmd('restart server autobootstrap1') box.space.test:select() test_run:cmd("switch autobootstrap2") box.space.test:select() test_run:cmd('restart server autobootstrap2') box.space.test:select() test_run:cmd("switch autobootstrap3") box.space.test:select() test_run:cmd('restart server autobootstrap3') box.space.test:select() -- Cleanup. test_run:cmd("switch default") test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/errinj.result0000664000000000000000000002174413306560010022231 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... errinj = box.error.injection --- ... box.schema.user.grant('guest', 'replication') --- ... s = box.schema.space.create('test', {engine = engine}); --- ... index = s:create_index('primary') --- ... test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... s = box.space.test --- ... 
test_run:cmd("setopt delimiter ';'") --- - true ... -- vinyl does not support index.len() so we use index.count() instead function wait_repl(cnt) for i = 1, 20 do if s.index[0]:count() >= cnt then return true end fiber.sleep(0.01) end return false end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_run:cmd("switch default") --- - true ... d = require('digest') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function test_f(st, tx) if tx then box.begin() end for i = st, st + 9 do local _ = s:insert({i, d.urandom(8192)}) end if tx then box.commit() end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_f(1) --- ... errinj.set("ERRINJ_WAL_WRITE_PARTIAL", 16384) --- - ok ... test_f(11, true) --- - error: Failed to write to disk ... errinj.set("ERRINJ_WAL_WRITE_PARTIAL", -1) --- - ok ... test_f(11, true) --- ... test_f(21, true) --- ... test_run:cmd("switch replica") --- - true ... wait_repl(30) --- - true ... test_run:cmd("switch default") --- - true ... box.space.test.index[0]:count() --- - 30 ... errinj.set("ERRINJ_WAL_WRITE_DISK", true) --- - ok ... test_f(31, true) --- - error: Failed to write to disk ... errinj.set("ERRINJ_WAL_WRITE_DISK", false) --- - ok ... test_f(31, true) --- ... test_f(41, true) --- ... test_run:cmd("switch replica") --- - true ... wait_repl(50) --- - true ... test_run:cmd("switch default") --- - true ... box.space.test.index[0]:count() --- - 50 ... -- Check that master doesn't stall on WALs without EOF (gh-2294). errinj.set("ERRINJ_WAL_WRITE_EOF", true) --- - ok ... box.snapshot() --- - ok ... test_f(51, true) --- ... test_run:cmd("switch replica") --- - true ... wait_repl(60) --- - true ... test_run:cmd("switch default") --- - true ... errinj.set("ERRINJ_WAL_WRITE_EOF", false) --- - ok ... box.snapshot() --- - ok ... -- Check that replication doesn't freeze if slave bumps LSN -- while master is down (gh-3038). To do this, -- 1. Stop replication by injecting an error on the slave. -- 2. 
Bump LSN on the slave while replication is inactive. -- 3. Restore replication. -- 4. Generate some records on the master. -- 5. Make sure they'll make it to the slave. test_run:cmd("switch replica") --- - true ... box.error.injection.set("ERRINJ_WAL_WRITE", true) --- - ok ... test_run:cmd("switch default") --- - true ... s:replace{9000, "won't make it"} --- - [9000, 'won''t make it'] ... test_run:cmd("switch replica") --- - true ... while box.info.replication[1].upstream.status == 'follow' do fiber.sleep(0.0001) end --- ... box.error.injection.set("ERRINJ_WAL_WRITE", false) --- - ok ... s:replace{9001, "bump lsn"} --- - [9001, 'bump lsn'] ... box.cfg{replication={}} --- ... box.cfg{replication = os.getenv('MASTER')} --- ... test_run:cmd("switch default") --- - true ... test_f(61, true) --- ... test_run:cmd("switch replica") --- - true ... wait_repl(70) --- - true ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... -- Set minuscule timeout to make replication stop -- immediately after join. box.cfg{replication_timeout = 0.0001} --- ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... fiber = require'fiber' --- ... while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end --- ... test_run:cmd("switch default") --- - true ... -- Disable heartbeat messages on the master so as not -- to trigger acks on the replica. errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 5) --- - ok ... box.cfg{replication_timeout = 0.05} --- ... test_run:cmd("switch replica") --- - true ... -- wait for reconnect while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end --- ... box.info.replication[1].upstream.status --- - follow ... box.info.replication[1].upstream.lag > 0 --- - true ... box.info.replication[1].upstream.lag < 1 --- - true ... 
-- wait for ack timeout while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end --- ... test_run:cmd("switch default") --- - true ... errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 0) --- - ok ... box.cfg{replication_timeout = 5} --- ... test_run:cmd("switch replica") --- - true ... -- wait for reconnect while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end --- ... -- wait for ack timeout again, should be ok fiber.sleep(0.01) --- ... {box.info.replication[1].upstream.status, box.info.replication[1].upstream.message} --- - - follow ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... errinj = box.error.injection --- ... errinj.set("ERRINJ_RELAY_EXIT_DELAY", 0.01) --- - ok ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... old_repl = box.cfg.replication --- ... -- shutdown applier box.cfg{replication = {}, replication_timeout = 0.1} --- ... while box.info.replication[1].upstream ~= nil do fiber.sleep(0.0001) end --- ... -- reconnect box.cfg{replication = {old_repl}} --- ... while box.info.replication[1].upstream.status ~= 'disconnected' do fiber.sleep(0.0001) end --- ... while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end --- ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... errinj.set("ERRINJ_RELAY_EXIT_DELAY", 0) --- - ok ... box.cfg{replication_timeout = 0.01} --- ... test_run:cmd("create server replica_timeout with rpl_master=default, script='replication/replica_timeout.lua'") --- - true ... test_run:cmd("start server replica_timeout with args='0.01'") --- - true ... test_run:cmd("switch replica_timeout") --- - true ... fiber = require('fiber') --- ... 
while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end --- ... box.info.replication[1].upstream.status --- - follow ... test_run:cmd("switch default") --- - true ... errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 5) --- - ok ... test_run:cmd("switch replica_timeout") --- - true ... -- Check replica's disconnection on timeout (gh-3025). -- If master stops send heartbeat messages to replica, -- due to infinite read timeout connection never breaks, -- replica shows state 'follow' so old behaviour hangs -- here in infinite loop. while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end --- ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica_timeout") --- - true ... test_run:cmd("cleanup server replica_timeout") --- - true ... errinj.set("ERRINJ_RELAY_REPORT_INTERVAL", 0) --- - ok ... -- Check replica's ACKs don't prevent the master from sending -- heartbeat messages (gh-3160). test_run:cmd("start server replica_timeout with args='0.009'") --- - true ... test_run:cmd("switch replica_timeout") --- - true ... fiber = require('fiber') --- ... while box.info.replication[1].upstream.status ~= 'follow' do fiber.sleep(0.0001) end --- ... box.info.replication[1].upstream.status -- follow --- - follow ... for i = 0, 15 do fiber.sleep(0.01) if box.info.replication[1].upstream.status ~= 'follow' then break end end --- ... box.info.replication[1].upstream.status -- follow --- - follow ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica_timeout") --- - true ... test_run:cmd("cleanup server replica_timeout") --- - true ... box.snapshot() --- - ok ... for i = 0, 9999 do box.space.test:replace({i, 4, 5, 'test'}) end --- ... -- Check that replication_timeout is not taken into account -- during the join stage, i.e. a replica with a minuscule -- timeout successfully bootstraps and breaks connection only -- after subscribe. 
test_run:cmd("start server replica_timeout with args='0.00001'") --- - true ... test_run:cmd("switch replica_timeout") --- - true ... fiber = require('fiber') --- ... while box.info.replication[1].upstream.message ~= 'timed out' do fiber.sleep(0.0001) end --- ... test_run:cmd("stop server default") --- - true ... test_run:cmd("deploy server default") --- - true ... test_run:cmd("start server default") --- - true ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica_timeout") --- - true ... test_run:cmd("cleanup server replica_timeout") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/replication/status.test.lua0000664000000000000000000001001113306560010022465 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd('restart server default with cleanup=1') test_run:cmd('switch default') -- -- No replication -- master_id = box.info.id #box.info.vclock == 0 #box.info.replication == 1 box.space._cluster:count() == 1 box.info.uuid == box.space._cluster:get(master_id)[2] -- LSN is nil until a first request is made box.info.vclock[master_id] == nil --- box.info.lsn == box.info.vclock[master_id] box.info.lsn == 0 -- Make the first request box.schema.user.grant('guest', 'replication') -- LSN is 1 after the first request #box.info.vclock == 1 box.info.vclock[master_id] == 1 box.info.lsn == box.info.vclock[master_id] master = box.info.replication[master_id] master.id == master_id master.uuid == box.space._cluster:get(master_id)[2] master.lsn == box.info.vclock[master_id] master.upstream == nil master.downstream == nil -- Start Master -> Slave replication test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") test_run:cmd("start server replica") -- -- Master -- test_run:cmd('switch default') #box.info.vclock == 1 -- box.info.vclock[replica_id] is nil #box.info.replication == 2 box.space._cluster:count() == 2 -- master's status master_id = box.info.id box.info.vclock[master_id] == 2 -- 
grant + registration == 2 box.info.lsn == box.info.vclock[master_id] master = box.info.replication[master_id] master.id == master_id master.uuid == box.space._cluster:get(master_id)[2] master.lsn == box.info.vclock[master_id] master.upstream == nil master.downstream == nil -- replica's status replica_id = test_run:get_server_id('replica') box.info.vclock[replica_id] == nil replica = box.info.replication[replica_id] replica.id == replica_id replica.uuid == box.space._cluster:get(replica_id)[2] -- replica.lsn == box.info.vclock[replica_id] replica.lsn == 0 replica.upstream == nil replica.downstream.vclock[master_id] == box.info.vclock[master_id] replica.downstream.vclock[replica_id] == box.info.vclock[replica_id] -- -- Replica -- test_run:cmd('switch replica') #box.info.vclock == 1 -- box.info.vclock[replica_id] is nil #box.info.replication == 2 box.space._cluster:count() == 2 -- master's status master_id = test_run:get_server_id('default') box.info.vclock[master_id] == 2 master = box.info.replication[master_id] master.id == master_id master.uuid == box.space._cluster:get(master_id)[2] master.upstream.status == "follow" master.upstream.lag < 1 master.upstream.idle < 1 master.upstream.peer:match("localhost") master.downstream == nil -- replica's status replica_id = box.info.id box.info.vclock[replica_id] == nil -- box.info.lsn == box.info.vclock[replica_id] box.info.lsn == 0 replica = box.info.replication[replica_id] replica.id == replica_id replica.uuid == box.space._cluster:get(replica_id)[2] -- replica.lsn == box.info.vclock[replica_id] replica.lsn == 0 replica.upstream == nil replica.downstream == nil -- -- ClientError during replication -- test_run:cmd('switch replica') box.space._schema:insert({'dup'}) test_run:cmd('switch default') box.space._schema:insert({'dup'}) test_run:cmd('switch replica') r = box.info.replication[1] r.upstream.status == "stopped" r.upstream.message:match('Duplicate') ~= nil test_run:cmd('switch default') box.space._schema:delete({'dup'}) 
test_run:cmd("push filter ', lsn: [0-9]+' to ', lsn: '") test_run:grep_log('replica', 'error applying row: .*') test_run:cmd("clear filter") -- -- Check box.info.replication login -- test_run:cmd('switch replica') test_run:cmd("set variable master_port to 'replica.master'") replica_uri = os.getenv("LISTEN") box.cfg{replication = {"guest@localhost:" .. master_port, replica_uri}} master_id = test_run:get_server_id('default') master = box.info.replication[master_id] master.id == master_id master.upstream.status == "follow" master.upstream.peer:match("guest") master.upstream.peer:match("localhost") master.downstream == nil test_run:cmd('switch default') -- -- Cleanup -- box.schema.user.revoke('guest', 'replication') test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") tarantool_1.9.1.26.g63eb81e3c/test/replication/recover_missing_xlog.result0000664000000000000000000000370213306565107025175 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } --- ... -- Start servers test_run:create_cluster(SERVERS) --- ... -- Wait for full mesh test_run:wait_fullmesh(SERVERS) --- ... test_run:cmd("switch autobootstrap1") --- - true ... for i = 0, 9 do box.space.test:insert{i, 'test' .. i} end --- ... box.space.test:count() --- - 10 ... test_run:cmd('switch default') --- - true ... vclock1 = test_run:get_vclock('autobootstrap1') --- ... vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock1) --- ... test_run:cmd("switch autobootstrap2") --- - true ... box.space.test:count() --- - 10 ... box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.01) --- - ok ... test_run:cmd("stop server autobootstrap1") --- - true ... fio = require('fio') --- ... -- This test checks ability to recover missing local data -- from remote replica. See #3210. 
-- Delete data on first master and test that after restart, -- due to difference in vclock it will be able to recover -- all missing data from replica. -- Also check that there is no concurrency, i.e. master is -- in 'read-only' mode unless it receives all data. fio.unlink(fio.pathjoin(fio.abspath("."), string.format('autobootstrap1/%020d.xlog', 8))) --- - true ... test_run:cmd("start server autobootstrap1") --- - true ... test_run:cmd("switch autobootstrap1") --- - true ... for i = 10, 19 do box.space.test:insert{i, 'test' .. i} end --- ... fiber = require('fiber') --- ... box.space.test:select() --- - - [0, 'test0'] - [1, 'test1'] - [2, 'test2'] - [3, 'test3'] - [4, 'test4'] - [5, 'test5'] - [6, 'test6'] - [7, 'test7'] - [8, 'test8'] - [9, 'test9'] - [10, 'test10'] - [11, 'test11'] - [12, 'test12'] - [13, 'test13'] - [14, 'test14'] - [15, 'test15'] - [16, 'test16'] - [17, 'test17'] - [18, 'test18'] - [19, 'test19'] ... -- Cleanup. test_run:cmd('switch default') --- - true ... test_run:drop_cluster(SERVERS) --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/catch.test.lua0000664000000000000000000000411013306560010022227 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') box.schema.user.grant('guest', 'read,write,execute', 'universe') net_box = require('net.box') errinj = box.error.injection box.schema.user.grant('guest', 'replication') test_run:cmd("create server replica with rpl_master=default, script='replication/replica_timeout.lua'") test_run:cmd("start server replica with args='1'") test_run:cmd("switch replica") test_run:cmd("switch default") s = box.schema.space.create('test', {engine = engine}); -- vinyl does not support hash index index = s:create_index('primary', {type = (engine == 'vinyl' and 'tree' or 'hash') }) test_run:cmd("switch replica") fiber = require('fiber') while box.space.test == nil do fiber.sleep(0.01) end test_run:cmd("switch default") test_run:cmd("stop server replica") -- insert values on the master while replica is stopped and can't fetch them for i=1,100 do s:insert{i, 'this is test message12345'} end -- sleep after every tuple errinj.set("ERRINJ_RELAY_TIMEOUT", 1000.0) test_run:cmd("start server replica with args='0.01'") test_run:cmd("switch replica") -- Check that replica doesn't enter read-write mode before -- catching up with the master: to check that we inject sleep into -- the master relay_send function and attempt a data modifying -- statement in replica while it's still fetching data from the -- master. 
-- In the next two cases we try to delete a tuple while replica is -- catching up with the master (local delete, remote delete) case -- -- #1: delete tuple on replica -- box.space.test ~= nil d = box.space.test:delete{1} box.space.test:get(1) ~= nil -- case #2: delete tuple by net.box test_run:cmd("switch default") test_run:cmd("set variable r_uri to 'replica.listen'") c = net_box.connect(r_uri) d = c.space.test:delete{1} c.space.test:get(1) ~= nil -- check sync errinj.set("ERRINJ_RELAY_TIMEOUT", 0) -- cleanup test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") box.space.test:drop() box.schema.user.revoke('guest', 'replication') box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/replication/replica_uuid.lua0000664000000000000000000000044313306560010022641 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ instance_uuid = arg[1], listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, replication_connect_timeout = 0.5, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap_guest.test.lua0000664000000000000000000000246413306560010025274 0ustar rootrootenv = require('test_run') vclock_diff = require('fast_replica').vclock_diff test_run = env.new() SERVERS = { 'autobootstrap_guest1', 'autobootstrap_guest2', 'autobootstrap_guest3' } -- -- Start servers -- test_run:create_cluster(SERVERS) -- -- Wait for full mesh -- test_run:wait_fullmesh(SERVERS) -- -- Check vclock -- vclock1 = test_run:get_vclock('autobootstrap_guest1') vclock_diff(vclock1, test_run:get_vclock('autobootstrap_guest2')) vclock_diff(vclock1, test_run:get_vclock('autobootstrap_guest3')) -- -- Insert rows on each server -- _ = test_run:cmd("switch autobootstrap_guest1") _ = box.space.test:insert({box.info.id}) _ = test_run:cmd("switch autobootstrap_guest2") _ = box.space.test:insert({box.info.id}) _ = test_run:cmd("switch autobootstrap_guest3") _ = 
box.space.test:insert({box.info.id}) _ = test_run:cmd("switch default") -- -- Synchronize -- vclock = test_run:get_cluster_vclock(SERVERS) vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock) vclock_diff(vclock1, vclock2) -- -- Check result -- _ = test_run:cmd("switch autobootstrap_guest1") box.space.test:select() _ = test_run:cmd("switch autobootstrap_guest2") box.space.test:select() _ = test_run:cmd("switch autobootstrap_guest3") box.space.test:select() _ = test_run:cmd("switch default") -- -- Stop servers -- test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/rebootstrap2.lua0000777000000000000000000000000013306560010025663 2rebootstrap.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/ddl.result0000664000000000000000000000105013306560010021467 0ustar rootroottest_run = require('test_run').new() --- ... SERVERS = { 'ddl1', 'ddl2', 'ddl3', 'ddl4' } --- ... -- Deploy a cluster. test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... test_run:cmd("switch ddl1") --- - true ... test_run = require('test_run').new() --- ... fiber = require('fiber') --- ... for i = 0, 199 do box.space.test:replace({1, 2, 3, 4}) box.space.test:truncate() box.space.test:truncate() end --- ... fiber.sleep(0.001) --- ... test_run:cmd("switch default") --- - true ... test_run:drop_cluster(SERVERS) --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/gc.result0000664000000000000000000001506713306565107021346 0ustar rootroottest_run = require('test_run').new() --- ... engine = test_run:get_cfg('engine') --- ... replica_set = require('fast_replica') --- ... fiber = require('fiber') --- ... test_run:cleanup_cluster() --- ... -- Make each snapshot trigger garbage collection. default_checkpoint_count = box.cfg.checkpoint_count --- ... box.cfg{checkpoint_count = 1} --- ... function wait_gc(n) while #box.internal.gc.info().checkpoints > n do fiber.sleep(0.01) end end --- ... -- Grant permissions needed for replication. 
box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... box.schema.user.grant('guest', 'replication') --- ... -- By default, relay thread reports status to tx once a second. -- To reduce the test execute time, let's set it to 50 ms. box.error.injection.set("ERRINJ_RELAY_REPORT_INTERVAL", 0.05) --- - ok ... -- Create and populate the space we will replicate. s = box.schema.space.create('test', {engine = engine}); --- ... _ = s:create_index('pk') --- ... for i = 1, 100 do s:auto_increment{} end --- ... box.snapshot() --- - ok ... for i = 1, 100 do s:auto_increment{} end --- ... -- Make sure replica join will take long enough for us to -- invoke garbage collection. box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.05) --- - ok ... -- While the replica is receiving the initial data set, -- make a snapshot and invoke garbage collection, then -- remove the timeout injection so that we don't have to -- wait too long for the replica to start. test_run:cmd("setopt delimiter ';'") --- - true ... fiber.create(function() fiber.sleep(0.1) box.snapshot() box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0) end) test_run:cmd("setopt delimiter ''"); --- ... -- Start the replica. test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") --- - true ... test_run:cmd("start server replica") --- - true ... -- Despite the fact that we invoked garbage collection that -- would have normally removed the snapshot the replica was -- bootstrapped from, the replica should still receive all -- data from the master. Check it. test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... while box.space.test:count() < 200 do fiber.sleep(0.01) end --- ... box.space.test:count() --- - 200 ... test_run:cmd("switch default") --- - true ... -- Check that garbage collection removed the snapshot once -- the replica released the corresponding checkpoint. wait_gc(1) --- ... 
#box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() --- - true ... -- Make sure the replica will receive data it is subscribed -- to long enough for us to invoke garbage collection. box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0.05) --- - ok ... -- Send more data to the replica. for i = 1, 100 do s:auto_increment{} end --- ... -- Invoke garbage collection. Check that it doesn't remove -- xlogs needed by the replica. box.snapshot() --- - ok ... #box.internal.gc.info().checkpoints == 2 or box.internal.gc.info() --- - true ... -- Remove the timeout injection so that the replica catches -- up quickly. box.error.injection.set("ERRINJ_RELAY_TIMEOUT", 0) --- - ok ... -- Check that the replica received all data from the master. test_run:cmd("switch replica") --- - true ... while box.space.test:count() < 300 do fiber.sleep(0.01) end --- ... box.space.test:count() --- - 300 ... test_run:cmd("switch default") --- - true ... -- Now garbage collection should resume and delete files left -- from the old checkpoint. wait_gc(1) --- ... #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() --- - true ... -- -- Check that the master doesn't delete xlog files sent to the -- replica until it receives a confirmation that the data has -- been applied (gh-2825). -- test_run:cmd("switch replica") --- - true ... -- Prevent the replica from applying any rows. box.error.injection.set("ERRINJ_WAL_DELAY", true) --- - ok ... test_run:cmd("switch default") --- - true ... -- Generate some data on the master. for i = 1, 5 do s:auto_increment{} end --- ... box.snapshot() -- rotate xlog --- - ok ... for i = 1, 5 do s:auto_increment{} end --- ... fiber.sleep(0.1) -- wait for master to relay data --- ... -- Garbage collection must not delete the old xlog file -- (and the corresponding snapshot), because it is still -- needed by the replica. #box.internal.gc.info().checkpoints == 2 or box.internal.gc.info() --- - true ... test_run:cmd("switch replica") --- - true ... 
-- Unblock the replica and make it fail to apply a row. box.info.replication[1].upstream.message == nil --- - true ... box.error.injection.set("ERRINJ_WAL_WRITE", true) --- - ok ... box.error.injection.set("ERRINJ_WAL_DELAY", false) --- - ok ... while box.info.replication[1].upstream.message == nil do fiber.sleep(0.01) end --- ... box.info.replication[1].upstream.message --- - Failed to write to disk ... test_run:cmd("switch default") --- - true ... -- Restart the replica to reestablish replication. test_run:cmd("restart server replica") --- - true ... -- Wait for the replica to catch up. test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... while box.space.test:count() < 310 do fiber.sleep(0.01) end --- ... box.space.test:count() --- - 310 ... test_run:cmd("switch default") --- - true ... -- Now it's safe to drop the old xlog. wait_gc(1) --- ... #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() --- - true ... -- Stop the replica. test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... -- Invoke garbage collection. Check that it doesn't remove -- the checkpoint last used by the replica. _ = s:auto_increment{} --- ... box.snapshot() --- - ok ... #box.internal.gc.info().checkpoints == 2 or box.internal.gc.info() --- - true ... -- The checkpoint should only be deleted after the replica -- is unregistered. test_run:cleanup_cluster() --- ... #box.internal.gc.info().checkpoints == 1 or box.internal.gc.info() --- - true ... -- -- Test that concurrent invocation of the garbage collector works fine. -- s:truncate() --- ... for i = 1, 10 do s:replace{i} end --- ... box.snapshot() --- - ok ... replica_set.join(test_run, 3) --- ... replica_set.stop_all(test_run) --- ... for i = 11, 50 do s:replace{i} if i % 10 == 0 then box.snapshot() end end --- ... replica_set.start_all(test_run) --- ... replica_set.wait_all(test_run) --- ... replica_set.drop_all(test_run) --- ... -- Cleanup. 
s:drop() --- ... box.error.injection.set("ERRINJ_RELAY_REPORT_INTERVAL", 0) --- - ok ... box.schema.user.revoke('guest', 'replication') --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... box.cfg{checkpoint_count = default_checkpoint_count} --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/replica_no_quorum.lua0000664000000000000000000000044413306560010023720 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, replication_connect_quorum = 0, replication_connect_timeout = 0.1, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication/master_quorum2.lua0000777000000000000000000000000013306560010026561 2master_quorum.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/quorum.lua0000664000000000000000000000165713306560010021534 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (quorum1.lua => quorum1) local INSTANCE_ID = string.match(arg[0], "%d") local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/quorum'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID); replication_timeout = 0.05; replication_sync_lag = 0.01; replication_connect_timeout = 0.1; replication_connect_quorum = 3; replication = { instance_uri(1); instance_uri(2); instance_uri(3); }; }) box.once("bootstrap", function() local test_run = require('test_run').new() box.schema.user.grant("guest", 'replication') box.schema.space.create('test', {engine = test_run:get_cfg('engine')}) box.space.test:create_index('primary') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap2.lua0000777000000000000000000000000013306560010026567 2autobootstrap.luaustar 
rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/ddl.test.lua0000664000000000000000000000071013306560010021712 0ustar rootroottest_run = require('test_run').new() SERVERS = { 'ddl1', 'ddl2', 'ddl3', 'ddl4' } -- Deploy a cluster. test_run:create_cluster(SERVERS) test_run:wait_fullmesh(SERVERS) test_run:cmd("switch ddl1") test_run = require('test_run').new() fiber = require('fiber') for i = 0, 199 do box.space.test:replace({1, 2, 3, 4}) box.space.test:truncate() box.space.test:truncate() end fiber.sleep(0.001) test_run:cmd("switch default") test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/replica_uuid_ro2.lua0000777000000000000000000000000013306560010027265 2replica_uuid_ro.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/replica_uuid_ro1.lua0000777000000000000000000000000013306560010027264 2replica_uuid_ro.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/catch.result0000664000000000000000000000514213306560010022014 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... net_box = require('net.box') --- ... errinj = box.error.injection --- ... box.schema.user.grant('guest', 'replication') --- ... test_run:cmd("create server replica with rpl_master=default, script='replication/replica_timeout.lua'") --- - true ... test_run:cmd("start server replica with args='1'") --- - true ... test_run:cmd("switch replica") --- - true ... test_run:cmd("switch default") --- - true ... s = box.schema.space.create('test', {engine = engine}); --- ... -- vinyl does not support hash index index = s:create_index('primary', {type = (engine == 'vinyl' and 'tree' or 'hash') }) --- ... test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... while box.space.test == nil do fiber.sleep(0.01) end --- ... test_run:cmd("switch default") --- - true ... 
test_run:cmd("stop server replica") --- - true ... -- insert values on the master while replica is stopped and can't fetch them for i=1,100 do s:insert{i, 'this is test message12345'} end --- ... -- sleep after every tuple errinj.set("ERRINJ_RELAY_TIMEOUT", 1000.0) --- - ok ... test_run:cmd("start server replica with args='0.01'") --- - true ... test_run:cmd("switch replica") --- - true ... -- Check that replica doesn't enter read-write mode before -- catching up with the master: to check that we inject sleep into -- the master relay_send function and attempt a data modifying -- statement in replica while it's still fetching data from the -- master. -- In the next two cases we try to delete a tuple while replica is -- catching up with the master (local delete, remote delete) case -- -- #1: delete tuple on replica -- box.space.test ~= nil --- - true ... d = box.space.test:delete{1} --- - error: Can't modify data because this instance is in read-only mode. ... box.space.test:get(1) ~= nil --- - true ... -- case #2: delete tuple by net.box test_run:cmd("switch default") --- - true ... test_run:cmd("set variable r_uri to 'replica.listen'") --- - true ... c = net_box.connect(r_uri) --- ... d = c.space.test:delete{1} --- - error: Can't modify data because this instance is in read-only mode. ... c.space.test:get(1) ~= nil --- - true ... -- check sync errinj.set("ERRINJ_RELAY_TIMEOUT", 0) --- - ok ... -- cleanup test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... box.space.test:drop() --- ... box.schema.user.revoke('guest', 'replication') --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/wal_off.result0000664000000000000000000000333313306560010022347 0ustar rootroot-- -- gh-1233: JOIN/SUBSCRIBE must fail if master has wal_mode = "none" -- env = require('test_run') --- ... test_run = env.new() --- ... 
test_run:cmd('switch default') --- - true ... fiber = require('fiber') --- ... box.schema.user.grant('guest', 'replication') --- ... test_run:cmd("create server wal_off with rpl_master=default, script='replication/wal_off.lua'") --- - true ... test_run:cmd("start server wal_off") --- - true ... test_run:cmd('switch default') --- - true ... wal_off_uri = test_run:eval('wal_off', 'return box.cfg.listen')[1] --- ... wal_off_uri ~= nil --- - true ... wal_off_id = test_run:eval('wal_off', 'return box.info.id')[1] --- ... box.cfg { replication = wal_off_uri } --- ... check = "Replication does not support wal_mode = 'none'" --- ... while box.info.replication[wal_off_id].upstream.message ~= check do fiber.sleep(0) end --- ... box.info.replication[wal_off_id].upstream ~= nil --- - true ... box.info.replication[wal_off_id].downstream ~= nil --- - true ... box.info.replication[wal_off_id].upstream.status --- - stopped ... box.info.replication[wal_off_id].upstream.message --- - Replication does not support wal_mode = 'none' ... box.cfg { replication = "" } --- ... test_run:cmd('switch wal_off') --- - true ... box.schema.user.revoke('guest', 'replication') --- ... test_run:cmd('switch default') --- - true ... box.cfg { replication = wal_off_uri } --- ... check = "Read access to universe" --- ... while string.find(box.info.replication[wal_off_id].upstream.message, check) == nil do fiber.sleep(0.01) end --- ... box.cfg { replication = "" } --- ... test_run:cmd("stop server wal_off") --- - true ... test_run:cmd("cleanup server wal_off") --- - true ... box.schema.user.revoke('guest', 'replication') --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/misc.result0000664000000000000000000001013113306560010021657 0ustar rootrootuuid = require('uuid') --- ... test_run = require('test_run').new() --- ... box.schema.user.grant('guest', 'replication') --- ... 
-- gh-2991 - Tarantool asserts on box.cfg.replication update if one of -- servers is dead replication_timeout = box.cfg.replication_timeout --- ... replication_connect_timeout = box.cfg.replication_connect_timeout --- ... box.cfg{replication_timeout=0.05, replication_connect_timeout=0.05, replication={}} --- ... box.cfg{replication = {'127.0.0.1:12345', box.cfg.listen}} --- - error: 'Incorrect value for option ''replication'': failed to connect to one or more replicas' ... box.cfg{replication_timeout = replication_timeout, replication_connect_timeout = replication_connect_timeout} --- ... -- gh-3111 - Allow to rebootstrap a replica from a read-only master replica_uuid = uuid.new() --- ... test_run:cmd('create server test with rpl_master=default, script="replication/replica_uuid.lua"') --- - true ... test_run:cmd(string.format('start server test with args="%s"', replica_uuid)) --- - true ... test_run:cmd('stop server test') --- - true ... test_run:cmd('cleanup server test') --- - true ... box.cfg{read_only = true} --- ... test_run:cmd(string.format('start server test with args="%s"', replica_uuid)) --- - true ... test_run:cmd('stop server test') --- - true ... test_run:cmd('cleanup server test') --- - true ... box.cfg{read_only = false} --- ... -- gh-3160 - Send heartbeats if there are changes from a remote master only SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } --- ... -- Deploy a cluster. test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... test_run:cmd("switch autobootstrap1") --- - true ... test_run = require('test_run').new() --- ... box.cfg{replication_timeout = 0.01, replication_connect_timeout=0.01} --- ... test_run:cmd("switch autobootstrap2") --- - true ... test_run = require('test_run').new() --- ... box.cfg{replication_timeout = 0.01, replication_connect_timeout=0.01} --- ... test_run:cmd("switch autobootstrap3") --- - true ... test_run = require('test_run').new() --- ... fiber=require('fiber') --- ... 
box.cfg{replication_timeout = 0.01, replication_connect_timeout=0.01} --- ... _ = box.schema.space.create('test_timeout'):create_index('pk') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function test_timeout() for i = 0, 99 do box.space.test_timeout:replace({1}) fiber.sleep(0.005) local rinfo = box.info.replication if rinfo[1].upstream and rinfo[1].upstream.status ~= 'follow' or rinfo[2].upstream and rinfo[2].upstream.status ~= 'follow' or rinfo[3].upstream and rinfo[3].upstream.status ~= 'follow' then return error('Replication broken') end end return true end ; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_timeout() --- - true ... -- gh-3247 - Sequence-generated value is not replicated in case -- the request was sent via iproto. test_run:cmd("switch autobootstrap1") --- - true ... net_box = require('net.box') --- ... _ = box.schema.space.create('space1') --- ... _ = box.schema.sequence.create('seq') --- ... _ = box.space.space1:create_index('primary', {sequence = true} ) --- ... _ = box.space.space1:create_index('secondary', {parts = {2, 'unsigned'}}) --- ... box.schema.user.grant('guest', 'read,write', 'space', 'space1') --- ... c = net_box.connect(box.cfg.listen) --- ... c.space.space1:insert{box.NULL, "data"} -- fails, but bumps sequence value --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... c.space.space1:insert{box.NULL, 1, "data"} --- - [2, 1, 'data'] ... box.space.space1:select{} --- - - [2, 1, 'data'] ... vclock = test_run:get_vclock("autobootstrap1") --- ... _ = test_run:wait_vclock("autobootstrap2", vclock) --- ... test_run:cmd("switch autobootstrap2") --- - true ... box.space.space1:select{} --- - - [2, 1, 'data'] ... test_run:cmd("switch autobootstrap1") --- - true ... box.space.space1:drop() --- ... test_run:cmd("switch default") --- - true ... test_run:drop_cluster(SERVERS) --- ... box.schema.user.revoke('guest', 'replication') --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/once.result0000664000000000000000000000225313306560010021656 0ustar rootrootfiber = require('fiber') --- ... box.once() --- - error: 'Illegal parameters, Usage: box.once(key, func, ...)' ... box.once("key") --- - error: 'Illegal parameters, Usage: box.once(key, func, ...)' ... box.once("key", "key") --- - error: 'Illegal parameters, Usage: box.once(key, func, ...)' ... box.once("key", nil) --- - error: 'Illegal parameters, Usage: box.once(key, func, ...)' ... box.once("key", function() end) --- ... once = nil --- ... function f(arg) if once ~= nil then once = once + arg else once = arg end end --- ... box.once("test", f, 1) --- ... once --- - 1 ... box.once("test", f, 1) --- ... once --- - 1 ... -- Check that box.once() does not fail if the instance is read-only, -- instead it waits until the instance enters read-write mode. once = nil --- ... box.cfg{read_only = true} --- ... ch = fiber.channel(1) --- ... _ = fiber.create(function() box.once("ro", f, 1) ch:put(true) end) --- ... fiber.sleep(0.001) --- ... once -- nil --- - null ... box.cfg{read_only = false} --- ... ch:get() --- - true ... once -- 1 --- - 1 ... box.cfg{read_only = true} --- ... box.once("ro", f, 1) -- ok, already done --- ... once -- 1 --- - 1 ... box.cfg{read_only = false} --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap_guest.lua0000664000000000000000000000160613306560010024313 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (autobootstrap_guest1.lua => autobootstrap_guest1) local INSTANCE_ID = string.match(arg[0], "%d") local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/autobootstrap_guest'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID); -- log_level = 7; replication = { instance_uri(1); instance_uri(2); instance_uri(3); }; replication_connect_timeout = 0.5, }) box.once("bootstrap", function() local test_run = require('test_run').new() box.schema.user.grant("guest", 'replication') box.schema.space.create('test', {engine = test_run:get_cfg('engine')}) box.space.test:create_index('primary') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap.test.lua0000664000000000000000000000547713306560010024074 0ustar rootrootenv = require('test_run') vclock_diff = require('fast_replica').vclock_diff test_run = env.new() SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } -- -- Start servers -- test_run:create_cluster(SERVERS) -- -- Wait for full mesh -- test_run:wait_fullmesh(SERVERS) -- -- Check vclock -- vclock1 = test_run:get_vclock('autobootstrap1') vclock_diff(vclock1, test_run:get_vclock('autobootstrap2')) vclock_diff(vclock1, test_run:get_vclock('autobootstrap3')) -- -- Insert rows on each server -- _ = test_run:cmd("switch autobootstrap1") _ = box.space.test:insert({box.info.id}) _ = test_run:cmd("switch autobootstrap2") _ = box.space.test:insert({box.info.id}) _ = test_run:cmd("switch autobootstrap3") _ = box.space.test:insert({box.info.id}) _ = test_run:cmd("switch default") -- -- Synchronize -- vclock = test_run:get_cluster_vclock(SERVERS) vclock2 = 
test_run:wait_cluster_vclock(SERVERS, vclock) vclock_diff(vclock1, vclock2) -- -- Check result -- _ = test_run:cmd("switch autobootstrap1") box.space.test:select() _ = test_run:cmd("switch autobootstrap2") box.space.test:select() _ = test_run:cmd("switch autobootstrap3") box.space.test:select() _ = test_run:cmd("switch default") _ = test_run:cmd("switch autobootstrap1") u1 = box.schema.user.create('test_u') box.schema.user.grant('test_u', 'read,write,create', 'universe') box.session.su('test_u') _ = box.schema.space.create('test_u'):create_index('pk') box.session.su('admin') _ = box.space.test_u:replace({1, 2, 3, 4}) box.space.test_u:select() -- Synchronize vclock = test_run:get_vclock('autobootstrap1') _ = test_run:wait_vclock("autobootstrap2", vclock) _ = test_run:wait_vclock("autobootstrap3", vclock) _ = test_run:cmd("switch autobootstrap2") box.space.test_u:select() _ = test_run:cmd("switch autobootstrap3") box.space.test_u:select() -- -- Rebootstrap one node and check that others follow. -- _ = test_run:cmd("switch autobootstrap1") _ = test_run:cmd("restart server autobootstrap1 with cleanup=1") _ = box.space.test_u:replace({5, 6, 7, 8}) box.space.test_u:select() _ = test_run:cmd("switch default") test_run:wait_fullmesh(SERVERS) vclock = test_run:get_vclock("autobootstrap1") _ = test_run:wait_vclock("autobootstrap2", vclock) _ = test_run:wait_vclock("autobootstrap3", vclock) _ = test_run:cmd("switch autobootstrap2") box.space.test_u:select() _ = test_run:cmd("switch autobootstrap3") box.space.test_u:select() _ = test_run:cmd("switch default") _ = test_run:cmd("switch autobootstrap1") for i = 0, 99 do box.schema.space.create('space' .. tostring(i)):format({{'id', 'unsigned'}}) end _ = test_run:cmd("switch autobootstrap2") _ = test_run:cmd("switch autobootstrap3") _ = test_run:cmd("switch autobootstrap1") for i = 0, 99 do box.space['space' .. 
tostring(i)]:drop() end _ = test_run:cmd("switch default") -- -- Stop servers -- test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/replicaset_ro_mostly.test.lua0000664000000000000000000000235713306560010025422 0ustar rootroot-- gh-3257 check bootstrap with read-only replica in cluster. -- Old behaviour: failed, since read-only is chosen by uuid. test_run = require('test_run').new() SERVERS = {'replica_uuid_ro1', 'replica_uuid_ro2'} uuid = require('uuid') uuid1 = uuid.new() uuid2 = uuid.new() function sort_cmp(a, b) return a.time_low > b.time_low and true or false end function sort(t) table.sort(t, sort_cmp) return t end UUID = sort({uuid1, uuid2}, sort_cmp) create_cluster_cmd1 = 'create server %s with script="replication/%s.lua"' create_cluster_cmd2 = 'start server %s with args="%s", wait_load=False, wait=False' test_run:cmd("setopt delimiter ';'") function create_cluster_uuid(servers, uuids) for i, name in ipairs(servers) do test_run:cmd(create_cluster_cmd1:format(name, name)) test_run:cmd(create_cluster_cmd2:format(name, uuids[i])) end end; test_run:cmd("setopt delimiter ''"); -- Deploy a cluster. create_cluster_uuid(SERVERS, UUID) test_run:wait_fullmesh(SERVERS) -- Add third replica name = 'replica_uuid_ro3' test_run:cmd(create_cluster_cmd1:format(name, name)) test_run:cmd(create_cluster_cmd2:format(name, uuid.new())) test_run:cmd('switch replica_uuid_ro3') test_run:cmd('switch default') -- Cleanup. test_run:drop_cluster(SERVERS) tarantool_1.9.1.26.g63eb81e3c/test/replication/consistent.result0000664000000000000000000002402613306560010023125 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... --# create server replica with rpl_master=default, script='replication/replica.lua' --# start server replica --# set connection default box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... 
-- Wait until the grant reaches the replica --# set connection replica while box.space['_priv']:len() < 1 do box.fiber.sleep(0.01) end --- ... --# setopt delimiter ';' --# set connection default, replica do begin_lsn = -1 function _set_pri_lsn(_lsn) begin_lsn = _lsn end function _print_lsn() return (box.info.lsn - begin_lsn + 1) end function _insert(_begin, _end, msg) local a = {} for i = _begin, _end do table.insert(a, box.space[0]:insert{i, msg..' - '..i}) end return unpack(a) end function _select(_begin, _end) local a = {} while box.info.lsn < begin_lsn + _end + 2 do box.fiber.sleep(0.001) end for i = _begin, _end do table.insert(a, box.space[0]:get{i}) end return unpack(a) end end; --- ... --# setopt delimiter '' --# set connection default --# set variable replica_port to 'replica.primary_port' -- set begin lsn on master and replica. begin_lsn = box.info.lsn --- ... a = box.net.box.new('127.0.0.1', replica_port) --- ... a:call('_set_pri_lsn', box.info.lsn) --- - [] ... a:close() --- - true ... s = box.schema.space.create('tweedledum', {id = 0, engine = engine}); --- ... -- vinyl does not support hash index index = s:create_index('primary', {type = (engine == 'vinyl' and 'tree' or 'hash') }) --- ... _insert(1, 10, 'master') --- - [1, 'master - 1'] - [2, 'master - 2'] - [3, 'master - 3'] - [4, 'master - 4'] - [5, 'master - 5'] - [6, 'master - 6'] - [7, 'master - 7'] - [8, 'master - 8'] - [9, 'master - 9'] - [10, 'master - 10'] ... _select(1, 10) --- - [1, 'master - 1'] - [2, 'master - 2'] - [3, 'master - 3'] - [4, 'master - 4'] - [5, 'master - 5'] - [6, 'master - 6'] - [7, 'master - 7'] - [8, 'master - 8'] - [9, 'master - 9'] - [10, 'master - 10'] ... --# set connection replica _select(1, 10) --- - [1, 'master - 1'] - [2, 'master - 2'] - [3, 'master - 3'] - [4, 'master - 4'] - [5, 'master - 5'] - [6, 'master - 6'] - [7, 'master - 7'] - [8, 'master - 8'] - [9, 'master - 9'] - [10, 'master - 10'] ... 
--# set connection default -- Master LSN: _print_lsn() --- - 13 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 13 ... ----------------------------- -- Master LSN > Replica LSN ----------------------------- -------------------- -- Replica to Master -------------------- old_replication_source = box.cfg.replication_source --- ... box.cfg{replication_source=""} --- ... --# set connection default _insert(11, 20, 'master') --- - [11, 'master - 11'] - [12, 'master - 12'] - [13, 'master - 13'] - [14, 'master - 14'] - [15, 'master - 15'] - [16, 'master - 16'] - [17, 'master - 17'] - [18, 'master - 18'] - [19, 'master - 19'] - [20, 'master - 20'] ... _select(11, 20) --- - [11, 'master - 11'] - [12, 'master - 12'] - [13, 'master - 13'] - [14, 'master - 14'] - [15, 'master - 15'] - [16, 'master - 16'] - [17, 'master - 17'] - [18, 'master - 18'] - [19, 'master - 19'] - [20, 'master - 20'] ... --# set connection replica _insert (11, 15, 'replica') --- - [11, 'replica - 11'] - [12, 'replica - 12'] - [13, 'replica - 13'] - [14, 'replica - 14'] - [15, 'replica - 15'] ... _select (11, 15) --- - [11, 'replica - 11'] - [12, 'replica - 12'] - [13, 'replica - 13'] - [14, 'replica - 14'] - [15, 'replica - 15'] ... --# set connection default -- Master LSN: _print_lsn() --- - 23 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 18 ... ------------------- -- rollback Replica ------------------- box.cfg{replication_source=old_replication_source} --- ... _select(11, 20) --- - [11, 'replica - 11'] - [12, 'replica - 12'] - [13, 'replica - 13'] - [14, 'replica - 14'] - [15, 'replica - 15'] - [16, 'master - 16'] - [17, 'master - 17'] - [18, 'master - 18'] - [19, 'master - 19'] - [20, 'master - 20'] ... --# set connection default -- Master LSN: _print_lsn() --- - 23 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 23 ... 
------------------------------ -- Master LSN == Replica LSN ------------------------------ -------------------- -- Replica to Master -------------------- box.cfg{replication_source=""} --- ... --# set connection default _insert(21, 30, 'master') --- - [21, 'master - 21'] - [22, 'master - 22'] - [23, 'master - 23'] - [24, 'master - 24'] - [25, 'master - 25'] - [26, 'master - 26'] - [27, 'master - 27'] - [28, 'master - 28'] - [29, 'master - 29'] - [30, 'master - 30'] ... _select(21, 30) --- - [21, 'master - 21'] - [22, 'master - 22'] - [23, 'master - 23'] - [24, 'master - 24'] - [25, 'master - 25'] - [26, 'master - 26'] - [27, 'master - 27'] - [28, 'master - 28'] - [29, 'master - 29'] - [30, 'master - 30'] ... --# set connection replica _insert(21, 30, 'replica') --- - [21, 'replica - 21'] - [22, 'replica - 22'] - [23, 'replica - 23'] - [24, 'replica - 24'] - [25, 'replica - 25'] - [26, 'replica - 26'] - [27, 'replica - 27'] - [28, 'replica - 28'] - [29, 'replica - 29'] - [30, 'replica - 30'] ... _select(21, 30) --- - [21, 'replica - 21'] - [22, 'replica - 22'] - [23, 'replica - 23'] - [24, 'replica - 24'] - [25, 'replica - 25'] - [26, 'replica - 26'] - [27, 'replica - 27'] - [28, 'replica - 28'] - [29, 'replica - 29'] - [30, 'replica - 30'] ... --# set connection default -- Master LSN: _print_lsn() --- - 33 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 33 ... ------------------- -- rollback Replica ------------------- box.cfg{replication_source=old_replication_source} --- ... _select(21, 30) --- - [21, 'replica - 21'] - [22, 'replica - 22'] - [23, 'replica - 23'] - [24, 'replica - 24'] - [25, 'replica - 25'] - [26, 'replica - 26'] - [27, 'replica - 27'] - [28, 'replica - 28'] - [29, 'replica - 29'] - [30, 'replica - 30'] ... --# set connection default -- Master LSN: _print_lsn() --- - 33 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 33 ... 
----------------------------- -- Master LSN < Replica LSN ----------------------------- -------------------- -- Replica to Master -------------------- box.cfg{replication_source=""} --- ... --# set connection default _insert(31, 40, 'master') --- - [31, 'master - 31'] - [32, 'master - 32'] - [33, 'master - 33'] - [34, 'master - 34'] - [35, 'master - 35'] - [36, 'master - 36'] - [37, 'master - 37'] - [38, 'master - 38'] - [39, 'master - 39'] - [40, 'master - 40'] ... _select(31, 40) --- - [31, 'master - 31'] - [32, 'master - 32'] - [33, 'master - 33'] - [34, 'master - 34'] - [35, 'master - 35'] - [36, 'master - 36'] - [37, 'master - 37'] - [38, 'master - 38'] - [39, 'master - 39'] - [40, 'master - 40'] ... --# set connection replica _insert(31, 50, 'replica') --- - [31, 'replica - 31'] - [32, 'replica - 32'] - [33, 'replica - 33'] - [34, 'replica - 34'] - [35, 'replica - 35'] - [36, 'replica - 36'] - [37, 'replica - 37'] - [38, 'replica - 38'] - [39, 'replica - 39'] - [40, 'replica - 40'] - [41, 'replica - 41'] - [42, 'replica - 42'] - [43, 'replica - 43'] - [44, 'replica - 44'] - [45, 'replica - 45'] - [46, 'replica - 46'] - [47, 'replica - 47'] - [48, 'replica - 48'] - [49, 'replica - 49'] - [50, 'replica - 50'] ... _select(31, 50) --- - [31, 'replica - 31'] - [32, 'replica - 32'] - [33, 'replica - 33'] - [34, 'replica - 34'] - [35, 'replica - 35'] - [36, 'replica - 36'] - [37, 'replica - 37'] - [38, 'replica - 38'] - [39, 'replica - 39'] - [40, 'replica - 40'] - [41, 'replica - 41'] - [42, 'replica - 42'] - [43, 'replica - 43'] - [44, 'replica - 44'] - [45, 'replica - 45'] - [46, 'replica - 46'] - [47, 'replica - 47'] - [48, 'replica - 48'] - [49, 'replica - 49'] - [50, 'replica - 50'] ... --# set connection default -- Master LSN: _print_lsn() --- - 43 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 53 ... ------------------- -- rollback Replica ------------------- box.cfg{replication_source=old_replication_source} --- ... 
_select(31, 50) --- - [31, 'replica - 31'] - [32, 'replica - 32'] - [33, 'replica - 33'] - [34, 'replica - 34'] - [35, 'replica - 35'] - [36, 'replica - 36'] - [37, 'replica - 37'] - [38, 'replica - 38'] - [39, 'replica - 39'] - [40, 'replica - 40'] - [41, 'replica - 41'] - [42, 'replica - 42'] - [43, 'replica - 43'] - [44, 'replica - 44'] - [45, 'replica - 45'] - [46, 'replica - 46'] - [47, 'replica - 47'] - [48, 'replica - 48'] - [49, 'replica - 49'] - [50, 'replica - 50'] ... --# set connection default _insert(41, 60, 'master') --- - [41, 'master - 41'] - [42, 'master - 42'] - [43, 'master - 43'] - [44, 'master - 44'] - [45, 'master - 45'] - [46, 'master - 46'] - [47, 'master - 47'] - [48, 'master - 48'] - [49, 'master - 49'] - [50, 'master - 50'] - [51, 'master - 51'] - [52, 'master - 52'] - [53, 'master - 53'] - [54, 'master - 54'] - [55, 'master - 55'] - [56, 'master - 56'] - [57, 'master - 57'] - [58, 'master - 58'] - [59, 'master - 59'] - [60, 'master - 60'] ... --# set connection replica _select(41, 60) --- - [41, 'replica - 41'] - [42, 'replica - 42'] - [43, 'replica - 43'] - [44, 'replica - 44'] - [45, 'replica - 45'] - [46, 'replica - 46'] - [47, 'replica - 47'] - [48, 'replica - 48'] - [49, 'replica - 49'] - [50, 'replica - 50'] - [51, 'master - 51'] - [52, 'master - 52'] - [53, 'master - 53'] - [54, 'master - 54'] - [55, 'master - 55'] - [56, 'master - 56'] - [57, 'master - 57'] - [58, 'master - 58'] - [59, 'master - 59'] - [60, 'master - 60'] ... --# set connection default -- Master LSN: _print_lsn() --- - 63 ... --# set connection replica -- Replica LSN: _print_lsn() --- - 63 ... -- Test that a replica replies with master connection URL on update request --# push filter '127.0.0.1:.*' to '127.0.0.1:' box.space[0]:insert{0, 'replica is RO'} --- - error: 'Can''t modify data on a replication slave. My master is: 127.0.0.1: ... --# clear filter --# stop server replica --# cleanup server replica --# set connection default box.space[0]:drop() --- ... 
box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/replication/before_replace.result0000664000000000000000000000655113306565107023710 0ustar rootroot-- -- Using space:before_replace to resolve replication conflicts. -- env = require('test_run') --- ... test_run = env.new() --- ... SERVERS = { 'autobootstrap1', 'autobootstrap2', 'autobootstrap3' } --- ... -- Deploy a cluster. test_run:create_cluster(SERVERS) --- ... test_run:wait_fullmesh(SERVERS) --- ... -- Setup space:before_replace trigger on all replicas. -- The trigger favors tuples with a greater value. test_run:cmd("setopt delimiter ';'") --- - true ... test_run:cmd("switch autobootstrap1"); --- - true ... _ = box.space.test:before_replace(function(old, new) if old ~= nil and new ~= nil then return new[2] > old[2] and new or old end end); --- ... test_run:cmd("switch autobootstrap2"); --- - true ... _ = box.space.test:before_replace(function(old, new) if old ~= nil and new ~= nil then return new[2] > old[2] and new or old end end); --- ... test_run:cmd("switch autobootstrap3"); --- - true ... _ = box.space.test:before_replace(function(old, new) if old ~= nil and new ~= nil then return new[2] > old[2] and new or old end end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- Stall replication and generate incompatible data -- on the replicas. test_run:cmd("switch autobootstrap1") --- - true ... box.error.injection.set('ERRINJ_RELAY_TIMEOUT', 0.01) --- - ok ... for i = 1, 10 do box.space.test:replace{i, i % 3 == 1 and i * 10 or i} end --- ... test_run:cmd("switch autobootstrap2") --- - true ... box.error.injection.set('ERRINJ_RELAY_TIMEOUT', 0.01) --- - ok ... for i = 1, 10 do box.space.test:replace{i, i % 3 == 2 and i * 10 or i} end --- ... test_run:cmd("switch autobootstrap3") --- - true ... box.error.injection.set('ERRINJ_RELAY_TIMEOUT', 0.01) --- - ok ... for i = 1, 10 do box.space.test:replace{i, i % 3 == 0 and i * 10 or i} end --- ... 
-- Synchronize. test_run:cmd("switch default") --- - true ... vclock = test_run:get_cluster_vclock(SERVERS) --- ... vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock) --- ... -- Check that all replicas converged to the same data -- and the state persists after restart. test_run:cmd("switch autobootstrap1") --- - true ... box.space.test:select() --- - - [1, 10] - [2, 20] - [3, 30] - [4, 40] - [5, 50] - [6, 60] - [7, 70] - [8, 80] - [9, 90] - [10, 100] ... test_run:cmd('restart server autobootstrap1') box.space.test:select() --- - - [1, 10] - [2, 20] - [3, 30] - [4, 40] - [5, 50] - [6, 60] - [7, 70] - [8, 80] - [9, 90] - [10, 100] ... test_run:cmd("switch autobootstrap2") --- - true ... box.space.test:select() --- - - [1, 10] - [2, 20] - [3, 30] - [4, 40] - [5, 50] - [6, 60] - [7, 70] - [8, 80] - [9, 90] - [10, 100] ... test_run:cmd('restart server autobootstrap2') box.space.test:select() --- - - [1, 10] - [2, 20] - [3, 30] - [4, 40] - [5, 50] - [6, 60] - [7, 70] - [8, 80] - [9, 90] - [10, 100] ... test_run:cmd("switch autobootstrap3") --- - true ... box.space.test:select() --- - - [1, 10] - [2, 20] - [3, 30] - [4, 40] - [5, 50] - [6, 60] - [7, 70] - [8, 80] - [9, 90] - [10, 100] ... test_run:cmd('restart server autobootstrap3') box.space.test:select() --- - - [1, 10] - [2, 20] - [3, 30] - [4, 40] - [5, 50] - [6, 60] - [7, 70] - [8, 80] - [9, 90] - [10, 100] ... -- Cleanup. test_run:cmd("switch default") --- - true ... test_run:drop_cluster(SERVERS) --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap.lua0000664000000000000000000000205713306560010023105 0ustar rootroot#!/usr/bin/env tarantool -- get instance name from filename (autobootstrap1.lua => autobootstrap1) local INSTANCE_ID = string.match(arg[0], "%d") local USER = 'cluster' local PASSWORD = 'somepassword' local SOCKET_DIR = require('fio').cwd() local function instance_uri(instance_id) --return 'localhost:'..(3310 + instance_id) return SOCKET_DIR..'/autobootstrap'..instance_id..'.sock'; end -- start console first require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = instance_uri(INSTANCE_ID); -- log_level = 7; replication = { USER..':'..PASSWORD..'@'..instance_uri(1); USER..':'..PASSWORD..'@'..instance_uri(2); USER..':'..PASSWORD..'@'..instance_uri(3); }; replication_connect_timeout = 0.5, }) box.once("bootstrap", function() local test_run = require('test_run').new() box.schema.user.create(USER, { password = PASSWORD }) box.schema.user.grant(USER, 'replication') box.schema.space.create('test', {engine = test_run:get_cfg('engine')}) box.space.test:create_index('primary') end) tarantool_1.9.1.26.g63eb81e3c/test/replication/lua/0000775000000000000000000000000013306560010020251 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/lua/fast_replica.lua0000664000000000000000000000425313306560010023414 0ustar rootroot function join(inspector, n) local path = os.getenv('TARANTOOL_SRC_DIR') for i=1,n do local rid = tostring(i) os.execute('mkdir -p tmp') os.execute('cp '..path..'/test/replication/replica.lua ./tmp/replica'..rid..'.lua') os.execute('chmod +x ./tmp/replica'..rid..'.lua') local out_dir = box.cfg.wal_dir inspector:cmd("create server replica"..rid.." 
with rpl_master=default, script='"..out_dir.."/../tmp/replica"..rid..".lua'") inspector:cmd("start server replica"..rid) end end function call_all(callback) local all = box.space._cluster:select{} for _, tuple in pairs(all) do local id = tuple[1] if id ~= box.info.id then callback(id) end end end function unregister(inspector, id) box.space._cluster:delete{id} end function start(inspector, id) inspector:cmd('start server replica'..tostring(id - 1)) end function stop(inspector, id) inspector:cmd('stop server replica'..tostring(id - 1)) end function wait(inspector, id) inspector:wait_lsn('replica'..tostring(id - 1), 'default') end function delete(inspector, id) inspector:cmd('stop server replica'..tostring(id - 1)) inspector:cmd('delete server replica'..tostring(id - 1)) end function drop(inspector, id) unregister(inspector, id) delete(inspector, id) end function start_all(inspector) call_all(function (id) start(inspector, id) end) end function stop_all(inspector) call_all(function (id) stop(inspector, id) end) end function wait_all(inspector) call_all(function (id) wait(inspector, id) end) end function drop_all(inspector) call_all(function (id) drop(inspector, id) end) end function vclock_diff(left, right) local diff = 0 for id, lsn in ipairs(left) do diff = diff + (right[id] or 0) - left[id] end for id, lsn in ipairs(right) do if left[id] == nil then diff = diff + right[id] end end return diff end return { join = join; start_all = start_all; stop_all = stop_all; wait_all = wait_all; drop_all = drop_all; vclock_diff = vclock_diff; unregister = unregister; delete = delete; } tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap_guest.result0000664000000000000000000000312713306560010025050 0ustar rootrootenv = require('test_run') --- ... vclock_diff = require('fast_replica').vclock_diff --- ... test_run = env.new() --- ... SERVERS = { 'autobootstrap_guest1', 'autobootstrap_guest2', 'autobootstrap_guest3' } --- ... 
-- -- Start servers -- test_run:create_cluster(SERVERS) --- ... -- -- Wait for full mesh -- test_run:wait_fullmesh(SERVERS) --- ... -- -- Check vclock -- vclock1 = test_run:get_vclock('autobootstrap_guest1') --- ... vclock_diff(vclock1, test_run:get_vclock('autobootstrap_guest2')) --- - 0 ... vclock_diff(vclock1, test_run:get_vclock('autobootstrap_guest3')) --- - 0 ... -- -- Insert rows on each server -- _ = test_run:cmd("switch autobootstrap_guest1") --- ... _ = box.space.test:insert({box.info.id}) --- ... _ = test_run:cmd("switch autobootstrap_guest2") --- ... _ = box.space.test:insert({box.info.id}) --- ... _ = test_run:cmd("switch autobootstrap_guest3") --- ... _ = box.space.test:insert({box.info.id}) --- ... _ = test_run:cmd("switch default") --- ... -- -- Synchronize -- vclock = test_run:get_cluster_vclock(SERVERS) --- ... vclock2 = test_run:wait_cluster_vclock(SERVERS, vclock) --- ... vclock_diff(vclock1, vclock2) --- - 3 ... -- -- Check result -- _ = test_run:cmd("switch autobootstrap_guest1") --- ... box.space.test:select() --- - - [1] - [2] - [3] ... _ = test_run:cmd("switch autobootstrap_guest2") --- ... box.space.test:select() --- - - [1] - [2] - [3] ... _ = test_run:cmd("switch autobootstrap_guest3") --- ... box.space.test:select() --- - - [1] - [2] - [3] ... _ = test_run:cmd("switch default") --- ... -- -- Stop servers -- test_run:drop_cluster(SERVERS) --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap3.lua0000777000000000000000000000000013306560010026570 2autobootstrap.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/ddl2.lua0000777000000000000000000000000013306560010022261 2ddl.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/quorum3.lua0000777000000000000000000000000013306560010023634 2quorum.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/suite.cfg0000664000000000000000000000045013306560010021301 0ustar rootroot{ "misc.test.lua": {}, "once.test.lua": {}, "on_replace.test.lua": {}, "status.test.lua": {}, "wal_off.test.lua": {}, "hot_standby.test.lua": {}, "rebootstrap.test.lua": {}, "*": { "memtx": {"engine": "memtx"}, "vinyl": {"engine": "vinyl"} } } tarantool_1.9.1.26.g63eb81e3c/test/replication/autobootstrap_guest3.lua0000777000000000000000000000000013306560010031206 2autobootstrap_guest.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/wal_off.lua0000664000000000000000000000044213306560010021610 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, wal_mode = 'none', replication_connect_timeout = 0.5, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication/master_quorum1.lua0000777000000000000000000000000013306560010026560 2master_quorum.luaustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication/hot_standby.result0000664000000000000000000001264313306560010023254 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... box.schema.user.grant('guest', 'replication') --- ... box.schema.func.create('_set_pri_lsn') --- ... box.schema.user.grant('guest', 'execute', 'function', '_set_pri_lsn') --- ... test_run:cmd("create server hot_standby with script='replication/hot_standby.lua', rpl_master=default") --- - true ... 
test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") --- - true ... test_run:cmd("start server hot_standby") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... test_run:cmd("set connection default, hot_standby, replica") fiber = require('fiber'); --- ... while box.info.id == 0 do fiber.sleep(0.01) end; --- ... while box.space['_priv']:len() < 1 do fiber.sleep(0.001) end; --- ... do local pri_id = '' local begin_lsn = 0 function _set_pri_lsn(_id, _lsn) pri_id = _id begin_lsn = _lsn end function _get_pri_lsn() return box.info.vclock[pri_id] end function _print_lsn() return (_get_pri_lsn() - begin_lsn + 1) end function _insert(_begin, _end) local a = {} for i = _begin, _end do table.insert(a, box.space.tweedledum:insert{i, 'the tuple '..i}) end return a end function _select(_begin, _end) local a = {} for i = _begin, _end do local tuple = box.space.tweedledum:get{i} if tuple ~= nil then table.insert(a, tuple) end end return a end function _wait_lsn(_lsnd) while _get_pri_lsn() < _lsnd + begin_lsn do fiber.sleep(0.001) end begin_lsn = begin_lsn + _lsnd end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... test_run:cmd("switch hot_standby") --- - true ... fiber = require('fiber') --- ... box.info.status --- - hot_standby ... test_run:cmd("switch default") --- - true ... fiber = require('fiber') --- ... box.info.status --- - running ... space = box.schema.space.create('tweedledum', {engine = engine}) --- ... index = space:create_index('primary', {type = 'tree'}) --- ... -- set begin lsn on master, replica and hot_standby. test_run:cmd("set variable replica_port to 'replica.listen'") --- - true ... REPLICA = require('uri').parse(tostring(replica_port)) --- ... REPLICA ~= nil --- - true ... a = (require 'net.box').connect(REPLICA.host, REPLICA.service) --- ... 
a:call('_set_pri_lsn', {box.info.id, box.info.lsn}) --- ... a:close() --- ... _insert(1, 10) --- - - [1, 'the tuple 1'] - [2, 'the tuple 2'] - [3, 'the tuple 3'] - [4, 'the tuple 4'] - [5, 'the tuple 5'] - [6, 'the tuple 6'] - [7, 'the tuple 7'] - [8, 'the tuple 8'] - [9, 'the tuple 9'] - [10, 'the tuple 10'] ... _select(1, 10) --- - - [1, 'the tuple 1'] - [2, 'the tuple 2'] - [3, 'the tuple 3'] - [4, 'the tuple 4'] - [5, 'the tuple 5'] - [6, 'the tuple 6'] - [7, 'the tuple 7'] - [8, 'the tuple 8'] - [9, 'the tuple 9'] - [10, 'the tuple 10'] ... test_run:cmd("switch replica") --- - true ... _wait_lsn(10) --- ... _select(1, 10) --- - - [1, 'the tuple 1'] - [2, 'the tuple 2'] - [3, 'the tuple 3'] - [4, 'the tuple 4'] - [5, 'the tuple 5'] - [6, 'the tuple 6'] - [7, 'the tuple 7'] - [8, 'the tuple 8'] - [9, 'the tuple 9'] - [10, 'the tuple 10'] ... test_run:cmd("stop server default") --- - true ... test_run:cmd("switch hot_standby") --- - true ... while box.info.status ~= 'running' do fiber.sleep(0.001) end --- ... test_run:cmd("switch replica") --- - true ... -- hot_standby.listen is garbage, since hot_standby.lua -- uses MASTER environment variable for its listen test_run:cmd("set variable hot_standby_port to 'hot_standby.master'") --- - true ... HOT_STANDBY = require('uri').parse(tostring(hot_standby_port)) --- ... HOT_STANDBY ~= nil --- - true ... a = (require 'net.box').connect(HOT_STANDBY.host, HOT_STANDBY.service) --- ... a:call('_set_pri_lsn', {box.info.id, box.info.lsn}) --- ... a:close() --- ... test_run:cmd("switch hot_standby") --- - true ... _insert(11, 20) --- - - [11, 'the tuple 11'] - [12, 'the tuple 12'] - [13, 'the tuple 13'] - [14, 'the tuple 14'] - [15, 'the tuple 15'] - [16, 'the tuple 16'] - [17, 'the tuple 17'] - [18, 'the tuple 18'] - [19, 'the tuple 19'] - [20, 'the tuple 20'] ... 
_select(11, 20) --- - - [11, 'the tuple 11'] - [12, 'the tuple 12'] - [13, 'the tuple 13'] - [14, 'the tuple 14'] - [15, 'the tuple 15'] - [16, 'the tuple 16'] - [17, 'the tuple 17'] - [18, 'the tuple 18'] - [19, 'the tuple 19'] - [20, 'the tuple 20'] ... test_run:cmd("switch replica") --- - true ... _wait_lsn(10) --- ... _select(11, 20) --- - - [11, 'the tuple 11'] - [12, 'the tuple 12'] - [13, 'the tuple 13'] - [14, 'the tuple 14'] - [15, 'the tuple 15'] - [16, 'the tuple 16'] - [17, 'the tuple 17'] - [18, 'the tuple 18'] - [19, 'the tuple 19'] - [20, 'the tuple 20'] ... test_run:cmd("deploy server default") --- - true ... test_run:cmd("start server default") --- - true ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server hot_standby") --- - true ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server hot_standby") --- - true ... test_run:cmd("cleanup server replica") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/0000775000000000000000000000000013306565107016610 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/wal_off/iterator_lt_gt.test.lua0000664000000000000000000000207113306560010023277 0ustar rootroot-- test for https://github.com/tarantool/tarantool/issues/769 env = require('test_run') test_run = env.new() s = box.schema.create_space('test') i = s:create_index('primary', { type = 'TREE', parts = {1, 'unsigned', 2, 'unsigned'} }) s:insert{0, 0} s:insert{2, 0} for i=1,10000 do s:insert{1, i} end test_itrs = {'EQ', 'REQ', 'GT', 'LT', 'GE', 'LE'} test_res = {} too_longs = {} test_run:cmd("setopt delimiter ';'") function test_run_itr(itr, key) for i=1,50 do local gen, param, state = s.index.primary:pairs({key}, {iterator = itr}) local state, v = gen(param, state) test_res[itr .. ' ' .. 
key] = v end end; jit.off(test_run_itr); for _,itr in pairs(test_itrs) do local t = os.clock() for key = 0,2 do test_run_itr(itr, key) end local diff = os.clock() - t if diff > 0.05 then table.insert(too_longs, 'Some of the iterators takes too long to position: '.. diff) end end; test_run:cmd("setopt delimiter ''"); test_res too_longs s:drop() test_itr = nil test_run_itr = nil test_itrs = nil s = nil 'done' tarantool_1.9.1.26.g63eb81e3c/test/wal_off/oom.result0000664000000000000000000002264013306565107020646 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') test_run:cmd("push filter 'error: Failed to allocate [0-9]+ ' to 'error: Failed to allocate '") --- - true ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... i = 1; --- ... while true do space:insert{space:len(), string.rep('test', i)} i = i + 1 end; --- - error: Failed to allocate bytes in slab allocator for memtx_tuple ... space:len() > 5000; --- - true ... i = 1; --- ... while true do space:insert{space:len(), string.rep('test', i)} i = i + 1 end; --- - error: Failed to allocate bytes in slab allocator for memtx_tuple ... space:len() > 5000; --- - true ... i = 1; --- ... while true do space:insert{space:len(), string.rep('test', i)} i = i + 1 end; --- - error: Failed to allocate bytes in slab allocator for memtx_tuple ... test_run:cmd("setopt delimiter ''"); --- - true ... space:len() > 5000 --- - true ... space.index['primary']:get{0} --- - [0, 'test'] ... space.index['primary']:get{5} --- - [5, 'testtesttesttesttesttest'] ... space.index['primary']:get{9} --- - [9, 'testtesttesttesttesttesttesttesttesttest'] ... space.index['primary']:get{11} --- - [11, 'testtesttesttesttesttesttesttesttesttesttesttest'] ... space.index['primary']:get{15} --- - [15, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] ... 
-- check that iterators work i = 0 --- ... t = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for state, v in space:pairs() do table.insert(t, v) i = i + 1 if i == 50 then break end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t --- - - [0, 'test'] - [1, 'testtest'] - [2, 'testtesttest'] - [3, 'testtesttesttest'] - [4, 'testtesttesttesttest'] - [5, 'testtesttesttesttesttest'] - [6, 'testtesttesttesttesttesttest'] - [7, 'testtesttesttesttesttesttesttest'] - [8, 'testtesttesttesttesttesttesttesttest'] - [9, 'testtesttesttesttesttesttesttesttesttest'] - [10, 'testtesttesttesttesttesttesttesttesttesttest'] - [11, 'testtesttesttesttesttesttesttesttesttesttesttest'] - [12, 'testtesttesttesttesttesttesttesttesttesttesttesttest'] - [13, 'testtesttesttesttesttesttesttesttesttesttesttesttesttest'] - [14, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [15, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [16, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [17, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [18, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [19, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [20, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [21, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [22, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [23, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [24, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [25, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [26, 
'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [27, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [28, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [29, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [30, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [31, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [32, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [33, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [34, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [35, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [36, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [37, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [38, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [39, 
'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [40, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [41, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [42, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [43, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [44, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [45, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [46, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [47, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [48, 'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] - [49, 
'testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest'] ... space:truncate() --- ... space:insert{0, 'test'} --- - [0, 'test'] ... space.index['primary']:get{0} --- - [0, 'test'] ... collectgarbage('collect') --- - 0 ... -- -- Check that statement-level rollback does not leak tuples -- space:truncate() --- ... function insert(a) space:insert(a) end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function dup_key() box.begin() space:insert{1} local i = 1 while i < 2000 do local status, _ = pcall(insert, {1, string.rep('test', i)}) if status then error('Unexpected success when inserting a duplicate') end if box.error.last().code ~= box.error.TUPLE_FOUND then box.error.raise() end i = i + 1 end box.commit() return i end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... dup_key() --- - 2000 ... space:select{} --- - - [1] ... -- -- Cleanup -- space:drop() --- ... t = nil --- ... -- https://github.com/tarantool/tarantool/issues/962 index:delete() failed test_run:cmd('restart server default') arena_bytes = box.cfg.memtx_memory --- ... str = string.rep('a', 15000) -- about size of index memory block --- ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... collectgarbage('collect') --- - 0 ... for i=1,10000 do space:insert{i, str} end --- - error: Failed to allocate bytes in slab allocator for memtx_tuple ... definitely_used = index:count() * 16 * 1024 --- ... 2 * definitely_used > arena_bytes -- at least half memory used --- - true ... to_del = index:count() --- ... for i=1,to_del do space:delete{i} end --- ... index:count() --- - 0 ... collectgarbage('collect') --- - 0 ... for i=1,10000 do space:insert{i, str} end --- - error: Failed to allocate bytes in slab allocator for memtx_tuple ... 
definitely_used = index:count() * 16 * 1024 --- ... 2 * definitely_used > arena_bytes -- at least half memory used --- - true ... space:truncate() --- ... index:count() --- - 0 ... space:drop() --- ... str = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/suite.ini0000664000000000000000000000015513306560010020427 0ustar rootroot[default] core = tarantool script = wal.lua description = tarantool/box, wal_mode = none is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/wal_off/alter.test.lua0000664000000000000000000000105213306565107021376 0ustar rootroot-- wal is off, good opportunity to test something more CPU intensive: env = require('test_run') test_run = env.new() -- need a clean server to count the number of tuple formats test_run:cmd('restart server default with cleanup=1') spaces = {} box.schema.FORMAT_ID_MAX test_run:cmd("setopt delimiter ';'") -- too many formats for k = 1, box.schema.FORMAT_ID_MAX, 1 do local s = box.schema.space.create('space'..k) table.insert(spaces, s) end; #spaces; -- cleanup for k, v in pairs(spaces) do v:drop() end; test_run:cmd("setopt delimiter ''"); tarantool_1.9.1.26.g63eb81e3c/test/wal_off/alter.result0000664000000000000000000000127513306565107021164 0ustar rootroot-- wal is off, good opportunity to test something more CPU intensive: env = require('test_run') --- ... test_run = env.new() --- ... -- need a clean server to count the number of tuple formats test_run:cmd('restart server default with cleanup=1') spaces = {} --- ... box.schema.FORMAT_ID_MAX --- - 65534 ... test_run:cmd("setopt delimiter ';'") --- - true ... -- too many formats for k = 1, box.schema.FORMAT_ID_MAX, 1 do local s = box.schema.space.create('space'..k) table.insert(spaces, s) end; --- - error: 'Tuple format limit reached: 65536' ... #spaces; --- - 65515 ... -- cleanup for k, v in pairs(spaces) do v:drop() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/wal_off/wal.lua0000664000000000000000000000036413306560010020065 0ustar rootroot#!/usr/bin/env tarantool box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", wal_mode = "none" } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/wal_off/wal_mode.result0000664000000000000000000000134513306560010021626 0ustar rootroottest_run = require('test_run').new() --- ... box.cfg.wal_mode --- - none ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... space:insert{1} --- - [1] ... space:insert{2} --- - [2] ... space:insert{3} --- - [3] ... space.index['primary']:get(1) --- - [1] ... space.index['primary']:get(2) --- - [2] ... space.index['primary']:get(3) --- - [3] ... space.index['primary']:get(4) --- ... box.snapshot() --- - ok ... _, e = pcall(box.snapshot) --- ... e.type, e.errno --- - null - null ... e.errno --- - null ... _, e = pcall(box.snapshot) --- ... e.type, e.errno --- - null - null ... e.errno --- - null ... space:drop() --- ... test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/wal_off/lua.test.lua0000664000000000000000000000433213306560010021040 0ustar rootrootenv = require('test_run') test_run = env.new() space = box.schema.space.create('tweedledum') index1 = space:create_index('primary', { type ='hash', parts = {1, 'string'}, unique = true }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) -- A test case for Bug#1042738 -- https://bugs.launchpad.net/tarantool/+bug/1042738 -- Iteration over a non-unique TREE index test_run:cmd("setopt delimiter ';'") for i = 1, 1000 do space:truncate() for j = 1, 30 do space:insert{tostring(j), os.time(), 1} end count = 0 for state, v in space.index[1]:pairs() do count = count + 1 end if count ~= 30 then error('bug at iteration '..i..', count is '..count) end end; test_run:cmd("setopt delimiter ''"); space:truncate() -- -- A test case for Bug#1043858 server crash on lua stack overflow on CentOS -- 5.4 -- for i = 1, 100000, 1 do space:insert{tostring(i), i} end local t1 = space.index['secondary']:select() space:drop() -- -- A test case for https://github.com/tarantool/tarantool/issues/65 -- Space does not exist error on repetitive access to space 0 in Lua -- space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) test_run:cmd("setopt delimiter ';'") function mktuple(n) local fields = { [n] = n } for i = 1,n do fields[i] = i end local t = space:replace(fields) assert(t[1] == 1, "tuple check") assert(t[n] == n, "tuple check") return string.format("count %u len %u", #t, t:bsize()) end; test_run:cmd("setopt delimiter ''"); mktuple(5000) mktuple(100000) space:drop() -- https://github.com/tarantool/tarantool/issues/1323 -- index:count() works too long fiber = require('fiber') s = box.schema.create_space('test') i1 = s:create_index('test', {parts = {1, 'unsigned'}}) for i = 1,10000 do s:insert{i} end count = 0 done = false function test1() for i = 1,100 do count = count + 
i1:count() end end function test2() for j = 1,100 do test1() fiber.sleep(0) end done = true end fib = fiber.create(test2) for i = 1,100 do if done then break end fiber.sleep(0.01) end done and "count was calculated fast enough" or "count took too long to calculate" count box.space.test:drop() tarantool_1.9.1.26.g63eb81e3c/test/wal_off/tuple.test.lua0000664000000000000000000000161013306560010021404 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("restart server default") -- -- Test various tuple bugs which do not require a write ahead log. -- -- ------------------------------------------------------- -- gh-372 Assertion with a function that inserts increasingly -- large tables -- ------------------------------------------------------- tester = box.schema.space.create('tester') index = tester:create_index('primary',{}) test_run:cmd("setopt delimiter ';'") function tuple_max() local n = 'a' while true do n = n..n local status, reason = pcall(tester.insert, tester, {#n, n}) if not status then return #n, reason end collectgarbage('collect') end end; test_run:cmd("setopt delimiter ''"); n, reason = tuple_max() n n + 32 >= box.cfg.memtx_max_tuple_size reason tester:drop() tuple_max = nil collectgarbage('collect') tarantool_1.9.1.26.g63eb81e3c/test/wal_off/snapshot_stress.test.lua0000664000000000000000000001612213306560010023521 0ustar rootroot-- The test emulates account system. There are increasing number or accounts -- and a lot of double entry transactions are made that moving random -- ammount from random account to another random accont. 
-- Snapshots are made every snapshot_interval seconds and then checked for consistency env = require('test_run') test_run = env.new() -- Settings: You may increase theese value to make test longer -- number of worker fibers: workers_count = 80 -- number of iterations per fiber (operations + add new account + add space) iteration_count = 8 -- number of operations per iterations operation_count = 8 -- limit of random string length in every account string_max_size = 128 -- initial number of accounts accounts_start = 5 -- delay between snapshots snapshot_interval = 0.005 fiber = require('fiber') fio = require('fio') log = require('log') tarantool_bin_path = arg[-1] work_dir = fio.cwd() script_path = fio.pathjoin(work_dir, 'snap_script.lua') cmd_template = [[/bin/sh -c 'cd "%s" && "%s" ./snap_script.lua 2> /dev/null']] cmd = string.format(cmd_template, work_dir, tarantool_bin_path) open_flags = {'O_CREAT', 'O_WRONLY', 'O_TRUNC'} script = fio.open(script_path, open_flags, tonumber('0777', 8)) script:write("os.exit(-1)") script:close() res = os.execute(cmd) str_res = 'precheck ' .. (res ~= 0 and ' ok(1)' or 'failed(1)') str_res script = fio.open(script_path, open_flags, tonumber('0777', 8)) script:write("os.exit(0)") script:close() res = os.execute(cmd) str_res = 'precheck ' .. 
(res == 0 and ' ok(2)' or 'failed(2)') str_res snap_search_wildcard = fio.pathjoin(box.cfg.memtx_dir, '*.snap'); snaps = fio.glob(snap_search_wildcard); initial_snap_count = #snaps if box.space.accounts then box.space.accounts:drop() end if box.space.operations then box.space.operations:drop() end if box.space.deleting then box.space.deleting:drop() end s1 = box.schema.create_space("accounts") i1 = s1:create_index('primary', { type = 'HASH', parts = {1, 'unsigned'} }) s2 = box.schema.create_space("operations") i2 = s2:create_index('primary', { type = 'HASH', parts = {1, 'unsigned'} }) s3 = box.schema.create_space("deleting") i3 = s3:create_index('primary', { type = 'TREE', parts = {1, 'unsigned'} }) n_accs = 0 n_ops = 0 n_spaces = 0 workers_done = 0 test_run:cmd("setopt delimiter ';'") garbage = {}; str = "" for i = 1,string_max_size do str = str .. '-' garbage[i - 1] = str end; function get_new_space_name() n_spaces = n_spaces + 1 return "test" .. tostring(n_spaces - 1) end; tmp = get_new_space_name() if box.space[tmp] then box.space[tmp]:drop() tmp = get_new_space_name() end tmp = nil n_spaces = 0 function get_rnd_acc() return math.floor(math.random() * n_accs) end; function get_rnd_val() return math.floor(math.random() * 10) end; function get_rnd_str() return garbage[math.floor(math.random() * string_max_size)] end; additional_spaces = { }; function add_space() local tmp_space = box.schema.create_space(get_new_space_name()) table.insert(additional_spaces, tmp_space) tmp_space:create_index('test') n_spaces = n_spaces + 1 end; function add_acc() s1:insert{n_accs, 0} n_accs = n_accs + 1 end; function add_op(n1, n2, v) s2:insert{n_ops, n1, n2, v} n_ops = n_ops + 1 end; function acc_add(n, v) s1:update({n}, {{'+', 2, v}, {'=', 3, get_rnd_str()}}) end; function do_op(n1, n2, v) box.begin() add_op(n1, n2, v) acc_add(n1, v) acc_add(n2, -v) box.commit() end; function do_rand_op() do_op(get_rnd_acc(), get_rnd_acc(), get_rnd_val()) end; function remove_smth() 
s3:delete{i3:min()[1]} end; function init() for i = 1,accounts_start do add_acc() end for i = 1,workers_count*iteration_count do s3:auto_increment{"I hate dentists!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"} end end; function work_itr() for j = 1,operation_count do do_rand_op() fiber.sleep(0) end add_acc() remove_smth() add_space() end; function work() for i = 1,iteration_count do if not pcall(work_itr) then log.info("work_itr failed") break end end workers_done = workers_done + 1 end; snaps_done = false; function snaps() while (workers_done ~= workers_count) do pcall(box.snapshot) fiber.sleep(snapshot_interval) end snaps_done = true end; function wait() while (not snaps_done) do fiber.sleep(0.01) end end; init(); log.info('Part I: creating snapshot start'); for i = 1,workers_count do fiber.create(work) end; local tmp_fib = fiber.create(snaps); wait(); log.info('Part I: creating snapshot done'); #s1:select{}; #s2:select{}; s1:drop(); s2:drop(); for k,v in pairs(additional_spaces) do v:drop() end; s1 = nil s2 = nil additional_spaces = nil; script_code = [[ fio = require'fio' new_snap_dir = "]] .. fio.pathjoin(box.cfg.memtx_dir, "snap_test") .. [[" os.execute("mkdir " .. new_snap_dir) os.execute("cp ]] .. fio.pathjoin(box.cfg.memtx_dir, "*.xlog") .. [[ " .. new_snap_dir .. "/") os.execute("cp ]] .. fio.pathjoin(box.cfg.memtx_dir, "*.vylog") .. [[ " .. new_snap_dir .. "/") os.execute("cp ]] .. fio.pathjoin(box.cfg.memtx_dir, "*.snap") .. [[ " .. new_snap_dir .. "/") box.cfg{ memtx_memory = 536870912, memtx_dir = new_snap_dir, wal_dir = new_snap_dir, vinyl_dir = new_snap_dir, wal_mode = "none" } log = require('log') s1 = box.space.accounts s2 = box.space.operations total_sum = 0 t1 = {} for k,v in s1:pairs() do t1[ v[1] ] = v[2] total_sum = total_sum + v[2] end if total_sum ~= 0 then log.info('error: total sum mismatch') os.execute("rm -r " .. 
new_snap_dir) os.exit(-1) end t2 = {} function acc_inc(n1, v) t2[n1] = (t2[n1] and t2[n1] or 0) + v end for k,v in s2:pairs() do acc_inc(v[2], v[4]) acc_inc(v[3], -v[4]) end bad = false for k,v in pairs(t1) do if (t2[k] and t2[k] or 0) ~= v then bad = true end end for k,v in pairs(t2) do if (t1[k] and t1[k] or 0) ~= v then bad = true end end if bad then log.info('error: operation apply mismatch') os.execute("rm -r " .. new_snap_dir) os.exit(-1) end log.info('success: snapshot is ok') os.execute("rm -r " .. new_snap_dir) os.exit(0) ]]; script = fio.open(script_path, open_flags, tonumber('0777', 8)) script:write(script_code) script:close() log.info('Part II: checking snapshot start'); snaps = fio.glob(snap_search_wildcard); snaps_find_status = #snaps <= initial_snap_count and 'where are my snapshots?' or 'snaps found'; snaps_find_status; snapshot_check_failed = false while #snaps > initial_snap_count do if not snapshot_check_failed and os.execute(cmd) ~= 0 then snapshot_check_failed = true end max_snap = nil for k,v in pairs(snaps) do if max_snap == nil or v > max_snap then max_snap = v max_snap_k = k end end if max_snap:sub(1, 1) ~= "/" then max_snap = fio.pathjoin(box.cfg.memtx_dir, max_snap) end fio.unlink(max_snap) max_vylog = fio.basename(max_snap, '.snap') .. '.vylog' max_vylog = fio.pathjoin(box.cfg.vinyl_dir, max_vylog) fio.unlink(max_vylog) snaps[max_snap_k] = nil end; snapshot_check_failed; log.info('Part II: checking snapshot done'); test_run:cmd("setopt delimiter ''"); tarantool_1.9.1.26.g63eb81e3c/test/wal_off/lua.result0000664000000000000000000000513013306560010020614 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... space = box.schema.space.create('tweedledum') --- ... index1 = space:create_index('primary', { type ='hash', parts = {1, 'string'}, unique = true }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) --- ... 
-- A test case for Bug#1042738 -- https://bugs.launchpad.net/tarantool/+bug/1042738 -- Iteration over a non-unique TREE index test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 1000 do space:truncate() for j = 1, 30 do space:insert{tostring(j), os.time(), 1} end count = 0 for state, v in space.index[1]:pairs() do count = count + 1 end if count ~= 30 then error('bug at iteration '..i..', count is '..count) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... space:truncate() --- ... -- -- A test case for Bug#1043858 server crash on lua stack overflow on CentOS -- 5.4 -- for i = 1, 100000, 1 do space:insert{tostring(i), i} end --- ... local t1 = space.index['secondary']:select() --- ... space:drop() --- ... -- -- A test case for https://github.com/tarantool/tarantool/issues/65 -- Space does not exist error on repetitive access to space 0 in Lua -- space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function mktuple(n) local fields = { [n] = n } for i = 1,n do fields[i] = i end local t = space:replace(fields) assert(t[1] == 1, "tuple check") assert(t[n] == n, "tuple check") return string.format("count %u len %u", #t, t:bsize()) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... mktuple(5000) --- - count 5000 len 14621 ... mktuple(100000) --- - count 100000 len 368553 ... space:drop() --- ... -- https://github.com/tarantool/tarantool/issues/1323 -- index:count() works too long fiber = require('fiber') --- ... s = box.schema.create_space('test') --- ... i1 = s:create_index('test', {parts = {1, 'unsigned'}}) --- ... for i = 1,10000 do s:insert{i} end --- ... count = 0 --- ... done = false --- ... function test1() for i = 1,100 do count = count + i1:count() end end --- ... function test2() for j = 1,100 do test1() fiber.sleep(0) end done = true end --- ... fib = fiber.create(test2) --- ... 
for i = 1,100 do if done then break end fiber.sleep(0.01) end --- ... done and "count was calculated fast enough" or "count took too long to calculate" --- - count was calculated fast enough ... count --- - 100000000 ... box.space.test:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/iterator_lt_gt.result0000664000000000000000000000274713306560010023070 0ustar rootroot-- test for https://github.com/tarantool/tarantool/issues/769 env = require('test_run') --- ... test_run = env.new() --- ... s = box.schema.create_space('test') --- ... i = s:create_index('primary', { type = 'TREE', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... s:insert{0, 0} s:insert{2, 0} --- ... for i=1,10000 do s:insert{1, i} end --- ... test_itrs = {'EQ', 'REQ', 'GT', 'LT', 'GE', 'LE'} --- ... test_res = {} --- ... too_longs = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function test_run_itr(itr, key) for i=1,50 do local gen, param, state = s.index.primary:pairs({key}, {iterator = itr}) local state, v = gen(param, state) test_res[itr .. ' ' .. key] = v end end; --- ... jit.off(test_run_itr); --- ... for _,itr in pairs(test_itrs) do local t = os.clock() for key = 0,2 do test_run_itr(itr, key) end local diff = os.clock() - t if diff > 0.05 then table.insert(too_longs, 'Some of the iterators takes too long to position: '.. diff) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_res --- - LE 1: [1, 10000] LT 2: [1, 10000] REQ 1: [1, 10000] EQ 2: [2, 0] LE 2: [2, 0] EQ 0: [0, 0] LT 1: [0, 0] GE 0: [0, 0] REQ 0: [0, 0] GT 0: [1, 1] GT 1: [2, 0] LE 0: [0, 0] REQ 2: [2, 0] EQ 1: [1, 1] GE 1: [1, 1] GE 2: [2, 0] ... too_longs --- - [] ... s:drop() --- ... test_itr = nil test_run_itr = nil test_itrs = nil s = nil --- ... 'done' --- - done ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/expirationd.test.lua0000664000000000000000000000176213306560010022611 0ustar rootrootfiber = require('fiber') -- test for expirationd. 
iterator must continue iterating after space insert/delete env = require('test_run') test_run = env.new() s0 = box.schema.space.create('tweedledum') i0 = s0:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) s0:insert{20000} test_run:cmd("setopt delimiter ';'") for i = 1, 10000 do a = math.floor(math.random() * 10000) s0:replace{a} end; hit_end = false; gen, param, state = i0:pairs({}, {iterator = box.index.ALL}); for i = 1, 10000 do for j = 1, 10 do state, tuple = gen(param, state) if (tuple) then if (tuple[1] == 20000) then hit_end = true end if (math.random() > 0.9) then s0:delete{tuple[1]} end else gen, param, state = i0:pairs({}, {iterator = box.index.ALL}) end end for j = 1, 5 do a = math.floor(math.random() * 10000) if #s0:select{a} == 0 then s0:insert{a} end end if hit_end then break end end; hit_end; test_run:cmd("setopt delimiter ''"); s0:drop() s0 = nil tarantool_1.9.1.26.g63eb81e3c/test/wal_off/snapshot_stress.result0000664000000000000000000001752513306560010023310 0ustar rootroot-- The test emulates account system. There are increasing number or accounts -- and a lot of double entry transactions are made that moving random -- ammount from random account to another random accont. -- Snapshots are made every snapshot_interval seconds and then checked for consistency env = require('test_run') --- ... test_run = env.new() --- ... -- Settings: You may increase theese value to make test longer -- number of worker fibers: workers_count = 80 --- ... -- number of iterations per fiber (operations + add new account + add space) iteration_count = 8 --- ... -- number of operations per iterations operation_count = 8 --- ... -- limit of random string length in every account string_max_size = 128 --- ... -- initial number of accounts accounts_start = 5 --- ... -- delay between snapshots snapshot_interval = 0.005 --- ... fiber = require('fiber') --- ... fio = require('fio') --- ... log = require('log') --- ... 
tarantool_bin_path = arg[-1] --- ... work_dir = fio.cwd() --- ... script_path = fio.pathjoin(work_dir, 'snap_script.lua') --- ... cmd_template = [[/bin/sh -c 'cd "%s" && "%s" ./snap_script.lua 2> /dev/null']] --- ... cmd = string.format(cmd_template, work_dir, tarantool_bin_path) --- ... open_flags = {'O_CREAT', 'O_WRONLY', 'O_TRUNC'} --- ... script = fio.open(script_path, open_flags, tonumber('0777', 8)) --- ... script:write("os.exit(-1)") --- - true ... script:close() --- - true ... res = os.execute(cmd) --- ... str_res = 'precheck ' .. (res ~= 0 and ' ok(1)' or 'failed(1)') --- ... str_res --- - precheck ok(1) ... script = fio.open(script_path, open_flags, tonumber('0777', 8)) --- ... script:write("os.exit(0)") --- - true ... script:close() --- - true ... res = os.execute(cmd) --- ... str_res = 'precheck ' .. (res == 0 and ' ok(2)' or 'failed(2)') --- ... str_res --- - precheck ok(2) ... snap_search_wildcard = fio.pathjoin(box.cfg.memtx_dir, '*.snap'); --- ... snaps = fio.glob(snap_search_wildcard); --- ... initial_snap_count = #snaps --- ... if box.space.accounts then box.space.accounts:drop() end --- ... if box.space.operations then box.space.operations:drop() end --- ... if box.space.deleting then box.space.deleting:drop() end --- ... s1 = box.schema.create_space("accounts") --- ... i1 = s1:create_index('primary', { type = 'HASH', parts = {1, 'unsigned'} }) --- ... s2 = box.schema.create_space("operations") --- ... i2 = s2:create_index('primary', { type = 'HASH', parts = {1, 'unsigned'} }) --- ... s3 = box.schema.create_space("deleting") --- ... i3 = s3:create_index('primary', { type = 'TREE', parts = {1, 'unsigned'} }) --- ... n_accs = 0 --- ... n_ops = 0 --- ... n_spaces = 0 --- ... workers_done = 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... garbage = {}; --- ... str = "" for i = 1,string_max_size do str = str .. '-' garbage[i - 1] = str end; --- ... function get_new_space_name() n_spaces = n_spaces + 1 return "test" .. 
tostring(n_spaces - 1) end; --- ... tmp = get_new_space_name() if box.space[tmp] then box.space[tmp]:drop() tmp = get_new_space_name() end tmp = nil n_spaces = 0 function get_rnd_acc() return math.floor(math.random() * n_accs) end; --- ... function get_rnd_val() return math.floor(math.random() * 10) end; --- ... function get_rnd_str() return garbage[math.floor(math.random() * string_max_size)] end; --- ... additional_spaces = { }; --- ... function add_space() local tmp_space = box.schema.create_space(get_new_space_name()) table.insert(additional_spaces, tmp_space) tmp_space:create_index('test') n_spaces = n_spaces + 1 end; --- ... function add_acc() s1:insert{n_accs, 0} n_accs = n_accs + 1 end; --- ... function add_op(n1, n2, v) s2:insert{n_ops, n1, n2, v} n_ops = n_ops + 1 end; --- ... function acc_add(n, v) s1:update({n}, {{'+', 2, v}, {'=', 3, get_rnd_str()}}) end; --- ... function do_op(n1, n2, v) box.begin() add_op(n1, n2, v) acc_add(n1, v) acc_add(n2, -v) box.commit() end; --- ... function do_rand_op() do_op(get_rnd_acc(), get_rnd_acc(), get_rnd_val()) end; --- ... function remove_smth() s3:delete{i3:min()[1]} end; --- ... function init() for i = 1,accounts_start do add_acc() end for i = 1,workers_count*iteration_count do s3:auto_increment{"I hate dentists!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"} end end; --- ... function work_itr() for j = 1,operation_count do do_rand_op() fiber.sleep(0) end add_acc() remove_smth() add_space() end; --- ... function work() for i = 1,iteration_count do if not pcall(work_itr) then log.info("work_itr failed") break end end workers_done = workers_done + 1 end; --- ... snaps_done = false; --- ... function snaps() while (workers_done ~= workers_count) do pcall(box.snapshot) fiber.sleep(snapshot_interval) end snaps_done = true end; --- ... function wait() while (not snaps_done) do fiber.sleep(0.01) end end; --- ... init(); --- ... log.info('Part I: creating snapshot start'); --- ... 
for i = 1,workers_count do fiber.create(work) end; --- ... local tmp_fib = fiber.create(snaps); --- ... wait(); --- ... log.info('Part I: creating snapshot done'); --- ... #s1:select{}; --- - 645 ... #s2:select{}; --- - 5120 ... s1:drop(); --- ... s2:drop(); --- ... for k,v in pairs(additional_spaces) do v:drop() end; --- ... s1 = nil s2 = nil additional_spaces = nil; --- ... script_code = [[ fio = require'fio' new_snap_dir = "]] .. fio.pathjoin(box.cfg.memtx_dir, "snap_test") .. [[" os.execute("mkdir " .. new_snap_dir) os.execute("cp ]] .. fio.pathjoin(box.cfg.memtx_dir, "*.xlog") .. [[ " .. new_snap_dir .. "/") os.execute("cp ]] .. fio.pathjoin(box.cfg.memtx_dir, "*.vylog") .. [[ " .. new_snap_dir .. "/") os.execute("cp ]] .. fio.pathjoin(box.cfg.memtx_dir, "*.snap") .. [[ " .. new_snap_dir .. "/") box.cfg{ memtx_memory = 536870912, memtx_dir = new_snap_dir, wal_dir = new_snap_dir, vinyl_dir = new_snap_dir, wal_mode = "none" } log = require('log') s1 = box.space.accounts s2 = box.space.operations total_sum = 0 t1 = {} for k,v in s1:pairs() do t1[ v[1] ] = v[2] total_sum = total_sum + v[2] end if total_sum ~= 0 then log.info('error: total sum mismatch') os.execute("rm -r " .. new_snap_dir) os.exit(-1) end t2 = {} function acc_inc(n1, v) t2[n1] = (t2[n1] and t2[n1] or 0) + v end for k,v in s2:pairs() do acc_inc(v[2], v[4]) acc_inc(v[3], -v[4]) end bad = false for k,v in pairs(t1) do if (t2[k] and t2[k] or 0) ~= v then bad = true end end for k,v in pairs(t2) do if (t1[k] and t1[k] or 0) ~= v then bad = true end end if bad then log.info('error: operation apply mismatch') os.execute("rm -r " .. new_snap_dir) os.exit(-1) end log.info('success: snapshot is ok') os.execute("rm -r " .. new_snap_dir) os.exit(0) ]]; --- ... script = fio.open(script_path, open_flags, tonumber('0777', 8)) script:write(script_code) script:close() log.info('Part II: checking snapshot start'); --- ... snaps = fio.glob(snap_search_wildcard); --- ... 
snaps_find_status = #snaps <= initial_snap_count and 'where are my snapshots?' or 'snaps found'; --- ... snaps_find_status; --- - snaps found ... snapshot_check_failed = false while #snaps > initial_snap_count do if not snapshot_check_failed and os.execute(cmd) ~= 0 then snapshot_check_failed = true end max_snap = nil for k,v in pairs(snaps) do if max_snap == nil or v > max_snap then max_snap = v max_snap_k = k end end if max_snap:sub(1, 1) ~= "/" then max_snap = fio.pathjoin(box.cfg.memtx_dir, max_snap) end fio.unlink(max_snap) max_vylog = fio.basename(max_snap, '.snap') .. '.vylog' max_vylog = fio.pathjoin(box.cfg.vinyl_dir, max_vylog) fio.unlink(max_vylog) snaps[max_snap_k] = nil end; --- ... snapshot_check_failed; --- - false ... log.info('Part II: checking snapshot done'); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/oom.test.lua0000664000000000000000000000516613306565107021073 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd('restart server default') test_run:cmd("push filter 'error: Failed to allocate [0-9]+ ' to 'error: Failed to allocate '") space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) test_run:cmd("setopt delimiter ';'") i = 1; while true do space:insert{space:len(), string.rep('test', i)} i = i + 1 end; space:len() > 5000; i = 1; while true do space:insert{space:len(), string.rep('test', i)} i = i + 1 end; space:len() > 5000; i = 1; while true do space:insert{space:len(), string.rep('test', i)} i = i + 1 end; test_run:cmd("setopt delimiter ''"); space:len() > 5000 space.index['primary']:get{0} space.index['primary']:get{5} space.index['primary']:get{9} space.index['primary']:get{11} space.index['primary']:get{15} -- check that iterators work i = 0 t = {} test_run:cmd("setopt delimiter ';'") for state, v in space:pairs() do table.insert(t, v) i = i + 1 if i == 50 then break end end; test_run:cmd("setopt delimiter 
''"); t space:truncate() space:insert{0, 'test'} space.index['primary']:get{0} collectgarbage('collect') -- -- Check that statement-level rollback does not leak tuples -- space:truncate() function insert(a) space:insert(a) end test_run:cmd("setopt delimiter ';'") function dup_key() box.begin() space:insert{1} local i = 1 while i < 2000 do local status, _ = pcall(insert, {1, string.rep('test', i)}) if status then error('Unexpected success when inserting a duplicate') end if box.error.last().code ~= box.error.TUPLE_FOUND then box.error.raise() end i = i + 1 end box.commit() return i end; test_run:cmd("setopt delimiter ''"); dup_key() space:select{} -- -- Cleanup -- space:drop() t = nil -- https://github.com/tarantool/tarantool/issues/962 index:delete() failed test_run:cmd('restart server default') arena_bytes = box.cfg.memtx_memory str = string.rep('a', 15000) -- about size of index memory block space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) collectgarbage('collect') for i=1,10000 do space:insert{i, str} end definitely_used = index:count() * 16 * 1024 2 * definitely_used > arena_bytes -- at least half memory used to_del = index:count() for i=1,to_del do space:delete{i} end index:count() collectgarbage('collect') for i=1,10000 do space:insert{i, str} end definitely_used = index:count() * 16 * 1024 2 * definitely_used > arena_bytes -- at least half memory used space:truncate() index:count() space:drop() str = nil tarantool_1.9.1.26.g63eb81e3c/test/wal_off/rtree_benchmark.result0000664000000000000000000000717513306560010023201 0ustar rootrootn_records = 10000 --- ... n_iterations = 10000 --- ... n_neighbors = 10 --- ... env = require('test_run') --- ... test_run = env.new() --- ... file = io.open("rtree_benchmark.res", "w") --- ... s = box.schema.space.create('rtreebench') --- ... _ = s:create_index('primary') --- ... _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) --- ... 
file:write(" *** 2D *** \n") --- - true ... rect_width = 180 / math.pow(n_records, 1 / 2) --- ... start = os.time() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, n_records do s:insert{i,{180*math.random(),180*math.random()}} end; --- ... file:write(string.format("Elapsed time for inserting %d records: %d\n", n_records, os.time() - start)); --- - true ... start = os.time(); --- ... n = 0; --- ... for i = 1, n_iterations do x = (180 - rect_width) * math.random() y = (180 - rect_width) * math.random() for k,v in s.index.spatial:pairs({x,y,x+rect_width,y+rect_width}, {iterator = 'LE'}) do n = n + 1 end end; --- ... file:write(string.format("Elapsed time for %d belongs searches selecting %d records: %d\n", n_iterations, n, os.time() - start)); --- - true ... start = os.time(); --- ... n = 0 for i = 1, n_iterations do x = 180 * math.random() y = 180 * math.random() for k,v in pairs(s.index.spatial:select({x,y }, {limit = n_neighbors, iterator = 'NEIGHBOR'})) do n = n + 1 end end; --- ... file:write(string.format("Elapsed time for %d nearest %d neighbors searches selecting %d records: %d\n", n_iterations, n_neighbors, n, os.time() - start)); --- - true ... start = os.time(); --- ... for i = 1, n_records do s:delete{i} end; --- ... file:write(string.format("Elapsed time for deleting %d records: %d\n", n_records, os.time() - start)); --- - true ... s:drop(); --- ... dimension = 8; --- ... s = box.schema.space.create('rtreebench'); --- ... _ = s:create_index('primary'); --- ... _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}, dimension = dimension}); --- ... file:write(" *** 8D *** \n") rect_width = 180 / math.pow(n_records, 1 / dimension) start = os.time(); --- ... for i = 1, n_records do local record = {} for j = 1, dimension do table.insert(record, 180*math.random()) end s:insert{i,record} end; --- ... 
file:write(string.format("Elapsed time for inserting %d records: %d\n", n_records, os.time() - start)); --- - true ... start = os.time(); --- ... n = 0; --- ... for i = 1, n_iterations do local rect = {} for j = 1, dimension do table.insert(rect, (180 - rect_width) * math.random()) end for j = 1, dimension do table.insert(rect, rect[j] + rect_width) end for k,v in s.index.spatial:pairs(rect, {iterator = 'LE'}) do n = n + 1 end end; --- ... file:write(string.format("Elapsed time for %d belongs searches selecting %d records: %d\n", n_iterations, n, os.time() - start)); --- - true ... start = os.time(); --- ... n = 0 for i = 1, 0 do local rect = {} for j = 1, dimension do table.insert(rect, 180*math.random()) end for k,v in pairs(s.index.spatial:select(rect, {limit = n_neighbors, iterator = 'NEIGHBOR'})) do n = n + 1 end end; --- ... file:write(string.format("Elapsed time for %d nearest %d neighbors searches selecting %d records: %d\n", n_iterations, n_neighbors, n, os.time() - start)); --- - true ... start = os.time(); --- ... for i = 1, n_records do s:delete{i} end; --- ... file:write(string.format("Elapsed time for deleting %d records: %d\n", n_records, os.time() - start)); --- - true ... s:drop(); --- ... file:close(); --- - true ... test_run:cmd("setopt delimiter ''"); --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/wal_off/func_max.test.lua0000664000000000000000000000133113306560010022053 0ustar rootrootsession = box.session session.su('admin') env = require('test_run') test_run = env.new() -- -- Check max function limit -- test_run:cmd("setopt delimiter ';'") function func_limit() local i = 1 while true do box.schema.func.create('func'..i) i = i + 1 end return i end; function drop_limit_func() local i = 1 while true do box.schema.func.drop('func'..i) i = i + 1 end end; func_limit(); drop_limit_func(); box.schema.user.create('testuser'); box.schema.user.grant('testuser', 'read, write, execute,create', 'universe'); session.su('testuser'); func_limit(); drop_limit_func(); session.su('admin') box.schema.user.drop('testuser'); test_run:cmd("setopt delimiter ''"); tarantool_1.9.1.26.g63eb81e3c/test/wal_off/tuple.result0000664000000000000000000000222513306560010021166 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("restart server default") -- -- Test various tuple bugs which do not require a write ahead log. -- -- ------------------------------------------------------- -- gh-372 Assertion with a function that inserts increasingly -- large tables -- ------------------------------------------------------- tester = box.schema.space.create('tester') --- ... index = tester:create_index('primary',{}) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function tuple_max() local n = 'a' while true do n = n..n local status, reason = pcall(tester.insert, tester, {#n, n}) if not status then return #n, reason end collectgarbage('collect') end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... n, reason = tuple_max() --- ... n --- - 1048576 ... n + 32 >= box.cfg.memtx_max_tuple_size --- - true ... reason --- - 'Failed to allocate 1048603 bytes for tuple: tuple is too large. Check ''memtx_max_tuple_size'' configuration option.' ... tester:drop() --- ... tuple_max = nil --- ... 
collectgarbage('collect') --- - 0 ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/func_max.result0000664000000000000000000000213113306560010021631 0ustar rootrootsession = box.session --- ... session.su('admin') --- ... env = require('test_run') --- ... test_run = env.new() --- ... -- -- Check max function limit -- test_run:cmd("setopt delimiter ';'") --- - true ... function func_limit() local i = 1 while true do box.schema.func.create('func'..i) i = i + 1 end return i end; --- ... function drop_limit_func() local i = 1 while true do box.schema.func.drop('func'..i) i = i + 1 end end; --- ... func_limit(); --- - error: 'A limit on the total number of functions has been reached: 32000' ... drop_limit_func(); --- - error: Function 'func32000' does not exist ... box.schema.user.create('testuser'); --- ... box.schema.user.grant('testuser', 'read, write, execute,create', 'universe'); --- ... session.su('testuser'); --- ... func_limit(); --- - error: 'A limit on the total number of functions has been reached: 32000' ... drop_limit_func(); --- - error: Function 'func32000' does not exist ... session.su('admin') box.schema.user.drop('testuser'); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/expirationd.result0000664000000000000000000000221013306560010022355 0ustar rootrootfiber = require('fiber') --- ... -- test for expirationd. iterator must continue iterating after space insert/delete env = require('test_run') --- ... test_run = env.new() --- ... s0 = box.schema.space.create('tweedledum') --- ... i0 = s0:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... s0:insert{20000} --- - [20000] ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 10000 do a = math.floor(math.random() * 10000) s0:replace{a} end; --- ... hit_end = false; --- ... gen, param, state = i0:pairs({}, {iterator = box.index.ALL}); --- ... 
for i = 1, 10000 do for j = 1, 10 do state, tuple = gen(param, state) if (tuple) then if (tuple[1] == 20000) then hit_end = true end if (math.random() > 0.9) then s0:delete{tuple[1]} end else gen, param, state = i0:pairs({}, {iterator = box.index.ALL}) end end for j = 1, 5 do a = math.floor(math.random() * 10000) if #s0:select{a} == 0 then s0:insert{a} end end if hit_end then break end end; --- ... hit_end; --- - true ... test_run:cmd("setopt delimiter ''"); --- - true ... s0:drop() --- ... s0 = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/wal_off/rtree_benchmark.test.lua0000664000000000000000000000632013306560010023411 0ustar rootrootn_records = 10000 n_iterations = 10000 n_neighbors = 10 env = require('test_run') test_run = env.new() file = io.open("rtree_benchmark.res", "w") s = box.schema.space.create('rtreebench') _ = s:create_index('primary') _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) file:write(" *** 2D *** \n") rect_width = 180 / math.pow(n_records, 1 / 2) start = os.time() test_run:cmd("setopt delimiter ';'") for i = 1, n_records do s:insert{i,{180*math.random(),180*math.random()}} end; file:write(string.format("Elapsed time for inserting %d records: %d\n", n_records, os.time() - start)); start = os.time(); n = 0; for i = 1, n_iterations do x = (180 - rect_width) * math.random() y = (180 - rect_width) * math.random() for k,v in s.index.spatial:pairs({x,y,x+rect_width,y+rect_width}, {iterator = 'LE'}) do n = n + 1 end end; file:write(string.format("Elapsed time for %d belongs searches selecting %d records: %d\n", n_iterations, n, os.time() - start)); start = os.time(); n = 0 for i = 1, n_iterations do x = 180 * math.random() y = 180 * math.random() for k,v in pairs(s.index.spatial:select({x,y }, {limit = n_neighbors, iterator = 'NEIGHBOR'})) do n = n + 1 end end; file:write(string.format("Elapsed time for %d nearest %d neighbors searches selecting %d records: %d\n", n_iterations, n_neighbors, n, os.time() - 
start)); start = os.time(); for i = 1, n_records do s:delete{i} end; file:write(string.format("Elapsed time for deleting %d records: %d\n", n_records, os.time() - start)); s:drop(); dimension = 8; s = box.schema.space.create('rtreebench'); _ = s:create_index('primary'); _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}, dimension = dimension}); file:write(" *** 8D *** \n") rect_width = 180 / math.pow(n_records, 1 / dimension) start = os.time(); for i = 1, n_records do local record = {} for j = 1, dimension do table.insert(record, 180*math.random()) end s:insert{i,record} end; file:write(string.format("Elapsed time for inserting %d records: %d\n", n_records, os.time() - start)); start = os.time(); n = 0; for i = 1, n_iterations do local rect = {} for j = 1, dimension do table.insert(rect, (180 - rect_width) * math.random()) end for j = 1, dimension do table.insert(rect, rect[j] + rect_width) end for k,v in s.index.spatial:pairs(rect, {iterator = 'LE'}) do n = n + 1 end end; file:write(string.format("Elapsed time for %d belongs searches selecting %d records: %d\n", n_iterations, n, os.time() - start)); start = os.time(); n = 0 for i = 1, 0 do local rect = {} for j = 1, dimension do table.insert(rect, 180*math.random()) end for k,v in pairs(s.index.spatial:select(rect, {limit = n_neighbors, iterator = 'NEIGHBOR'})) do n = n + 1 end end; file:write(string.format("Elapsed time for %d nearest %d neighbors searches selecting %d records: %d\n", n_iterations, n_neighbors, n, os.time() - start)); start = os.time(); for i = 1, n_records do s:delete{i} end; file:write(string.format("Elapsed time for deleting %d records: %d\n", n_records, os.time() - start)); s:drop(); file:close(); test_run:cmd("setopt delimiter ''"); tarantool_1.9.1.26.g63eb81e3c/test/wal_off/wal_mode.test.lua0000664000000000000000000000074613306560010022053 0ustar rootroottest_run = require('test_run').new() box.cfg.wal_mode space = box.schema.space.create('tweedledum') 
index = space:create_index('primary', { type = 'hash' }) space:insert{1} space:insert{2} space:insert{3} space.index['primary']:get(1) space.index['primary']:get(2) space.index['primary']:get(3) space.index['primary']:get(4) box.snapshot() _, e = pcall(box.snapshot) e.type, e.errno e.errno _, e = pcall(box.snapshot) e.type, e.errno e.errno space:drop() test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/test-run.py0000777000000000000000000000000013306560010023434 2../test-run/test-run.pyustar rootroottarantool_1.9.1.26.g63eb81e3c/test/CMakeLists.txt0000664000000000000000000000345013306560010017721 0ustar rootrootenable_tnt_compile_flags() include_directories(${LUAJIT_INCLUDE_DIRS}) include_directories(${MSGPUCK_INCLUDE_DIRS}) function(build_module module files) add_library(${module} SHARED ${files}) target_link_libraries(${module} ${MSGPUCK_LIBRARIES}) set_target_properties(${module} PROPERTIES PREFIX "") add_dependencies(${module} api) if(TARGET_OS_DARWIN) set_target_properties(${module} PROPERTIES LINK_FLAGS "-undefined dynamic_lookup") endif(TARGET_OS_DARWIN) endfunction() add_compile_flags("C;CXX" "-Wno-unused-parameter") if(POLICY CMP0037) cmake_policy(SET CMP0037 OLD) endif(POLICY CMP0037) add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/small COMMAND ${CMAKE_COMMAND} -E create_symlink ${PROJECT_SOURCE_DIR}/src/lib/small/test/ ${CMAKE_CURRENT_BINARY_DIR}/small COMMENT Create a symlink for libsmall to fix out-of-source tests) add_custom_target(symlink_small_tests ALL DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/small) add_custom_target(test DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/small COMMAND ${PROJECT_SOURCE_DIR}/test/test-run.py --builddir=${PROJECT_BINARY_DIR} --vardir=${PROJECT_BINARY_DIR}/test/var) add_custom_target(test-force DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/small COMMAND ${PROJECT_SOURCE_DIR}/test/test-run.py --builddir=${PROJECT_BINARY_DIR} --vardir=${PROJECT_BINARY_DIR}/test/var --force) add_subdirectory(app) add_subdirectory(app-tap) 
add_subdirectory(box) add_subdirectory(unit) # Move tarantoolctl config if (NOT ${PROJECT_BINARY_DIR} STREQUAL ${PROJECT_SOURCE_DIR}) configure_file( "${PROJECT_SOURCE_DIR}/test/.tarantoolctl" "${PROJECT_BINARY_DIR}/test/.tarantoolctl" ) endif() # Disable connector_c for 1.6 #add_subdirectory(connector_c) tarantool_1.9.1.26.g63eb81e3c/test/unit/0000775000000000000000000000000013306565107016152 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/unit/cbus_stress.c0000664000000000000000000002145113306560010020644 0ustar rootroot#include #include #include #include #include #include #include "memory.h" #include "fiber.h" #include "cbus.h" #include "unit.h" /* * Number of test threads. * * Each test thread will connect to, disconnect from, and send * messages to random neighbors in a loop. */ static const int thread_count = 32; /* Number of loop iterations performed by each test thread. */ static const int loop_count = 300; /* Chance of connecting to a random neighbor in a loop iteration. */ static const int connect_prob = 30; /* Chance of disconnecting from a random neighbor in a loop iteration. */ static const int disconnect_prob = 20; /* This structure represents a connection to a test thread. */ struct conn { bool active; struct cpipe to; struct cpipe from; }; /* Test thread. */ struct thread { /* Thread id (between 0 and thread_count - 1, inclusive) */ int id; /* Name of endpoint hosted by this thread. */ char name[32]; /* Cord corresponding to this thread. */ struct cord cord; /* Pipe from this to the main thread. */ struct cpipe main_pipe; /* Pipe from the main to this thread. */ struct cpipe thread_pipe; /* Test thread id => connection. */ struct conn *connections; /* * Array of connected thread ids. * Used for picking a random thread to connect to. */ int *connected; int connected_count; /* * Array of disconnected thread ids. * Used for picking a random thread to disconnect from. 
*/ int *disconnected; int disconnected_count; /* * This message is sent: * - from the main thread to this thread to signal test start * - from this thread to the main thread when the test is complete */ struct cmsg cmsg; /* * Number of messages sent/received by this thread. * Sum 'send' must be equal to sum 'received' over all test threads. */ int sent, received; }; /* Array of test threads. */ static struct thread *threads; /* * Number of threads that are still performing the test. * When it reaches 0, the main thread is signalled to stop. */ static int active_thread_count; static const char * thread_name(int id) { return threads[id].name; } static int thread_func(va_list ap); /* Spawn a test thread. */ static void thread_create(struct thread *t, int id) { assert(thread_count > 1); assert(id >= 0 && id < thread_count); const int neighbor_count = thread_count - 1; t->id = id; snprintf(t->name, sizeof(t->name), "thread_%d", id); assert(t->name != NULL); t->connections = calloc(thread_count, sizeof(*t->connections)); assert(t->connections != NULL); t->connected_count = 0; t->connected = calloc(neighbor_count, sizeof(*t->connected)); assert(t->connected != NULL); t->disconnected_count = 0; t->disconnected = calloc(neighbor_count, sizeof(*t->disconnected)); assert(t->disconnected != NULL); /* Initially, we are not connected to anyone. 
*/ for (int i = 0; i < thread_count; i++) { if (i == id) continue; /* can't connect to self */ t->disconnected[t->disconnected_count++] = i; } assert(t->disconnected_count == neighbor_count); t->sent = t->received = 0; active_thread_count++; if (cord_costart(&t->cord, t->name, thread_func, t) != 0) unreachable(); cpipe_create(&t->thread_pipe, t->name); } static int test_func(va_list ap); static void thread_start_test_cb(struct cmsg *cmsg) { struct thread *t = container_of(cmsg, struct thread, cmsg); struct fiber *test_fiber = fiber_new("test", test_func); assert(test_fiber != NULL); fiber_start(test_fiber, t); } /* Signal a test thread to start the test. */ static void thread_start_test(struct thread *t) { static struct cmsg_hop start_route[] = { { thread_start_test_cb, NULL } }; cmsg_init(&t->cmsg, start_route); cpipe_push(&t->thread_pipe, &t->cmsg); } /* Join a test thread. */ static void thread_destroy(struct thread *t) { cbus_stop_loop(&t->thread_pipe); cpipe_destroy(&t->thread_pipe); if (cord_join(&t->cord) != 0) unreachable(); free(t->connected); free(t->disconnected); free(t->connections); } /* Connect to the test thread with the given id. */ static void thread_connect(struct thread *t, int dest_id) { assert(dest_id != t->id); assert(dest_id < thread_count); struct conn *conn = &t->connections[dest_id]; assert(!conn->active); cbus_pair(thread_name(dest_id), t->name, &conn->to, &conn->from, NULL, NULL, NULL); conn->active = true; } /* Disconnect from the test thread with the given id. */ static void thread_disconnect(struct thread *t, int dest_id) { assert(dest_id != t->id); assert(dest_id < thread_count); struct conn *conn = &t->connections[dest_id]; assert(conn->active); cbus_unpair(&conn->to, &conn->from, NULL, NULL, NULL); conn->active = false; } /* Connect to a random test thread. 
*/ static void thread_connect_random(struct thread *t) { assert(t->disconnected_count > 0); assert(t->connected_count + t->disconnected_count == thread_count - 1); int idx = rand() % t->disconnected_count; int dest_id = t->disconnected[idx]; t->disconnected[idx] = t->disconnected[--t->disconnected_count]; t->connected[t->connected_count++] = dest_id; thread_connect(t, dest_id); } /* Disconnect from a random test thread. */ static void thread_disconnect_random(struct thread *t) { assert(t->connected_count > 0); assert(t->connected_count + t->disconnected_count == thread_count - 1); int idx = rand() % t->connected_count; int dest_id = t->connected[idx]; t->connected[idx] = t->connected[--t->connected_count]; t->disconnected[t->disconnected_count++] = dest_id; thread_disconnect(t, dest_id); } struct thread_msg { struct cmsg cmsg; int dest_id; }; static void thread_msg_received_cb(struct cmsg *cmsg) { struct thread_msg *msg = container_of(cmsg, struct thread_msg, cmsg); struct thread *t = &threads[msg->dest_id]; t->received++; free(msg); } /* Send a message to the test thread with the given id. */ static void thread_send(struct thread *t, int dest_id) { static struct cmsg_hop route[] = { { thread_msg_received_cb, NULL } }; struct conn *c = &t->connections[dest_id]; assert(c->active); struct thread_msg *msg = malloc(sizeof(*msg)); assert(msg != NULL); cmsg_init(&msg->cmsg, route); msg->dest_id = dest_id; cpipe_push(&c->to, &msg->cmsg); t->sent++; } /* Send a message to a random connected test thread. 
*/ static void thread_send_random(struct thread *t) { assert(t->connected_count > 0); int idx = rand() % t->connected_count; int dest_id = t->connected[idx]; thread_send(t, dest_id); } static void test_iter(struct thread *t) { if (t->disconnected_count > 0 && (t->connected_count == 0 || rand() % 100 < connect_prob)) thread_connect_random(t); if (t->connected_count > 1 && rand() % 100 < disconnect_prob) thread_disconnect_random(t); thread_send_random(t); } static void test_complete_cb(struct cmsg *cmsg) { (void)cmsg; assert(active_thread_count > 0); if (--active_thread_count == 0) { /* Stop the main fiber when all workers are done. */ fiber_cancel(fiber()); } } static int test_func(va_list ap) { struct thread *t = va_arg(ap, struct thread *); /* Perform the test. */ for (int i = 0; i < loop_count; i++) { test_iter(t); fiber_yield_timeout(0); } /* Disconnect from all neighbors. */ for (int i = 0; i < thread_count; i++) { struct conn *c = &t->connections[i]; if (c->active) thread_disconnect(t, i); } /* Notify the main thread that we are done. 
*/ static struct cmsg_hop complete_route[] = { { test_complete_cb, NULL } }; cmsg_init(&t->cmsg, complete_route); cpipe_push(&t->main_pipe, &t->cmsg); return 0; } static int thread_func(va_list ap) { struct thread *t = va_arg(ap, struct thread *); cpipe_create(&t->main_pipe, "main"); struct cbus_endpoint endpoint; cbus_endpoint_create(&endpoint, t->name, fiber_schedule_cb, fiber()); cbus_loop(&endpoint); cbus_endpoint_destroy(&endpoint, cbus_process); cpipe_destroy(&t->main_pipe); return 0; } static int main_func(va_list ap) { (void)ap; struct cbus_endpoint endpoint; cbus_endpoint_create(&endpoint, "main", fiber_schedule_cb, fiber()); threads = calloc(thread_count, sizeof(*threads)); assert(threads != NULL); for (int i = 0; i < thread_count; i++) thread_create(&threads[i], i); for (int i = 0; i < thread_count; i++) thread_start_test(&threads[i]); cbus_loop(&endpoint); int sent = 0, received = 0; for (int i = 0; i < thread_count; i++) { struct thread *t = &threads[i]; sent += t->sent; received += t->received; thread_destroy(t); } assert(sent == received); cbus_endpoint_destroy(&endpoint, cbus_process); free(threads); threads = NULL; ev_break(loop(), EVBREAK_ALL); return 0; } int main() { srand(time(NULL)); memory_init(); fiber_init(fiber_c_invoke); cbus_init(); header(); struct fiber *main_fiber = fiber_new("main", main_func); assert(main_fiber != NULL); fiber_wakeup(main_fiber); ev_run(loop(), 0); footer(); cbus_free(); fiber_free(); memory_free(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_channel.result0000664000000000000000000000122013306560010022150 0ustar rootroot *** fiber_channel_basic *** 1..10 ok 1 - fiber_channel_new() ok 2 - fiber_channel_size() ok 3 - fiber_channel_count() ok 4 - fiber_channel_is_full() ok 5 - fiber_channel_is_empty() ok 6 - fiber_channel_size(1) ok 7 - fiber_channel_count(1) ok 8 - fiber_channel_is_full(1) ok 9 - fiber_channel_is_empty(1) ok 10 - fiber_channel_get() *** fiber_channel_basic: done *** *** fiber_channel_get *** 1..7 
ok 1 - fiber_channel_put(0) ok 2 - fiber_channel_put_timeout(0) ok 3 - fiber_channel_get(0) ok 4 - fiber_channel_put_timeout(1) ok 5 - fiber_channel_get(1) ok 6 - fiber_channel_put(closed) ok 7 - fiber_channel_get(closed) *** fiber_channel_get: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/reflection_c.result0000664000000000000000000000011513306560010022027 0ustar rootroot1..4 ok 1 - assignable ok 2 - assignable ok 3 - assignable ok 4 - assignable tarantool_1.9.1.26.g63eb81e3c/test/unit/cbus.c0000664000000000000000000001460613306560010017245 0ustar rootroot#include "memory.h" #include "fiber.h" #include "cbus.h" #include "unit.h" #include "trigger.h" /** * Test triggers on cpipe flush. Cpipe flush send all buffered * messages to a consumer. Flush is called either at the end of * an event loop, or when a messages queue is full. This event * can be used to make some prepare actions before flush. */ /** Counter of flush events. */ static int flushed_cnt = 0; /** Expected value of flushed_cnt at the end of the test. */ static int expected_flushed_cnt = 0; /** * Worker thread. In the test only one worker is started and the * main thread sends to it messages to trigger tests one by one. */ struct cord worker; /** Queue of messages from the main to the worker thread. */ struct cpipe pipe_to_worker; /** Queue of messages from the worker to the main thread. */ struct cpipe pipe_to_main; /** * Trigger which is called on flush to the main thread event. Here * we test only this flush direction (from worker to main), becase * the direction from the main to the worker works in the same * way. */ struct trigger on_flush_to_main; /** Common callbacks. {{{ ------------------------------------- */ /** Dummy callback to fill cmsg rotes with more hops. */ static void do_nothing(struct cmsg *m) { (void) m; } /** Callback called on each flush to the main thread. 
*/ static void flush_cb(struct trigger *t, void *e) { (void) t; (void) e; ++flushed_cnt; printf("flush event, counter = %d\n", flushed_cnt); } /** Callback to finish the test. It breaks the main event loop. */ static void finish_execution(struct cmsg *m) { (void) m; fiber_cancel(fiber()); printf("break main fiber and finish test\n"); is(flushed_cnt, expected_flushed_cnt, "flushed_cnt at the end of the test"); } /** }}} Common callbacks. ------------------------------------- */ /** Worker routines. {{{ -------------------------------------- */ static int worker_f(va_list ap) { (void) ap; cpipe_create(&pipe_to_main, "main"); struct cbus_endpoint endpoint; cbus_endpoint_create(&endpoint, "worker", fiber_schedule_cb, fiber()); cbus_loop(&endpoint); cbus_endpoint_destroy(&endpoint, cbus_process); cpipe_destroy(&pipe_to_main); return 0; } static void worker_start() { printf("start worker\n"); fail_if(cord_costart(&worker, "worker", worker_f, NULL) != 0); cpipe_create(&pipe_to_worker, "worker"); } static void worker_stop() { printf("finish worker\n"); cbus_stop_loop(&pipe_to_worker); cpipe_destroy(&pipe_to_worker); fail_if(cord_join(&worker) != 0); } /** }}} Worker routines. -------------------------------------- */ /** * Test that if messages are not too many, the flush callback * is called only once per event loop, even if multiple flush * events are created. 
{{{ --------------------------------------- */ static void do_forced_flush(struct cmsg *m) { (void) m; static struct cmsg_hop forced_flush_rote = { do_nothing, NULL }; static struct cmsg_hop finish_route = { finish_execution, NULL }; static struct cmsg forced_flush_msg; static struct cmsg finish_msg; cmsg_init(&forced_flush_msg, &forced_flush_rote); cmsg_init(&finish_msg, &finish_route); cpipe_push(&pipe_to_main, &forced_flush_msg); cpipe_flush_input(&pipe_to_main); cpipe_push(&pipe_to_main, &finish_msg); expected_flushed_cnt = 1; } static void test_forced_flush(struct cmsg *m) { (void) m; is(flushed_cnt, 1, "1 flush after test_several_messages"); printf("\n*** Test forced flush ***\n"); flushed_cnt = 0; static struct cmsg_hop test_forced_flush_route = { do_forced_flush, NULL }; static struct cmsg test_forced_flush_msg; cmsg_init(&test_forced_flush_msg, &test_forced_flush_route); cpipe_push(&pipe_to_worker, &test_forced_flush_msg); } /** }}} Test forced flush. ------------------------------------ */ /** * Test that flush is called once per event loop event if several * messages was pushed. {{{ -------------------------------------- */ /** Do some event and check flush to was not called. */ static void do_some_event(struct cmsg *m) { (void) m; is(flushed_cnt, 0, "no flush during loop"); } /** * Create the following scenario for the worker: * do_some_event() -> do_some_event() -> do_nothing() -> flush(). * Each do_some_event cheks, that flush was not called. 
*/ static void test_several_messages(struct cmsg *m) { (void) m; is(flushed_cnt, 1, "1 flush after test_single_msg"); printf("\n*** Test several messages ***\n"); flushed_cnt = 0; static struct cmsg_hop test_event_route[] = { { do_some_event, &pipe_to_main }, { do_nothing, NULL }, }; static struct cmsg_hop test_several_msg_route[] = { { do_some_event, &pipe_to_main }, { test_forced_flush, NULL }, }; static struct cmsg test_event_msg[2]; static struct cmsg test_several_msg; cmsg_init(&test_event_msg[0], test_event_route); cmsg_init(&test_event_msg[1], test_event_route); cmsg_init(&test_several_msg, test_several_msg_route); cpipe_push(&pipe_to_worker, &test_event_msg[0]); cpipe_push(&pipe_to_worker, &test_event_msg[1]); cpipe_push(&pipe_to_worker, &test_several_msg); } /** }}} Test several messages. -------------------------------- */ /** * Test that flush trigger works for a single message. * {{{ ----------------------------------------------------------- */ static void test_single_msg() { printf("\n*** Test single message ***\n"); static struct cmsg_hop test_single_flush_route[] = { { do_nothing, &pipe_to_main }, /* Schedule the next test. */ { test_several_messages, NULL }, }; static struct cmsg test_msg; cmsg_init(&test_msg, test_single_flush_route); cpipe_push(&pipe_to_worker, &test_msg); } /** }}} Test single message. 
---------------------------------- */ static int main_f(va_list ap) { (void) ap; struct cbus_endpoint endpoint; cbus_endpoint_create(&endpoint, "main", fiber_schedule_cb, fiber()); worker_start(); trigger_create(&on_flush_to_main, flush_cb, NULL, NULL); trigger_add(&pipe_to_main.on_flush, &on_flush_to_main); test_single_msg(); cbus_loop(&endpoint); worker_stop(); cbus_endpoint_destroy(&endpoint, cbus_process); ev_break(loop(), EVBREAK_ALL); return 0; } int main() { header(); plan(6); memory_init(); fiber_init(fiber_c_invoke); cbus_init(); printf("start main fiber\n"); struct fiber *main_fiber = fiber_new("main", main_f); assert(main_fiber != NULL); fiber_wakeup(main_fiber); printf("start main loop\n"); ev_run(loop(), 0); printf("finish main loop\n"); cbus_free(); fiber_free(); memory_free(); int rc = check_plan(); footer(); return rc; } tarantool_1.9.1.26.g63eb81e3c/test/unit/rope.c0000664000000000000000000000336713306560010017260 0ustar rootroot#include "salad/rope.h" #include "unit.h" #include "rope_common.h" static void test_rope_extract(struct rope *rope, rope_size_t pos) { printf("extract pos = %zu: ", (size_t) pos); struct rope_node *node = rope_extract_node(rope, pos); rope_check(rope); str_print(node->data, node->leaf_size); printf("\n"); } static inline void test_rope_cut(struct rope *rope, rope_size_t offset, rope_size_t size) { printf("erase offset = %zu, size = %zu \n", (size_t) offset, (size_t) size); while (size-- > 0) rope_erase(rope, offset); rope_pretty_print(rope, str_print); rope_check(rope); } static void test_rope() { struct rope *rope = test_rope_new(); test_rope_insert(rope, rope_size(rope), "who's gonna be"); test_rope_insert(rope, rope_size(rope), ""); test_rope_insert(rope, rope_size(rope), ", Mr. 
Black"); test_rope_insert(rope, rope_size(rope), ", but they You got four of "); test_rope_insert(rope, rope_size(rope), "special> don't know each other"); test_rope_insert(rope, -1, ", so nobody wants to back."); test_rope_insert(rope, rope_size(rope) - 1, " down"); test_rope_insert(rope, -1, ""); test_rope_cut(rope, 0, 5); test_rope_cut(rope, 0, 9); test_rope_cut(rope, 179, 7); test_rope_cut(rope, 173, 1); test_rope_cut(rope, 58, 7); test_rope_cut(rope, 63, 10); test_rope_cut(rope, 79, 25); test_rope_cut(rope, 25, 5); test_rope_cut(rope, 126, 5); test_rope_extract(rope, 0); test_rope_extract(rope, 5); test_rope_extract(rope, 19); test_rope_extract(rope, 59); test_rope_extract(rope, 124); rope_delete(rope); } int main() { test_rope(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/int96.result0000664000000000000000000000004213306560010020343 0ustar rootroot *** test *** *** test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/column_mask.c0000664000000000000000000001357513306560010020625 0ustar rootroot#include "column_mask.h" #include "tuple_update.h" #include "unit.h" #include "msgpuck.h" #include "trivia/util.h" #define MAX_OPS 20 #define MAX_FIELDS 100 #define LONG_TUPLE {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} #define LONG_TUPLE_LEN 70 /** Template for a tuple creation. */ struct tuple_op_template { /** Op: '=', '+', ... */ char op; /** Field number. */ int fieldno; /* * Parameter of the operation. Only unsigned integers are * allowed. */ int arg; }; /** Template for update operations array. */ struct tuple_update_template { /** Update operation templates. */ const struct tuple_op_template ops[MAX_OPS]; /** Actual length of the @ops. */ int count; }; /** Template for a tuple creation. */ struct tuple_template { /** Tuple fields. Only unsigned integers are allowed. 
*/ const int fields[MAX_FIELDS]; /** Actual length of the @fields. */ int count; }; /* * Create a new raw tuple from a template. * @param tuple Tuple template. * @param[out] end End of the result raw tuple. * * @retval Begining of the new raw tuple. */ static char * tuple_new_raw(const struct tuple_template *tuple, char **end) { size_t size = mp_sizeof_array(tuple->count); for (int i = 0; i < tuple->count; ++i) size += mp_sizeof_uint(tuple->fields[i]); char *ret = (char *)malloc(size); fail_if(ret == NULL); char *pos = mp_encode_array(ret, tuple->count); for (int i = 0; i < tuple->count; ++i) pos = mp_encode_uint(pos, tuple->fields[i]); *end = pos; return ret; } /** * Create a new update operations array from a template. * @param update Update template. * @param[out] end End of the result array. * * @retval Beginning of the update operations array. */ static char * tuple_new_update(const struct tuple_update_template *update, char **end) { const struct tuple_op_template *ops = update->ops; int count = update->count; size_t size = mp_sizeof_array(count) + (mp_sizeof_str(1) + mp_sizeof_array(3)) * count; for (int i = 0; i < count; ++i) { if (ops[i].fieldno >= 0) size += mp_sizeof_uint(ops[i].fieldno); else size += mp_sizeof_int(ops[i].fieldno); size += mp_sizeof_uint(ops[i].arg); } char *ret = (char *)malloc(size); fail_if(ret == NULL); char *pos = mp_encode_array(ret, count); for (int i = 0; i < count; ++i) { pos = mp_encode_array(pos, 3); pos = mp_encode_str(pos, &ops[i].op, 1); if (ops[i].fieldno >= 0) pos = mp_encode_uint(pos, ops[i].fieldno); else pos = mp_encode_int(pos, ops[i].fieldno); pos = mp_encode_uint(pos, ops[i].arg); } *end = pos; return ret; } static char buffer[2048]; static size_t pos = 0; /** Allocator for the tuple_update function. 
*/ static void * tuple_update_alloc_f(void *unused, size_t size) { (void) unused; fail_if(pos + size > sizeof(buffer)); char *ret = &buffer[pos]; pos += size; return ret; } /** * Execute an update operation from the template and compare it * with the expected tuple and expected column_mask. * * @param orignal Tuple to update. * @param update Update operations * @param expected Expected update result tuple. * @param expected_mask Expected update result column_mask. */ static void check_update_result(const struct tuple_template *original, const struct tuple_update_template *update, const struct tuple_template *expected, uint64_t expected_mask) { char *old_end, *new_end, *ops_end; char *old = tuple_new_raw(original, &old_end); char *new = tuple_new_raw(expected, &new_end); char *ops = tuple_new_update(update, &ops_end); uint32_t actual_len; uint64_t column_mask; const char *actual = tuple_update_execute(tuple_update_alloc_f, NULL, ops, ops_end, old, old_end, &actual_len, 1, &column_mask); fail_if(actual == NULL); is((int32_t)actual_len, new_end - new, "check result length"); is(memcmp(actual, new, actual_len), 0, "tuple update is correct"); is(column_mask, expected_mask, "column_mask is correct"); free(old); free(new); free(ops); /* reset the global buffer. */ pos = 0; } static inline void basic_test() { const struct tuple_template statements[] = { { {1, 2, 3}, 3 }, { {4, 5, 6}, 3 }, { {1, 2, 3}, 3 }, { {1, 2, 3}, 3 }, { {1, 2, 3}, 3 }, { {1, 2, 3}, 3 }, { {1, 2}, 2 }, { {1, 2, 3, 4}, 4 }, { LONG_TUPLE, LONG_TUPLE_LEN }, }; const struct tuple_update_template update_ops[] = { /* simple update, one field. */ { {{'=', 3, 30}}, 1 }, /* field range update. */ { {{'#', 3, 1}}, 1 }, { {{'!', 2, 100}}, 1 }, /* negative field numbers. */ { {{'#', -1 , 1}}, 1 }, { {{'=', -1, 100}}, 1 }, { {{'!', -1, 100}}, 1 }, /* * change field_count and then try to optimize the * negative fieldno update. 
*/ { {{'!', 3, 3}, {'=', -3, 10}}, 2 }, { {{'#', -1, 1}, {'=', 2, 20}}, 2 }, /* Change fieldnumbers >= 64. */ { {{'=', 64, 1}, {'!', 65, 1}, {'#', -1, 1}, {'=', 32, 1}}, 4 }, }; const struct tuple_template results[] = { { {1, 2, 30}, 3 }, { {4, 5}, 2 }, { {1, 100, 2, 3}, 4 }, { {1, 2}, 2 }, { {1, 2, 100}, 3 }, { {1, 2, 3, 100}, 4 }, { {10, 2, 3}, 3 }, { {1, 20, 3}, 3 }, { LONG_TUPLE, LONG_TUPLE_LEN }, }; const uint64_t column_masks[] = { 1 << 2, COLUMN_MASK_FULL << 2, COLUMN_MASK_FULL << 1, COLUMN_MASK_FULL << 2, 1 << 2, COLUMN_MASK_FULL << 3, (COLUMN_MASK_FULL << 2) | 1, (COLUMN_MASK_FULL << 3) | (1 << 1), ((uint64_t) 1) << 63 | ((uint64_t) 1) << 31, }; assert(lengthof(statements) == lengthof(update_ops)); assert(lengthof(statements) == lengthof(results)); assert(lengthof(statements) == lengthof(column_masks)); for (size_t i = 0; i < lengthof(statements); ++i) check_update_result(&statements[i], &update_ops[i], &results[i], column_masks[i]); } int main() { header(); plan(27); basic_test(); footer(); check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/scramble.result0000664000000000000000000000000613306560010021162 0ustar rootroot0 1 1 tarantool_1.9.1.26.g63eb81e3c/test/unit/rlist.result0000664000000000000000000000532513306560010020540 0ustar rootroot1..87 ok 1 - list is empty ok 2 - rlist_nil is empty ok 3 - head2 is empty ok 4 - rlist_nil is empty after swap ok 5 - head2 is empty after swap ok 6 - head is empty after swap ok 7 - first item ok 8 - last item ok 9 - element (foreach) 0 ok 10 - element (foreach) 1 ok 11 - element (foreach) 2 ok 12 - element (foreach) 3 ok 13 - element (foreach) 4 ok 14 - element (foreach) 5 ok 15 - element (foreach) 6 ok 16 - element (foreach_reverse) 6 ok 17 - element (foreach_reverse) 5 ok 18 - element (foreach_reverse) 4 ok 19 - element (foreach_reverse) 3 ok 20 - element (foreach_reverse) 2 ok 21 - element (foreach_reverse) 1 ok 22 - element (foreach_reverse) 0 ok 23 - first item ok 24 - first item ok 25 - last item ok 26 - 
last item ok 27 - rlist_next ok 28 - rlist_prev ok 29 - element (foreach) 0 ok 30 - element (foreach) 1 ok 31 - element (foreach) 2 ok 32 - element (foreach) 3 ok 33 - element (foreach) 4 ok 34 - element (foreach) 5 ok 35 - element (foreach) 6 ok 36 - element (foreach_reverse) 6 ok 37 - element (foreach_reverse) 5 ok 38 - element (foreach_reverse) 4 ok 39 - element (foreach_reverse) 3 ok 40 - element (foreach_reverse) 2 ok 41 - element (foreach_reverse) 1 ok 42 - element (foreach_reverse) 0 ok 43 - rlist_entry ok 44 - rlist_first_entry ok 45 - rlist_next_entry ok 46 - rlist_prev_entry ok 47 - element (foreach_entry) 0 ok 48 - element (foreach_entry) 1 ok 49 - element (foreach_entry) 2 ok 50 - element (foreach_entry) 3 ok 51 - element (foreach_entry) 4 ok 52 - element (foreach_entry) 5 ok 53 - element (foreach_entry) 6 ok 54 - element (foreach_entry_reverse) 6 ok 55 - element (foreach_entry_reverse) 5 ok 56 - element (foreach_entry_reverse) 4 ok 57 - element (foreach_entry_reverse) 3 ok 58 - element (foreach_entry_reverse) 2 ok 59 - element (foreach_entry_reverse) 1 ok 60 - element (foreach_entry_reverse) 0 ok 61 - head2 is empty ok 62 - head2 isnt empty ok 63 - Item was moved ok 64 - element (second deleted) 0 ok 65 - element (second deleted) 1 ok 66 - element (second deleted) 5 ok 67 - element (second deleted) 6 ok 68 - element (second deleted) 6 ok 69 - element (second deleted) 5 ok 70 - element (second deleted) 1 ok 71 - element (second deleted) 0 ok 72 - list is empty ok 73 - element (foreach_entry_reverse) 0 ok 74 - element (foreach_entry_reverse) 1 ok 75 - element (foreach_entry_reverse) 2 ok 76 - element (foreach_entry_reverse) 3 ok 77 - element (foreach_entry_reverse) 4 ok 78 - element (foreach_entry_reverse) 5 ok 79 - element (foreach_entry_reverse) 6 ok 80 - element (foreach_entry) 6 ok 81 - element (foreach_entry) 5 ok 82 - element (foreach_entry) 4 ok 83 - element (foreach_entry) 3 ok 84 - element (foreach_entry) 2 ok 85 - element (foreach_entry) 1 ok 
86 - element (foreach_entry) 0 ok 87 - prev is null tarantool_1.9.1.26.g63eb81e3c/test/unit/suite.ini0000664000000000000000000000010613306560010017765 0ustar rootroot[default] core = unittest description = unit tests is_parallel = True tarantool_1.9.1.26.g63eb81e3c/test/unit/bitset_index.c0000664000000000000000000001501413306560010020764 0ustar rootroot#include #include #include #include #include #include #include "unit.h" enum { NUMS_SIZE = 1 << 16 }; static void test_resize(void) { header(); struct bitset_index index; bitset_index_create(&index, realloc); struct bitset_iterator it; bitset_iterator_create(&it, realloc); struct bitset_expr expr; bitset_expr_create(&expr, realloc); size_t key = 23411111; size_t value = 2321321; bitset_index_insert(&index, &key, sizeof(key), value); fail_unless(bitset_index_expr_equals(&expr, &key, sizeof(key)) == 0); fail_unless(bitset_index_init_iterator(&index, &it, &expr) == 0); fail_unless(bitset_iterator_next(&it) == value); fail_unless(bitset_iterator_next(&it) == SIZE_MAX); bitset_expr_destroy(&expr); bitset_iterator_destroy(&it); bitset_index_destroy(&index); footer(); } static void test_size_and_count(void) { header(); struct bitset_index index; bitset_index_create(&index, realloc); enum { P = 10, SIZE = (1 << P) + 1 }; for(size_t i = 0; i < SIZE; i++) { bitset_index_insert(&index, &i, sizeof(i), i); } fail_unless(bitset_index_size(&index) == SIZE); fail_unless(bitset_index_count(&index, 0) == SIZE / 2); fail_unless(bitset_index_count(&index, 1) == SIZE / 2); fail_unless(bitset_index_count(&index, 4) == SIZE / 2); fail_unless(bitset_index_count(&index, P) == 1); fail_unless(bitset_index_count(&index, P + 1) == 0); fail_unless(bitset_index_count(&index, 2147483648) == 0); bitset_index_destroy(&index); footer(); } static void check_keys(struct bitset_index *index, size_t *keys, size_t *values, size_t size) { struct bitset_iterator it; bitset_iterator_create(&it, realloc); struct bitset_expr expr; bitset_expr_create(&expr, 
realloc); printf("Checking keys... "); for (size_t i = 0; i < size; i++) { /* ignore removed keys */ if (keys[i] == SIZE_MAX) { continue; } fail_unless(bitset_index_expr_equals(&expr, &keys[i], sizeof(keys[i])) == 0); fail_unless(bitset_index_init_iterator(index, &it, &expr) == 0); size_t pos; bool pair_found = false; while ( (pos = bitset_iterator_next(&it)) != SIZE_MAX) { if (pos == values[i]) { pair_found = true; break; } } fail_unless(pair_found); } printf("ok\n"); bitset_iterator_destroy(&it); bitset_expr_destroy(&expr); } static void test_insert_remove(void) { header(); struct bitset_index index; bitset_index_create(&index, realloc); size_t NUMS_SIZE = 1 << 11; size_t *keys = malloc(NUMS_SIZE * sizeof(size_t)); size_t *values = malloc(NUMS_SIZE * sizeof(size_t)); size_t count0 = 0; size_t count1 = 0; printf("Generating test set... "); for(size_t i = 0; i < NUMS_SIZE; i++) { keys[i] = rand(); values[i] = rand(); count0 += (keys[i] & 1) != 0 ? 1 : 0; count1 += (keys[i] & 2) != 0 ? 1 : 0; } printf("ok\n"); printf("Inserting pairs... "); for(size_t i = 0; i < NUMS_SIZE; i++) { bitset_index_insert(&index, &keys[i], sizeof(keys[i]), values[i]); } printf("ok\n"); check_keys(&index, keys, values, NUMS_SIZE); fail_unless(bitset_index_count(&index, 0) == count0); fail_unless(bitset_index_count(&index, 1) == count1); printf("Removing random pairs... 
"); for(size_t i = 0; i < NUMS_SIZE; i++) { if (rand() % 5 == 0) { bitset_index_remove_value(&index, values[i]); keys[i] = SIZE_MAX; } } printf("ok\n"); check_keys(&index, keys, values, NUMS_SIZE); bitset_index_destroy(&index); free(keys); free(values); footer(); } static void test_simple(int mode, size_t search_mask) { fail_unless(mode >= 0 && mode < 3); struct bitset_index index; bitset_index_create(&index, realloc); struct bitset_iterator it; bitset_iterator_create(&it, realloc); struct bitset_expr expr; bitset_expr_create(&expr, realloc); size_t check_count = 0; for (size_t key = 0; key < NUMS_SIZE; key++) { bitset_index_insert(&index, &key, sizeof(key), key); if (mode == 0) { check_count++; } else if (mode == 1 && (key & search_mask) == search_mask) { check_count++; } else if (mode == 2 && (key & search_mask) != 0) { check_count++; } } if (mode == 0) { fail_unless(bitset_index_expr_all(&expr) == 0); } else if (mode == 1) { fail_unless(bitset_index_expr_all_set(&expr, &search_mask, sizeof(search_mask)) == 0); } else if (mode == 2) { fail_unless(bitset_index_expr_any_set(&expr, &search_mask, sizeof(search_mask)) == 0); } fail_unless(bitset_index_init_iterator(&index, &it, &expr) == 0); size_t found_count = 0; for (size_t key = 0; key < NUMS_SIZE; key++) { size_t r = bitset_iterator_next(&it); if (mode == 0) { fail_unless(key == r); found_count++; } else if (mode == 1 && (key & search_mask) == search_mask) { found_count++; } else if (mode == 2 && (key & search_mask) != 0){ found_count++; } } fail_unless(bitset_iterator_next(&it) == SIZE_MAX); fail_unless(found_count == check_count); bitset_expr_destroy(&expr); bitset_iterator_destroy(&it); bitset_index_destroy(&index); } static void test_empty_simple(void) { header(); test_simple(1, 0); /* empty result */ footer(); } static void test_all_simple(void) { header(); test_simple(0, 0); /* all */ footer(); } static void test_all_set_simple(void) { header(); size_t search_mask = 66; /* 0b1000010 */ test_simple(1, 
search_mask); /* all bits set */ footer(); } static void test_any_set_simple(void) { header(); size_t search_mask = 66; /* 0b1000010 */ test_simple(2, search_mask); /* any bits set */ footer(); } static void test_equals_simple(void) { header(); struct bitset_index index; bitset_index_create(&index, realloc); struct bitset_iterator it; bitset_iterator_create(&it, realloc); struct bitset_expr expr; bitset_expr_create(&expr, realloc); size_t mask = ~((size_t ) 7); for (size_t i = 0; i < NUMS_SIZE; i++) { size_t key = i & mask; size_t value = i; bitset_index_insert(&index, &key, sizeof(key), value); } size_t key1 = (rand() % NUMS_SIZE) & mask; fail_unless(bitset_index_expr_equals(&expr, &key1, sizeof(key1)) == 0); fail_unless(bitset_index_init_iterator(&index, &it, &expr) == 0); for (size_t i = key1; i <= (key1 + ~mask); i++) { fail_unless(i == bitset_iterator_next(&it)); } fail_unless(bitset_iterator_next(&it) == SIZE_MAX); bitset_expr_destroy(&expr); bitset_iterator_destroy(&it); bitset_index_destroy(&index); footer(); } int main(void) { setbuf(stdout, NULL); test_size_and_count(); test_resize(); test_insert_remove(); test_empty_simple(); test_all_simple(); test_all_set_simple(); test_any_set_simple(); test_equals_simple(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/bitset_basic.result0000664000000000000000000000045313306560010022033 0ustar rootroot *** test_cardinality *** *** test_cardinality: done *** *** test_get_set *** Generating test set... ok Settings bits... ok Checking bits... ok Unsetting random bits... ok Checking set bits... ok Checking all bits... ok Unsetting all bits... ok Checking all bits... 
ok *** test_get_set: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/bps_tree.cc0000664000000000000000000005177413306560010020266 0ustar rootroot#include #include #include #include #include #include #include "unit.h" #include "sptree.h" #include "qsort_arg.h" #ifndef MAX #define MAX(a,b) (((a)>(b))?(a):(b)) #endif //#ifndef MAX SPTREE_DEF(test, realloc, qsort_arg); typedef int64_t type_t; #define TYPE_F "%" PRId64 static int compare(type_t a, type_t b); /* check compiling with another name and settings */ #define BPS_TREE_NAME testtest #define BPS_TREE_BLOCK_SIZE 512 /* value is to low specially for tests */ #define BPS_TREE_EXTENT_SIZE 16*1024 /* value is to low specially for tests */ #define BPS_TREE_COMPARE(a, b, arg) compare(a, b) #define BPS_TREE_COMPARE_KEY(a, b, arg) compare(a, b) #define bps_tree_elem_t char #define bps_tree_key_t char #define bps_tree_arg_t int #include "salad/bps_tree.h" #undef BPS_TREE_NAME #undef BPS_TREE_BLOCK_SIZE #undef BPS_TREE_EXTENT_SIZE #undef BPS_TREE_COMPARE #undef BPS_TREE_COMPARE_KEY #undef bps_tree_elem_t #undef bps_tree_key_t #undef bps_tree_arg_t /* true tree with true settings */ #define BPS_TREE_NAME test #define BPS_TREE_BLOCK_SIZE 128 /* value is to low specially for tests */ #define BPS_TREE_EXTENT_SIZE 2048 /* value is to low specially for tests */ #define BPS_TREE_COMPARE(a, b, arg) compare(a, b) #define BPS_TREE_COMPARE_KEY(a, b, arg) compare(a, b) #define bps_tree_elem_t type_t #define bps_tree_key_t type_t #define bps_tree_arg_t int #define BPS_TREE_DEBUG_BRANCH_VISIT #include "salad/bps_tree.h" #undef BPS_TREE_NAME #undef BPS_TREE_BLOCK_SIZE #undef BPS_TREE_EXTENT_SIZE #undef BPS_TREE_COMPARE #undef BPS_TREE_COMPARE_KEY #undef bps_tree_elem_t #undef bps_tree_key_t #undef bps_tree_arg_t /* tree for approximate_count test */ #define BPS_TREE_NAME approx #define BPS_TREE_BLOCK_SIZE 128 /* value is to low specially for tests */ #define BPS_TREE_EXTENT_SIZE 2048 /* value is to low specially for tests */ #define 
BPS_TREE_COMPARE(a, b, arg) ((a) < (b) ? -1 : (a) > (b) ? 1 : 0) #define BPS_TREE_COMPARE_KEY(a, b, arg) (((a) >> 32) < (b) ? -1 : ((a) >> 32) > (b) ? 1 : 0) #define bps_tree_elem_t uint64_t #define bps_tree_key_t uint32_t #define bps_tree_arg_t int #include "salad/bps_tree.h" #define bps_insert_and_check(tree_name, tree, elem, replaced) \ {\ tree_name##_iterator iter;\ if (tree_name##_insert_get_iterator((tree), (elem), \ (replaced), &iter) == 0) {\ type_t check_value =\ *tree_name##_iterator_get_elem((tree), &iter);\ if (check_value != (type_t)(elem)) {\ printf("iterator doesn't point to the inserted "\ "element: %lld != %lld", (long long) (elem),\ (long long) check_value);\ fail("elem != check_value", "true");\ }\ }\ } static int node_comp(const void *p1, const void *p2, void* unused) { (void)unused; return *((const type_t *)p1) < *((const type_t *)p2) ? -1 : *((const type_t *)p2) < *((const type_t *)p1) ? 1 : 0; } static int compare(type_t a, type_t b) { return a < b ? -1 : a > b ? 1 : 0; } static int extents_count = 0; static void * extent_alloc(void *ctx) { int *p_extents_count = (int *)ctx; assert(p_extents_count == &extents_count); ++*p_extents_count; return malloc(BPS_TREE_EXTENT_SIZE); } static void extent_free(void *ctx, void *extent) { int *p_extents_count = (int *)ctx; assert(p_extents_count == &extents_count); --*p_extents_count; free(extent); } static void simple_check() { header(); const unsigned int rounds = 2000; test tree; test_create(&tree, 0, extent_alloc, extent_free, &extents_count); printf("Insert 1..X, remove 1..X\n"); for (unsigned int i = 0; i < rounds; i++) { type_t v = i; if (test_find(&tree, v) != NULL) fail("element already in tree (1)", "true"); test_insert(&tree, v, 0); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != rounds) fail("Tree count mismatch (1)", "true"); for (unsigned int i = 0; i < rounds; i++) { type_t v = i; if (test_find(&tree, v) == NULL) 
fail("element in tree (1)", "false"); test_delete(&tree, v); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != 0) fail("Tree count mismatch (2)", "true"); printf("Insert 1..X, remove X..1\n"); for (unsigned int i = 0; i < rounds; i++) { type_t v = i; if (test_find(&tree, v) != NULL) fail("element already in tree (2)", "true"); test_insert(&tree, v, 0); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != rounds) fail("Tree count mismatch (3)", "true"); for (unsigned int i = 0; i < rounds; i++) { type_t v = rounds - 1 - i; if (test_find(&tree, v) == NULL) fail("element in tree (2)", "false"); test_delete(&tree, v); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != 0) fail("Tree count mismatch (4)", "true"); printf("Insert X..1, remove 1..X\n"); for (unsigned int i = 0; i < rounds; i++) { type_t v = rounds - 1 - i; if (test_find(&tree, v) != NULL) fail("element already in tree (3)", "true"); test_insert(&tree, v, 0); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != rounds) fail("Tree count mismatch (5)", "true"); for (unsigned int i = 0; i < rounds; i++) { type_t v = i; if (test_find(&tree, v) == NULL) fail("element in tree (3)", "false"); test_delete(&tree, v); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != 0) fail("Tree count mismatch (6)", "true"); printf("Insert X..1, remove X..1\n"); for (unsigned int i = 0; i < rounds; i++) { type_t v = rounds - 1 - i; if (test_find(&tree, v) != NULL) fail("element already in tree (4)", "true"); test_insert(&tree, v, 0); if (test_debug_check(&tree)) { fail("debug check nonzero", "true"); test_print(&tree, TYPE_F); } } if (test_size(&tree) != rounds) fail("Tree count 
mismatch (7)", "true"); for (unsigned int i = 0; i < rounds; i++) { type_t v = rounds - 1 - i; if (test_find(&tree, v) == NULL) fail("element in tree (4)", "false"); test_delete(&tree, v); if (test_debug_check(&tree)) { test_print(&tree, TYPE_F); fail("debug check nonzero", "true"); } } if (test_size(&tree) != 0) fail("Tree count mismatch (8)", "true"); test_destroy(&tree); footer(); } static bool check_trees_are_identical(test *tree, sptree_test *spt_test) { if (test_size(tree) != spt_test->size) return false; int n = test_size(tree); test_iterator iterator = test_iterator_first(tree); sptree_test_iterator *spitr = sptree_test_iterator_init(spt_test); for (int i = 0; i < n; i++) { type_t v1 = *test_iterator_get_elem(tree, &iterator); type_t v2 = *(type_t *)sptree_test_iterator_next(spitr); test_iterator_next(tree, &iterator); if (v1 != v2) { sptree_test_iterator_free(spitr); return false; } } sptree_test_iterator_free(spitr); return true; } static void compare_with_sptree_check() { header(); sptree_test spt_test; sptree_test_init(&spt_test, sizeof(type_t), 0, 0, 0, &node_comp, 0, 0); test tree; test_create(&tree, 0, extent_alloc, extent_free, &extents_count); const int rounds = 16 * 1024; const int elem_limit = 1024; for (int i = 0; i < rounds; i++) { type_t rnd = rand() % elem_limit; int find_res1 = sptree_test_find(&spt_test, &rnd) ? 1 : 0; int find_res2 = test_find(&tree, rnd) ? 
1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (find_res1 == 0) { sptree_test_replace(&spt_test, &rnd, NULL); test_insert(&tree, rnd, 0); } else { sptree_test_delete(&spt_test, &rnd); test_delete(&tree, rnd); } if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } sptree_test_destroy(&spt_test); test_destroy(&tree); footer(); } static void compare_with_sptree_check_branches() { header(); sptree_test spt_test; sptree_test_init(&spt_test, sizeof(type_t), 0, 0, 0, &node_comp, 0, 0); test tree; test_create(&tree, 0, extent_alloc, extent_free, &extents_count); const int elem_limit = 1024; for (int i = 0; i < elem_limit; i++) { type_t v = (type_t)i; int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (find_res1) { fail("trees integrity", "false"); } sptree_test_replace(&spt_test, &v, NULL); test_insert(&tree, v, 0); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v = (type_t)i; int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (!find_res1) fail("trees integrity", "false"); sptree_test_delete(&spt_test, &v); test_delete(&tree, v); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v = (type_t)(elem_limit - i - 1); int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 
1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (find_res1) fail("trees integrity", "false"); sptree_test_replace(&spt_test, &v, NULL); test_insert(&tree, v, 0); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v = (type_t)(elem_limit - i - 1); int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (!find_res1) fail("trees integrity", "false"); sptree_test_delete(&spt_test, &v); test_delete(&tree, v); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v = (type_t)i; int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (find_res1) fail("trees integrity", "false"); sptree_test_replace(&spt_test, &v, NULL); test_insert(&tree, v, 0); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v; if (i & 1) v = (type_t)(elem_limit / 2 + i / 2); else v = (type_t)(elem_limit / 2 - i / 2 - 1); int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 
1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (!find_res1) fail("trees integrity", "false"); sptree_test_delete(&spt_test, &v); test_delete(&tree, v); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v = (type_t)i; int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (find_res1) fail("trees integrity", "false"); sptree_test_replace(&spt_test, &v, NULL); test_insert(&tree, v, 0); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v; if (i & 1) v = (type_t)(i / 2); else v = (type_t)(elem_limit - i / 2 - 1); int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (!find_res1) fail("trees integrity", "false"); sptree_test_delete(&spt_test, &v); test_delete(&tree, v); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v; if (i < elem_limit / 2) v = (type_t)(i * 2); else v = (type_t)((i - elem_limit / 2) * 2 + 1); int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 
1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (find_res1) fail("trees integrity", "false"); sptree_test_replace(&spt_test, &v, NULL); test_insert(&tree, v, 0); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } for (int i = 0; i < elem_limit; i++) { type_t v; if (i < elem_limit / 2) v = (type_t)(i * 2); else v = (type_t)((i - elem_limit / 2) * 2 + 1); int find_res1 = sptree_test_find(&spt_test, &v) ? 1 : 0; int find_res2 = test_find(&tree, v) ? 1 : 0; if (find_res1 ^ find_res2) fail("trees identity", "false"); if (!find_res1) fail("trees integrity", "false"); sptree_test_delete(&spt_test, &v); test_delete(&tree, v); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); if (!check_trees_are_identical(&tree, &spt_test)) fail("trees identity", "false"); } if (tree.debug_insert_leaf_branches_mask != tree.debug_insert_leaf_branches_max_mask) fail("not all insert leaf branches was tested", "true"); if (tree.debug_insert_inner_branches_mask != tree.debug_insert_inner_branches_max_mask) fail("not all insert inner branches was tested", "true"); if (tree.debug_delete_leaf_branches_mask != tree.debug_delete_leaf_branches_max_mask) fail("not all delete leaf branches was tested", "true"); if (tree.debug_delete_inner_branches_mask != tree.debug_delete_inner_branches_max_mask) fail("not all delete inner branches was tested", "true"); sptree_test_destroy(&spt_test); test_destroy(&tree); footer(); } static void bps_tree_debug_self_check() { header(); int res = test_debug_check_internal_functions(false); if (res) printf("self test returned error %d\n", res); test_debug_check_internal_functions(true); footer(); } static void loading_test() { header(); test tree; const type_t test_count = 1000; type_t arr[test_count]; for (type_t i = 0; i < test_count; i++) arr[i] = i; for (type_t i = 0; i <= test_count; i++) { test_create(&tree, 0, extent_alloc, 
extent_free, &extents_count); if (test_build(&tree, arr, i)) fail("building failed", "true"); if (test_debug_check(&tree)) fail("debug check nonzero", "true"); struct test_iterator iterator; iterator = test_iterator_first(&tree); for (type_t j = 0; j < i; j++) { type_t *v = test_iterator_get_elem(&tree, &iterator); if (!v || *v != j) fail("wrong build result", "true"); test_iterator_next(&tree, &iterator); } if (!test_iterator_is_invalid(&iterator)) fail("wrong build result", "true"); test_destroy(&tree); } footer(); } static void printing_test() { header(); test tree; test_create(&tree, 0, extent_alloc, extent_free, &extents_count); const type_t rounds = 22; for (type_t i = 0; i < rounds; i++) { type_t v = rounds + i; printf("Inserting " TYPE_F "\n", v); test_insert(&tree, v, 0); test_print(&tree, TYPE_F); v = rounds - i - 1; printf("Inserting " TYPE_F "\n", v); test_insert(&tree, v, 0); test_print(&tree, TYPE_F); } test_destroy(&tree); footer(); } static void white_box_test() { header(); test tree; test_create(&tree, 0, extent_alloc, extent_free, &extents_count); assert(BPS_TREE_test_MAX_COUNT_IN_LEAF == 14); assert(BPS_TREE_test_MAX_COUNT_IN_INNER == 10); printf("full leaf:\n"); for (type_t i = 0; i < 14; i++) { test_insert(&tree, i, 0); } test_print(&tree, TYPE_F); printf("split now:\n"); test_insert(&tree, 14, 0); test_print(&tree, TYPE_F); printf("full 2 leafs:\n"); for (type_t i = 15; i < 28; i++) { test_insert(&tree, i, 0); } test_print(&tree, TYPE_F); printf("split now:\n"); test_insert(&tree, 28, 0); test_print(&tree, TYPE_F); printf("full 3 leafs:\n"); for (type_t i = 29; i < 42; i++) { test_insert(&tree, i, 0); } test_print(&tree, TYPE_F); printf("split now:\n"); test_insert(&tree, 42, 0); test_print(&tree, TYPE_F); test_destroy(&tree); test_create(&tree, 0, extent_alloc, extent_free, &extents_count); type_t arr[140]; for (type_t i = 0; i < 140; i++) arr[i] = i; test_build(&tree, arr, 140); printf("full 10 leafs:\n"); test_print(&tree, TYPE_F); 
printf("2-level split now:\n"); test_insert(&tree, 140, 0); test_print(&tree, TYPE_F); test_destroy(&tree); footer(); } static void approximate_count() { header(); srand(0); approx tree; approx_create(&tree, 0, extent_alloc, extent_free, &extents_count); uint32_t in_leaf_max_count = BPS_TREE_approx_MAX_COUNT_IN_LEAF; uint32_t in_leaf_min_count = in_leaf_max_count * 2 / 3; uint32_t in_leaf_ave_count = in_leaf_max_count * 5 / 6; uint32_t in_inner_max_count = BPS_TREE_approx_MAX_COUNT_IN_INNER; uint32_t in_inner_min_count = in_inner_max_count * 2 / 3; uint32_t in_inner_ave_count = in_inner_max_count * 5 / 6; double X = in_leaf_ave_count; double Y = in_inner_ave_count; double low_border_leaf = double(in_leaf_min_count) / in_leaf_ave_count; double upper_border_leaf = double(in_leaf_max_count) / in_leaf_ave_count; double low_border_inner = double(in_inner_min_count) / in_inner_ave_count; double upper_border_inner = double(in_inner_max_count) / in_inner_ave_count; const uint32_t short_sequence_count = 50; const uint32_t long_sequence_count = 30; const uint32_t long_sequence_multiplier = 20; const uint32_t arr_size = short_sequence_count * (short_sequence_count + 1) / 2 + long_sequence_count * (long_sequence_count + 1) * long_sequence_multiplier / 2; uint64_t arr[arr_size]; uint64_t count = 0; for (uint64_t i = 1; i <= short_sequence_count; i++) for (uint64_t j = 0; j < i; j++) arr[count++] = ((i * 100) << 32) | j; for (uint64_t i = 1; i <= long_sequence_count; i++) for (uint64_t j = 0; j < i * long_sequence_multiplier; j++) arr[count++] = ((i * 100 + 50) << 32) | j; printf("Count: %llu %u\n", (unsigned long long)count, arr_size); assert(count == arr_size); for (uint64_t i = 0; i < count * 10; i++) { uint64_t j = rand() % count; uint64_t k = rand() % count; uint64_t tmp = arr[j]; arr[j] = arr[k]; arr[k] = tmp; } for (uint64_t i = 0; i < count; i++) approx_insert(&tree, arr[i], NULL); printf("Count: %zu\n", tree.size); count = 0; int err_count = 0; const uint32_t 
over_possible = (short_sequence_count + long_sequence_count + 1) * 100; for (uint32_t i = 50; i < over_possible; i += 25) { uint64_t true_count = 0; if (i % 100 == 0) { uint64_t j = i / 100; if (j >= 1 && j <= short_sequence_count) true_count = j; } else if (i % 50 == 0) { uint64_t j = i / 100; if (j >= 1 && j <= long_sequence_count) true_count = j * long_sequence_multiplier; } count += true_count; uint64_t approx_count = approx_approximate_count(&tree, i); if (approx_count <= X) { if (approx_count != true_count) { err_count++; if (err_count <= 10) printf("searching %u found %llu expected %llu\n", i, (unsigned long long)approx_count, (unsigned long long)true_count); } continue; } double H = ceil(log(approx_count / X) / log(Y)); double low = approx_count * low_border_leaf * pow(low_border_inner, H - 1); double up = approx_count * upper_border_leaf * pow(upper_border_inner, H - 1); if (true_count < low || true_count > up) { err_count++; if (err_count <= 10) printf("searching %u found %llu expected %llu\n", i, (unsigned long long)approx_count, (unsigned long long)true_count); } }; printf("Error count: %d\n", err_count); printf("Count: %llu\n", (unsigned long long)count); approx_destroy(&tree); footer(); } static void insert_get_iterator() { header(); test tree; test_create(&tree, 0, extent_alloc, extent_free, &extents_count); type_t value = 100000; bps_insert_and_check(test, &tree, value, NULL) type_t i = 0; for (; i < 10000; i += 2) bps_insert_and_check(test, &tree, i, NULL); for (i = -2; i > -10000; i -= 2) bps_insert_and_check(test, &tree, i, NULL); for (i = -9999; i < 10000; i += 2) bps_insert_and_check(test, &tree, i, NULL) footer(); } int main(void) { simple_check(); compare_with_sptree_check(); compare_with_sptree_check_branches(); bps_tree_debug_self_check(); loading_test(); printing_test(); white_box_test(); approximate_count(); if (extents_count != 0) fail("memory leak!", "true"); insert_get_iterator(); } 
tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_write_iterator.result0000664000000000000000000000242313306560010023160 0ustar rootroot *** test_basic *** 1..46 ok 1 - stmt 0 is correct ok 2 - stmt 1 is correct ok 3 - stmt 2 is correct ok 4 - stmt 3 is correct ok 5 - correct results count ok 6 - stmt 0 is correct ok 7 - stmt 1 is correct ok 8 - stmt 2 is correct ok 9 - stmt 3 is correct ok 10 - correct results count ok 11 - stmt 0 is correct ok 12 - stmt 1 is correct ok 13 - correct results count ok 14 - stmt 0 is correct ok 15 - stmt 1 is correct ok 16 - correct results count ok 17 - stmt 0 is correct ok 18 - correct results count ok 19 - stmt 0 is correct ok 20 - stmt 1 is correct ok 21 - correct results count ok 22 - stmt 0 is correct ok 23 - stmt 1 is correct ok 24 - correct results count ok 25 - correct results count ok 26 - stmt 0 is correct ok 27 - stmt 1 is correct ok 28 - correct results count ok 29 - stmt 0 is correct ok 30 - stmt 1 is correct ok 31 - stmt 2 is correct ok 32 - correct results count ok 33 - stmt 0 is correct ok 34 - correct results count ok 35 - stmt 0 is correct ok 36 - correct results count ok 37 - stmt 0 is correct ok 38 - correct results count ok 39 - stmt 0 is correct ok 40 - stmt 1 is correct ok 41 - stmt 2 is correct ok 42 - correct results count ok 43 - stmt 0 is correct ok 44 - stmt 1 is correct ok 45 - stmt 2 is correct ok 46 - correct results count *** test_basic: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_cond.c0000664000000000000000000000276613306560010020407 0ustar rootroot#include "memory.h" #include "fiber.h" #include "fiber_cond.h" #include "unit.h" static int fiber_cond_basic_f(va_list ap) { struct fiber_cond *cond = va_arg(ap, struct fiber_cond *); int *check = va_arg(ap, int *); int rc; rc = fiber_cond_wait_timeout(cond, 0.0); ok(rc != 0, "timeout"); rc = fiber_cond_wait(cond); is(rc, 0, "signal"); (*check)++; rc = fiber_cond_wait(cond); is(rc, 0, "broadcast"); return 0; } static void fiber_cond_basic() { struct 
fiber_cond *cond = fiber_cond_new(); int check = 0; struct fiber *f1 = fiber_new("f1", fiber_cond_basic_f); assert(f1 != NULL); fiber_start(f1, cond, &check); fiber_set_joinable(f1, true); struct fiber *f2 = fiber_new("f2", fiber_cond_basic_f); assert(f2 != NULL); fiber_start(f2, cond, &check); fiber_set_joinable(f2, true); /* check timeout */ fiber_sleep(0.0); fiber_sleep(0.0); /* Wake up the first fiber */ fiber_cond_signal(cond); fiber_sleep(0.0); /* Wake ip the second fiber */ fiber_cond_signal(cond); fiber_sleep(0.0); /* Check that fiber scheduling is fair */ is(check, 2, "order"); fiber_cond_broadcast(cond); fiber_sleep(0.0); fiber_join(f1); fiber_join(f2); fiber_cond_delete(cond); } static int main_f(va_list ap) { (void) ap; fiber_cond_basic(); ev_break(loop(), EVBREAK_ALL); return 0; } int main() { plan(7); memory_init(); fiber_init(fiber_c_invoke); struct fiber *f = fiber_new("main", main_f); fiber_wakeup(f); ev_run(loop(), 0); fiber_free(); memory_free(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_stress.c0000664000000000000000000000316713306560010020661 0ustar rootroot#include "salad/rope.h" #include #include "unit.h" #include "rope_common.h" static char *data[] = {"a", "bc", "def", "ghij", "klmno"}; static void test_rope_stress_small() { header(); struct rope *rope = rope_new(str_getn, NULL, mem_alloc, mem_free, NULL); const int iterations = 500; int i = 0; for (i = 0; i < iterations; ++i) { char *d = data[((rope_size_t) rand())%5]; int len = strlen(d); rope_size_t size = rope_size(rope); rope_size_t offset = ((rope_size_t) rand()) % (size + 1); rope_insert(rope, offset, d, len); fail_unless(size + len == rope_size(rope)); rope_check(rope); size = rope_size(rope); offset = ((rope_size_t) rand()) % size; if (offset == size) offset--; rope_erase(rope, offset); fail_unless(size == rope_size(rope) + 1); rope_check(rope); } rope_delete(rope); footer(); } static void test_rope_stress_large() { header(); struct rope *rope = 
rope_new(str_getn, NULL, mem_alloc, mem_free, NULL); const int iterations = 50000; int i = 0; for (i = 0; i < iterations; ++i) { char *d = data[((rope_size_t) rand())%5]; int len = strlen(d); rope_size_t size = rope_size(rope); rope_size_t offset = ((rope_size_t) rand()) % (size + 1); rope_insert(rope, offset, d, len); fail_unless(size + len == rope_size(rope)); size = rope_size(rope); offset = ((rope_size_t) rand()) % size; if (offset == size) offset--; rope_erase(rope, offset); fail_unless(size == rope_size(rope) + 1); if (i % 1000 == 0) rope_check(rope); } rope_delete(rope); footer(); } int main() { srand(time(NULL)); test_rope_stress_small(); test_rope_stress_large(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_cache.c0000664000000000000000000000754613306565107020113 0ustar rootroot#include "trivia/util.h" #include "vy_iterators_helper.h" #include "unit.h" const struct vy_stmt_template key_template = STMT_TEMPLATE(0, SELECT, vyend); static void test_basic() { header(); plan(6); struct vy_cache cache; uint32_t fields[] = { 0 }; uint32_t types[] = { FIELD_TYPE_UNSIGNED }; struct key_def *key_def; struct tuple_format *format; create_test_cache(fields, types, lengthof(fields), &cache, &key_def, &format); struct tuple *select_all = vy_new_simple_stmt(format, NULL, NULL, &key_template); /* * Fill the cache with 3 chains. 
*/ const struct vy_stmt_template chain1[] = { STMT_TEMPLATE(1, REPLACE, 100), STMT_TEMPLATE(2, REPLACE, 200), STMT_TEMPLATE(3, REPLACE, 300), STMT_TEMPLATE(4, REPLACE, 400), STMT_TEMPLATE(5, REPLACE, 500), STMT_TEMPLATE(6, REPLACE, 600), }; vy_cache_insert_templates_chain(&cache, format, chain1, lengthof(chain1), &key_template, ITER_GE); is(cache.cache_tree.size, 6, "cache is filled with 6 statements"); const struct vy_stmt_template chain2[] = { STMT_TEMPLATE(10, REPLACE, 1001), STMT_TEMPLATE(11, REPLACE, 1002), STMT_TEMPLATE(12, REPLACE, 1003), STMT_TEMPLATE(13, REPLACE, 1004), STMT_TEMPLATE(14, REPLACE, 1005), STMT_TEMPLATE(15, REPLACE, 1006), }; vy_cache_insert_templates_chain(&cache, format, chain2, lengthof(chain2), &key_template, ITER_GE); is(cache.cache_tree.size, 12, "cache is filled with 12 statements"); const struct vy_stmt_template chain3[] = { STMT_TEMPLATE(16, REPLACE, 1107), STMT_TEMPLATE(17, REPLACE, 1108), STMT_TEMPLATE(18, REPLACE, 1109), STMT_TEMPLATE(19, REPLACE, 1110), STMT_TEMPLATE(20, REPLACE, 1111), STMT_TEMPLATE(21, REPLACE, 1112), }; vy_cache_insert_templates_chain(&cache, format, chain3, lengthof(chain3), &key_template, ITER_GE); is(cache.cache_tree.size, 18, "cache is filled with 18 statements"); /* * Try to restore opened and positioned iterator. * At first, start the iterator and make several iteration * steps. * At second, change cache version be insertion a new * statement. * At third, restore the opened on the first step * iterator on the several statements back. * * Key1 Key2 NewKey Key3 Key4 Key5 * ^ ^ ^ * restore to new stmt current position * | | * +- - - - < - - - - < - - - - -+ */ struct vy_cache_iterator itr; struct vy_read_view rv; rv.vlsn = INT64_MAX; const struct vy_read_view *rv_p = &rv; vy_cache_iterator_open(&itr, &cache, ITER_GE, select_all, &rv_p); /* Start iterator and make several steps. 
*/ struct tuple *ret; bool unused; for (int i = 0; i < 4; ++i) vy_cache_iterator_next(&itr, &ret, &unused); ok(vy_stmt_are_same(ret, &chain1[3], format, NULL, NULL), "next_key * 4"); /* * Emulate new statement insertion: break the first chain * and insert into the cache the new statement. */ const struct vy_stmt_template to_insert = STMT_TEMPLATE(22, REPLACE, 201); vy_cache_on_write_template(&cache, format, &to_insert); vy_cache_insert_templates_chain(&cache, format, &to_insert, 1, &key_template, ITER_GE); /* * Restore after the cache had changed. Restoration * makes position of the iterator be one statement after * the last_stmt. So restore on chain1[0], but the result * must be chain1[1]. */ struct tuple *last_stmt = vy_new_simple_stmt(format, NULL, NULL, &chain1[0]); ok(vy_cache_iterator_restore(&itr, last_stmt, &ret, &unused) >= 0, "restore"); ok(vy_stmt_are_same(ret, &chain1[1], format, NULL, NULL), "restore on position after last"); tuple_unref(last_stmt); vy_cache_iterator_close(&itr); tuple_unref(select_all); destroy_test_cache(&cache, key_def, format); check_plan(); footer(); } int main() { vy_iterator_C_test_init(1LLU * 1024LLU * 1024LLU * 1024LLU); test_basic(); vy_iterator_C_test_finish(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/light.cc0000664000000000000000000001663213306560010017564 0ustar rootroot#include #include #include #include #include #include #include #include "unit.h" typedef uint64_t hash_value_t; typedef uint32_t hash_t; static const size_t light_extent_size = 16 * 1024; static size_t extents_count = 0; hash_t hash(hash_value_t value) { return (hash_t) value; } bool equal(hash_value_t v1, hash_value_t v2) { return v1 == v2; } bool equal_key(hash_value_t v1, hash_value_t v2) { return v1 == v2; } #define LIGHT_NAME #define LIGHT_DATA_TYPE uint64_t #define LIGHT_KEY_TYPE uint64_t #define LIGHT_CMP_ARG_TYPE int #define LIGHT_EQUAL(a, b, arg) equal(a, b) #define LIGHT_EQUAL_KEY(a, b, arg) equal_key(a, b) #include "salad/light.h" 
inline void * my_light_alloc(void *ctx) { size_t *p_extents_count = (size_t *)ctx; assert(p_extents_count == &extents_count); ++*p_extents_count; return malloc(light_extent_size); } inline void my_light_free(void *ctx, void *p) { size_t *p_extents_count = (size_t *)ctx; assert(p_extents_count == &extents_count); --*p_extents_count; free(p); } static void simple_test() { header(); struct light_core ht; light_create(&ht, light_extent_size, my_light_alloc, my_light_free, &extents_count, 0); std::vector vect; size_t count = 0; const size_t rounds = 1000; const size_t start_limits = 20; for(size_t limits = start_limits; limits <= 2 * rounds; limits *= 10) { while (vect.size() < limits) vect.push_back(false); for (size_t i = 0; i < rounds; i++) { hash_value_t val = rand() % limits; hash_t h = hash(val); hash_t fnd = light_find(&ht, h, val); bool has1 = fnd != light_end; bool has2 = vect[val]; assert(has1 == has2); if (has1 != has2) { fail("find key failed!", "true"); return; } if (!has1) { count++; vect[val] = true; light_insert(&ht, h, val); } else { count--; vect[val] = false; light_delete(&ht, fnd); } if (count != ht.count) fail("count check failed!", "true"); bool identical = true; for (hash_value_t test = 0; test < limits; test++) { if (vect[test]) { if (light_find(&ht, hash(test), test) == light_end) identical = false; } else { if (light_find(&ht, hash(test), test) != light_end) identical = false; } } if (!identical) fail("internal test failed!", "true"); int check = light_selfcheck(&ht); if (check) fail("internal test failed!", "true"); } } light_destroy(&ht); footer(); } static void collision_test() { header(); struct light_core ht; light_create(&ht, light_extent_size, my_light_alloc, my_light_free, &extents_count, 0); std::vector vect; size_t count = 0; const size_t rounds = 100; const size_t start_limits = 20; for(size_t limits = start_limits; limits <= 2 * rounds; limits *= 10) { while (vect.size() < limits) vect.push_back(false); for (size_t i = 0; i < 
rounds; i++) { hash_value_t val = rand() % limits; hash_t h = hash(val); hash_t fnd = light_find(&ht, h * 1024, val); bool has1 = fnd != light_end; bool has2 = vect[val]; assert(has1 == has2); if (has1 != has2) { fail("find key failed!", "true"); return; } if (!has1) { count++; vect[val] = true; light_insert(&ht, h * 1024, val); } else { count--; vect[val] = false; light_delete(&ht, fnd); } if (count != ht.count) fail("count check failed!", "true"); bool identical = true; for (hash_value_t test = 0; test < limits; test++) { if (vect[test]) { if (light_find(&ht, hash(test) * 1024, test) == light_end) identical = false; } else { if (light_find(&ht, hash(test) * 1024, test) != light_end) identical = false; } } if (!identical) fail("internal test failed!", "true"); int check = light_selfcheck(&ht); if (check) fail("internal test failed!", "true"); } } light_destroy(&ht); footer(); } static void iterator_test() { header(); struct light_core ht; light_create(&ht, light_extent_size, my_light_alloc, my_light_free, &extents_count, 0); const size_t rounds = 1000; const size_t start_limits = 20; const size_t iterator_count = 16; struct light_iterator iterators[iterator_count]; for (size_t i = 0; i < iterator_count; i++) light_iterator_begin(&ht, iterators + i); size_t cur_iterator = 0; hash_value_t strage_thing = 0; for(size_t limits = start_limits; limits <= 2 * rounds; limits *= 10) { for (size_t i = 0; i < rounds; i++) { hash_value_t val = rand() % limits; hash_t h = hash(val); hash_t fnd = light_find(&ht, h, val); if (fnd == light_end) { light_insert(&ht, h, val); } else { light_delete(&ht, fnd); } hash_value_t *pval = light_iterator_get_and_next(&ht, iterators + cur_iterator); if (pval) strage_thing ^= *pval; if (!pval || (rand() % iterator_count) == 0) { if (rand() % iterator_count) { hash_value_t val = rand() % limits; hash_t h = hash(val); light_iterator_key(&ht, iterators + cur_iterator, h, val); } else { light_iterator_begin(&ht, iterators + cur_iterator); } } 
cur_iterator++; if (cur_iterator >= iterator_count) cur_iterator = 0; } } light_destroy(&ht); if (strage_thing >> 20) { printf("impossible!\n"); // prevent strage_thing to be optimized out } footer(); } static void iterator_freeze_check() { header(); const int test_data_size = 1000; hash_value_t comp_buf[test_data_size]; const int test_data_mod = 2000; srand(0); struct light_core ht; for (int i = 0; i < 10; i++) { light_create(&ht, light_extent_size, my_light_alloc, my_light_free, &extents_count, 0); int comp_buf_size = 0; int comp_buf_size2 = 0; for (int j = 0; j < test_data_size; j++) { hash_value_t val = rand() % test_data_mod; hash_t h = hash(val); light_insert(&ht, h, val); } struct light_iterator iterator; light_iterator_begin(&ht, &iterator); hash_value_t *e; while ((e = light_iterator_get_and_next(&ht, &iterator))) { comp_buf[comp_buf_size++] = *e; } struct light_iterator iterator1; light_iterator_begin(&ht, &iterator1); light_iterator_freeze(&ht, &iterator1); struct light_iterator iterator2; light_iterator_begin(&ht, &iterator2); light_iterator_freeze(&ht, &iterator2); for (int j = 0; j < test_data_size; j++) { hash_value_t val = rand() % test_data_mod; hash_t h = hash(val); light_insert(&ht, h, val); } int tested_count = 0; while ((e = light_iterator_get_and_next(&ht, &iterator1))) { if (*e != comp_buf[tested_count]) { fail("version restore failed (1)", "true"); } tested_count++; if (tested_count > comp_buf_size) { fail("version restore failed (2)", "true"); } } light_iterator_destroy(&ht, &iterator1); for (int j = 0; j < test_data_size; j++) { hash_value_t val = rand() % test_data_mod; hash_t h = hash(val); hash_t pos = light_find(&ht, h, val); if (pos != light_end) light_delete(&ht, pos); } tested_count = 0; while ((e = light_iterator_get_and_next(&ht, &iterator2))) { if (*e != comp_buf[tested_count]) { fail("version restore failed (3)", "true"); } tested_count++; if (tested_count > comp_buf_size) { fail("version restore failed (4)", "true"); } } 
light_destroy(&ht); } footer(); } int main(int, const char**) { srand(time(0)); simple_test(); collision_test(); iterator_test(); iterator_freeze_check(); if (extents_count != 0) fail("memory leak!", "true"); } tarantool_1.9.1.26.g63eb81e3c/test/unit/say.c0000664000000000000000000001575513306560010017113 0ustar rootroot#include #include #include #include "unit.h" #include "say.h" #include #include #include #include #include int parse_logger_type(const char *input) { enum say_logger_type type; int rc = say_parse_logger_type(&input, &type); if (rc == 0) switch (type) { case SAY_LOGGER_BOOT: note("type: boot"); break; case SAY_LOGGER_STDERR: note("type: stderr"); break; case SAY_LOGGER_FILE: note("type: file"); break; case SAY_LOGGER_PIPE: note("type: pipe"); break; case SAY_LOGGER_SYSLOG: note("type: syslog"); break; } note("next: %s", input); return rc; } int parse_syslog_opts(const char *input) { struct say_syslog_opts opts; if (say_parse_syslog_opts(input, &opts) == -1) { return -1; } if (opts.identity) note("identity: %s", opts.identity); if (opts.facility) note("facility: %i", opts.facility); say_free_syslog_opts(&opts); return 0; } static int format_func_custom(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { int total = 0; (void) log; (void) level; (void) filename; (void) line; (void) error; SNPRINT(total, snprintf, buf, len, "\"msg\" = \""); SNPRINT(total, vsnprintf, buf, len, format, ap); SNPRINT(total, snprintf, buf, len, "\"\n"); return total; } pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER; pthread_cond_t cond_sync = PTHREAD_COND_INITIALIZER; bool is_raised = false; int created_logs = 0; const char *tmp_dir; struct create_log { struct log logger; int id; }; static void * dummy_log(void *arg) { struct create_log *create_log = (struct create_log *) arg; char tmp_filename[30]; sprintf(tmp_filename, "%s/%i.log", tmp_dir, 
create_log->id); tt_pthread_mutex_lock(&mutex); log_create(&create_log->logger, tmp_filename, false); /* signal that log is created */ created_logs++; tt_pthread_cond_signal(&cond_sync); /* wait until rotate signal is raised */ while (!is_raised) tt_pthread_cond_wait(&cond, &mutex); created_logs--; if (created_logs == 0) pthread_cond_signal(&cond_sync); tt_pthread_mutex_unlock(&mutex); return NULL; } static void test_log_rotate() { char template[] = "/tmp/tmpdir.XXXXXX"; tmp_dir = mkdtemp(template); const int NUMBER_LOGGERS = 10; struct create_log *loggers = (struct create_log *) calloc(NUMBER_LOGGERS, sizeof(struct create_log)); if (loggers == NULL) { return; } int running = 0; for (int i = 0; i < NUMBER_LOGGERS; i++) { pthread_t thread; loggers[i].id = i; if (tt_pthread_create(&thread, NULL, dummy_log, (void *) &loggers[i]) >= 0) running++; } tt_pthread_mutex_lock(&mutex); /* wait loggers are created */ while (created_logs < running) tt_pthread_cond_wait(&cond_sync, &mutex); tt_pthread_mutex_unlock(&mutex); say_logrotate(NULL, NULL, 0); for (int i = 0; i < created_logs; i++) { log_destroy(&loggers[i].logger); } memset(loggers, '#', NUMBER_LOGGERS * sizeof(struct create_log)); free(loggers); is_raised = true; tt_pthread_cond_broadcast(&cond); tt_pthread_mutex_lock(&mutex); /* wait threads are finished */ while (created_logs > 0) tt_pthread_cond_wait(&cond_sync, &mutex); tt_pthread_mutex_unlock(&mutex); } static int main_f(va_list ap) { struct errinj *inj = errinj_by_name("ERRINJ_LOG_ROTATE"); inj->bparam = true; /* test on log_rotate signal handling */ test_log_rotate(); inj->bparam = false; ev_break(loop(), EVBREAK_ALL); return 0; } int main() { memory_init(); fiber_init(fiber_c_invoke); say_logger_init("/dev/null", S_INFO, 0, "plain", 0); plan(33); #define PARSE_LOGGER_TYPE(input, rc) \ ok(parse_logger_type(input) == rc, "%s", input) PARSE_LOGGER_TYPE("", 0); PARSE_LOGGER_TYPE("/dev/null", 0); PARSE_LOGGER_TYPE("|", 0); PARSE_LOGGER_TYPE("|/usr/bin/cronolog", 
0); PARSE_LOGGER_TYPE("file:", 0); PARSE_LOGGER_TYPE("file:instance.log", 0); PARSE_LOGGER_TYPE("pipe:", 0); PARSE_LOGGER_TYPE("pipe:gzip > instance.log.gz", 0); PARSE_LOGGER_TYPE("syslog:", 0); PARSE_LOGGER_TYPE("syslog:identity=", 0); PARSE_LOGGER_TYPE("unknown:", -1); PARSE_LOGGER_TYPE("unknown:example.org", -1); #define PARSE_SYSLOG_OPTS(input, rc) \ ok(parse_syslog_opts(input) == rc, "%s", input) PARSE_SYSLOG_OPTS("", 0); PARSE_SYSLOG_OPTS("identity=tarantool", 0); PARSE_SYSLOG_OPTS("facility=user", 0); PARSE_SYSLOG_OPTS("identity=xtarantoolx,facility=local1", 0); PARSE_SYSLOG_OPTS("identity=xtarantoolx,facility=kern", 0); PARSE_SYSLOG_OPTS("identity=xtarantoolx,facility=uucp", 0); PARSE_SYSLOG_OPTS("identity=xtarantoolx,facility=foo", -1); PARSE_SYSLOG_OPTS("facility=authpriv,identity=bar", 0); PARSE_SYSLOG_OPTS("invalid=", -1); PARSE_SYSLOG_OPTS("facility=local1,facility=local2", -1); PARSE_SYSLOG_OPTS("identity=foo,identity=bar", -1); char template[] = "/tmp/tmpdir.XXXXXX"; const char *tmp_dir = mkdtemp(template); if (tmp_dir == NULL) { diag("unit/say: failed to create temp dir: %s", strerror(errno)); return check_plan(); } char tmp_filename[30]; sprintf(tmp_filename, "%s/1.log", tmp_dir); struct log test_log; log_create(&test_log, tmp_filename, false); log_set_format(&test_log, say_format_plain); log_say(&test_log, 0, NULL, 0, NULL, "hello %s\n", "user"); log_set_format(&test_log, say_format_json); log_say(&test_log, 0, NULL, 0, NULL, "hello %s", "user"); log_set_format(&test_log, format_func_custom); log_say(&test_log, 0, NULL, 0, NULL, "hello %s", "user"); FILE* fd = fopen(tmp_filename, "r+"); const size_t len = 4096; char line[len]; if (fgets(line, len, fd) != NULL) { ok(strstr(line, "hello user") != NULL, "plain"); fgets(line, len, fd); } if (fgets(line, len, fd) != NULL) { ok(strstr(line, "\"message\": \"hello user\"") != NULL, "json"); } if (fgets(line, len, fd) != NULL) { ok(strstr(line, "\"msg\" = \"hello user\"") != NULL, "custom"); } 
log_destroy(&test_log); coio_init(); coio_enable(); struct fiber *test = fiber_new("loggers", main_f); if (test == NULL) { diag_log(); return check_plan(); } fiber_wakeup(test); ev_run(loop(), 0); /* * Ignore possible failure of log_create(). It may fail * connecting to /dev/log or its analogs. We need only * the format function here, as we change log.fd to file * descriptor. */ log_create(&test_log, "syslog:identity=tarantool,facility=local0", false); test_log.fd = fileno(fd); /* * redirect stderr to /dev/null in order to filter * it out from result file. */ ok(freopen("/dev/null", "w", stderr) != NULL, "freopen"); ok(strncmp(test_log.syslog_ident, "tarantool", 9) == 0, "parsed identity"); ok(test_log.syslog_facility == SYSLOG_LOCAL0, "parsed facility"); long before = ftell(fd); ok(before >= 0, "ftell"); ok(log_say(&test_log, 0, NULL, 0, NULL, "hello %s", "user") > 0, "log_say"); ok(fseek(fd, before, SEEK_SET) >= 0, "fseek"); if (fgets(line, len, fd) != NULL) { ok(strstr(line, "<131>") != NULL, "syslog line"); } log_destroy(&test_log); fiber_free(); memory_free(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/guard.cc0000664000000000000000000000214413306560010017550 0ustar rootroot#include "memory.h" #include "fiber.h" #include "unit.h" static void sigsegf_handler(int signo) { note("signal handler called"); exit(0); } static int __attribute__((noinline)) stack_break_f(char *ptr) { char block[2048]; char sum = 0; memset(block, 0xff, 2048); sum += block[block[4]]; ptrdiff_t stack_diff = ptr > block ? 
ptr - block : block - ptr; if (stack_diff < 65536) sum += stack_break_f(ptr); return sum; } static char stack_buf[SIGSTKSZ]; static int main_f(va_list ap) { stack_t stack; stack.ss_sp = stack_buf; stack.ss_size = SIGSTKSZ; stack.ss_flags = 0; sigaltstack(&stack, NULL); struct sigaction sa; sa.sa_handler = sigsegf_handler; sigemptyset(&sa.sa_mask); sa.sa_flags = SA_ONSTACK; sigaction(SIGSEGV, &sa, NULL); sigaction(SIGBUS, &sa, NULL); int res = stack_break_f((char *)&stack); ev_break(loop(), EVBREAK_ALL); return res; } int main() { memory_init(); fiber_init(fiber_cxx_invoke); struct fiber *fmain = fiber_new_xc("main", main_f); fiber_wakeup(fmain); ev_run(loop(), 0); fiber_free(); memory_free(); fail("signal handler was not executed", ""); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_mem.c0000664000000000000000000002051313306565107017613 0ustar rootroot#include #include "memory.h" #include "fiber.h" #include "vy_iterators_helper.h" static void test_basic(void) { header(); plan(12); /* Create key_def */ uint32_t fields[] = { 0 }; uint32_t types[] = { FIELD_TYPE_UNSIGNED }; struct key_def *key_def = box_key_def_new(fields, types, 1); assert(key_def != NULL); struct vy_mem *mem = create_test_mem(key_def); is(mem->min_lsn, INT64_MAX, "mem->min_lsn on empty mem"); is(mem->max_lsn, -1, "mem->max_lsn on empty mem"); const struct vy_stmt_template stmts[] = { STMT_TEMPLATE(100, REPLACE, 1), STMT_TEMPLATE(101, REPLACE, 1), STMT_TEMPLATE(102, REPLACE, 1), STMT_TEMPLATE(103, REPLACE, 1), STMT_TEMPLATE(104, REPLACE, 1) }; /* Check min/max lsn */ const struct tuple *stmt = vy_mem_insert_template(mem, &stmts[0]); is(mem->min_lsn, INT64_MAX, "mem->min_lsn after prepare"); is(mem->max_lsn, -1, "mem->max_lsn after prepare"); vy_mem_commit_stmt(mem, stmt); is(mem->min_lsn, 100, "mem->min_lsn after commit"); is(mem->max_lsn, 100, "mem->max_lsn after commit"); /* Check vy_mem_older_lsn */ const struct tuple *older = stmt; stmt = vy_mem_insert_template(mem, &stmts[1]); 
is(vy_mem_older_lsn(mem, stmt), older, "vy_mem_older_lsn 1"); is(vy_mem_older_lsn(mem, older), NULL, "vy_mem_older_lsn 2"); vy_mem_commit_stmt(mem, stmt); /* Check rollback */ const struct tuple *olderolder = stmt; older = vy_mem_insert_template(mem, &stmts[2]); stmt = vy_mem_insert_template(mem, &stmts[3]); is(vy_mem_older_lsn(mem, stmt), older, "vy_mem_rollback 1"); vy_mem_rollback_stmt(mem, older); is(vy_mem_older_lsn(mem, stmt), olderolder, "vy_mem_rollback 2"); /* Check version */ stmt = vy_mem_insert_template(mem, &stmts[4]); is(mem->version, 6, "vy_mem->version") vy_mem_commit_stmt(mem, stmt); is(mem->version, 6, "vy_mem->version") /* Clean up */ vy_mem_delete(mem); key_def_delete(key_def); fiber_gc(); footer(); check_plan(); } static void test_iterator_restore_after_insertion() { header(); plan(1); /* Create key_def */ uint32_t fields[] = { 0 }; uint32_t types[] = { FIELD_TYPE_UNSIGNED }; struct key_def *key_def = box_key_def_new(fields, types, 1); assert(key_def != NULL); /* Create format */ struct tuple_format *format = tuple_format_new(&vy_tuple_format_vtab, &key_def, 1, 0, NULL, 0, NULL); assert(format != NULL); tuple_format_ref(format); /* Create lsregion */ struct lsregion lsregion; struct slab_cache *slab_cache = cord_slab_cache(); lsregion_create(&lsregion, slab_cache->arena); struct tuple *select_key = vy_stmt_new_select(format, "", 0); uint64_t restore_on_value = 20; uint64_t restore_on_value_reverse = 60; char data[16]; char *end = data; end = mp_encode_array(end, 1); end = mp_encode_uint(end, restore_on_value); struct tuple *restore_on_key = vy_stmt_new_replace(format, data, end); vy_stmt_set_lsn(restore_on_key, 100); end = data; end = mp_encode_array(end, 1); end = mp_encode_uint(end, restore_on_value_reverse); struct tuple *restore_on_key_reverse = vy_stmt_new_replace(format, data, end); vy_stmt_set_lsn(restore_on_key_reverse, 100); bool wrong_output = false; int i_fail = 0; for (uint64_t i = 0; i < ((1000ULL * 3) << 2); i++) { uint64_t v = i; 
bool direct = !(v & 1); v >>= 1; bool has40_50 = v & 1; v >>= 1; bool has40_150 = v & 1; v >>= 1; const size_t possible_count = 9; uint64_t middle_value = possible_count / 2 * 10; /* 40 */ bool hasX_100[possible_count]; /* X = 0,10,20,30,40,50,60,70,80 */ bool addX_100[possible_count]; /* X = 0,10,20,30,40,50,60,70,80 */ bool add_smth = false; for (size_t j = 0; j < possible_count; j++) { uint64_t trinity = v % 3; v /= 3; hasX_100[j] = trinity == 1; addX_100[j] = trinity == 2; add_smth = add_smth || addX_100[j]; } if (!add_smth) continue; uint64_t expected_count = 0; uint64_t expected_values[possible_count]; int64_t expected_lsns[possible_count]; if (direct) { for (size_t j = 0; j < possible_count; j++) { if (hasX_100[j]) { expected_values[expected_count] = j * 10; expected_lsns[expected_count] = 100; expected_count++; } else if (j == possible_count / 2 && has40_50) { expected_values[expected_count] = middle_value; expected_lsns[expected_count] = 50; expected_count++; } } } else { for (size_t k = possible_count; k > 0; k--) { size_t j = k - 1; if (hasX_100[j]) { expected_values[expected_count] = j * 10; expected_lsns[expected_count] = 100; expected_count++; } else if (j == possible_count / 2 && has40_50) { expected_values[expected_count] = middle_value; expected_lsns[expected_count] = 50; expected_count++; } } } /* Create mem */ struct vy_mem *mem = create_test_mem(key_def); if (has40_50) { const struct vy_stmt_template temp = STMT_TEMPLATE(50, REPLACE, 40); vy_mem_insert_template(mem, &temp); } if (has40_150) { const struct vy_stmt_template temp = STMT_TEMPLATE(150, REPLACE, 40); vy_mem_insert_template(mem, &temp); } for (size_t j = 0; j < possible_count; j++) { if (hasX_100[j]) { const struct vy_stmt_template temp = STMT_TEMPLATE(100, REPLACE, j * 10); vy_mem_insert_template(mem, &temp); } } struct vy_mem_iterator itr; struct vy_mem_iterator_stat stats = {0, {0, 0}}; struct vy_read_view rv; rv.vlsn = 100; const struct vy_read_view *prv = &rv; 
vy_mem_iterator_open(&itr, &stats, mem, direct ? ITER_GE : ITER_LE, select_key, &prv); struct tuple *t; int rc = vy_mem_iterator_next_key(&itr, &t); assert(rc == 0); size_t j = 0; while (t != NULL) { if (j >= expected_count) { wrong_output = true; break; } uint32_t val = 42; tuple_field_u32(t, 0, &val); if (val != expected_values[j] || vy_stmt_lsn(t) != expected_lsns[j]) { wrong_output = true; break; } j++; if (direct && val >= middle_value) break; else if(!direct && val <= middle_value) break; int rc = vy_mem_iterator_next_key(&itr, &t); assert(rc == 0); } if (t == NULL && j != expected_count) wrong_output = true; if (wrong_output) { i_fail = i; break; } for (size_t j = 0; j < possible_count; j++) { if (addX_100[j]) { const struct vy_stmt_template temp = STMT_TEMPLATE(100, REPLACE, j * 10); vy_mem_insert_template(mem, &temp); } } expected_count = 0; if (direct) { for (size_t j = 0; j < possible_count; j++) { if (j * 10 <= restore_on_value) continue; if (hasX_100[j] || addX_100[j]) { expected_values[expected_count] = j * 10; expected_lsns[expected_count] = 100; expected_count++; } else if (j == possible_count / 2 && has40_50) { expected_values[expected_count] = middle_value; expected_lsns[expected_count] = 50; expected_count++; } } } else { for (size_t k = possible_count; k > 0; k--) { size_t j = k - 1; if (j * 10 >= restore_on_value_reverse) continue; if (hasX_100[j] || addX_100[j]) { expected_values[expected_count] = j * 10; expected_lsns[expected_count] = 100; expected_count++; } else if (j == possible_count / 2 && has40_50) { expected_values[expected_count] = middle_value; expected_lsns[expected_count] = 50; expected_count++; } } } if (direct) rc = vy_mem_iterator_restore(&itr, restore_on_key, &t); else rc = vy_mem_iterator_restore(&itr, restore_on_key_reverse, &t); j = 0; while (t != NULL) { if (j >= expected_count) { wrong_output = true; break; } uint32_t val = 42; tuple_field_u32(t, 0, &val); if (val != expected_values[j] || vy_stmt_lsn(t) != 
expected_lsns[j]) { wrong_output = true; break; } j++; int rc = vy_mem_iterator_next_key(&itr, &t); assert(rc == 0); } if (j != expected_count) wrong_output = true; if (wrong_output) { i_fail = i; break; } vy_mem_delete(mem); lsregion_gc(&lsregion, 2); } ok(!wrong_output, "check wrong_output %d", i_fail); /* Clean up */ tuple_unref(select_key); tuple_unref(restore_on_key); tuple_unref(restore_on_key_reverse); tuple_format_unref(format); lsregion_destroy(&lsregion); key_def_delete(key_def); fiber_gc(); check_plan(); footer(); } int main(int argc, char *argv[]) { vy_iterator_C_test_init(0); test_basic(); test_iterator_restore_after_insertion(); vy_iterator_C_test_finish(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/cbus.result0000664000000000000000000000100513306560010020326 0ustar rootroot *** main *** 1..6 start main fiber start main loop start worker *** Test single message *** flush event, counter = 1 ok 1 - 1 flush after test_single_msg *** Test several messages *** ok 2 - no flush during loop ok 3 - no flush during loop ok 4 - no flush during loop flush event, counter = 1 ok 5 - 1 flush after test_several_messages *** Test forced flush *** flush event, counter = 1 break main fiber and finish test ok 6 - flushed_cnt at the end of the test finish worker finish main loop *** main: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_stress.cc0000664000000000000000000000143013306560010021135 0ustar rootroot#include "memory.h" #include "fiber.h" enum { ITERATIONS = 50000, FIBERS = 100 }; static int yield_f(va_list ap) { for (int i = 0; i < ITERATIONS; i++) { fiber_wakeup(fiber()); fiber_yield(); } return 0; } static int benchmark_f(va_list ap) { struct fiber *fibers[FIBERS]; for (int i = 0; i < FIBERS; i++) { fibers[i] = fiber_new_xc("yield-wielder", yield_f); fiber_wakeup(fibers[i]); } /** Wait for fibers to die. 
*/ for (int i = 0; i < FIBERS; i++) { while (fibers[i]->fid > 0) fiber_sleep(0.001); } ev_break(loop(), EVBREAK_ALL); return 0; } int main() { memory_init(); fiber_init(fiber_cxx_invoke); struct fiber *benchmark = fiber_new_xc("benchmark", benchmark_f); fiber_wakeup(benchmark); ev_run(loop(), 0); fiber_free(); memory_free(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/uri.c0000664000000000000000000002172713306560010017112 0ustar rootroot#include "unit.h" #include #include int test(const char *s, const char *scheme, const char *login, const char *password, const char *host, const char *service, const char *path, const char *query, const char *fragment, int host_hint) { plan(19); struct uri uri; is(uri_parse(&uri, s), 0, "%s: parse", s); /* fprintf(stdout, #key ": %p %d %.*s\n", uri.key, (int) uri.key ## _len, (int) uri.key ## _len, uri.key); */ #define chk(key) do { \ ok((key && uri.key && strlen(key) == uri.key ## _len && \ memcmp(key, uri.key, uri.key ## _len) == 0) || \ (!key && !uri.key), "%s: " #key, s); } while (0); chk(scheme); chk(login); chk(password); chk(host); chk(service); chk(path); chk(query); chk(fragment); is(uri.host_hint, host_hint, "%s: host_hint", s); char str1[1024]; uri_format(str1, sizeof(str1), &uri, true); is(uri_parse(&uri, s), 0, "%s: parse", s); chk(scheme); chk(login); chk(password); chk(host); chk(service); chk(path); chk(query); chk(fragment); #undef chk return check_plan(); } int test_invalid() { plan(2); /* Invalid */ struct uri u; isnt(uri_parse(&u, ""), 0 , "empty is invalid"); isnt(uri_parse(&u, "://"), 0 , ":// is invalid"); return check_plan(); } int main(void) { plan(63); /* General */ test("host", NULL, NULL, NULL, "host", NULL, NULL, NULL, NULL, 0); test("host/", NULL, NULL, NULL, "host", NULL, "/", NULL, NULL, 0); test("host/path1/path2/path3", NULL, NULL, NULL, "host", NULL, "/path1/path2/path3", NULL, NULL, 0); test("host/path1/path2/path3?q1=v1&q2=v2#fragment", NULL, NULL, NULL, "host", NULL, 
"/path1/path2/path3", "q1=v1&q2=v2", "fragment", 0); test("host:service", NULL, NULL, NULL, "host", "service", NULL, NULL, NULL, 0); test("host:service/", NULL, NULL, NULL, "host", "service", "/", NULL, NULL, 0); test("host:service/path1/path2/path3", NULL, NULL, NULL, "host", "service", "/path1/path2/path3", NULL, NULL, 0); test("host:service/path1/path2/path3?q1=v1&q2=v2#fragment", NULL, NULL, NULL, "host", "service", "/path1/path2/path3", "q1=v1&q2=v2", "fragment", 0); test("login@host", NULL, "login", NULL, "host", NULL, NULL, NULL, NULL, 0); test("login@host/", NULL, "login", NULL, "host", NULL, "/", NULL, NULL, 0); test("login@host/path1/path2/path3", NULL, "login", NULL, "host", NULL, "/path1/path2/path3", NULL, NULL, 0); test("login@host/path1/path2/path3?q1=v1&q2=v2#fragment", NULL, "login", NULL, "host", NULL, "/path1/path2/path3", "q1=v1&q2=v2", "fragment", 0); test("login:password@host", NULL, "login", "password", "host", NULL, NULL, NULL, NULL, 0); test("login:@host", NULL, "login", "", "host", NULL, NULL, NULL, NULL, 0); test("login:password@host/", NULL, "login", "password", "host", NULL, "/", NULL, NULL, 0); test("login:password@host/path1/path2/path3", NULL, "login", "password", "host", NULL, "/path1/path2/path3", NULL, NULL, 0); test("login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment", NULL, "login", "password", "host", NULL, "/path1/path2/path3", "q1=v1&q2=v2", "fragment", 0); test("login:password@host:service", NULL, "login", "password", "host", "service", NULL, NULL, NULL, 0); test("login:password@host:service/", NULL, "login", "password", "host", "service", "/", NULL, NULL, 0); test("login:password@host:service/path1/path2/path3", NULL, "login", "password", "host", "service", "/path1/path2/path3", NULL, NULL, 0); test("login:password@host:service/path1/path2/path3?q1=v1&q2=v2" "#fragment", NULL, "login", "password", "host", "service", "/path1/path2/path3", "q1=v1&q2=v2", "fragment", 0); test("scheme://login:password@host:service", 
"scheme", "login", "password", "host", "service", NULL, NULL, NULL, 0); test("scheme://login:password@host:service/", "scheme", "login", "password", "host", "service", "/", NULL, NULL, 0); test("scheme://login:password@host:service/path1/path2/path3", "scheme", "login", "password", "host", "service", "/path1/path2/path3", NULL, NULL, 0); test("scheme://login:password@host:service/path1/path2/path3?" "q1=v1&q2=v2#fragment", "scheme", "login", "password", "host", "service", "/path1/path2/path3", "q1=v1&q2=v2", "fragment", 0); test("host/path", NULL, NULL, NULL, "host", NULL, "/path", NULL, NULL, 0); test("host//", NULL, NULL, NULL, "host", NULL, "//", NULL, NULL, 0); test("host//path", NULL, NULL, NULL, "host", NULL, "//path", NULL, NULL, 0); test("host/;abc?q", NULL, NULL, NULL, "host", NULL, "/;abc", "q", NULL, 0); test("scheme://login:password@host:service/@path1/:path2?" "q1=v1&q2=v2#fragment", "scheme", "login", "password", "host", "service", "/@path1/:path2", "q1=v1&q2=v2", "fragment", 0); test("host/~user", NULL, NULL, NULL, "host", NULL, "/~user", NULL, NULL, 0); /* Host */ test("try.tarantool.org", NULL, NULL, NULL, "try.tarantool.org", NULL, NULL, NULL, NULL, 0); test("try.tarantool.org", NULL, NULL, NULL, "try.tarantool.org", NULL, NULL, NULL, NULL, 0); test("www.llanfairpwllgwyngyllgogerychwyrndrobwyll-" "llantysiliogogogoch.com", NULL, NULL, NULL, "www.llanfairpwllgwyngyllgogerychwyrndrobwyll-" "llantysiliogogogoch.com", NULL, NULL, NULL, NULL, 0); /* IPv4 / IPv6 addreses */ test("0.0.0.0", NULL, NULL, NULL, "0.0.0.0", NULL, NULL, NULL, NULL, 1); test("127.0.0.1", NULL, NULL, NULL, "127.0.0.1", NULL, NULL, NULL, NULL, 1); test("127.0.0.1:3313", NULL, NULL, NULL, "127.0.0.1", "3313", NULL, NULL, NULL, 1); test("scheme://login:password@127.0.0.1:3313", "scheme", "login", "password", "127.0.0.1", "3313", NULL, NULL, NULL, 1); test("[2001::11a3:09d7::1]", NULL, NULL, NULL, "2001::11a3:09d7::1", NULL, NULL, NULL, NULL, 2); 
test("scheme://login:password@[2001::11a3:09d7::1]:3313", "scheme", "login", "password", "2001::11a3:09d7::1", "3313", NULL, NULL, NULL, 2); test("scheme://[2001:0db8:11a3:09d7::1]", "scheme", NULL, NULL, "2001:0db8:11a3:09d7::1", NULL, NULL, NULL, NULL, 2); test("[::ffff:11.2.3.4]", NULL, NULL, NULL, "::ffff:11.2.3.4", NULL, NULL, NULL, NULL, 2); test("scheme://login:password@[::ffff:11.2.3.4]:3313", "scheme", "login", "password", "::ffff:11.2.3.4", "3313", NULL, NULL, NULL, 2); /* Port */ test("1", NULL, NULL, NULL, NULL, "1", NULL, NULL, NULL, 0); test("10", NULL, NULL, NULL, NULL, "10", NULL, NULL, NULL, 0); test("331", NULL, NULL, NULL, NULL, "331", NULL, NULL, NULL,0); test("3313", NULL, NULL, NULL, NULL, "3313", NULL, NULL, NULL, 0); /* Unix */ test("/", NULL, NULL, NULL, "unix/", "/", NULL, NULL, NULL, 3); test("/path1/path2/path3", NULL, NULL, NULL, "unix/", "/path1/path2/path3", NULL, NULL, NULL, 3); test("login:password@/path1/path2/path3", NULL, "login", "password", "unix/", "/path1/path2/path3", NULL, NULL, NULL, 3); test("unix/:/path1/path2/path3", NULL, NULL, NULL, "unix/", "/path1/path2/path3", NULL, NULL, NULL, 3); test("unix/:/path1/path2/path3:", NULL, NULL, NULL, "unix/", "/path1/path2/path3", NULL, NULL, NULL, 3); test("unix/:/path1/path2/path3:/", NULL, NULL, NULL, "unix/", "/path1/path2/path3", "/", NULL, NULL, 3); test("unix/:/path1/path2/path3?q1=v1&q2=v2#fragment", NULL, NULL, NULL, "unix/", "/path1/path2/path3", NULL, "q1=v1&q2=v2", "fragment", 3); test("unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment", NULL, NULL, NULL, "unix/", "/path1/path2/path3", "/p1/p2", "q1=v1&q2=v2", "fragment", 3); /* fixed grammar #2933 */ test("login:password@unix/:/path1/path2/path3", NULL, "login", "password", "unix/", "/path1/path2/path3", NULL, NULL, NULL, 3); test("login:password@unix/:/path1/path2/path3:", NULL, "login", "password", "unix/", "/path1/path2/path3", NULL, NULL, NULL, 3); 
test("scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3", "scheme", "login", "password", "unix/", "/tmp/unix.sock", "/path1/path2/path3", NULL, NULL, 3); test("unix/:./relative/path.sock:/test", NULL, NULL, NULL, "unix/", "./relative/path.sock", "/test", NULL, NULL, 3); test("scheme://unix/:./relative/path.sock:/test", "scheme", NULL, NULL, "unix/", "./relative/path.sock", "/test", NULL, NULL, 3); /* Web */ test("http://tarantool.org/dist/master/debian/pool/main/t/tarantool/" "tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz", "http", NULL, NULL, "tarantool.org", NULL, "/dist/master/debian/pool/main/t/tarantool/" "tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz", NULL, NULL, 0); test("https://www.google.com/search?" "safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool" "&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl", "https", NULL, NULL, "www.google.com", NULL, "/search", "safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool" "&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl", NULL, 0); test_invalid(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/heap.result0000664000000000000000000000151613306560010020316 0ustar rootroot *** test_insert_1_to_3 *** *** test_insert_1_to_3: done *** *** test_insert_3_to_1 *** *** test_insert_3_to_1: done *** *** test_insert_50_to_150_mod_100 *** *** test_insert_50_to_150_mod_100: done *** *** test_insert_many_random *** *** test_insert_many_random: done *** *** test_insert_10_to_1_pop *** *** test_insert_10_to_1_pop: done *** *** test_insert_many_pop_many_random *** *** test_insert_many_pop_many_random: done *** *** test_insert_pop_workload *** *** test_insert_pop_workload: done *** *** test_pop_last *** *** test_pop_last: done *** *** test_insert_update_workload *** *** test_insert_update_workload: done *** *** test_random_delete_workload *** *** test_random_delete_workload: done *** *** test_delete_last_node *** *** test_delete_last_node: done 
*** *** test_heapify *** *** test_heapify: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/coll.cpp0000664000000000000000000001131613306565107017611 0ustar rootroot#include "box/coll.h" #include #include #include #include #include #include #include #include #include #include #include "third_party/PMurHash.h" using namespace std; enum { HASH_SEED = 13 }; struct comp { struct coll *coll; comp(struct coll *coll_) : coll(coll_) {} bool operator()(const char *a, const char *b) const { int cmp = coll->cmp(a, strlen(a), b, strlen(b), coll); return cmp < 0; } }; void test_sort_strings(vector &strings, struct coll *coll) { sort(strings.begin(), strings.end(), comp(coll)); cout << strings[0] << endl; for (size_t i = 1; i < strings.size(); i++) { int cmp = coll->cmp(strings[i], strlen(strings[i]), strings[i - 1], strlen(strings[i - 1]), coll); cout << strings[i] << (cmp < 0 ? " LESS" : cmp > 0 ? " GREATER " : " EQUAL") << endl; } }; void manual_test() { cout << "\t*** " << __func__ << " ***" << endl; vector strings; struct coll_def def; memset(&def, 0, sizeof(def)); def.locale = "ru_RU"; def.locale_len = strlen(def.locale); def.type = COLL_TYPE_ICU; def.name = "test"; def.name_len = strlen(def.name); struct coll *coll; cout << " -- default ru_RU -- " << endl; coll = coll_new(&def); assert(coll != NULL); strings = {"Б", "бб", "е", "ЕЕЕЕ", "ё", "Ё", "и", "И", "123", "45" }; test_sort_strings(strings, coll); coll_delete(coll); cout << " -- --||-- + upper first -- " << endl; def.icu.case_first = COLL_ICU_CF_UPPER_FIRST; coll = coll_new(&def); assert(coll != NULL); strings = {"Б", "бб", "е", "ЕЕЕЕ", "ё", "Ё", "и", "И", "123", "45" }; test_sort_strings(strings, coll); coll_delete(coll); cout << " -- --||-- + lower first -- " << endl; def.icu.case_first = COLL_ICU_CF_LOWER_FIRST; coll = coll_new(&def); assert(coll != NULL); strings = {"Б", "бб", "е", "ЕЕЕЕ", "ё", "Ё", "и", "И", "123", "45" }; test_sort_strings(strings, coll); coll_delete(coll); cout << " -- --||-- + secondary 
strength + numeric -- " << endl; def.icu.strength = COLL_ICU_STRENGTH_SECONDARY; def.icu.numeric_collation = COLL_ICU_ON; coll = coll_new(&def); assert(coll != NULL); strings = {"Б", "бб", "е", "ЕЕЕЕ", "ё", "Ё", "и", "И", "123", "45" }; test_sort_strings(strings, coll); coll_delete(coll); cout << " -- --||-- + case level -- " << endl; def.icu.case_level = COLL_ICU_ON; coll = coll_new(&def); assert(coll != NULL); strings = {"Б", "бб", "е", "ЕЕЕЕ", "ё", "Ё", "и", "И", "123", "45" }; test_sort_strings(strings, coll); coll_delete(coll); cout << " -- en_EN -- " << endl; def.locale = "en_EN-EN"; def.locale_len = strlen(def.locale); coll = coll_new(&def); assert(coll != NULL); strings = {"aa", "bb", "cc", "ch", "dd", "gg", "hh", "ii" }; test_sort_strings(strings, coll); coll_delete(coll); cout << " -- cs_CZ -- " << endl; def.locale = "cs_CZ"; def.locale_len = strlen(def.locale); coll = coll_new(&def); assert(coll != NULL); strings = {"aa", "bb", "cc", "ch", "dd", "gg", "hh", "ii" }; test_sort_strings(strings, coll); coll_delete(coll); cout << "\t*** " << __func__ << ": done ***" << endl; } unsigned calc_hash(const char *str, struct coll *coll) { size_t str_len = strlen(str); uint32_t h = HASH_SEED; uint32_t carry = 0; uint32_t actual_len = coll->hash(str, str_len, &h, &carry, coll); return PMurHash32_Result(h, carry, actual_len); } void hash_test() { cout << "\t*** " << __func__ << " ***" << endl; struct coll_def def; memset(&def, 0, sizeof(def)); def.locale = "ru_RU"; def.locale_len = strlen(def.locale); def.type = COLL_TYPE_ICU; def.name = "test"; def.name_len = strlen(def.name); struct coll *coll; /* Case sensitive */ coll = coll_new(&def); assert(coll != NULL); cout << "Case sensitive" << endl; cout << (calc_hash("ае", coll) != calc_hash("аё", coll) ? "OK" : "Fail") << endl; cout << (calc_hash("ае", coll) != calc_hash("аЕ", coll) ? "OK" : "Fail") << endl; cout << (calc_hash("аЕ", coll) != calc_hash("аё", coll) ? 
"OK" : "Fail") << endl; coll_delete(coll); /* Case insensitive */ def.icu.strength = COLL_ICU_STRENGTH_SECONDARY; coll = coll_new(&def); assert(coll != NULL); cout << "Case insensitive" << endl; cout << (calc_hash("ае", coll) != calc_hash("аё", coll) ? "OK" : "Fail") << endl; cout << (calc_hash("ае", coll) == calc_hash("аЕ", coll) ? "OK" : "Fail") << endl; cout << (calc_hash("аЕ", coll) != calc_hash("аё", coll) ? "OK" : "Fail") << endl; coll_delete(coll); cout << "\t*** " << __func__ << ": done ***" << endl; } int main(int, const char**) { memory_init(); fiber_init(fiber_c_invoke); manual_test(); hash_test(); fiber_free(); memory_free(); }tarantool_1.9.1.26.g63eb81e3c/test/unit/mhash.result0000664000000000000000000000021613306560010020475 0ustar rootroot *** mhash_int32_id_test *** *** mhash_int32_id_test: done *** *** mhash_int32_collision_test *** *** mhash_int32_collision_test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_stress.result0000664000000000000000000000021413306560010021743 0ustar rootroot *** test_rope_stress_small *** *** test_rope_stress_small: done *** *** test_rope_stress_large *** *** test_rope_stress_large: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/column_mask.result0000664000000000000000000000152213306560010021706 0ustar rootroot *** main *** 1..27 ok 1 - check result length ok 2 - tuple update is correct ok 3 - column_mask is correct ok 4 - check result length ok 5 - tuple update is correct ok 6 - column_mask is correct ok 7 - check result length ok 8 - tuple update is correct ok 9 - column_mask is correct ok 10 - check result length ok 11 - tuple update is correct ok 12 - column_mask is correct ok 13 - check result length ok 14 - tuple update is correct ok 15 - column_mask is correct ok 16 - check result length ok 17 - tuple update is correct ok 18 - column_mask is correct ok 19 - check result length ok 20 - tuple update is correct ok 21 - column_mask is correct ok 22 - check result length ok 23 - tuple update is correct ok 24 
- column_mask is correct ok 25 - check result length ok 26 - tuple update is correct ok 27 - column_mask is correct *** main: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_stress.result0000664000000000000000000000000013306560010022056 0ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/unit/say.result0000664000000000000000000000243013306560010020171 0ustar rootroot1..33 # type: file # next: ok 1 - # type: file # next: /dev/null ok 2 - /dev/null # type: pipe # next: ok 3 - | # type: pipe # next: /usr/bin/cronolog ok 4 - |/usr/bin/cronolog # type: file # next: ok 5 - file: # type: file # next: instance.log ok 6 - file:instance.log # type: pipe # next: ok 7 - pipe: # type: pipe # next: gzip > instance.log.gz ok 8 - pipe:gzip > instance.log.gz # type: syslog # next: ok 9 - syslog: # type: syslog # next: identity= ok 10 - syslog:identity= # next: unknown: ok 11 - unknown: # next: unknown:example.org ok 12 - unknown:example.org # facility: 24 ok 13 - # identity: tarantool # facility: 24 ok 14 - identity=tarantool # facility: 1 ok 15 - facility=user # identity: xtarantoolx # facility: 17 ok 16 - identity=xtarantoolx,facility=local1 # identity: xtarantoolx ok 17 - identity=xtarantoolx,facility=kern # identity: xtarantoolx # facility: 8 ok 18 - identity=xtarantoolx,facility=uucp ok 19 - identity=xtarantoolx,facility=foo # identity: bar # facility: 10 ok 20 - facility=authpriv,identity=bar ok 21 - invalid= ok 22 - facility=local1,facility=local2 ok 23 - identity=foo,identity=bar ok 24 - plain ok 25 - json ok 26 - custom ok 27 - freopen ok 28 - parsed identity ok 29 - parsed facility ok 30 - ftell ok 31 - log_say ok 32 - fseek ok 33 - syslog line tarantool_1.9.1.26.g63eb81e3c/test/unit/heap.c0000664000000000000000000003017313306560010017223 0ustar rootroot#include #include #include #include #include #include "trivia/util.h" #include "unit.h" #define HEAP_FORWARD_DECLARATION #include "salad/heap.h" #undef HEAP_FORWARD_DECLARATION const uint32_t TEST_CASE_SIZE = 1000; 
struct test_type { uint32_t val1; uint32_t val2; char c; struct heap_node node; }; /* If set, order by test_type->val2, otherwise by test_type->val1. */ static bool order_by_val2; int test_type_less(const heap_t *heap, const struct heap_node *a, const struct heap_node *b) { const struct test_type *left = (struct test_type *)((char *)a - offsetof(struct test_type, node)); const struct test_type *right = (struct test_type *)((char *)b - offsetof(struct test_type, node)); if (order_by_val2) return left->val2 < right->val2; return left->val1 < right->val1; } #define HEAP_NAME test_heap #define HEAP_LESS(h, a, b) test_type_less(h, a, b) #include "salad/heap.h" void free_all_nodes(heap_t *p_heap) { struct test_type *value; for (heap_off_t i = 0; i < p_heap->size; ++i) { value = (struct test_type *) ((char *)p_heap->harr[i] - offsetof(struct test_type, node)); free(value); } } static void test_insert_1_to_3() { header(); struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); for (uint32_t i = 0; i < 4; ++i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = i; test_heap_insert(&heap, &value->node); root_value = container_of(test_heap_top(&heap), struct test_type, node); if (root_value->val1 != 0) { fail("check that min.val1 is incorrect", "root_value->val1 != 0"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } } free_all_nodes(&heap); test_heap_destroy(&heap); footer(); } static void test_insert_3_to_1() { header(); struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); for (uint32_t i = 3; i > 0; --i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = i; test_heap_insert(&heap, &value->node); root_value = container_of(test_heap_top(&heap), struct test_type, node); if (root_value->val1 != i) { fail("check that min.val1 is incorrect", "root_value->val1 != i"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", 
"test_heap_check(&heap)"); } } free_all_nodes(&heap); test_heap_destroy(&heap); footer(); } static void test_insert_50_to_150_mod_100() { header(); struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); for (uint32_t i = 50; i < 150; ++i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = i % 100; test_heap_insert(&heap, &value->node); root_value = container_of(test_heap_top(&heap), struct test_type, node); if (i < 100 && root_value->val1 != 50) { fail("min.val1 is incorrect", "i < 100 && root_value->val1 != 50"); } if (i >= 100 && root_value->val1 != 0) { fail("min.val1 is incorrect", "i >= 100 && root_value->val1 != 0"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } } for (int i = 0; i < 100; ++i) { root_value = container_of(test_heap_top(&heap), struct test_type, node); test_heap_pop(&heap); free(root_value); } test_heap_destroy(&heap); footer(); } static void test_insert_many_random() { header(); uint32_t ans = UINT_MAX; struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); for (uint32_t i = 0; i < TEST_CASE_SIZE; ++i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = rand(); ans = (value->val1 < ans ? 
value->val1 : ans); test_heap_insert(&heap, &value->node); root_value = container_of(test_heap_top(&heap), struct test_type, node); if (root_value->val1 != ans) { fail("min.val1 is incorrect", "root_value->val1 != ans"); } if (heap.size != i + 1) { fail("check that size is correct failed", "root->size != i + 2"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } } free_all_nodes(&heap); test_heap_destroy(&heap); footer(); } static void test_insert_10_to_1_pop() { header(); struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); for (uint32_t i = 10; i > 0; --i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = i; test_heap_insert(&heap, &value->node); root_value = container_of(test_heap_top(&heap), struct test_type, node); if (root_value->val1 != i) { fail("check that min.val1 is correct failed", "root_value->val1 != i"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } } for (uint32_t i = 1; i <= 10; ++i) { root_value = container_of(test_heap_top(&heap), struct test_type, node); test_heap_pop(&heap); if (root_value->val1 != i) { fail("check that min.val1 is correct failed", "root_value->val1 != i"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } free(root_value); } test_heap_destroy(&heap); footer(); } int uint32_compare(const void *a, const void *b) { const uint32_t *ua = (const uint32_t *)a; const uint32_t *ub = (const uint32_t *)b; if (*ua < *ub) { return -1; } else if (*ua > *ub) { return 1; } return 0; } static void test_insert_many_pop_many_random() { header(); uint32_t ans = UINT_MAX; struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); uint32_t keys_it = 0; uint32_t *keys = (uint32_t *)malloc(sizeof(uint32_t) * TEST_CASE_SIZE); if (keys == NULL) { fail("keys == NULL", "fail to alloc memory for keys array"); } for (uint32_t i = 0; i < 
TEST_CASE_SIZE; ++i) { value = (struct test_type *)malloc(sizeof(struct test_type)); keys[keys_it++] = value->val1 = rand(); ans = (value->val1 < ans ? value->val1 : ans); test_heap_insert(&heap, &value->node); root_value = container_of(test_heap_top(&heap), struct test_type, node); if (root_value->val1 != ans) { fail("check that min.val1 is correct failed", "root_value->val1 != ans"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } if (heap.size != i + 1) { fail("check that size is correct", "heap.size != i + 1"); } } qsort(keys, TEST_CASE_SIZE, sizeof(uint32_t), uint32_compare); bool f = true; for (uint32_t i = 0; i + 1 < TEST_CASE_SIZE; ++i) { f = f && (keys[i] < keys[i + 1]); } if(!f) { fail("check that keys is sorted failed", "!f"); } uint32_t full_size = heap.size; for (uint32_t i = 0; i < TEST_CASE_SIZE; ++i) { root_value = container_of(test_heap_top(&heap), struct test_type, node); test_heap_pop(&heap); if (root_value->val1 != keys[i]) { fail("check that min.val1 is correct failed", "root_value->val1 != keys[i]"); } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } if (heap.size != full_size - 1 - i) { fail("check that size is correct", "heap_test_size(root) != full_size - 1 - i"); } free(root_value); } test_heap_destroy(&heap); free(keys); footer(); } static void test_insert_pop_workload() { header(); uint32_t ans = UINT_MAX; struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); uint32_t current_size = 0; for(uint32_t i = 0; i < TEST_CASE_SIZE; ++i) { if (heap.size == 0 || rand() % 5) { current_size++; value = (struct test_type *) malloc(sizeof(struct test_type)); value->val1 = rand(); test_heap_insert(&heap, &value->node); } else { current_size--; root_value = container_of(test_heap_top(&heap), struct test_type, node); test_heap_pop(&heap); free(root_value); } if (test_heap_check(&heap)) { fail("check heap invariants failed", 
"test_heap_check(&heap)"); } if (heap.size != current_size) { fail("check that size is correct", "heap.size != current_size"); } } free_all_nodes(&heap); test_heap_destroy(&heap); footer(); } static void test_pop_last() { header(); uint32_t ans = UINT_MAX; struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); value = (struct test_type *)malloc(sizeof(struct test_type)); test_heap_insert(&heap, &value->node); test_heap_pop(&heap); if (heap.size != 0) { fail("test delete last node failed", "heap.size != 0"); } test_heap_destroy(&heap); free(value); footer(); } static void test_insert_update_workload() { header(); uint32_t nodes_it = 0; uint64_t current_size = 0; uint32_t ans = UINT_MAX; struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); struct test_type **nodes = (struct test_type **) malloc(sizeof(struct test_type *) * TEST_CASE_SIZE); struct heap_node *test_node = NULL, *root = NULL; for(uint32_t i = 0; i < TEST_CASE_SIZE; ++i) { if (nodes_it == current_size || heap.size == 0 || rand() % 5) { value = (struct test_type *) malloc(sizeof(struct test_type)); value->val1 = rand(); nodes[current_size++] = value; test_heap_insert(&heap, &value->node); } else { nodes[nodes_it]->val1 = rand(); test_heap_update(&heap, &(nodes[nodes_it]->node)); nodes_it++; } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } if (heap.size != current_size) { fail("check that size is correct", "heap_test_size(root) != current_size"); } } free_all_nodes(&heap); test_heap_destroy(&heap); free(nodes); footer(); } static void test_random_delete_workload() { header(); uint32_t nodes_it = 0; uint64_t current_size = 0; uint32_t ans = UINT_MAX; struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); struct test_type **nodes = (struct test_type **) malloc(sizeof(struct test_type *) * TEST_CASE_SIZE); struct heap_node *test_node = NULL, *root = NULL; for(uint32_t i = 0; i < TEST_CASE_SIZE; 
++i) { if (nodes_it == current_size || heap.size == 0 || rand() % 5) { value = (struct test_type *) malloc(sizeof(struct test_type)); value->val1 = rand(); nodes[current_size++] = value; test_heap_insert(&heap, &value->node); } else { test_heap_delete(&heap, &(nodes[nodes_it]->node)); current_size--; nodes_it++; } if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } if (heap.size != current_size) { fail("check that size is correct", "heap.size != current_size"); } } free_all_nodes(&heap); test_heap_destroy(&heap); free(nodes); footer(); } static void test_delete_last_node() { header(); struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); for (int i = 0; i < 4; ++i) { value = (struct test_type *) malloc(sizeof(struct test_type)); value->val1 = 0; test_heap_insert(&heap, &value->node); } test_heap_delete(&heap, &value->node); if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } test_heap_destroy(&heap); footer(); } static void test_heapify() { header(); heap_t heap; test_heap_create(&heap); for (uint32_t i = 0; i < TEST_CASE_SIZE; ++i) { struct test_type *value = malloc(sizeof(struct test_type)); value->val1 = rand(); value->val2 = rand(); test_heap_insert(&heap, &value->node); } order_by_val2 = true; test_heap_update_all(&heap); if (test_heap_check(&heap)) { fail("check heap invariants failed", "test_heap_check(&heap)"); } order_by_val2 = false; free_all_nodes(&heap); test_heap_destroy(&heap); footer(); } int main(int argc, const char** argv) { srand(179); test_insert_1_to_3(); test_insert_3_to_1(); test_insert_50_to_150_mod_100(); test_insert_many_random(); test_insert_10_to_1_pop(); test_insert_many_pop_many_random(); test_insert_pop_workload(); test_pop_last(); test_insert_update_workload(); test_random_delete_workload(); test_delete_last_node(); test_heapify(); } 
tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_mem.result0000664000000000000000000000103013306560010020664 0ustar rootroot *** test_basic *** 1..12 ok 1 - mem->min_lsn on empty mem ok 2 - mem->max_lsn on empty mem ok 3 - mem->min_lsn after prepare ok 4 - mem->max_lsn after prepare ok 5 - mem->min_lsn after commit ok 6 - mem->max_lsn after commit ok 7 - vy_mem_older_lsn 1 ok 8 - vy_mem_older_lsn 2 ok 9 - vy_mem_rollback 1 ok 10 - vy_mem_rollback 2 ok 11 - vy_mem->version ok 12 - vy_mem->version *** test_basic: done *** *** test_iterator_restore_after_insertion *** 1..1 ok 1 - check wrong_output 0 *** test_iterator_restore_after_insertion: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/bps_tree_iterator.result0000664000000000000000000000233413306560010023114 0ustar rootroot *** iterator_check *** Test tree size: 50000 --> (0,0) (0,1) (0,2) (0,3) (0,4) (2,0) (2,1) <-- (19998,4) (19998,3) (19998,2) (19998,1) (19998,0) (19996,4) (19996,3) Key -1, empty range [ptr, ptr): <-> Key 0, not empty range [ptr, ptr): (0,0) (0,1) (0,2) (0,3) (0,4) <-> (0,4) (0,3) (0,2) (0,1) (0,0) Key 10, not empty range [ptr, ptr): (10,0) (10,1) (10,2) (10,3) (10,4) <-> (10,4) (10,3) (10,2) (10,1) (10,0) Key 15, empty range [ptr, ptr): <-> Key 19998, not empty range [ptr, eof): (19998,0) (19998,1) (19998,2) (19998,3) (19998,4) <-> (19998,4) (19998,3) (19998,2) (19998,1) (19998,0) Key 20000, empty range [eof, eof): <-> Key -1, range [ptr, ptr): <-> Key 0, range [ptr, ptr): (0,0) (0,1) (0,2) (0,3) (0,4) <-> (0,4) (0,3) (0,2) (0,1) (0,0) Key 10, range [ptr, ptr): (10,0) (10,1) (10,2) (10,3) (10,4) <-> (10,4) (10,3) (10,2) (10,1) (10,0) Key 15, range [ptr, ptr): <-> Key 19998, range [ptr, eof): (19998,0) (19998,1) (19998,2) (19998,3) (19998,4) <-> (19998,4) (19998,3) (19998,2) (19998,1) (19998,0) Key 20000, range [eof, eof): <-> *** iterator_check: done *** *** iterator_invalidate_check *** *** iterator_invalidate_check: done *** *** iterator_freeze_check *** *** iterator_freeze_check: done *** 
tarantool_1.9.1.26.g63eb81e3c/test/unit/cbus_stress.result0000664000000000000000000000004213306560010021731 0ustar rootroot *** main *** *** main: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/queue.result0000664000000000000000000000073213306560010020524 0ustar rootroot *** test0 *** Initialized: STAILQ_INIT: STAILQ_REVERSE: *** test0: done *** *** test1 *** STAILQ_INIT: 1 STAILQ_REVERSE: 1 *** test1: done *** *** test2 *** STAILQ_INIT: 1 2 STAILQ_REVERSE: 2 1 *** test2: done *** *** test3 *** STAILQ_INIT: 1 2 3 STAILQ_REVERSE: 3 2 1 *** test3: done *** *** test_splice *** q1: q2: q1: q2: STAILQ_INIT: 1 2 3 q1: q2: 1 2 3 q1: 1 2 3 q2: q1: 1 q2: 2 3 q1: 1 3 q2: 2 q1: 1 3 2 q2: *** test_splice: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/queue.c0000664000000000000000000001142513306560010017431 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "third_party/queue.h" #include "unit.h" #include #include struct elem { STAILQ_ENTRY(elem) entry; int val; }; STAILQ_HEAD(elem_queue, elem); const char * queue2str(struct elem_queue *queue) { static char buf[1024]; buf[0] = '\0'; struct elem *elem; int n = 0; STAILQ_FOREACH(elem, queue, entry) { n += snprintf(buf + n, sizeof(buf) - n - 1, "%d ", elem->val); } return buf; } /** Test a queue with 0 elements. */ void test0() { header(); struct elem_queue queue = STAILQ_HEAD_INITIALIZER(queue); printf("Initialized: %s\n", queue2str(&queue)); STAILQ_INIT(&queue); printf("STAILQ_INIT: %s\n", queue2str(&queue)); STAILQ_REVERSE(&queue, elem, entry); printf("STAILQ_REVERSE: %s\n", queue2str(&queue)); footer(); } /** Test a queue with 1 element. 
*/ void test1() { header(); struct elem el1; struct elem_queue queue = STAILQ_HEAD_INITIALIZER(queue); el1.val = 1; STAILQ_INSERT_TAIL(&queue, &el1, entry); printf("STAILQ_INIT: %s\n", queue2str(&queue)); STAILQ_REVERSE(&queue, elem, entry); printf("STAILQ_REVERSE: %s\n", queue2str(&queue)); footer(); } void test2() { header(); struct elem el1, el2; struct elem_queue queue = STAILQ_HEAD_INITIALIZER(queue); el1.val = 1; el2.val = 2; STAILQ_INSERT_TAIL(&queue, &el1, entry); STAILQ_INSERT_TAIL(&queue, &el2, entry); printf("STAILQ_INIT: %s\n", queue2str(&queue)); STAILQ_REVERSE(&queue, elem, entry); printf("STAILQ_REVERSE: %s\n", queue2str(&queue)); footer(); } void test3() { header(); struct elem el1, el2, el3; struct elem_queue queue = STAILQ_HEAD_INITIALIZER(queue); el1.val = 1; el2.val = 2; el3.val = 3; STAILQ_INSERT_TAIL(&queue, &el1, entry); STAILQ_INSERT_TAIL(&queue, &el2, entry); STAILQ_INSERT_TAIL(&queue, &el3, entry); printf("STAILQ_INIT: %s\n", queue2str(&queue)); STAILQ_REVERSE(&queue, elem, entry); printf("STAILQ_REVERSE: %s\n", queue2str(&queue)); footer(); } void test_splice() { header(); struct elem el1, el2, el3; struct elem_queue queue1 = STAILQ_HEAD_INITIALIZER(queue1); struct elem_queue queue2 = STAILQ_HEAD_INITIALIZER(queue2); STAILQ_SPLICE(&queue1, STAILQ_FIRST(&queue1), entry, &queue2); printf("q1: %s\n", queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); STAILQ_SPLICE(&queue2, STAILQ_FIRST(&queue2), entry, &queue1); printf("q1: %s\n", queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); el1.val = 1; el2.val = 2; el3.val = 3; STAILQ_INSERT_TAIL(&queue1, &el1, entry); STAILQ_INSERT_TAIL(&queue1, &el2, entry); STAILQ_INSERT_TAIL(&queue1, &el3, entry); printf("STAILQ_INIT: %s\n", queue2str(&queue1)); STAILQ_SPLICE(&queue1, STAILQ_FIRST(&queue1), entry, &queue2); printf("q1: %s\n", queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); STAILQ_SPLICE(&queue2, STAILQ_FIRST(&queue2), entry, &queue1); printf("q1: %s\n", 
queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); STAILQ_SPLICE(&queue1, STAILQ_NEXT(STAILQ_FIRST(&queue1), entry), entry, &queue2); printf("q1: %s\n", queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); STAILQ_SPLICE(&queue2, STAILQ_NEXT(STAILQ_FIRST(&queue2), entry), entry, &queue1); printf("q1: %s\n", queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); STAILQ_SPLICE(&queue2, STAILQ_FIRST(&queue2), entry, &queue1); printf("q1: %s\n", queue2str(&queue1)); printf("q2: %s\n", queue2str(&queue2)); footer(); } int main() { test0(); test1(); test2(); test3(); test_splice(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/reflection_c.c0000664000000000000000000000130313306560010020733 0ustar rootroot#include "reflection.h" #include "unit.h" static struct type_info type_Object = { .parent = NULL, .name = "Object", .methods = NULL, }; static struct type_info type_Database = { .parent = &type_Object, .name = "Database", .methods = NULL, }; static struct type_info type_Tarantool = { .parent = &type_Database, .name = "Tarantool", .methods = NULL }; int main() { plan(4); /* inheritance */ ok(type_assignable(&type_Object, &type_Tarantool), "assignable"); ok(type_assignable(&type_Database, &type_Tarantool), "assignable"); ok(type_assignable(&type_Tarantool, &type_Tarantool), "assignable"); ok(!type_assignable(&type_Tarantool, &type_Database), "assignable"); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/bitset_iterator.c0000664000000000000000000002446613306560010021521 0ustar rootroot#include #include #include #include #include #include #include "unit.h" enum { NUMS_SIZE = 1 << 16 }; static size_t NUMS[NUMS_SIZE]; static struct bitset ** bitsets_create(size_t count) { struct bitset **bitsets = malloc(count * sizeof(*bitsets)); fail_if(bitsets == NULL); for (size_t i = 0; i < count; i++) { bitsets[i] = malloc(sizeof(struct bitset)); fail_if(bitsets[i] == NULL); bitset_create(bitsets[i], realloc); } return bitsets; } static void 
bitsets_destroy(struct bitset **bitsets, size_t count) { for (size_t i = 0; i < count; i++) { bitset_destroy(bitsets[i]); free(bitsets[i]); } free(bitsets); } static void nums_fill(size_t *nums, size_t size) { const size_t STEP_MAX = 7; nums[0] = rand() % STEP_MAX; for (size_t i = 1; i < size; i++) { nums[i] = nums[i - 1] + 1 + rand() % STEP_MAX; } } static int nums_comparator(const void *a, const void *b) { size_t *aa = (size_t *) a; size_t *bb = (size_t *) b; if (*aa < *bb) { return -1; } else if (*aa > *bb) { return 1; } else { return 0; } } static void nums_sort(size_t *nums, size_t size) { qsort(nums, size, sizeof(*nums), nums_comparator); } static void nums_shuffle(size_t *nums, size_t size) { for (size_t i = 0; i < size - 1; i++) { size_t j = i + rand() / (RAND_MAX / (size- i) + 1); size_t tmp = nums[j]; nums[j] = nums[i]; nums[i] = tmp; } } static void test_empty_expr(void) { header(); struct bitset_expr expr; bitset_expr_create(&expr, realloc); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, NULL, 0) == 0); bitset_expr_destroy(&expr); size_t pos = bitset_iterator_next(&it); fail_unless(pos == SIZE_MAX); bitset_iterator_destroy(&it); footer(); } static void test_empty_expr_conj1(void) { header(); struct bitset_expr expr; bitset_expr_create(&expr, realloc); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_iterator_init(&it, &expr, NULL, 0) == 0); bitset_expr_destroy(&expr); size_t pos = bitset_iterator_next(&it); fail_unless(pos == SIZE_MAX); bitset_iterator_destroy(&it); footer(); } static void test_empty_expr_conj2(void) { header(); size_t big_i = (size_t) 1 << 15; struct bitset **bitsets = bitsets_create(2); bitset_set(bitsets[0], 1); bitset_set(bitsets[0], big_i); struct bitset_expr expr; bitset_expr_create(&expr, realloc); struct bitset_iterator it; bitset_iterator_create(&it, realloc); 
fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_param(&expr, 0, false) == 0); fail_unless(bitset_expr_add_param(&expr, 1, true) == 0); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_iterator_init(&it, &expr, bitsets, 2) == 0); bitset_expr_destroy(&expr); fail_unless(bitset_iterator_next(&it) == 1); fail_unless(bitset_iterator_next(&it) == big_i); fail_unless(bitset_iterator_next(&it) == SIZE_MAX); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, 2); footer(); } static void test_empty_result(void) { header(); struct bitset **bitsets = bitsets_create(2); bitset_set(bitsets[0], 1); bitset_set(bitsets[0], 2); bitset_set(bitsets[0], 3); bitset_set(bitsets[0], 193); bitset_set(bitsets[0], 1024); bitset_set(bitsets[0], 1025); bitset_set(bitsets[0], 16384); bitset_set(bitsets[0], 16385); bitset_set(bitsets[1], 17); bitset_set(bitsets[1], 194); bitset_set(bitsets[1], 1023); struct bitset_expr expr; bitset_expr_create(&expr, realloc); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_param(&expr, 0, false) == 0); fail_unless(bitset_expr_add_param(&expr, 1, false) == 0); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, 2) == 0); bitset_expr_destroy(&expr); size_t pos = bitset_iterator_next(&it); fail_unless(pos == SIZE_MAX); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, 2); footer(); } static void test_first_result(void) { header(); struct bitset **bitsets = bitsets_create(2); bitset_set(bitsets[0], 0); bitset_set(bitsets[0], 1023); bitset_set(bitsets[1], 0); bitset_set(bitsets[1], 1025); struct bitset_expr expr; bitset_expr_create(&expr, realloc); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_param(&expr, 0, false) == 0); 
fail_unless(bitset_expr_add_param(&expr, 1, false) == 0); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, 2) == 0); bitset_expr_destroy(&expr); size_t pos = bitset_iterator_next(&it); fail_unless(pos == 0); fail_unless(bitset_iterator_next(&it) == SIZE_MAX); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, 2); footer(); } static void test_simple() { header(); enum { BITSETS_SIZE = 32 }; struct bitset **bitsets = bitsets_create(BITSETS_SIZE); nums_shuffle(NUMS, NUMS_SIZE); size_t NOISE_SIZE = NUMS_SIZE / 3; for (size_t i = 0; i < NOISE_SIZE; i++) { bitset_set(bitsets[i % BITSETS_SIZE], NUMS[i]); } for (size_t i = NOISE_SIZE; i < NUMS_SIZE; i++) { for (size_t b = 0; b < BITSETS_SIZE; b++) { bitset_set(bitsets[b], NUMS[i]); } } struct bitset_expr expr; bitset_expr_create(&expr, realloc); fail_unless(bitset_expr_add_conj(&expr) == 0); for (size_t b = 0; b < BITSETS_SIZE; b++) { fail_unless(bitset_expr_add_param(&expr, b, false) == 0); } nums_sort(NUMS + NOISE_SIZE, NUMS_SIZE - NOISE_SIZE); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, BITSETS_SIZE) == 0); bitset_expr_destroy(&expr); for (size_t i = NOISE_SIZE; i < NUMS_SIZE; i++) { fail_unless(bitset_iterator_next(&it) == NUMS[i]); } fail_unless(bitset_iterator_next(&it) == SIZE_MAX); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, BITSETS_SIZE); footer(); } static void test_big() { header(); const size_t BITSETS_SIZE = 32; struct bitset **bitsets = bitsets_create(BITSETS_SIZE); nums_shuffle(NUMS, NUMS_SIZE); printf("Setting bits... 
"); for (size_t i = 0; i < NUMS_SIZE; i++) { for (size_t b = 0; b < BITSETS_SIZE; b++) { bitset_set(bitsets[b], NUMS[i]); if (b % 2 == 0 && i % 2 == 0) continue; } } printf("ok\n"); struct bitset_expr expr; bitset_expr_create(&expr, realloc); fail_unless(bitset_expr_add_conj(&expr) == 0); for(size_t b = 0; b < BITSETS_SIZE; b++) { fail_unless(bitset_expr_add_param(&expr, b, false) == 0); } struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, BITSETS_SIZE) == 0); bitset_expr_destroy(&expr); printf("Iterating... "); size_t pos; while ((pos = bitset_iterator_next(&it)) != SIZE_MAX) { size_t b; for(b = 0; b < BITSETS_SIZE; b++) { if(bitset_test(bitsets[b], pos)) continue; } fail_if(b < BITSETS_SIZE); } printf("ok\n"); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, BITSETS_SIZE); footer(); } static void test_not_last() { header(); struct bitset **bitsets = bitsets_create(2); size_t big_i = (size_t) 1 << 15; bitset_set(bitsets[0], 0); bitset_set(bitsets[0], 11); bitset_set(bitsets[0], 1024); bitset_set(bitsets[1], 0); bitset_set(bitsets[1], 10); bitset_set(bitsets[1], 11); bitset_set(bitsets[1], 14); bitset_set(bitsets[1], big_i); struct bitset_expr expr; bitset_expr_create(&expr, realloc); fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_param(&expr, 0, true) == 0); fail_unless(bitset_expr_add_param(&expr, 1, false) == 0); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, 2) == 0); bitset_expr_destroy(&expr); size_t result[] = {10, 14, big_i}; size_t result_size = 3; size_t pos; for (size_t i = 0; i < result_size; i++) { pos = bitset_iterator_next(&it); fail_unless (result[i] == pos); } fail_unless ((pos = bitset_iterator_next(&it)) == SIZE_MAX); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, 2); footer(); } static void test_not_empty() { header(); enum { BITSETS_SIZE = 4, CHECK_COUNT = (size_t) 
1 << 14 }; struct bitset **bitsets = bitsets_create(BITSETS_SIZE); nums_shuffle(NUMS, NUMS_SIZE); for (size_t i = 0; i < NUMS_SIZE; i++) { bitset_set(bitsets[i % BITSETS_SIZE], NUMS[i]); } struct bitset_expr expr; bitset_expr_create(&expr, realloc); for(size_t b = 0; b < BITSETS_SIZE; b++) { fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_param(&expr, b, true) == 0); } struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, BITSETS_SIZE) == 0); bitset_expr_destroy(&expr); for (size_t i = 0; i < CHECK_COUNT; i++) { size_t pos = bitset_iterator_next(&it); fail_unless (i == pos); } bitset_iterator_destroy(&it); bitsets_destroy(bitsets, BITSETS_SIZE); footer(); } static void test_disjunction() { header(); enum { BITSETS_SIZE = 32 }; struct bitset **bitsets = bitsets_create(BITSETS_SIZE); nums_shuffle(NUMS, NUMS_SIZE); for (size_t i = 0; i < NUMS_SIZE; i++) { bitset_set(bitsets[i % BITSETS_SIZE], NUMS[i]); } struct bitset_expr expr; bitset_expr_create(&expr, realloc); for (size_t b = 0; b < BITSETS_SIZE; b++) { fail_unless(bitset_expr_add_conj(&expr) == 0); fail_unless(bitset_expr_add_param(&expr, b, false) == 0); } nums_sort(NUMS, NUMS_SIZE); struct bitset_iterator it; bitset_iterator_create(&it, realloc); fail_unless(bitset_iterator_init(&it, &expr, bitsets, BITSETS_SIZE) == 0); bitset_expr_destroy(&expr); for (size_t i = 0; i < NUMS_SIZE; i++) { size_t pos = bitset_iterator_next(&it); fail_unless(pos == NUMS[i]); } size_t pos = bitset_iterator_next(&it); fail_unless(pos == SIZE_MAX); bitset_iterator_destroy(&it); bitsets_destroy(bitsets, BITSETS_SIZE); footer(); } int main(void) { setbuf(stdout, NULL); nums_fill(NUMS, NUMS_SIZE); test_empty_expr(); test_empty_expr_conj1(); test_empty_expr_conj2(); test_empty_result(); test_first_result(); test_simple(); test_big(); test_not_empty(); test_not_last(); test_disjunction(); return 0; } 
tarantool_1.9.1.26.g63eb81e3c/test/unit/reflection_cxx.result0000664000000000000000000000141313306560010022411 0ustar rootroot1..30 ok 1 - type.name ok 2 - type.parent ok 3 - type.parent ok 4 - is_instance ok 5 - is_instance ok 6 - is_instance ok 7 - is_instance ok 8 - methods order ok 9 - methods order ok 10 - methods order ok 11 - methods order ok 12 - methods order ok 13 - method.owner ok 14 - method.name ok 15 - method.rtype (non void) ok 16 - method.rtype (void) ok 17 - method.nargs (zero) ok 18 - method.nargs (non-zero) ok 19 - method.atype ok 20 - method.isconst ok 21 - !method.isconst ok 22 - !invokable ok 23 - !invokable ok 24 - !invokable<>(invalid object) ok 25 - invokable ok 26 - invokable ok 27 - invoke (int) ok 28 - invoke (const char *) ok 29 - invoke (void) ok 30 - !invokable<>() on const method with non-const object tarantool_1.9.1.26.g63eb81e3c/test/unit/histogram.c0000664000000000000000000000712713306560010020306 0ustar rootroot#include #include #include #include "histogram.h" #include "memory.h" #include "unit.h" #include "trivia/util.h" static int int64_cmp(const void *p1, const void *p2) { int64_t v1 = *(int64_t *)p1; int64_t v2 = *(int64_t *)p2; if (v1 > v2) return 1; if (v1 < v2) return -1; return 0; } static void int64_sort(int64_t *data, size_t len) { qsort(data, len, sizeof(*data), int64_cmp); } static int64_t * gen_buckets(size_t *p_n_buckets) { size_t n_buckets = 1 + rand() % 20; int64_t *buckets = calloc(n_buckets, sizeof(*buckets)); for (size_t i = 0; i < n_buckets; i++) buckets[i] = (i > 0 ? 
buckets[i - 1] : 0) + 1 + rand() % 2000; *p_n_buckets = n_buckets; return buckets; } static int64_t * gen_rand_data(size_t *p_len) { size_t len = 900 + rand() % 200; int64_t *data = calloc(len, sizeof(*data)); for (size_t i = 0; i < len; i++) data[i] = rand() % 10000; *p_len = len; return data; } static int64_t gen_rand_value(int64_t min, int64_t max) { assert(max >= min); return min + rand() % (max - min + 1); } static void test_counts(void) { header(); size_t n_buckets; int64_t *buckets = gen_buckets(&n_buckets); size_t data_len; int64_t *data = gen_rand_data(&data_len); struct histogram *hist = histogram_new(buckets, n_buckets); for (size_t i = 0; i < data_len; i++) histogram_collect(hist, data[i]); fail_if(hist->total != data_len); for (size_t b = 0; b < n_buckets; b++) { size_t expected = 0; for (size_t i = 0; i < data_len; i++) { if (data[i] <= buckets[b] && (b == 0 || data[i] > buckets[b - 1])) expected++; } fail_if(hist->buckets[b].count != expected); } histogram_delete(hist); free(data); free(buckets); footer(); } static void test_discard(void) { header(); size_t n_buckets; int64_t *buckets = gen_buckets(&n_buckets); struct histogram *hist = histogram_new(buckets, n_buckets); size_t bucket_sz = gen_rand_value(2, 10); size_t data_len = (n_buckets + 1) * bucket_sz; int64_t *data = calloc(data_len, sizeof(*data)); for (size_t b = 0; b <= n_buckets; b++) { int64_t min = (b == 0 ? INT64_MIN : buckets[b - 1] + 1); int64_t max = (b == n_buckets ? 
INT64_MAX : buckets[b]); for (size_t i = 0; i < bucket_sz; i++) data[b * bucket_sz + i] = gen_rand_value(min, max); } for (size_t i = 0; i < data_len; i++) histogram_collect(hist, data[i]); for (size_t i = 0; i < data_len; i++) { if (i % bucket_sz < bucket_sz / 2) histogram_discard(hist, data[i]); } bucket_sz = (bucket_sz + 1) / 2; for (size_t b = 0; b < n_buckets; b++) fail_if(hist->buckets[b].count != bucket_sz); fail_if(hist->total != bucket_sz * (n_buckets + 1)); histogram_delete(hist); free(data); free(buckets); footer(); } static void test_percentile(void) { header(); size_t n_buckets; int64_t *buckets = gen_buckets(&n_buckets); size_t data_len; int64_t *data = gen_rand_data(&data_len); int64_t max = -1; for (size_t i = 0; i < data_len; i++) { if (max < data[i]) max = data[i]; } struct histogram *hist = histogram_new(buckets, n_buckets); for (size_t i = 0; i < data_len; i++) histogram_collect(hist, data[i]); int64_sort(data, data_len); for (int pct = 5; pct < 100; pct += 5) { int64_t val = data[data_len * pct / 100]; int64_t expected = max; for (size_t b = 0; b < n_buckets; b++) { if (buckets[b] >= val) { expected = buckets[b]; break; } } int64_t result = histogram_percentile(hist, pct); fail_if(result != expected); } histogram_delete(hist); free(data); free(buckets); footer(); } int main() { srand(time(NULL)); test_counts(); test_discard(); test_percentile(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/xrow.result0000664000000000000000000000254113306560010020377 0ustar rootroot1..3 1..40 ok 1 - round trip ok 2 - roundtrip.version_id ok 3 - roundtrip.protocol ok 4 - roundtrip.uuid ok 5 - roundtrip.salt_len ok 6 - roundtrip.salt ok 7 - decode iproto ok 8 - iproto.version_id ok 9 - iproto.protocol ok 10 - iproto.uuid ok 11 - iproto.salt_len ok 12 - iproto.salt ok 13 - decode lua ok 14 - lua.version_id ok 15 - lua.protocol ok 16 - lua.uuid ok 17 - lua.salt_len ok 18 - decode iproto166 ok 19 - iproto166.version_id ok 20 - iproto166.protocol ok 21 - iproto166.uuid 
ok 22 - iproto166.salt_len ok 23 - iproto166.salt ok 24 - decode lua166 ok 25 - lua166.version_id ok 26 - lua166.protocol ok 27 - lua166.uuid ok 28 - lua166.salt_len ok 29 - invalid 0 ok 30 - invalid 1 ok 31 - invalid 2 ok 32 - invalid 3 ok 33 - invalid 4 ok 34 - invalid 5 ok 35 - invalid 6 ok 36 - invalid 7 ok 37 - invalid 8 ok 38 - invalid 9 ok 39 - invalid 10 ok 40 - invalid 11 ok 1 - subtests 1..10 ok 1 - bad msgpack end ok 2 - encode ok 3 - header map size ok 4 - header decode ok 5 - decoded type ok 6 - decoded replica_id ok 7 - decoded lsn ok 8 - decoded tm ok 9 - decoded sync ok 10 - decoded bodycnt ok 2 - subtests 1..1 ok 1 - request_str ok 3 - subtests tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_channel_stress.result0000664000000000000000000000004613306560010023560 0ustar rootroot *** main_f *** *** main_f: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rtree_multidim.result0000664000000000000000000000072113306560010022423 0ustar rootroot *** rand_test *** DIMENSION: 1, page size: 512, max fill good: 1 *** rand_test: done *** *** rand_test *** DIMENSION: 2, page size: 1024, max fill good: 1 *** rand_test: done *** *** rand_test *** DIMENSION: 3, page size: 1024, max fill good: 1 *** rand_test: done *** *** rand_test *** DIMENSION: 8, page size: 4096, max fill good: 1 *** rand_test: done *** *** rand_test *** DIMENSION: 16, page size: 8192, max fill good: 1 *** rand_test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_basic.result0000664000000000000000000001020713306560010021504 0ustar rootroot *** test_empty_rope *** size = 0 string = '' └──nil *** test_empty_rope: done *** *** test_append *** insert offset = 0, str = ' a ' size = 3 string = ' a ' └──{ len = 3, height = 1, data = ' a '} insert offset = 3, str = ' b ' size = 6 string = ' a b ' │ ┌──nil └──{ len = 3, height = 2, data = ' a '} └──{ len = 3, height = 1, data = ' b '} insert offset = 6, str = ' c ' size = 9 string = ' a b c ' │ ┌──{ len = 3, height = 1, data = ' a '} └──{ len = 3, 
height = 2, data = ' b '} └──{ len = 3, height = 1, data = ' c '} *** test_append: done *** *** test_prepend *** insert offset = 0, str = ' c ' size = 3 string = ' c ' └──{ len = 3, height = 1, data = ' c '} insert offset = 0, str = ' b ' size = 6 string = ' b c ' │ ┌──{ len = 3, height = 1, data = ' b '} └──{ len = 3, height = 2, data = ' c '} └──nil insert offset = 0, str = ' a ' size = 9 string = ' a b c ' │ ┌──{ len = 3, height = 1, data = ' a '} └──{ len = 3, height = 2, data = ' b '} └──{ len = 3, height = 1, data = ' c '} *** test_prepend: done *** *** test_insert *** insert offset = 0, str = ' a ' size = 5 string = ' a ' └──{ len = 5, height = 1, data = ' a '} insert offset = 4, str = 'b ' size = 7 string = ' ab ' │ ┌──{ len = 4, height = 1, data = ' a'} └──{ len = 2, height = 2, data = 'b '} └──{ len = 1, height = 1, data = ' '} insert offset = 5, str = 'c ' size = 9 string = ' abc ' │ ┌──{ len = 4, height = 1, data = ' a'} └──{ len = 1, height = 3, data = 'b'} │ ┌──{ len = 2, height = 1, data = 'c '} └──{ len = 1, height = 2, data = ' '} └──{ len = 1, height = 1, data = ' '} insert offset = 1, str = ' ' size = 10 string = ' abc ' │ ┌──{ len = 1, height = 1, data = ' '} │ ┌──{ len = 1, height = 2, data = ' '} │ │ └──{ len = 3, height = 1, data = ' a'} └──{ len = 1, height = 3, data = 'b'} │ ┌──{ len = 2, height = 1, data = 'c '} └──{ len = 1, height = 2, data = ' '} └──{ len = 1, height = 1, data = ' '} insert offset = 9, str = ' ' size = 11 string = ' abc ' │ ┌──{ len = 1, height = 1, data = ' '} │ ┌──{ len = 1, height = 2, data = ' '} │ │ └──{ len = 3, height = 1, data = ' a'} └──{ len = 1, height = 4, data = 'b'} │ ┌──{ len = 2, height = 1, data = 'c '} └──{ len = 1, height = 3, data = ' '} │ ┌──{ len = 1, height = 1, data = ' '} └──{ len = 1, height = 2, data = ' '} └──nil insert offset = 4, str = '*' size = 12 string = ' *abc ' │ ┌──{ len = 1, height = 1, data = ' '} │ ┌──{ len = 1, height = 3, data = ' '} │ │ │ ┌──{ len = 2, height = 1, data = ' '} │ 
│ └──{ len = 1, height = 2, data = '*'} │ │ └──{ len = 1, height = 1, data = 'a'} └──{ len = 1, height = 4, data = 'b'} │ ┌──{ len = 2, height = 1, data = 'c '} └──{ len = 1, height = 3, data = ' '} │ ┌──{ len = 1, height = 1, data = ' '} └──{ len = 1, height = 2, data = ' '} └──nil insert offset = 8, str = '*' size = 13 string = ' *abc* ' │ ┌──{ len = 1, height = 1, data = ' '} │ ┌──{ len = 1, height = 3, data = ' '} │ │ │ ┌──{ len = 2, height = 1, data = ' '} │ │ └──{ len = 1, height = 2, data = '*'} │ │ └──{ len = 1, height = 1, data = 'a'} └──{ len = 1, height = 4, data = 'b'} │ ┌──{ len = 1, height = 1, data = 'c'} │ ┌──{ len = 1, height = 2, data = '*'} │ │ └──{ len = 1, height = 1, data = ' '} └──{ len = 1, height = 3, data = ' '} │ ┌──{ len = 1, height = 1, data = ' '} └──{ len = 1, height = 2, data = ' '} └──nil *** test_insert: done *** *** test_erase *** erase offset = 0 size = 0 string = '' └──nil erase offset = 0 size = 1 string = 'b' └──{ len = 1, height = 1, data = 'b'} *** test_erase: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_iterators_helper.h0000664000000000000000000001475313306565107022426 0ustar rootroot#ifndef INCLUDES_TARANTOOL_TEST_VY_ITERATORS_HELPER_H #define INCLUDES_TARANTOOL_TEST_VY_ITERATORS_HELPER_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "unit.h" #include "vy_stmt.h" #include "small/rlist.h" #include "small/lsregion.h" #include "vy_mem.h" #include "vy_cache.h" #include "vy_read_view.h" #define vyend 99999999 #define MAX_FIELDS_COUNT 100 #define STMT_TEMPLATE(lsn, type, ...) \ { { __VA_ARGS__, vyend }, IPROTO_##type, lsn, false, 0, 0 } #define STMT_TEMPLATE_OPTIMIZED(lsn, type, ...) \ { { __VA_ARGS__, vyend }, IPROTO_##type, lsn, true, 0, 0 } extern struct tuple_format_vtab vy_tuple_format_vtab; extern struct tuple_format *vy_key_format; extern struct vy_mem_env mem_env; extern struct vy_cache_env cache_env; #if defined(__cplusplus) extern "C" { #endif /** * Initialize subsystems neccessary for correct vinyl iterators * working. * @param cache_size Vinyl cache quota limit. */ void vy_iterator_C_test_init(size_t cache_size); /** Close subsystems, opened in vy_iterator_C_test_init(). */ void vy_iterator_C_test_finish(); /** Template for creation a vinyl statement. */ struct vy_stmt_template { /** Array of statement fields, ended with 'vyend'. */ const int fields[MAX_FIELDS_COUNT]; /** Statement type: REPLACE/UPSERT/DELETE/UPSERT. */ enum iproto_type type; /** Statement lsn. 
*/ int64_t lsn; /* * True, if statement must have column mask, that allows * to skip it in the write_iterator. */ bool optimize_update; /* * In case of upsert it is possible to use only one 'add' operation. * This is the column number of the operation. */ uint32_t upsert_field; /** And that is the value to add. */ int32_t upsert_value; }; /** * Create a new vinyl statement using the specified template. * * @param format * @param upsert_format Format for upsert statements. * @param format_with_colmask Format for statements with a * colmask. * @param templ Statement template. * * @return Created statement. */ struct tuple * vy_new_simple_stmt(struct tuple_format *format, struct tuple_format *upsert_format, struct tuple_format *format_with_colmask, const struct vy_stmt_template *templ); /** * Insert into the mem the statement, created by the specified * template. * * @param vy_mem Mem to insert into. * @param templ Statement template to insert. * * @retval Lsregion allocated statement. */ const struct tuple * vy_mem_insert_template(struct vy_mem *mem, const struct vy_stmt_template *templ); /** * Insert into the cache the statement template chain, got from * the read iterator. * @param cache Cache to insert into. * @param format Statements format. * @param chain Statement template array. * @param length Length of @a chain. * @param key_templ Key template. * @param order Iteration order. */ void vy_cache_insert_templates_chain(struct vy_cache *cache, struct tuple_format *format, const struct vy_stmt_template *chain, uint length, const struct vy_stmt_template *key_templ, enum iterator_type order); /** * Vy_cache_on_write wrapper for statement templates. * @param cache Cache to update to. * @param templ Written statement template. */ void vy_cache_on_write_template(struct vy_cache *cache, struct tuple_format *format, const struct vy_stmt_template *templ); /** * Create a list of read views using the specified vlsns. * * @param rlist[out] Result list of read views. 
* @param rvs[out] Read views array. * @param vlsns Array of read view lsns, sorted in ascending * order. * @param count Size of the @vlsns. */ void init_read_views_list(struct rlist *rlist, struct vy_read_view *rvs, const int *vlsns, int count); /** * Create vy_mem with the specified key_def, using the @region as * allocator. * * @param def Key definition. * * @return New vy_mem. */ struct vy_mem * create_test_mem(struct key_def *def); /** * Create vy_cache, key_def and tuple_format, using a specified * array of key fields. * @param fields Array of key field numbers. * @param types Array of key field types. * @param key_cnt Length of @a fields and @a types. * @param[out] cache Cache to create. * @param[out] def Key def to create. * @param[out] format Tuple format to create. */ void create_test_cache(uint32_t *fields, uint32_t *types, int key_cnt, struct vy_cache *cache, struct key_def **def, struct tuple_format **format); /** * Destroy cache and its resources. * @param vy_cache Cache to destroy. * @param key_def Key def to delete. * @param format Tuple format to unref. */ void destroy_test_cache(struct vy_cache *cache, struct key_def *def, struct tuple_format *format); /** * Check that the template specifies completely the same statement * as @stmt. * * @param stmt Actual value. * @param templ Expected value. * @param format Template statement format. * @param upsert_format Template upsert statement format. * @param format_with_colmask Template statement format with colmask. * * @retval stmt === template. 
*/ bool vy_stmt_are_same(const struct tuple *actual, const struct vy_stmt_template *expected, struct tuple_format *format, struct tuple_format *upsert_format, struct tuple_format *format_with_colmask); #if defined(__cplusplus) } #endif #endif tarantool_1.9.1.26.g63eb81e3c/test/unit/int96.cc0000664000000000000000000000352113306560010017417 0ustar rootroot#include #include "unit.h" #define check(expr) if (!(expr)) printf("failed at %s:%d\n", __FILE__, __LINE__) static void test() { header(); const uint64_t a = 0xFFFFFFFFFFFFFFFFull / 2; int96_num num, num1, num2; int96_set_unsigned(&num, 0); int96_set_unsigned(&num1, a); int96_set_unsigned(&num2, a); int96_invert(&num2); check(int96_is_neg_int64(&num2)); check(int96_extract_neg_int64(&num2) == int64_t(-a)); check(int96_is_uint64(&num)); check(int96_extract_uint64(&num) == 0); int96_add(&num, &num1); check(int96_is_uint64(&num)); check(int96_extract_uint64(&num) == a); int96_add(&num, &num1); check(int96_is_uint64(&num)); check(int96_extract_uint64(&num) == a * 2); for (int i = 1; i < 1000; i++) { for(int j = 0; j < i; j++) { int96_add(&num, &num1); check(!int96_is_uint64(&num) && !int96_is_neg_int64(&num)); } for(int j = 0; j < i - 1; j++) { int96_add(&num, &num2); check(!int96_is_uint64(&num) && !int96_is_neg_int64(&num)); } int96_add(&num, &num2); check(int96_is_uint64(&num)); check(int96_extract_uint64(&num) == a * 2); } int96_add(&num, &num2); check(int96_is_uint64(&num)); check(int96_extract_uint64(&num) == a); int96_add(&num, &num2); check(int96_is_uint64(&num)); check(int96_extract_uint64(&num) == 0); int96_add(&num, &num2); check(int96_is_neg_int64(&num)); check(int96_extract_neg_int64(&num) == int64_t(-a)); for (int i = 1; i < 1000; i++) { for(int j = 0; j < i; j++) { int96_add(&num, &num2); check(!int96_is_uint64(&num) && !int96_is_neg_int64(&num)); } for(int j = 0; j < i - 1; j++) { int96_add(&num, &num1); check(!int96_is_uint64(&num) && !int96_is_neg_int64(&num)); } int96_add(&num, &num1); 
check(int96_is_neg_int64(&num)); check(int96_extract_neg_int64(&num) == int64_t(-a)); } footer(); } int main(int, const char **) { test(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_point_lookup.c0000664000000000000000000002104713306565107021562 0ustar rootroot#include "trivia/util.h" #include "unit.h" #include "vy_index.h" #include "vy_cache.h" #include "vy_run.h" #include "fiber.h" #include #include #include #include "vy_iterators_helper.h" #include "vy_write_iterator.h" #include "identifier.h" uint32_t schema_version; static int write_run(struct vy_run *run, const char *dir_name, struct vy_index *index, struct vy_stmt_stream *wi) { struct vy_run_writer writer; if (vy_run_writer_create(&writer, run, dir_name, index->space_id, index->id, index->cmp_def, index->key_def, 4096, 0.1, 100500) != 0) goto fail; if (wi->iface->start(wi) != 0) goto fail_abort_writer; int rc; struct tuple *stmt = NULL; while ((rc = wi->iface->next(wi, &stmt)) == 0 && stmt != NULL) { rc = vy_run_writer_append_stmt(&writer, stmt); if (rc != 0) break; } wi->iface->stop(wi); if (rc == 0) rc = vy_run_writer_commit(&writer); if (rc != 0) goto fail_abort_writer; return 0; fail_abort_writer: vy_run_writer_abort(&writer); fail: return -1; } static void test_basic() { header(); plan(15); /** Suppress info messages from vy_run_writer. 
*/ say_set_log_level(S_WARN); const size_t QUOTA = 100 * 1024 * 1024; int64_t generation = 0; struct slab_cache *slab_cache = cord_slab_cache(); int rc; struct vy_index_env index_env; rc = vy_index_env_create(&index_env, ".", &generation, NULL, NULL); is(rc, 0, "vy_index_env_create"); struct vy_run_env run_env; vy_run_env_create(&run_env); struct vy_cache_env cache_env; vy_cache_env_create(&cache_env, slab_cache); vy_cache_env_set_quota(&cache_env, QUOTA); struct vy_cache cache; uint32_t fields[] = { 0 }; uint32_t types[] = { FIELD_TYPE_UNSIGNED }; struct key_def *key_def = box_key_def_new(fields, types, 1); isnt(key_def, NULL, "key_def is not NULL"); vy_cache_create(&cache, &cache_env, key_def); struct tuple_format *format = tuple_format_new(&vy_tuple_format_vtab, &key_def, 1, 0, NULL, 0, NULL); isnt(format, NULL, "tuple_format_new is not NULL"); tuple_format_ref(format); struct index_opts index_opts = index_opts_default; struct index_def *index_def = index_def_new(512, 0, "primary", sizeof("primary") - 1, TREE, &index_opts, key_def, NULL); struct vy_index *pk = vy_index_new(&index_env, &cache_env, &mem_env, index_def, format, NULL); isnt(pk, NULL, "index is not NULL") struct vy_range *range = vy_range_new(1, NULL, NULL, pk->cmp_def); isnt(pk, NULL, "range is not NULL") vy_index_add_range(pk, range); struct rlist read_views = RLIST_HEAD_INITIALIZER(read_views); char dir_tmpl[] = "./vy_point_test.XXXXXX"; char *dir_name = mkdtemp(dir_tmpl); isnt(dir_name, NULL, "temp dir name is not NULL") char path[PATH_MAX]; strcpy(path, dir_name); strcat(path, "/512"); rc = mkdir(path, 0777); is(rc, 0, "temp dir create (2)"); strcat(path, "/0"); rc = mkdir(path, 0777); is(rc, 0, "temp dir create (3)"); /* Filling the index with test data */ /* Prepare variants */ const size_t num_of_keys = 100; bool in_mem1[num_of_keys]; /* UPSERT value += 1, lsn 4 */ bool in_mem2[num_of_keys]; /* UPSERT value += 2, lsn 3 */ bool in_run1[num_of_keys]; /* UPSERT value += 4, lsn 2 */ bool 
in_run2[num_of_keys]; /* UPSERT value += 8, lsn 1 */ bool in_cache[num_of_keys]; uint32_t expect[num_of_keys]; int64_t expect_lsn[num_of_keys]; for (size_t i = 0; i < num_of_keys; i++) { in_mem1[i] = i & 1; in_mem2[i] = i & 2; in_run1[i] = i & 4; in_run2[i] = i & 8; in_cache[i] = i & 16; expect[i] = (in_mem1[i] ? 1 : 0) + (in_mem2[i] ? 2 : 0) + (in_run1[i] ? 4 : 0) + (in_run2[i] ? 8 : 0); expect_lsn[i] = expect[i] == 0 ? 0 : 5 - bit_ctz_u32(expect[i]); } for (size_t i = 0; i < num_of_keys; i++) { if (!in_cache[i]) continue; if (expect[i] != 0) { struct vy_stmt_template tmpl_key = STMT_TEMPLATE(0, SELECT, i); struct vy_stmt_template tmpl_val = STMT_TEMPLATE(expect_lsn[i], REPLACE, i, expect[i]); vy_cache_insert_templates_chain(&cache, format, &tmpl_val, 1, &tmpl_key, ITER_EQ); } } /* create second mem */ for (size_t i = 0; i < num_of_keys; i++) { if (!in_mem2[i]) continue; struct vy_stmt_template tmpl_val = STMT_TEMPLATE(3, UPSERT, i, 2); tmpl_val.upsert_field = 1; tmpl_val.upsert_value = 2; vy_mem_insert_template(pk->mem, &tmpl_val); } rc = vy_index_rotate_mem(pk); is(rc, 0, "vy_index_rotate_mem"); /* create first mem */ for (size_t i = 0; i < num_of_keys; i++) { if (!in_mem1[i]) continue; struct vy_stmt_template tmpl_val = STMT_TEMPLATE(4, UPSERT, i, 1); tmpl_val.upsert_field = 1; tmpl_val.upsert_value = 1; vy_mem_insert_template(pk->mem, &tmpl_val); } /* create second run */ struct vy_mem *run_mem = vy_mem_new(pk->mem->env, *pk->env->p_generation, pk->cmp_def, pk->mem_format, pk->mem_format_with_colmask, pk->upsert_format, 0); for (size_t i = 0; i < num_of_keys; i++) { if (!in_run2[i]) continue; struct vy_stmt_template tmpl_val = STMT_TEMPLATE(1, UPSERT, i, 8); tmpl_val.upsert_field = 1; tmpl_val.upsert_value = 8; vy_mem_insert_template(run_mem, &tmpl_val); } struct vy_stmt_stream *write_stream = vy_write_iterator_new(pk->cmp_def, pk->disk_format, pk->upsert_format, pk->id == 0, true, &read_views); vy_write_iterator_new_mem(write_stream, run_mem); struct vy_run 
*run = vy_run_new(&run_env, 1); isnt(run, NULL, "vy_run_new"); rc = write_run(run, dir_name, pk, write_stream); is(rc, 0, "vy_run_write"); write_stream->iface->close(write_stream); vy_mem_delete(run_mem); vy_index_add_run(pk, run); struct vy_slice *slice = vy_slice_new(1, run, NULL, NULL, pk->cmp_def); vy_range_add_slice(range, slice); vy_run_unref(run); /* create first run */ run_mem = vy_mem_new(pk->mem->env, *pk->env->p_generation, pk->cmp_def, pk->mem_format, pk->mem_format_with_colmask, pk->upsert_format, 0); for (size_t i = 0; i < num_of_keys; i++) { if (!in_run1[i]) continue; struct vy_stmt_template tmpl_val = STMT_TEMPLATE(2, UPSERT, i, 4); tmpl_val.upsert_field = 1; tmpl_val.upsert_value = 4; vy_mem_insert_template(run_mem, &tmpl_val); } write_stream = vy_write_iterator_new(pk->cmp_def, pk->disk_format, pk->upsert_format, pk->id == 0, true, &read_views); vy_write_iterator_new_mem(write_stream, run_mem); run = vy_run_new(&run_env, 2); isnt(run, NULL, "vy_run_new"); rc = write_run(run, dir_name, pk, write_stream); is(rc, 0, "vy_run_write"); write_stream->iface->close(write_stream); vy_mem_delete(run_mem); vy_index_add_run(pk, run); slice = vy_slice_new(1, run, NULL, NULL, pk->cmp_def); vy_range_add_slice(range, slice); vy_run_unref(run); /* Compare with expected */ bool results_ok = true; bool has_errors = false; for (int64_t vlsn = 0; vlsn <= 6; vlsn++) { struct vy_read_view rv; rv.vlsn = vlsn == 6 ? 
INT64_MAX : vlsn; const struct vy_read_view *prv = &rv; for (size_t i = 0; i < num_of_keys; i++) { uint32_t expect = 0; int64_t expect_lsn = 0; if (in_run2[i] && vlsn >= 1) { expect += 8; expect_lsn = 1; } if (in_run1[i] && vlsn >= 2) { expect += 4; expect_lsn = 2; } if (in_mem2[i] && vlsn >= 3) { expect += 2; expect_lsn = 3; } if (in_mem1[i] && vlsn >= 4) { expect += 1; expect_lsn = 4; } struct vy_stmt_template tmpl_key = STMT_TEMPLATE(0, SELECT, i); struct tuple *key = vy_new_simple_stmt(format, pk->upsert_format, pk->mem_format_with_colmask, &tmpl_key); struct tuple *res; rc = vy_point_lookup(pk, NULL, &prv, key, &res); tuple_unref(key); if (rc != 0) { has_errors = true; continue; } if (expect == 0) { /* No value expected. */ if (res != NULL) results_ok = false; continue; } else { if (res == NULL) { results_ok = false; continue; } } uint32_t got = 0; tuple_field_u32(res, 1, &got); if (got != expect && expect_lsn != vy_stmt_lsn(res)) results_ok = false; tuple_unref(res); } } is(results_ok, true, "select results"); is(has_errors, false, "no errors happened"); vy_index_unref(pk); index_def_delete(index_def); tuple_format_unref(format); vy_cache_destroy(&cache); key_def_delete(key_def); vy_cache_env_destroy(&cache_env); vy_run_env_destroy(&run_env); vy_index_env_destroy(&index_env); strcpy(path, "rm -rf "); strcat(path, dir_name); system(path); check_plan(); footer(); } int main() { identifier_init(); plan(1); vy_iterator_C_test_init(128 * 1024); crc32_init(); test_basic(); vy_iterator_C_test_finish(); identifier_destroy(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/rtree_iterator.cc0000664000000000000000000002111713306560010021501 0ustar rootroot#include #include #include #include #include "unit.h" #include "salad/rtree.h" static int extent_count = 0; const uint32_t extent_size = 1024 * 8; static void * extent_alloc(void *ctx) { int *p_extent_count = (int *)ctx; assert(p_extent_count == &extent_count); ++*p_extent_count; return 
malloc(extent_size); } static void extent_free(void *ctx, void *page) { int *p_extent_count = (int *)ctx; assert(p_extent_count == &extent_count); --*p_extent_count; free(page); } static void iterator_check() { header(); struct rtree tree; rtree_init(&tree, 2, extent_size, extent_alloc, extent_free, &extent_count, RTREE_EUCLID); /* Filling tree */ const size_t count1 = 10000; const size_t count2 = 5; struct rtree_rect rect; size_t count = 0; record_t rec; struct rtree_iterator iterator; rtree_iterator_init(&iterator); for (size_t i = 0; i < count1; i++) { coord_t coord = i * 2 * count2; /* note that filled with even numbers */ for (size_t j = 0; j < count2; j++) { rtree_set2d(&rect, coord, coord, coord + j, coord + j); rtree_insert(&tree, &rect, record_t(++count)); } } printf("Test tree size: %d\n", (int)rtree_number_of_records(&tree)); /* Test that tree filled ok */ for (size_t i = 0; i < count1; i++) { for (size_t j = 0; j < count2; j++) { coord_t coord = i * 2 * count2; rtree_set2d(&rect, coord, coord, coord + j, coord + j); if (!rtree_search(&tree, &rect, SOP_BELONGS, &iterator)) { fail("Integrity check failed (1)", "false"); } for (size_t k = 0; k <= j; k++) { if (!rtree_iterator_next(&iterator)) { fail("Integrity check failed (2)", "false"); } } if (rtree_iterator_next(&iterator)) { fail("Integrity check failed (3)", "true"); } coord = (i * 2 + 1) * count2;; rtree_set2d(&rect, coord, coord, coord + j, coord + j); if (rtree_search(&tree, &rect, SOP_BELONGS, &iterator)) { fail("Integrity check failed (4)", "true"); } } } /* Print 7 elems closest to coordinate basis */ { static struct rtree_rect basis; printf("--> "); if (!rtree_search(&tree, &basis, SOP_NEIGHBOR, &iterator)) { fail("Integrity check failed (5)", "false"); } for (int i = 0; i < 7; i++) { rec = rtree_iterator_next(&iterator); if (rec == 0) { fail("Integrity check failed (6)", "false"); } printf("%p ", rec); } printf("\n"); } /* Print 7 elems closest to the point [(count1-1)*count2*2, 
(count1-1)*count2*2] */ { printf("<-- "); coord_t coord = (count1 - 1) * count2 * 2; rtree_set2d(&rect, coord, coord, coord, coord); if (!rtree_search(&tree, &rect, SOP_NEIGHBOR, &iterator)) { fail("Integrity check failed (5)", "false"); } for (int i = 0; i < 7; i++) { rec = rtree_iterator_next(&iterator); if (rec == 0) { fail("Integrity check failed (6)", "false"); } printf("%p ", rec); } printf("\n"); } /* Test strict belongs */ for (size_t i = 0; i < count1; i++) { for (size_t j = 0; j < count2; j++) { coord_t coord = i * 2 * count2; rtree_set2d(&rect, coord - 0.1, coord - 0.1, coord + j, coord + j); if (!rtree_search(&tree, &rect, SOP_STRICT_BELONGS, &iterator) && j != 0) { fail("Integrity check failed (7)", "false"); } for (size_t k = 0; k < j; k++) { if (!rtree_iterator_next(&iterator)) { fail("Integrity check failed (8)", "false"); } } if (rtree_iterator_next(&iterator)) { fail("Integrity check failed (9)", "true"); } coord = (i * 2 + 1) * count2; rtree_set2d(&rect, coord, coord, coord + j, coord + j); if (rtree_search(&tree, &rect, SOP_STRICT_BELONGS, &iterator)) { fail("Integrity check failed (10)", "true"); } } } /* Test contains */ for (size_t i = 0; i < count1; i++) { for (size_t j = 0; j < count2; j++) { coord_t coord = i * 2 * count2; rtree_set2d(&rect, coord, coord, coord + j, coord + j); if (!rtree_search(&tree, &rect, SOP_CONTAINS, &iterator)) { fail("Integrity check failed (11)", "false"); } for (size_t k = j; k < count2; k++) { if (!rtree_iterator_next(&iterator)) { fail("Integrity check failed (12)", "false"); } } if (rtree_iterator_next(&iterator)) { fail("Integrity check failed (13)", "true"); } coord = (i * 2 + 1) * count2; rtree_set2d(&rect, coord, coord, coord + j, coord + j); if (rtree_search(&tree, &rect, SOP_CONTAINS, &iterator)) { fail("Integrity check failed (14)", "true"); } } } /* Test strict contains */ for (size_t i = 0; i < count1; i++) { for (size_t j = 0; j < count2; j++) { coord_t coord = i * 2 * count2; rtree_set2d(&rect, 
coord + 0.1, coord + 0.1, coord + j, coord + j); rtree_rect_normalize(&rect, 2); if (!rtree_search(&tree, &rect, SOP_STRICT_CONTAINS, &iterator) && j != 0 && j != count2 - 1) { fail("Integrity check failed (11)", "false"); } if (j) { for (size_t k = j; k < count2 - 1; k++) { if (!rtree_iterator_next(&iterator)) { fail("Integrity check failed (12)", "false"); } } } if (rtree_iterator_next(&iterator)) { fail("Integrity check failed (13)", "true"); } coord = (i * 2 + 1) * count2; rtree_set2d(&rect, coord, coord, coord + j, coord + j); if (rtree_search(&tree, &rect, SOP_STRICT_CONTAINS, &iterator)) { fail("Integrity check failed (14)", "true"); } } } rtree_purge(&tree); rtree_iterator_destroy(&iterator); rtree_destroy(&tree); footer(); } static void iterator_invalidate_check() { header(); const size_t test_size = 300; const size_t max_delete_count = 100; const size_t max_insert_count = 200; const size_t attempt_count = 100; struct rtree_rect rect; /* invalidation during deletion */ srand(0); for (size_t attempt = 0; attempt < attempt_count; attempt++) { size_t del_pos = rand() % test_size; size_t del_cnt = rand() % max_delete_count + 1; if (del_pos + del_cnt > test_size) { del_cnt = test_size - del_pos; } struct rtree tree; rtree_init(&tree, 2, extent_size, extent_alloc, extent_free, &extent_count, RTREE_EUCLID); struct rtree_iterator iterators[test_size]; for (size_t i = 0; i < test_size; i++) rtree_iterator_init(iterators + i); for (size_t i = 0; i < test_size; i++) { rtree_set2d(&rect, i, i, i, i); rtree_insert(&tree, &rect, record_t(i+1)); } rtree_set2d(&rect, 0, 0, test_size, test_size); if (!rtree_search(&tree, &rect, SOP_BELONGS, &iterators[0]) || !rtree_iterator_next(&iterators[0])) { fail("Integrity check failed (15)", "false"); } for (size_t i = 1; i < test_size; i++) { iterators[i] = iterators[i - 1]; if (!rtree_iterator_next(&iterators[i])) { fail("Integrity check failed (16)", "false"); } } for (size_t i = del_pos; i < del_pos + del_cnt; i++) { 
rtree_set2d(&rect, i, i, i, i); if (!rtree_remove(&tree, &rect, record_t(i+1))) { fail("Integrity check failed (17)", "false"); } } for (size_t i = 0; i < test_size; i++) { if (rtree_iterator_next(&iterators[i])) { fail("Iterator was not invalidated (18)", "true"); } } for (size_t i = 0; i < test_size; i++) rtree_iterator_destroy(iterators + i); rtree_destroy(&tree); } /* invalidation during insertion */ srand(0); for (size_t attempt = 0; attempt < attempt_count; attempt++) { size_t ins_pos = rand() % test_size; size_t ins_cnt = rand() % max_insert_count + 1; struct rtree tree; rtree_init(&tree, 2, extent_size, extent_alloc, extent_free, &extent_count, RTREE_EUCLID); struct rtree_iterator iterators[test_size]; for (size_t i = 0; i < test_size; i++) rtree_iterator_init(iterators + i); for (size_t i = 0; i < test_size; i++) { rtree_set2d(&rect, i, i, i, i); rtree_insert(&tree, &rect, record_t(i+1)); } rtree_set2d(&rect, 0, 0, test_size, test_size); rtree_search(&tree, &rect, SOP_BELONGS, &iterators[0]); if (!rtree_iterator_next(&iterators[0])) { fail("Integrity check failed (19)", "false"); } for (size_t i = 1; i < test_size; i++) { iterators[i] = iterators[i - 1]; if (!rtree_iterator_next(&iterators[0])) { fail("Integrity check failed (20)", "false"); } } for (size_t i = ins_pos; i < ins_pos + ins_cnt; i++) { rtree_set2d(&rect, i, i, i, i); rtree_insert(&tree, &rect, record_t(test_size + i - ins_pos + 1)); } for (size_t i = 0; i < test_size; i++) { if (rtree_iterator_next(&iterators[i])) { fail("Iterator was not invalidated (22)", "true"); } } for (size_t i = 0; i < test_size; i++) rtree_iterator_destroy(iterators + i); rtree_destroy(&tree); } footer(); } int main(void) { iterator_check(); iterator_invalidate_check(); if (extent_count != 0) { fail("memory leak!", "false"); } } tarantool_1.9.1.26.g63eb81e3c/test/unit/coio.cc0000664000000000000000000000345713306560010017407 0ustar rootroot#include "memory.h" #include "fiber.h" #include "coio.h" #include "coio_task.h" 
#include "fio.h" #include "unit.h" #include "unit.h" int touch_f(va_list ap) { FILE *f = va_arg(ap, FILE *); const char *c = "c"; while (true) { int rc = fwrite(c, strlen(c), 1, f); fail_unless(rc == 1); fflush(f); fiber_sleep(0.01); if (fiber_is_cancelled()) return -1; } return 0; } static void stat_notify_test(FILE *f, const char *filename) { header(); struct fiber *touch = fiber_new_xc("touch", touch_f); fiber_start(touch, f); ev_stat stat; note("filename: %s", filename); coio_stat_init(&stat, filename); coio_stat_stat_timeout(&stat, TIMEOUT_INFINITY); fail_unless(stat.prev.st_size < stat.attr.st_size); fiber_cancel(touch); footer(); } static void stat_timeout_test(const char *filename) { header(); ev_stat stat; coio_stat_init(&stat, filename); coio_stat_stat_timeout(&stat, 0.01); footer(); } static ssize_t coio_test_wakeup(va_list ap) { usleep(1000); return 0; } static int test_call_f(va_list ap) { header(); int res = coio_call(coio_test_wakeup); note("call done with res %i", res); footer(); return res; } static int main_f(va_list ap) { const char *filename = "1.out"; FILE *f = fopen(filename, "w+"); stat_timeout_test(filename); stat_notify_test(f, filename); fclose(f); (void) remove(filename); coio_enable(); struct fiber *call_fiber = fiber_new_xc("coio_call wakeup", test_call_f); fiber_set_joinable(call_fiber, true); fiber_start(call_fiber); fiber_wakeup(call_fiber); fiber_cancel(call_fiber); fiber_join(call_fiber); ev_break(loop(), EVBREAK_ALL); return 0; } int main() { memory_init(); fiber_init(fiber_cxx_invoke); struct fiber *test = fiber_new_xc("coio_stat", main_f); fiber_wakeup(test); ev_run(loop(), 0); fiber_free(); memory_free(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/guava.c0000664000000000000000000000251513306560010017410 0ustar rootroot#include #include #include #include "unit.h" #include "salad/guava.h" static void check_guava_correctness(uint64_t code) { int32_t last = 0; for (int32_t shards = 1; shards <= 100000; shards++) { int32_t 
b = guava(code, shards); if (b != last) { fail_if(shards - 1 != b); last = b; } } } static void correctness_check() { header(); int64_t i_vals[] = {0, 1, 2}; for (size_t i = 0; i < sizeof(i_vals) / sizeof(int64_t); ++i) check_guava_correctness(i_vals[i]); srand(time(NULL)); for (size_t i = 0; i < 20; ++i) check_guava_correctness(rand() % 7); footer(); } static void sameresult_check() { header(); fail_if(guava(100, 20) != guava(100, 20)); footer(); } static void lcg_compat_check() { header(); int32_t golden100[] = { 0, 55, 62, 8, 45, 59, 86, 97, 82, 59, 73, 37, 17, 56, 86, 21, 90, 37, 38, 83 }; for (size_t i = 0; i < sizeof(golden100) / sizeof(int64_t); ++i) check_guava_correctness(golden100[i]); fail_if(6 != guava(10863919174838991ULL, 11)); fail_if(3 != guava(2016238256797177309ULL, 11)); fail_if(5 != guava(1673758223894951030ULL, 11)); fail_if(80343 != guava(2, 100001)); fail_if(22152 != guava(2201, 100001)); fail_if(15018 != guava(2202, 100001)); footer(); } int main(void) { correctness_check(); lcg_compat_check(); sameresult_check(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/rmean.result0000664000000000000000000000061013306560010020475 0ustar rootrootStat. 
2 names, timer simulation *** test_100rps *** Send 100 requests every second for 10 seconds Calc rps at third and last second EV1: rps 60, total 300 EV2: rps 0, total 0 EV1: rps 100, total 1000 EV2: rps 0, total 0 *** test_100rps: done *** *** test_mean15rps *** Send 15 rps on the average, and 3 rps to EV2 EV1: rps 15, total 1150 EV2: rps 3, total 30 *** test_mean15rps: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber.cc0000664000000000000000000000724413306560010017543 0ustar rootroot#include "memory.h" #include "fiber.h" #include "unit.h" #include "trivia/util.h" static int noop_f(va_list ap) { return 0; } static int cancel_f(va_list ap) { fiber_set_cancellable(true); while (true) { fiber_sleep(0.001); fiber_testcancel(); } return 0; } static int exception_f(va_list ap) { tnt_raise(OutOfMemory, 42, "allocator", "exception"); return 0; } static int no_exception_f(va_list ap) { try { tnt_raise(OutOfMemory, 42, "allocator", "exception"); } catch (Exception *e) { ; } return 0; } static int cancel_dead_f(va_list ap) { note("cancel dead has started"); fiber_set_cancellable(true); tnt_raise(OutOfMemory, 42, "allocator", "exception"); return 0; } static size_t fiber_stack_size_default; static void NOINLINE stack_expand(void *ptr) { char buf[2048]; memset(buf, 0x45, 2048); ptrdiff_t stack_diff = (buf - (char *)ptr); stack_diff = stack_diff >= 0 ? 
stack_diff : -stack_diff; if (stack_diff < (ptrdiff_t)fiber_stack_size_default) stack_expand(ptr); } static int test_stack_f(va_list ap) { char s; stack_expand(&s); return 0; } static void fiber_join_test() { header(); struct fiber *fiber = fiber_new_xc("join", noop_f); fiber_set_joinable(fiber, true); fiber_wakeup(fiber); fiber_join(fiber); fiber = fiber_new_xc("cancel", cancel_f); fiber_set_joinable(fiber, true); fiber_wakeup(fiber); fiber_sleep(0); fiber_cancel(fiber); fiber_join(fiber); fiber = fiber_new_xc("exception", exception_f); fiber_set_joinable(fiber, true); fiber_wakeup(fiber); try { if (fiber_join(fiber) != 0) diag_raise(); fail("exception not raised", ""); } catch (Exception *e) { note("exception propagated"); } fputs("#gh-1238: log uncaught errors\n", stderr); fiber = fiber_new_xc("exception", exception_f); fiber_wakeup(fiber); /* * A fiber which is using exception should not * push them up the stack. */ fiber = fiber_new_xc("no_exception", no_exception_f); fiber_set_joinable(fiber, true); fiber_wakeup(fiber); fiber_join(fiber); /* * Trying to cancel a dead joinable cancellable fiber lead to * a crash, because cancel would try to schedule it. 
*/ fiber = fiber_new_xc("cancel_dead", cancel_dead_f); fiber_set_joinable(fiber, true); fiber_wakeup(fiber); /** Let the fiber schedule */ fiber_wakeup(fiber()); fiber_yield(); note("by this time the fiber should be dead already"); fiber_cancel(fiber); fiber_join(fiber); struct fiber_attr *fiber_attr; fiber_attr = fiber_attr_new(); fiber_stack_size_default = fiber_attr_getstacksize(fiber_attr); fiber_attr_setstacksize(fiber_attr, fiber_stack_size_default * 2); fiber = fiber_new_ex("test_stack", fiber_attr, test_stack_f); fiber_attr_delete(fiber_attr); if (fiber == NULL) diag_raise(); fiber_set_joinable(fiber, true); fiber_wakeup(fiber); /** Let the fiber schedule */ fiber_wakeup(fiber()); fiber_yield(); note("big-stack fiber not crashed"); fiber_join(fiber); footer(); } void fiber_name_test() { header(); note("name of a new fiber: %s.\n", fiber_name(fiber())); fiber_set_name(fiber(), "Horace"); note("set new fiber name: %s.\n", fiber_name(fiber())); const char *long_name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"\ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; fiber_set_name(fiber(), long_name); note("fiber name is truncated: %s.\n", fiber_name(fiber())); footer(); } static int main_f(va_list ap) { fiber_name_test(); fiber_join_test(); ev_break(loop(), EVBREAK_ALL); return 0; } int main() { memory_init(); fiber_init(fiber_cxx_invoke); struct fiber *main = fiber_new_xc("main", main_f); fiber_wakeup(main); ev_run(loop(), 0); fiber_free(); memory_free(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_channel_stress.cc0000664000000000000000000000221013306560010022622 0ustar rootroot#include "memory.h" #include "fiber.h" #include "fiber_channel.h" #include "unit.h" enum { ITERATIONS = 100000, }; static int push_f(va_list ap) { struct fiber_channel *channel = va_arg(ap, struct fiber_channel *); for (int i = 0; i < ITERATIONS; i++) fiber_channel_put(channel, NULL); return 0; } static int pop_f(va_list ap) 
{ struct fiber_channel *channel = va_arg(ap, struct fiber_channel *); for (int i = 0; i < ITERATIONS; i++) { void *ptr; fiber_channel_get(channel, &ptr); } return 0; } static int main_f(va_list ap) { header(); struct fiber *push = fiber_new_xc("push_f", push_f); fiber_set_joinable(push, true); struct fiber *pop = fiber_new_xc("pop_f", pop_f); fiber_set_joinable(pop, true); struct fiber_channel *channel = fiber_channel_new(1); fiber_start(push, channel); fiber_start(pop, channel); fiber_join(push); fiber_join(pop); fiber_channel_delete(channel); ev_break(loop(), EVBREAK_ALL); footer(); return 0; } int main() { memory_init(); fiber_init(fiber_c_invoke); struct fiber *main= fiber_new_xc("main", main_f); fiber_wakeup(main); ev_run(loop(), 0); fiber_free(); memory_free(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/vclock.cc0000664000000000000000000002555413306560010017741 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ extern "C" { #include "unit.h" } /* extern "C" */ #include #include "box/vclock.h" #define str2(x) #x #define str(x) str2(x) #define arg(...) __VA_ARGS__ static inline int test_compare_one(uint32_t a_count, const int64_t *lsns_a, uint32_t b_count, const int64_t *lsns_b) { struct vclock a; struct vclock b; vclock_create(&a); vclock_create(&b); for (uint32_t node_id = 0; node_id < a_count; node_id++) { if (lsns_a[node_id] > 0) vclock_follow(&a, node_id, lsns_a[node_id]); } for (uint32_t node_id = 0; node_id < b_count; node_id++) { if (lsns_b[node_id] > 0) vclock_follow(&b, node_id, lsns_b[node_id]); } return vclock_compare(&a, &b); } #define test2(xa, xb, res) ({\ const int64_t a[] = {xa}, b[] = {xb}; \ is(test_compare_one(sizeof(a) / sizeof(*a), a, sizeof(b) / sizeof(*b), b), res, \ "compare %s, %s => %d", str((xa)), str((xb)), res); }) #define test(a, b, res) ({ test2(arg(a), arg(b), res); \ test2(arg(b), arg(a), res != VCLOCK_ORDER_UNDEFINED ? 
-res : res); }) int test_compare() { plan(40); header(); test(arg(), arg(), 0); test(arg(), arg(10), -1); test(arg(0), arg(0), 0); test(arg(1), arg(1), 0); test(arg(1), arg(2), -1); test(arg(), arg(10, 1, 0), -1); test(arg(5), arg(10, 1, 0), -1); test(arg(10), arg(10, 1, 0), -1); test(arg(15), arg(10, 1, 0), VCLOCK_ORDER_UNDEFINED); test(arg(10, 1, 0), arg(10, 1, 1), -1); test(arg(10, 1, 0), arg(10, 2, 0), -1); test(arg(10, 1, 0), arg(10, 1, 0), 0); test(arg(10, 0, 1), arg(10, 1, 0), VCLOCK_ORDER_UNDEFINED); test(arg(10, 2, 1), arg(10, 1, 2), VCLOCK_ORDER_UNDEFINED); test(arg(10, 0, 1), arg(11, 0, 0), VCLOCK_ORDER_UNDEFINED); test(arg(10, 0, 5), arg(5, 0, 10), VCLOCK_ORDER_UNDEFINED); test(arg(10, 10, 10), arg(10, 10, 10), 0); test(arg(10, 10, 10), arg(10, 10, 10, 1), -1); test(arg(10, 10, 10), arg(10, 10, 10, 1, 2, 3), -1); test(arg(0, 0, 0), arg(10, 0, 0, 0, 0), -1); footer(); return check_plan(); } #undef test #undef test2 static void testset_create(vclockset_t *set, int64_t *files, int files_n, int node_n) { vclockset_new(set); for (int f = 0; f < files_n; f++) { struct vclock *vclock = (struct vclock *) malloc(sizeof(*vclock)); vclock_create(vclock); int64_t signature = 0; for (int32_t node_id = 0; node_id < node_n; node_id++) { int64_t lsn = *(files + f * node_n + node_id); if (lsn <= 0) continue; /* Calculate LSNSUM */ signature += lsn; /* Update cluster hash */ vclock_follow(vclock, node_id, lsn); } vclockset_insert(set, vclock); } } static void testset_destroy(vclockset_t *set) { struct vclock *cur = vclockset_first(set); while (cur != NULL) { struct vclock *next = vclockset_next(set, cur); vclockset_remove(set, cur); free(cur); cur = next; } } static int test_isearch() { plan(36); header(); enum { NODE_N = 4}; int64_t files[][NODE_N] = { { 10, 0, 0, 0}, /* =10.xlog */ { 12, 2, 0, 0}, /* =14.xlog */ { 14, 2, 0, 0}, /* =16.xlog */ { 14, 2, 2, 0}, /* =18.xlog */ { 14, 4, 2, 3}, /* =23.xlog */ { 14, 4, 2, 5}, /* =25.xlog */ }; enum { FILE_N = sizeof(files) / 
(sizeof(files[0])) }; int64_t queries[][NODE_N + 1] = { /* not found (lsns are too old) */ { 0, 0, 0, 0, /* => */ 10}, { 1, 0, 0, 0, /* => */ 10}, { 5, 0, 0, 0, /* => */ 10}, /* =10.xlog (left bound) */ { 10, 0, 0, 0, /* => */ 10}, { 10, 1, 0, 0, /* => */ 10}, { 10, 2, 0, 0, /* => */ 10}, { 10, 3, 0, 0, /* => */ 10}, { 10, 4, 0, 0, /* => */ 10}, /* =10.xlog (middle) */ { 11, 0, 0, 0, /* => */ 10}, { 11, 1, 0, 0, /* => */ 10}, { 11, 2, 0, 0, /* => */ 10}, { 11, 3, 0, 0, /* => */ 10}, { 11, 4, 0, 0, /* => */ 10}, { 11, 5, 3, 6, /* => */ 10}, /* =10.xlog (right bound) */ { 12, 0, 0, 0, /* => */ 10}, { 12, 1, 0, 0, /* => */ 10}, { 12, 1, 1, 1, /* => */ 10}, { 12, 1, 2, 5, /* => */ 10}, /* =14.xlog */ { 12, 2, 0, 0, /* => */ 14}, { 12, 3, 0, 0, /* => */ 14}, { 12, 4, 0, 0, /* => */ 14}, { 12, 5, 3, 6, /* => */ 14}, /* =16.xlog */ { 14, 2, 0, 0, /* => */ 16}, { 14, 2, 1, 0, /* => */ 16}, { 14, 2, 0, 1, /* => */ 16}, /* =18.xlog */ { 14, 2, 2, 0, /* => */ 18}, { 14, 2, 4, 0, /* => */ 18}, { 14, 2, 4, 3, /* => */ 18}, { 14, 2, 4, 5, /* => */ 18}, { 14, 4, 2, 0, /* => */ 18}, { 14, 5, 2, 0, /* => */ 18}, /* =23.xlog */ { 14, 4, 2, 3, /* => */ 23}, { 14, 5, 2, 3, /* => */ 23}, /* =25.xlog */ { 14, 4, 2, 5, /* => */ 25}, { 14, 5, 2, 6, /* => */ 25}, { 100, 9, 9, 9, /* => */ 25}, }; enum { QUERY_N = sizeof(queries) / (sizeof(queries[0])) }; vclockset_t set; testset_create(&set, (int64_t *) files, FILE_N, NODE_N); for (int q = 0; q < QUERY_N; q++) { struct vclock vclock; vclock_create(&vclock); int64_t *query = (int64_t *) queries + q * (NODE_N + 1); /* Update cluster hash */ for (uint32_t node_id = 0; node_id < NODE_N; node_id++) { int64_t lsn = *(query + node_id); if (lsn <= 0) continue; vclock_follow(&vclock, node_id, lsn); } int64_t check = *(query + NODE_N); struct vclock *res = vclockset_match(&set, &vclock); int64_t value = res != NULL ? 
vclock_sum(res) : INT64_MAX; is(value, check, "query #%d", q + 1); } testset_destroy(&set); footer(); return check_plan(); } static inline int test_tostring_one(uint32_t count, const int64_t *lsns, const char *res) { struct vclock vclock; vclock_create(&vclock); for (uint32_t node_id = 0; node_id < count; node_id++) { if (lsns[node_id] > 0) vclock_follow(&vclock, node_id, lsns[node_id]); } char *str = vclock_to_string(&vclock); int result = strcmp(str, res); if (result) diag("\n!!!new result!!! %s\n", str); free(str); return !result; } #define test(xa, res) ({\ const int64_t a[] = {xa}; \ ok(test_tostring_one(sizeof(a) / sizeof(*a), a, res), \ "tostring %s => %s", str((xa)), res); }) int test_tostring() { plan(8); header(); test(arg(), "{}"); test(arg(-1, -1, -1), "{}"); test(arg(1), "{0: 1}"); test(arg(1, 2), "{0: 1, 1: 2}"); test(arg(10, 15, 20), "{0: 10, 1: 15, 2: 20}"); test(arg(10, -1, 15, -1, 20), "{0: 10, 2: 15, 4: 20}"); test(arg(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), "{1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, " "9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15}"); test(arg(9223372036854775000, 9223372036854775001, 9223372036854775002, 9223372036854775003, 9223372036854775004, 9223372036854775005, 9223372036854775006, 9223372036854775007, 9223372036854775008, 9223372036854775009, 9223372036854775010, 9223372036854775011, 9223372036854775012, 9223372036854775013, 9223372036854775014, 9223372036854775015), "{0: 9223372036854775000, 1: 9223372036854775001, " "2: 9223372036854775002, 3: 9223372036854775003, " "4: 9223372036854775004, 5: 9223372036854775005, " "6: 9223372036854775006, 7: 9223372036854775007, " "8: 9223372036854775008, 9: 9223372036854775009, " "10: 9223372036854775010, 11: 9223372036854775011, " "12: 9223372036854775012, 13: 9223372036854775013, " "14: 9223372036854775014, 15: 9223372036854775015}"); footer(); return check_plan(); } #undef test static inline int test_fromstring_one(const char *str, uint32_t count, 
const int64_t *lsns) { struct vclock vclock; vclock_create(&vclock); size_t rc = vclock_from_string(&vclock, str); struct vclock check; vclock_create(&check); for (uint32_t node_id = 0; node_id < count; node_id++) { if (lsns[node_id] >= 0) check.lsn[node_id] = lsns[node_id]; } return (rc != 0 || vclock_compare(&vclock, &check) != 0); } #define test(s, xa) ({\ const int64_t a[] = {xa}; \ ok(!test_fromstring_one(s, sizeof(a) / sizeof(*a), a), \ "fromstring %s => %s", s, str((xa))); }) int test_fromstring() { plan(12); header(); test("{}", arg()); test(" \t \t { \t \t } \t \t ", arg()); test("{0: 10}", arg(10)); test("{0: 10,}", arg(10)); test("{\t 0\t :\t 10\t ,\t }", arg(10)); test("{0: 10, 1: 15, 3: 20}", arg(10, 15, -1, 20)); test("{2: 20, 0: 10, 4: 30}", arg(10, -1, 20, -1, 30)); test("{4: 30, 2: 20}", arg(-1, -1, 20, -1, 30)); test("{4: 30, 2: 20,}", arg(-1, -1, 20, -1, 30)); test("{0: 4294967295}", arg(4294967295)); test("{0: 4294967296}", arg(4294967296)); test("{0: 9223372036854775807}", arg(9223372036854775807)); footer(); return check_plan(); } #undef test #define test(str, offset) ({ \ struct vclock tmp; \ vclock_create(&tmp); \ is(vclock_from_string(&tmp, str), offset, \ "fromstring \"%s\" => %u", str, offset)}) int test_fromstring_invalid() { plan(32); header(); /* invalid symbols */ test("", 1); test(" ", 2); test("\t \t \t ", 7); test("}", 1); test("1: 10", 1); test("abcde", 1); test("12345", 1); test("\1\2\3\4\5\6", 1); /* truncated */ test("{", 2); test("{1\t ", 5); test("{1:\t ", 6); test("{1:10", 6); test("{1:10\t ", 8); test("{1:10,", 7); test("{1:10,\t \t ", 11); /* comma */ test("{1:10 2:20", 7); test("{1:10,,", 7); test("{1:10, 10,}", 10); /* invalid values */ test("{1:-1}", 4); test("{-1:1}", 2); test("{128:1}", 5); /* node_id > VCLOCK_MAX */ test("{1:abcde}", 4); test("{abcde:1}", 2); test("{1:1.1}", 5); test("{1.1:1}", 3); test("{4294967296:1}", 12); test("{1:9223372036854775808}", 23); test("{1:18446744073709551616}", 24); 
test("{1:18446744073709551616}", 24); test("{1:340282366920938463463374607431768211456}", 43); /* duplicate */ test("{1:10, 1:20}", 12); test("{1:20, 1:10}", 12); footer(); return check_plan(); } #undef test int main(void) { plan(5); test_compare(); test_isearch(); test_tostring(); test_fromstring(); test_fromstring_invalid(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/rope.result0000664000000000000000000004017713306560010020354 0ustar rootrootinsert offset = 0, str = 'who's gonna be' size = 14 string = 'who's gonna be' └──{ len = 14, height = 1, data = 'who's gonna be'} insert offset = 14, str = '' size = 20 string = 'who's gonna be' │ ┌──nil └──{ len = 14, height = 2, data = 'who's gonna be'} └──{ len = 6, height = 1, data = ''} insert offset = 20, str = ', Mr. Black' size = 41 string = 'who's gonna be, Mr. Black' │ ┌──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 2, data = ''} └──{ len = 21, height = 1, data = ', Mr. Black'} insert offset = 41, str = ', but they , Mr. Black, but they , Mr. Black, but they , Mr. Black, but they , Mr. Black, but they , Mr. Black, but they don't know each other' │ ┌──{ len = 30, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 4, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} └──{ len = 27, height = 2, data = ', but they , Mr. Black, but they don't know each other, so nobody wants to back.' │ ┌──{ len = 30, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 4, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} └──{ len = 27, height = 3, data = ', but they , Mr. 
Black, but they don't know each other, so nobody wants to back down.' │ ┌──{ len = 30, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 4, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} │ ┌──{ len = 27, height = 2, data = ', but they , Mr. Black, but they don't know each other, so nobody wants to back down.' │ ┌──{ len = 30, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 5, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} │ ┌──{ len = 27, height = 2, data = ', but they , Mr. Black, but they don't know each other, so nobody wants to back down.' │ ┌──{ len = 25, height = 1, data = ' got got>You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 5, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} │ ┌──{ len = 27, height = 2, data = ', but they , Mr. Black, but they don't know each other, so nobody wants to back down.' │ ┌──{ len = 16, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 5, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} │ ┌──{ len = 27, height = 2, data = ', but they , Mr. Black, but they don't know each other, so nobody wants to back down., Mr. 
Black, but they don't know each other, so nobody wants to back down.point' │ ┌──{ len = 16, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 6, height = 5, data = ''} │ ┌──{ len = 21, height = 1, data = ', Mr. Black'} │ ┌──{ len = 27, height = 2, data = ', but they Black, but they don't know each other, so nobody wants to back down.point' │ ┌──{ len = 16, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 20, height = 4, data = ' Mr. Black'} │ ┌──{ len = 27, height = 1, data = ', but they don't know each other, so nobody wants to back down.point' │ ┌──{ len = 16, height = 1, data = 'You got four of '} │ ┌──{ len = 9, height = 2, data = 'guys all '} │ │ └──nil │ ┌──{ len = 19, height = 3, data = 'five fighting over '} │ │ └──{ len = 14, height = 1, data = 'who's gonna be'} └──{ len = 5, height = 5, data = ' Mr. 
'} │ ┌──{ len = 5, height = 1, data = 'Black'} │ ┌──{ len = 27, height = 2, data = ', but they int64_t vy_log_next_id(void) { static int64_t id = 0; return id++; } void vy_log_tx_begin(void) {} int vy_log_tx_commit(void) { return 0; } void vy_log_write(const struct vy_log_record *record) {} int vy_recovery_load_index(struct vy_recovery *recovery, uint32_t space_id, uint32_t index_id, int64_t index_lsn, bool snapshot_recovery, vy_recovery_cb cb, void *cb_arg) { unreachable(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/rtree_multidim.cc0000664000000000000000000003275513306560010021506 0ustar rootroot#include #include #include #include #include #include #include #include "unit.h" #include "salad/rtree.h" #include "../../src/lib/salad/rtree.h" #include #include using namespace std; const uint32_t extent_size = 1024 * 16; const coord_t SPACE_LIMIT = 100; const coord_t BOX_LIMIT = 10; const unsigned BOX_POINT_CHANCE_PERCENT = 5; const unsigned NEIGH_COUNT = 5; const unsigned AVERAGE_COUNT = 500; const unsigned TEST_ROUNDS = 1000; static int page_count = 0; static void * extent_alloc(void *ctx) { int *p_page_count = (int *)ctx; assert(p_page_count == &page_count); ++*p_page_count; return malloc(extent_size); } static void extent_free(void *ctx, void *page) { int *p_page_count = (int *)ctx; assert(p_page_count == &page_count); --*p_page_count; free(page); } struct CCoordPair { coord_t a, b; }; coord_t rand(coord_t lim) { return rand() % 1024 * lim / 1024; } template struct CBox { CCoordPair pairs[DIMENSION]; void RandomPoint() { for (unsigned i = 0; i < DIMENSION; i++) { pairs[i].b = pairs[i].a = rand(SPACE_LIMIT); } } void Randomize() { coord_t widths[DIMENSION] = {0}; if (rand() % 100 >= (int)BOX_POINT_CHANCE_PERCENT) for (unsigned i = 0; i < DIMENSION; i++) widths[i] = rand(BOX_LIMIT); for (unsigned i = 0; i < DIMENSION; i++) { pairs[i].a = rand(SPACE_LIMIT - widths[i]); pairs[i].b = pairs[i].a + widths[i]; } } void RandomizeBig() { coord_t widths[DIMENSION] = {0}; if 
(DIMENSION == 1) for (unsigned i = 0; i < DIMENSION; i++) widths[i] = rand(SPACE_LIMIT / 4); else if (DIMENSION == 2) for (unsigned i = 0; i < DIMENSION; i++) widths[i] = rand(SPACE_LIMIT / 3); else if (DIMENSION == 3) for (unsigned i = 0; i < DIMENSION; i++) widths[i] = rand(SPACE_LIMIT / 2); else for (unsigned i = 0; i < DIMENSION; i++) widths[i] = rand(SPACE_LIMIT); for (unsigned i = 0; i < DIMENSION; i++) { pairs[i].a = rand(SPACE_LIMIT - widths[i]); pairs[i].b = pairs[i].a + widths[i]; } } void FillRTreeRect(struct rtree_rect *rt) { for (unsigned i = 0; i < DIMENSION; i++) { rt->coords[2 * i] = pairs[i].a; rt->coords[2 * i + 1] = pairs[i].b; } } bool operator== (const struct rtree_rect *rt) const { for (unsigned i = 0; i < DIMENSION; i++) { if (rt->coords[2 * i] != pairs[i].a || rt->coords[2 * i + 1] != pairs[i].b) return false; } return true; } bool In(const CBox &another) const { for (unsigned i = 0; i < DIMENSION; i++) { if (pairs[i].a < another.pairs[i].a || pairs[i].b > another.pairs[i].b) return false; } return true; } bool InStrictly(const CBox &another) const { for (unsigned i = 0; i < DIMENSION; i++) { if (pairs[i].a <= another.pairs[i].a || pairs[i].b >= another.pairs[i].b) return false; } return true; } coord_t Distance2(const CBox &point) const { coord_t res = 0; for (unsigned i = 0; i < DIMENSION; i++) { if (point.pairs[i].a < pairs[i].a) { coord_t d = pairs[i].a - point.pairs[i].a; res += d * d; } else if (point.pairs[i].a > pairs[i].b) { coord_t d = point.pairs[i].a - pairs[i].b; res += d * d; } } return res; } coord_t DistanceMan(const CBox &point) const { coord_t res = 0; for (unsigned i = 0; i < DIMENSION; i++) { if (point.pairs[i].a < pairs[i].a) { coord_t d = pairs[i].a - point.pairs[i].a; res += d; } else if (point.pairs[i].a > pairs[i].b) { coord_t d = point.pairs[i].a - pairs[i].b; res += d; } } return res; } }; template struct CBoxSetEntry { CBox box; size_t id; size_t next; bool used; bool operator<(const CBoxSetEntry &a) const { 
return id < a.id; } }; template struct CBoxSet { vector > entries; size_t boxCount; size_t free; CBoxSet() : boxCount(0), free(SIZE_MAX) {} size_t getNewID() { size_t res; if (free != SIZE_MAX) { res = free; free = entries[free].next; } else { res = entries.size(); entries.resize(res + 1); } return res; } size_t AddBox(const CBox &box) { size_t id = getNewID(); entries[id].box = box; entries[id].id = id; entries[id].next = SIZE_MAX; entries[id].used = true; boxCount++; return id; } size_t RandUsedID() const { assert(boxCount); size_t res = rand() % entries.size(); while (!entries[res].used) if (++res >= entries.size()) res = 0; return res; } void DeleteBox(size_t id) { entries[id].used = false; entries[id].next = free; free = id; boxCount--; } void SelectIn(const CBox &box, vector > &result) const { result.clear(); for (size_t i = 0; i < entries.size(); i++) if (entries[i].used && entries[i].box.In(box)) result.push_back(entries[i]); } void SelectInStrictly(const CBox &box, vector > &result) const { result.clear(); for (size_t i = 0; i < entries.size(); i++) if (entries[i].used && entries[i].box.InStrictly(box)) result.push_back(entries[i]); } void SelectNeigh(const CBox &point, vector > &result) const; void SelectNeighMan(const CBox &point, vector > &result) const; }; template struct CEntryByDistance { const CBox &point; CEntryByDistance(const CBox &point_) : point(point_) {} bool operator()(const CBoxSetEntry &a, const CBoxSetEntry &b) const { coord_t da = a.box.Distance2(point); coord_t db = b.box.Distance2(point); return da < db ? true : da > db ? false : a.id < b.id; } }; template struct CEntryByDistanceMan { const CBox &point; CEntryByDistanceMan(const CBox &point_) : point(point_) {} bool operator()(const CBoxSetEntry &a, const CBoxSetEntry &b) const { coord_t da = a.box.DistanceMan(point); coord_t db = b.box.DistanceMan(point); return da < db ? true : da > db ? 
false : a.id < b.id; } }; template void CBoxSet::SelectNeigh(const CBox &point, vector > &result) const { result.clear(); CEntryByDistance comp(point); set, CEntryByDistance > set(comp); size_t i = 0; for (; i < entries.size() && set.size() < NEIGH_COUNT; i++) { if (!entries[i].used) continue; set.insert(entries[i]); } if (set.empty()) return; coord_t max_d = set.rbegin()->box.Distance2(point); for (; i < entries.size(); i++) { if (!entries[i].used) continue; coord_t d = entries[i].box.Distance2(point); if (d < max_d) { auto itr = set.end(); --itr; set.erase(itr); set.insert(entries[i]); max_d = set.rbegin()->box.Distance2(point); } } for (auto itr : set) result.push_back(itr); } template void CBoxSet::SelectNeighMan(const CBox &point, vector > &result) const { result.clear(); CEntryByDistanceMan comp(point); set, CEntryByDistanceMan > set(comp); size_t i = 0; for (; i < entries.size() && set.size() < NEIGH_COUNT; i++) { if (!entries[i].used) continue; set.insert(entries[i]); } if (set.empty()) return; coord_t max_d = set.rbegin()->box.DistanceMan(point); for (; i < entries.size(); i++) { if (!entries[i].used) continue; coord_t d = entries[i].box.DistanceMan(point); if (d < max_d) { auto itr = set.end(); --itr; set.erase(itr); set.insert(entries[i]); max_d = set.rbegin()->box.DistanceMan(point); } } for (auto itr : set) result.push_back(itr); } template static void test_select_neigh(const CBoxSet &set, const struct rtree *tree) { CBox box; box.RandomizeBig(); vector > res1; set.SelectNeigh(box, res1); struct rtree_rect rt; box.FillRTreeRect(&rt); struct rtree_iterator iterator; rtree_iterator_init(&iterator); vector > res2; if (rtree_search(tree, &rt, SOP_NEIGHBOR, &iterator)) { void *record; while((record = rtree_iterator_next(&iterator))) { CBoxSetEntry entry; entry.id = ((unsigned)(uintptr_t)record) - 1; entry.box = set.entries[entry.id].box; res2.push_back(entry); if (res2.size() == NEIGH_COUNT) break; } } if (res1.size() != res2.size()) { printf("%s result 
size differ %d %d\n", __func__, (int)res1.size(), (int)res2.size()); } else { for (size_t i = 0; i < res1.size(); i++) if (res1[i].id != res2[i].id && res1[i].box.Distance2(box) != res2[i].box.Distance2(box)) printf("%s result differ!\n", __func__); } rtree_iterator_destroy(&iterator); } template static void test_select_neigh_man(const CBoxSet &set, struct rtree *tree) { CBox box; box.RandomizeBig(); vector > res1; set.SelectNeighMan(box, res1); struct rtree_rect rt; box.FillRTreeRect(&rt); struct rtree_iterator iterator; rtree_iterator_init(&iterator); vector > res2; tree->distance_type = RTREE_MANHATTAN; /* dirty hack */ if (rtree_search(tree, &rt, SOP_NEIGHBOR, &iterator)) { void *record; while((record = rtree_iterator_next(&iterator))) { CBoxSetEntry entry; entry.id = ((unsigned)(uintptr_t)record) - 1; entry.box = set.entries[entry.id].box; res2.push_back(entry); if (res2.size() == NEIGH_COUNT) break; } } if (res1.size() != res2.size()) { printf("%s result size differ %d %d\n", __func__, (int)res1.size(), (int)res2.size()); } else { for (size_t i = 0; i < res1.size(); i++) if (res1[i].id != res2[i].id && res1[i].box.DistanceMan(box) != res2[i].box.DistanceMan(box)) printf("%s result differ!\n", __func__); } tree->distance_type = RTREE_EUCLID; /* dirty hack */ rtree_iterator_destroy(&iterator); } template static void test_select_in(const CBoxSet &set, const struct rtree *tree) { CBox box; box.RandomizeBig(); vector > res1; set.SelectIn(box, res1); struct rtree_rect rt; box.FillRTreeRect(&rt); struct rtree_iterator iterator; rtree_iterator_init(&iterator); vector > res2; if (rtree_search(tree, &rt, SOP_BELONGS, &iterator)) { void *record; while((record = rtree_iterator_next(&iterator))) { CBoxSetEntry entry; entry.id = ((unsigned)(uintptr_t)record) - 1; entry.box = set.entries[entry.id].box; res2.push_back(entry); } } sort(res1.begin(), res1.end()); sort(res2.begin(), res2.end()); if (res1.size() != res2.size()) { printf("%s result size differ %d %d\n", __func__, 
(int)res1.size(), (int)res2.size()); } else { for (size_t i = 0; i < res1.size(); i++) if (res1[i].id != res2[i].id) printf("%s result differ!\n", __func__); } rtree_iterator_destroy(&iterator); } template static void test_select_strict_in(const CBoxSet &set, const struct rtree *tree) { CBox box; box.RandomizeBig(); vector > res1; set.SelectInStrictly(box, res1); struct rtree_rect rt; box.FillRTreeRect(&rt); struct rtree_iterator iterator; rtree_iterator_init(&iterator); vector > res2; if (rtree_search(tree, &rt, SOP_STRICT_BELONGS, &iterator)) { void *record; while((record = rtree_iterator_next(&iterator))) { CBoxSetEntry entry; entry.id = ((unsigned)(uintptr_t)record) - 1; entry.box = set.entries[entry.id].box; res2.push_back(entry); } } sort(res1.begin(), res1.end()); sort(res2.begin(), res2.end()); if (res1.size() != res2.size()) { printf("%s result size differ %d %d\n", __func__, (int)res1.size(), (int)res2.size()); } else { for (size_t i = 0; i < res1.size(); i++) if (res1[i].id != res2[i].id) printf("%s result differ!\n", __func__); } rtree_iterator_destroy(&iterator); } template static void rand_test() { header(); CBoxSet set; struct rtree tree; rtree_init(&tree, DIMENSION, extent_size, extent_alloc, extent_free, &page_count, RTREE_EUCLID); printf("\tDIMENSION: %u, page size: %u, max fill good: %d\n", DIMENSION, tree.page_size, tree.page_max_fill >= 10); for (unsigned i = 0; i < TEST_ROUNDS; i++) { bool insert; if (set.boxCount == 0) { insert = true; } else if (set.boxCount == AVERAGE_COUNT) { insert = false; } else { insert = rand() % (AVERAGE_COUNT * 2) > set.boxCount; } if (insert) { CBox box; box.Randomize(); size_t id = set.AddBox(box); struct rtree_rect rt; box.FillRTreeRect(&rt); rtree_insert(&tree, &rt, (void *)(id + 1)); } else { size_t id = set.RandUsedID(); struct rtree_rect rt; set.entries[id].box.FillRTreeRect(&rt); if (!rtree_remove(&tree, &rt, (void *)(id + 1))) { printf("Error in remove\n"); } set.DeleteBox(id); } assert(set.boxCount == 
tree.n_records); test_select_neigh(set, &tree); test_select_neigh_man(set, &tree); test_select_in(set, &tree); test_select_strict_in(set, &tree); } rtree_destroy(&tree); footer(); } int main(void) { srand(time(0)); rand_test<1>(); rand_test<2>(); rand_test<3>(); rand_test<8>(); rand_test<16>(); if (page_count != 0) { fail("memory leak!", "true"); } } tarantool_1.9.1.26.g63eb81e3c/test/unit/mhash_bytemap.result0000664000000000000000000000021613306560010022216 0ustar rootroot *** mhash_int32_id_test *** *** mhash_int32_id_test: done *** *** mhash_int32_collision_test *** *** mhash_int32_collision_test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rlist.c0000664000000000000000000000654713306560010017453 0ustar rootroot#include "small/rlist.h" #include #include #include "unit.h" #define PLAN 87 #define ITEMS 7 struct test { char ch; int no; struct rlist list; }; static struct test items[ITEMS]; static RLIST_HEAD(head); static RLIST_HEAD(head2); int main(void) { int i; struct test *it; struct rlist *rlist; plan(PLAN); ok(rlist_empty(&head), "list is empty"); for (i = 0; i < ITEMS; i++) { items[i].no = i; rlist_add_tail(&head, &(items[i].list)); } RLIST_HEAD(empty_list); ok(rlist_empty(&empty_list), "rlist_nil is empty"); ok(rlist_empty(&head2), "head2 is empty"); rlist_swap(&head2, &empty_list); ok(rlist_empty(&empty_list), "rlist_nil is empty after swap"); ok(rlist_empty(&head2), "head2 is empty after swap"); rlist_swap(&head, &head2); ok(rlist_empty(&head), "head is empty after swap"); is(rlist_first(&head2), &items[0].list, "first item"); is(rlist_last(&head2), &items[ITEMS - 1].list, "last item"); i = 0; rlist_foreach(rlist, &head2) { is(rlist, &items[i].list, "element (foreach) %d", i); i++; } rlist_foreach_reverse(rlist, &head2) { i--; is(rlist, &items[i].list, "element (foreach_reverse) %d", i); } rlist_swap(&head2, &head); is(rlist_first(&head), &items[0].list, "first item"); isnt(rlist_first(&head), &items[ITEMS - 1].list, "first item"); 
is(rlist_last(&head), &items[ITEMS - 1].list, "last item"); isnt(rlist_last(&head), &items[0].list, "last item"); is(rlist_next(&head), &items[0].list, "rlist_next"); is(rlist_prev(&head), &items[ITEMS - 1].list, "rlist_prev"); i = 0; rlist_foreach(rlist, &head) { is(rlist, &items[i].list, "element (foreach) %d", i); i++; } rlist_foreach_reverse(rlist, &head) { i--; is(rlist, &items[i].list, "element (foreach_reverse) %d", i); } is(rlist_entry(&items[0].list, struct test, list), &items[0], "rlist_entry"); is(rlist_first_entry(&head, struct test, list), &items[0], "rlist_first_entry"); is(rlist_next_entry(&items[0], list), &items[1], "rlist_next_entry"); is(rlist_prev_entry(&items[2], list), &items[1], "rlist_prev_entry"); i = 0; rlist_foreach_entry(it, &head, list) { is(it, items + i, "element (foreach_entry) %d", i); i++; } rlist_foreach_entry_reverse(it, &head, list) { i--; is(it, items + i, "element (foreach_entry_reverse) %d", i); } rlist_del(&items[2].list); ok(rlist_empty(&head2), "head2 is empty"); rlist_move(&head2, &items[3].list); ok(!rlist_empty(&head2), "head2 isnt empty"); is(rlist_first_entry(&head2, struct test, list), &items[3], "Item was moved"); rlist_move_tail(&head2, &items[4].list); rlist_foreach_entry(it, &head, list) { is(it, items + i, "element (second deleted) %d", i); i++; if (i == 2) i += 3; } rlist_foreach_entry_reverse(it, &head, list) { i--; if (i == 4) i -= 3; is(it, items + i, "element (second deleted) %d", i); } rlist_create(&head); ok(rlist_empty(&head), "list is empty"); for (i = 0; i < ITEMS; i++) { items[i].no = i; rlist_add(&head, &(items[i].list)); } i = 0; rlist_foreach_entry_reverse(it, &head, list) { is(it, items + i, "element (foreach_entry_reverse) %d", i); i++; } rlist_foreach_entry(it, &head, list) { i--; is(it, items + i, "element (foreach_entry) %d", i); } rlist_create(&head); rlist_add_entry(&head, &items[0], list); ok(rlist_prev_entry_safe(&items[0], &head, list) == NULL, "prev is null"); return check_plan(); } 
tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_common.h0000664000000000000000000000220613306560010020624 0ustar rootroot#ifndef INCLUDES_TARANTOOL_TEST_UNIT_ROPE_COMMON_H #define INCLUDES_TARANTOOL_TEST_UNIT_ROPE_COMMON_H #include #include #include static inline void * str_getn(void *ctx, void *data, size_t size, size_t offset) { (void) ctx; return (char *) data + offset; } static inline void str_print(void *data, size_t n) { printf("%.*s", (int) n, (char *) data); } static inline void * mem_alloc(void *data, size_t size) { (void) data; return malloc(size); } static inline void mem_free(void *data, void *ptr) { (void) data; free(ptr); } static inline struct rope * test_rope_new() { return rope_new(str_getn, NULL, mem_alloc, mem_free, NULL); } static inline void test_rope_insert(struct rope *rope, rope_size_t offset, char *str) { printf("insert offset = %zu, str = '%s'\n", (size_t) offset, str); rope_insert(rope, offset, str, strlen(str)); rope_pretty_print(rope, str_print); rope_check(rope); } static inline void test_rope_erase(struct rope *rope, rope_size_t offset) { printf("erase offset = %zu\n", (size_t) offset); rope_erase(rope, offset); rope_pretty_print(rope, str_print); rope_check(rope); } #endif tarantool_1.9.1.26.g63eb81e3c/test/unit/mhash_body.c0000664000000000000000000000371113306560010020421 0ustar rootroot#define set(x) ({ \ k = put(x); \ val(k) = (x) << 1; \ }) #define rm(x) ({ \ mh_int_t k = get(x); \ del(k); \ }) #define tst(x) ({ \ mh_int_t k = get((x)); \ fail_unless(k != mh_end(h)); \ fail_unless(val(k) == ((x) << 1)); \ }) #define clr(x) fail_unless(get(x) == mh_end(h)) #define usd(x) fail_unless(get(x) != mh_end(h)) h = init(); destroy(h); h = init(); clear(h); /* access not yet initialized hash */ clr(9); /* set & test some data. 
there is first resize here */ set(1); set(2); set(3); tst(1); tst(2); tst(3); /* delete non existing entry; note: index must come from get */ set(4); k = get(4); del(k); del(k); del(get(4)); set(4); set(5); set(6); set(7); set(8); set(9); /* there is resize after 8 elems. verify they are inplace */ tst(4); tst(5); tst(6); tst(7); tst(8); tst(9); clear(h); /* after clear no items should exist */ clr(1); clr(2); clr(3); clr(4); clr(5); clr(6); clr(7); clr(8); clr(9); clr(10); clr(11); /* set after del */ set(1); rm(1); set(1); destroy(h); h = init(); set(0); set(1); set(2); set(3); set(4); set(5); set(6); set(7); usd(0); rm(0); clr(0); usd(1); rm(1); clr(1); usd(2); rm(2); clr(2); usd(3); rm(3); clr(3); usd(4); rm(4); clr(4); usd(5); rm(5); clr(5); usd(6); rm(6); clr(6); usd(7); rm(7); clr(7); set(8); set(9); set(10); tst(8); tst(9); tst(10); set(1); set(1); tst(1); rm(1); rm(1); clr(1); /* verify overflow of hash index over hash table */ int i; for (i = 0 ; i < 20; i++) { set(i); } for (i = 0 ; i < 20; i++) { tst(i); } destroy(h); h = init(); set(0); set(1); set(2); set(3); set(4); set(5); set(6); set(7); rm(0); rm(1); rm(2); rm(3); rm(4); destroy(h); /* verify reuse of deleted elements */ h = init(); set(1); int k1 = get(1); rm(1); set(1); int k2 = get(1); fail_unless(k1 == k2); destroy(h); #undef set #undef rm #undef tst #undef clr #undef usd #undef init #undef clear #undef destroy #undef get #undef put #undef del #undef key #undef val tarantool_1.9.1.26.g63eb81e3c/test/unit/light.result0000664000000000000000000000033613306560010020507 0ustar rootroot *** simple_test *** *** simple_test: done *** *** collision_test *** *** collision_test: done *** *** iterator_test *** *** iterator_test: done *** *** iterator_freeze_check *** *** iterator_freeze_check: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/unit.c0000664000000000000000000000234313306560010017263 0ustar rootroot#include "unit.h" #include #include enum { MAX_LEVELS = 10 }; static int 
tests_done[MAX_LEVELS]; static int tests_failed[MAX_LEVELS]; static int plan_test[MAX_LEVELS]; static int level = -1; void _space(FILE *stream) { for (int i = 0 ; i < level; i++) { fprintf(stream, " "); } } void plan(int count) { ++level; plan_test[level] = count; tests_done[level] = 0; tests_failed[level] = 0; _space(stdout); printf("%d..%d\n", 1, plan_test[level]); } int check_plan(void) { int r = 0; if (tests_done[level] != plan_test[level]) { _space(stderr); fprintf(stderr, "# Looks like you planned %d tests but ran %d.\n", plan_test[level], tests_done[level]); r = -1; } if (tests_failed[level]) { _space(stderr); fprintf(stderr, "# Looks like you failed %d test of %d run.\n", tests_failed[level], tests_done[level]); r = tests_failed[level]; } --level; if (level >= 0) { is(r, 0, "subtests"); } return r; } int _ok(int condition, const char *fmt, ...) { va_list ap; _space(stdout); printf("%s %d - ", condition ? "ok" : "not ok", ++tests_done[level]); if (!condition) tests_failed[level]++; va_start(ap, fmt); vprintf(fmt, ap); printf("\n"); va_end(ap); return condition; } tarantool_1.9.1.26.g63eb81e3c/test/unit/guava.result0000664000000000000000000000026013306560010020477 0ustar rootroot *** correctness_check *** *** correctness_check: done *** *** lcg_compat_check *** *** lcg_compat_check: done *** *** sameresult_check *** *** sameresult_check: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/xrow.cc0000664000000000000000000002273213306560010017452 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and iproto forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in iproto form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ extern "C" { #include "unit.h" } /* extern "C" */ #include "trivia/util.h" #include "box/xrow.h" #include "box/iproto_constants.h" #include "tt_uuid.h" #include "version.h" #include "random.h" #include "memory.h" #include "fiber.h" int test_iproto_constants() { /* * Check that there are no gaps in the iproto_key_strs * array. In a case of a gap the iproto_key_strs will be * accessed by an index out of the range * [0, IPROTO_KEY_MAX). */ for (int i = 0; i < IPROTO_KEY_MAX; ++i) (void) iproto_key_name((enum iproto_key) i); /* Same for iproto_type. 
*/ for (uint32_t i = 0; i < IPROTO_TYPE_STAT_MAX; ++i) (void) iproto_type_name(i); return 0; } int test_greeting() { plan(40); char greetingbuf[IPROTO_GREETING_SIZE + 1]; struct greeting source, greeting; /* * Round-trip */ memset(&source, 0, sizeof(source)); tt_uuid_create(&source.uuid); source.version_id = version_id(2 + rand() % 98, rand() % 100, 9); strcpy(source.protocol, "Binary"); source.salt_len = 20 + rand() % 23; random_bytes(source.salt, source.salt_len); greeting_encode(greetingbuf, source.version_id, &source.uuid, source.salt, source.salt_len); int rc = greeting_decode(greetingbuf, &greeting); is(rc, 0, "round trip"); is(greeting.version_id, source.version_id, "roundtrip.version_id"); ok(strcmp(greeting.protocol, source.protocol) == 0, "roundtrip.protocol"); ok(tt_uuid_is_equal(&greeting.uuid, &source.uuid), "roundtrip.uuid"); is(greeting.salt_len, source.salt_len, "roundtrip.salt_len"); is(memcmp(greeting.salt, source.salt, greeting.salt_len), 0, "roundtrip.salt"); /* * Iproto greeting */ const char *greetingbuf_iproto = "Tarantool 1.6.7 (Binary) 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n"; uint8_t iproto_salt[] = { 0x16, 0x7e, 0x63, 0x31, 0x62, 0x93, 0xbf, 0x2f, 0xd7, 0xcf, 0x8c, 0xff, 0x03, 0x60, 0x98, 0x73, 0x1b, 0x01, 0x42, 0x14, 0x13, 0x2f, 0x46, 0x27, 0x77, 0xcc, 0x32, 0xab, 0x2d, 0x21, 0x66, 0xbb }; rc = greeting_decode(greetingbuf_iproto, &greeting); is(rc, 0, "decode iproto"); is(greeting.version_id, version_id(1, 6, 7), "iproto.version_id"); ok(strcmp(greeting.protocol, "Binary") == 0, "iproto.protocol"); ok(strcmp(tt_uuid_str(&greeting.uuid), "7170b4af-c72f-4f07-8729-08fc678543a1") == 0, "iproto.uuid"); is(greeting.salt_len, sizeof(iproto_salt), "iproto.salt_len"); is(memcmp(greeting.salt, iproto_salt, greeting.salt_len), 0, "iproto.salt"); /* * Lua greeting */ const char *greetingbuf_lua = "Tarantool 1.6.7 (Lua console) \n" "type 'help' for interactive help \n"; rc = 
greeting_decode(greetingbuf_lua, &greeting); is(rc, 0, "decode lua"); is(greeting.version_id, version_id(1, 6, 7), "lua.version_id"); ok(strcmp(greeting.protocol, "Lua console") == 0, "lua.protocol"); ok(tt_uuid_is_nil(&greeting.uuid), "lua.uuid"); is(greeting.salt_len, 0, "lua.salt_len"); /* * Iproto greeting < 1.6.6 */ const char *greetingbuf_iproto_166 = "Tarantool 1.6.6-201-g2495838 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n"; rc = greeting_decode(greetingbuf_iproto_166, &greeting); is(rc, 0, "decode iproto166"); is(greeting.version_id, version_id(1, 6, 6), "iproto166.version_id"); ok(strcmp(greeting.protocol, "Binary") == 0, "iproto166.protocol"); ok(tt_uuid_is_nil(&greeting.uuid), "iproto166.uuid"); is(greeting.salt_len, sizeof(iproto_salt), "iproto166.salt_len"); is(memcmp(greeting.salt, iproto_salt, greeting.salt_len), 0, "iproto166.salt"); /* * Lua greeting < 1.6.6 */ const char *greetingbuf_lua_166 = "Tarantool 1.6.6-201-g2495838 (Lua console) \n" "type 'help' for interactive help \n"; rc = greeting_decode(greetingbuf_lua_166, &greeting); is(rc, 0, "decode lua166"); is(greeting.version_id, version_id(1, 6, 6), "lua166.version_id"); ok(strcmp(greeting.protocol, "Lua console") == 0, "lua166.protocol"); ok(tt_uuid_is_nil(&greeting.uuid), "lua166.uuid"); is(greeting.salt_len, 0, "lua166.salt_len"); /* * Invalid */ const char *invalid[] = { "Tarantool 1.6.7 (Binary) \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool1.6.7 (Binary) 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7(Binary) 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7 (Binary)7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7 (Binary) 7170b4af-c72f-4f07-8729-08fc678543a1 " "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= ", "Tarantool 1.6.7 (Binary) 7170b4af-c72f-4f07-8729-08fc678543 \n" 
"Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7 (Binary) 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z \n", "Tarantool 1.6.7 (Binary) \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7 (Binary 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7 Binary 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Apache 2.4.6 (Binary) 7170b4af-c72f-4f07-8729-08fc678543a1 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", "Tarantool 1.6.7 \n" "Fn5jMWKTvy/Xz4z/A2CYcxsBQhQTL0Ynd8wyqy0hZrs= \n", }; int count = sizeof(invalid) / sizeof(invalid[0]); for (int i = 0; i < count; i++) { rc = greeting_decode(invalid[i], &greeting); isnt(rc, 0, "invalid %d", i); } return check_plan(); } void test_xrow_header_encode_decode() { plan(10); struct xrow_header header; char buffer[2048]; char *pos = mp_encode_uint(buffer, 300); is(xrow_header_decode(&header, (const char **) &pos, buffer + 100), -1, "bad msgpack end"); header.type = 100; header.replica_id = 200; header.lsn = 400; header.tm = 123.456; header.bodycnt = 0; uint64_t sync = 100500; struct iovec vec[1]; is(1, xrow_header_encode(&header, sync, vec, 200), "encode"); int fixheader_len = 200; pos = (char *)vec[0].iov_base + fixheader_len; is(mp_decode_map((const char **)&pos), 5, "header map size"); struct xrow_header decoded_header; const char *begin = (const char *)vec[0].iov_base; begin += fixheader_len; const char *end = (const char *)vec[0].iov_base; end += vec[0].iov_len; is(xrow_header_decode(&decoded_header, &begin, end), 0, "header decode"); is(header.type, decoded_header.type, "decoded type"); is(header.replica_id, decoded_header.replica_id, "decoded replica_id"); is(header.lsn, decoded_header.lsn, "decoded lsn"); is(header.tm, decoded_header.tm, "decoded tm"); is(decoded_header.sync, sync, "decoded sync"); is(decoded_header.bodycnt, 0, "decoded bodycnt"); check_plan(); } void 
test_request_str() { plan(1); struct xrow_header header; header.lsn = 100; struct request request; request.header = &header; request.type = 1; request.space_id = 512; request.index_id = 1; char buffer[2048]; request.key = buffer; char *pos = mp_encode_array(buffer, 1); pos = mp_encode_uint(pos, 200); request.tuple = pos; pos = mp_encode_array(pos, 1); pos = mp_encode_uint(pos, 300); request.ops = pos; pos = mp_encode_array(pos, 1); pos = mp_encode_uint(pos, 400); is(strcmp("{type: 'SELECT', lsn: 100, space_id: 512, index_id: 1, "\ "key: [200], tuple: [300], ops: [400]}", request_str(&request)), 0, "request_str"); check_plan(); } int main(void) { memory_init(); fiber_init(fiber_c_invoke); plan(3); random_init(); test_iproto_constants(); test_greeting(); test_xrow_header_encode_decode(); test_request_str(); random_free(); fiber_free(); memory_free(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/mhash.c0000664000000000000000000000415713306560010017411 0ustar rootroot#include #include #include "unit.h" #ifndef bytemap #define bytemap 0 #endif #define MH_SOURCE 1 #define mh_name _i32 struct mh_i32_node_t { int32_t key; int32_t val; }; #define mh_node_t struct mh_i32_node_t #define mh_arg_t void * #define mh_hash(a, arg) (a->key) #define mh_cmp(a, b, arg) ((a->key) != (b->key)) #define mh_bytemap bytemap #include "salad/mhash.h" #define mh_name _i32_collision struct mh_i32_collision_node_t { int32_t key; int32_t val; }; #define mh_node_t struct mh_i32_collision_node_t #define mh_arg_t void * #define mh_hash(a, arg) 42 #define mh_cmp(a, b, arg) ((a->key) != (b->key)) #define mh_bytemap bytemap #include "salad/mhash.h" #undef MH_SOURCE static void mhash_int32_id_test() { header(); int k; struct mh_i32_t *h; #define init() ({ mh_i32_new(); }) #define clear(x) ({ mh_i32_clear((x)); }) #define destroy(x) ({ mh_i32_delete((x)); }) #define get(x) ({ \ const struct mh_i32_node_t _node = { .key = (x) }; \ mh_i32_get(h, &_node, NULL); \ }) #define put(x) ({ \ 
const struct mh_i32_node_t _node = { .key = (x) }; \ mh_i32_put(h, &_node, NULL, NULL); \ }) #define key(k) (mh_i32_node(h, k)->key) #define val(k) (mh_i32_node(h, k)->val) #define del(k) ({ \ mh_i32_del(h, k, NULL); \ }) #include "mhash_body.c" footer(); } static void mhash_int32_collision_test() { header(); int k; struct mh_i32_collision_t *h; #define init() ({ mh_i32_collision_new(); }) #define clear(x) ({ mh_i32_collision_clear((x)); }) #define destroy(x) ({ mh_i32_collision_delete((x)); }) #define get(x) ({ \ const struct mh_i32_collision_node_t _node = { .key = (x) }; \ mh_i32_collision_get(h, &_node, NULL); \ }) #define put(x) ({ \ const struct mh_i32_collision_node_t _node = { .key = (x) }; \ mh_i32_collision_put(h, &_node, NULL, NULL); \ }) #define key(k) (mh_i32_collision_node(h, k)->key) #define val(k) (mh_i32_collision_node(h, k)->val) #define del(k) ({ \ mh_i32_collision_del(h, k, NULL); \ }) #include "mhash_body.c" footer(); } int main(void) { mhash_int32_id_test(); mhash_int32_collision_test(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/CMakeLists.txt0000664000000000000000000001514513306565107020720 0ustar rootrootadd_compile_flags("C;CXX" "-Wno-unused" "-Wno-unused-result") if(CC_HAS_WNO_TAUTOLOGICAL_COMPARE) add_compile_flags("C;CXX" "-Wno-tautological-compare") endif() file(GLOB all_sources *.c *.cc) set_source_files_compile_flags(${all_sources}) include_directories(${PROJECT_SOURCE_DIR}/src) include_directories(${PROJECT_BINARY_DIR}/src) include_directories(${PROJECT_SOURCE_DIR}/src/box) include_directories(${CMAKE_SOURCE_DIR}/third_party) include_directories(${ICU_INCLUDE_DIRS}) add_library(unit STATIC unit.c) add_executable(heap.test heap.c) target_link_libraries(heap.test unit) add_executable(heap_iterator.test heap_iterator.c) target_link_libraries(heap_iterator.test unit) add_executable(rlist.test rlist.c) target_link_libraries(rlist.test unit) add_executable(stailq.test stailq.c) target_link_libraries(stailq.test unit) 
add_executable(uri.test uri.c unit.c) target_link_libraries(uri.test uri unit) add_executable(queue.test queue.c) add_executable(mhash.test mhash.c) add_executable(mhash_bytemap.test mhash_bytemap.c) add_executable(rope_basic.test rope_basic.c) target_link_libraries(rope_basic.test salad) add_executable(rope_avl.test rope_avl.c) target_link_libraries(rope_avl.test salad) add_executable(rope_stress.test rope_stress.c) target_link_libraries(rope_stress.test salad) add_executable(rope.test rope.c) target_link_libraries(rope.test salad) add_executable(int96.test int96.cc) add_executable(bit.test bit.c bit.c) target_link_libraries(bit.test bit) add_executable(bitset_basic.test bitset_basic.c) target_link_libraries(bitset_basic.test bitset) add_executable(bitset_iterator.test bitset_iterator.c) target_link_libraries(bitset_iterator.test bitset) add_executable(bitset_index.test bitset_index.c) target_link_libraries(bitset_index.test bitset) add_executable(base64.test base64.c) target_link_libraries(base64.test misc unit) add_executable(uuid.test uuid.c) target_link_libraries(uuid.test uuid unit) add_executable(bps_tree.test bps_tree.cc) target_link_libraries(bps_tree.test small misc) add_executable(bps_tree_iterator.test bps_tree_iterator.cc) target_link_libraries(bps_tree_iterator.test small misc) add_executable(rtree.test rtree.cc) target_link_libraries(rtree.test salad small) add_executable(rtree_iterator.test rtree_iterator.cc) target_link_libraries(rtree_iterator.test salad small) add_executable(rtree_multidim.test rtree_multidim.cc) target_link_libraries(rtree_multidim.test salad small) add_executable(light.test light.cc) target_link_libraries(light.test small) add_executable(bloom.test bloom.cc) target_link_libraries(bloom.test salad) add_executable(vclock.test vclock.cc) target_link_libraries(vclock.test vclock unit) add_executable(xrow.test xrow.cc) target_link_libraries(xrow.test xrow unit) add_executable(fiber.test fiber.cc) set_source_files_properties(fiber.cc 
PROPERTIES COMPILE_FLAGS -O0) target_link_libraries(fiber.test core unit) if (NOT ENABLE_GCOV) # This test is known to be broken with GCOV add_executable(guard.test guard.cc) target_link_libraries(guard.test core unit) endif () add_executable(fiber_stress.test fiber_stress.cc) target_link_libraries(fiber_stress.test core) add_executable(fiber_cond.test fiber_cond.c unit.c) target_link_libraries(fiber_cond.test core) add_executable(fiber_channel.test fiber_channel.cc unit.c) target_link_libraries(fiber_channel.test core) add_executable(fiber_channel_stress.test fiber_channel_stress.cc) target_link_libraries(fiber_channel_stress.test core) add_executable(cbus_stress.test cbus_stress.c) target_link_libraries(cbus_stress.test core stat) add_executable(cbus.test cbus.c) target_link_libraries(cbus.test core unit stat) add_executable(coio.test coio.cc) target_link_libraries(coio.test core eio bit uri unit) if (ENABLE_BUNDLED_MSGPUCK) set(MSGPUCK_DIR ${PROJECT_SOURCE_DIR}/src/lib/msgpuck/) add_executable(msgpack.test ${MSGPUCK_DIR}/test/msgpuck.c ${MSGPUCK_DIR}/test/test.c) set_source_files_properties( ${MSGPUCK_DIR}/test/msgpuck.c ${MSGPUCK_DIR}/test/test.c PROPERTIES COMPILE_FLAGS "-I${MSGPUCK_DIR}/test") target_link_libraries(msgpack.test ${MSGPUCK_LIBRARIES}) endif () add_executable(scramble.test scramble.c) target_link_libraries(scramble.test scramble) add_executable(guava.test guava.c) target_link_libraries(guava.test salad small) add_executable(find_path.test find_path.c ${CMAKE_SOURCE_DIR}/src/find_path.c ) add_executable(reflection_c.test reflection_c.c unit.c ${CMAKE_SOURCE_DIR}/src/reflection.c) add_executable(reflection_cxx.test reflection_cxx.cc unit.c ${CMAKE_SOURCE_DIR}/src/reflection.c) add_executable(csv.test csv.c) target_link_libraries(csv.test csv) add_executable(rmean.test rmean.cc) target_link_libraries(rmean.test stat unit) add_executable(histogram.test histogram.c) target_link_libraries(histogram.test stat unit) add_executable(say.test say.c) 
target_link_libraries(say.test core unit) set(ITERATOR_TEST_SOURCES vy_iterators_helper.c ${PROJECT_SOURCE_DIR}/src/box/vy_stmt.c ${PROJECT_SOURCE_DIR}/src/box/vy_mem.c ${PROJECT_SOURCE_DIR}/src/box/vy_cache.c) set(ITERATOR_TEST_LIBS core tuple xrow unit) add_executable(vy_mem.test vy_mem.c ${ITERATOR_TEST_SOURCES}) target_link_libraries(vy_mem.test ${ITERATOR_TEST_LIBS}) add_executable(vy_point_lookup.test vy_point_lookup.c vy_iterators_helper.c vy_log_stub.c ${PROJECT_SOURCE_DIR}/src/box/vy_point_lookup.c ${PROJECT_SOURCE_DIR}/src/box/vy_write_iterator.c ${PROJECT_SOURCE_DIR}/src/box/vy_stmt.c ${PROJECT_SOURCE_DIR}/src/box/vy_mem.c ${PROJECT_SOURCE_DIR}/src/box/vy_run.c ${PROJECT_SOURCE_DIR}/src/box/vy_range.c ${PROJECT_SOURCE_DIR}/src/box/vy_tx.c ${PROJECT_SOURCE_DIR}/src/box/vy_read_set.c ${PROJECT_SOURCE_DIR}/src/box/vy_upsert.c ${PROJECT_SOURCE_DIR}/src/box/vy_index.c ${PROJECT_SOURCE_DIR}/src/box/vy_cache.c ${PROJECT_SOURCE_DIR}/src/box/index_def.c ${PROJECT_SOURCE_DIR}/src/box/schema_def.c ${PROJECT_SOURCE_DIR}/src/box/identifier.c ) target_link_libraries(vy_point_lookup.test core tuple xrow xlog unit) add_executable(column_mask.test column_mask.c) target_link_libraries(column_mask.test tuple unit) add_executable(vy_write_iterator.test vy_write_iterator.c ${PROJECT_SOURCE_DIR}/src/box/vy_run.c ${PROJECT_SOURCE_DIR}/src/box/vy_upsert.c ${PROJECT_SOURCE_DIR}/src/box/vy_write_iterator.c ${ITERATOR_TEST_SOURCES} ) target_link_libraries(vy_write_iterator.test xlog ${ITERATOR_TEST_LIBS}) add_executable(vy_cache.test vy_cache.c ${ITERATOR_TEST_SOURCES}) target_link_libraries(vy_cache.test ${ITERATOR_TEST_LIBS}) add_executable(coll.test coll.cpp) target_link_libraries(coll.test box) tarantool_1.9.1.26.g63eb81e3c/test/unit/mhash_bytemap.c0000664000000000000000000000004513306560010021122 0ustar rootroot#define bytemap 1 #include "mhash.c" tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_iterators_helper.c0000664000000000000000000001653013306565107022414 0ustar 
rootroot#include "vy_iterators_helper.h" #include "memory.h" #include "fiber.h" #include "tt_uuid.h" #include "say.h" struct tt_uuid INSTANCE_UUID; struct tuple_format *vy_key_format = NULL; struct vy_mem_env mem_env; struct vy_cache_env cache_env; void vy_iterator_C_test_init(size_t cache_size) { /* Suppress info messages. */ say_set_log_level(S_WARN); memory_init(); fiber_init(fiber_c_invoke); tuple_init(NULL); vy_cache_env_create(&cache_env, cord_slab_cache()); vy_cache_env_set_quota(&cache_env, cache_size); vy_key_format = tuple_format_new(&vy_tuple_format_vtab, NULL, 0, 0, NULL, 0, NULL); tuple_format_ref(vy_key_format); size_t mem_size = 64 * 1024 * 1024; vy_mem_env_create(&mem_env, mem_size); } void vy_iterator_C_test_finish() { vy_mem_env_destroy(&mem_env); tuple_format_unref(vy_key_format); vy_cache_env_destroy(&cache_env); tuple_free(); fiber_free(); memory_free(); } struct tuple * vy_new_simple_stmt(struct tuple_format *format, struct tuple_format *upsert_format, struct tuple_format *format_with_colmask, const struct vy_stmt_template *templ) { if (templ == NULL) return NULL; /* Calculate binary size. */ int i = 0; size_t size = 0; while (templ->fields[i] != vyend) { fail_if(i > MAX_FIELDS_COUNT); if (templ->fields[i] >= 0) size += mp_sizeof_uint(templ->fields[i]); else size += mp_sizeof_int(templ->fields[i]); ++i; } size += mp_sizeof_array(i); fail_if(templ->optimize_update && templ->type == IPROTO_UPSERT); if (templ->optimize_update) format = format_with_colmask; /* Encode the statement. */ char *buf = (char *) malloc(size); fail_if(buf == NULL); char *pos = mp_encode_array(buf, i); i = 0; struct tuple *ret = NULL; while (templ->fields[i] != vyend) { if (templ->fields[i] >= 0) pos = mp_encode_uint(pos, templ->fields[i]); else pos = mp_encode_int(pos, templ->fields[i]); ++i; } /* * Create the result statement, using one of the formats. 
*/ switch (templ->type) { case IPROTO_INSERT: { ret = vy_stmt_new_insert(format, buf, pos); fail_if(ret == NULL); break; } case IPROTO_REPLACE: { ret = vy_stmt_new_replace(format, buf, pos); fail_if(ret == NULL); break; } case IPROTO_DELETE: { struct tuple *tmp = vy_stmt_new_replace(format, buf, pos); fail_if(tmp == NULL); ret = vy_stmt_new_surrogate_delete(format, tmp); fail_if(ret == NULL); tuple_unref(tmp); break; } case IPROTO_UPSERT: { /* * Create the upsert statement without operations. * Validation of result of UPSERT operations * applying is not a test for the iterators. * For the iterators only UPSERT type is * important. */ struct iovec operations[1]; char tmp[32]; char *ops = mp_encode_array(tmp, 1); ops = mp_encode_array(ops, 3); ops = mp_encode_str(ops, "+", 1); ops = mp_encode_uint(ops, templ->upsert_field); if (templ->upsert_value >= 0) ops = mp_encode_uint(ops, templ->upsert_value); else ops = mp_encode_int(ops, templ->upsert_value); operations[0].iov_base = tmp; operations[0].iov_len = ops - tmp; fail_if(templ->optimize_update); ret = vy_stmt_new_upsert(upsert_format, buf, pos, operations, 1); fail_if(ret == NULL); break; } case IPROTO_SELECT: { const char *key = buf; uint part_count = mp_decode_array(&key); ret = vy_stmt_new_select(vy_key_format, key, part_count); fail_if(ret == NULL); break; } default: fail_if(true); } free(buf); vy_stmt_set_lsn(ret, templ->lsn); if (templ->optimize_update) vy_stmt_set_column_mask(ret, 0); return ret; } const struct tuple * vy_mem_insert_template(struct vy_mem *mem, const struct vy_stmt_template *templ) { struct tuple *stmt = vy_new_simple_stmt(mem->format, mem->upsert_format, mem->format_with_colmask, templ); struct tuple *region_stmt = vy_stmt_dup_lsregion(stmt, &mem->env->allocator, mem->generation); assert(region_stmt != NULL); tuple_unref(stmt); if (templ->type == IPROTO_UPSERT) vy_mem_insert_upsert(mem, region_stmt); else vy_mem_insert(mem, region_stmt); return region_stmt; } void 
vy_cache_insert_templates_chain(struct vy_cache *cache, struct tuple_format *format, const struct vy_stmt_template *chain, uint length, const struct vy_stmt_template *key_templ, enum iterator_type order) { struct tuple *key = vy_new_simple_stmt(format, NULL, NULL, key_templ); struct tuple *prev_stmt = NULL; struct tuple *stmt = NULL; for (uint i = 0; i < length; ++i) { stmt = vy_new_simple_stmt(format, NULL, NULL, &chain[i]); vy_cache_add(cache, stmt, prev_stmt, key, order); if (i != 0) tuple_unref(prev_stmt); prev_stmt = stmt; stmt = NULL; } tuple_unref(key); if (prev_stmt != NULL) tuple_unref(prev_stmt); } void vy_cache_on_write_template(struct vy_cache *cache, struct tuple_format *format, const struct vy_stmt_template *templ) { struct tuple *written = vy_new_simple_stmt(format, NULL, NULL, templ); vy_cache_on_write(cache, written, NULL); tuple_unref(written); } void init_read_views_list(struct rlist *rlist, struct vy_read_view *rvs, const int *vlsns, int count) { rlist_create(rlist); for (int i = 0; i < count; ++i) { rvs[i].vlsn = vlsns[i]; rlist_add_tail_entry(rlist, &rvs[i], in_read_views); } } struct vy_mem * create_test_mem(struct key_def *def) { /* Create format */ struct key_def * const defs[] = { def }; struct tuple_format *format = tuple_format_new(&vy_tuple_format_vtab, defs, def->part_count, 0, NULL, 0, NULL); fail_if(format == NULL); /* Create format with column mask */ struct tuple_format *format_with_colmask = vy_tuple_format_new_with_colmask(format); assert(format_with_colmask != NULL); /* Create upsert format */ struct tuple_format *format_upsert = vy_tuple_format_new_upsert(format); assert(format_upsert != NULL); /* Create mem */ struct vy_mem *mem = vy_mem_new(&mem_env, 1, def, format, format_with_colmask, format_upsert, 0); fail_if(mem == NULL); return mem; } void create_test_cache(uint32_t *fields, uint32_t *types, int key_cnt, struct vy_cache *cache, struct key_def **def, struct tuple_format **format) { *def = box_key_def_new(fields, types, 
key_cnt); assert(*def != NULL); vy_cache_create(cache, &cache_env, *def); *format = tuple_format_new(&vy_tuple_format_vtab, def, 1, 0, NULL, 0, NULL); tuple_format_ref(*format); } void destroy_test_cache(struct vy_cache *cache, struct key_def *def, struct tuple_format *format) { tuple_format_unref(format); vy_cache_destroy(cache); key_def_delete(def); } bool vy_stmt_are_same(const struct tuple *actual, const struct vy_stmt_template *expected, struct tuple_format *format, struct tuple_format *upsert_format, struct tuple_format *format_with_colmask) { if (vy_stmt_type(actual) != expected->type) return false; struct tuple *tmp = vy_new_simple_stmt(format, upsert_format, format_with_colmask, expected); fail_if(tmp == NULL); uint32_t a_len, b_len; const char *a, *b; if (vy_stmt_type(actual) == IPROTO_UPSERT) { a = vy_upsert_data_range(actual, &a_len); } else { a = tuple_data_range(actual, &a_len); } if (vy_stmt_type(tmp) == IPROTO_UPSERT) { b = vy_upsert_data_range(tmp, &b_len); } else { b = tuple_data_range(tmp, &b_len); } if (a_len != b_len) { tuple_unref(tmp); return false; } if (vy_stmt_lsn(actual) != expected->lsn) { tuple_unref(tmp); return false; } bool rc = memcmp(a, b, a_len) == 0; tuple_unref(tmp); return rc; } tarantool_1.9.1.26.g63eb81e3c/test/unit/bitset_basic.c0000664000000000000000000000734113306560010020742 0ustar rootroot#include #include #include #include #include "unit.h" static void test_cardinality() { header(); struct bitset bm; bitset_create(&bm, realloc); fail_unless(bitset_cardinality(&bm) == 0); size_t cnt = 0; fail_if(bitset_set(&bm, 10) < 0); cnt++; fail_if(bitset_set(&bm, 15) < 0); cnt++; fail_if(bitset_set(&bm, 20) < 0); cnt++; fail_unless(bitset_cardinality(&bm) == cnt); fail_if(bitset_set(&bm, 10) < 0); fail_unless(bitset_cardinality(&bm) == cnt); fail_if(bitset_clear(&bm, 20) < 0); cnt--; fail_unless(bitset_cardinality(&bm) == cnt); fail_if(bitset_clear(&bm, 20) < 0); fail_unless(bitset_cardinality(&bm) == cnt); fail_if(bitset_clear(&bm, 
666) < 0); fail_unless(bitset_cardinality(&bm) == cnt); fail_if(bitset_clear(&bm, 10) < 0); cnt--; fail_unless(bitset_cardinality(&bm) == cnt); fail_if(bitset_clear(&bm, 15) < 0); cnt--; fail_unless(bitset_cardinality(&bm) == cnt); bitset_destroy(&bm); footer(); } static void shuffle(size_t *arr, size_t size) { if (size <= 1) { return; } for (size_t i = 0; i < (size - 1); i++) { size_t j = i + rand() / (RAND_MAX / (size - i) + 1); size_t tmp = arr[i]; arr[i] = arr[j]; arr[j] = tmp; } } static int size_compator(const void *a, const void *b) { size_t *aa = (size_t *) a; size_t *bb = (size_t *) b; if (*aa < *bb) { return -1; } else if (*aa > *bb) { return 1; } else { return 0; } } static void test_get_set() { header(); struct bitset bm; bitset_create(&bm, realloc); const size_t NUM_SIZE = (size_t) 1 << 14; size_t *nums = malloc(NUM_SIZE * sizeof(size_t)); printf("Generating test set... "); for(size_t i = 0; i < NUM_SIZE; i++) { nums[i] = rand(); } /* Remove dups */ qsort(nums, NUM_SIZE, sizeof(size_t), size_compator); size_t prev = nums[0]; for(size_t i = 1; i < NUM_SIZE; i++) { if (nums[i] == prev) { nums[i] = SIZE_MAX; } else { prev = nums[i]; } } shuffle(nums, NUM_SIZE); printf("ok\n"); printf("Settings bits... "); for(size_t i = 0; i < NUM_SIZE; i++) { if (nums[i] == SIZE_MAX) continue; fail_if(bitset_set(&bm, nums[i]) < 0); } printf("ok\n"); printf("Checking bits... "); shuffle(nums, NUM_SIZE); for(size_t i = 0; i < NUM_SIZE; i++) { if (nums[i] == SIZE_MAX) continue; fail_unless(bitset_test(&bm, nums[i])); } printf("ok\n"); printf("Unsetting random bits... "); for(size_t k = 0; k < (NUM_SIZE >> 3); k++) { size_t i = rand() % NUM_SIZE; if (nums[i] == SIZE_MAX) continue; fail_if(bitset_clear(&bm, nums[i]) < 0); fail_if(bitset_test(&bm, nums[i])); nums[i] = SIZE_MAX; } printf("ok\n"); printf("Checking set bits... 
"); shuffle(nums, NUM_SIZE); for(size_t i = 0; i < NUM_SIZE; i++) { if (nums[i] == SIZE_MAX) { continue; } if (!bitset_test(&bm, nums[i])) { printf("Fail :%zu\n", nums[i]); } fail_unless(bitset_test(&bm, nums[i])); } printf("ok\n"); printf("Checking all bits... "); qsort(nums, NUM_SIZE, sizeof(size_t), size_compator); size_t *pn = nums; size_t i_max = MIN(1ULL << 14, RAND_MAX); for(size_t i = 0; i < i_max; i++) { if (*pn < SIZE_MAX && *pn == i) { fail_unless(bitset_test(&bm, *pn)); pn++; } else { fail_if(bitset_test(&bm, i)); } } printf("ok\n"); printf("Unsetting all bits... "); shuffle(nums, NUM_SIZE); for(size_t i = 0; i < NUM_SIZE; i++) { if (nums[i] == SIZE_MAX) { continue; } fail_if(bitset_clear(&bm, nums[i]) < 0); } printf("ok\n"); printf("Checking all bits... "); for(size_t i = 0; i < i_max; i++) { fail_if(bitset_test(&bm, i)); } printf("ok\n"); free(nums); bitset_destroy(&bm); footer(); } int main(int argc, char *argv[]) { setbuf(stdout, NULL); srand(time(NULL)); test_cardinality(); test_get_set(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/scramble.c0000664000000000000000000000255113306560010020075 0ustar rootroot#include "scramble.h" #include "random.h" #include "third_party/sha1.h" #include #include #include #include "unit.h" void test_scramble() { int salt[SCRAMBLE_SIZE/sizeof(int)]; for (unsigned i = 0; i < sizeof(salt)/sizeof(int); i++) salt[i] = rand(); char *password = "lechododilikraskaloh"; unsigned char hash2[SCRAMBLE_SIZE]; SHA1_CTX ctx; SHA1Init(&ctx); SHA1Update(&ctx, (unsigned char *) password, strlen(password)); SHA1Final(hash2, &ctx); SHA1Init(&ctx); SHA1Update(&ctx, hash2, SCRAMBLE_SIZE); SHA1Final(hash2, &ctx); char scramble[SCRAMBLE_SIZE]; scramble_prepare(scramble, salt, password, strlen(password)); printf("%d\n", scramble_check(scramble, salt, hash2)); password = "wrongpass"; scramble_prepare(scramble, salt, password, strlen(password)); printf("%d\n", scramble_check(scramble, salt, hash2) != 0); scramble_prepare(scramble, 
salt, password, 0); printf("%d\n", scramble_check(scramble, salt, hash2) != 0); } void test_password_prepare() { char buf[SCRAMBLE_BASE64_SIZE * 2]; int password[5]; for (unsigned i = 0; i < sizeof(password)/sizeof(int); i++) password[i] = rand(); password_prepare((char *) password, sizeof(password), buf, sizeof(buf)); fail_unless(strlen(buf) == SCRAMBLE_BASE64_SIZE); } int main() { random_init(); test_scramble(); test_password_prepare(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_write_iterator.c0000664000000000000000000004105313306565107022102 0ustar rootroot#include "memory.h" #include "fiber.h" #include "vy_write_iterator.h" #include "vy_iterators_helper.h" /** * Create the mem with the specified key_def and content, iterate * over it with write_iterator and compare actual result * statements with the expected ones. * * @param key_def Key definition for the mem. * @param content Mem content statements. * @param content_count Size of the @content. * @param expected Expected results of the iteration. * @param expected_count Size of the @expected. * @param vlsns Read view lsns for the write iterator. * @param vlsns_count Size of the @vlsns. * @param is_primary True, if the new mem belongs to the primary * index. * @param is_last_level True, if the new mem is the last level. 
*/ void compare_write_iterator_results(struct key_def *key_def, const struct vy_stmt_template *content, int content_count, const struct vy_stmt_template *expected, int expected_count, const int *vlsns, int vlsns_count, bool is_primary, bool is_last_level) { struct vy_mem *mem = create_test_mem(key_def); for (int i = 0; i < content_count; ++i) vy_mem_insert_template(mem, &content[i]); struct rlist rv_list; struct vy_read_view *rv_array = malloc(sizeof(*rv_array) * vlsns_count); fail_if(rv_array == NULL); init_read_views_list(&rv_list, rv_array, vlsns, vlsns_count); struct vy_stmt_stream *wi = vy_write_iterator_new(key_def, mem->format, mem->upsert_format, is_primary, is_last_level, &rv_list); fail_if(wi == NULL); fail_if(vy_write_iterator_new_mem(wi, mem) != 0); struct tuple *ret; fail_if(wi->iface->start(wi) != 0); int i = 0; do { fail_if(wi->iface->next(wi, &ret) != 0); if (ret == NULL) break; fail_if(i >= expected_count); ok(vy_stmt_are_same(ret, &expected[i], mem->format, mem->upsert_format, mem->format_with_colmask), "stmt %d is correct", i); ++i; } while (ret != NULL); ok(i == expected_count, "correct results count"); /* Clean up */ wi->iface->close(wi); vy_mem_delete(mem); free(rv_array); } void test_basic(void) { header(); plan(46); /* Create key_def */ uint32_t fields[] = { 0 }; uint32_t types[] = { FIELD_TYPE_UNSIGNED }; struct key_def *key_def = box_key_def_new(fields, types, 1); assert(key_def != NULL); /* * STATEMENT: REPL REPL REPL DEL REPL REPL REPL REPL REPL REPL * LSN: 5 6 7 8 9 10 11 12 13 14 * READ VIEW: * * * * \____________/\________/\_________________/\___________/ * merge merge merge merge */ { const struct vy_stmt_template content[] = { STMT_TEMPLATE(5, REPLACE, 1, 1), STMT_TEMPLATE(6, REPLACE, 1, 2), STMT_TEMPLATE(7, REPLACE, 1, 3), STMT_TEMPLATE(8, REPLACE, 1, 4), STMT_TEMPLATE(9, REPLACE, 1, 5), STMT_TEMPLATE(10, REPLACE, 1, 6), STMT_TEMPLATE(11, REPLACE, 1, 7), STMT_TEMPLATE(12, REPLACE, 1, 8), STMT_TEMPLATE(13, REPLACE, 1, 9), 
STMT_TEMPLATE(14, REPLACE, 1, 10), }; const struct vy_stmt_template expected[] = { content[9], content[7], content[4], content[2] }; const int vlsns[] = {7, 9, 12}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, true); } { /* * STATEMENT: UPS UPS UPS UPS UPS UPS UPS UPS UPS UPS * LSN: 5 6 7 8 9 10 11 12 13 14 * READ VIEW: * * * * \________/\_________________/\_____________/\_____/ * squash squash squash squash */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(5, UPSERT, 1, 1), STMT_TEMPLATE(6, UPSERT, 1, 2), STMT_TEMPLATE(7, UPSERT, 1, 3), STMT_TEMPLATE(8, UPSERT, 1, 4), STMT_TEMPLATE(9, UPSERT, 1, 5), STMT_TEMPLATE(10, UPSERT, 1, 6), STMT_TEMPLATE(11, UPSERT, 1, 7), STMT_TEMPLATE(12, UPSERT, 1, 8), STMT_TEMPLATE(13, UPSERT, 1, 9), STMT_TEMPLATE(14, UPSERT, 1, 10), }; const struct vy_stmt_template expected[] = { content[9], STMT_TEMPLATE(13, UPSERT, 1, 7), STMT_TEMPLATE(10, UPSERT, 1, 3), STMT_TEMPLATE(6, UPSERT, 1, 1), }; const int vlsns[] = {6, 10, 13}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } { /* * STATEMENT: REPL DEL UPS REPL * LSN: 5 6 7 8 * READ VIEW: * * \_______________/\_______/ * \_____\_/_____/ merge * skip last level merge * delete */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(5, REPLACE, 1, 1), STMT_TEMPLATE(6, DELETE, 1), STMT_TEMPLATE(7, UPSERT, 1, 2), STMT_TEMPLATE(8, REPLACE, 1, 3), }; const struct vy_stmt_template expected[] = { content[3], STMT_TEMPLATE(7, REPLACE, 1, 2) }; const int vlsns[] = {7}; int content_count = sizeof(content) 
/ sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, true); } { /* * STATEMENT: REPL REPL * LSN: 7 8 * READ VIEW: * * * No merge. */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(7, REPLACE, 1, 1), STMT_TEMPLATE(8, REPLACE, 1, 2), }; const struct vy_stmt_template expected[] = { content[1], content[0] }; const int vlsns[] = {7, 8}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, true); } { /* * LINKED WITH: gh-1824, about pruning last DELETE. * STATEMENT: DEL REPL * LSN: 7 8 * READ VIEW: * * * * is_last_level = true. * No merge, skip DELETE from last level, although there the read * view on the DELETE exists. */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(7, DELETE, 1), STMT_TEMPLATE(8, REPLACE, 1, 1), }; const struct vy_stmt_template expected[] = { content[1] }; const int vlsns[] = {7, 8}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, true); } { /* * LINKED WITH: gh-1824, about pruning last DELETE. * STATEMENT: DEL REPL * LSN: 7 8 * READ VIEW: * * * * is_last_level = false; * No merge, don't skip DELETE from last level. 
*/ const struct vy_stmt_template content[] = { STMT_TEMPLATE(7, DELETE, 1), STMT_TEMPLATE(8, REPLACE, 1, 1), }; const struct vy_stmt_template expected[] = { content[1], content[0] }; const int vlsns[] = {7, 8}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } { /* * STATEMENT: REPL DEL REPL REPL * LSN: 5 6 6 7 * READ VIEW: * * \_______________/\_______/ * \_____/\______/ * merge skip as * optimized * update * DEL and REPL with lsn 6 can be skipped for read view 6 for * secondary index, because they do not change secondary key. */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(5, REPLACE, 1, 1), STMT_TEMPLATE_OPTIMIZED(6, DELETE, 1), STMT_TEMPLATE_OPTIMIZED(6, REPLACE, 1, 2), STMT_TEMPLATE(7, REPLACE, 1, 3) }; const struct vy_stmt_template expected[] = { content[3], content[0] }; const int vlsns[] = {6}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, false, true); } { /* * STATEMENT: DEL REPL * LSN: 6 6 * \______/ * skip both as optimized update */ const struct vy_stmt_template content[] = { STMT_TEMPLATE_OPTIMIZED(6, DELETE, 1), STMT_TEMPLATE_OPTIMIZED(6, REPLACE, 1, 2), }; const struct vy_stmt_template expected[] = {}; const int vlsns[] = {}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, false, false); } { /* * STATEMENT: UPS UPS UPS REPL * LSN: 6 7 
8 9 * READ VIEW: * * \______/\________/ * merge merge * UPSERT before REPLACE must be squashed with only older * statements. */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(6, UPSERT, 1, 1), STMT_TEMPLATE(7, UPSERT, 1, 2), STMT_TEMPLATE(8, UPSERT, 1, 3), STMT_TEMPLATE(9, REPLACE, 1, 4) }; const struct vy_stmt_template expected[] = { content[3], STMT_TEMPLATE(7, UPSERT, 1, 1) }; const int vlsns[] = {7}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } { /* * STATEMENT: REPL REPL REPL REPL * LSN: 6 7 20 21 * READ VIEW: * *(10) * * *(22) *(23) * \________/\______/\_____/\______/\____________/ * merge nullify merge merge nullify * * Do not remember the read views with the same versions of the * key. */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(6, REPLACE, 1, 1), STMT_TEMPLATE(7, REPLACE, 1, 2), STMT_TEMPLATE(20, REPLACE, 1, 3), STMT_TEMPLATE(21, REPLACE, 1, 4) }; const struct vy_stmt_template expected[] = { content[3], content[2], content[1] }; const int vlsns[] = {7, 10, 20, 21, 22, 23}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, true); } { /* * STATEMENT: REPL DEL REPL * LSN: 6 7 7 * \___/\__________/ * merge skip as optimized update * * last_level = false. * Check if the key is not fully skipped in a case of optimized * update as the newest version. 
*/ const struct vy_stmt_template content[] = { STMT_TEMPLATE(6, REPLACE, 1, 1), STMT_TEMPLATE_OPTIMIZED(7, DELETE, 1), STMT_TEMPLATE_OPTIMIZED(7, REPLACE, 1, 2), }; const struct vy_stmt_template expected[] = { content[0] }; const int vlsns[] = {}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, false, false); } { /* * STATEMENT: REPL DEL REPL * LSN: 6 7 7 * \_________/|\___/ * skip last level | skip as optimized * delete. | update. * * last_level = true. First apply 'last level DELETE' optimization * and only then the 'optimized UPDATE'. */ const struct vy_stmt_template content[] = { STMT_TEMPLATE(6, REPLACE, 1, 1), STMT_TEMPLATE_OPTIMIZED(7, DELETE, 1), STMT_TEMPLATE_OPTIMIZED(7, REPLACE, 1, 2), }; const struct vy_stmt_template expected[] = { content[2] }; const int vlsns[] = {}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } { /* * STATEMENT: REPL DEL REPL DEL REPL DEL * LSN: 4 5 6 7 8 9 * READ VIEW: * * * * \_______/\_______________/ * merge skip * * is_last_level = false * * Check that tautological DELETEs referenced by newer * read views are skipped. 
*/ const struct vy_stmt_template content[] = { STMT_TEMPLATE(4, REPLACE, 1, 1), STMT_TEMPLATE(5, DELETE, 1), STMT_TEMPLATE(6, REPLACE, 1, 2), STMT_TEMPLATE(7, DELETE, 1), STMT_TEMPLATE(8, REPLACE, 1, 3), STMT_TEMPLATE(9, DELETE, 1), }; const struct vy_stmt_template expected[] = { content[1] }; const int vlsns[] = {5, 7, 9}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } { /* * STATEMENT: INS DEL REPL DEL REPL REPL INS REPL * LSN: 2 3 4 5 6 7 8 9 * READ VIEW: * * * * * * \______/\_______/\_______/ * merge merge merge * * DEL DEL REPL INS REPL * \__________/ \__/ * discard convert to INS * * is_last_level = false * * If the oldest statement for a given key is an INSERT, all * leading DELETE statements should be discarded and the first * non-DELETE statement should be turned into an INSERT. 
*/ const struct vy_stmt_template content[] = { STMT_TEMPLATE(2, INSERT, 1, 1), STMT_TEMPLATE(3, DELETE, 1), STMT_TEMPLATE(4, REPLACE, 1, 2), STMT_TEMPLATE(5, DELETE, 1), STMT_TEMPLATE(6, REPLACE, 1, 3), STMT_TEMPLATE(7, REPLACE, 1, 4), STMT_TEMPLATE(8, INSERT, 1, 5), STMT_TEMPLATE(9, REPLACE, 1, 6), }; const struct vy_stmt_template expected[] = { content[7], content[6], STMT_TEMPLATE(7, INSERT, 1, 4), }; const int vlsns[] = {3, 5, 7, 8, 9}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } { /* * STATEMENT: DEL INS DEL INS REPL DEL INS * LSN: 3 4 5 6 7 8 9 * READ VIEW: * * * \______________/ \_____/ * merge merge * * INS REPL INS * \__/ * convert to REPL * * is_last_level = false * * If the oldest statement for a given key is NOT an INSERT * and the first key in the resulting history turns out to be * an INSERT, it should be converted to a REPLACE. 
*/ const struct vy_stmt_template content[] = { STMT_TEMPLATE(3, DELETE, 1), STMT_TEMPLATE(4, INSERT, 1, 1), STMT_TEMPLATE(5, DELETE, 1), STMT_TEMPLATE(6, INSERT, 1, 2), STMT_TEMPLATE(7, REPLACE, 1, 3), STMT_TEMPLATE(8, DELETE, 1), STMT_TEMPLATE(9, INSERT, 1, 4), }; const struct vy_stmt_template expected[] = { content[6], content[4], STMT_TEMPLATE(6, REPLACE, 1, 2), }; const int vlsns[] = {6, 7}; int content_count = sizeof(content) / sizeof(content[0]); int expected_count = sizeof(expected) / sizeof(expected[0]); int vlsns_count = sizeof(vlsns) / sizeof(vlsns[0]); compare_write_iterator_results(key_def, content, content_count, expected, expected_count, vlsns, vlsns_count, true, false); } key_def_delete(key_def); fiber_gc(); footer(); check_plan(); } int main(int argc, char *argv[]) { vy_iterator_C_test_init(0); test_basic(); vy_iterator_C_test_finish(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/rtree_iterator.result0000664000000000000000000000035713306560010022435 0ustar rootroot *** iterator_check *** Test tree size: 50000 --> 0x1 0x2 0x3 0x4 0x5 0x6 0x7 <-- 0xc34c 0xc34d 0xc34e 0xc34f 0xc350 0xc34b 0xc34a *** iterator_check: done *** *** iterator_invalidate_check *** *** iterator_invalidate_check: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/bitset_index.result0000664000000000000000000000112413306560010022055 0ustar rootroot *** test_size_and_count *** *** test_size_and_count: done *** *** test_resize *** *** test_resize: done *** *** test_insert_remove *** Generating test set... ok Inserting pairs... ok Checking keys... ok Removing random pairs... ok Checking keys... 
ok *** test_insert_remove: done *** *** test_empty_simple *** *** test_empty_simple: done *** *** test_all_simple *** *** test_all_simple: done *** *** test_all_set_simple *** *** test_all_set_simple: done *** *** test_any_set_simple *** *** test_any_set_simple: done *** *** test_equals_simple *** *** test_equals_simple: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_channel.cc0000664000000000000000000000417713306560010021235 0ustar rootroot#include "memory.h" #include "fiber.h" #include "fiber_channel.h" #include "unit.h" int status; void fiber_channel_basic() { header(); plan(10); struct fiber_channel *channel = fiber_channel_new(1); ok(channel != NULL, "fiber_channel_new()"); ok(fiber_channel_size(channel) == 1, "fiber_channel_size()"); ok(fiber_channel_count(channel) == 0, "fiber_channel_count()"); ok(fiber_channel_is_full(channel) == false, "fiber_channel_is_full()"); ok(fiber_channel_is_empty(channel) == true, "fiber_channel_is_empty()"); char dummy; fiber_channel_put(channel, &dummy); ok(fiber_channel_size(channel) == 1, "fiber_channel_size(1)"); ok(fiber_channel_count(channel) == 1, "fiber_channel_count(1)"); ok(fiber_channel_is_full(channel) == true, "fiber_channel_is_full(1)"); ok(fiber_channel_is_empty(channel) == false, "fiber_channel_is_empty(1)"); void *ptr; fiber_channel_get(channel, &ptr); ok(ptr == &dummy, "fiber_channel_get()"); fiber_channel_delete(channel); footer(); status = check_plan(); } void fiber_channel_get() { header(); plan(7); struct fiber_channel *channel = fiber_channel_new(1); char dummy; ok(fiber_channel_put_timeout(channel, &dummy, 0) == 0, "fiber_channel_put(0)"); ok(fiber_channel_put_timeout(channel, &dummy, 0) == -1, "fiber_channel_put_timeout(0)"); void *ptr; fiber_channel_get(channel, &ptr); ok(ptr == &dummy, "fiber_channel_get(0)"); ok(fiber_channel_put_timeout(channel, &dummy, 0.01) == 0, "fiber_channel_put_timeout(1)"); fiber_channel_get(channel, &ptr); ok(ptr == &dummy, "fiber_channel_get(1)"); 
fiber_channel_close(channel); ok(fiber_channel_put(channel, &dummy) == -1, "fiber_channel_put(closed)"); ok(fiber_channel_get(channel, &ptr) == -1, "fiber_channel_get(closed)"); fiber_channel_delete(channel); footer(); status = check_plan(); } int main_f(va_list ap) { (void) ap; fiber_channel_basic(); fiber_channel_get(); ev_break(loop(), EVBREAK_ALL); return 0; } int main() { memory_init(); fiber_init(fiber_c_invoke); struct fiber *main= fiber_new_xc("main", main_f); fiber_wakeup(main); ev_run(loop(), 0); fiber_free(); memory_free(); return status; } tarantool_1.9.1.26.g63eb81e3c/test/unit/bit.c0000664000000000000000000001070713306560010017065 0ustar rootroot#include #include #include #include #include #include "unit.h" static uint64_t vals[] = { 0UL, 1UL, 2UL, 32768UL, 65535UL, 65536UL, 726075912UL, 858993459UL, 1073741824UL, 1245250552UL, 1431655765UL, 1656977767UL, 2147483648UL, 2283114629UL, 2502548245UL, 4294967295UL, 708915120906848425UL, 1960191741125985428UL, 3689348814741910323UL, 5578377670650038654UL, 9223372036854775808UL, 10755112315580060033UL, 11163782031541429823UL, 13903686156871869732UL, 14237897302422917095UL, 14302190498657618739UL, 15766411510232741269UL, 15984546468465238145UL, 18446744073709551615UL }; static void test_ctz_clz(void) { header(); for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) { if (vals[i] == 0) continue; uint64_t val64 = vals[i]; uint32_t val32 = (uint32_t) vals[i]; printf("bit_ctz_u64(%" PRIu64 ") => %d\n", val64, bit_ctz_u64(val64)); printf("bit_clz_u64(%" PRIu64 ") => %d\n", val64, bit_clz_u64(val64)); if (vals[i] > UINT32_MAX) continue; printf("bit_ctz_u32(%" PRIu32 ") => %d\n", val32, bit_ctz_u32(val32)); printf("bit_clz_u32(%" PRIu32 ") => %d\n", val32, bit_clz_u32(val32)); } footer(); } static void test_count(void) { header(); for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) { uint64_t val64 = vals[i]; uint32_t val32 = (uint32_t) vals[i]; printf("bit_count_u64(%" PRIu64 ") => %d\n", val64, 
bit_count_u64(val64)); if (vals[i] > UINT32_MAX) continue; printf("bit_count_u32(%" PRIu32 ") => %d\n", val32, bit_count_u32(val32)); } footer(); } static void test_rotl_rotr_one(int rot) { for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) { uint64_t val64 = vals[i]; uint32_t val32 = (uint32_t) vals[i]; printf("bit_rotl_u64(%" PRIu64 ", %d) => %" PRIu64 "\n", val64, rot, bit_rotl_u64(val64, rot)); printf("bit_rotr_u64(%" PRIu64 ", %d) => %" PRIu64 "\n", val64, rot, bit_rotr_u64(val64, rot)); if (vals[i] > UINT32_MAX || rot > 32) continue; printf("bit_rotl_u32(%" PRIu32 ", %d) => %" PRIu32 "\n", val32, rot, bit_rotl_u32(val32, rot)); printf("bit_rotr_u32(%" PRIu32 ", %d) => %" PRIu32 "\n", val32, rot, bit_rotr_u32(val32, rot)); } } static void test_rotl_rotr(void) { header(); int rots[] = { 0, 1, 15, 16, 31, 32, 63, 64 }; for (unsigned r = 0; r < sizeof(rots) / sizeof(rots[0]); r++) { test_rotl_rotr_one(rots[r]); } footer(); } static void test_bswap(void) { header(); for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) { uint64_t val64 = vals[i]; uint32_t val32 = (uint32_t) vals[i]; printf("bswap_u64(%" PRIu64 ") => %" PRIu64 "\n", val64, bswap_u64(val64)); if (vals[i] > UINT32_MAX) continue; printf("bswap_u32(%" PRIu32 ") => %" PRIu32 "\n", val32, bswap_u32(val32)); } footer(); } static inline void test_index_print(const int *start, const int *end) { for (const int *cur = start; cur < end; cur++) { printf("%d ", *cur); } } static void test_index(void) { header(); int indexes[sizeof(int64_t) * CHAR_BIT + 1]; for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) { uint64_t val64 = vals[i]; uint32_t val32 = (uint32_t) vals[i]; printf("bit_index_u64(%" PRIu64 ", *, -1) => ", val64); test_index_print(indexes, bit_index_u64(val64, indexes, -1)); printf("\n"); if (vals[i] > UINT32_MAX) continue; printf("bit_index_u32(%" PRIu32 ", *, -1) => ", val32); test_index_print(indexes, bit_index_u32(val32, indexes, -1)); printf("\n"); } footer(); } static 
void test_bit_iter(void) { header(); struct bit_iterator it; uint64_t *data = vals + 6; size_t size = 10; size_t pos = 0; printf("Set: "); bit_iterator_init(&it, data, size, true); while ( (pos = bit_iterator_next(&it)) != SIZE_MAX) { printf("%zu, ", pos); fail_unless(bit_test(data, pos)); } printf("\n"); printf("Clear: "); bit_iterator_init(&it, data, size, false); while ( (pos = bit_iterator_next(&it)) != SIZE_MAX) { printf("%zu, ", pos); fail_if(bit_test(data, pos)); } printf("\n"); footer(); } static void test_bit_iter_empty(void) { header(); struct bit_iterator it; bit_iterator_init(&it, NULL, 0, true); fail_unless(bit_iterator_next(&it) == SIZE_MAX); bit_iterator_init(&it, NULL, 0, false); fail_unless(bit_iterator_next(&it) == SIZE_MAX); footer(); } int main(void) { test_ctz_clz(); test_count(); test_rotl_rotr(); test_bswap(); test_index(); test_bit_iter(); test_bit_iter_empty(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/bit.result0000664000000000000000000011742113306560010020162 0ustar rootroot *** test_ctz_clz *** bit_ctz_u64(1) => 0 bit_clz_u64(1) => 63 bit_ctz_u32(1) => 0 bit_clz_u32(1) => 31 bit_ctz_u64(2) => 1 bit_clz_u64(2) => 62 bit_ctz_u32(2) => 1 bit_clz_u32(2) => 30 bit_ctz_u64(32768) => 15 bit_clz_u64(32768) => 48 bit_ctz_u32(32768) => 15 bit_clz_u32(32768) => 16 bit_ctz_u64(65535) => 0 bit_clz_u64(65535) => 48 bit_ctz_u32(65535) => 0 bit_clz_u32(65535) => 16 bit_ctz_u64(65536) => 16 bit_clz_u64(65536) => 47 bit_ctz_u32(65536) => 16 bit_clz_u32(65536) => 15 bit_ctz_u64(726075912) => 3 bit_clz_u64(726075912) => 34 bit_ctz_u32(726075912) => 3 bit_clz_u32(726075912) => 2 bit_ctz_u64(858993459) => 0 bit_clz_u64(858993459) => 34 bit_ctz_u32(858993459) => 0 bit_clz_u32(858993459) => 2 bit_ctz_u64(1073741824) => 30 bit_clz_u64(1073741824) => 33 bit_ctz_u32(1073741824) => 30 bit_clz_u32(1073741824) => 1 bit_ctz_u64(1245250552) => 3 bit_clz_u64(1245250552) => 33 bit_ctz_u32(1245250552) => 3 bit_clz_u32(1245250552) => 1 bit_ctz_u64(1431655765) => 0 
bit_clz_u64(1431655765) => 33 bit_ctz_u32(1431655765) => 0 bit_clz_u32(1431655765) => 1 bit_ctz_u64(1656977767) => 0 bit_clz_u64(1656977767) => 33 bit_ctz_u32(1656977767) => 0 bit_clz_u32(1656977767) => 1 bit_ctz_u64(2147483648) => 31 bit_clz_u64(2147483648) => 32 bit_ctz_u32(2147483648) => 31 bit_clz_u32(2147483648) => 0 bit_ctz_u64(2283114629) => 0 bit_clz_u64(2283114629) => 32 bit_ctz_u32(2283114629) => 0 bit_clz_u32(2283114629) => 0 bit_ctz_u64(2502548245) => 0 bit_clz_u64(2502548245) => 32 bit_ctz_u32(2502548245) => 0 bit_clz_u32(2502548245) => 0 bit_ctz_u64(4294967295) => 0 bit_clz_u64(4294967295) => 32 bit_ctz_u32(4294967295) => 0 bit_clz_u32(4294967295) => 0 bit_ctz_u64(708915120906848425) => 0 bit_clz_u64(708915120906848425) => 4 bit_ctz_u64(1960191741125985428) => 2 bit_clz_u64(1960191741125985428) => 3 bit_ctz_u64(3689348814741910323) => 0 bit_clz_u64(3689348814741910323) => 2 bit_ctz_u64(5578377670650038654) => 1 bit_clz_u64(5578377670650038654) => 1 bit_ctz_u64(9223372036854775808) => 63 bit_clz_u64(9223372036854775808) => 0 bit_ctz_u64(10755112315580060033) => 0 bit_clz_u64(10755112315580060033) => 0 bit_ctz_u64(11163782031541429823) => 0 bit_clz_u64(11163782031541429823) => 0 bit_ctz_u64(13903686156871869732) => 2 bit_clz_u64(13903686156871869732) => 0 bit_ctz_u64(14237897302422917095) => 0 bit_clz_u64(14237897302422917095) => 0 bit_ctz_u64(14302190498657618739) => 0 bit_clz_u64(14302190498657618739) => 0 bit_ctz_u64(15766411510232741269) => 0 bit_clz_u64(15766411510232741269) => 0 bit_ctz_u64(15984546468465238145) => 0 bit_clz_u64(15984546468465238145) => 0 bit_ctz_u64(18446744073709551615) => 0 bit_clz_u64(18446744073709551615) => 0 *** test_ctz_clz: done *** *** test_count *** bit_count_u64(0) => 0 bit_count_u32(0) => 0 bit_count_u64(1) => 1 bit_count_u32(1) => 1 bit_count_u64(2) => 1 bit_count_u32(2) => 1 bit_count_u64(32768) => 1 bit_count_u32(32768) => 1 bit_count_u64(65535) => 16 bit_count_u32(65535) => 16 bit_count_u64(65536) => 1 
bit_count_u32(65536) => 1 bit_count_u64(726075912) => 11 bit_count_u32(726075912) => 11 bit_count_u64(858993459) => 16 bit_count_u32(858993459) => 16 bit_count_u64(1073741824) => 1 bit_count_u32(1073741824) => 1 bit_count_u64(1245250552) => 14 bit_count_u32(1245250552) => 14 bit_count_u64(1431655765) => 16 bit_count_u32(1431655765) => 16 bit_count_u64(1656977767) => 17 bit_count_u32(1656977767) => 17 bit_count_u64(2147483648) => 1 bit_count_u32(2147483648) => 1 bit_count_u64(2283114629) => 10 bit_count_u32(2283114629) => 10 bit_count_u64(2502548245) => 16 bit_count_u32(2502548245) => 16 bit_count_u64(4294967295) => 32 bit_count_u32(4294967295) => 32 bit_count_u64(708915120906848425) => 29 bit_count_u64(1960191741125985428) => 19 bit_count_u64(3689348814741910323) => 32 bit_count_u64(5578377670650038654) => 31 bit_count_u64(9223372036854775808) => 1 bit_count_u64(10755112315580060033) => 24 bit_count_u64(11163782031541429823) => 35 bit_count_u64(13903686156871869732) => 28 bit_count_u64(14237897302422917095) => 33 bit_count_u64(14302190498657618739) => 37 bit_count_u64(15766411510232741269) => 33 bit_count_u64(15984546468465238145) => 25 bit_count_u64(18446744073709551615) => 64 *** test_count: done *** *** test_rotl_rotr *** bit_rotl_u64(0, 0) => 0 bit_rotr_u64(0, 0) => 0 bit_rotl_u32(0, 0) => 0 bit_rotr_u32(0, 0) => 0 bit_rotl_u64(1, 0) => 1 bit_rotr_u64(1, 0) => 1 bit_rotl_u32(1, 0) => 1 bit_rotr_u32(1, 0) => 1 bit_rotl_u64(2, 0) => 2 bit_rotr_u64(2, 0) => 2 bit_rotl_u32(2, 0) => 2 bit_rotr_u32(2, 0) => 2 bit_rotl_u64(32768, 0) => 32768 bit_rotr_u64(32768, 0) => 32768 bit_rotl_u32(32768, 0) => 32768 bit_rotr_u32(32768, 0) => 32768 bit_rotl_u64(65535, 0) => 65535 bit_rotr_u64(65535, 0) => 65535 bit_rotl_u32(65535, 0) => 65535 bit_rotr_u32(65535, 0) => 65535 bit_rotl_u64(65536, 0) => 65536 bit_rotr_u64(65536, 0) => 65536 bit_rotl_u32(65536, 0) => 65536 bit_rotr_u32(65536, 0) => 65536 bit_rotl_u64(726075912, 0) => 726075912 bit_rotr_u64(726075912, 0) => 726075912 
bit_rotl_u32(726075912, 0) => 726075912 bit_rotr_u32(726075912, 0) => 726075912 bit_rotl_u64(858993459, 0) => 858993459 bit_rotr_u64(858993459, 0) => 858993459 bit_rotl_u32(858993459, 0) => 858993459 bit_rotr_u32(858993459, 0) => 858993459 bit_rotl_u64(1073741824, 0) => 1073741824 bit_rotr_u64(1073741824, 0) => 1073741824 bit_rotl_u32(1073741824, 0) => 1073741824 bit_rotr_u32(1073741824, 0) => 1073741824 bit_rotl_u64(1245250552, 0) => 1245250552 bit_rotr_u64(1245250552, 0) => 1245250552 bit_rotl_u32(1245250552, 0) => 1245250552 bit_rotr_u32(1245250552, 0) => 1245250552 bit_rotl_u64(1431655765, 0) => 1431655765 bit_rotr_u64(1431655765, 0) => 1431655765 bit_rotl_u32(1431655765, 0) => 1431655765 bit_rotr_u32(1431655765, 0) => 1431655765 bit_rotl_u64(1656977767, 0) => 1656977767 bit_rotr_u64(1656977767, 0) => 1656977767 bit_rotl_u32(1656977767, 0) => 1656977767 bit_rotr_u32(1656977767, 0) => 1656977767 bit_rotl_u64(2147483648, 0) => 2147483648 bit_rotr_u64(2147483648, 0) => 2147483648 bit_rotl_u32(2147483648, 0) => 2147483648 bit_rotr_u32(2147483648, 0) => 2147483648 bit_rotl_u64(2283114629, 0) => 2283114629 bit_rotr_u64(2283114629, 0) => 2283114629 bit_rotl_u32(2283114629, 0) => 2283114629 bit_rotr_u32(2283114629, 0) => 2283114629 bit_rotl_u64(2502548245, 0) => 2502548245 bit_rotr_u64(2502548245, 0) => 2502548245 bit_rotl_u32(2502548245, 0) => 2502548245 bit_rotr_u32(2502548245, 0) => 2502548245 bit_rotl_u64(4294967295, 0) => 4294967295 bit_rotr_u64(4294967295, 0) => 4294967295 bit_rotl_u32(4294967295, 0) => 4294967295 bit_rotr_u32(4294967295, 0) => 4294967295 bit_rotl_u64(708915120906848425, 0) => 708915120906848425 bit_rotr_u64(708915120906848425, 0) => 708915120906848425 bit_rotl_u64(1960191741125985428, 0) => 1960191741125985428 bit_rotr_u64(1960191741125985428, 0) => 1960191741125985428 bit_rotl_u64(3689348814741910323, 0) => 3689348814741910323 bit_rotr_u64(3689348814741910323, 0) => 3689348814741910323 bit_rotl_u64(5578377670650038654, 0) => 5578377670650038654 
bit_rotr_u64(5578377670650038654, 0) => 5578377670650038654 bit_rotl_u64(9223372036854775808, 0) => 9223372036854775808 bit_rotr_u64(9223372036854775808, 0) => 9223372036854775808 bit_rotl_u64(10755112315580060033, 0) => 10755112315580060033 bit_rotr_u64(10755112315580060033, 0) => 10755112315580060033 bit_rotl_u64(11163782031541429823, 0) => 11163782031541429823 bit_rotr_u64(11163782031541429823, 0) => 11163782031541429823 bit_rotl_u64(13903686156871869732, 0) => 13903686156871869732 bit_rotr_u64(13903686156871869732, 0) => 13903686156871869732 bit_rotl_u64(14237897302422917095, 0) => 14237897302422917095 bit_rotr_u64(14237897302422917095, 0) => 14237897302422917095 bit_rotl_u64(14302190498657618739, 0) => 14302190498657618739 bit_rotr_u64(14302190498657618739, 0) => 14302190498657618739 bit_rotl_u64(15766411510232741269, 0) => 15766411510232741269 bit_rotr_u64(15766411510232741269, 0) => 15766411510232741269 bit_rotl_u64(15984546468465238145, 0) => 15984546468465238145 bit_rotr_u64(15984546468465238145, 0) => 15984546468465238145 bit_rotl_u64(18446744073709551615, 0) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 0) => 18446744073709551615 bit_rotl_u64(0, 1) => 0 bit_rotr_u64(0, 1) => 0 bit_rotl_u32(0, 1) => 0 bit_rotr_u32(0, 1) => 0 bit_rotl_u64(1, 1) => 2 bit_rotr_u64(1, 1) => 9223372036854775808 bit_rotl_u32(1, 1) => 2 bit_rotr_u32(1, 1) => 2147483648 bit_rotl_u64(2, 1) => 4 bit_rotr_u64(2, 1) => 1 bit_rotl_u32(2, 1) => 4 bit_rotr_u32(2, 1) => 1 bit_rotl_u64(32768, 1) => 65536 bit_rotr_u64(32768, 1) => 16384 bit_rotl_u32(32768, 1) => 65536 bit_rotr_u32(32768, 1) => 16384 bit_rotl_u64(65535, 1) => 131070 bit_rotr_u64(65535, 1) => 9223372036854808575 bit_rotl_u32(65535, 1) => 131070 bit_rotr_u32(65535, 1) => 2147516415 bit_rotl_u64(65536, 1) => 131072 bit_rotr_u64(65536, 1) => 32768 bit_rotl_u32(65536, 1) => 131072 bit_rotr_u32(65536, 1) => 32768 bit_rotl_u64(726075912, 1) => 1452151824 bit_rotr_u64(726075912, 1) => 363037956 bit_rotl_u32(726075912, 
1) => 1452151824 bit_rotr_u32(726075912, 1) => 363037956 bit_rotl_u64(858993459, 1) => 1717986918 bit_rotr_u64(858993459, 1) => 9223372037284272537 bit_rotl_u32(858993459, 1) => 1717986918 bit_rotr_u32(858993459, 1) => 2576980377 bit_rotl_u64(1073741824, 1) => 2147483648 bit_rotr_u64(1073741824, 1) => 536870912 bit_rotl_u32(1073741824, 1) => 2147483648 bit_rotr_u32(1073741824, 1) => 536870912 bit_rotl_u64(1245250552, 1) => 2490501104 bit_rotr_u64(1245250552, 1) => 622625276 bit_rotl_u32(1245250552, 1) => 2490501104 bit_rotr_u32(1245250552, 1) => 622625276 bit_rotl_u64(1431655765, 1) => 2863311530 bit_rotr_u64(1431655765, 1) => 9223372037570603690 bit_rotl_u32(1431655765, 1) => 2863311530 bit_rotr_u32(1431655765, 1) => 2863311530 bit_rotl_u64(1656977767, 1) => 3313955534 bit_rotr_u64(1656977767, 1) => 9223372037683264691 bit_rotl_u32(1656977767, 1) => 3313955534 bit_rotr_u32(1656977767, 1) => 2975972531 bit_rotl_u64(2147483648, 1) => 4294967296 bit_rotr_u64(2147483648, 1) => 1073741824 bit_rotl_u32(2147483648, 1) => 1 bit_rotr_u32(2147483648, 1) => 1073741824 bit_rotl_u64(2283114629, 1) => 4566229258 bit_rotr_u64(2283114629, 1) => 9223372037996333122 bit_rotl_u32(2283114629, 1) => 271261963 bit_rotr_u32(2283114629, 1) => 3289040962 bit_rotl_u64(2502548245, 1) => 5005096490 bit_rotr_u64(2502548245, 1) => 9223372038106049930 bit_rotl_u32(2502548245, 1) => 710129195 bit_rotr_u32(2502548245, 1) => 3398757770 bit_rotl_u64(4294967295, 1) => 8589934590 bit_rotr_u64(4294967295, 1) => 9223372039002259455 bit_rotl_u32(4294967295, 1) => 4294967295 bit_rotr_u32(4294967295, 1) => 4294967295 bit_rotl_u64(708915120906848425, 1) => 1417830241813696850 bit_rotr_u64(708915120906848425, 1) => 9577829597308200020 bit_rotl_u64(1960191741125985428, 1) => 3920383482251970856 bit_rotr_u64(1960191741125985428, 1) => 980095870562992714 bit_rotl_u64(3689348814741910323, 1) => 7378697629483820646 bit_rotr_u64(3689348814741910323, 1) => 11068046444225730969 bit_rotl_u64(5578377670650038654, 1) 
=> 11156755341300077308 bit_rotr_u64(5578377670650038654, 1) => 2789188835325019327 bit_rotl_u64(9223372036854775808, 1) => 1 bit_rotr_u64(9223372036854775808, 1) => 4611686018427387904 bit_rotl_u64(10755112315580060033, 1) => 3063480557450568451 bit_rotr_u64(10755112315580060033, 1) => 14600928194644805824 bit_rotl_u64(11163782031541429823, 1) => 3880819989373308031 bit_rotr_u64(11163782031541429823, 1) => 14805263052625490719 bit_rotl_u64(13903686156871869732, 1) => 9360628240034187849 bit_rotr_u64(13903686156871869732, 1) => 6951843078435934866 bit_rotl_u64(14237897302422917095, 1) => 10029050531136282575 bit_rotr_u64(14237897302422917095, 1) => 16342320688066234355 bit_rotl_u64(14302190498657618739, 1) => 10157636923605685863 bit_rotr_u64(14302190498657618739, 1) => 16374467286183585177 bit_rotl_u64(15766411510232741269, 1) => 13086078946755930923 bit_rotr_u64(15766411510232741269, 1) => 17106577791971146442 bit_rotl_u64(15984546468465238145, 1) => 13522348863220924675 bit_rotr_u64(15984546468465238145, 1) => 17215645271087394880 bit_rotl_u64(18446744073709551615, 1) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 1) => 18446744073709551615 bit_rotl_u64(0, 15) => 0 bit_rotr_u64(0, 15) => 0 bit_rotl_u32(0, 15) => 0 bit_rotr_u32(0, 15) => 0 bit_rotl_u64(1, 15) => 32768 bit_rotr_u64(1, 15) => 562949953421312 bit_rotl_u32(1, 15) => 32768 bit_rotr_u32(1, 15) => 131072 bit_rotl_u64(2, 15) => 65536 bit_rotr_u64(2, 15) => 1125899906842624 bit_rotl_u32(2, 15) => 65536 bit_rotr_u32(2, 15) => 262144 bit_rotl_u64(32768, 15) => 1073741824 bit_rotr_u64(32768, 15) => 1 bit_rotl_u32(32768, 15) => 1073741824 bit_rotr_u32(32768, 15) => 1 bit_rotl_u64(65535, 15) => 2147450880 bit_rotr_u64(65535, 15) => 18446181123756130305 bit_rotl_u32(65535, 15) => 2147450880 bit_rotr_u32(65535, 15) => 4294836225 bit_rotl_u64(65536, 15) => 2147483648 bit_rotr_u64(65536, 15) => 2 bit_rotl_u32(65536, 15) => 2147483648 bit_rotr_u32(65536, 15) => 2 bit_rotl_u64(726075912, 15) => 
23792055484416 bit_rotr_u64(726075912, 15) => 1445655480385951374 bit_rotl_u32(726075912, 15) => 2231637411 bit_rotr_u32(726075912, 15) => 336615054 bit_rotl_u64(858993459, 15) => 28147497664512 bit_rotr_u64(858993459, 15) => 7378585039493162598 bit_rotl_u32(858993459, 15) => 2576980377 bit_rotr_u32(858993459, 15) => 1717986918 bit_rotl_u64(1073741824, 15) => 35184372088832 bit_rotr_u64(1073741824, 15) => 32768 bit_rotl_u32(1073741824, 15) => 8192 bit_rotr_u32(1073741824, 15) => 32768 bit_rotl_u64(1245250552, 15) => 40804370087936 bit_rotr_u64(1245250552, 15) => 571957152676090994 bit_rotl_u32(1245250552, 15) => 2180785436 bit_rotr_u32(1245250552, 15) => 133207154 bit_rotl_u64(1431655765, 15) => 46912496107520 bit_rotr_u64(1431655765, 15) => 12297641732488604330 bit_rotl_u32(1431655765, 15) => 2863311530 bit_rotr_u32(1431655765, 15) => 2863311530 bit_rotl_u64(1656977767, 15) => 54295847469056 bit_rotr_u64(1656977767, 15) => 17495921602381006214 bit_rotl_u32(1656977767, 15) => 3165892961 bit_rotr_u32(1656977767, 15) => 4073637254 bit_rotl_u64(2147483648, 15) => 70368744177664 bit_rotr_u64(2147483648, 15) => 65536 bit_rotl_u32(2147483648, 15) => 16384 bit_rotr_u32(2147483648, 15) => 65536 bit_rotl_u64(2283114629, 15) => 74813100163072 bit_rotr_u64(2283114629, 15) => 2380715353018798123 bit_rotl_u32(2283114629, 15) => 3359818762 bit_rotr_u32(2283114629, 15) => 554373163 bit_rotl_u64(2502548245, 15) => 82003500892160 bit_rotr_u64(2502548245, 15) => 13126304063924808275 bit_rotl_u32(2502548245, 15) => 3985296020 bit_rotr_u32(2502548245, 15) => 3056282195 bit_rotl_u64(4294967295, 15) => 140737488322560 bit_rotr_u64(4294967295, 15) => 18446181123756261375 bit_rotl_u32(4294967295, 15) => 4294967295 bit_rotr_u32(4294967295, 15) => 4294967295 bit_rotl_u64(708915120906848425, 15) => 5279893075283707115 bit_rotr_u64(708915120906848425, 15) => 10471453717962410780 bit_rotl_u64(1960191741125985428, 15) => 108559631781274 bit_rotr_u64(1960191741125985428, 15) => 
659837165714377456 bit_rotl_u64(3689348814741910323, 15) => 11068046444225730969 bit_rotr_u64(3689348814741910323, 15) => 7378697629483820646 bit_rotl_u64(5578377670650038654, 15) => 3492485472519661237 bit_rotr_u64(5578377670650038654, 15) => 1368138625390162978 bit_rotl_u64(9223372036854775808, 15) => 16384 bit_rotr_u64(9223372036854775808, 15) => 281474976710656 bit_rotl_u64(10755112315580060033, 15) => 16921572780133108384 bit_rotr_u64(10755112315580060033, 15) => 2522906961261232936 bit_rotl_u64(11163782031541429823, 15) => 15874627889163914614 bit_rotr_u64(11163782031541429823, 15) => 14735555722393159636 bit_rotl_u64(13903686156871869732, 15) => 16749599972631142521 bit_rotr_u64(13903686156871869732, 15) => 5352952463958352314 bit_rotl_u64(14237897302422917095, 15) => 10814437605877473995 bit_rotr_u64(14237897302422917095, 15) => 2292203766521423570 bit_rotl_u64(14302190498657618739, 15) => 14645067421692060477 bit_rotr_u64(14302190498657618739, 15) => 10837786021529578728 bit_rotl_u64(15766411510232741269, 15) => 14257838996763372902 bit_rotr_u64(15766411510232741269, 15) => 16945837700629926938 bit_rotl_u64(15984546468465238145, 15) => 4767449759914979050 bit_rotr_u64(15984546468465238145, 15) => 13908166408919157040 bit_rotl_u64(18446744073709551615, 15) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 15) => 18446744073709551615 bit_rotl_u64(0, 16) => 0 bit_rotr_u64(0, 16) => 0 bit_rotl_u32(0, 16) => 0 bit_rotr_u32(0, 16) => 0 bit_rotl_u64(1, 16) => 65536 bit_rotr_u64(1, 16) => 281474976710656 bit_rotl_u32(1, 16) => 65536 bit_rotr_u32(1, 16) => 65536 bit_rotl_u64(2, 16) => 131072 bit_rotr_u64(2, 16) => 562949953421312 bit_rotl_u32(2, 16) => 131072 bit_rotr_u32(2, 16) => 131072 bit_rotl_u64(32768, 16) => 2147483648 bit_rotr_u64(32768, 16) => 9223372036854775808 bit_rotl_u32(32768, 16) => 2147483648 bit_rotr_u32(32768, 16) => 2147483648 bit_rotl_u64(65535, 16) => 4294901760 bit_rotr_u64(65535, 16) => 18446462598732840960 bit_rotl_u32(65535, 16) 
=> 4294901760 bit_rotr_u32(65535, 16) => 4294901760 bit_rotl_u64(65536, 16) => 4294967296 bit_rotr_u64(65536, 16) => 1 bit_rotl_u32(65536, 16) => 1 bit_rotr_u32(65536, 16) => 1 bit_rotl_u64(726075912, 16) => 47584110968832 bit_rotr_u64(726075912, 16) => 722827740192975687 bit_rotl_u32(726075912, 16) => 168307527 bit_rotr_u32(726075912, 16) => 168307527 bit_rotl_u64(858993459, 16) => 56294995329024 bit_rotr_u64(858993459, 16) => 3689292519746581299 bit_rotl_u32(858993459, 16) => 858993459 bit_rotr_u32(858993459, 16) => 858993459 bit_rotl_u64(1073741824, 16) => 70368744177664 bit_rotr_u64(1073741824, 16) => 16384 bit_rotl_u32(1073741824, 16) => 16384 bit_rotr_u32(1073741824, 16) => 16384 bit_rotl_u64(1245250552, 16) => 81608740175872 bit_rotr_u64(1245250552, 16) => 285978576338045497 bit_rotl_u32(1245250552, 16) => 66603577 bit_rotr_u32(1245250552, 16) => 66603577 bit_rotl_u64(1431655765, 16) => 93824992215040 bit_rotr_u64(1431655765, 16) => 6148820866244302165 bit_rotl_u32(1431655765, 16) => 1431655765 bit_rotr_u32(1431655765, 16) => 1431655765 bit_rotl_u64(1656977767, 16) => 108591694938112 bit_rotr_u64(1656977767, 16) => 8747960801190503107 bit_rotl_u32(1656977767, 16) => 2036818627 bit_rotr_u32(1656977767, 16) => 2036818627 bit_rotl_u64(2147483648, 16) => 140737488355328 bit_rotr_u64(2147483648, 16) => 32768 bit_rotl_u32(2147483648, 16) => 32768 bit_rotr_u32(2147483648, 16) => 32768 bit_rotl_u64(2283114629, 16) => 149626200326144 bit_rotr_u64(2283114629, 16) => 10413729713364174869 bit_rotl_u32(2283114629, 16) => 2424670229 bit_rotr_u32(2283114629, 16) => 2424670229 bit_rotl_u64(2502548245, 16) => 164007001784320 bit_rotr_u64(2502548245, 16) => 15786524068817179945 bit_rotl_u32(2502548245, 16) => 3675624745 bit_rotr_u32(2502548245, 16) => 3675624745 bit_rotl_u64(4294967295, 16) => 281474976645120 bit_rotr_u64(4294967295, 16) => 18446462598732906495 bit_rotl_u32(4294967295, 16) => 4294967295 bit_rotr_u32(4294967295, 16) => 4294967295 
bit_rotl_u64(708915120906848425, 16) => 10559786150567414230 bit_rotr_u64(708915120906848425, 16) => 5235726858981205390 bit_rotl_u64(1960191741125985428, 16) => 217119263562548 bit_rotr_u64(1960191741125985428, 16) => 329918582857188728 bit_rotl_u64(3689348814741910323, 16) => 3689348814741910323 bit_rotr_u64(3689348814741910323, 16) => 3689348814741910323 bit_rotl_u64(5578377670650038654, 16) => 6984970945039322474 bit_rotr_u64(5578377670650038654, 16) => 684069312695081489 bit_rotl_u64(9223372036854775808, 16) => 32768 bit_rotr_u64(9223372036854775808, 16) => 140737488355328 bit_rotl_u64(10755112315580060033, 16) => 15396401486556665153 bit_rotr_u64(10755112315580060033, 16) => 1261453480630616468 bit_rotl_u64(11163782031541429823, 16) => 13302511704618277613 bit_rotr_u64(11163782031541429823, 16) => 7367777861196579818 bit_rotl_u64(13903686156871869732, 16) => 15052455871552733427 bit_rotr_u64(13903686156871869732, 16) => 2676476231979176157 bit_rotl_u64(14237897302422917095, 16) => 3182131138045396375 bit_rotr_u64(14237897302422917095, 16) => 1146101883260711785 bit_rotl_u64(14302190498657618739, 16) => 10843390769674569339 bit_rotr_u64(14302190498657618739, 16) => 5418893010764789364 bit_rotl_u64(15766411510232741269, 16) => 10068933919817194189 bit_rotr_u64(15766411510232741269, 16) => 8472918850314963469 bit_rotl_u64(15984546468465238145, 16) => 9534899519829958100 bit_rotr_u64(15984546468465238145, 16) => 6954083204459578520 bit_rotl_u64(18446744073709551615, 16) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 16) => 18446744073709551615 bit_rotl_u64(0, 31) => 0 bit_rotr_u64(0, 31) => 0 bit_rotl_u32(0, 31) => 0 bit_rotr_u32(0, 31) => 0 bit_rotl_u64(1, 31) => 2147483648 bit_rotr_u64(1, 31) => 8589934592 bit_rotl_u32(1, 31) => 2147483648 bit_rotr_u32(1, 31) => 2 bit_rotl_u64(2, 31) => 4294967296 bit_rotr_u64(2, 31) => 17179869184 bit_rotl_u32(2, 31) => 1 bit_rotr_u32(2, 31) => 4 bit_rotl_u64(32768, 31) => 70368744177664 bit_rotr_u64(32768, 31) => 
281474976710656 bit_rotl_u32(32768, 31) => 16384 bit_rotr_u32(32768, 31) => 65536 bit_rotl_u64(65535, 31) => 140735340871680 bit_rotr_u64(65535, 31) => 562941363486720 bit_rotl_u32(65535, 31) => 2147516415 bit_rotr_u32(65535, 31) => 131070 bit_rotl_u64(65536, 31) => 140737488355328 bit_rotr_u64(65536, 31) => 562949953421312 bit_rotl_u32(65536, 31) => 32768 bit_rotr_u32(65536, 31) => 131072 bit_rotl_u64(726075912, 31) => 1559236148226686976 bit_rotr_u64(726075912, 31) => 6236944592906747904 bit_rotl_u32(726075912, 31) => 363037956 bit_rotr_u32(726075912, 31) => 1452151824 bit_rotl_u64(858993459, 31) => 1844674406941458432 bit_rotr_u64(858993459, 31) => 7378697627765833728 bit_rotl_u32(858993459, 31) => 2576980377 bit_rotr_u32(858993459, 31) => 1717986918 bit_rotl_u64(1073741824, 31) => 2305843009213693952 bit_rotr_u64(1073741824, 31) => 9223372036854775808 bit_rotl_u32(1073741824, 31) => 536870912 bit_rotr_u32(1073741824, 31) => 2147483648 bit_rotl_u64(1245250552, 31) => 2674155198082973696 bit_rotr_u64(1245250552, 31) => 10696620792331894784 bit_rotl_u32(1245250552, 31) => 622625276 bit_rotr_u32(1245250552, 31) => 2490501104 bit_rotl_u64(1431655765, 31) => 3074457344902430720 bit_rotr_u64(1431655765, 31) => 12297829379609722880 bit_rotl_u32(1431655765, 31) => 2863311530 bit_rotr_u32(1431655765, 31) => 2863311530 bit_rotl_u64(1656977767, 31) => 3558332659732054016 bit_rotr_u64(1656977767, 31) => 14233330638928216064 bit_rotl_u32(1656977767, 31) => 2975972531 bit_rotr_u32(1656977767, 31) => 3313955534 bit_rotl_u64(2147483648, 31) => 4611686018427387904 bit_rotr_u64(2147483648, 31) => 1 bit_rotl_u32(2147483648, 31) => 1073741824 bit_rotr_u32(2147483648, 31) => 1 bit_rotl_u64(2283114629, 31) => 4902951332287086592 bit_rotr_u64(2283114629, 31) => 1165061255438794753 bit_rotl_u32(2283114629, 31) => 3289040962 bit_rotr_u32(2283114629, 31) => 271261963 bit_rotl_u64(2502548245, 31) => 5374181434468597760 bit_rotr_u64(2502548245, 31) => 3049981664164839425 
bit_rotl_u32(2502548245, 31) => 3398757770 bit_rotr_u32(2502548245, 31) => 710129195 bit_rotl_u64(4294967295, 31) => 9223372034707292160 bit_rotr_u64(4294967295, 31) => 18446744065119617025 bit_rotl_u32(4294967295, 31) => 4294967295 bit_rotr_u32(4294967295, 31) => 4294967295 bit_rotl_u64(708915120906848425, 31) => 17493991222969846085 bit_rotr_u64(708915120906848425, 31) => 14635732670750729495 bit_rotl_u64(1960191741125985428, 31) => 7114564028417572864 bit_rotr_u64(1960191741125985428, 31) => 10011512039960739841 bit_rotl_u64(3689348814741910323, 31) => 11068046444225730969 bit_rotr_u64(3689348814741910323, 31) => 7378697629483820646 bit_rotl_u64(5578377670650038654, 31) => 14774204534111940727 bit_rotr_u64(5578377670650038654, 31) => 3756585915319108063 bit_rotl_u64(9223372036854775808, 31) => 1073741824 bit_rotr_u64(9223372036854775808, 31) => 4294967296 bit_rotl_u64(10755112315580060033, 31) => 9280239606276614869 bit_rotr_u64(10755112315580060033, 31) => 227470277687356246 bit_rotl_u64(11163782031541429823, 31) => 141075175016160334 bit_rotr_u64(11163782031541429823, 31) => 564300700064641336 bit_rotl_u64(13903686156871869732, 31) => 9830956193977854066 bit_rotr_u64(13903686156871869732, 31) => 2430336628492313034 bit_rotl_u64(14237897302422917095, 31) => 11075626865162688020 bit_rotr_u64(14237897302422917095, 31) => 7409019313231648850 bit_rotl_u64(14302190498657618739, 31) => 13491136976614443837 bit_rotr_u64(14302190498657618739, 31) => 17071059759038672118 bit_rotl_u64(15766411510232741269, 31) => 362182200778999262 bit_rotr_u64(15766411510232741269, 31) => 1448728803115997048 bit_rotl_u64(15984546468465238145, 31) => 7083089369391317545 bit_rotr_u64(15984546468465238145, 31) => 9885613403855718565 bit_rotl_u64(18446744073709551615, 31) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 31) => 18446744073709551615 bit_rotl_u64(0, 32) => 0 bit_rotr_u64(0, 32) => 0 bit_rotl_u32(0, 32) => 0 bit_rotr_u32(0, 32) => 0 bit_rotl_u64(1, 32) => 4294967296 
bit_rotr_u64(1, 32) => 4294967296 bit_rotl_u32(1, 32) => 1 bit_rotr_u32(1, 32) => 1 bit_rotl_u64(2, 32) => 8589934592 bit_rotr_u64(2, 32) => 8589934592 bit_rotl_u32(2, 32) => 2 bit_rotr_u32(2, 32) => 2 bit_rotl_u64(32768, 32) => 140737488355328 bit_rotr_u64(32768, 32) => 140737488355328 bit_rotl_u32(32768, 32) => 32768 bit_rotr_u32(32768, 32) => 32768 bit_rotl_u64(65535, 32) => 281470681743360 bit_rotr_u64(65535, 32) => 281470681743360 bit_rotl_u32(65535, 32) => 65535 bit_rotr_u32(65535, 32) => 65535 bit_rotl_u64(65536, 32) => 281474976710656 bit_rotr_u64(65536, 32) => 281474976710656 bit_rotl_u32(65536, 32) => 65536 bit_rotr_u32(65536, 32) => 65536 bit_rotl_u64(726075912, 32) => 3118472296453373952 bit_rotr_u64(726075912, 32) => 3118472296453373952 bit_rotl_u32(726075912, 32) => 726075912 bit_rotr_u32(726075912, 32) => 726075912 bit_rotl_u64(858993459, 32) => 3689348813882916864 bit_rotr_u64(858993459, 32) => 3689348813882916864 bit_rotl_u32(858993459, 32) => 858993459 bit_rotr_u32(858993459, 32) => 858993459 bit_rotl_u64(1073741824, 32) => 4611686018427387904 bit_rotr_u64(1073741824, 32) => 4611686018427387904 bit_rotl_u32(1073741824, 32) => 1073741824 bit_rotr_u32(1073741824, 32) => 1073741824 bit_rotl_u64(1245250552, 32) => 5348310396165947392 bit_rotr_u64(1245250552, 32) => 5348310396165947392 bit_rotl_u32(1245250552, 32) => 1245250552 bit_rotr_u32(1245250552, 32) => 1245250552 bit_rotl_u64(1431655765, 32) => 6148914689804861440 bit_rotr_u64(1431655765, 32) => 6148914689804861440 bit_rotl_u32(1431655765, 32) => 1431655765 bit_rotr_u32(1431655765, 32) => 1431655765 bit_rotl_u64(1656977767, 32) => 7116665319464108032 bit_rotr_u64(1656977767, 32) => 7116665319464108032 bit_rotl_u32(1656977767, 32) => 1656977767 bit_rotr_u32(1656977767, 32) => 1656977767 bit_rotl_u64(2147483648, 32) => 9223372036854775808 bit_rotr_u64(2147483648, 32) => 9223372036854775808 bit_rotl_u32(2147483648, 32) => 2147483648 bit_rotr_u32(2147483648, 32) => 2147483648 
bit_rotl_u64(2283114629, 32) => 9805902664574173184 bit_rotr_u64(2283114629, 32) => 9805902664574173184 bit_rotl_u32(2283114629, 32) => 2283114629 bit_rotr_u32(2283114629, 32) => 2283114629 bit_rotl_u64(2502548245, 32) => 10748362868937195520 bit_rotr_u64(2502548245, 32) => 10748362868937195520 bit_rotl_u32(2502548245, 32) => 2502548245 bit_rotr_u32(2502548245, 32) => 2502548245 bit_rotl_u64(4294967295, 32) => 18446744069414584320 bit_rotr_u64(4294967295, 32) => 18446744069414584320 bit_rotl_u32(4294967295, 32) => 4294967295 bit_rotr_u32(4294967295, 32) => 4294967295 bit_rotl_u64(708915120906848425, 32) => 16541238372230140555 bit_rotr_u64(708915120906848425, 32) => 16541238372230140555 bit_rotl_u64(1960191741125985428, 32) => 14229128056835145728 bit_rotr_u64(1960191741125985428, 32) => 14229128056835145728 bit_rotl_u64(3689348814741910323, 32) => 3689348814741910323 bit_rotr_u64(3689348814741910323, 32) => 3689348814741910323 bit_rotl_u64(5578377670650038654, 32) => 11101664994514329839 bit_rotr_u64(5578377670650038654, 32) => 11101664994514329839 bit_rotl_u64(9223372036854775808, 32) => 2147483648 bit_rotr_u64(9223372036854775808, 32) => 2147483648 bit_rotl_u64(10755112315580060033, 32) => 113735138843678123 bit_rotr_u64(10755112315580060033, 32) => 113735138843678123 bit_rotl_u64(11163782031541429823, 32) => 282150350032320668 bit_rotr_u64(11163782031541429823, 32) => 282150350032320668 bit_rotl_u64(13903686156871869732, 32) => 1215168314246156517 bit_rotr_u64(13903686156871869732, 32) => 1215168314246156517 bit_rotl_u64(14237897302422917095, 32) => 3704509656615824425 bit_rotr_u64(14237897302422917095, 32) => 3704509656615824425 bit_rotl_u64(14302190498657618739, 32) => 8535529879519336059 bit_rotr_u64(14302190498657618739, 32) => 8535529879519336059 bit_rotl_u64(15766411510232741269, 32) => 724364401557998524 bit_rotr_u64(15766411510232741269, 32) => 724364401557998524 bit_rotl_u64(15984546468465238145, 32) => 14166178738782635090 
bit_rotr_u64(15984546468465238145, 32) => 14166178738782635090 bit_rotl_u64(18446744073709551615, 32) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 32) => 18446744073709551615 bit_rotl_u64(0, 63) => 0 bit_rotr_u64(0, 63) => 0 bit_rotl_u64(1, 63) => 9223372036854775808 bit_rotr_u64(1, 63) => 2 bit_rotl_u64(2, 63) => 1 bit_rotr_u64(2, 63) => 4 bit_rotl_u64(32768, 63) => 16384 bit_rotr_u64(32768, 63) => 65536 bit_rotl_u64(65535, 63) => 9223372036854808575 bit_rotr_u64(65535, 63) => 131070 bit_rotl_u64(65536, 63) => 32768 bit_rotr_u64(65536, 63) => 131072 bit_rotl_u64(726075912, 63) => 363037956 bit_rotr_u64(726075912, 63) => 1452151824 bit_rotl_u64(858993459, 63) => 9223372037284272537 bit_rotr_u64(858993459, 63) => 1717986918 bit_rotl_u64(1073741824, 63) => 536870912 bit_rotr_u64(1073741824, 63) => 2147483648 bit_rotl_u64(1245250552, 63) => 622625276 bit_rotr_u64(1245250552, 63) => 2490501104 bit_rotl_u64(1431655765, 63) => 9223372037570603690 bit_rotr_u64(1431655765, 63) => 2863311530 bit_rotl_u64(1656977767, 63) => 9223372037683264691 bit_rotr_u64(1656977767, 63) => 3313955534 bit_rotl_u64(2147483648, 63) => 1073741824 bit_rotr_u64(2147483648, 63) => 4294967296 bit_rotl_u64(2283114629, 63) => 9223372037996333122 bit_rotr_u64(2283114629, 63) => 4566229258 bit_rotl_u64(2502548245, 63) => 9223372038106049930 bit_rotr_u64(2502548245, 63) => 5005096490 bit_rotl_u64(4294967295, 63) => 9223372039002259455 bit_rotr_u64(4294967295, 63) => 8589934590 bit_rotl_u64(708915120906848425, 63) => 9577829597308200020 bit_rotr_u64(708915120906848425, 63) => 1417830241813696850 bit_rotl_u64(1960191741125985428, 63) => 980095870562992714 bit_rotr_u64(1960191741125985428, 63) => 3920383482251970856 bit_rotl_u64(3689348814741910323, 63) => 11068046444225730969 bit_rotr_u64(3689348814741910323, 63) => 7378697629483820646 bit_rotl_u64(5578377670650038654, 63) => 2789188835325019327 bit_rotr_u64(5578377670650038654, 63) => 11156755341300077308 bit_rotl_u64(9223372036854775808, 
63) => 4611686018427387904 bit_rotr_u64(9223372036854775808, 63) => 1 bit_rotl_u64(10755112315580060033, 63) => 14600928194644805824 bit_rotr_u64(10755112315580060033, 63) => 3063480557450568451 bit_rotl_u64(11163782031541429823, 63) => 14805263052625490719 bit_rotr_u64(11163782031541429823, 63) => 3880819989373308031 bit_rotl_u64(13903686156871869732, 63) => 6951843078435934866 bit_rotr_u64(13903686156871869732, 63) => 9360628240034187849 bit_rotl_u64(14237897302422917095, 63) => 16342320688066234355 bit_rotr_u64(14237897302422917095, 63) => 10029050531136282575 bit_rotl_u64(14302190498657618739, 63) => 16374467286183585177 bit_rotr_u64(14302190498657618739, 63) => 10157636923605685863 bit_rotl_u64(15766411510232741269, 63) => 17106577791971146442 bit_rotr_u64(15766411510232741269, 63) => 13086078946755930923 bit_rotl_u64(15984546468465238145, 63) => 17215645271087394880 bit_rotr_u64(15984546468465238145, 63) => 13522348863220924675 bit_rotl_u64(18446744073709551615, 63) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 63) => 18446744073709551615 bit_rotl_u64(0, 64) => 0 bit_rotr_u64(0, 64) => 0 bit_rotl_u64(1, 64) => 1 bit_rotr_u64(1, 64) => 1 bit_rotl_u64(2, 64) => 2 bit_rotr_u64(2, 64) => 2 bit_rotl_u64(32768, 64) => 32768 bit_rotr_u64(32768, 64) => 32768 bit_rotl_u64(65535, 64) => 65535 bit_rotr_u64(65535, 64) => 65535 bit_rotl_u64(65536, 64) => 65536 bit_rotr_u64(65536, 64) => 65536 bit_rotl_u64(726075912, 64) => 726075912 bit_rotr_u64(726075912, 64) => 726075912 bit_rotl_u64(858993459, 64) => 858993459 bit_rotr_u64(858993459, 64) => 858993459 bit_rotl_u64(1073741824, 64) => 1073741824 bit_rotr_u64(1073741824, 64) => 1073741824 bit_rotl_u64(1245250552, 64) => 1245250552 bit_rotr_u64(1245250552, 64) => 1245250552 bit_rotl_u64(1431655765, 64) => 1431655765 bit_rotr_u64(1431655765, 64) => 1431655765 bit_rotl_u64(1656977767, 64) => 1656977767 bit_rotr_u64(1656977767, 64) => 1656977767 bit_rotl_u64(2147483648, 64) => 2147483648 bit_rotr_u64(2147483648, 
64) => 2147483648 bit_rotl_u64(2283114629, 64) => 2283114629 bit_rotr_u64(2283114629, 64) => 2283114629 bit_rotl_u64(2502548245, 64) => 2502548245 bit_rotr_u64(2502548245, 64) => 2502548245 bit_rotl_u64(4294967295, 64) => 4294967295 bit_rotr_u64(4294967295, 64) => 4294967295 bit_rotl_u64(708915120906848425, 64) => 708915120906848425 bit_rotr_u64(708915120906848425, 64) => 708915120906848425 bit_rotl_u64(1960191741125985428, 64) => 1960191741125985428 bit_rotr_u64(1960191741125985428, 64) => 1960191741125985428 bit_rotl_u64(3689348814741910323, 64) => 3689348814741910323 bit_rotr_u64(3689348814741910323, 64) => 3689348814741910323 bit_rotl_u64(5578377670650038654, 64) => 5578377670650038654 bit_rotr_u64(5578377670650038654, 64) => 5578377670650038654 bit_rotl_u64(9223372036854775808, 64) => 9223372036854775808 bit_rotr_u64(9223372036854775808, 64) => 9223372036854775808 bit_rotl_u64(10755112315580060033, 64) => 10755112315580060033 bit_rotr_u64(10755112315580060033, 64) => 10755112315580060033 bit_rotl_u64(11163782031541429823, 64) => 11163782031541429823 bit_rotr_u64(11163782031541429823, 64) => 11163782031541429823 bit_rotl_u64(13903686156871869732, 64) => 13903686156871869732 bit_rotr_u64(13903686156871869732, 64) => 13903686156871869732 bit_rotl_u64(14237897302422917095, 64) => 14237897302422917095 bit_rotr_u64(14237897302422917095, 64) => 14237897302422917095 bit_rotl_u64(14302190498657618739, 64) => 14302190498657618739 bit_rotr_u64(14302190498657618739, 64) => 14302190498657618739 bit_rotl_u64(15766411510232741269, 64) => 15766411510232741269 bit_rotr_u64(15766411510232741269, 64) => 15766411510232741269 bit_rotl_u64(15984546468465238145, 64) => 15984546468465238145 bit_rotr_u64(15984546468465238145, 64) => 15984546468465238145 bit_rotl_u64(18446744073709551615, 64) => 18446744073709551615 bit_rotr_u64(18446744073709551615, 64) => 18446744073709551615 *** test_rotl_rotr: done *** *** test_bswap *** bswap_u64(0) => 0 bswap_u32(0) => 0 bswap_u64(1) => 
72057594037927936 bswap_u32(1) => 16777216 bswap_u64(2) => 144115188075855872 bswap_u32(2) => 33554432 bswap_u64(32768) => 36028797018963968 bswap_u32(32768) => 8388608 bswap_u64(65535) => 18446462598732840960 bswap_u32(65535) => 4294901760 bswap_u64(65536) => 1099511627776 bswap_u32(65536) => 256 bswap_u64(726075912) => 579353752079695872 bswap_u32(726075912) => 134891307 bswap_u64(858993459) => 3689348813882916864 bswap_u32(858993459) => 858993459 bswap_u64(1073741824) => 274877906944 bswap_u32(1073741824) => 64 bswap_u64(1245250552) => 17871190736326623232 bswap_u32(1245250552) => 4160960842 bswap_u64(1431655765) => 6148914689804861440 bswap_u32(1431655765) => 1431655765 bswap_u64(1656977767) => 7456205483762778112 bswap_u32(1656977767) => 1736033122 bswap_u64(2147483648) => 549755813888 bswap_u32(2147483648) => 128 bswap_u64(2283114629) => 9624216077550485504 bswap_u32(2283114629) => 2240812424 bswap_u64(2502548245) => 1574898214622986240 bswap_u32(2502548245) => 366684565 bswap_u64(4294967295) => 18446744069414584320 bswap_u32(4294967295) => 4294967295 bswap_u64(708915120906848425) => 12198156707273299465 bswap_u64(1960191741125985428) => 10665782605024080923 bswap_u64(3689348814741910323) => 3689348814741910323 bswap_u64(5578377670650038654) => 9081809480708024909 bswap_u64(9223372036854775808) => 128 bswap_u64(10755112315580060033) => 9300377440395542933 bswap_u64(11163782031541429823) => 4568596173249113498 bswap_u64(13903686156871869732) => 2604731024148591552 bswap_u64(14237897302422917095) => 16649642015867049925 bswap_u64(14302190498657618739) => 3696175971416046534 bswap_u64(15766411510232741269) => 10769528423690522074 bswap_u64(15984546468465238145) => 9322619197622375645 bswap_u64(18446744073709551615) => 18446744073709551615 *** test_bswap: done *** *** test_index *** bit_index_u64(0, *, -1) => bit_index_u32(0, *, -1) => bit_index_u64(1, *, -1) => 0 bit_index_u32(1, *, -1) => 0 bit_index_u64(2, *, -1) => 1 bit_index_u32(2, *, -1) => 1 
bit_index_u64(32768, *, -1) => 15 bit_index_u32(32768, *, -1) => 15 bit_index_u64(65535, *, -1) => 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 bit_index_u32(65535, *, -1) => 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 bit_index_u64(65536, *, -1) => 16 bit_index_u32(65536, *, -1) => 16 bit_index_u64(726075912, *, -1) => 3 9 11 16 17 18 22 24 25 27 29 bit_index_u32(726075912, *, -1) => 3 9 11 16 17 18 22 24 25 27 29 bit_index_u64(858993459, *, -1) => 0 1 4 5 8 9 12 13 16 17 20 21 24 25 28 29 bit_index_u32(858993459, *, -1) => 0 1 4 5 8 9 12 13 16 17 20 21 24 25 28 29 bit_index_u64(1073741824, *, -1) => 30 bit_index_u32(1073741824, *, -1) => 30 bit_index_u64(1245250552, *, -1) => 3 4 5 6 7 8 9 16 19 20 21 25 27 30 bit_index_u32(1245250552, *, -1) => 3 4 5 6 7 8 9 16 19 20 21 25 27 30 bit_index_u64(1431655765, *, -1) => 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 bit_index_u32(1431655765, *, -1) => 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 bit_index_u64(1656977767, *, -1) => 0 1 2 5 6 8 11 12 13 14 16 17 22 23 25 29 30 bit_index_u32(1656977767, *, -1) => 0 1 2 5 6 8 11 12 13 14 16 17 22 23 25 29 30 bit_index_u64(2147483648, *, -1) => 31 bit_index_u32(2147483648, *, -1) => 31 bit_index_u64(2283114629, *, -1) => 0 2 7 12 15 16 18 20 27 31 bit_index_u32(2283114629, *, -1) => 0 2 7 12 15 16 18 20 27 31 bit_index_u64(2502548245, *, -1) => 0 2 4 8 9 11 12 14 15 16 19 21 24 26 28 31 bit_index_u32(2502548245, *, -1) => 0 2 4 8 9 11 12 14 15 16 19 21 24 26 28 31 bit_index_u64(4294967295, *, -1) => 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 bit_index_u32(4294967295, *, -1) => 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 bit_index_u64(708915120906848425, *, -1) => 0 3 5 7 11 14 17 18 19 23 24 26 29 30 31 32 33 35 39 41 44 47 49 50 52 54 55 56 59 bit_index_u64(1960191741125985428, *, -1) => 2 4 7 10 19 20 21 22 24 26 30 31 50 52 53 56 57 59 60 bit_index_u64(3689348814741910323, *, -1) => 0 1 4 5 8 9 12 13 16 
17 20 21 24 25 28 29 32 33 36 37 40 41 44 45 48 49 52 53 56 57 60 61 bit_index_u64(5578377670650038654, *, -1) => 1 2 3 4 5 6 8 11 16 20 25 27 28 31 32 33 34 35 37 38 39 45 46 49 51 53 54 56 58 59 62 bit_index_u64(9223372036854775808, *, -1) => 63 bit_index_u64(10755112315580060033, *, -1) => 0 7 8 12 18 20 23 24 32 33 35 37 39 40 42 44 46 47 48 54 56 58 60 63 bit_index_u64(11163782031541429823, *, -1) => 0 1 2 3 4 5 9 10 13 14 17 19 21 22 23 24 25 34 35 36 39 43 44 45 47 48 50 51 53 54 55 57 59 60 63 bit_index_u64(13903686156871869732, *, -1) => 2 5 8 10 13 16 18 19 20 22 23 28 32 34 37 38 39 44 46 47 48 49 52 53 54 55 62 63 bit_index_u64(14237897302422917095, *, -1) => 0 1 2 5 6 7 8 9 10 11 16 19 21 22 24 25 28 29 32 35 37 42 43 45 48 49 50 52 55 56 58 62 63 bit_index_u64(14302190498657618739, *, -1) => 0 1 4 5 8 9 11 14 18 20 21 22 25 26 28 29 30 32 33 35 36 37 38 41 42 44 47 48 49 51 52 53 54 57 58 62 63 bit_index_u64(15766411510232741269, *, -1) => 0 2 4 7 8 10 12 13 14 16 18 19 25 27 34 35 36 37 39 40 41 43 47 48 50 51 54 55 57 59 60 62 63 bit_index_u64(15984546468465238145, *, -1) => 0 7 13 14 19 20 23 26 30 31 33 36 38 42 47 50 52 54 55 56 58 59 60 62 63 bit_index_u64(18446744073709551615, *, -1) => 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 *** test_index: done *** *** test_bit_iter *** Set: 3, 9, 11, 16, 17, 18, 22, 24, 25, 27, 29, 64, 65, 68, 69, 72, 73, 76, 77, Clear: 0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 13, 14, 15, 19, 20, 21, 23, 26, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 66, 67, 70, 71, 74, 75, 78, 79, *** test_bit_iter: done *** *** test_bit_iter_empty *** *** test_bit_iter_empty: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/bps_tree_iterator.cc0000664000000000000000000004100013306560010022154 0ustar 
rootroot#include #include #include #include #include #include #include "unit.h" struct elem_t { long first; long second; bool operator!= (const struct elem_t& another) const { return first != another.first || second != another.second; } }; static int compare(const elem_t &a, const elem_t &b); static int compare_key(const elem_t &a, long b); #define BPS_TREE_NAME test #define BPS_TREE_BLOCK_SIZE 128 /* value is to low specially for tests */ #define BPS_TREE_EXTENT_SIZE 1024 /* value is to low specially for tests */ #define BPS_TREE_COMPARE(a, b, arg) compare(a, b) #define BPS_TREE_COMPARE_KEY(a, b, arg) compare_key(a, b) #define bps_tree_elem_t struct elem_t #define bps_tree_key_t long #define bps_tree_arg_t int #include "salad/bps_tree.h" static int compare(const elem_t &a, const elem_t &b) { return a.first < b.first ? -1 : a.first > b.first ? 1 : a.second < b.second ? -1 : a.second > b.second ? 1 : 0; } static int compare_key(const elem_t &a, long b) { return a.first < b ? -1 : a.first > b ? 
1 : 0; } int total_extents_allocated = 0; static void * extent_alloc(void *ctx) { int *p_total_extents_allocated = (int *)ctx; assert(p_total_extents_allocated == &total_extents_allocated); ++*p_total_extents_allocated; return malloc(BPS_TREE_EXTENT_SIZE); } static void extent_free(void *ctx, void *extent) { int *p_total_extents_allocated = (int *)ctx; assert(p_total_extents_allocated == &total_extents_allocated); --*p_total_extents_allocated; free(extent); } static void iterator_check() { header(); test tree; test_create(&tree, 0, extent_alloc, extent_free, &total_extents_allocated); /* Stupid tests */ { test_iterator tmp1, tmp2; tmp1 = test_invalid_iterator(); tmp2 = test_invalid_iterator(); if (!test_iterator_is_invalid(&tmp1)) fail("invalid iterator is not invalid", "true"); if (!test_iterator_are_equal(&tree, &tmp1, &tmp2)) fail("invalid iterators are not equal", "true"); } /* Filing tree */ const long count1 = 10000; const long count2 = 5; for (long i = 0; i < count1; i++) { struct elem_t e; e.first = i * 2; /* note that filled with even numbers */ for (long j = 0; j < count2; j++) { e.second = j; test_insert(&tree, e, 0); } } printf("Test tree size: %d\n", (int)test_size(&tree)); /* Test that tree filled ok */ for (long i = 0; i < count1 * count2; i++) { long key = i % count1; if (test_find(&tree, key * 2) == 0) fail("Integrity check failed (1)", "true"); if (test_find(&tree, key * 2 + 1) != 0) fail("Integrity check failed (2)", "true"); } /* Print first 7 elems */ { printf("--> "); test_iterator iterator = test_iterator_first(&tree); for (int i = 0; i < 7; i++) { elem_t *elem = test_iterator_get_elem(&tree, &iterator); printf("(%ld,%ld) ", elem->first, elem->second); test_iterator_next(&tree, &iterator); } printf("\n"); } /* Print last 7 elems */ { printf("<-- "); test_iterator iterator = test_iterator_last(&tree); for (int i = 0; i < 7; i++) { elem_t *elem = test_iterator_get_elem(&tree, &iterator); printf("(%ld,%ld) ", elem->first, elem->second); 
test_iterator_prev(&tree, &iterator); } printf("\n"); } /* Iterate forward all elements 5 times */ { test_iterator iterator = test_iterator_first(&tree); for (long i = 0; i < count1 * count2 * 5; i++) { elem_t *elem = test_iterator_get_elem(&tree, &iterator); if (elem->first != ((i % (count1 * count2)) / count2) * 2) fail("iterate all failed (1)", "true"); if (elem->second != i % count2) fail("iterate all failed (2)", "true"); bool iterator_res = test_iterator_next(&tree, &iterator); if (!!iterator_res == !!test_iterator_is_invalid(&iterator)) fail("iterate all failed (3)", "true"); if (!iterator_res) { iterator_res = test_iterator_next(&tree, &iterator); if (!iterator_res || test_iterator_is_invalid(&iterator)) fail("iterate all failed (4)", "true"); } } } /* Iterate backward all elements 5 times */ { test_iterator iterator = test_iterator_last(&tree); for (long i = 0; i < count1 * count2 * 5; i++) { elem_t *elem = test_iterator_get_elem(&tree, &iterator); long j = count1 * count2 - 1 - (i % (count1 * count2)); if (elem->first != (j / count2) * 2) fail("iterate all failed (5)", "true"); if (elem->second != j % count2) fail("iterate all failed (6)", "true"); bool iterator_res = test_iterator_prev(&tree, &iterator); if (!!iterator_res == !!test_iterator_is_invalid(&iterator)) fail("iterate all failed (7)", "true"); if (!iterator_res) { iterator_res = test_iterator_prev(&tree, &iterator); if (!iterator_res || test_iterator_is_invalid(&iterator)) fail("iterate all failed (8)", "true"); } } } /* Check iterating in range from lower bound to upper bound */ /* Several probes */ const long keys[] = {-1, 0, 10, 15, count1*2 - 2, count1 * 2}; for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) { const long key = keys[i]; bool has_this_key1; test_iterator begin = test_lower_bound(&tree, key, &has_this_key1); bool has_this_key2; test_iterator end = test_upper_bound(&tree, key, &has_this_key2); if (has_this_key1 != has_this_key2) fail("Exact flag is broken", "true"); 
printf("Key %ld, %s range [%s, %s): ", key, has_this_key1 ? "not empty" : "empty", test_iterator_is_invalid(&begin) ? "eof" : "ptr", test_iterator_is_invalid(&end) ? "eof" : "ptr"); test_iterator runner = begin; while (!test_iterator_are_equal(&tree, &runner, &end)) { elem_t *elem = test_iterator_get_elem(&tree, &runner); printf("(%ld,%ld) ", elem->first, elem->second); test_iterator_next(&tree, &runner); } printf(" <-> "); runner = end; while (!test_iterator_are_equal(&tree, &runner, &begin)) { test_iterator_prev(&tree, &runner); elem_t *elem = test_iterator_get_elem(&tree, &runner); printf("(%ld,%ld) ", elem->first, elem->second); } printf("\n"); } /* Check iterating in range from lower bound to upper bound */ /* Automated */ for (long i = -1; i <= count1 + 1; i++) { test_iterator begin = test_lower_bound(&tree, i, 0); test_iterator end = test_upper_bound(&tree, i, 0); long real_count = 0; while (!test_iterator_are_equal(&tree, &begin, &end)) { elem_t *elem = test_iterator_get_elem(&tree, &begin); if (elem->first != i) fail("range iterator failed (1)", "true"); if (elem->second != real_count) fail("range iterator failed (2)", "true"); real_count++; test_iterator_next(&tree, &begin); } long must_be_count = 0; if (i >= 0 && i / 2 <= count1 - 1 && (i & 1) == 0) must_be_count = count2; if (real_count != must_be_count) fail("range iterator failed (3)", "true"); } /* Check iterating in range from lower bound to upper bound */ /* Several probes */ for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) { const long key = keys[i]; struct elem_t lower_elem_key = {key, 0}; struct elem_t upper_elem_key = {key, LONG_MAX}; test_iterator begin = test_lower_bound_elem(&tree, lower_elem_key, NULL); test_iterator end = test_upper_bound_elem(&tree, upper_elem_key, NULL); printf("Key %ld, range [%s, %s): ", key, test_iterator_is_invalid(&begin) ? "eof" : "ptr", test_iterator_is_invalid(&end) ? 
"eof" : "ptr"); test_iterator runner = begin; while (!test_iterator_are_equal(&tree, &runner, &end)) { elem_t *elem = test_iterator_get_elem(&tree, &runner); printf("(%ld,%ld) ", elem->first, elem->second); test_iterator_next(&tree, &runner); } printf(" <-> "); runner = end; while (!test_iterator_are_equal(&tree, &runner, &begin)) { test_iterator_prev(&tree, &runner); elem_t *elem = test_iterator_get_elem(&tree, &runner); printf("(%ld,%ld) ", elem->first, elem->second); } printf("\n"); } /* Check iterating in range from lower bound to upper bound */ /* Automated */ for (long i = -1; i <= count1 + 1; i++) { struct elem_t lower_elem_key = {i, 0}; struct elem_t upper_elem_key = {i, LONG_MAX}; test_iterator begin = test_lower_bound_elem(&tree, lower_elem_key, 0); test_iterator end = test_upper_bound_elem(&tree, upper_elem_key, 0); long real_count = 0; while (!test_iterator_are_equal(&tree, &begin, &end)) { elem_t *elem = test_iterator_get_elem(&tree, &begin); if (elem->first != i) fail("range iterator failed (1)", "true"); if (elem->second != real_count) fail("range iterator failed (2)", "true"); real_count++; test_iterator_next(&tree, &begin); } long must_be_count = 0; if (i >= 0 && i / 2 <= count1 - 1 && (i & 1) == 0) must_be_count = count2; if (real_count != must_be_count) fail("range iterator failed (3)", "true"); } test_destroy(&tree); footer(); } static void iterator_invalidate_check() { header(); const long test_size = 300; const long max_delete_count = 100; const long max_insert_count = 200; const long attempt_count = 100; struct test_iterator iterators[test_size]; struct test tree; /* invalidation during deletion */ srand(0); for (long attempt = 0; attempt < attempt_count; attempt++) { long del_pos = rand() % test_size; long del_cnt = rand() % max_delete_count + 1; if (del_pos + del_cnt > test_size) del_cnt = test_size - del_pos; test_create(&tree, 0, extent_alloc, extent_free, &total_extents_allocated); for (long i = 0; i < test_size; i++) { elem_t e; e.first 
= i * test_size * 2; e.second = i * test_size * 2; test_insert(&tree, e, 0); } iterators[0] = test_iterator_first(&tree); assert(test_iterator_get_elem(&tree, iterators)); for (long i = 1; i < test_size; i++) { iterators[i] = iterators[i - 1]; test_iterator_next(&tree, iterators + i); assert(test_iterator_get_elem(&tree, iterators + i)); } for (long i = del_pos; i < del_pos + del_cnt; i++) { elem_t e; e.first = i * test_size * 2; e.second = i * test_size * 2; int res = test_delete(&tree, e); assert(res == 0); } for (long i = 0; i < test_size; i++) { do { elem_t *e = test_iterator_get_elem(&tree, iterators + i); if (e) { if (e->first != e->second) fail("unexpected result of getting elem (1)", "true"); if (e->first % (test_size * 2)) fail("unexpected result of getting elem (2)", "true"); long v = e->first / (test_size * 2); if ( (v < 0 || v >= del_pos) && (v < del_pos + del_cnt || v >= test_size) ) fail("unexpected result of getting elem (3)", "true"); } } while(test_iterator_next(&tree, iterators + i)); } test_destroy(&tree); } /* invalidation during insertion */ srand(0); for (long attempt = 0; attempt < attempt_count; attempt++) { long ins_pos = rand() % test_size; long ins_cnt = rand() % max_insert_count + 1; test_create(&tree, 0, extent_alloc, extent_free, &total_extents_allocated); for (long i = 0; i < test_size; i++) { elem_t e; e.first = i * test_size * 2; e.second = i * test_size * 2; test_insert(&tree, e, 0); } iterators [0] = test_iterator_first(&tree); assert(test_iterator_get_elem(&tree, iterators)); for (long i = 1; i < test_size; i++) { iterators[i] = iterators[i - 1]; test_iterator_next(&tree, iterators + i); assert(test_iterator_get_elem(&tree, iterators + i)); } for (long i = 0; i < ins_cnt; i++) { elem_t e; e.first = ins_pos * test_size * 2 + i + 1; e.second = e.first; int res = test_insert(&tree, e, 0); assert(res == 0); } for (long i = 0; i < test_size; i++) { do { elem_t *e = test_iterator_get_elem(&tree, iterators + i); if (e) { if (e->first != 
e->second) fail("unexpected result of getting elem (4)", "true"); if (e->first % (test_size * 2)) { long v = e->first / (test_size * 2); long u = e->first % (test_size * 2); if (v != ins_pos) fail("unexpected result of getting elem (5)", "true"); if (u <= 0 || u > ins_cnt) fail("unexpected result of getting elem (6)", "true"); } else { long v = e->first / (test_size * 2); if ( (v < 0 || v >= test_size) ) fail("unexpected result of getting elem (7)", "true"); } } } while(test_iterator_next(&tree, iterators + i)); } test_destroy(&tree); } /* invalidation during deletion and insertion */ srand(0); for (long attempt = 0; attempt < attempt_count; attempt++) { long del_pos = rand() % test_size; long del_cnt = rand() % max_delete_count + 1; long ins_pos = rand() % test_size; long ins_cnt = rand() % max_insert_count + 1; if (del_pos + del_cnt > test_size) del_cnt = test_size - del_pos; test_create(&tree, 0, extent_alloc, extent_free, &total_extents_allocated); for (long i = 0; i < test_size; i++) { elem_t e; e.first = i * test_size * 2; e.second = i * test_size * 2; test_insert(&tree, e, 0); } iterators[0] = test_iterator_first(&tree); assert(test_iterator_get_elem(&tree, iterators)); for (long i = 1; i < test_size; i++) { iterators[i] = iterators[i - 1]; test_iterator_next(&tree, iterators + i); assert(test_iterator_get_elem(&tree, iterators + i)); } for (long i = del_pos; i < del_pos + del_cnt; i++) { elem_t e; e.first = i * test_size * 2; e.second = i * test_size * 2; int res = test_delete(&tree, e); assert(res == 0); } for (long i = 0; i < ins_cnt; i++) { elem_t e; e.first = ins_pos * test_size * 2 + i + 1; e.second = e.first; int res = test_insert(&tree, e, 0); assert(res == 0); } for (long i = 0; i < test_size; i++) { do { elem_t *e = test_iterator_get_elem(&tree, iterators + i); if (e) { if (e->first != e->second) fail("unexpected result of getting elem (8)", "true"); if (e->first % (test_size * 2)) { long v = e->first / (test_size * 2); long u = e->first % 
(test_size * 2); if (v != ins_pos) fail("unexpected result of getting elem (9)", "true"); if (u <= 0 || u > ins_cnt) fail("unexpected result of getting elem (a)", "true"); } else { long v = e->first / (test_size * 2); if ( (v < 0 || v >= del_pos) && (v < del_pos + del_cnt || v >= test_size) ) fail("unexpected result of getting elem (b)", "true"); } } } while(test_iterator_next(&tree, iterators + i)); } test_destroy(&tree); } footer(); } static void iterator_freeze_check() { header(); const int test_rounds_size = 10; const int test_data_size = 1000; elem_t comp_buf1[test_data_size]; elem_t comp_buf2[test_data_size]; const int test_data_mod = 2000; srand(0); struct test tree; for (int i = 0; i < 10; i++) { test_create(&tree, 0, extent_alloc, extent_free, &total_extents_allocated); int comp_buf_size1 = 0; int comp_buf_size2 = 0; for (int j = 0; j < test_data_size; j++) { elem_t e; e.first = rand() % test_data_mod; e.second = 0; test_insert(&tree, e, 0); int check = test_debug_check(&tree); fail_if(check); assert(check == 0); } struct test_iterator iterator = test_iterator_first(&tree); elem_t *e; while ((e = test_iterator_get_elem(&tree, &iterator))) { comp_buf1[comp_buf_size1++] = *e; test_iterator_next(&tree, &iterator); } struct test_iterator iterator1 = test_iterator_first(&tree); test_iterator_freeze(&tree, &iterator1); struct test_iterator iterator2 = test_iterator_first(&tree); test_iterator_freeze(&tree, &iterator2); for (int j = 0; j < test_data_size; j++) { elem_t e; e.first = rand() % test_data_mod; e.second = 0; test_insert(&tree, e, 0); int check = test_debug_check(&tree); fail_if(check); assert(check == 0); } int tested_count = 0; while ((e = test_iterator_get_elem(&tree, &iterator1))) { if (*e != comp_buf1[tested_count]) { fail("version restore failed (1)", "true"); } tested_count++; if (tested_count > comp_buf_size1) { fail("version restore failed (2)", "true"); } test_iterator_next(&tree, &iterator1); } test_iterator_destroy(&tree, &iterator1); for 
(int j = 0; j < test_data_size; j++) { elem_t e; e.first = rand() % test_data_mod; e.second = 0; test_delete(&tree, e); int check = test_debug_check(&tree); fail_if(check); assert(check == 0); } tested_count = 0; while ((e = test_iterator_get_elem(&tree, &iterator2))) { if (*e != comp_buf1[tested_count]) { fail("version restore failed (1)", "true"); } tested_count++; if (tested_count > comp_buf_size1) { fail("version restore failed (2)", "true"); } test_iterator_next(&tree, &iterator2); } test_destroy(&tree); } footer(); } int main(void) { srand(time(0)); iterator_check(); iterator_invalidate_check(); iterator_freeze_check(); if (total_extents_allocated) { fail("memory leak", "true"); } } tarantool_1.9.1.26.g63eb81e3c/test/unit/uuid.c0000664000000000000000000000415313306560010017253 0ustar rootroot#include "unit.h" #include #include static void uuid_test(struct tt_uuid a, struct tt_uuid b, int expected_result) { char a_str[UUID_STR_LEN + 1]; char b_str[UUID_STR_LEN + 1]; tt_uuid_to_string(&a, a_str); tt_uuid_to_string(&b, b_str); int cmp_result = tt_uuid_compare(&a, &b); char *sign = 0; if (cmp_result == 1) sign = ">"; else if (cmp_result == -1) sign = "<"; else sign = "="; is(cmp_result, expected_result, "%s %s %s", a_str, sign, b_str); } int main(void) { plan(2); uuid_test( (struct tt_uuid){.time_low = 1712399963, .time_mid = 34898, .time_hi_and_version = 18482, .clock_seq_hi_and_reserved = 175, .clock_seq_low = 139, .node = "Ad\325,b\353"}, (struct tt_uuid){.time_low = 409910263, .time_mid = 53143, .time_hi_and_version = 20014, .clock_seq_hi_and_reserved = 139, .clock_seq_low = 27, .node = "v\025Oo9I"}, 1); uuid_test( (struct tt_uuid){.time_low = 123421000, .time_mid = 36784, .time_hi_and_version = 11903, .clock_seq_hi_and_reserved = 175, .clock_seq_low = 80, .node = "Ad\325,b\353"}, (struct tt_uuid){.time_low = 532451999, .time_mid = 23976, .time_hi_and_version = 10437, .clock_seq_hi_and_reserved = 139, .clock_seq_low = 54, .node = "v\025Oo9I"}, -1); return 
check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_avl.c0000664000000000000000000000146613306560010020120 0ustar rootroot#include "salad/rope.h" #include "unit.h" #include "rope_common.h" /******************************************************************/ static void test_avl_rotations() { header(); struct rope *rope = test_rope_new(); /* counterclockwise single rotation. */ test_rope_insert(rope, 0, "1"); test_rope_insert(rope, 1, "2"); test_rope_insert(rope, 2, "<"); /* clockwise single rotation */ test_rope_insert(rope, 0, "0"); test_rope_insert(rope, 0, ">"); /* counterclockwise double rotation */ test_rope_insert(rope, 1, "*"); /* clocckwise double rotation */ test_rope_insert(rope, 3, "p"); test_rope_insert(rope, 3, "p"); test_rope_insert(rope, 3, "p"); test_rope_insert(rope, 3, "p"); test_rope_insert(rope, 3, "*"); rope_delete(rope); footer(); } int main() { test_avl_rotations(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber.result0000664000000000000000000000074213306560010020470 0ustar rootroot#gh-1238: log uncaught errors SystemError Failed to allocate 42 bytes in allocator for exception: Cannot allocate memory *** fiber_name_test *** # name of a new fiber: main. # set new fiber name: Horace. # fiber name is truncated: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa. 
*** fiber_name_test: done *** *** fiber_join_test *** # exception propagated # cancel dead has started # by this time the fiber should be dead already # big-stack fiber not crashed *** fiber_join_test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rtree.cc0000664000000000000000000001560113306560010017571 0ustar rootroot#include #include #include #include #include #include "unit.h" #include "salad/rtree.h" static int page_count = 0; const uint32_t extent_size = 1024 * 8; static void * extent_alloc(void *ctx) { int *p_page_count = (int *)ctx; assert(p_page_count == &page_count); ++*p_page_count; return malloc(extent_size); } static void extent_free(void *ctx, void *page) { int *p_page_count = (int *)ctx; assert(p_page_count == &page_count); --*p_page_count; free(page); } static void simple_check() { struct rtree_rect rect; struct rtree_iterator iterator; rtree_iterator_init(&iterator); const size_t rounds = 2000; header(); struct rtree tree; rtree_init(&tree, 2, extent_size, extent_alloc, extent_free, &page_count, RTREE_EUCLID); printf("Insert 1..X, remove 1..X\n"); for (size_t i = 1; i <= rounds; i++) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (rtree_search(&tree, &rect, SOP_EQUALS, &iterator)) { fail("element already in tree (1)", "true"); } rtree_insert(&tree, &rect, rec); } if (rtree_number_of_records(&tree) != rounds) { fail("Tree count mismatch (1)", "true"); } for (size_t i = 1; i <= rounds; i++) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (!rtree_search(&tree, &rect, SOP_EQUALS, &iterator)) { fail("element in tree (1)", "false"); } if (rtree_iterator_next(&iterator) != rec) { fail("right search result (1)", "true"); } if (rtree_iterator_next(&iterator)) { fail("single search result (1)", "true"); } if (!rtree_remove(&tree, &rect, rec)) { fail("delete element in tree (1)", "false"); } if (rtree_search(&tree, &rect, SOP_EQUALS, &iterator)) { fail("element still in tree (1)", "true"); } } if 
(rtree_number_of_records(&tree) != 0) { fail("Tree count mismatch (1)", "true"); } printf("Insert 1..X, remove X..1\n"); for (size_t i = 1; i <= rounds; i++) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (rtree_search(&tree, &rect, SOP_EQUALS, &iterator)) { fail("element already in tree (2)", "true"); } rtree_insert(&tree, &rect, rec); } if (rtree_number_of_records(&tree) != rounds) { fail("Tree count mismatch (2)", "true"); } for (size_t i = rounds; i != 0; i--) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (!rtree_search(&tree, &rect, SOP_OVERLAPS, &iterator)) { fail("element in tree (2)", "false"); } if (rtree_iterator_next(&iterator) != rec) { fail("right search result (2)", "true"); } if (rtree_iterator_next(&iterator)) { fail("single search result (2)", "true"); } if (!rtree_remove(&tree, &rect, rec)) { fail("delete element in tree (2)", "false"); } if (rtree_search(&tree, &rect, SOP_OVERLAPS, &iterator)) { fail("element still in tree (2)", "true"); } } if (rtree_number_of_records(&tree) != 0) { fail("Tree count mismatch (2)", "true"); } printf("Insert X..1, remove 1..X\n"); for (size_t i = rounds; i != 0; i--) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (rtree_search(&tree, &rect, SOP_BELONGS, &iterator)) { fail("element already in tree (3)", "true"); } rtree_insert(&tree, &rect, rec); } if (rtree_number_of_records(&tree) != rounds) { fail("Tree count mismatch (3)", "true"); } for (size_t i = 1; i <= rounds; i++) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (!rtree_search(&tree, &rect, SOP_BELONGS, &iterator)) { fail("element in tree (3)", "false"); } if (rtree_iterator_next(&iterator) != rec) { fail("right search result (3)", "true"); } if (rtree_iterator_next(&iterator)) { fail("single search result (3)", "true"); } if (!rtree_remove(&tree, &rect, rec)) { fail("delete element in tree (3)", "false"); } if (rtree_search(&tree, &rect, 
SOP_BELONGS, &iterator)) { fail("element still in tree (3)", "true"); } } if (rtree_number_of_records(&tree) != 0) { fail("Tree count mismatch (3)", "true"); } printf("Insert X..1, remove X..1\n"); for (size_t i = rounds; i != 0; i--) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (rtree_search(&tree, &rect, SOP_CONTAINS, &iterator)) { fail("element already in tree (4)", "true"); } rtree_insert(&tree, &rect, rec); } if (rtree_number_of_records(&tree) != rounds) { fail("Tree count mismatch (4)", "true"); } for (size_t i = rounds; i != 0; i--) { record_t rec = (record_t)i; rtree_set2d(&rect, i, i, i + 0.5, i + 0.5); if (!rtree_search(&tree, &rect, SOP_CONTAINS, &iterator)) { fail("element in tree (4)", "false"); } if (rtree_iterator_next(&iterator) != rec) { fail("right search result (4)", "true"); } if (rtree_iterator_next(&iterator)) { fail("single search result (4)", "true"); } if (!rtree_remove(&tree, &rect, rec)) { fail("delete element in tree (4)", "false"); } if (rtree_search(&tree, &rect, SOP_CONTAINS, &iterator)) { fail("element still in tree (4)", "true"); } } if (rtree_number_of_records(&tree) != 0) { fail("Tree count mismatch (4)", "true"); } rtree_purge(&tree); rtree_destroy(&tree); rtree_iterator_destroy(&iterator); footer(); } static void rtree_test_build(struct rtree *tree, struct rtree_rect *arr, int count) { for (ssize_t i = 0; i < count; i++) { record_t rec = (record_t)(i + 1); rtree_insert(tree, &arr[i], rec); } } static void neighbor_test() { header(); const unsigned int test_count = 1000; struct rtree_rect arr[test_count]; static struct rtree_rect basis; for (size_t i = 0; i < test_count; i++) { rtree_set2d(&arr[i], i, i, i + 1, i + 1); } for (size_t i = 0; i <= test_count; i++) { struct rtree tree; rtree_init(&tree, 2, extent_size, extent_alloc, extent_free, &page_count, RTREE_EUCLID); rtree_test_build(&tree, arr, i); struct rtree_iterator iterator; rtree_iterator_init(&iterator); if (!rtree_search(&tree, &basis, 
SOP_NEIGHBOR, &iterator) && i != 0) { fail("search is successful", "true"); } for (size_t j = 0; j < i; j++) { record_t rec = rtree_iterator_next(&iterator); if (rec != record_t(j+1)) { fail("wrong search result", "true"); } } rtree_iterator_destroy(&iterator); rtree_destroy(&tree); } struct rtree_iterator iterator; rtree_iterator_init(&iterator); struct rtree tree; rtree_init(&tree, 2, extent_size, extent_alloc, extent_free, &page_count, RTREE_EUCLID); if (rtree_search(&tree, &basis, SOP_NEIGHBOR, &iterator)) { fail("found in empty", "true"); } /* * Test unchecked return value from rtree_search */ record_t rec = rtree_iterator_next(&iterator); if (rec != NULL) { fail("something found from empty iterator ", "true"); } rtree_iterator_destroy(&iterator); footer(); } int main(void) { simple_check(); neighbor_test(); if (page_count != 0) { fail("memory leak!", "true"); } } tarantool_1.9.1.26.g63eb81e3c/test/unit/vclock.result0000664000000000000000000001405713306560010020666 0ustar rootroot1..5 1..40 *** test_compare *** ok 1 - compare (), () => 0 ok 2 - compare (), () => 0 ok 3 - compare (), (10) => -1 ok 4 - compare (10), () => 1 ok 5 - compare (0), (0) => 0 ok 6 - compare (0), (0) => 0 ok 7 - compare (1), (1) => 0 ok 8 - compare (1), (1) => 0 ok 9 - compare (1), (2) => -1 ok 10 - compare (2), (1) => 1 ok 11 - compare (), (10, 1, 0) => -1 ok 12 - compare (10, 1, 0), () => 1 ok 13 - compare (5), (10, 1, 0) => -1 ok 14 - compare (10, 1, 0), (5) => 1 ok 15 - compare (10), (10, 1, 0) => -1 ok 16 - compare (10, 1, 0), (10) => 1 ok 17 - compare (15), (10, 1, 0) => 2147483647 ok 18 - compare (10, 1, 0), (15) => 2147483647 ok 19 - compare (10, 1, 0), (10, 1, 1) => -1 ok 20 - compare (10, 1, 1), (10, 1, 0) => 1 ok 21 - compare (10, 1, 0), (10, 2, 0) => -1 ok 22 - compare (10, 2, 0), (10, 1, 0) => 1 ok 23 - compare (10, 1, 0), (10, 1, 0) => 0 ok 24 - compare (10, 1, 0), (10, 1, 0) => 0 ok 25 - compare (10, 0, 1), (10, 1, 0) => 2147483647 ok 26 - compare (10, 1, 0), (10, 0, 1) 
=> 2147483647 ok 27 - compare (10, 2, 1), (10, 1, 2) => 2147483647 ok 28 - compare (10, 1, 2), (10, 2, 1) => 2147483647 ok 29 - compare (10, 0, 1), (11, 0, 0) => 2147483647 ok 30 - compare (11, 0, 0), (10, 0, 1) => 2147483647 ok 31 - compare (10, 0, 5), (5, 0, 10) => 2147483647 ok 32 - compare (5, 0, 10), (10, 0, 5) => 2147483647 ok 33 - compare (10, 10, 10), (10, 10, 10) => 0 ok 34 - compare (10, 10, 10), (10, 10, 10) => 0 ok 35 - compare (10, 10, 10), (10, 10, 10, 1) => -1 ok 36 - compare (10, 10, 10, 1), (10, 10, 10) => 1 ok 37 - compare (10, 10, 10), (10, 10, 10, 1, 2, 3) => -1 ok 38 - compare (10, 10, 10, 1, 2, 3), (10, 10, 10) => 1 ok 39 - compare (0, 0, 0), (10, 0, 0, 0, 0) => -1 ok 40 - compare (10, 0, 0, 0, 0), (0, 0, 0) => 1 *** test_compare: done *** ok 1 - subtests 1..36 *** test_isearch *** ok 1 - query #1 ok 2 - query #2 ok 3 - query #3 ok 4 - query #4 ok 5 - query #5 ok 6 - query #6 ok 7 - query #7 ok 8 - query #8 ok 9 - query #9 ok 10 - query #10 ok 11 - query #11 ok 12 - query #12 ok 13 - query #13 ok 14 - query #14 ok 15 - query #15 ok 16 - query #16 ok 17 - query #17 ok 18 - query #18 ok 19 - query #19 ok 20 - query #20 ok 21 - query #21 ok 22 - query #22 ok 23 - query #23 ok 24 - query #24 ok 25 - query #25 ok 26 - query #26 ok 27 - query #27 ok 28 - query #28 ok 29 - query #29 ok 30 - query #30 ok 31 - query #31 ok 32 - query #32 ok 33 - query #33 ok 34 - query #34 ok 35 - query #35 ok 36 - query #36 *** test_isearch: done *** ok 2 - subtests 1..8 *** test_tostring *** ok 1 - tostring () => {} ok 2 - tostring (-1, -1, -1) => {} ok 3 - tostring (1) => {0: 1} ok 4 - tostring (1, 2) => {0: 1, 1: 2} ok 5 - tostring (10, 15, 20) => {0: 10, 1: 15, 2: 20} ok 6 - tostring (10, -1, 15, -1, 20) => {0: 10, 2: 15, 4: 20} ok 7 - tostring (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) => {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15} ok 8 - tostring (9223372036854775000, 9223372036854775001, 
9223372036854775002, 9223372036854775003, 9223372036854775004, 9223372036854775005, 9223372036854775006, 9223372036854775007, 9223372036854775008, 9223372036854775009, 9223372036854775010, 9223372036854775011, 9223372036854775012, 9223372036854775013, 9223372036854775014, 9223372036854775015) => {0: 9223372036854775000, 1: 9223372036854775001, 2: 9223372036854775002, 3: 9223372036854775003, 4: 9223372036854775004, 5: 9223372036854775005, 6: 9223372036854775006, 7: 9223372036854775007, 8: 9223372036854775008, 9: 9223372036854775009, 10: 9223372036854775010, 11: 9223372036854775011, 12: 9223372036854775012, 13: 9223372036854775013, 14: 9223372036854775014, 15: 9223372036854775015} *** test_tostring: done *** ok 3 - subtests 1..12 *** test_fromstring *** ok 1 - fromstring {} => () ok 2 - fromstring { } => () ok 3 - fromstring {0: 10} => (10) ok 4 - fromstring {0: 10,} => (10) ok 5 - fromstring { 0 : 10 , } => (10) ok 6 - fromstring {0: 10, 1: 15, 3: 20} => (10, 15, -1, 20) ok 7 - fromstring {2: 20, 0: 10, 4: 30} => (10, -1, 20, -1, 30) ok 8 - fromstring {4: 30, 2: 20} => (-1, -1, 20, -1, 30) ok 9 - fromstring {4: 30, 2: 20,} => (-1, -1, 20, -1, 30) ok 10 - fromstring {0: 4294967295} => (4294967295) ok 11 - fromstring {0: 4294967296} => (4294967296) ok 12 - fromstring {0: 9223372036854775807} => (9223372036854775807) *** test_fromstring: done *** ok 4 - subtests 1..32 *** test_fromstring_invalid *** ok 1 - fromstring "" => 1 ok 2 - fromstring " " => 2 ok 3 - fromstring " " => 7 ok 4 - fromstring "}" => 1 ok 5 - fromstring "1: 10" => 1 ok 6 - fromstring "abcde" => 1 ok 7 - fromstring "12345" => 1 ok 8 - fromstring "" => 1 ok 9 - fromstring "{" => 2 ok 10 - fromstring "{1 " => 5 ok 11 - fromstring "{1: " => 6 ok 12 - fromstring "{1:10" => 6 ok 13 - fromstring "{1:10 " => 8 ok 14 - fromstring "{1:10," => 7 ok 15 - fromstring "{1:10, " => 11 ok 16 - fromstring "{1:10 2:20" => 7 ok 17 - fromstring "{1:10,," => 7 ok 18 - fromstring "{1:10, 10,}" => 10 ok 19 - fromstring 
"{1:-1}" => 4 ok 20 - fromstring "{-1:1}" => 2 ok 21 - fromstring "{128:1}" => 5 ok 22 - fromstring "{1:abcde}" => 4 ok 23 - fromstring "{abcde:1}" => 2 ok 24 - fromstring "{1:1.1}" => 5 ok 25 - fromstring "{1.1:1}" => 3 ok 26 - fromstring "{4294967296:1}" => 12 ok 27 - fromstring "{1:9223372036854775808}" => 23 ok 28 - fromstring "{1:18446744073709551616}" => 24 ok 29 - fromstring "{1:18446744073709551616}" => 24 ok 30 - fromstring "{1:340282366920938463463374607431768211456}" => 43 ok 31 - fromstring "{1:10, 1:20}" => 12 ok 32 - fromstring "{1:20, 1:10}" => 12 *** test_fromstring_invalid: done *** ok 5 - subtests tarantool_1.9.1.26.g63eb81e3c/test/unit/find_path.result0000664000000000000000000000004213306560010021326 0ustar rootroot *** main *** *** main: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/find_path.c0000664000000000000000000000036413306560010020241 0ustar rootroot#include #include #include #include "unit.h" extern const char * find_path(const char *); int main(int argc, char *argv[]) { header(); fail_unless(open(find_path(argv[0]), O_RDONLY) >= 0); footer(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/coll.result0000664000000000000000000000210413306565107020340 0ustar rootroot *** manual_test *** -- default ru_RU -- 123 45 GREATER Б GREATER бб GREATER е GREATER ё GREATER Ё GREATER ЕЕЕЕ GREATER и GREATER И GREATER -- --||-- + upper first -- 123 45 GREATER Б GREATER бб GREATER е GREATER Ё GREATER ё GREATER ЕЕЕЕ GREATER И GREATER и GREATER -- --||-- + lower first -- 123 45 GREATER Б GREATER бб GREATER е GREATER ё GREATER Ё GREATER ЕЕЕЕ GREATER и GREATER И GREATER -- --||-- + secondary strength + numeric -- 45 123 GREATER Б GREATER бб GREATER е GREATER ё GREATER Ё EQUAL ЕЕЕЕ GREATER и GREATER И EQUAL -- --||-- + case level -- 45 123 GREATER Б GREATER бб GREATER е GREATER ё GREATER Ё GREATER ЕЕЕЕ GREATER и GREATER И GREATER -- en_EN -- aa bb GREATER cc GREATER ch GREATER dd GREATER gg GREATER hh GREATER ii GREATER -- cs_CZ -- aa bb GREATER cc GREATER dd 
GREATER gg GREATER hh GREATER ch GREATER ii GREATER *** manual_test: done *** *** hash_test *** Case sensitive OK OK OK Case insensitive OK OK OK *** hash_test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_avl.result0000664000000000000000000001023613306560010021207 0ustar rootroot *** test_avl_rotations *** insert offset = 0, str = '1' size = 1 string = '1' └──{ len = 1, height = 1, data = '1'} insert offset = 1, str = '2' size = 2 string = '12' │ ┌──nil └──{ len = 1, height = 2, data = '1'} └──{ len = 1, height = 1, data = '2'} insert offset = 2, str = '<' size = 3 string = '12<' │ ┌──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 2, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 0, str = '0' size = 4 string = '012<' │ ┌──{ len = 1, height = 1, data = '0'} │ ┌──{ len = 1, height = 2, data = '1'} │ │ └──nil └──{ len = 1, height = 3, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 0, str = '>' size = 5 string = '>012<' │ ┌──{ len = 1, height = 1, data = '>'} │ ┌──{ len = 1, height = 2, data = '0'} │ │ └──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 3, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 1, str = '*' size = 6 string = '>*012<' │ ┌──nil │ ┌──{ len = 1, height = 2, data = '>'} │ │ └──{ len = 1, height = 1, data = '*'} └──{ len = 1, height = 3, data = '0'} │ ┌──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 2, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 3, str = 'p' size = 7 string = '>*0p12<' │ ┌──nil │ ┌──{ len = 1, height = 2, data = '>'} │ │ └──{ len = 1, height = 1, data = '*'} └──{ len = 1, height = 4, data = '0'} │ ┌──{ len = 1, height = 1, data = 'p'} │ ┌──{ len = 1, height = 2, data = '1'} │ │ └──nil └──{ len = 1, height = 3, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 3, str = 'p' size = 8 string = '>*0pp12<' │ ┌──nil │ ┌──{ len = 1, height = 2, data = '>'} │ │ └──{ len = 1, height = 1, data = '*'} └──{ len 
= 1, height = 4, data = '0'} │ ┌──{ len = 1, height = 1, data = 'p'} │ ┌──{ len = 1, height = 2, data = 'p'} │ │ └──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 3, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 3, str = 'p' size = 9 string = '>*0ppp12<' │ ┌──nil │ ┌──{ len = 1, height = 2, data = '>'} │ │ └──{ len = 1, height = 1, data = '*'} └──{ len = 1, height = 4, data = '0'} │ ┌──{ len = 1, height = 1, data = 'p'} │ ┌──{ len = 1, height = 2, data = 'p'} │ │ └──nil └──{ len = 1, height = 3, data = 'p'} │ ┌──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 2, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 3, str = 'p' size = 10 string = '>*0pppp12<' │ ┌──nil │ ┌──{ len = 1, height = 2, data = '>'} │ │ └──{ len = 1, height = 1, data = '*'} └──{ len = 1, height = 4, data = '0'} │ ┌──{ len = 1, height = 1, data = 'p'} │ ┌──{ len = 1, height = 2, data = 'p'} │ │ └──{ len = 1, height = 1, data = 'p'} └──{ len = 1, height = 3, data = 'p'} │ ┌──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 2, data = '2'} └──{ len = 1, height = 1, data = '<'} insert offset = 3, str = '*' size = 11 string = '>*0*pppp12<' │ ┌──nil │ ┌──{ len = 1, height = 2, data = '>'} │ │ └──{ len = 1, height = 1, data = '*'} │ ┌──{ len = 1, height = 3, data = '0'} │ │ │ ┌──{ len = 1, height = 1, data = '*'} │ │ └──{ len = 1, height = 2, data = 'p'} │ │ └──nil └──{ len = 1, height = 4, data = 'p'} │ ┌──{ len = 1, height = 1, data = 'p'} └──{ len = 1, height = 3, data = 'p'} │ ┌──{ len = 1, height = 1, data = '1'} └──{ len = 1, height = 2, data = '2'} └──{ len = 1, height = 1, data = '<'} *** test_avl_rotations: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rmean.cc0000664000000000000000000000341313306560010017550 0ustar rootroot#include "rmean.h" #include "memory.h" #include "unit.h" #include "fiber.h" int print_stat(const char *name, int rps, int64_t total, void* ctx) { printf("%s: rps %d, total %d%c", name, rps, (int)total, 
name[2] == '2' ? '\n' : '\t'); return 0; } void test_100rps(rmean *st) { header(); printf("Send 100 requests every second for 10 seconds\n"); printf("Calc rps at third and last second\n"); for(int i = 0; i < 10; i++) { /* 10 seconds */ rmean_collect(st, 0, 100); /* send 100 requests */ rmean_roll(st->stats[0].value, 1); rmean_roll(st->stats[1].value, 1); if (i == 2 || i == 9) { /* two checks */ print_stat(st->stats[0].name, rmean_mean(st, 0), rmean_total(st, 0), NULL); print_stat(st->stats[1].name, rmean_mean(st, 1), rmean_total(st, 1), NULL); } } /* 10 seconds, 1000 in EV1, 100 rps */ footer(); } void test_mean15rps(rmean *st) { header(); printf("Send 15 rps on the average, and 3 rps to EV2\n"); for(int i = 0; i < 10; i++) { /* 10 seconds */ for(int j = 0; j < 15; j++) { rmean_collect(st, 0, 1); /* send 15 requests */ if((i * 3 + 2 + j) % 15 == 0) { rmean_roll(st->stats[0].value, 1); rmean_roll(st->stats[1].value, 1); } } rmean_collect(st, 1, 3); } print_stat(st->stats[0].name, rmean_mean(st, 0), rmean_total(st, 0), NULL); print_stat(st->stats[1].name, rmean_mean(st, 1), rmean_total(st, 1), NULL); /* 10 seconds, 1000 + 150 in EV1, 15 rps. 30 in EV2, 3 rps*/ footer(); } int main() { printf("Stat. 
2 names, timer simulation\n"); memory_init(); fiber_init(fiber_cxx_invoke); struct rmean *st; const char *name[] = {"EV1", "EV2"}; st = rmean_new(name, 2); test_100rps(st); test_mean15rps(st); rmean_delete(st); fiber_free(); memory_free(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/uuid.result0000664000000000000000000000025313306560010020344 0ustar rootroot1..2 ok 1 - 6611265b-8852-4832-af8b-4164d52c62eb > 186ebbf7-cf97-4e2e-8b1b-76154f6f3949 ok 2 - 075b4148-8fb0-2e7f-af50-4164d52c62eb < 1fbc929f-5da8-28c5-8b36-76154f6f3949 tarantool_1.9.1.26.g63eb81e3c/test/unit/heap_iterator.result0000664000000000000000000000040213306560010022220 0ustar rootroot *** test_iterator_create *** *** test_iterator_create: done *** *** test_iterator_empty *** *** test_iterator_empty: done *** *** test_iterator_small *** *** test_iterator_small: done *** *** test_iterator_large *** *** test_iterator_large: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/reflection_cxx.cc0000664000000000000000000001107513306560010021465 0ustar rootroot#include "unit.h" #include #include "reflection.h" extern const struct type_info type_Object; struct Object { Object() : type(&type_Object) {} virtual ~Object() {} const struct type_info *type; Object(const struct type_info *type_arg) : type(type_arg) {} }; const struct type_info type_Object = make_type("Object", NULL); extern const struct type_info type_Database; struct Database: public Object { Database() : Object(&type_Database), m_int(0), m_str{'\0'} {} virtual const char * getString() const { return m_str; } virtual void putString(const char *str) { snprintf(m_str, sizeof(m_str), "%s", str); } virtual int getInt() const { return m_int; } virtual void putInt(int val) { m_int = val; } protected: Database(const struct type_info *type) : Object(type), m_int(0), m_str{'\0'} {} int m_int; char m_str[128]; }; static const struct method_info database_methods[] = { make_method(&type_Database, "getString", &Database::getString), 
make_method(&type_Database, "getInt", &Database::getInt), make_method(&type_Database, "putString", &Database::putString), make_method(&type_Database, "putInt", &Database::putInt), METHODS_SENTINEL }; const struct type_info type_Database = make_type("Database", &type_Object, database_methods); extern const struct type_info type_Tarantool; struct Tarantool: public Database { Tarantool() : Database(&type_Tarantool) {} void inc() { ++m_int; } }; static const struct method_info tarantool_methods[] = { make_method(&type_Tarantool, "inc", &Tarantool::inc), METHODS_SENTINEL }; const struct type_info type_Tarantool = make_type("Tarantool", &type_Database, tarantool_methods); int main() { plan(30); Object obj; Tarantool tntobj; const struct method_info *get_string = type_method_by_name(tntobj.type, "getString"); const struct method_info *put_string = type_method_by_name(tntobj.type, "putString"); const struct method_info *get_int = type_method_by_name(tntobj.type, "getInt"); const struct method_info *put_int = type_method_by_name(tntobj.type, "putInt"); const struct method_info *inc = type_method_by_name(tntobj.type, "inc"); /* struct type_info members */ ok(strcmp(type_Object.name, "Object") == 0, "type.name"); is(type_Object.parent, NULL, "type.parent"); is(type_Database.parent, &type_Object, "type.parent"); /* inheritance */ ok(type_assignable(&type_Object, &type_Tarantool), "is_instance"); ok(type_assignable(&type_Database, &type_Tarantool), "is_instance"); ok(type_assignable(&type_Tarantool, &type_Tarantool), "is_instance"); ok(!type_assignable(&type_Tarantool, &type_Database), "is_instance"); /* methods */ const char *methods_order[] = { "inc", "getString", "getInt", "putString", "putInt" }; int i = 0; type_foreach_method(&type_Tarantool, method) { ok(strcmp(method->name, methods_order[i]) == 0, "methods order"); ++i; } /* * struct method_info members */ is(get_string->owner, &type_Database, "method.owner"); ok(strcmp(get_string->name, "getString") == 0, 
"method.name"); is(get_string->rtype, CTYPE_CONST_CHAR_PTR, "method.rtype (non void)"); is(put_string->rtype, CTYPE_VOID, "method.rtype (void)"); is(get_string->nargs, 0, "method.nargs (zero)"); is(put_string->nargs, 1, "method.nargs (non-zero)"); is(put_string->atype[0], CTYPE_CONST_CHAR_PTR, "method.atype"); is(get_string->isconst, true, "method.isconst"); is(put_string->isconst, false, "!method.isconst"); /* * Invokable */ ok(!method_invokable(get_string, &tntobj), "!invokable"); ok(!(method_invokable (get_string, &tntobj)), "!invokable"); ok(!method_invokable(get_string, &obj), "!invokable<>(invalid object)"); ok(method_invokable(get_string, &tntobj), "invokable"); ok((method_invokable(put_string, &tntobj)), "invokable"); /* * Invoke */ /* int */ method_invoke(put_int, &tntobj, 48); int iret = method_invoke(get_int, &tntobj); is(iret, 48, "invoke (int)"); /* const char */ method_invoke(put_string, &tntobj, "test string"); const char *sret = method_invoke(get_string, &tntobj); ok(strcmp(sret, "test string") == 0, "invoke (const char *)"); method_invoke(inc, &tntobj); iret = method_invoke(get_int, &tntobj); is(iret, 49, "invoke (void)"); const Tarantool *tntconstptr = &tntobj; ok((!method_invokable(put_string, tntconstptr)), "!invokable<>() on const method with non-const object"); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/coio.result0000664000000000000000000000031713306560010020330 0ustar rootroot *** stat_timeout_test *** *** stat_timeout_test: done *** *** stat_notify_test *** # filename: 1.out *** stat_notify_test: done *** *** test_call_f *** # call done with res 0 *** test_call_f: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/base64.result0000664000000000000000000000702113306560010020462 0ustar rootroot1..28 *** main *** 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 1 - subtests 1..6 ok 1 - length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 2 - 
subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 3 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 4 - subtests 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 5 - subtests 1..6 ok 1 - length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 6 - subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 7 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 8 - subtests 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 9 - subtests 1..6 ok 1 - length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 10 - subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 11 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 12 - subtests 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 13 - subtests 1..6 ok 1 - length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 14 - subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 15 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 16 - subtests 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 17 - subtests 1..6 ok 1 - length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 18 - subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 19 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 20 - subtests 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 21 - subtests 1..6 ok 1 
- length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 22 - subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 23 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 24 - subtests 1..3 ok 1 - length ok 2 - decode length ok ok 3 - encode/decode ok 25 - subtests 1..6 ok 1 - length ok 2 - no \n symbols ok 3 - no + symbols ok 4 - no = symbols ok 5 - decode length ok ok 6 - encode/decode ok 26 - subtests 1..4 ok 1 - length ok 2 - no = symbols ok 3 - decode length ok ok 4 - encode/decode ok 27 - subtests 1..4 ok 1 - length ok 2 - no \n symbols ok 3 - decode length ok ok 4 - encode/decode ok 28 - subtests *** main: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_point_lookup.result0000664000000000000000000000101113306565107022643 0ustar rootroot1..1 *** test_basic *** 1..15 ok 1 - vy_index_env_create ok 2 - key_def is not NULL ok 3 - tuple_format_new is not NULL ok 4 - index is not NULL ok 5 - range is not NULL ok 6 - temp dir name is not NULL ok 7 - temp dir create (2) ok 8 - temp dir create (3) ok 9 - vy_index_rotate_mem ok 10 - vy_run_new ok 11 - vy_run_write ok 12 - vy_run_new ok 13 - vy_run_write ok 14 - select results ok 15 - no errors happened ok 1 - subtests *** test_basic: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/bloom.result0000664000000000000000000000053413306565107020524 0ustar rootroot*** simple_test *** error_count = 0 fp_rate_too_big = 0 memory after destruction = 0 *** store_load_test *** error_count = 0 fp_rate_too_big = 0 memory after destruction = 0 *** spectrum_test *** bloom table size = 128 error_count = 0 fpr_rate_is_good = 1 bloom table size = 128 error_count = 0 fpr_rate_is_good = 1 memory after destruction = 0 tarantool_1.9.1.26.g63eb81e3c/test/unit/fiber_cond.result0000664000000000000000000000015613306560010021472 0ustar rootroot1..7 ok 1 - timeout ok 2 - timeout ok 3 - signal ok 4 
- signal ok 5 - order ok 6 - broadcast ok 7 - broadcast tarantool_1.9.1.26.g63eb81e3c/test/unit/base64.c0000664000000000000000000000412013306560010017363 0ustar rootroot#include #include "unit.h" #include "trivia/util.h" #include static void base64_test(const char *str, int options, const char *no_symbols, int no_symbols_len) { plan(3 + no_symbols_len); int len = strlen(str); int base64_buflen = base64_bufsize(len + 1, options); char *base64_buf = malloc(base64_buflen); char *strbuf = malloc(len + 1); int rc = base64_encode(str, len + 1, base64_buf, base64_buflen, options); ok(rc <= base64_buflen, "length"); for (int i = 0; i < no_symbols_len; ++i) { char c = no_symbols[i]; if (c == '\n') { is(memchr(base64_buf, no_symbols[i], base64_buflen), NULL, "no \\n symbols"); } else { is(memchr(base64_buf, no_symbols[i], base64_buflen), NULL, "no %c symbols", no_symbols[i]); } } is(base64_decode(base64_buf, rc, strbuf, len + 1), len + 1, "decode length ok"); is(strcmp(str, strbuf), 0, "encode/decode"); free(base64_buf); free(strbuf); check_plan(); } static void base64_urlsafe_test(const char *str) { const char symbols[] = { '\n', '+', '=' }; base64_test(str, BASE64_URLSAFE, symbols, lengthof(symbols)); } static void base64_nopad_test(const char *str) { const char symbols[] = { '=' }; base64_test(str, BASE64_NOPAD, symbols, lengthof(symbols)); } static void base64_nowrap_test(const char *str) { const char symbols[] = { '\n' }; base64_test(str, BASE64_NOWRAP, symbols, lengthof(symbols)); } int main(int argc, char *argv[]) { plan(28); header(); const char *option_tests[] = { "", "a", "123", "1234567", "12345678", "\001\002\003\004\005\006\253\254\255", "Test +/+/+/ test test test test test test test test test "\ "test test test test test test test test test test test test "\ "test test test test test test test test test test test test "\ "test test test test test test test test test test\n\n" }; for (size_t i = 0; i < lengthof(option_tests); ++i) { base64_test(option_tests[i], 
0, NULL, 0); base64_urlsafe_test(option_tests[i]); base64_nopad_test(option_tests[i]); base64_nowrap_test(option_tests[i]); } footer(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/stailq.result0000664000000000000000000000435213306560010020677 0ustar rootroot1..68 ok 1 - list is empty ok 2 - list is empty after reverse ok 3 - first item ok 4 - last item ok 5 - element (foreach) 0 ok 6 - element (foreach) 1 ok 7 - element (foreach) 2 ok 8 - element (foreach) 3 ok 9 - element (foreach) 4 ok 10 - element (foreach) 5 ok 11 - element (foreach) 6 ok 12 - first item ok 13 - head is not empty ok 14 - first entry ok 15 - shift item 0 ok 16 - shift item 1 ok 17 - shift item 2 ok 18 - shift item 3 ok 19 - shift item 4 ok 20 - shift item 5 ok 21 - shift item 6 ok 22 - list is empty after shift ok 23 - next is empty ok 24 - element (foreach_entry) 6 ok 25 - element (foreach_entry) 5 ok 26 - element (foreach_entry) 4 ok 27 - element (foreach_entry) 3 ok 28 - element (foreach_entry) 2 ok 29 - element (foreach_entry) 1 ok 30 - element (foreach_entry) 0 ok 31 - element (foreach_entry) 0 ok 32 - element (foreach_entry) 1 ok 33 - element (foreach_entry) 2 ok 34 - element (foreach_entry) 3 ok 35 - element (foreach_entry) 4 ok 36 - element (foreach_entry) 5 ok 37 - element (foreach_entry) 6 ok 38 - head is empty after cut at first ok 39 - tail element after cut at first 0 ok 40 - tail element after cut at first 1 ok 41 - tail element after cut at first 2 ok 42 - tail element after cut at first 3 ok 43 - tail element after cut at first 4 ok 44 - tail element after cut at first 5 ok 45 - tail element after cut at first 6 ok 46 - tail is empty after cut at last ok 47 - head element after cut at last 0 ok 48 - head element after cut at last 1 ok 49 - head element after cut at last 2 ok 50 - head element after cut at last 3 ok 51 - head element after cut at last 4 ok 52 - head element after cut at last 5 ok 53 - head element after cut at last 6 ok 54 - head element after 
cut at middle 0 ok 55 - head element after cut at middle 1 ok 56 - head element after cut at middle 2 ok 57 - head element after cut at middle 3 ok 58 - tail element after cut at middle 4 ok 59 - tail element after cut at middle 5 ok 60 - tail element after cut at middle 6 ok 61 - tail is empty after concat ok 62 - head element after concat 0 ok 63 - head element after concat 1 ok 64 - head element after concat 2 ok 65 - head element after concat 3 ok 66 - head element after concat 4 ok 67 - head element after concat 5 ok 68 - head element after concat 6 tarantool_1.9.1.26.g63eb81e3c/test/unit/bitset_iterator.result0000664000000000000000000000113113306560010022575 0ustar rootroot *** test_empty_expr *** *** test_empty_expr: done *** *** test_empty_expr_conj1 *** *** test_empty_expr_conj1: done *** *** test_empty_expr_conj2 *** *** test_empty_expr_conj2: done *** *** test_empty_result *** *** test_empty_result: done *** *** test_first_result *** *** test_first_result: done *** *** test_simple *** *** test_simple: done *** *** test_big *** Setting bits... ok Iterating... 
ok *** test_big: done *** *** test_not_empty *** *** test_not_empty: done *** *** test_not_last *** *** test_not_last: done *** *** test_disjunction *** *** test_disjunction: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/msgpack.result0000664000000000000000000021602513306560010021031 0ustar rootroot1..20 1..135 # *** test_uints *** # uint 0U ok 1 - mp_check_uint(0U) == 0 ok 2 - mp_decode(mp_encode(0U)) == 0U ok 3 - mp_check(0U) ok 4 - len(mp_encode_uint(0U) ok 5 - len(mp_decode_uint(0U)) ok 6 - len(mp_next_uint(0U)) ok 7 - len(mp_check_uint(0U)) ok 8 - mp_sizeof_uint(0U) ok 9 - mp_encode(0U) == "\x00" # uint 1U ok 10 - mp_check_uint(1U) == 0 ok 11 - mp_decode(mp_encode(1U)) == 1U ok 12 - mp_check(1U) ok 13 - len(mp_encode_uint(1U) ok 14 - len(mp_decode_uint(1U)) ok 15 - len(mp_next_uint(1U)) ok 16 - len(mp_check_uint(1U)) ok 17 - mp_sizeof_uint(1U) ok 18 - mp_encode(1U) == "\x01" # uint 0x7eU ok 19 - mp_check_uint(0x7eU) == 0 ok 20 - mp_decode(mp_encode(0x7eU)) == 0x7eU ok 21 - mp_check(0x7eU) ok 22 - len(mp_encode_uint(0x7eU) ok 23 - len(mp_decode_uint(0x7eU)) ok 24 - len(mp_next_uint(0x7eU)) ok 25 - len(mp_check_uint(0x7eU)) ok 26 - mp_sizeof_uint(0x7eU) ok 27 - mp_encode(0x7eU) == "\x7e" # uint 0x7fU ok 28 - mp_check_uint(0x7fU) == 0 ok 29 - mp_decode(mp_encode(0x7fU)) == 0x7fU ok 30 - mp_check(0x7fU) ok 31 - len(mp_encode_uint(0x7fU) ok 32 - len(mp_decode_uint(0x7fU)) ok 33 - len(mp_next_uint(0x7fU)) ok 34 - len(mp_check_uint(0x7fU)) ok 35 - mp_sizeof_uint(0x7fU) ok 36 - mp_encode(0x7fU) == "\x7f" # uint 0x80U ok 37 - mp_check_uint(0x80U) == 0 ok 38 - mp_decode(mp_encode(0x80U)) == 0x80U ok 39 - mp_check(0x80U) ok 40 - len(mp_encode_uint(0x80U) ok 41 - len(mp_decode_uint(0x80U)) ok 42 - len(mp_next_uint(0x80U)) ok 43 - len(mp_check_uint(0x80U)) ok 44 - mp_sizeof_uint(0x80U) ok 45 - mp_encode(0x80U) == "\xcc\x80" # uint 0xfeU ok 46 - mp_check_uint(0xfeU) == 0 ok 47 - mp_decode(mp_encode(0xfeU)) == 0xfeU ok 48 - mp_check(0xfeU) ok 49 - 
len(mp_encode_uint(0xfeU) ok 50 - len(mp_decode_uint(0xfeU)) ok 51 - len(mp_next_uint(0xfeU)) ok 52 - len(mp_check_uint(0xfeU)) ok 53 - mp_sizeof_uint(0xfeU) ok 54 - mp_encode(0xfeU) == "\xcc\xfe" # uint 0xffU ok 55 - mp_check_uint(0xffU) == 0 ok 56 - mp_decode(mp_encode(0xffU)) == 0xffU ok 57 - mp_check(0xffU) ok 58 - len(mp_encode_uint(0xffU) ok 59 - len(mp_decode_uint(0xffU)) ok 60 - len(mp_next_uint(0xffU)) ok 61 - len(mp_check_uint(0xffU)) ok 62 - mp_sizeof_uint(0xffU) ok 63 - mp_encode(0xffU) == "\xcc\xff" # uint 0xfffeU ok 64 - mp_check_uint(0xfffeU) == 0 ok 65 - mp_decode(mp_encode(0xfffeU)) == 0xfffeU ok 66 - mp_check(0xfffeU) ok 67 - len(mp_encode_uint(0xfffeU) ok 68 - len(mp_decode_uint(0xfffeU)) ok 69 - len(mp_next_uint(0xfffeU)) ok 70 - len(mp_check_uint(0xfffeU)) ok 71 - mp_sizeof_uint(0xfffeU) ok 72 - mp_encode(0xfffeU) == "\xcd\xff\xfe" # uint 0xffffU ok 73 - mp_check_uint(0xffffU) == 0 ok 74 - mp_decode(mp_encode(0xffffU)) == 0xffffU ok 75 - mp_check(0xffffU) ok 76 - len(mp_encode_uint(0xffffU) ok 77 - len(mp_decode_uint(0xffffU)) ok 78 - len(mp_next_uint(0xffffU)) ok 79 - len(mp_check_uint(0xffffU)) ok 80 - mp_sizeof_uint(0xffffU) ok 81 - mp_encode(0xffffU) == "\xcd\xff\xff" # uint 0x10000U ok 82 - mp_check_uint(0x10000U) == 0 ok 83 - mp_decode(mp_encode(0x10000U)) == 0x10000U ok 84 - mp_check(0x10000U) ok 85 - len(mp_encode_uint(0x10000U) ok 86 - len(mp_decode_uint(0x10000U)) ok 87 - len(mp_next_uint(0x10000U)) ok 88 - len(mp_check_uint(0x10000U)) ok 89 - mp_sizeof_uint(0x10000U) ok 90 - mp_encode(0x10000U) == "\xce\x00\x01\x00\x00" # uint 0xfffffffeU ok 91 - mp_check_uint(0xfffffffeU) == 0 ok 92 - mp_decode(mp_encode(0xfffffffeU)) == 0xfffffffeU ok 93 - mp_check(0xfffffffeU) ok 94 - len(mp_encode_uint(0xfffffffeU) ok 95 - len(mp_decode_uint(0xfffffffeU)) ok 96 - len(mp_next_uint(0xfffffffeU)) ok 97 - len(mp_check_uint(0xfffffffeU)) ok 98 - mp_sizeof_uint(0xfffffffeU) ok 99 - mp_encode(0xfffffffeU) == "\xce\xff\xff\xff\xfe" # uint 0xffffffffU ok 
100 - mp_check_uint(0xffffffffU) == 0 ok 101 - mp_decode(mp_encode(0xffffffffU)) == 0xffffffffU ok 102 - mp_check(0xffffffffU) ok 103 - len(mp_encode_uint(0xffffffffU) ok 104 - len(mp_decode_uint(0xffffffffU)) ok 105 - len(mp_next_uint(0xffffffffU)) ok 106 - len(mp_check_uint(0xffffffffU)) ok 107 - mp_sizeof_uint(0xffffffffU) ok 108 - mp_encode(0xffffffffU) == "\xce\xff\xff\xff\xff" # uint 0x100000000ULL ok 109 - mp_check_uint(0x100000000ULL) == 0 ok 110 - mp_decode(mp_encode(0x100000000ULL)) == 0x100000000ULL ok 111 - mp_check(0x100000000ULL) ok 112 - len(mp_encode_uint(0x100000000ULL) ok 113 - len(mp_decode_uint(0x100000000ULL)) ok 114 - len(mp_next_uint(0x100000000ULL)) ok 115 - len(mp_check_uint(0x100000000ULL)) ok 116 - mp_sizeof_uint(0x100000000ULL) ok 117 - mp_encode(0x100000000ULL) == "\xcf\x00\x00\x00\x01\x00\x00\x00\x00" # uint 0xfffffffffffffffeULL ok 118 - mp_check_uint(0xfffffffffffffffeULL) == 0 ok 119 - mp_decode(mp_encode(0xfffffffffffffffeULL)) == 0xfffffffffffffffeULL ok 120 - mp_check(0xfffffffffffffffeULL) ok 121 - len(mp_encode_uint(0xfffffffffffffffeULL) ok 122 - len(mp_decode_uint(0xfffffffffffffffeULL)) ok 123 - len(mp_next_uint(0xfffffffffffffffeULL)) ok 124 - len(mp_check_uint(0xfffffffffffffffeULL)) ok 125 - mp_sizeof_uint(0xfffffffffffffffeULL) ok 126 - mp_encode(0xfffffffffffffffeULL) == "\xcf\xff\xff\xff\xff\xff\xff\xff\xfe" # uint 0xffffffffffffffffULL ok 127 - mp_check_uint(0xffffffffffffffffULL) == 0 ok 128 - mp_decode(mp_encode(0xffffffffffffffffULL)) == 0xffffffffffffffffULL ok 129 - mp_check(0xffffffffffffffffULL) ok 130 - len(mp_encode_uint(0xffffffffffffffffULL) ok 131 - len(mp_decode_uint(0xffffffffffffffffULL)) ok 132 - len(mp_next_uint(0xffffffffffffffffULL)) ok 133 - len(mp_check_uint(0xffffffffffffffffULL)) ok 134 - mp_sizeof_uint(0xffffffffffffffffULL) ok 135 - mp_encode(0xffffffffffffffffULL) == "\xcf\xff\xff\xff\xff\xff\xff\xff\xff" # *** test_uints: done *** ok 1 - subtests 1..153 # *** test_ints *** # int -0x01 ok 1 - 
mp_check_int(-0x01) == 0 ok 2 - mp_decode(mp_encode(-0x01)) == -0x01 ok 3 - mp_check(-0x01) ok 4 - len(mp_encode_int(-0x01) ok 5 - len(mp_decode_int(-0x01)) ok 6 - len(mp_next_int(-0x01)) ok 7 - len(mp_check_int(-0x01)) ok 8 - mp_sizeof_int(-0x01) ok 9 - mp_encode(-0x01) == "\xff" # int -0x1e ok 10 - mp_check_int(-0x1e) == 0 ok 11 - mp_decode(mp_encode(-0x1e)) == -0x1e ok 12 - mp_check(-0x1e) ok 13 - len(mp_encode_int(-0x1e) ok 14 - len(mp_decode_int(-0x1e)) ok 15 - len(mp_next_int(-0x1e)) ok 16 - len(mp_check_int(-0x1e)) ok 17 - mp_sizeof_int(-0x1e) ok 18 - mp_encode(-0x1e) == "\xe2" # int -0x1f ok 19 - mp_check_int(-0x1f) == 0 ok 20 - mp_decode(mp_encode(-0x1f)) == -0x1f ok 21 - mp_check(-0x1f) ok 22 - len(mp_encode_int(-0x1f) ok 23 - len(mp_decode_int(-0x1f)) ok 24 - len(mp_next_int(-0x1f)) ok 25 - len(mp_check_int(-0x1f)) ok 26 - mp_sizeof_int(-0x1f) ok 27 - mp_encode(-0x1f) == "\xe1" # int -0x20 ok 28 - mp_check_int(-0x20) == 0 ok 29 - mp_decode(mp_encode(-0x20)) == -0x20 ok 30 - mp_check(-0x20) ok 31 - len(mp_encode_int(-0x20) ok 32 - len(mp_decode_int(-0x20)) ok 33 - len(mp_next_int(-0x20)) ok 34 - len(mp_check_int(-0x20)) ok 35 - mp_sizeof_int(-0x20) ok 36 - mp_encode(-0x20) == "\xe0" # int -0x21 ok 37 - mp_check_int(-0x21) == 0 ok 38 - mp_decode(mp_encode(-0x21)) == -0x21 ok 39 - mp_check(-0x21) ok 40 - len(mp_encode_int(-0x21) ok 41 - len(mp_decode_int(-0x21)) ok 42 - len(mp_next_int(-0x21)) ok 43 - len(mp_check_int(-0x21)) ok 44 - mp_sizeof_int(-0x21) ok 45 - mp_encode(-0x21) == "\xd0\xdf" # int -0x7f ok 46 - mp_check_int(-0x7f) == 0 ok 47 - mp_decode(mp_encode(-0x7f)) == -0x7f ok 48 - mp_check(-0x7f) ok 49 - len(mp_encode_int(-0x7f) ok 50 - len(mp_decode_int(-0x7f)) ok 51 - len(mp_next_int(-0x7f)) ok 52 - len(mp_check_int(-0x7f)) ok 53 - mp_sizeof_int(-0x7f) ok 54 - mp_encode(-0x7f) == "\xd0\x81" # int -0x80 ok 55 - mp_check_int(-0x80) == 0 ok 56 - mp_decode(mp_encode(-0x80)) == -0x80 ok 57 - mp_check(-0x80) ok 58 - len(mp_encode_int(-0x80) ok 59 - 
len(mp_decode_int(-0x80)) ok 60 - len(mp_next_int(-0x80)) ok 61 - len(mp_check_int(-0x80)) ok 62 - mp_sizeof_int(-0x80) ok 63 - mp_encode(-0x80) == "\xd0\x80" # int -0x81 ok 64 - mp_check_int(-0x81) == 0 ok 65 - mp_decode(mp_encode(-0x81)) == -0x81 ok 66 - mp_check(-0x81) ok 67 - len(mp_encode_int(-0x81) ok 68 - len(mp_decode_int(-0x81)) ok 69 - len(mp_next_int(-0x81)) ok 70 - len(mp_check_int(-0x81)) ok 71 - mp_sizeof_int(-0x81) ok 72 - mp_encode(-0x81) == "\xd1\xff\x7f" # int -0x7fff ok 73 - mp_check_int(-0x7fff) == 0 ok 74 - mp_decode(mp_encode(-0x7fff)) == -0x7fff ok 75 - mp_check(-0x7fff) ok 76 - len(mp_encode_int(-0x7fff) ok 77 - len(mp_decode_int(-0x7fff)) ok 78 - len(mp_next_int(-0x7fff)) ok 79 - len(mp_check_int(-0x7fff)) ok 80 - mp_sizeof_int(-0x7fff) ok 81 - mp_encode(-0x7fff) == "\xd1\x80\x01" # int -0x8000 ok 82 - mp_check_int(-0x8000) == 0 ok 83 - mp_decode(mp_encode(-0x8000)) == -0x8000 ok 84 - mp_check(-0x8000) ok 85 - len(mp_encode_int(-0x8000) ok 86 - len(mp_decode_int(-0x8000)) ok 87 - len(mp_next_int(-0x8000)) ok 88 - len(mp_check_int(-0x8000)) ok 89 - mp_sizeof_int(-0x8000) ok 90 - mp_encode(-0x8000) == "\xd1\x80\x00" # int -0x8001 ok 91 - mp_check_int(-0x8001) == 0 ok 92 - mp_decode(mp_encode(-0x8001)) == -0x8001 ok 93 - mp_check(-0x8001) ok 94 - len(mp_encode_int(-0x8001) ok 95 - len(mp_decode_int(-0x8001)) ok 96 - len(mp_next_int(-0x8001)) ok 97 - len(mp_check_int(-0x8001)) ok 98 - mp_sizeof_int(-0x8001) ok 99 - mp_encode(-0x8001) == "\xd2\xff\xff\x7f\xff" # int -0x7fffffff ok 100 - mp_check_int(-0x7fffffff) == 0 ok 101 - mp_decode(mp_encode(-0x7fffffff)) == -0x7fffffff ok 102 - mp_check(-0x7fffffff) ok 103 - len(mp_encode_int(-0x7fffffff) ok 104 - len(mp_decode_int(-0x7fffffff)) ok 105 - len(mp_next_int(-0x7fffffff)) ok 106 - len(mp_check_int(-0x7fffffff)) ok 107 - mp_sizeof_int(-0x7fffffff) ok 108 - mp_encode(-0x7fffffff) == "\xd2\x80\x00\x00\x01" # int -0x80000000LL ok 109 - mp_check_int(-0x80000000LL) == 0 ok 110 - 
mp_decode(mp_encode(-0x80000000LL)) == -0x80000000LL ok 111 - mp_check(-0x80000000LL) ok 112 - len(mp_encode_int(-0x80000000LL) ok 113 - len(mp_decode_int(-0x80000000LL)) ok 114 - len(mp_next_int(-0x80000000LL)) ok 115 - len(mp_check_int(-0x80000000LL)) ok 116 - mp_sizeof_int(-0x80000000LL) ok 117 - mp_encode(-0x80000000LL) == "\xd2\x80\x00\x00\x00" # int -0x80000001LL ok 118 - mp_check_int(-0x80000001LL) == 0 ok 119 - mp_decode(mp_encode(-0x80000001LL)) == -0x80000001LL ok 120 - mp_check(-0x80000001LL) ok 121 - len(mp_encode_int(-0x80000001LL) ok 122 - len(mp_decode_int(-0x80000001LL)) ok 123 - len(mp_next_int(-0x80000001LL)) ok 124 - len(mp_check_int(-0x80000001LL)) ok 125 - mp_sizeof_int(-0x80000001LL) ok 126 - mp_encode(-0x80000001LL) == "\xd3\xff\xff\xff\xff\x7f\xff\xff\xff" # int -0x80000001LL ok 127 - mp_check_int(-0x80000001LL) == 0 ok 128 - mp_decode(mp_encode(-0x80000001LL)) == -0x80000001LL ok 129 - mp_check(-0x80000001LL) ok 130 - len(mp_encode_int(-0x80000001LL) ok 131 - len(mp_decode_int(-0x80000001LL)) ok 132 - len(mp_next_int(-0x80000001LL)) ok 133 - len(mp_check_int(-0x80000001LL)) ok 134 - mp_sizeof_int(-0x80000001LL) ok 135 - mp_encode(-0x80000001LL) == "\xd3\xff\xff\xff\xff\x7f\xff\xff\xff" # int -0x7fffffffffffffffLL ok 136 - mp_check_int(-0x7fffffffffffffffLL) == 0 ok 137 - mp_decode(mp_encode(-0x7fffffffffffffffLL)) == -0x7fffffffffffffffLL ok 138 - mp_check(-0x7fffffffffffffffLL) ok 139 - len(mp_encode_int(-0x7fffffffffffffffLL) ok 140 - len(mp_decode_int(-0x7fffffffffffffffLL)) ok 141 - len(mp_next_int(-0x7fffffffffffffffLL)) ok 142 - len(mp_check_int(-0x7fffffffffffffffLL)) ok 143 - mp_sizeof_int(-0x7fffffffffffffffLL) ok 144 - mp_encode(-0x7fffffffffffffffLL) == "\xd3\x80\x00\x00\x00\x00\x00\x00\x01" # int (int64_t)-0x8000000000000000LL ok 145 - mp_check_int((int64_t)-0x8000000000000000LL) == 0 ok 146 - mp_decode(mp_encode((int64_t)-0x8000000000000000LL)) == (int64_t)-0x8000000000000000LL ok 147 - mp_check((int64_t)-0x8000000000000000LL) 
ok 148 - len(mp_encode_int((int64_t)-0x8000000000000000LL) ok 149 - len(mp_decode_int((int64_t)-0x8000000000000000LL)) ok 150 - len(mp_next_int((int64_t)-0x8000000000000000LL)) ok 151 - len(mp_check_int((int64_t)-0x8000000000000000LL)) ok 152 - mp_sizeof_int((int64_t)-0x8000000000000000LL) ok 153 - mp_encode((int64_t)-0x8000000000000000LL) == "\xd3\x80\x00\x00\x00\x00\x00\x00\x00" # *** test_ints: done *** ok 2 - subtests 1..18 # *** test_bools *** # bool 1 ok 1 - mp_check_bool(1) == 0 ok 2 - mp_decode(mp_encode(1)) == 1 ok 3 - mp_check(1) ok 4 - len(mp_encode_bool(1) ok 5 - len(mp_decode_bool(1)) ok 6 - len(mp_next_bool(1)) ok 7 - len(mp_check_bool(1)) ok 8 - mp_sizeof_bool(1) ok 9 - mp_encode(1) == "\xc3" # bool 0 ok 10 - mp_check_bool(0) == 0 ok 11 - mp_decode(mp_encode(0)) == 0 ok 12 - mp_check(0) ok 13 - len(mp_encode_bool(0) ok 14 - len(mp_decode_bool(0)) ok 15 - len(mp_next_bool(0)) ok 16 - len(mp_check_bool(0)) ok 17 - mp_sizeof_bool(0) ok 18 - mp_encode(0) == "\xc2" # *** test_bools: done *** ok 3 - subtests 1..27 # *** test_floats *** # float (float) 1.0 ok 1 - mp_check_float((float) 1.0) == 0 ok 2 - mp_decode(mp_encode((float) 1.0)) == (float) 1.0 ok 3 - mp_check((float) 1.0) ok 4 - len(mp_encode_float((float) 1.0) ok 5 - len(mp_decode_float((float) 1.0)) ok 6 - len(mp_next_float((float) 1.0)) ok 7 - len(mp_check_float((float) 1.0)) ok 8 - mp_sizeof_float((float) 1.0) ok 9 - mp_encode((float) 1.0) == "\xca\x3f\x80\x00\x00" # float (float) 3.141593 ok 10 - mp_check_float((float) 3.141593) == 0 ok 11 - mp_decode(mp_encode((float) 3.141593)) == (float) 3.141593 ok 12 - mp_check((float) 3.141593) ok 13 - len(mp_encode_float((float) 3.141593) ok 14 - len(mp_decode_float((float) 3.141593)) ok 15 - len(mp_next_float((float) 3.141593)) ok 16 - len(mp_check_float((float) 3.141593)) ok 17 - mp_sizeof_float((float) 3.141593) ok 18 - mp_encode((float) 3.141593) == "\xca\x40\x49\x0f\xdc" # float (float) -1e38f ok 19 - mp_check_float((float) -1e38f) == 0 ok 20 - 
mp_decode(mp_encode((float) -1e38f)) == (float) -1e38f ok 21 - mp_check((float) -1e38f) ok 22 - len(mp_encode_float((float) -1e38f) ok 23 - len(mp_decode_float((float) -1e38f)) ok 24 - len(mp_next_float((float) -1e38f)) ok 25 - len(mp_check_float((float) -1e38f)) ok 26 - mp_sizeof_float((float) -1e38f) ok 27 - mp_encode((float) -1e38f) == "\xca\xfe\x96\x76\x99" # *** test_floats: done *** ok 4 - subtests 1..27 # *** test_doubles *** # double (double) 1.0 ok 1 - mp_check_double((double) 1.0) == 0 ok 2 - mp_decode(mp_encode((double) 1.0)) == (double) 1.0 ok 3 - mp_check((double) 1.0) ok 4 - len(mp_encode_double((double) 1.0) ok 5 - len(mp_decode_double((double) 1.0)) ok 6 - len(mp_next_double((double) 1.0)) ok 7 - len(mp_check_double((double) 1.0)) ok 8 - mp_sizeof_double((double) 1.0) ok 9 - mp_encode((double) 1.0) == "\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00" # double (double) 3.141592653589793 ok 10 - mp_check_double((double) 3.141592653589793) == 0 ok 11 - mp_decode(mp_encode((double) 3.141592653589793)) == (double) 3.141592653589793 ok 12 - mp_check((double) 3.141592653589793) ok 13 - len(mp_encode_double((double) 3.141592653589793) ok 14 - len(mp_decode_double((double) 3.141592653589793)) ok 15 - len(mp_next_double((double) 3.141592653589793)) ok 16 - len(mp_check_double((double) 3.141592653589793)) ok 17 - mp_sizeof_double((double) 3.141592653589793) ok 18 - mp_encode((double) 3.141592653589793) == "\xcb\x40\x09\x21\xfb\x54\x44\x2d\x18" # double (double) -1e99 ok 19 - mp_check_double((double) -1e99) == 0 ok 20 - mp_decode(mp_encode((double) -1e99)) == (double) -1e99 ok 21 - mp_check((double) -1e99) ok 22 - len(mp_encode_double((double) -1e99) ok 23 - len(mp_decode_double((double) -1e99)) ok 24 - len(mp_next_double((double) -1e99)) ok 25 - len(mp_check_double((double) -1e99)) ok 26 - mp_sizeof_double((double) -1e99) ok 27 - mp_encode((double) -1e99) == "\xcb\xd4\x7d\x42\xae\xa2\x87\x9f\x2e" # *** test_doubles: done *** ok 5 - subtests 1..6 # *** test_nils *** # nil 
ok 1 - mp_check_nil() ok 2 - len(mp_encode_nil() == 1 ok 3 - len(mp_decode_nil()) == 1 ok 4 - len(mp_next_nil()) == 1 ok 5 - len(mp_check_nil()) == 1 ok 6 - mp_sizeof_nil() == 1 # *** test_nils: done *** ok 6 - subtests 1..78 # *** test_strls *** # strl 0x00U ok 1 - mp_check_strl(0x00U) == 0 ok 2 - mp_decode(mp_encode(0x00U)) == 0x00U ok 3 - len(mp_encode_strl(0x00U) ok 4 - len(mp_decode_strl(0x00U)) ok 5 - mp_sizeof_strl(0x00U) ok 6 - mp_encode(0x00U) == "\xa0" # strl 0x01U ok 7 - mp_check_strl(0x01U) == 0 ok 8 - mp_decode(mp_encode(0x01U)) == 0x01U ok 9 - len(mp_encode_strl(0x01U) ok 10 - len(mp_decode_strl(0x01U)) ok 11 - mp_sizeof_strl(0x01U) ok 12 - mp_encode(0x01U) == "\xa1" # strl 0x1eU ok 13 - mp_check_strl(0x1eU) == 0 ok 14 - mp_decode(mp_encode(0x1eU)) == 0x1eU ok 15 - len(mp_encode_strl(0x1eU) ok 16 - len(mp_decode_strl(0x1eU)) ok 17 - mp_sizeof_strl(0x1eU) ok 18 - mp_encode(0x1eU) == "\xbe" # strl 0x1fU ok 19 - mp_check_strl(0x1fU) == 0 ok 20 - mp_decode(mp_encode(0x1fU)) == 0x1fU ok 21 - len(mp_encode_strl(0x1fU) ok 22 - len(mp_decode_strl(0x1fU)) ok 23 - mp_sizeof_strl(0x1fU) ok 24 - mp_encode(0x1fU) == "\xbf" # strl 0x20U ok 25 - mp_check_strl(0x20U) == 0 ok 26 - mp_decode(mp_encode(0x20U)) == 0x20U ok 27 - len(mp_encode_strl(0x20U) ok 28 - len(mp_decode_strl(0x20U)) ok 29 - mp_sizeof_strl(0x20U) ok 30 - mp_encode(0x20U) == "\xd9\x20" # strl 0xfeU ok 31 - mp_check_strl(0xfeU) == 0 ok 32 - mp_decode(mp_encode(0xfeU)) == 0xfeU ok 33 - len(mp_encode_strl(0xfeU) ok 34 - len(mp_decode_strl(0xfeU)) ok 35 - mp_sizeof_strl(0xfeU) ok 36 - mp_encode(0xfeU) == "\xd9\xfe" # strl 0xffU ok 37 - mp_check_strl(0xffU) == 0 ok 38 - mp_decode(mp_encode(0xffU)) == 0xffU ok 39 - len(mp_encode_strl(0xffU) ok 40 - len(mp_decode_strl(0xffU)) ok 41 - mp_sizeof_strl(0xffU) ok 42 - mp_encode(0xffU) == "\xd9\xff" # strl 0x0100U ok 43 - mp_check_strl(0x0100U) == 0 ok 44 - mp_decode(mp_encode(0x0100U)) == 0x0100U ok 45 - len(mp_encode_strl(0x0100U) ok 46 - 
len(mp_decode_strl(0x0100U)) ok 47 - mp_sizeof_strl(0x0100U) ok 48 - mp_encode(0x0100U) == "\xda\x01\x00" # strl 0xfffeU ok 49 - mp_check_strl(0xfffeU) == 0 ok 50 - mp_decode(mp_encode(0xfffeU)) == 0xfffeU ok 51 - len(mp_encode_strl(0xfffeU) ok 52 - len(mp_decode_strl(0xfffeU)) ok 53 - mp_sizeof_strl(0xfffeU) ok 54 - mp_encode(0xfffeU) == "\xda\xff\xfe" # strl 0xffffU ok 55 - mp_check_strl(0xffffU) == 0 ok 56 - mp_decode(mp_encode(0xffffU)) == 0xffffU ok 57 - len(mp_encode_strl(0xffffU) ok 58 - len(mp_decode_strl(0xffffU)) ok 59 - mp_sizeof_strl(0xffffU) ok 60 - mp_encode(0xffffU) == "\xda\xff\xff" # strl 0x00010000U ok 61 - mp_check_strl(0x00010000U) == 0 ok 62 - mp_decode(mp_encode(0x00010000U)) == 0x00010000U ok 63 - len(mp_encode_strl(0x00010000U) ok 64 - len(mp_decode_strl(0x00010000U)) ok 65 - mp_sizeof_strl(0x00010000U) ok 66 - mp_encode(0x00010000U) == "\xdb\x00\x01\x00\x00" # strl 0xfffffffeU ok 67 - mp_check_strl(0xfffffffeU) == 0 ok 68 - mp_decode(mp_encode(0xfffffffeU)) == 0xfffffffeU ok 69 - len(mp_encode_strl(0xfffffffeU) ok 70 - len(mp_decode_strl(0xfffffffeU)) ok 71 - mp_sizeof_strl(0xfffffffeU) ok 72 - mp_encode(0xfffffffeU) == "\xdb\xff\xff\xff\xfe" # strl 0xffffffffU ok 73 - mp_check_strl(0xffffffffU) == 0 ok 74 - mp_decode(mp_encode(0xffffffffU)) == 0xffffffffU ok 75 - len(mp_encode_strl(0xffffffffU) ok 76 - len(mp_decode_strl(0xffffffffU)) ok 77 - mp_sizeof_strl(0xffffffffU) ok 78 - mp_encode(0xffffffffU) == "\xdb\xff\xff\xff\xff" # *** test_strls: done *** ok 7 - subtests 1..78 # *** test_binls *** # binl 0x00U ok 1 - mp_check_binl(0x00U) == 0 ok 2 - mp_decode(mp_encode(0x00U)) == 0x00U ok 3 - len(mp_encode_binl(0x00U) ok 4 - len(mp_decode_binl(0x00U)) ok 5 - mp_sizeof_binl(0x00U) ok 6 - mp_encode(0x00U) == "\xc4\x00" # binl 0x01U ok 7 - mp_check_binl(0x01U) == 0 ok 8 - mp_decode(mp_encode(0x01U)) == 0x01U ok 9 - len(mp_encode_binl(0x01U) ok 10 - len(mp_decode_binl(0x01U)) ok 11 - mp_sizeof_binl(0x01U) ok 12 - mp_encode(0x01U) == "\xc4\x01" # 
binl 0x1eU ok 13 - mp_check_binl(0x1eU) == 0 ok 14 - mp_decode(mp_encode(0x1eU)) == 0x1eU ok 15 - len(mp_encode_binl(0x1eU) ok 16 - len(mp_decode_binl(0x1eU)) ok 17 - mp_sizeof_binl(0x1eU) ok 18 - mp_encode(0x1eU) == "\xc4\x1e" # binl 0x1fU ok 19 - mp_check_binl(0x1fU) == 0 ok 20 - mp_decode(mp_encode(0x1fU)) == 0x1fU ok 21 - len(mp_encode_binl(0x1fU) ok 22 - len(mp_decode_binl(0x1fU)) ok 23 - mp_sizeof_binl(0x1fU) ok 24 - mp_encode(0x1fU) == "\xc4\x1f" # binl 0x20U ok 25 - mp_check_binl(0x20U) == 0 ok 26 - mp_decode(mp_encode(0x20U)) == 0x20U ok 27 - len(mp_encode_binl(0x20U) ok 28 - len(mp_decode_binl(0x20U)) ok 29 - mp_sizeof_binl(0x20U) ok 30 - mp_encode(0x20U) == "\xc4\x20" # binl 0xfeU ok 31 - mp_check_binl(0xfeU) == 0 ok 32 - mp_decode(mp_encode(0xfeU)) == 0xfeU ok 33 - len(mp_encode_binl(0xfeU) ok 34 - len(mp_decode_binl(0xfeU)) ok 35 - mp_sizeof_binl(0xfeU) ok 36 - mp_encode(0xfeU) == "\xc4\xfe" # binl 0xffU ok 37 - mp_check_binl(0xffU) == 0 ok 38 - mp_decode(mp_encode(0xffU)) == 0xffU ok 39 - len(mp_encode_binl(0xffU) ok 40 - len(mp_decode_binl(0xffU)) ok 41 - mp_sizeof_binl(0xffU) ok 42 - mp_encode(0xffU) == "\xc4\xff" # binl 0x0100U ok 43 - mp_check_binl(0x0100U) == 0 ok 44 - mp_decode(mp_encode(0x0100U)) == 0x0100U ok 45 - len(mp_encode_binl(0x0100U) ok 46 - len(mp_decode_binl(0x0100U)) ok 47 - mp_sizeof_binl(0x0100U) ok 48 - mp_encode(0x0100U) == "\xc5\x01\x00" # binl 0xfffeU ok 49 - mp_check_binl(0xfffeU) == 0 ok 50 - mp_decode(mp_encode(0xfffeU)) == 0xfffeU ok 51 - len(mp_encode_binl(0xfffeU) ok 52 - len(mp_decode_binl(0xfffeU)) ok 53 - mp_sizeof_binl(0xfffeU) ok 54 - mp_encode(0xfffeU) == "\xc5\xff\xfe" # binl 0xffffU ok 55 - mp_check_binl(0xffffU) == 0 ok 56 - mp_decode(mp_encode(0xffffU)) == 0xffffU ok 57 - len(mp_encode_binl(0xffffU) ok 58 - len(mp_decode_binl(0xffffU)) ok 59 - mp_sizeof_binl(0xffffU) ok 60 - mp_encode(0xffffU) == "\xc5\xff\xff" # binl 0x00010000U ok 61 - mp_check_binl(0x00010000U) == 0 ok 62 - mp_decode(mp_encode(0x00010000U)) 
== 0x00010000U ok 63 - len(mp_encode_binl(0x00010000U) ok 64 - len(mp_decode_binl(0x00010000U)) ok 65 - mp_sizeof_binl(0x00010000U) ok 66 - mp_encode(0x00010000U) == "\xc6\x00\x01\x00\x00" # binl 0xfffffffeU ok 67 - mp_check_binl(0xfffffffeU) == 0 ok 68 - mp_decode(mp_encode(0xfffffffeU)) == 0xfffffffeU ok 69 - len(mp_encode_binl(0xfffffffeU) ok 70 - len(mp_decode_binl(0xfffffffeU)) ok 71 - mp_sizeof_binl(0xfffffffeU) ok 72 - mp_encode(0xfffffffeU) == "\xc6\xff\xff\xff\xfe" # binl 0xffffffffU ok 73 - mp_check_binl(0xffffffffU) == 0 ok 74 - mp_decode(mp_encode(0xffffffffU)) == 0xffffffffU ok 75 - len(mp_encode_binl(0xffffffffU) ok 76 - len(mp_decode_binl(0xffffffffU)) ok 77 - mp_sizeof_binl(0xffffffffU) ok 78 - mp_encode(0xffffffffU) == "\xc6\xff\xff\xff\xff" # *** test_binls: done *** ok 8 - subtests 1..96 # *** test_strs *** # str len=0x01 ok 1 - len(mp_decode_str(x, 1)) ok 2 - len(mp_decode_strbin(x, 1)) ok 3 - mp_check_str(mp_encode_str(x, 0x01)) ok 4 - len(mp_decode_str(x, 0x01) ok 5 - len(mp_next_str(x, 0x01) ok 6 - len(mp_check_str(x, 0x01) ok 7 - mp_sizeof_str(0x01) ok 8 - mp_encode_str(x, 0x01) == x # str len=0x1e ok 9 - len(mp_decode_str(x, 30)) ok 10 - len(mp_decode_strbin(x, 30)) ok 11 - mp_check_str(mp_encode_str(x, 0x1e)) ok 12 - len(mp_decode_str(x, 0x1e) ok 13 - len(mp_next_str(x, 0x1e) ok 14 - len(mp_check_str(x, 0x1e) ok 15 - mp_sizeof_str(0x1e) ok 16 - mp_encode_str(x, 0x1e) == x # str len=0x1f ok 17 - len(mp_decode_str(x, 31)) ok 18 - len(mp_decode_strbin(x, 31)) ok 19 - mp_check_str(mp_encode_str(x, 0x1f)) ok 20 - len(mp_decode_str(x, 0x1f) ok 21 - len(mp_next_str(x, 0x1f) ok 22 - len(mp_check_str(x, 0x1f) ok 23 - mp_sizeof_str(0x1f) ok 24 - mp_encode_str(x, 0x1f) == x # str len=0x20 ok 25 - len(mp_decode_str(x, 32)) ok 26 - len(mp_decode_strbin(x, 32)) ok 27 - mp_check_str(mp_encode_str(x, 0x20)) ok 28 - len(mp_decode_str(x, 0x20) ok 29 - len(mp_next_str(x, 0x20) ok 30 - len(mp_check_str(x, 0x20) ok 31 - mp_sizeof_str(0x20) ok 32 - 
mp_encode_str(x, 0x20) == x # str len=0xfe ok 33 - len(mp_decode_str(x, 254)) ok 34 - len(mp_decode_strbin(x, 254)) ok 35 - mp_check_str(mp_encode_str(x, 0xfe)) ok 36 - len(mp_decode_str(x, 0xfe) ok 37 - len(mp_next_str(x, 0xfe) ok 38 - len(mp_check_str(x, 0xfe) ok 39 - mp_sizeof_str(0xfe) ok 40 - mp_encode_str(x, 0xfe) == x # str len=0xff ok 41 - len(mp_decode_str(x, 255)) ok 42 - len(mp_decode_strbin(x, 255)) ok 43 - mp_check_str(mp_encode_str(x, 0xff)) ok 44 - len(mp_decode_str(x, 0xff) ok 45 - len(mp_next_str(x, 0xff) ok 46 - len(mp_check_str(x, 0xff) ok 47 - mp_sizeof_str(0xff) ok 48 - mp_encode_str(x, 0xff) == x # str len=0x100 ok 49 - len(mp_decode_str(x, 256)) ok 50 - len(mp_decode_strbin(x, 256)) ok 51 - mp_check_str(mp_encode_str(x, 0x100)) ok 52 - len(mp_decode_str(x, 0x100) ok 53 - len(mp_next_str(x, 0x100) ok 54 - len(mp_check_str(x, 0x100) ok 55 - mp_sizeof_str(0x100) ok 56 - mp_encode_str(x, 0x100) == x # str len=0x101 ok 57 - len(mp_decode_str(x, 257)) ok 58 - len(mp_decode_strbin(x, 257)) ok 59 - mp_check_str(mp_encode_str(x, 0x101)) ok 60 - len(mp_decode_str(x, 0x101) ok 61 - len(mp_next_str(x, 0x101) ok 62 - len(mp_check_str(x, 0x101) ok 63 - mp_sizeof_str(0x101) ok 64 - mp_encode_str(x, 0x101) == x # str len=0xfffe ok 65 - len(mp_decode_str(x, 65534)) ok 66 - len(mp_decode_strbin(x, 65534)) ok 67 - mp_check_str(mp_encode_str(x, 0xfffe)) ok 68 - len(mp_decode_str(x, 0xfffe) ok 69 - len(mp_next_str(x, 0xfffe) ok 70 - len(mp_check_str(x, 0xfffe) ok 71 - mp_sizeof_str(0xfffe) ok 72 - mp_encode_str(x, 0xfffe) == x # str len=0xffff ok 73 - len(mp_decode_str(x, 65535)) ok 74 - len(mp_decode_strbin(x, 65535)) ok 75 - mp_check_str(mp_encode_str(x, 0xffff)) ok 76 - len(mp_decode_str(x, 0xffff) ok 77 - len(mp_next_str(x, 0xffff) ok 78 - len(mp_check_str(x, 0xffff) ok 79 - mp_sizeof_str(0xffff) ok 80 - mp_encode_str(x, 0xffff) == x # str len=0x10000 ok 81 - len(mp_decode_str(x, 65536)) ok 82 - len(mp_decode_strbin(x, 65536)) ok 83 - 
mp_check_str(mp_encode_str(x, 0x10000)) ok 84 - len(mp_decode_str(x, 0x10000) ok 85 - len(mp_next_str(x, 0x10000) ok 86 - len(mp_check_str(x, 0x10000) ok 87 - mp_sizeof_str(0x10000) ok 88 - mp_encode_str(x, 0x10000) == x # str len=0x10001 ok 89 - len(mp_decode_str(x, 65537)) ok 90 - len(mp_decode_strbin(x, 65537)) ok 91 - mp_check_str(mp_encode_str(x, 0x10001)) ok 92 - len(mp_decode_str(x, 0x10001) ok 93 - len(mp_next_str(x, 0x10001) ok 94 - len(mp_check_str(x, 0x10001) ok 95 - mp_sizeof_str(0x10001) ok 96 - mp_encode_str(x, 0x10001) == x # *** test_strs: done *** ok 9 - subtests 1..96 # *** test_bins *** # bin len=0x01 ok 1 - len(mp_decode_bin(x, 1)) ok 2 - len(mp_decode_strbin(x, 1)) ok 3 - mp_check_bin(mp_encode_bin(x, 0x01)) ok 4 - len(mp_decode_bin(x, 0x01) ok 5 - len(mp_next_bin(x, 0x01) ok 6 - len(mp_check_bin(x, 0x01) ok 7 - mp_sizeof_bin(0x01) ok 8 - mp_encode_bin(x, 0x01) == x # bin len=0x1e ok 9 - len(mp_decode_bin(x, 30)) ok 10 - len(mp_decode_strbin(x, 30)) ok 11 - mp_check_bin(mp_encode_bin(x, 0x1e)) ok 12 - len(mp_decode_bin(x, 0x1e) ok 13 - len(mp_next_bin(x, 0x1e) ok 14 - len(mp_check_bin(x, 0x1e) ok 15 - mp_sizeof_bin(0x1e) ok 16 - mp_encode_bin(x, 0x1e) == x # bin len=0x1f ok 17 - len(mp_decode_bin(x, 31)) ok 18 - len(mp_decode_strbin(x, 31)) ok 19 - mp_check_bin(mp_encode_bin(x, 0x1f)) ok 20 - len(mp_decode_bin(x, 0x1f) ok 21 - len(mp_next_bin(x, 0x1f) ok 22 - len(mp_check_bin(x, 0x1f) ok 23 - mp_sizeof_bin(0x1f) ok 24 - mp_encode_bin(x, 0x1f) == x # bin len=0x20 ok 25 - len(mp_decode_bin(x, 32)) ok 26 - len(mp_decode_strbin(x, 32)) ok 27 - mp_check_bin(mp_encode_bin(x, 0x20)) ok 28 - len(mp_decode_bin(x, 0x20) ok 29 - len(mp_next_bin(x, 0x20) ok 30 - len(mp_check_bin(x, 0x20) ok 31 - mp_sizeof_bin(0x20) ok 32 - mp_encode_bin(x, 0x20) == x # bin len=0xfe ok 33 - len(mp_decode_bin(x, 254)) ok 34 - len(mp_decode_strbin(x, 254)) ok 35 - mp_check_bin(mp_encode_bin(x, 0xfe)) ok 36 - len(mp_decode_bin(x, 0xfe) ok 37 - len(mp_next_bin(x, 0xfe) ok 38 - 
len(mp_check_bin(x, 0xfe) ok 39 - mp_sizeof_bin(0xfe) ok 40 - mp_encode_bin(x, 0xfe) == x # bin len=0xff ok 41 - len(mp_decode_bin(x, 255)) ok 42 - len(mp_decode_strbin(x, 255)) ok 43 - mp_check_bin(mp_encode_bin(x, 0xff)) ok 44 - len(mp_decode_bin(x, 0xff) ok 45 - len(mp_next_bin(x, 0xff) ok 46 - len(mp_check_bin(x, 0xff) ok 47 - mp_sizeof_bin(0xff) ok 48 - mp_encode_bin(x, 0xff) == x # bin len=0x100 ok 49 - len(mp_decode_bin(x, 256)) ok 50 - len(mp_decode_strbin(x, 256)) ok 51 - mp_check_bin(mp_encode_bin(x, 0x100)) ok 52 - len(mp_decode_bin(x, 0x100) ok 53 - len(mp_next_bin(x, 0x100) ok 54 - len(mp_check_bin(x, 0x100) ok 55 - mp_sizeof_bin(0x100) ok 56 - mp_encode_bin(x, 0x100) == x # bin len=0x101 ok 57 - len(mp_decode_bin(x, 257)) ok 58 - len(mp_decode_strbin(x, 257)) ok 59 - mp_check_bin(mp_encode_bin(x, 0x101)) ok 60 - len(mp_decode_bin(x, 0x101) ok 61 - len(mp_next_bin(x, 0x101) ok 62 - len(mp_check_bin(x, 0x101) ok 63 - mp_sizeof_bin(0x101) ok 64 - mp_encode_bin(x, 0x101) == x # bin len=0xfffe ok 65 - len(mp_decode_bin(x, 65534)) ok 66 - len(mp_decode_strbin(x, 65534)) ok 67 - mp_check_bin(mp_encode_bin(x, 0xfffe)) ok 68 - len(mp_decode_bin(x, 0xfffe) ok 69 - len(mp_next_bin(x, 0xfffe) ok 70 - len(mp_check_bin(x, 0xfffe) ok 71 - mp_sizeof_bin(0xfffe) ok 72 - mp_encode_bin(x, 0xfffe) == x # bin len=0xffff ok 73 - len(mp_decode_bin(x, 65535)) ok 74 - len(mp_decode_strbin(x, 65535)) ok 75 - mp_check_bin(mp_encode_bin(x, 0xffff)) ok 76 - len(mp_decode_bin(x, 0xffff) ok 77 - len(mp_next_bin(x, 0xffff) ok 78 - len(mp_check_bin(x, 0xffff) ok 79 - mp_sizeof_bin(0xffff) ok 80 - mp_encode_bin(x, 0xffff) == x # bin len=0x10000 ok 81 - len(mp_decode_bin(x, 65536)) ok 82 - len(mp_decode_strbin(x, 65536)) ok 83 - mp_check_bin(mp_encode_bin(x, 0x10000)) ok 84 - len(mp_decode_bin(x, 0x10000) ok 85 - len(mp_next_bin(x, 0x10000) ok 86 - len(mp_check_bin(x, 0x10000) ok 87 - mp_sizeof_bin(0x10000) ok 88 - mp_encode_bin(x, 0x10000) == x # bin len=0x10001 ok 89 - 
len(mp_decode_bin(x, 65537)) ok 90 - len(mp_decode_strbin(x, 65537)) ok 91 - mp_check_bin(mp_encode_bin(x, 0x10001)) ok 92 - len(mp_decode_bin(x, 0x10001) ok 93 - len(mp_next_bin(x, 0x10001) ok 94 - len(mp_check_bin(x, 0x10001) ok 95 - mp_sizeof_bin(0x10001) ok 96 - mp_encode_bin(x, 0x10001) == x # *** test_bins: done *** ok 10 - subtests 1..54 # *** test_arrays *** # array 0 ok 1 - mp_check_array(0) == 0 ok 2 - mp_decode(mp_encode(0)) == 0 ok 3 - len(mp_encode_array(0) ok 4 - len(mp_decode_array(0)) ok 5 - mp_sizeof_array(0) ok 6 - mp_encode(0) == "\x90" # array 1 ok 7 - mp_check_array(1) == 0 ok 8 - mp_decode(mp_encode(1)) == 1 ok 9 - len(mp_encode_array(1) ok 10 - len(mp_decode_array(1)) ok 11 - mp_sizeof_array(1) ok 12 - mp_encode(1) == "\x91" # array 15 ok 13 - mp_check_array(15) == 0 ok 14 - mp_decode(mp_encode(15)) == 15 ok 15 - len(mp_encode_array(15) ok 16 - len(mp_decode_array(15)) ok 17 - mp_sizeof_array(15) ok 18 - mp_encode(15) == "\x9f" # array 16 ok 19 - mp_check_array(16) == 0 ok 20 - mp_decode(mp_encode(16)) == 16 ok 21 - len(mp_encode_array(16) ok 22 - len(mp_decode_array(16)) ok 23 - mp_sizeof_array(16) ok 24 - mp_encode(16) == "\xdc\x00\x10" # array 0xfffe ok 25 - mp_check_array(0xfffe) == 0 ok 26 - mp_decode(mp_encode(0xfffe)) == 0xfffe ok 27 - len(mp_encode_array(0xfffe) ok 28 - len(mp_decode_array(0xfffe)) ok 29 - mp_sizeof_array(0xfffe) ok 30 - mp_encode(0xfffe) == "\xdc\xff\xfe" # array 0xffff ok 31 - mp_check_array(0xffff) == 0 ok 32 - mp_decode(mp_encode(0xffff)) == 0xffff ok 33 - len(mp_encode_array(0xffff) ok 34 - len(mp_decode_array(0xffff)) ok 35 - mp_sizeof_array(0xffff) ok 36 - mp_encode(0xffff) == "\xdc\xff\xff" # array 0x10000 ok 37 - mp_check_array(0x10000) == 0 ok 38 - mp_decode(mp_encode(0x10000)) == 0x10000 ok 39 - len(mp_encode_array(0x10000) ok 40 - len(mp_decode_array(0x10000)) ok 41 - mp_sizeof_array(0x10000) ok 42 - mp_encode(0x10000) == "\xdd\x00\x01\x00\x00" # array 0xfffffffeU ok 43 - mp_check_array(0xfffffffeU) == 0 
ok 44 - mp_decode(mp_encode(0xfffffffeU)) == 0xfffffffeU ok 45 - len(mp_encode_array(0xfffffffeU) ok 46 - len(mp_decode_array(0xfffffffeU)) ok 47 - mp_sizeof_array(0xfffffffeU) ok 48 - mp_encode(0xfffffffeU) == "\xdd\xff\xff\xff\xfe" # array 0xffffffffU ok 49 - mp_check_array(0xffffffffU) == 0 ok 50 - mp_decode(mp_encode(0xffffffffU)) == 0xffffffffU ok 51 - len(mp_encode_array(0xffffffffU) ok 52 - len(mp_decode_array(0xffffffffU)) ok 53 - mp_sizeof_array(0xffffffffU) ok 54 - mp_encode(0xffffffffU) == "\xdd\xff\xff\xff\xff" # *** test_arrays: done *** ok 11 - subtests 1..54 # *** test_maps *** # map 0 ok 1 - mp_check_map(0) == 0 ok 2 - mp_decode(mp_encode(0)) == 0 ok 3 - len(mp_encode_map(0) ok 4 - len(mp_decode_map(0)) ok 5 - mp_sizeof_map(0) ok 6 - mp_encode(0) == "\x80" # map 1 ok 7 - mp_check_map(1) == 0 ok 8 - mp_decode(mp_encode(1)) == 1 ok 9 - len(mp_encode_map(1) ok 10 - len(mp_decode_map(1)) ok 11 - mp_sizeof_map(1) ok 12 - mp_encode(1) == "\x81" # map 15 ok 13 - mp_check_map(15) == 0 ok 14 - mp_decode(mp_encode(15)) == 15 ok 15 - len(mp_encode_map(15) ok 16 - len(mp_decode_map(15)) ok 17 - mp_sizeof_map(15) ok 18 - mp_encode(15) == "\x8f" # map 16 ok 19 - mp_check_map(16) == 0 ok 20 - mp_decode(mp_encode(16)) == 16 ok 21 - len(mp_encode_map(16) ok 22 - len(mp_decode_map(16)) ok 23 - mp_sizeof_map(16) ok 24 - mp_encode(16) == "\xde\x00\x10" # map 0xfffe ok 25 - mp_check_map(0xfffe) == 0 ok 26 - mp_decode(mp_encode(0xfffe)) == 0xfffe ok 27 - len(mp_encode_map(0xfffe) ok 28 - len(mp_decode_map(0xfffe)) ok 29 - mp_sizeof_map(0xfffe) ok 30 - mp_encode(0xfffe) == "\xde\xff\xfe" # map 0xffff ok 31 - mp_check_map(0xffff) == 0 ok 32 - mp_decode(mp_encode(0xffff)) == 0xffff ok 33 - len(mp_encode_map(0xffff) ok 34 - len(mp_decode_map(0xffff)) ok 35 - mp_sizeof_map(0xffff) ok 36 - mp_encode(0xffff) == "\xde\xff\xff" # map 0x10000 ok 37 - mp_check_map(0x10000) == 0 ok 38 - mp_decode(mp_encode(0x10000)) == 0x10000 ok 39 - len(mp_encode_map(0x10000) ok 40 - 
len(mp_decode_map(0x10000)) ok 41 - mp_sizeof_map(0x10000) ok 42 - mp_encode(0x10000) == "\xdf\x00\x01\x00\x00" # map 0xfffffffeU ok 43 - mp_check_map(0xfffffffeU) == 0 ok 44 - mp_decode(mp_encode(0xfffffffeU)) == 0xfffffffeU ok 45 - len(mp_encode_map(0xfffffffeU) ok 46 - len(mp_decode_map(0xfffffffeU)) ok 47 - mp_sizeof_map(0xfffffffeU) ok 48 - mp_encode(0xfffffffeU) == "\xdf\xff\xff\xff\xfe" # map 0xffffffffU ok 49 - mp_check_map(0xffffffffU) == 0 ok 50 - mp_decode(mp_encode(0xffffffffU)) == 0xffffffffU ok 51 - len(mp_encode_map(0xffffffffU) ok 52 - len(mp_decode_map(0xffffffffU)) ok 53 - mp_sizeof_map(0xffffffffU) ok 54 - mp_encode(0xffffffffU) == "\xdf\xff\xff\xff\xff" # *** test_maps: done *** ok 12 - subtests 1..52 # *** test_next_on_arrays *** # next/check on array(0) ok 1 - mp_check(array 0)) ok 2 - len(array 0) == 1 ok 3 - len(mp_check(array 0)) == 1 ok 4 - len(mp_next(array 0)) == 1 # next/check on array(1) ok 5 - mp_check(array 1)) ok 6 - len(array 1) == 2 ok 7 - len(mp_check(array 1)) == 2 ok 8 - len(mp_next(array 1)) == 2 # next/check on array(15) ok 9 - mp_check(array 15)) ok 10 - len(array 15) == 16 ok 11 - len(mp_check(array 15)) == 16 ok 12 - len(mp_next(array 15)) == 16 # next/check on array(16) ok 13 - mp_check(array 16)) ok 14 - len(array 16) == 19 ok 15 - len(mp_check(array 16)) == 19 ok 16 - len(mp_next(array 16)) == 19 # next/check on array(17) ok 17 - mp_check(array 17)) ok 18 - len(array 17) == 20 ok 19 - len(mp_check(array 17)) == 20 ok 20 - len(mp_next(array 17)) == 20 # next/check on array(254) ok 21 - mp_check(array 254)) ok 22 - len(array 254) == 257 ok 23 - len(mp_check(array 254)) == 257 ok 24 - len(mp_next(array 254)) == 257 # next/check on array(255) ok 25 - mp_check(array 255)) ok 26 - len(array 255) == 258 ok 27 - len(mp_check(array 255)) == 258 ok 28 - len(mp_next(array 255)) == 258 # next/check on array(256) ok 29 - mp_check(array 256)) ok 30 - len(array 256) == 259 ok 31 - len(mp_check(array 256)) == 259 ok 32 - 
len(mp_next(array 256)) == 259 # next/check on array(257) ok 33 - mp_check(array 257)) ok 34 - len(array 257) == 260 ok 35 - len(mp_check(array 257)) == 260 ok 36 - len(mp_next(array 257)) == 260 # next/check on array(65534) ok 37 - mp_check(array 65534)) ok 38 - len(array 65534) == 65537 ok 39 - len(mp_check(array 65534)) == 65537 ok 40 - len(mp_next(array 65534)) == 65537 # next/check on array(65535) ok 41 - mp_check(array 65535)) ok 42 - len(array 65535) == 65538 ok 43 - len(mp_check(array 65535)) == 65538 ok 44 - len(mp_next(array 65535)) == 65538 # next/check on array(65536) ok 45 - mp_check(array 65536)) ok 46 - len(array 65536) == 65541 ok 47 - len(mp_check(array 65536)) == 65541 ok 48 - len(mp_next(array 65536)) == 65541 # next/check on array(65537) ok 49 - mp_check(array 65537)) ok 50 - len(array 65537) == 65542 ok 51 - len(mp_check(array 65537)) == 65542 ok 52 - len(mp_next(array 65537)) == 65542 # *** test_next_on_arrays: done *** ok 13 - subtests 1..52 # *** test_next_on_maps *** # next/check on map(0) ok 1 - mp_check(map 0)) ok 2 - len(map 0) == 1 ok 3 - len(mp_check(map 0)) == 1 ok 4 - len(mp_next(map 0)) == 1 # next/check on map(1) ok 5 - mp_check(map 1)) ok 6 - len(map 1) == 3 ok 7 - len(mp_check(map 1)) == 3 ok 8 - len(mp_next(map 1)) == 3 # next/check on map(15) ok 9 - mp_check(map 15)) ok 10 - len(map 15) == 31 ok 11 - len(mp_check(map 15)) == 31 ok 12 - len(mp_next(map 15)) == 31 # next/check on map(16) ok 13 - mp_check(map 16)) ok 14 - len(map 16) == 35 ok 15 - len(mp_check(map 16)) == 35 ok 16 - len(mp_next(map 16)) == 35 # next/check on map(17) ok 17 - mp_check(map 17)) ok 18 - len(map 17) == 37 ok 19 - len(mp_check(map 17)) == 37 ok 20 - len(mp_next(map 17)) == 37 # next/check on map(254) ok 21 - mp_check(map 254)) ok 22 - len(map 254) == 511 ok 23 - len(mp_check(map 254)) == 511 ok 24 - len(mp_next(map 254)) == 511 # next/check on map(255) ok 25 - mp_check(map 255)) ok 26 - len(map 255) == 513 ok 27 - len(mp_check(map 255)) == 513 ok 28 - 
len(mp_next(map 255)) == 513 # next/check on map(256) ok 29 - mp_check(map 256)) ok 30 - len(map 256) == 515 ok 31 - len(mp_check(map 256)) == 515 ok 32 - len(mp_next(map 256)) == 515 # next/check on map(257) ok 33 - mp_check(map 257)) ok 34 - len(map 257) == 517 ok 35 - len(mp_check(map 257)) == 517 ok 36 - len(mp_next(map 257)) == 517 # next/check on map(65534) ok 37 - mp_check(map 65534)) ok 38 - len(map 65534) == 131071 ok 39 - len(mp_check(map 65534)) == 131071 ok 40 - len(mp_next(map 65534)) == 131071 # next/check on map(65535) ok 41 - mp_check(map 65535)) ok 42 - len(map 65535) == 131073 ok 43 - len(mp_check(map 65535)) == 131073 ok 44 - len(mp_next(map 65535)) == 131073 # next/check on map(65536) ok 45 - mp_check(map 65536)) ok 46 - len(map 65536) == 131077 ok 47 - len(mp_check(map 65536)) == 131077 ok 48 - len(mp_next(map 65536)) == 131077 # next/check on map(65537) ok 49 - mp_check(map 65537)) ok 50 - len(map 65537) == 131079 ok 51 - len(mp_check(map 65537)) == 131079 ok 52 - len(mp_next(map 65537)) == 131079 # *** test_next_on_maps: done *** ok 14 - subtests 1..227 # *** test_compare_uints *** ok 1 - mp_compare_uint(0, 0) == 0 ok 2 - mp_compare_uint(0, 0) == 0 ok 3 - mp_compare_uint(0, 0) == 0 ok 4 - mp_compare_uint(0, 1) < 0 ok 5 - mp_compare_uint(0, 126) < 0 ok 6 - mp_compare_uint(0, 127) < 0 ok 7 - mp_compare_uint(0, 128) < 0 ok 8 - mp_compare_uint(0, 254) < 0 ok 9 - mp_compare_uint(0, 255) < 0 ok 10 - mp_compare_uint(0, 65534) < 0 ok 11 - mp_compare_uint(0, 65535) < 0 ok 12 - mp_compare_uint(0, 65536) < 0 ok 13 - mp_compare_uint(0, 4294967294) < 0 ok 14 - mp_compare_uint(0, 4294967295) < 0 ok 15 - mp_compare_uint(0, 4294967296) < 0 ok 16 - mp_compare_uint(0, 18446744073709551614) < 0 ok 17 - mp_compare_uint(0, 18446744073709551615) < 0 ok 18 - mp_compare_uint(1, 0) > 0 ok 19 - mp_compare_uint(1, 1) == 0 ok 20 - mp_compare_uint(1, 126) < 0 ok 21 - mp_compare_uint(1, 127) < 0 ok 22 - mp_compare_uint(1, 128) < 0 ok 23 - mp_compare_uint(1, 254) < 0 ok 24 
- mp_compare_uint(1, 255) < 0 ok 25 - mp_compare_uint(1, 65534) < 0 ok 26 - mp_compare_uint(1, 65535) < 0 ok 27 - mp_compare_uint(1, 65536) < 0 ok 28 - mp_compare_uint(1, 4294967294) < 0 ok 29 - mp_compare_uint(1, 4294967295) < 0 ok 30 - mp_compare_uint(1, 4294967296) < 0 ok 31 - mp_compare_uint(1, 18446744073709551614) < 0 ok 32 - mp_compare_uint(1, 18446744073709551615) < 0 ok 33 - mp_compare_uint(126, 0) > 0 ok 34 - mp_compare_uint(126, 1) > 0 ok 35 - mp_compare_uint(126, 126) == 0 ok 36 - mp_compare_uint(126, 127) < 0 ok 37 - mp_compare_uint(126, 128) < 0 ok 38 - mp_compare_uint(126, 254) < 0 ok 39 - mp_compare_uint(126, 255) < 0 ok 40 - mp_compare_uint(126, 65534) < 0 ok 41 - mp_compare_uint(126, 65535) < 0 ok 42 - mp_compare_uint(126, 65536) < 0 ok 43 - mp_compare_uint(126, 4294967294) < 0 ok 44 - mp_compare_uint(126, 4294967295) < 0 ok 45 - mp_compare_uint(126, 4294967296) < 0 ok 46 - mp_compare_uint(126, 18446744073709551614) < 0 ok 47 - mp_compare_uint(126, 18446744073709551615) < 0 ok 48 - mp_compare_uint(127, 0) > 0 ok 49 - mp_compare_uint(127, 1) > 0 ok 50 - mp_compare_uint(127, 126) > 0 ok 51 - mp_compare_uint(127, 127) == 0 ok 52 - mp_compare_uint(127, 128) < 0 ok 53 - mp_compare_uint(127, 254) < 0 ok 54 - mp_compare_uint(127, 255) < 0 ok 55 - mp_compare_uint(127, 65534) < 0 ok 56 - mp_compare_uint(127, 65535) < 0 ok 57 - mp_compare_uint(127, 65536) < 0 ok 58 - mp_compare_uint(127, 4294967294) < 0 ok 59 - mp_compare_uint(127, 4294967295) < 0 ok 60 - mp_compare_uint(127, 4294967296) < 0 ok 61 - mp_compare_uint(127, 18446744073709551614) < 0 ok 62 - mp_compare_uint(127, 18446744073709551615) < 0 ok 63 - mp_compare_uint(128, 0) > 0 ok 64 - mp_compare_uint(128, 1) > 0 ok 65 - mp_compare_uint(128, 126) > 0 ok 66 - mp_compare_uint(128, 127) > 0 ok 67 - mp_compare_uint(128, 128) == 0 ok 68 - mp_compare_uint(128, 254) < 0 ok 69 - mp_compare_uint(128, 255) < 0 ok 70 - mp_compare_uint(128, 65534) < 0 ok 71 - mp_compare_uint(128, 65535) < 0 ok 72 - 
mp_compare_uint(128, 65536) < 0 ok 73 - mp_compare_uint(128, 4294967294) < 0 ok 74 - mp_compare_uint(128, 4294967295) < 0 ok 75 - mp_compare_uint(128, 4294967296) < 0 ok 76 - mp_compare_uint(128, 18446744073709551614) < 0 ok 77 - mp_compare_uint(128, 18446744073709551615) < 0 ok 78 - mp_compare_uint(254, 0) > 0 ok 79 - mp_compare_uint(254, 1) > 0 ok 80 - mp_compare_uint(254, 126) > 0 ok 81 - mp_compare_uint(254, 127) > 0 ok 82 - mp_compare_uint(254, 128) > 0 ok 83 - mp_compare_uint(254, 254) == 0 ok 84 - mp_compare_uint(254, 255) < 0 ok 85 - mp_compare_uint(254, 65534) < 0 ok 86 - mp_compare_uint(254, 65535) < 0 ok 87 - mp_compare_uint(254, 65536) < 0 ok 88 - mp_compare_uint(254, 4294967294) < 0 ok 89 - mp_compare_uint(254, 4294967295) < 0 ok 90 - mp_compare_uint(254, 4294967296) < 0 ok 91 - mp_compare_uint(254, 18446744073709551614) < 0 ok 92 - mp_compare_uint(254, 18446744073709551615) < 0 ok 93 - mp_compare_uint(255, 0) > 0 ok 94 - mp_compare_uint(255, 1) > 0 ok 95 - mp_compare_uint(255, 126) > 0 ok 96 - mp_compare_uint(255, 127) > 0 ok 97 - mp_compare_uint(255, 128) > 0 ok 98 - mp_compare_uint(255, 254) > 0 ok 99 - mp_compare_uint(255, 255) == 0 ok 100 - mp_compare_uint(255, 65534) < 0 ok 101 - mp_compare_uint(255, 65535) < 0 ok 102 - mp_compare_uint(255, 65536) < 0 ok 103 - mp_compare_uint(255, 4294967294) < 0 ok 104 - mp_compare_uint(255, 4294967295) < 0 ok 105 - mp_compare_uint(255, 4294967296) < 0 ok 106 - mp_compare_uint(255, 18446744073709551614) < 0 ok 107 - mp_compare_uint(255, 18446744073709551615) < 0 ok 108 - mp_compare_uint(65534, 0) > 0 ok 109 - mp_compare_uint(65534, 1) > 0 ok 110 - mp_compare_uint(65534, 126) > 0 ok 111 - mp_compare_uint(65534, 127) > 0 ok 112 - mp_compare_uint(65534, 128) > 0 ok 113 - mp_compare_uint(65534, 254) > 0 ok 114 - mp_compare_uint(65534, 255) > 0 ok 115 - mp_compare_uint(65534, 65534) == 0 ok 116 - mp_compare_uint(65534, 65535) < 0 ok 117 - mp_compare_uint(65534, 65536) < 0 ok 118 - mp_compare_uint(65534, 4294967294) < 
0 ok 119 - mp_compare_uint(65534, 4294967295) < 0 ok 120 - mp_compare_uint(65534, 4294967296) < 0 ok 121 - mp_compare_uint(65534, 18446744073709551614) < 0 ok 122 - mp_compare_uint(65534, 18446744073709551615) < 0 ok 123 - mp_compare_uint(65535, 0) > 0 ok 124 - mp_compare_uint(65535, 1) > 0 ok 125 - mp_compare_uint(65535, 126) > 0 ok 126 - mp_compare_uint(65535, 127) > 0 ok 127 - mp_compare_uint(65535, 128) > 0 ok 128 - mp_compare_uint(65535, 254) > 0 ok 129 - mp_compare_uint(65535, 255) > 0 ok 130 - mp_compare_uint(65535, 65534) > 0 ok 131 - mp_compare_uint(65535, 65535) == 0 ok 132 - mp_compare_uint(65535, 65536) < 0 ok 133 - mp_compare_uint(65535, 4294967294) < 0 ok 134 - mp_compare_uint(65535, 4294967295) < 0 ok 135 - mp_compare_uint(65535, 4294967296) < 0 ok 136 - mp_compare_uint(65535, 18446744073709551614) < 0 ok 137 - mp_compare_uint(65535, 18446744073709551615) < 0 ok 138 - mp_compare_uint(65536, 0) > 0 ok 139 - mp_compare_uint(65536, 1) > 0 ok 140 - mp_compare_uint(65536, 126) > 0 ok 141 - mp_compare_uint(65536, 127) > 0 ok 142 - mp_compare_uint(65536, 128) > 0 ok 143 - mp_compare_uint(65536, 254) > 0 ok 144 - mp_compare_uint(65536, 255) > 0 ok 145 - mp_compare_uint(65536, 65534) > 0 ok 146 - mp_compare_uint(65536, 65535) > 0 ok 147 - mp_compare_uint(65536, 65536) == 0 ok 148 - mp_compare_uint(65536, 4294967294) < 0 ok 149 - mp_compare_uint(65536, 4294967295) < 0 ok 150 - mp_compare_uint(65536, 4294967296) < 0 ok 151 - mp_compare_uint(65536, 18446744073709551614) < 0 ok 152 - mp_compare_uint(65536, 18446744073709551615) < 0 ok 153 - mp_compare_uint(4294967294, 0) > 0 ok 154 - mp_compare_uint(4294967294, 1) > 0 ok 155 - mp_compare_uint(4294967294, 126) > 0 ok 156 - mp_compare_uint(4294967294, 127) > 0 ok 157 - mp_compare_uint(4294967294, 128) > 0 ok 158 - mp_compare_uint(4294967294, 254) > 0 ok 159 - mp_compare_uint(4294967294, 255) > 0 ok 160 - mp_compare_uint(4294967294, 65534) > 0 ok 161 - mp_compare_uint(4294967294, 65535) > 0 ok 162 - 
mp_compare_uint(4294967294, 65536) > 0 ok 163 - mp_compare_uint(4294967294, 4294967294) == 0 ok 164 - mp_compare_uint(4294967294, 4294967295) < 0 ok 165 - mp_compare_uint(4294967294, 4294967296) < 0 ok 166 - mp_compare_uint(4294967294, 18446744073709551614) < 0 ok 167 - mp_compare_uint(4294967294, 18446744073709551615) < 0 ok 168 - mp_compare_uint(4294967295, 0) > 0 ok 169 - mp_compare_uint(4294967295, 1) > 0 ok 170 - mp_compare_uint(4294967295, 126) > 0 ok 171 - mp_compare_uint(4294967295, 127) > 0 ok 172 - mp_compare_uint(4294967295, 128) > 0 ok 173 - mp_compare_uint(4294967295, 254) > 0 ok 174 - mp_compare_uint(4294967295, 255) > 0 ok 175 - mp_compare_uint(4294967295, 65534) > 0 ok 176 - mp_compare_uint(4294967295, 65535) > 0 ok 177 - mp_compare_uint(4294967295, 65536) > 0 ok 178 - mp_compare_uint(4294967295, 4294967294) > 0 ok 179 - mp_compare_uint(4294967295, 4294967295) == 0 ok 180 - mp_compare_uint(4294967295, 4294967296) < 0 ok 181 - mp_compare_uint(4294967295, 18446744073709551614) < 0 ok 182 - mp_compare_uint(4294967295, 18446744073709551615) < 0 ok 183 - mp_compare_uint(4294967296, 0) > 0 ok 184 - mp_compare_uint(4294967296, 1) > 0 ok 185 - mp_compare_uint(4294967296, 126) > 0 ok 186 - mp_compare_uint(4294967296, 127) > 0 ok 187 - mp_compare_uint(4294967296, 128) > 0 ok 188 - mp_compare_uint(4294967296, 254) > 0 ok 189 - mp_compare_uint(4294967296, 255) > 0 ok 190 - mp_compare_uint(4294967296, 65534) > 0 ok 191 - mp_compare_uint(4294967296, 65535) > 0 ok 192 - mp_compare_uint(4294967296, 65536) > 0 ok 193 - mp_compare_uint(4294967296, 4294967294) > 0 ok 194 - mp_compare_uint(4294967296, 4294967295) > 0 ok 195 - mp_compare_uint(4294967296, 4294967296) == 0 ok 196 - mp_compare_uint(4294967296, 18446744073709551614) < 0 ok 197 - mp_compare_uint(4294967296, 18446744073709551615) < 0 ok 198 - mp_compare_uint(18446744073709551614, 0) > 0 ok 199 - mp_compare_uint(18446744073709551614, 1) > 0 ok 200 - mp_compare_uint(18446744073709551614, 126) > 0 ok 201 - 
mp_compare_uint(18446744073709551614, 127) > 0 ok 202 - mp_compare_uint(18446744073709551614, 128) > 0 ok 203 - mp_compare_uint(18446744073709551614, 254) > 0 ok 204 - mp_compare_uint(18446744073709551614, 255) > 0 ok 205 - mp_compare_uint(18446744073709551614, 65534) > 0 ok 206 - mp_compare_uint(18446744073709551614, 65535) > 0 ok 207 - mp_compare_uint(18446744073709551614, 65536) > 0 ok 208 - mp_compare_uint(18446744073709551614, 4294967294) > 0 ok 209 - mp_compare_uint(18446744073709551614, 4294967295) > 0 ok 210 - mp_compare_uint(18446744073709551614, 4294967296) > 0 ok 211 - mp_compare_uint(18446744073709551614, 18446744073709551614) == 0 ok 212 - mp_compare_uint(18446744073709551614, 18446744073709551615) < 0 ok 213 - mp_compare_uint(18446744073709551615, 0) > 0 ok 214 - mp_compare_uint(18446744073709551615, 1) > 0 ok 215 - mp_compare_uint(18446744073709551615, 126) > 0 ok 216 - mp_compare_uint(18446744073709551615, 127) > 0 ok 217 - mp_compare_uint(18446744073709551615, 128) > 0 ok 218 - mp_compare_uint(18446744073709551615, 254) > 0 ok 219 - mp_compare_uint(18446744073709551615, 255) > 0 ok 220 - mp_compare_uint(18446744073709551615, 65534) > 0 ok 221 - mp_compare_uint(18446744073709551615, 65535) > 0 ok 222 - mp_compare_uint(18446744073709551615, 65536) > 0 ok 223 - mp_compare_uint(18446744073709551615, 4294967294) > 0 ok 224 - mp_compare_uint(18446744073709551615, 4294967295) > 0 ok 225 - mp_compare_uint(18446744073709551615, 4294967296) > 0 ok 226 - mp_compare_uint(18446744073709551615, 18446744073709551614) > 0 ok 227 - mp_compare_uint(18446744073709551615, 18446744073709551615) == 0 # *** test_compare_uints: done *** ok 15 - subtests 1..282 # *** test_format *** ok 1 - Test type on step 0 ok 2 - Test value on step 0 ok 3 - Test type on step 1 ok 4 - Test value on step 1 ok 5 - Test type on step 2 ok 6 - Test value on step 2 ok 7 - Test type on step 3 ok 8 - Test value on step 3 ok 9 - Test type on step 4 ok 10 - Test value on step 4 ok 11 - Test type 
on step 5 ok 12 - Test value on step 5 ok 13 - Test type on step 6 ok 14 - Test value on step 6 ok 15 - Test type on step 7 ok 16 - Test value on step 7 ok 17 - Test type on step 8 ok 18 - Test value on step 8 ok 19 - Test type on step 9 ok 20 - Test value on step 9 ok 21 - Test type on step 10 ok 22 - Test value on step 10 ok 23 - Test type on step 11 ok 24 - Test value on step 11 ok 25 - Test type on step 12 ok 26 - Test value on step 12 ok 27 - Test type on step 13 ok 28 - Test value on step 13 ok 29 - Test type on step 14 ok 30 - Test value on step 14 ok 31 - Test type on step 0 ok 32 - Test value on step 0 ok 33 - Test type on step 1 ok 34 - Test value on step 1 ok 35 - Test type on step 2 ok 36 - Test value on step 2 ok 37 - Test type on step 3 ok 38 - Test value on step 3 ok 39 - Test type on step 4 ok 40 - Test value on step 4 ok 41 - Test type on step 5 ok 42 - Test value on step 5 ok 43 - Test type on step 6 ok 44 - Test value on step 6 ok 45 - Test type on step 7 ok 46 - Test value on step 7 ok 47 - Test type on step 8 ok 48 - Test value on step 8 ok 49 - Test type on step 9 ok 50 - Test value on step 9 ok 51 - Test type on step 10 ok 52 - Test value on step 10 ok 53 - Test type on step 11 ok 54 - Test value on step 11 ok 55 - Test type on step 12 ok 56 - Test value on step 12 ok 57 - Test type on step 13 ok 58 - Test value on step 13 ok 59 - Test type on step 14 ok 60 - Test value on step 14 ok 61 - check ok 62 - type ok 63 - decode ok 64 - check ok 65 - type ok 66 - check ok 67 - type ok 68 - decode ok 69 - check ok 70 - type ok 71 - decode ok 72 - check ok 73 - type ok 74 - decode ok 75 - check ok 76 - type ok 77 - decode ok 78 - check ok 79 - type ok 80 - decode ok 81 - check ok 82 - type ok 83 - decode ok 84 - check ok 85 - type ok 86 - decode ok 87 - check ok 88 - type ok 89 - decode ok 90 - check ok 91 - type ok 92 - decode ok 93 - check ok 94 - type ok 95 - decode ok 96 - check ok 97 - type ok 98 - decode ok 99 - compare ok 100 - check ok 101 - 
type ok 102 - decode ok 103 - check ok 104 - type ok 105 - decode ok 106 - compare ok 107 - check ok 108 - type ok 109 - decode ok 110 - check ok 111 - type ok 112 - decode ok 113 - check ok 114 - type ok 115 - check ok 116 - compare ok 117 - check ok 118 - type ok 119 - decode ok 120 - check ok 121 - type ok 122 - decode ok 123 - check ok 124 - type ok 125 - decode ok 126 - compare ok 127 - check ok 128 - type ok 129 - decode ok 130 - check ok 131 - type ok 132 - decode ok 133 - check ok 134 - type ok 135 - decode ok 136 - check ok 137 - type ok 138 - decode ok 139 - nothing more ok 140 - no magic detected ok 141 - return value on step 0 ok 142 - buffer overflow on step 0 ok 143 - return value on step 1 ok 144 - buffer overflow on step 1 ok 145 - return value on step 2 ok 146 - buffer overflow on step 2 ok 147 - return value on step 3 ok 148 - buffer overflow on step 3 ok 149 - return value on step 4 ok 150 - buffer overflow on step 4 ok 151 - return value on step 5 ok 152 - buffer overflow on step 5 ok 153 - return value on step 6 ok 154 - buffer overflow on step 6 ok 155 - return value on step 7 ok 156 - buffer overflow on step 7 ok 157 - return value on step 8 ok 158 - buffer overflow on step 8 ok 159 - return value on step 9 ok 160 - buffer overflow on step 9 ok 161 - return value on step 10 ok 162 - buffer overflow on step 10 ok 163 - return value on step 11 ok 164 - buffer overflow on step 11 ok 165 - return value on step 12 ok 166 - buffer overflow on step 12 ok 167 - return value on step 13 ok 168 - buffer overflow on step 13 ok 169 - return value on step 14 ok 170 - buffer overflow on step 14 ok 171 - return value on step 15 ok 172 - buffer overflow on step 15 ok 173 - return value on step 16 ok 174 - buffer overflow on step 16 ok 175 - return value on step 17 ok 176 - buffer overflow on step 17 ok 177 - return value on step 18 ok 178 - buffer overflow on step 18 ok 179 - return value on step 19 ok 180 - buffer overflow on step 19 ok 181 - return value on 
step 20 ok 182 - buffer overflow on step 20 ok 183 - return value on step 21 ok 184 - buffer overflow on step 21 ok 185 - return value on step 22 ok 186 - buffer overflow on step 22 ok 187 - return value on step 23 ok 188 - buffer overflow on step 23 ok 189 - return value on step 24 ok 190 - buffer overflow on step 24 ok 191 - return value on step 25 ok 192 - buffer overflow on step 25 ok 193 - return value on step 26 ok 194 - buffer overflow on step 26 ok 195 - return value on step 27 ok 196 - buffer overflow on step 27 ok 197 - return value on step 28 ok 198 - buffer overflow on step 28 ok 199 - return value on step 29 ok 200 - buffer overflow on step 29 ok 201 - return value on step 30 ok 202 - buffer overflow on step 30 ok 203 - return value on step 31 ok 204 - buffer overflow on step 31 ok 205 - return value on step 32 ok 206 - buffer overflow on step 32 ok 207 - return value on step 33 ok 208 - buffer overflow on step 33 ok 209 - return value on step 34 ok 210 - buffer overflow on step 34 ok 211 - return value on step 35 ok 212 - buffer overflow on step 35 ok 213 - return value on step 36 ok 214 - buffer overflow on step 36 ok 215 - return value on step 37 ok 216 - buffer overflow on step 37 ok 217 - return value on step 38 ok 218 - buffer overflow on step 38 ok 219 - return value on step 39 ok 220 - buffer overflow on step 39 ok 221 - return value on step 40 ok 222 - buffer overflow on step 40 ok 223 - return value on step 41 ok 224 - buffer overflow on step 41 ok 225 - return value on step 42 ok 226 - buffer overflow on step 42 ok 227 - return value on step 43 ok 228 - buffer overflow on step 43 ok 229 - return value on step 44 ok 230 - buffer overflow on step 44 ok 231 - return value on step 45 ok 232 - buffer overflow on step 45 ok 233 - return value on step 46 ok 234 - buffer overflow on step 46 ok 235 - return value on step 47 ok 236 - buffer overflow on step 47 ok 237 - return value on step 48 ok 238 - buffer overflow on step 48 ok 239 - return value 
on step 49 ok 240 - buffer overflow on step 49 ok 241 - return value on step 50 ok 242 - buffer overflow on step 50 ok 243 - return value on step 51 ok 244 - buffer overflow on step 51 ok 245 - return value on step 52 ok 246 - buffer overflow on step 52 ok 247 - return value on step 53 ok 248 - buffer overflow on step 53 ok 249 - return value on step 54 ok 250 - buffer overflow on step 54 ok 251 - return value on step 55 ok 252 - buffer overflow on step 55 ok 253 - return value on step 56 ok 254 - buffer overflow on step 56 ok 255 - return value on step 57 ok 256 - buffer overflow on step 57 ok 257 - return value on step 58 ok 258 - buffer overflow on step 58 ok 259 - return value on step 59 ok 260 - buffer overflow on step 59 ok 261 - return value on step 60 ok 262 - buffer overflow on step 60 ok 263 - return value on step 61 ok 264 - buffer overflow on step 61 ok 265 - return value on step 62 ok 266 - buffer overflow on step 62 ok 267 - return value on step 63 ok 268 - buffer overflow on step 63 ok 269 - return value on step 64 ok 270 - buffer overflow on step 64 ok 271 - return value on step 65 ok 272 - buffer overflow on step 65 ok 273 - return value on step 66 ok 274 - buffer overflow on step 66 ok 275 - return value on step 67 ok 276 - buffer overflow on step 67 ok 277 - return value on step 68 ok 278 - buffer overflow on step 68 ok 279 - return value on step 69 ok 280 - buffer overflow on step 69 ok 281 - return value on step 70 ok 282 - buffer overflow on step 70 # *** test_format: done *** ok 16 - subtests 1..10 # *** test_mp_print *** ok 1 - mp_snprint return value ok 2 - mp_snprint result ok 3 - mp_snprint limit = 0 ok 4 - mp_snprint limit = 1 ok 5 - mp_snprint limit = 2 ok 6 - mp_snprint limit = expected ok 7 - mp_snprint limit = expected + 1 ok 8 - mp_fprint return value ok 9 - mp_fprint result ok 10 - mp_fprint I/O error # *** test_mp_print: done *** ok 17 - subtests 1..65 # *** test_mp_check *** ok 1 - invalid fixmap 1 ok 2 - invalid fixmap 2 ok 3 - 
invalid fixmap 3 ok 4 - invalid fixarray 1 ok 5 - invalid fixarray 2 ok 6 - invalid fixarray 3 ok 7 - invalid fixstr 1 ok 8 - invalid fixstr 2 ok 9 - invalid fixstr 3 ok 10 - invalid bin8 1 ok 11 - invalid bin8 2 ok 12 - invalid bin16 1 ok 13 - invalid bin16 2 ok 14 - invalid bin32 1 ok 15 - invalid bin32 2 ok 16 - invalid ext8 1 ok 17 - invalid ext8 2 ok 18 - invalid ext8 3 ok 19 - invalid ext8 4 ok 20 - invalid ext16 1 ok 21 - invalid ext16 2 ok 22 - invalid ext16 3 ok 23 - invalid ext16 4 ok 24 - invalid ext32 1 ok 25 - invalid ext32 2 ok 26 - invalid ext32 3 ok 27 - invalid ext32 4 ok 28 - invalid float32 1 ok 29 - invalid float32 2 ok 30 - invalid float64 1 ok 31 - invalid float64 2 ok 32 - invalid uint8 1 ok 33 - invalid uint16 1 ok 34 - invalid uint32 1 ok 35 - invalid uint64 1 ok 36 - invalid int8 1 ok 37 - invalid int16 1 ok 38 - invalid int32 1 ok 39 - invalid int64 1 ok 40 - invalid fixext8 1 ok 41 - invalid fixext8 2 ok 42 - invalid fixext16 1 ok 43 - invalid fixext16 2 ok 44 - invalid fixext32 1 ok 45 - invalid fixext32 2 ok 46 - invalid fixext64 1 ok 47 - invalid fixext64 2 ok 48 - invalid fixext128 1 ok 49 - invalid fixext128 2 ok 50 - invalid str8 1 ok 51 - invalid str8 2 ok 52 - invalid str16 1 ok 53 - invalid str16 2 ok 54 - invalid str32 1 ok 55 - invalid str32 2 ok 56 - invalid array16 1 ok 57 - invalid array16 2 ok 58 - invalid array32 1 ok 59 - invalid array32 2 ok 60 - invalid map16 1 ok 61 - invalid map16 2 ok 62 - invalid map16 2 ok 63 - invalid map32 1 ok 64 - invalid map32 2 ok 65 - invalid map32 3 # *** test_mp_check: done *** ok 18 - subtests 1..96 # *** test_numbers *** ok 1 - mp_read_int32(mp_encode_uint(123)) check success ok 2 - mp_read_int32(mp_encode_uint(123)) check pos advanced ok 3 - mp_read_int32(mp_encode_uint(123)) check result ok 4 - mp_read_int32(mp_encode_uint(12345)) check success ok 5 - mp_read_int32(mp_encode_uint(12345)) check pos advanced ok 6 - mp_read_int32(mp_encode_uint(12345)) check result ok 7 - 
mp_read_int32(mp_encode_uint(2147483647)) check success ok 8 - mp_read_int32(mp_encode_uint(2147483647)) check pos advanced ok 9 - mp_read_int32(mp_encode_uint(2147483647)) check result ok 10 - mp_read_int32(mp_encode_uint(2147483648)) check fail ok 11 - mp_read_int32(mp_encode_uint(2147483648)) check pos unchanged ok 12 - mp_read_int32(mp_encode_int(-123)) check success ok 13 - mp_read_int32(mp_encode_int(-123)) check pos advanced ok 14 - mp_read_int32(mp_encode_int(-123)) check result ok 15 - mp_read_int32(mp_encode_int(-12345)) check success ok 16 - mp_read_int32(mp_encode_int(-12345)) check pos advanced ok 17 - mp_read_int32(mp_encode_int(-12345)) check result ok 18 - mp_read_int32(mp_encode_int(-2147483648)) check success ok 19 - mp_read_int32(mp_encode_int(-2147483648)) check pos advanced ok 20 - mp_read_int32(mp_encode_int(-2147483648)) check result ok 21 - mp_read_int32(mp_encode_int(-2147483649LL)) check fail ok 22 - mp_read_int32(mp_encode_int(-2147483649LL)) check pos unchanged ok 23 - mp_read_int32(mp_encode_float(-1e2)) check fail ok 24 - mp_read_int32(mp_encode_float(-1e2)) check pos unchanged ok 25 - mp_read_int32(mp_encode_double(1.2345)) check fail ok 26 - mp_read_int32(mp_encode_double(1.2345)) check pos unchanged ok 27 - mp_read_int32(mp_encode_map(5)) check fail ok 28 - mp_read_int32(mp_encode_map(5)) check pos unchanged ok 29 - mp_read_int64(mp_encode_uint(123)) check success ok 30 - mp_read_int64(mp_encode_uint(123)) check pos advanced ok 31 - mp_read_int64(mp_encode_uint(123)) check result ok 32 - mp_read_int64(mp_encode_uint(12345)) check success ok 33 - mp_read_int64(mp_encode_uint(12345)) check pos advanced ok 34 - mp_read_int64(mp_encode_uint(12345)) check result ok 35 - mp_read_int64(mp_encode_uint(123456789)) check success ok 36 - mp_read_int64(mp_encode_uint(123456789)) check pos advanced ok 37 - mp_read_int64(mp_encode_uint(123456789)) check result ok 38 - mp_read_int64(mp_encode_uint(9223372036854775807ULL)) check success ok 39 - 
mp_read_int64(mp_encode_uint(9223372036854775807ULL)) check pos advanced ok 40 - mp_read_int64(mp_encode_uint(9223372036854775807ULL)) check result ok 41 - mp_read_int64(mp_encode_uint(9223372036854775808ULL)) check fail ok 42 - mp_read_int64(mp_encode_uint(9223372036854775808ULL)) check pos unchanged ok 43 - mp_read_int64(mp_encode_int(-123)) check success ok 44 - mp_read_int64(mp_encode_int(-123)) check pos advanced ok 45 - mp_read_int64(mp_encode_int(-123)) check result ok 46 - mp_read_int64(mp_encode_int(-12345)) check success ok 47 - mp_read_int64(mp_encode_int(-12345)) check pos advanced ok 48 - mp_read_int64(mp_encode_int(-12345)) check result ok 49 - mp_read_int64(mp_encode_int(-123456789)) check success ok 50 - mp_read_int64(mp_encode_int(-123456789)) check pos advanced ok 51 - mp_read_int64(mp_encode_int(-123456789)) check result ok 52 - mp_read_int64(mp_encode_int(-9223372036854775807LL)) check success ok 53 - mp_read_int64(mp_encode_int(-9223372036854775807LL)) check pos advanced ok 54 - mp_read_int64(mp_encode_int(-9223372036854775807LL)) check result ok 55 - mp_read_int64(mp_encode_float(100)) check fail ok 56 - mp_read_int64(mp_encode_float(100)) check pos unchanged ok 57 - mp_read_int64(mp_encode_double(-5.4321)) check fail ok 58 - mp_read_int64(mp_encode_double(-5.4321)) check pos unchanged ok 59 - mp_read_int64(mp_encode_array(10)) check fail ok 60 - mp_read_int64(mp_encode_array(10)) check pos unchanged ok 61 - mp_read_double(mp_encode_uint(123)) check success ok 62 - mp_read_double(mp_encode_uint(123)) check pos advanced ok 63 - mp_read_double(mp_encode_uint(123)) check result ok 64 - mp_read_double(mp_encode_uint(12345)) check success ok 65 - mp_read_double(mp_encode_uint(12345)) check pos advanced ok 66 - mp_read_double(mp_encode_uint(12345)) check result ok 67 - mp_read_double(mp_encode_uint(123456789)) check success ok 68 - mp_read_double(mp_encode_uint(123456789)) check pos advanced ok 69 - mp_read_double(mp_encode_uint(123456789)) check 
result ok 70 - mp_read_double(mp_encode_uint(1234567890000ULL)) check success ok 71 - mp_read_double(mp_encode_uint(1234567890000ULL)) check pos advanced ok 72 - mp_read_double(mp_encode_uint(1234567890000ULL)) check result ok 73 - mp_read_double(mp_encode_uint(123456789123456789ULL)) check fail ok 74 - mp_read_double(mp_encode_uint(123456789123456789ULL)) check pos unchanged ok 75 - mp_read_double(mp_encode_int(-123)) check success ok 76 - mp_read_double(mp_encode_int(-123)) check pos advanced ok 77 - mp_read_double(mp_encode_int(-123)) check result ok 78 - mp_read_double(mp_encode_int(-12345)) check success ok 79 - mp_read_double(mp_encode_int(-12345)) check pos advanced ok 80 - mp_read_double(mp_encode_int(-12345)) check result ok 81 - mp_read_double(mp_encode_int(-123456789)) check success ok 82 - mp_read_double(mp_encode_int(-123456789)) check pos advanced ok 83 - mp_read_double(mp_encode_int(-123456789)) check result ok 84 - mp_read_double(mp_encode_int(-1234567890000LL)) check success ok 85 - mp_read_double(mp_encode_int(-1234567890000LL)) check pos advanced ok 86 - mp_read_double(mp_encode_int(-1234567890000LL)) check result ok 87 - mp_read_double(mp_encode_int(-123456789123456789LL)) check fail ok 88 - mp_read_double(mp_encode_int(-123456789123456789LL)) check pos unchanged ok 89 - mp_read_double(mp_encode_float(6.565e6)) check success ok 90 - mp_read_double(mp_encode_float(6.565e6)) check pos advanced ok 91 - mp_read_double(mp_encode_float(6.565e6)) check result ok 92 - mp_read_double(mp_encode_double(-5.555)) check success ok 93 - mp_read_double(mp_encode_double(-5.555)) check pos advanced ok 94 - mp_read_double(mp_encode_double(-5.555)) check result ok 95 - mp_read_double(mp_encode_strl(100)) check fail ok 96 - mp_read_double(mp_encode_strl(100)) check pos unchanged # *** test_numbers: done *** ok 19 - subtests 1..4 # *** test_overflow *** ok 1 - mp_check array overflow ok 2 - mp_check map overflow ok 3 - mp_check str overflow ok 4 - mp_check bin 
overflow # *** test_overflow: done *** ok 20 - subtests tarantool_1.9.1.26.g63eb81e3c/test/unit/stailq.c0000664000000000000000000000540113306560010017577 0ustar rootroot#include "salad/stailq.h" #include #include #include "unit.h" #define PLAN 68 #define ITEMS 7 struct test { char ch; int no; struct stailq_entry next; }; static struct test items[ITEMS]; static struct stailq head, tail; int main(void) { int i; struct test *it; struct stailq_entry *entry; stailq_create(&head); plan(PLAN); ok(stailq_empty(&head), "list is empty"); stailq_reverse(&head); ok(stailq_empty(&head), "list is empty after reverse"); for (i = 0; i < ITEMS; i++) { items[i].no = i; stailq_add_tail(&head, &(items[i].next)); } is(stailq_first(&head), &items[0].next, "first item"); is(stailq_last(&head), &items[6].next, "last item"); i = 0; stailq_foreach(entry, &head) { is(entry, &items[i].next, "element (foreach) %d", i); i++; } isnt(stailq_first(&head), &items[ITEMS - 1].next, "first item"); ok(!stailq_empty(&head), "head is not empty"); is(stailq_first_entry(&head, struct test, next), &items[0], "first entry"); for (i = 0; i < ITEMS; i++) is(stailq_shift(&head), &items[i].next, "shift item %d", i); ok(stailq_empty(&head), "list is empty after shift"); stailq_create(&head); ok(stailq_empty(&head), "next is empty"); for (i = 0; i < ITEMS; i++) { items[i].no = i; stailq_add_entry(&head, &items[i], next); } stailq_foreach_entry(it, &head, next) { i--; is(it, items + i, "element (foreach_entry) %d", i); } stailq_create(&head); for (i = 0; i < ITEMS; i++) { items[i].no = ITEMS - i; stailq_add_tail_entry(&head, &items[i], next); } i = 0; stailq_foreach_entry(it, &head, next) { is(it, items + i, "element (foreach_entry) %d", i); i++; } stailq_create(&head); for (i = 0; i < ITEMS; i++) { items[i].no = ITEMS - i; stailq_add_tail_entry(&head, &items[i], next); } stailq_cut_tail(&head, NULL, &tail); ok(stailq_empty(&head), "head is empty after cut at first"); i = 0; stailq_foreach_entry(it, &tail, next) { 
is(it, items + i, "tail element after cut at first %d", i); i++; } stailq_concat(&head, &tail); stailq_cut_tail(&head, stailq_last(&head), &tail); ok(stailq_empty(&tail), "tail is empty after cut at last"); i = 0; stailq_foreach_entry(it, &head, next) { is(it, items + i, "head element after cut at last %d", i); i++; } stailq_concat(&head, &tail); stailq_cut_tail(&head, &items[3].next, &tail); i = 0; stailq_foreach_entry(it, &head, next) { is(it, items + i, "head element after cut at middle %d", i); i++; } stailq_foreach_entry(it, &tail, next) { is(it, items + i, "tail element after cut at middle %d", i); i++; } stailq_concat(&head, &tail); ok(stailq_empty(&tail), "tail is empty after concat"); i = 0; stailq_foreach_entry(it, &head, next) { is(it, items + i, "head element after concat %d", i); i++; } return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/heap_iterator.c0000664000000000000000000001105513306560010021132 0ustar rootroot#include #include #include #include #include #include #include "unit.h" #define HEAP_FORWARD_DECLARATION #include "salad/heap.h" #undef HEAP_FORWARD_DECLARATION struct test_type { uint32_t val1; uint32_t val2; char c; struct heap_node node; }; int test_type_less(const heap_t *heap, const struct heap_node *a, const struct heap_node *b) { const struct test_type *left = (struct test_type *)((char *)a - offsetof(struct test_type, node)); const struct test_type *right = (struct test_type *)((char *)b - offsetof(struct test_type, node)); return left->val1 < right->val1; } #define HEAP_NAME test_heap #define HEAP_LESS(h, a, b) test_type_less(h, a, b) #include "salad/heap.h" void free_all_nodes(heap_t *p_heap) { struct test_type *root_value; for (heap_off_t i = 0; i < p_heap->size; ++i) { root_value = (struct test_type *) ((char *)p_heap->harr[i] - offsetof(struct test_type, node)); free(root_value); } } static void test_iterator_create() { header(); struct test_type *value, *root_value; heap_t heap; test_heap_create(&heap); value = 
(struct test_type *)malloc(sizeof(struct test_type)); value->val1 = 0; test_heap_insert(&heap, &value->node); struct heap_iterator it; test_heap_iterator_init(&heap, &it); if (it.curr_pos != 0) fail("incorrect position after create", "it.curr_pos != 0"); free_all_nodes(&heap); footer(); } static void test_iterator_empty() { header(); struct heap_node *nd; heap_t heap; test_heap_create(&heap); struct heap_iterator it; test_heap_iterator_init(&heap, &it); nd = test_heap_iterator_next(&it); if (nd != NULL) fail("incorrect node", "nd != NULL"); free_all_nodes(&heap); footer(); } static void test_iterator_small() { header(); struct test_type *value, *root_value; struct heap_node *test_node; heap_t heap; test_heap_create(&heap); for (uint32_t i = 4; i > 0; --i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = i; test_heap_insert(&heap, &value->node); } struct heap_iterator it; bool used_key[5]; memset((void *)used_key, 0, sizeof(used_key)); test_heap_iterator_init(&heap, &it); test_node = NULL; for (uint32_t i = 0; i < 4; ++i) { test_node = test_heap_iterator_next(&it); if (test_node == NULL) fail("NULL returned from iterator", "test_node == NULL"); value = (struct test_type *)((char *)test_node - offsetof(struct test_type, node)); uint32_t val = value->val1; if (val < 1 || val > 5) fail("from iterator returned incorrect value", "val < 1 || val > 5"); if (used_key[val]) fail("from iterator some value returned twice", "used[val]"); used_key[val] = 1; } bool f = true; for (uint32_t i = 1; i < 5; ++i) f = used_key[i] && f; if (!f) fail("some node was skipped", "!f"); test_node = test_heap_iterator_next(&it); if (test_node != NULL) fail("after all iterator returns not NULL", "test_node != NULL"); free_all_nodes(&heap); footer(); } static void test_iterator_large() { header(); uint32_t const TEST_CASE_SIZE = 1000; struct test_type *value, *root_value; struct heap_node *test_node; heap_t heap; test_heap_create(&heap); for (uint32_t i = 
TEST_CASE_SIZE; i > 0; --i) { value = (struct test_type *)malloc(sizeof(struct test_type)); value->val1 = i; test_heap_insert(&heap, &value->node); } struct heap_iterator it; bool used_key[TEST_CASE_SIZE + 1]; memset((void *)used_key, 0, sizeof(used_key)); test_heap_iterator_init(&heap, &it); test_node = NULL; for (uint32_t i = 0; i < TEST_CASE_SIZE; ++i) { test_node = test_heap_iterator_next(&it); if (test_node == NULL) fail("NULL returned from iterator", "test_node == NULL"); value = (struct test_type *)((char *)test_node - offsetof(struct test_type, node)); uint32_t val = value->val1; if (val == 0 || val > TEST_CASE_SIZE) fail("from iterator returned incorrect value", "val < 0 || val > TEST_CASE_SIZE"); if (used_key[val]) fail("from iterator some value returned twice", "used[val]"); used_key[val] = 1; } bool f = true; for (uint32_t i = 1; i < TEST_CASE_SIZE; ++i) { f = used_key[i] && f; } if (!f) fail("some node was skipped", "!f"); test_node = test_heap_iterator_next(&it); if (test_node != NULL) fail("after all iterator returns not nil", "test_node != NULL"); free_all_nodes(&heap); footer(); } int main(int argc, const char** argv) { srand(179); test_iterator_create(); test_iterator_empty(); test_iterator_small(); test_iterator_large(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/bloom.cc0000664000000000000000000001112313306565107017567 0ustar rootroot#include "salad/bloom.h" #include #include #include using namespace std; uint32_t h(uint32_t i) { return i; } void simple_test() { cout << "*** " << __func__ << " ***" << endl; struct quota q; quota_init(&q, 100500); srand(time(0)); uint32_t error_count = 0; uint32_t fp_rate_too_big = 0; for (double p = 0.001; p < 0.5; p *= 1.3) { uint64_t tests = 0; uint64_t false_positive = 0; for (uint32_t count = 1000; count <= 10000; count *= 2) { struct bloom bloom; bloom_create(&bloom, count, p, &q); unordered_set check; for (uint32_t i = 0; i < count; i++) { uint32_t val = rand() % (count * 10); check.insert(val); 
bloom_add(&bloom, h(val)); } for (uint32_t i = 0; i < count * 10; i++) { bool has = check.find(i) != check.end(); bool bloom_possible = bloom_possible_has(&bloom, h(i)); tests++; if (has && !bloom_possible) error_count++; if (!has && bloom_possible) false_positive++; } bloom_destroy(&bloom, &q); } double fp_rate = (double)false_positive / tests; double excess = fp_rate / p; if (fp_rate > p) fp_rate_too_big++; } cout << "error_count = " << error_count << endl; cout << "fp_rate_too_big = " << fp_rate_too_big << endl; cout << "memory after destruction = " << quota_used(&q) << endl << endl; } void store_load_test() { cout << "*** " << __func__ << " ***" << endl; struct quota q; quota_init(&q, 100500); srand(time(0)); uint32_t error_count = 0; uint32_t fp_rate_too_big = 0; for (double p = 0.01; p < 0.5; p *= 1.5) { uint64_t tests = 0; uint64_t false_positive = 0; for (uint32_t count = 300; count <= 3000; count *= 10) { struct bloom bloom; bloom_create(&bloom, count, p, &q); unordered_set check; for (uint32_t i = 0; i < count; i++) { uint32_t val = rand() % (count * 10); check.insert(val); bloom_add(&bloom, h(val)); } struct bloom test = bloom; char *buf = (char *)malloc(bloom_store_size(&bloom)); bloom_store(&bloom, buf); bloom_destroy(&bloom, &q); memset(&bloom, '#', sizeof(bloom)); bloom_load_table(&test, buf, &q); free(buf); for (uint32_t i = 0; i < count * 10; i++) { bool has = check.find(i) != check.end(); bool bloom_possible = bloom_possible_has(&test, h(i)); tests++; if (has && !bloom_possible) error_count++; if (!has && bloom_possible) false_positive++; } bloom_destroy(&test, &q); } double fp_rate = (double)false_positive / tests; double excess = fp_rate / p; if (fp_rate > p) fp_rate_too_big++; } cout << "error_count = " << error_count << endl; cout << "fp_rate_too_big = " << fp_rate_too_big << endl; cout << "memory after destruction = " << quota_used(&q) << endl << endl; } void spectrum_test() { cout << "*** " << __func__ << " ***" << endl; struct quota q; 
quota_init(&q, 1005000); double p = 0.01; uint32_t count = 4000; struct bloom_spectrum spectrum; struct bloom bloom; /* using (count) */ bloom_spectrum_create(&spectrum, count, p, &q); for (uint32_t i = 0; i < count; i++) { bloom_spectrum_add(&spectrum, h(i)); } bloom_spectrum_choose(&spectrum, &bloom); bloom_spectrum_destroy(&spectrum, &q); uint64_t false_positive = 0; uint64_t error_count = 0; for (uint32_t i = 0; i < count; i++) { if (!bloom_possible_has(&bloom, h(i))) error_count++; } for (uint32_t i = count; i < 2 * count; i++) { if (bloom_possible_has(&bloom, h(i))) false_positive++; } bool fpr_rate_is_good = false_positive < 1.5 * p * count; cout << "bloom table size = " << bloom.table_size << endl; cout << "error_count = " << error_count << endl; cout << "fpr_rate_is_good = " << fpr_rate_is_good << endl; bloom_destroy(&bloom, &q); /* same test using (count * 10) */ bloom_spectrum_create(&spectrum, count * 10, p, &q); for (uint32_t i = 0; i < count; i++) { bloom_spectrum_add(&spectrum, h(i)); } bloom_spectrum_choose(&spectrum, &bloom); bloom_spectrum_destroy(&spectrum, &q); false_positive = 0; error_count = 0; for (uint32_t i = 0; i < count; i++) { if (!bloom_possible_has(&bloom, h(i))) error_count++; } for (uint32_t i = count; i < 2 * count; i++) { if (bloom_possible_has(&bloom, h(i))) false_positive++; } fpr_rate_is_good = false_positive < 1.5 * p * count; cout << "bloom table size = " << bloom.table_size << endl; cout << "error_count = " << error_count << endl; cout << "fpr_rate_is_good = " << fpr_rate_is_good << endl; bloom_destroy(&bloom, &q); cout << "memory after destruction = " << quota_used(&q) << endl << endl; } int main(void) { simple_test(); store_load_test(); spectrum_test(); } tarantool_1.9.1.26.g63eb81e3c/test/unit/csv.c0000664000000000000000000002321613306560010017101 0ustar rootroot/* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "csv/csv.h" #include "unit.h" #include #include int isendl = 1; void print_endl(void *ctx) { fflush(stdout); puts(""); isendl = 1; } void print_field(void *ctx, const char *s, const char *end) { if(!isendl) putchar('\t'); isendl = 0; putchar('|'); for(const char *p = s; p != end && *p; p++) { if((*p == '\r' || *p == '\n') && (p + 1 == end || (*(p + 1) != '\r' && *(p + 1) != '\n'))) putchar('\n'); else putchar(*p); } putchar('|'); fflush(stdout); } void buf_endl(void *ctx) { *(*((char**)ctx))++ = '\n'; } void buf_field(void *ctx, const char *s, const char *end) { *(*((char**)ctx))++ = '|'; for(const char *p = s; p != end && *p; p++) { if((*p == '\r' || *p == '\n') && (p + 1 == end || (*(p + 1) != '\r' && *(p + 1) != '\n'))) *(*((char**)ctx))++ = '\n'; else *(*((char**)ctx))++ = *p; } *(*((char**)ctx))++ = '|'; *(*((char**)ctx))++ = '\t'; } void small_string_test(const char* const s) { struct csv csv; csv_create(&csv); csv.emit_field = print_field; csv.emit_row = print_endl; csv_parse_chunk(&csv, s, s + strlen(s)); csv_finish_parsing(&csv); csv_destroy(&csv); } void common_test(const char *data) { header(); small_string_test(data); footer(); } void test1() { header(); small_string_test("1\n \n1,2,3\n123\n"); footer(); } void test2() { header(); small_string_test( "123,456,abcac,\'multiword field 4\'\n" "none,none,0\n" ",,\n" ",," ); footer(); } void test3() { header(); small_string_test("1,,2"); footer(); } void test4() { header(); small_string_test("123 , 5 , 92 , 0, 0\n" "1, 12 34, 56, \"quote , \", 66\nok"); footer(); } void test5() { header(); const char * const s = "abc\tlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong\t0\n" "123\t456\t\n" "0\t\t\n"; struct csv csv; csv_create(&csv); csv.emit_field = print_field; csv.emit_row = print_endl; csv_setopt(&csv, CSV_OPT_DELIMITER, '\t'); csv_parse_chunk(&csv, s, s + strlen(s)); csv_finish_parsing(&csv); printf("valid: %s\n", csv.error_status == CSV_ER_INVALID ? 
"NO" : "yes"); csv_destroy(&csv); footer(); } void test6() { header(); const char * const s1 = "\n \nabc\nc\"\",\"d\",de\n\nk"; const char * const s2 = "\ne\n\n \n\" \"\n\"quote isn't closed, sorry\n \noh"; struct csv csv; csv_create(&csv); csv.emit_field = print_field; csv.emit_row = print_endl; csv_parse_chunk(&csv, s1, s1 + strlen(s1)); csv_parse_chunk(&csv, s2, s2 + 2); csv_parse_chunk(&csv, s2 + 2, s2 + strlen(s2)); csv_finish_parsing(&csv); printf("valid: %s\n", csv_get_error_status(&csv) == CSV_ER_INVALID ? "NO" : "yes"); csv_destroy(&csv); footer(); } struct counter { size_t line_cnt, fieldsizes_cnt; }; void line_counter(void *ctx) { ((struct counter*)ctx)->line_cnt++; } void fieldsizes_counter(void *ctx, const char *s, const char *end) { ((struct counter*)ctx)->fieldsizes_cnt += end - s; } void big_chunk_separated_test() { header(); struct csv csv; csv_create(&csv); csv_setopt(&csv, CSV_OPT_EMIT_FIELD, fieldsizes_counter); csv_setopt(&csv, CSV_OPT_EMIT_ROW, line_counter); size_t lines = 10000; size_t linelen = 300; size_t chunk_size = 1024; char *buf = malloc(lines * (linelen+4)); size_t bufn = 0; struct counter cnt; cnt.line_cnt = 0; cnt.fieldsizes_cnt = 0; csv_setopt(&csv, CSV_OPT_EMIT_CTX, &cnt); const char *s = "abc, def, def, cba"; for(size_t i = 0; i < lines; i++) { int k = linelen / strlen(s); for(int i = 0; i < k; i++) { memcpy(buf + bufn, s, strlen(s)); bufn += strlen(s); } buf[bufn++] = '\n'; } const char *bufp = buf; while(bufp < buf + bufn - chunk_size) { csv_parse_chunk(&csv, bufp, bufp + chunk_size); bufp += chunk_size; } csv_parse_chunk(&csv, bufp, buf + bufn); csv_finish_parsing(&csv); //without fieldsizes counts without commas and spaces printf("line_cnt=%d, fieldsizes_cnt=%d, %d\n", (int)cnt.line_cnt, (int)cnt.fieldsizes_cnt, (int) (lines * (strlen(s) - 6) * (linelen / strlen(s)))); fail_unless(lines == cnt.line_cnt); fail_unless(lines * (strlen(s) - 6) * (linelen / strlen(s)) == cnt.fieldsizes_cnt); csv_destroy(&csv); free(buf); 
footer(); } void random_generated_test() { header(); const char *rand_test = "\n\r\" ba\r a\ra, \n\"\n\"a\nb\" \raa\rb,\n" "\r, \n\",\r\n\"\n,a, ,\"a\n\n\r \"\r ba\r,b" " a,\n,\"\"a\n\r \"b\" \n,\",a\r,a ,\r\rc" "\" a,b\r\n,\"b\r\"aa \nb \n\r\r\n\n,\rb\nc" ",\n\n aa\n \"\n ab\rab,\r\" b\n\", ,,\r\r" "bab\rb\na\n\"a\ra,\"\",\n\"a\n\n \"\r \ra\n" "a\r\raa a\" ,baab ,a \rbb ,\r \r,\rb,, b" "\n\r\"\nb\n\nb \n,ab \raa\r\"\nb a\"ba,b, c" "\"a\"a \"\r\n\"b \n,b\"\",\nba\n\" \n\na \r" "\nb\rb\"bbba,\" \n\n\n,a,b,a,b,\n\n\n\nb\"\r"; struct csv csv; csv_create(&csv); csv_setopt(&csv, CSV_OPT_EMIT_FIELD, fieldsizes_counter); csv_setopt(&csv, CSV_OPT_EMIT_ROW, line_counter); struct counter cnt; cnt.line_cnt = 0; cnt.fieldsizes_cnt = 0; csv_setopt(&csv, CSV_OPT_EMIT_CTX, &cnt); csv_parse_chunk(&csv, rand_test, rand_test + strlen(rand_test)); csv_finish_parsing(&csv); printf("line_cnt=%d, fieldsizes_cnt=%d\n", (int)cnt.line_cnt, (int)cnt.fieldsizes_cnt); printf("valid: %s\n", csv_get_error_status(&csv) == CSV_ER_INVALID ? 
"NO" : "yes"); csv_destroy(&csv); footer(); } void iter_test1() { header(); struct csv_iterator it; struct csv csv; csv_create(&csv); csv_iterator_create(&it, &csv); int st = 0; const char *buf = ",d ,e\r\n12,42,3\no\n"; while((st = csv_next(&it)) != CSV_IT_EOF) { switch(st) { case CSV_IT_NEEDMORE: csv_feed(&it, buf, strlen(buf)); buf += strlen(buf); break; case CSV_IT_EOL: print_endl(0); break; case CSV_IT_OK: print_field(0, it.field, it.field + it.field_len); break; case CSV_IT_ERROR: printf("\nerror"); break; } } csv_destroy(&csv); footer(); } void iter_test2() { header(); struct csv_iterator it; struct csv csv; csv_create(&csv); csv_iterator_create(&it, &csv); int st = 0; const char ar[] = {'1', '\n', 0, '2', '3', 0, 0}; const char *buf = ar; while((st = csv_next(&it)) != CSV_IT_EOF) { switch(st) { case CSV_IT_NEEDMORE: csv_feed(&it, buf, strlen(buf)); buf += 3; break; case CSV_IT_EOL: print_endl(0); break; case CSV_IT_OK: print_field(0, it.field, it.field + it.field_len); break; case CSV_IT_ERROR: printf("\nerror"); break; } } csv_destroy(&csv); footer(); } void iter_test3() { header(); struct csv_iterator it; struct csv csv; csv_create(&csv); csv_iterator_create(&it, &csv); int st = 0; const char *ar[] = {"1,2,3\r\n", "4,5,6", "", ""}; int i = 0; const char *buf = ar[i++]; while((st = csv_next(&it)) != CSV_IT_EOF) { switch(st) { case CSV_IT_NEEDMORE: csv_feed(&it, buf, strlen(buf)); buf = ar[i++]; break; case CSV_IT_EOL: print_endl(0); break; case CSV_IT_OK: print_field(0, it.field, it.field + it.field_len); break; case CSV_IT_ERROR: printf("\nerror"); break; } } csv_destroy(&csv); footer(); } void csv_out() { header(); const char fields[4][24] = { "abc", "with,comma", "\"in quotes\"", "1 \" quote"}; char buf[54]; int i; struct csv csv; csv_create(&csv); for(i = 0; i < 4; i++) { int len = csv_escape_field(&csv, fields[i], strlen(fields[i]), buf, sizeof(buf)); printf("%s%c", buf, len, i == 3 ? 
'\n' : ','); } footer(); } int main() { test1(); test2(); test3(); test4(); test5(); test6(); // blank lines, invalid csv big_chunk_separated_test(); random_generated_test(); /* comma in quotes */ common_test( "first,last,address,city,zip\n" "John,Doe,120 any st.,\"Anytown, WW\",08123\n" ); /* empty fields */ common_test( "a,b,c\n" "1,\"\",\"\"\n" "2,3,4\n" ); /* escaped quotes */ common_test( "a,b\n" "1,\"ha \"\"ha\"\" ha\"\n" "3,4\n" ); /* json in csv */ common_test( "key,val\n" "1,\"{\"\"type\"\": \"\"Point\"\", \"\"coordinates\"\": [102.0, 0.5]}\"\n" ); /* new lines */ common_test( "a,b,c\n" "1,2,3\n" "\"Once upon \n" "a time\",5,6\n" "7,8,9\n" ); /* new lines with quetes */ common_test( "a,b\n" "1,\"ha\n" "\"\"ha\"\"\n" "ha\"\n" "3,4\n" ); /* utf8 */ common_test( " a,b,c\n" "1,2,3\n" "4,5,а нет ли ошибок?\n" ); /* ending spaces */ common_test(" www , \"aa\"a , \"tt \" \n"); //iterator tests iter_test1(); iter_test2(); iter_test3(); //output test csv_out(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/uri.result0000664000000000000000000017311413306560010020204 0ustar rootroot1..63 1..19 ok 1 - host: parse ok 2 - host: scheme ok 3 - host: login ok 4 - host: password ok 5 - host: host ok 6 - host: service ok 7 - host: path ok 8 - host: query ok 9 - host: fragment ok 10 - host: host_hint ok 11 - host: parse ok 12 - host: scheme ok 13 - host: login ok 14 - host: password ok 15 - host: host ok 16 - host: service ok 17 - host: path ok 18 - host: query ok 19 - host: fragment ok 1 - subtests 1..19 ok 1 - host/: parse ok 2 - host/: scheme ok 3 - host/: login ok 4 - host/: password ok 5 - host/: host ok 6 - host/: service ok 7 - host/: path ok 8 - host/: query ok 9 - host/: fragment ok 10 - host/: host_hint ok 11 - host/: parse ok 12 - host/: scheme ok 13 - host/: login ok 14 - host/: password ok 15 - host/: host ok 16 - host/: service ok 17 - host/: path ok 18 - host/: query ok 19 - host/: fragment ok 2 - subtests 1..19 ok 1 - host/path1/path2/path3: parse ok 2 - 
host/path1/path2/path3: scheme ok 3 - host/path1/path2/path3: login ok 4 - host/path1/path2/path3: password ok 5 - host/path1/path2/path3: host ok 6 - host/path1/path2/path3: service ok 7 - host/path1/path2/path3: path ok 8 - host/path1/path2/path3: query ok 9 - host/path1/path2/path3: fragment ok 10 - host/path1/path2/path3: host_hint ok 11 - host/path1/path2/path3: parse ok 12 - host/path1/path2/path3: scheme ok 13 - host/path1/path2/path3: login ok 14 - host/path1/path2/path3: password ok 15 - host/path1/path2/path3: host ok 16 - host/path1/path2/path3: service ok 17 - host/path1/path2/path3: path ok 18 - host/path1/path2/path3: query ok 19 - host/path1/path2/path3: fragment ok 3 - subtests 1..19 ok 1 - host/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - host/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 3 - host/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - host/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - host/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - host/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - host/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - host/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - host/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - host/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - host/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - host/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - host/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - host/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - host/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - host/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - host/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - host/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - host/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 4 - subtests 1..19 ok 1 - host:service: parse ok 2 - host:service: scheme ok 3 - host:service: login ok 4 - host:service: password ok 5 - host:service: host 
ok 6 - host:service: service ok 7 - host:service: path ok 8 - host:service: query ok 9 - host:service: fragment ok 10 - host:service: host_hint ok 11 - host:service: parse ok 12 - host:service: scheme ok 13 - host:service: login ok 14 - host:service: password ok 15 - host:service: host ok 16 - host:service: service ok 17 - host:service: path ok 18 - host:service: query ok 19 - host:service: fragment ok 5 - subtests 1..19 ok 1 - host:service/: parse ok 2 - host:service/: scheme ok 3 - host:service/: login ok 4 - host:service/: password ok 5 - host:service/: host ok 6 - host:service/: service ok 7 - host:service/: path ok 8 - host:service/: query ok 9 - host:service/: fragment ok 10 - host:service/: host_hint ok 11 - host:service/: parse ok 12 - host:service/: scheme ok 13 - host:service/: login ok 14 - host:service/: password ok 15 - host:service/: host ok 16 - host:service/: service ok 17 - host:service/: path ok 18 - host:service/: query ok 19 - host:service/: fragment ok 6 - subtests 1..19 ok 1 - host:service/path1/path2/path3: parse ok 2 - host:service/path1/path2/path3: scheme ok 3 - host:service/path1/path2/path3: login ok 4 - host:service/path1/path2/path3: password ok 5 - host:service/path1/path2/path3: host ok 6 - host:service/path1/path2/path3: service ok 7 - host:service/path1/path2/path3: path ok 8 - host:service/path1/path2/path3: query ok 9 - host:service/path1/path2/path3: fragment ok 10 - host:service/path1/path2/path3: host_hint ok 11 - host:service/path1/path2/path3: parse ok 12 - host:service/path1/path2/path3: scheme ok 13 - host:service/path1/path2/path3: login ok 14 - host:service/path1/path2/path3: password ok 15 - host:service/path1/path2/path3: host ok 16 - host:service/path1/path2/path3: service ok 17 - host:service/path1/path2/path3: path ok 18 - host:service/path1/path2/path3: query ok 19 - host:service/path1/path2/path3: fragment ok 7 - subtests 1..19 ok 1 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - 
host:service/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 3 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - host:service/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 8 - subtests 1..19 ok 1 - login@host: parse ok 2 - login@host: scheme ok 3 - login@host: login ok 4 - login@host: password ok 5 - login@host: host ok 6 - login@host: service ok 7 - login@host: path ok 8 - login@host: query ok 9 - login@host: fragment ok 10 - login@host: host_hint ok 11 - login@host: parse ok 12 - login@host: scheme ok 13 - login@host: login ok 14 - login@host: password ok 15 - login@host: host ok 16 - login@host: service ok 17 - login@host: path ok 18 - login@host: query ok 19 - login@host: fragment ok 9 - subtests 1..19 ok 1 - login@host/: parse ok 2 - login@host/: scheme ok 3 - login@host/: login ok 4 - login@host/: password ok 5 - login@host/: host ok 6 - login@host/: service ok 7 - login@host/: path ok 8 - login@host/: query ok 9 - login@host/: fragment ok 
10 - login@host/: host_hint ok 11 - login@host/: parse ok 12 - login@host/: scheme ok 13 - login@host/: login ok 14 - login@host/: password ok 15 - login@host/: host ok 16 - login@host/: service ok 17 - login@host/: path ok 18 - login@host/: query ok 19 - login@host/: fragment ok 10 - subtests 1..19 ok 1 - login@host/path1/path2/path3: parse ok 2 - login@host/path1/path2/path3: scheme ok 3 - login@host/path1/path2/path3: login ok 4 - login@host/path1/path2/path3: password ok 5 - login@host/path1/path2/path3: host ok 6 - login@host/path1/path2/path3: service ok 7 - login@host/path1/path2/path3: path ok 8 - login@host/path1/path2/path3: query ok 9 - login@host/path1/path2/path3: fragment ok 10 - login@host/path1/path2/path3: host_hint ok 11 - login@host/path1/path2/path3: parse ok 12 - login@host/path1/path2/path3: scheme ok 13 - login@host/path1/path2/path3: login ok 14 - login@host/path1/path2/path3: password ok 15 - login@host/path1/path2/path3: host ok 16 - login@host/path1/path2/path3: service ok 17 - login@host/path1/path2/path3: path ok 18 - login@host/path1/path2/path3: query ok 19 - login@host/path1/path2/path3: fragment ok 11 - subtests 1..19 ok 1 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 3 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - 
login@host/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - login@host/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 12 - subtests 1..19 ok 1 - login:password@host: parse ok 2 - login:password@host: scheme ok 3 - login:password@host: login ok 4 - login:password@host: password ok 5 - login:password@host: host ok 6 - login:password@host: service ok 7 - login:password@host: path ok 8 - login:password@host: query ok 9 - login:password@host: fragment ok 10 - login:password@host: host_hint ok 11 - login:password@host: parse ok 12 - login:password@host: scheme ok 13 - login:password@host: login ok 14 - login:password@host: password ok 15 - login:password@host: host ok 16 - login:password@host: service ok 17 - login:password@host: path ok 18 - login:password@host: query ok 19 - login:password@host: fragment ok 13 - subtests 1..19 ok 1 - login:@host: parse ok 2 - login:@host: scheme ok 3 - login:@host: login ok 4 - login:@host: password ok 5 - login:@host: host ok 6 - login:@host: service ok 7 - login:@host: path ok 8 - login:@host: query ok 9 - login:@host: fragment ok 10 - login:@host: host_hint ok 11 - login:@host: parse ok 12 - login:@host: scheme ok 13 - login:@host: login ok 14 - login:@host: password ok 15 - login:@host: host ok 16 - login:@host: service ok 17 - login:@host: path ok 18 - login:@host: query ok 19 - login:@host: fragment ok 14 - subtests 1..19 ok 1 - login:password@host/: parse ok 2 - login:password@host/: scheme ok 3 - login:password@host/: login ok 4 - login:password@host/: password ok 5 - login:password@host/: host ok 6 - login:password@host/: service ok 7 - login:password@host/: path ok 8 - login:password@host/: 
query ok 9 - login:password@host/: fragment ok 10 - login:password@host/: host_hint ok 11 - login:password@host/: parse ok 12 - login:password@host/: scheme ok 13 - login:password@host/: login ok 14 - login:password@host/: password ok 15 - login:password@host/: host ok 16 - login:password@host/: service ok 17 - login:password@host/: path ok 18 - login:password@host/: query ok 19 - login:password@host/: fragment ok 15 - subtests 1..19 ok 1 - login:password@host/path1/path2/path3: parse ok 2 - login:password@host/path1/path2/path3: scheme ok 3 - login:password@host/path1/path2/path3: login ok 4 - login:password@host/path1/path2/path3: password ok 5 - login:password@host/path1/path2/path3: host ok 6 - login:password@host/path1/path2/path3: service ok 7 - login:password@host/path1/path2/path3: path ok 8 - login:password@host/path1/path2/path3: query ok 9 - login:password@host/path1/path2/path3: fragment ok 10 - login:password@host/path1/path2/path3: host_hint ok 11 - login:password@host/path1/path2/path3: parse ok 12 - login:password@host/path1/path2/path3: scheme ok 13 - login:password@host/path1/path2/path3: login ok 14 - login:password@host/path1/path2/path3: password ok 15 - login:password@host/path1/path2/path3: host ok 16 - login:password@host/path1/path2/path3: service ok 17 - login:password@host/path1/path2/path3: path ok 18 - login:password@host/path1/path2/path3: query ok 19 - login:password@host/path1/path2/path3: fragment ok 16 - subtests 1..19 ok 1 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 3 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - 
login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - login:password@host/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 17 - subtests 1..19 ok 1 - login:password@host:service: parse ok 2 - login:password@host:service: scheme ok 3 - login:password@host:service: login ok 4 - login:password@host:service: password ok 5 - login:password@host:service: host ok 6 - login:password@host:service: service ok 7 - login:password@host:service: path ok 8 - login:password@host:service: query ok 9 - login:password@host:service: fragment ok 10 - login:password@host:service: host_hint ok 11 - login:password@host:service: parse ok 12 - login:password@host:service: scheme ok 13 - login:password@host:service: login ok 14 - login:password@host:service: password ok 15 - login:password@host:service: host ok 16 - login:password@host:service: service ok 17 - login:password@host:service: path ok 18 - login:password@host:service: query ok 19 - login:password@host:service: fragment ok 18 - subtests 1..19 ok 1 - login:password@host:service/: parse ok 2 - login:password@host:service/: scheme ok 3 - login:password@host:service/: login ok 4 - login:password@host:service/: password ok 5 - login:password@host:service/: host ok 6 - 
login:password@host:service/: service ok 7 - login:password@host:service/: path ok 8 - login:password@host:service/: query ok 9 - login:password@host:service/: fragment ok 10 - login:password@host:service/: host_hint ok 11 - login:password@host:service/: parse ok 12 - login:password@host:service/: scheme ok 13 - login:password@host:service/: login ok 14 - login:password@host:service/: password ok 15 - login:password@host:service/: host ok 16 - login:password@host:service/: service ok 17 - login:password@host:service/: path ok 18 - login:password@host:service/: query ok 19 - login:password@host:service/: fragment ok 19 - subtests 1..19 ok 1 - login:password@host:service/path1/path2/path3: parse ok 2 - login:password@host:service/path1/path2/path3: scheme ok 3 - login:password@host:service/path1/path2/path3: login ok 4 - login:password@host:service/path1/path2/path3: password ok 5 - login:password@host:service/path1/path2/path3: host ok 6 - login:password@host:service/path1/path2/path3: service ok 7 - login:password@host:service/path1/path2/path3: path ok 8 - login:password@host:service/path1/path2/path3: query ok 9 - login:password@host:service/path1/path2/path3: fragment ok 10 - login:password@host:service/path1/path2/path3: host_hint ok 11 - login:password@host:service/path1/path2/path3: parse ok 12 - login:password@host:service/path1/path2/path3: scheme ok 13 - login:password@host:service/path1/path2/path3: login ok 14 - login:password@host:service/path1/path2/path3: password ok 15 - login:password@host:service/path1/path2/path3: host ok 16 - login:password@host:service/path1/path2/path3: service ok 17 - login:password@host:service/path1/path2/path3: path ok 18 - login:password@host:service/path1/path2/path3: query ok 19 - login:password@host:service/path1/path2/path3: fragment ok 20 - subtests 1..19 ok 1 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 
3 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 21 - subtests 1..19 ok 1 - scheme://login:password@host:service: parse ok 2 - scheme://login:password@host:service: scheme ok 3 - scheme://login:password@host:service: login ok 4 - scheme://login:password@host:service: password ok 5 - scheme://login:password@host:service: host ok 6 - scheme://login:password@host:service: service ok 7 - scheme://login:password@host:service: path ok 8 - scheme://login:password@host:service: query ok 9 - scheme://login:password@host:service: fragment ok 10 - scheme://login:password@host:service: host_hint ok 11 - scheme://login:password@host:service: parse 
ok 12 - scheme://login:password@host:service: scheme ok 13 - scheme://login:password@host:service: login ok 14 - scheme://login:password@host:service: password ok 15 - scheme://login:password@host:service: host ok 16 - scheme://login:password@host:service: service ok 17 - scheme://login:password@host:service: path ok 18 - scheme://login:password@host:service: query ok 19 - scheme://login:password@host:service: fragment ok 22 - subtests 1..19 ok 1 - scheme://login:password@host:service/: parse ok 2 - scheme://login:password@host:service/: scheme ok 3 - scheme://login:password@host:service/: login ok 4 - scheme://login:password@host:service/: password ok 5 - scheme://login:password@host:service/: host ok 6 - scheme://login:password@host:service/: service ok 7 - scheme://login:password@host:service/: path ok 8 - scheme://login:password@host:service/: query ok 9 - scheme://login:password@host:service/: fragment ok 10 - scheme://login:password@host:service/: host_hint ok 11 - scheme://login:password@host:service/: parse ok 12 - scheme://login:password@host:service/: scheme ok 13 - scheme://login:password@host:service/: login ok 14 - scheme://login:password@host:service/: password ok 15 - scheme://login:password@host:service/: host ok 16 - scheme://login:password@host:service/: service ok 17 - scheme://login:password@host:service/: path ok 18 - scheme://login:password@host:service/: query ok 19 - scheme://login:password@host:service/: fragment ok 23 - subtests 1..19 ok 1 - scheme://login:password@host:service/path1/path2/path3: parse ok 2 - scheme://login:password@host:service/path1/path2/path3: scheme ok 3 - scheme://login:password@host:service/path1/path2/path3: login ok 4 - scheme://login:password@host:service/path1/path2/path3: password ok 5 - scheme://login:password@host:service/path1/path2/path3: host ok 6 - scheme://login:password@host:service/path1/path2/path3: service ok 7 - scheme://login:password@host:service/path1/path2/path3: path ok 8 - 
scheme://login:password@host:service/path1/path2/path3: query ok 9 - scheme://login:password@host:service/path1/path2/path3: fragment ok 10 - scheme://login:password@host:service/path1/path2/path3: host_hint ok 11 - scheme://login:password@host:service/path1/path2/path3: parse ok 12 - scheme://login:password@host:service/path1/path2/path3: scheme ok 13 - scheme://login:password@host:service/path1/path2/path3: login ok 14 - scheme://login:password@host:service/path1/path2/path3: password ok 15 - scheme://login:password@host:service/path1/path2/path3: host ok 16 - scheme://login:password@host:service/path1/path2/path3: service ok 17 - scheme://login:password@host:service/path1/path2/path3: path ok 18 - scheme://login:password@host:service/path1/path2/path3: query ok 19 - scheme://login:password@host:service/path1/path2/path3: fragment ok 24 - subtests 1..19 ok 1 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 3 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - 
scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - scheme://login:password@host:service/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 25 - subtests 1..19 ok 1 - host/path: parse ok 2 - host/path: scheme ok 3 - host/path: login ok 4 - host/path: password ok 5 - host/path: host ok 6 - host/path: service ok 7 - host/path: path ok 8 - host/path: query ok 9 - host/path: fragment ok 10 - host/path: host_hint ok 11 - host/path: parse ok 12 - host/path: scheme ok 13 - host/path: login ok 14 - host/path: password ok 15 - host/path: host ok 16 - host/path: service ok 17 - host/path: path ok 18 - host/path: query ok 19 - host/path: fragment ok 26 - subtests 1..19 ok 1 - host//: parse ok 2 - host//: scheme ok 3 - host//: login ok 4 - host//: password ok 5 - host//: host ok 6 - host//: service ok 7 - host//: path ok 8 - host//: query ok 9 - host//: fragment ok 10 - host//: host_hint ok 11 - host//: parse ok 12 - host//: scheme ok 13 - host//: login ok 14 - host//: password ok 15 - host//: host ok 16 - host//: service ok 17 - host//: path ok 18 - host//: query ok 19 - host//: fragment ok 27 - subtests 1..19 ok 1 - host//path: parse ok 2 - host//path: scheme ok 3 - host//path: login ok 4 - host//path: password ok 5 - host//path: host ok 6 - host//path: service ok 7 - host//path: path ok 8 - host//path: query ok 9 - host//path: fragment ok 10 - host//path: host_hint ok 11 - host//path: parse ok 12 - host//path: scheme ok 13 - host//path: login ok 14 - host//path: password ok 15 - 
host//path: host ok 16 - host//path: service ok 17 - host//path: path ok 18 - host//path: query ok 19 - host//path: fragment ok 28 - subtests 1..19 ok 1 - host/;abc?q: parse ok 2 - host/;abc?q: scheme ok 3 - host/;abc?q: login ok 4 - host/;abc?q: password ok 5 - host/;abc?q: host ok 6 - host/;abc?q: service ok 7 - host/;abc?q: path ok 8 - host/;abc?q: query ok 9 - host/;abc?q: fragment ok 10 - host/;abc?q: host_hint ok 11 - host/;abc?q: parse ok 12 - host/;abc?q: scheme ok 13 - host/;abc?q: login ok 14 - host/;abc?q: password ok 15 - host/;abc?q: host ok 16 - host/;abc?q: service ok 17 - host/;abc?q: path ok 18 - host/;abc?q: query ok 19 - host/;abc?q: fragment ok 29 - subtests 1..19 ok 1 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: parse ok 2 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: scheme ok 3 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: login ok 4 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: password ok 5 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: host ok 6 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: service ok 7 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: path ok 8 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: query ok 9 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: fragment ok 10 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: host_hint ok 11 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: parse ok 12 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: scheme ok 13 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: login ok 14 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: password ok 15 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: 
host ok 16 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: service ok 17 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: path ok 18 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: query ok 19 - scheme://login:password@host:service/@path1/:path2?q1=v1&q2=v2#fragment: fragment ok 30 - subtests 1..19 ok 1 - host/~user: parse ok 2 - host/~user: scheme ok 3 - host/~user: login ok 4 - host/~user: password ok 5 - host/~user: host ok 6 - host/~user: service ok 7 - host/~user: path ok 8 - host/~user: query ok 9 - host/~user: fragment ok 10 - host/~user: host_hint ok 11 - host/~user: parse ok 12 - host/~user: scheme ok 13 - host/~user: login ok 14 - host/~user: password ok 15 - host/~user: host ok 16 - host/~user: service ok 17 - host/~user: path ok 18 - host/~user: query ok 19 - host/~user: fragment ok 31 - subtests 1..19 ok 1 - try.tarantool.org: parse ok 2 - try.tarantool.org: scheme ok 3 - try.tarantool.org: login ok 4 - try.tarantool.org: password ok 5 - try.tarantool.org: host ok 6 - try.tarantool.org: service ok 7 - try.tarantool.org: path ok 8 - try.tarantool.org: query ok 9 - try.tarantool.org: fragment ok 10 - try.tarantool.org: host_hint ok 11 - try.tarantool.org: parse ok 12 - try.tarantool.org: scheme ok 13 - try.tarantool.org: login ok 14 - try.tarantool.org: password ok 15 - try.tarantool.org: host ok 16 - try.tarantool.org: service ok 17 - try.tarantool.org: path ok 18 - try.tarantool.org: query ok 19 - try.tarantool.org: fragment ok 32 - subtests 1..19 ok 1 - try.tarantool.org: parse ok 2 - try.tarantool.org: scheme ok 3 - try.tarantool.org: login ok 4 - try.tarantool.org: password ok 5 - try.tarantool.org: host ok 6 - try.tarantool.org: service ok 7 - try.tarantool.org: path ok 8 - try.tarantool.org: query ok 9 - try.tarantool.org: fragment ok 10 - try.tarantool.org: host_hint ok 11 - try.tarantool.org: parse ok 12 - try.tarantool.org: scheme ok 13 - try.tarantool.org: login ok 
14 - try.tarantool.org: password ok 15 - try.tarantool.org: host ok 16 - try.tarantool.org: service ok 17 - try.tarantool.org: path ok 18 - try.tarantool.org: query ok 19 - try.tarantool.org: fragment ok 33 - subtests 1..19 ok 1 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: parse ok 2 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: scheme ok 3 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: login ok 4 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: password ok 5 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: host ok 6 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: service ok 7 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: path ok 8 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: query ok 9 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: fragment ok 10 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: host_hint ok 11 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: parse ok 12 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: scheme ok 13 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: login ok 14 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: password ok 15 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: host ok 16 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: service ok 17 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: path ok 18 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: query ok 19 - www.llanfairpwllgwyngyllgogerychwyrndrobwyll-llantysiliogogogoch.com: fragment ok 34 - subtests 1..19 ok 1 - 0.0.0.0: parse ok 2 - 0.0.0.0: scheme ok 3 - 0.0.0.0: login ok 4 - 0.0.0.0: password ok 5 - 0.0.0.0: host ok 6 - 0.0.0.0: service ok 7 - 
0.0.0.0: path ok 8 - 0.0.0.0: query ok 9 - 0.0.0.0: fragment ok 10 - 0.0.0.0: host_hint ok 11 - 0.0.0.0: parse ok 12 - 0.0.0.0: scheme ok 13 - 0.0.0.0: login ok 14 - 0.0.0.0: password ok 15 - 0.0.0.0: host ok 16 - 0.0.0.0: service ok 17 - 0.0.0.0: path ok 18 - 0.0.0.0: query ok 19 - 0.0.0.0: fragment ok 35 - subtests 1..19 ok 1 - 127.0.0.1: parse ok 2 - 127.0.0.1: scheme ok 3 - 127.0.0.1: login ok 4 - 127.0.0.1: password ok 5 - 127.0.0.1: host ok 6 - 127.0.0.1: service ok 7 - 127.0.0.1: path ok 8 - 127.0.0.1: query ok 9 - 127.0.0.1: fragment ok 10 - 127.0.0.1: host_hint ok 11 - 127.0.0.1: parse ok 12 - 127.0.0.1: scheme ok 13 - 127.0.0.1: login ok 14 - 127.0.0.1: password ok 15 - 127.0.0.1: host ok 16 - 127.0.0.1: service ok 17 - 127.0.0.1: path ok 18 - 127.0.0.1: query ok 19 - 127.0.0.1: fragment ok 36 - subtests 1..19 ok 1 - 127.0.0.1:3313: parse ok 2 - 127.0.0.1:3313: scheme ok 3 - 127.0.0.1:3313: login ok 4 - 127.0.0.1:3313: password ok 5 - 127.0.0.1:3313: host ok 6 - 127.0.0.1:3313: service ok 7 - 127.0.0.1:3313: path ok 8 - 127.0.0.1:3313: query ok 9 - 127.0.0.1:3313: fragment ok 10 - 127.0.0.1:3313: host_hint ok 11 - 127.0.0.1:3313: parse ok 12 - 127.0.0.1:3313: scheme ok 13 - 127.0.0.1:3313: login ok 14 - 127.0.0.1:3313: password ok 15 - 127.0.0.1:3313: host ok 16 - 127.0.0.1:3313: service ok 17 - 127.0.0.1:3313: path ok 18 - 127.0.0.1:3313: query ok 19 - 127.0.0.1:3313: fragment ok 37 - subtests 1..19 ok 1 - scheme://login:password@127.0.0.1:3313: parse ok 2 - scheme://login:password@127.0.0.1:3313: scheme ok 3 - scheme://login:password@127.0.0.1:3313: login ok 4 - scheme://login:password@127.0.0.1:3313: password ok 5 - scheme://login:password@127.0.0.1:3313: host ok 6 - scheme://login:password@127.0.0.1:3313: service ok 7 - scheme://login:password@127.0.0.1:3313: path ok 8 - scheme://login:password@127.0.0.1:3313: query ok 9 - scheme://login:password@127.0.0.1:3313: fragment ok 10 - scheme://login:password@127.0.0.1:3313: host_hint ok 11 - 
scheme://login:password@127.0.0.1:3313: parse ok 12 - scheme://login:password@127.0.0.1:3313: scheme ok 13 - scheme://login:password@127.0.0.1:3313: login ok 14 - scheme://login:password@127.0.0.1:3313: password ok 15 - scheme://login:password@127.0.0.1:3313: host ok 16 - scheme://login:password@127.0.0.1:3313: service ok 17 - scheme://login:password@127.0.0.1:3313: path ok 18 - scheme://login:password@127.0.0.1:3313: query ok 19 - scheme://login:password@127.0.0.1:3313: fragment ok 38 - subtests 1..19 ok 1 - [2001::11a3:09d7::1]: parse ok 2 - [2001::11a3:09d7::1]: scheme ok 3 - [2001::11a3:09d7::1]: login ok 4 - [2001::11a3:09d7::1]: password ok 5 - [2001::11a3:09d7::1]: host ok 6 - [2001::11a3:09d7::1]: service ok 7 - [2001::11a3:09d7::1]: path ok 8 - [2001::11a3:09d7::1]: query ok 9 - [2001::11a3:09d7::1]: fragment ok 10 - [2001::11a3:09d7::1]: host_hint ok 11 - [2001::11a3:09d7::1]: parse ok 12 - [2001::11a3:09d7::1]: scheme ok 13 - [2001::11a3:09d7::1]: login ok 14 - [2001::11a3:09d7::1]: password ok 15 - [2001::11a3:09d7::1]: host ok 16 - [2001::11a3:09d7::1]: service ok 17 - [2001::11a3:09d7::1]: path ok 18 - [2001::11a3:09d7::1]: query ok 19 - [2001::11a3:09d7::1]: fragment ok 39 - subtests 1..19 ok 1 - scheme://login:password@[2001::11a3:09d7::1]:3313: parse ok 2 - scheme://login:password@[2001::11a3:09d7::1]:3313: scheme ok 3 - scheme://login:password@[2001::11a3:09d7::1]:3313: login ok 4 - scheme://login:password@[2001::11a3:09d7::1]:3313: password ok 5 - scheme://login:password@[2001::11a3:09d7::1]:3313: host ok 6 - scheme://login:password@[2001::11a3:09d7::1]:3313: service ok 7 - scheme://login:password@[2001::11a3:09d7::1]:3313: path ok 8 - scheme://login:password@[2001::11a3:09d7::1]:3313: query ok 9 - scheme://login:password@[2001::11a3:09d7::1]:3313: fragment ok 10 - scheme://login:password@[2001::11a3:09d7::1]:3313: host_hint ok 11 - scheme://login:password@[2001::11a3:09d7::1]:3313: parse ok 12 - scheme://login:password@[2001::11a3:09d7::1]:3313: 
scheme ok 13 - scheme://login:password@[2001::11a3:09d7::1]:3313: login ok 14 - scheme://login:password@[2001::11a3:09d7::1]:3313: password ok 15 - scheme://login:password@[2001::11a3:09d7::1]:3313: host ok 16 - scheme://login:password@[2001::11a3:09d7::1]:3313: service ok 17 - scheme://login:password@[2001::11a3:09d7::1]:3313: path ok 18 - scheme://login:password@[2001::11a3:09d7::1]:3313: query ok 19 - scheme://login:password@[2001::11a3:09d7::1]:3313: fragment ok 40 - subtests 1..19 ok 1 - scheme://[2001:0db8:11a3:09d7::1]: parse ok 2 - scheme://[2001:0db8:11a3:09d7::1]: scheme ok 3 - scheme://[2001:0db8:11a3:09d7::1]: login ok 4 - scheme://[2001:0db8:11a3:09d7::1]: password ok 5 - scheme://[2001:0db8:11a3:09d7::1]: host ok 6 - scheme://[2001:0db8:11a3:09d7::1]: service ok 7 - scheme://[2001:0db8:11a3:09d7::1]: path ok 8 - scheme://[2001:0db8:11a3:09d7::1]: query ok 9 - scheme://[2001:0db8:11a3:09d7::1]: fragment ok 10 - scheme://[2001:0db8:11a3:09d7::1]: host_hint ok 11 - scheme://[2001:0db8:11a3:09d7::1]: parse ok 12 - scheme://[2001:0db8:11a3:09d7::1]: scheme ok 13 - scheme://[2001:0db8:11a3:09d7::1]: login ok 14 - scheme://[2001:0db8:11a3:09d7::1]: password ok 15 - scheme://[2001:0db8:11a3:09d7::1]: host ok 16 - scheme://[2001:0db8:11a3:09d7::1]: service ok 17 - scheme://[2001:0db8:11a3:09d7::1]: path ok 18 - scheme://[2001:0db8:11a3:09d7::1]: query ok 19 - scheme://[2001:0db8:11a3:09d7::1]: fragment ok 41 - subtests 1..19 ok 1 - [::ffff:11.2.3.4]: parse ok 2 - [::ffff:11.2.3.4]: scheme ok 3 - [::ffff:11.2.3.4]: login ok 4 - [::ffff:11.2.3.4]: password ok 5 - [::ffff:11.2.3.4]: host ok 6 - [::ffff:11.2.3.4]: service ok 7 - [::ffff:11.2.3.4]: path ok 8 - [::ffff:11.2.3.4]: query ok 9 - [::ffff:11.2.3.4]: fragment ok 10 - [::ffff:11.2.3.4]: host_hint ok 11 - [::ffff:11.2.3.4]: parse ok 12 - [::ffff:11.2.3.4]: scheme ok 13 - [::ffff:11.2.3.4]: login ok 14 - [::ffff:11.2.3.4]: password ok 15 - [::ffff:11.2.3.4]: host ok 16 - [::ffff:11.2.3.4]: service ok 17 - 
[::ffff:11.2.3.4]: path ok 18 - [::ffff:11.2.3.4]: query ok 19 - [::ffff:11.2.3.4]: fragment ok 42 - subtests 1..19 ok 1 - scheme://login:password@[::ffff:11.2.3.4]:3313: parse ok 2 - scheme://login:password@[::ffff:11.2.3.4]:3313: scheme ok 3 - scheme://login:password@[::ffff:11.2.3.4]:3313: login ok 4 - scheme://login:password@[::ffff:11.2.3.4]:3313: password ok 5 - scheme://login:password@[::ffff:11.2.3.4]:3313: host ok 6 - scheme://login:password@[::ffff:11.2.3.4]:3313: service ok 7 - scheme://login:password@[::ffff:11.2.3.4]:3313: path ok 8 - scheme://login:password@[::ffff:11.2.3.4]:3313: query ok 9 - scheme://login:password@[::ffff:11.2.3.4]:3313: fragment ok 10 - scheme://login:password@[::ffff:11.2.3.4]:3313: host_hint ok 11 - scheme://login:password@[::ffff:11.2.3.4]:3313: parse ok 12 - scheme://login:password@[::ffff:11.2.3.4]:3313: scheme ok 13 - scheme://login:password@[::ffff:11.2.3.4]:3313: login ok 14 - scheme://login:password@[::ffff:11.2.3.4]:3313: password ok 15 - scheme://login:password@[::ffff:11.2.3.4]:3313: host ok 16 - scheme://login:password@[::ffff:11.2.3.4]:3313: service ok 17 - scheme://login:password@[::ffff:11.2.3.4]:3313: path ok 18 - scheme://login:password@[::ffff:11.2.3.4]:3313: query ok 19 - scheme://login:password@[::ffff:11.2.3.4]:3313: fragment ok 43 - subtests 1..19 ok 1 - 1: parse ok 2 - 1: scheme ok 3 - 1: login ok 4 - 1: password ok 5 - 1: host ok 6 - 1: service ok 7 - 1: path ok 8 - 1: query ok 9 - 1: fragment ok 10 - 1: host_hint ok 11 - 1: parse ok 12 - 1: scheme ok 13 - 1: login ok 14 - 1: password ok 15 - 1: host ok 16 - 1: service ok 17 - 1: path ok 18 - 1: query ok 19 - 1: fragment ok 44 - subtests 1..19 ok 1 - 10: parse ok 2 - 10: scheme ok 3 - 10: login ok 4 - 10: password ok 5 - 10: host ok 6 - 10: service ok 7 - 10: path ok 8 - 10: query ok 9 - 10: fragment ok 10 - 10: host_hint ok 11 - 10: parse ok 12 - 10: scheme ok 13 - 10: login ok 14 - 10: password ok 15 - 10: host ok 16 - 10: service ok 17 - 10: path ok 18 
- 10: query ok 19 - 10: fragment ok 45 - subtests 1..19 ok 1 - 331: parse ok 2 - 331: scheme ok 3 - 331: login ok 4 - 331: password ok 5 - 331: host ok 6 - 331: service ok 7 - 331: path ok 8 - 331: query ok 9 - 331: fragment ok 10 - 331: host_hint ok 11 - 331: parse ok 12 - 331: scheme ok 13 - 331: login ok 14 - 331: password ok 15 - 331: host ok 16 - 331: service ok 17 - 331: path ok 18 - 331: query ok 19 - 331: fragment ok 46 - subtests 1..19 ok 1 - 3313: parse ok 2 - 3313: scheme ok 3 - 3313: login ok 4 - 3313: password ok 5 - 3313: host ok 6 - 3313: service ok 7 - 3313: path ok 8 - 3313: query ok 9 - 3313: fragment ok 10 - 3313: host_hint ok 11 - 3313: parse ok 12 - 3313: scheme ok 13 - 3313: login ok 14 - 3313: password ok 15 - 3313: host ok 16 - 3313: service ok 17 - 3313: path ok 18 - 3313: query ok 19 - 3313: fragment ok 47 - subtests 1..19 ok 1 - /: parse ok 2 - /: scheme ok 3 - /: login ok 4 - /: password ok 5 - /: host ok 6 - /: service ok 7 - /: path ok 8 - /: query ok 9 - /: fragment ok 10 - /: host_hint ok 11 - /: parse ok 12 - /: scheme ok 13 - /: login ok 14 - /: password ok 15 - /: host ok 16 - /: service ok 17 - /: path ok 18 - /: query ok 19 - /: fragment ok 48 - subtests 1..19 ok 1 - /path1/path2/path3: parse ok 2 - /path1/path2/path3: scheme ok 3 - /path1/path2/path3: login ok 4 - /path1/path2/path3: password ok 5 - /path1/path2/path3: host ok 6 - /path1/path2/path3: service ok 7 - /path1/path2/path3: path ok 8 - /path1/path2/path3: query ok 9 - /path1/path2/path3: fragment ok 10 - /path1/path2/path3: host_hint ok 11 - /path1/path2/path3: parse ok 12 - /path1/path2/path3: scheme ok 13 - /path1/path2/path3: login ok 14 - /path1/path2/path3: password ok 15 - /path1/path2/path3: host ok 16 - /path1/path2/path3: service ok 17 - /path1/path2/path3: path ok 18 - /path1/path2/path3: query ok 19 - /path1/path2/path3: fragment ok 49 - subtests 1..19 ok 1 - login:password@/path1/path2/path3: parse ok 2 - login:password@/path1/path2/path3: scheme ok 3 - 
login:password@/path1/path2/path3: login ok 4 - login:password@/path1/path2/path3: password ok 5 - login:password@/path1/path2/path3: host ok 6 - login:password@/path1/path2/path3: service ok 7 - login:password@/path1/path2/path3: path ok 8 - login:password@/path1/path2/path3: query ok 9 - login:password@/path1/path2/path3: fragment ok 10 - login:password@/path1/path2/path3: host_hint ok 11 - login:password@/path1/path2/path3: parse ok 12 - login:password@/path1/path2/path3: scheme ok 13 - login:password@/path1/path2/path3: login ok 14 - login:password@/path1/path2/path3: password ok 15 - login:password@/path1/path2/path3: host ok 16 - login:password@/path1/path2/path3: service ok 17 - login:password@/path1/path2/path3: path ok 18 - login:password@/path1/path2/path3: query ok 19 - login:password@/path1/path2/path3: fragment ok 50 - subtests 1..19 ok 1 - unix/:/path1/path2/path3: parse ok 2 - unix/:/path1/path2/path3: scheme ok 3 - unix/:/path1/path2/path3: login ok 4 - unix/:/path1/path2/path3: password ok 5 - unix/:/path1/path2/path3: host ok 6 - unix/:/path1/path2/path3: service ok 7 - unix/:/path1/path2/path3: path ok 8 - unix/:/path1/path2/path3: query ok 9 - unix/:/path1/path2/path3: fragment ok 10 - unix/:/path1/path2/path3: host_hint ok 11 - unix/:/path1/path2/path3: parse ok 12 - unix/:/path1/path2/path3: scheme ok 13 - unix/:/path1/path2/path3: login ok 14 - unix/:/path1/path2/path3: password ok 15 - unix/:/path1/path2/path3: host ok 16 - unix/:/path1/path2/path3: service ok 17 - unix/:/path1/path2/path3: path ok 18 - unix/:/path1/path2/path3: query ok 19 - unix/:/path1/path2/path3: fragment ok 51 - subtests 1..19 ok 1 - unix/:/path1/path2/path3:: parse ok 2 - unix/:/path1/path2/path3:: scheme ok 3 - unix/:/path1/path2/path3:: login ok 4 - unix/:/path1/path2/path3:: password ok 5 - unix/:/path1/path2/path3:: host ok 6 - unix/:/path1/path2/path3:: service ok 7 - unix/:/path1/path2/path3:: path ok 8 - unix/:/path1/path2/path3:: query ok 9 - 
unix/:/path1/path2/path3:: fragment ok 10 - unix/:/path1/path2/path3:: host_hint ok 11 - unix/:/path1/path2/path3:: parse ok 12 - unix/:/path1/path2/path3:: scheme ok 13 - unix/:/path1/path2/path3:: login ok 14 - unix/:/path1/path2/path3:: password ok 15 - unix/:/path1/path2/path3:: host ok 16 - unix/:/path1/path2/path3:: service ok 17 - unix/:/path1/path2/path3:: path ok 18 - unix/:/path1/path2/path3:: query ok 19 - unix/:/path1/path2/path3:: fragment ok 52 - subtests 1..19 ok 1 - unix/:/path1/path2/path3:/: parse ok 2 - unix/:/path1/path2/path3:/: scheme ok 3 - unix/:/path1/path2/path3:/: login ok 4 - unix/:/path1/path2/path3:/: password ok 5 - unix/:/path1/path2/path3:/: host ok 6 - unix/:/path1/path2/path3:/: service ok 7 - unix/:/path1/path2/path3:/: path ok 8 - unix/:/path1/path2/path3:/: query ok 9 - unix/:/path1/path2/path3:/: fragment ok 10 - unix/:/path1/path2/path3:/: host_hint ok 11 - unix/:/path1/path2/path3:/: parse ok 12 - unix/:/path1/path2/path3:/: scheme ok 13 - unix/:/path1/path2/path3:/: login ok 14 - unix/:/path1/path2/path3:/: password ok 15 - unix/:/path1/path2/path3:/: host ok 16 - unix/:/path1/path2/path3:/: service ok 17 - unix/:/path1/path2/path3:/: path ok 18 - unix/:/path1/path2/path3:/: query ok 19 - unix/:/path1/path2/path3:/: fragment ok 53 - subtests 1..19 ok 1 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 2 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 3 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 4 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 5 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 6 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 7 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 8 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 9 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 10 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: host_hint ok 11 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: parse ok 12 - 
unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: scheme ok 13 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: login ok 14 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: password ok 15 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: host ok 16 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: service ok 17 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: path ok 18 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: query ok 19 - unix/:/path1/path2/path3?q1=v1&q2=v2#fragment: fragment ok 54 - subtests 1..19 ok 1 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: parse ok 2 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: scheme ok 3 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: login ok 4 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: password ok 5 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: host ok 6 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: service ok 7 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: path ok 8 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: query ok 9 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: fragment ok 10 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: host_hint ok 11 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: parse ok 12 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: scheme ok 13 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: login ok 14 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: password ok 15 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: host ok 16 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: service ok 17 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: path ok 18 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: query ok 19 - unix/:/path1/path2/path3:/p1/p2?q1=v1&q2=v2#fragment: fragment ok 55 - subtests 1..19 ok 1 - login:password@unix/:/path1/path2/path3: parse ok 2 - login:password@unix/:/path1/path2/path3: scheme ok 3 - login:password@unix/:/path1/path2/path3: login ok 
4 - login:password@unix/:/path1/path2/path3: password ok 5 - login:password@unix/:/path1/path2/path3: host ok 6 - login:password@unix/:/path1/path2/path3: service ok 7 - login:password@unix/:/path1/path2/path3: path ok 8 - login:password@unix/:/path1/path2/path3: query ok 9 - login:password@unix/:/path1/path2/path3: fragment ok 10 - login:password@unix/:/path1/path2/path3: host_hint ok 11 - login:password@unix/:/path1/path2/path3: parse ok 12 - login:password@unix/:/path1/path2/path3: scheme ok 13 - login:password@unix/:/path1/path2/path3: login ok 14 - login:password@unix/:/path1/path2/path3: password ok 15 - login:password@unix/:/path1/path2/path3: host ok 16 - login:password@unix/:/path1/path2/path3: service ok 17 - login:password@unix/:/path1/path2/path3: path ok 18 - login:password@unix/:/path1/path2/path3: query ok 19 - login:password@unix/:/path1/path2/path3: fragment ok 56 - subtests 1..19 ok 1 - login:password@unix/:/path1/path2/path3:: parse ok 2 - login:password@unix/:/path1/path2/path3:: scheme ok 3 - login:password@unix/:/path1/path2/path3:: login ok 4 - login:password@unix/:/path1/path2/path3:: password ok 5 - login:password@unix/:/path1/path2/path3:: host ok 6 - login:password@unix/:/path1/path2/path3:: service ok 7 - login:password@unix/:/path1/path2/path3:: path ok 8 - login:password@unix/:/path1/path2/path3:: query ok 9 - login:password@unix/:/path1/path2/path3:: fragment ok 10 - login:password@unix/:/path1/path2/path3:: host_hint ok 11 - login:password@unix/:/path1/path2/path3:: parse ok 12 - login:password@unix/:/path1/path2/path3:: scheme ok 13 - login:password@unix/:/path1/path2/path3:: login ok 14 - login:password@unix/:/path1/path2/path3:: password ok 15 - login:password@unix/:/path1/path2/path3:: host ok 16 - login:password@unix/:/path1/path2/path3:: service ok 17 - login:password@unix/:/path1/path2/path3:: path ok 18 - login:password@unix/:/path1/path2/path3:: query ok 19 - login:password@unix/:/path1/path2/path3:: fragment ok 57 - 
subtests 1..19 ok 1 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: parse ok 2 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: scheme ok 3 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: login ok 4 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: password ok 5 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: host ok 6 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: service ok 7 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: path ok 8 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: query ok 9 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: fragment ok 10 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: host_hint ok 11 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: parse ok 12 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: scheme ok 13 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: login ok 14 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: password ok 15 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: host ok 16 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: service ok 17 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: path ok 18 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: query ok 19 - scheme://login:password@unix/:/tmp/unix.sock:/path1/path2/path3: fragment ok 58 - subtests 1..19 ok 1 - unix/:./relative/path.sock:/test: parse ok 2 - unix/:./relative/path.sock:/test: scheme ok 3 - unix/:./relative/path.sock:/test: login ok 4 - unix/:./relative/path.sock:/test: password ok 5 - unix/:./relative/path.sock:/test: host ok 6 - unix/:./relative/path.sock:/test: service ok 7 - unix/:./relative/path.sock:/test: path ok 8 - unix/:./relative/path.sock:/test: query ok 9 - unix/:./relative/path.sock:/test: fragment ok 10 - 
unix/:./relative/path.sock:/test: host_hint ok 11 - unix/:./relative/path.sock:/test: parse ok 12 - unix/:./relative/path.sock:/test: scheme ok 13 - unix/:./relative/path.sock:/test: login ok 14 - unix/:./relative/path.sock:/test: password ok 15 - unix/:./relative/path.sock:/test: host ok 16 - unix/:./relative/path.sock:/test: service ok 17 - unix/:./relative/path.sock:/test: path ok 18 - unix/:./relative/path.sock:/test: query ok 19 - unix/:./relative/path.sock:/test: fragment ok 59 - subtests 1..19 ok 1 - scheme://unix/:./relative/path.sock:/test: parse ok 2 - scheme://unix/:./relative/path.sock:/test: scheme ok 3 - scheme://unix/:./relative/path.sock:/test: login ok 4 - scheme://unix/:./relative/path.sock:/test: password ok 5 - scheme://unix/:./relative/path.sock:/test: host ok 6 - scheme://unix/:./relative/path.sock:/test: service ok 7 - scheme://unix/:./relative/path.sock:/test: path ok 8 - scheme://unix/:./relative/path.sock:/test: query ok 9 - scheme://unix/:./relative/path.sock:/test: fragment ok 10 - scheme://unix/:./relative/path.sock:/test: host_hint ok 11 - scheme://unix/:./relative/path.sock:/test: parse ok 12 - scheme://unix/:./relative/path.sock:/test: scheme ok 13 - scheme://unix/:./relative/path.sock:/test: login ok 14 - scheme://unix/:./relative/path.sock:/test: password ok 15 - scheme://unix/:./relative/path.sock:/test: host ok 16 - scheme://unix/:./relative/path.sock:/test: service ok 17 - scheme://unix/:./relative/path.sock:/test: path ok 18 - scheme://unix/:./relative/path.sock:/test: query ok 19 - scheme://unix/:./relative/path.sock:/test: fragment ok 60 - subtests 1..19 ok 1 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: parse ok 2 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: scheme ok 3 - 
http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: login ok 4 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: password ok 5 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: host ok 6 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: service ok 7 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: path ok 8 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: query ok 9 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: fragment ok 10 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: host_hint ok 11 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: parse ok 12 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: scheme ok 13 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: login ok 14 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: password ok 15 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: host ok 16 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: service ok 17 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: path ok 18 - 
http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: query ok 19 - http://tarantool.org/dist/master/debian/pool/main/t/tarantool/tarantool_1.6.3+314+g91066ee+20140910+1434.orig.tar.gz: fragment ok 61 - subtests 1..19 ok 1 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: parse ok 2 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: scheme ok 3 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: login ok 4 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: password ok 5 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: host ok 6 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: service ok 7 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: path ok 8 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: query ok 9 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: fragment ok 10 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: host_hint ok 11 - 
https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: parse ok 12 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: scheme ok 13 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: login ok 14 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: password ok 15 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: host ok 16 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: service ok 17 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: path ok 18 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: query ok 19 - https://www.google.com/search?safe=off&site=&tbm=isch&source=hp&biw=1918&bih=1109&q=Tarantool&oq=Tarantool&gs_l=img.3..0i24l3j0i10i24j0i24&gws_rd=ssl: fragment ok 62 - subtests 1..2 ok 1 - empty is invalid ok 2 - :// is invalid ok 63 - subtests tarantool_1.9.1.26.g63eb81e3c/test/unit/histogram.result0000664000000000000000000000023213306560010021370 0ustar rootroot *** test_counts *** *** test_counts: done *** *** test_discard *** *** test_discard: done *** *** test_percentile *** *** test_percentile: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/vy_cache.result0000664000000000000000000000037113306560010021160 0ustar rootroot *** test_basic *** 1..6 ok 1 - cache is 
filled with 6 statements ok 2 - cache is filled with 12 statements ok 3 - cache is filled with 18 statements ok 4 - next_key * 4 ok 5 - restore ok 6 - restore on position after last *** test_basic: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/bps_tree.result0000664000000000000000000001553113306560010021206 0ustar rootroot *** simple_check *** Insert 1..X, remove 1..X Insert 1..X, remove X..1 Insert X..1, remove 1..X Insert X..1, remove X..1 *** simple_check: done *** *** compare_with_sptree_check *** *** compare_with_sptree_check: done *** *** compare_with_sptree_check_branches *** *** compare_with_sptree_check_branches: done *** *** bps_tree_debug_self_check *** *** bps_tree_debug_self_check: done *** *** loading_test *** *** loading_test: done *** *** printing_test *** Inserting 22 [(1) 22] Inserting 21 [(2) 21 22] Inserting 23 [(3) 21 22 23] Inserting 20 [(4) 20 21 22 23] Inserting 24 [(5) 20 21 22 23 24] Inserting 19 [(6) 19 20 21 22 23 24] Inserting 25 [(7) 19 20 21 22 23 24 25] Inserting 18 [(8) 18 19 20 21 22 23 24 25] Inserting 26 [(9) 18 19 20 21 22 23 24 25 26] Inserting 17 [(10) 17 18 19 20 21 22 23 24 25 26] Inserting 27 [(11) 17 18 19 20 21 22 23 24 25 26 27] Inserting 16 [(12) 16 17 18 19 20 21 22 23 24 25 26 27] Inserting 28 [(13) 16 17 18 19 20 21 22 23 24 25 26 27 28] Inserting 15 [(14) 15 16 17 18 19 20 21 22 23 24 25 26 27 28] Inserting 29 [(8) 15 16 17 18 19 20 21 22] 22 [(7) 23 24 25 26 27 28 29] Inserting 14 [(9) 14 15 16 17 18 19 20 21 22] 22 [(7) 23 24 25 26 27 28 29] Inserting 30 [(9) 14 15 16 17 18 19 20 21 22] 22 [(8) 23 24 25 26 27 28 29 30] Inserting 13 [(10) 13 14 15 16 17 18 19 20 21 22] 22 [(8) 23 24 25 26 27 28 29 30] Inserting 31 [(10) 13 14 15 16 17 18 19 20 21 22] 22 [(9) 23 24 25 26 27 28 29 30 31] Inserting 12 [(11) 12 13 14 15 16 17 18 19 20 21 22] 22 [(9) 23 24 25 26 27 28 29 30 31] Inserting 32 [(11) 12 13 14 15 16 17 18 19 20 21 22] 22 [(10) 23 24 25 26 27 28 29 30 31 32] Inserting 11 [(12) 11 12 13 14 15 16 17 18 19 20 
21 22] 22 [(10) 23 24 25 26 27 28 29 30 31 32] Inserting 33 [(12) 11 12 13 14 15 16 17 18 19 20 21 22] 22 [(11) 23 24 25 26 27 28 29 30 31 32 33] Inserting 10 [(13) 10 11 12 13 14 15 16 17 18 19 20 21 22] 22 [(11) 23 24 25 26 27 28 29 30 31 32 33] Inserting 34 [(13) 10 11 12 13 14 15 16 17 18 19 20 21 22] 22 [(12) 23 24 25 26 27 28 29 30 31 32 33 34] Inserting 9 [(14) 9 10 11 12 13 14 15 16 17 18 19 20 21 22] 22 [(12) 23 24 25 26 27 28 29 30 31 32 33 34] Inserting 35 [(14) 9 10 11 12 13 14 15 16 17 18 19 20 21 22] 22 [(13) 23 24 25 26 27 28 29 30 31 32 33 34 35] Inserting 8 [(14) 8 9 10 11 12 13 14 15 16 17 18 19 20 21] 21 [(14) 22 23 24 25 26 27 28 29 30 31 32 33 34 35] Inserting 36 [(10) 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(9) 28 29 30 31 32 33 34 35 36] Inserting 7 [(11) 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(9) 28 29 30 31 32 33 34 35 36] Inserting 37 [(11) 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(10) 28 29 30 31 32 33 34 35 36 37] Inserting 6 [(12) 6 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(10) 28 29 30 31 32 33 34 35 36 37] Inserting 38 [(12) 6 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(11) 28 29 30 31 32 33 34 35 36 37 38] Inserting 5 [(13) 5 6 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(11) 28 29 30 31 32 33 34 35 36 37 38] Inserting 39 [(13) 5 6 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(12) 28 29 30 31 32 33 34 35 36 37 38 39] Inserting 4 [(14) 4 5 6 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(12) 28 29 30 31 32 33 34 35 36 37 38 39] Inserting 40 [(14) 4 5 6 7 8 9 10 11 12 13 14 15 16 17] 17 [(10) 18 19 20 21 22 23 24 25 26 27] 27 [(13) 28 29 30 31 32 33 34 35 36 37 38 39 40] Inserting 3 [(12) 3 4 5 6 7 8 9 10 11 12 13 14] 14 [(13) 15 16 17 18 19 20 21 22 23 24 25 26 27] 27 [(13) 28 29 
30 31 32 33 34 35 36 37 38 39 40] Inserting 41 [(12) 3 4 5 6 7 8 9 10 11 12 13 14] 14 [(13) 15 16 17 18 19 20 21 22 23 24 25 26 27] 27 [(14) 28 29 30 31 32 33 34 35 36 37 38 39 40 41] Inserting 2 [(13) 2 3 4 5 6 7 8 9 10 11 12 13 14] 14 [(13) 15 16 17 18 19 20 21 22 23 24 25 26 27] 27 [(14) 28 29 30 31 32 33 34 35 36 37 38 39 40 41] Inserting 42 [(13) 2 3 4 5 6 7 8 9 10 11 12 13 14] 14 [(14) 15 16 17 18 19 20 21 22 23 24 25 26 27 28] 28 [(14) 29 30 31 32 33 34 35 36 37 38 39 40 41 42] Inserting 1 [(14) 1 2 3 4 5 6 7 8 9 10 11 12 13 14] 14 [(14) 15 16 17 18 19 20 21 22 23 24 25 26 27 28] 28 [(14) 29 30 31 32 33 34 35 36 37 38 39 40 41 42] Inserting 43 [(11) 1 2 3 4 5 6 7 8 9 10 11] 11 [(11) 12 13 14 15 16 17 18 19 20 21 22] 22 [(11) 23 24 25 26 27 28 29 30 31 32 33] 33 [(10) 34 35 36 37 38 39 40 41 42 43] Inserting 0 [(12) 0 1 2 3 4 5 6 7 8 9 10 11] 11 [(11) 12 13 14 15 16 17 18 19 20 21 22] 22 [(11) 23 24 25 26 27 28 29 30 31 32 33] 33 [(10) 34 35 36 37 38 39 40 41 42 43] *** printing_test: done *** *** white_box_test *** full leaf: [(14) 0 1 2 3 4 5 6 7 8 9 10 11 12 13] split now: [(8) 0 1 2 3 4 5 6 7] 7 [(7) 8 9 10 11 12 13 14] full 2 leafs: [(14) 0 1 2 3 4 5 6 7 8 9 10 11 12 13] 13 [(14) 14 15 16 17 18 19 20 21 22 23 24 25 26 27] split now: [(10) 0 1 2 3 4 5 6 7 8 9] 9 [(10) 10 11 12 13 14 15 16 17 18 19] 19 [(9) 20 21 22 23 24 25 26 27 28] full 3 leafs: [(14) 0 1 2 3 4 5 6 7 8 9 10 11 12 13] 13 [(14) 14 15 16 17 18 19 20 21 22 23 24 25 26 27] 27 [(14) 28 29 30 31 32 33 34 35 36 37 38 39 40 41] split now: [(11) 0 1 2 3 4 5 6 7 8 9 10] 10 [(11) 11 12 13 14 15 16 17 18 19 20 21] 21 [(11) 22 23 24 25 26 27 28 29 30 31 32] 32 [(10) 33 34 35 36 37 38 39 40 41 42] full 10 leafs: [(14) 0 1 2 3 4 5 6 7 8 9 10 11 12 13] 13 [(14) 14 15 16 17 18 19 20 21 22 23 24 25 26 27] 27 [(14) 28 29 30 31 32 33 34 35 36 37 38 39 40 41] 41 [(14) 42 43 44 45 46 47 48 49 50 51 52 53 54 55] 55 [(14) 56 57 58 59 60 61 62 63 64 65 66 67 68 69] 69 [(14) 70 71 72 73 74 75 76 77 78 79 80 81 82 
83] 83 [(14) 84 85 86 87 88 89 90 91 92 93 94 95 96 97] 97 [(14) 98 99 100 101 102 103 104 105 106 107 108 109 110 111] 111 [(14) 112 113 114 115 116 117 118 119 120 121 122 123 124 125] 125 [(14) 126 127 128 129 130 131 132 133 134 135 136 137 138 139] 2-level split now: [(14) 0 1 2 3 4 5 6 7 8 9 10 11 12 13] 13 [(14) 14 15 16 17 18 19 20 21 22 23 24 25 26 27] 27 [(14) 28 29 30 31 32 33 34 35 36 37 38 39 40 41] 41 [(14) 42 43 44 45 46 47 48 49 50 51 52 53 54 55] 55 [(14) 56 57 58 59 60 61 62 63 64 65 66 67 68 69] 69 [(14) 70 71 72 73 74 75 76 77 78 79 80 81 82 83] 83 [(14) 84 85 86 87 88 89 90 91 92 93 94 95 96 97] 97 [(11) 98 99 100 101 102 103 104 105 106 107 108] 108 [(11) 109 110 111 112 113 114 115 116 117 118 119] 119 [(11) 120 121 122 123 124 125 126 127 128 129 130] 130 [(10) 131 132 133 134 135 136 137 138 139 140] *** white_box_test: done *** *** approximate_count *** Count: 10575 10575 Count: 10575 Error count: 0 Count: 10575 *** approximate_count: done *** *** insert_get_iterator *** *** insert_get_iterator: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/csv.result0000664000000000000000000000355413306560010020200 0ustar rootroot *** test1 *** |1| || |1| |2| |3| |123| *** test1: done *** *** test2 *** |123| |456| |abcac| |'multiword field 4'| |none| |none| |0| || || || || || || *** test2: done *** *** test3 *** |1| || |2| *** test3: done *** *** test4 *** |123| |5| |92| |0| |0| |1| |12 34| |56| |quote , | |66| |ok| *** test4: done *** *** test5 *** |abc| |longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong| |0| |123| |456| || |0| || || valid: yes *** test5: done *** *** test6 *** || || |abc| |c"| |d| |de| || |k| |e| || || | | valid: NO *** test6: done *** *** big_chunk_separated_test *** line_cnt=10000, fieldsizes_cnt=1920000, 1920000 *** big_chunk_separated_test: done *** *** random_generated_test *** line_cnt=40, fieldsizes_cnt=183 valid: yes *** random_generated_test: done *** *** common_test *** |first| |last| |address| |city| 
|zip| |John| |Doe| |120 any st.| |Anytown, WW| |08123| *** common_test: done *** *** common_test *** |a| |b| |c| |1| |"| |"| |2| |3| |4| *** common_test: done *** *** common_test *** |a| |b| |1| |ha "ha" ha| |3| |4| *** common_test: done *** *** common_test *** |key| |val| |1| |{"type": "Point", "coordinates": [102.0, 0.5]}| *** common_test: done *** *** common_test *** |a| |b| |c| |1| |2| |3| |Once upon a time| |5| |6| |7| |8| |9| *** common_test: done *** *** common_test *** |a| |b| |1| |ha "ha" ha| |3| |4| *** common_test: done *** *** common_test *** |a| |b| |c| |1| |2| |3| |4| |5| |а нет ли ошибок?| *** common_test: done *** *** common_test *** |www| |aaa| |tt | *** common_test: done *** *** iter_test1 *** || |d| |e| |12| |42| |3| |o| *** iter_test1: done *** *** iter_test2 *** |1| |23| *** iter_test2: done *** *** iter_test3 *** |1| |2| |3| |4| |5| |6| *** iter_test3: done *** *** csv_out *** abc,"with,comma",""in quotes"",1 "" quote *** csv_out: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rtree.result0000664000000000000000000000031213306560010020513 0ustar rootroot *** simple_check *** Insert 1..X, remove 1..X Insert 1..X, remove X..1 Insert X..1, remove 1..X Insert X..1, remove X..1 *** simple_check: done *** *** neighbor_test *** *** neighbor_test: done *** tarantool_1.9.1.26.g63eb81e3c/test/unit/rope_basic.c0000664000000000000000000000357713306560010020424 0ustar rootroot#include "salad/rope.h" #include "unit.h" #include "rope_common.h" /******************************************************************/ static void test_empty_rope() { header(); struct rope *rope = test_rope_new(); fail_unless(rope_size(rope) == 0); struct rope_iter *iter = rope_iter_new(rope); fail_unless(rope_iter_start(iter) == NULL); fail_unless(rope_iter_start(iter) == NULL); rope_traverse(rope, str_print); rope_check(rope); rope_pretty_print(rope, str_print); /* rope_erase(), rope_extract() expect a non-empty rope */ rope_iter_delete(iter); rope_delete(rope); footer(); } static 
void test_prepend() { header(); struct rope *rope = test_rope_new(); test_rope_insert(rope, 0, " c "); test_rope_insert(rope, 0, " b "); test_rope_insert(rope, 0, " a "); rope_delete(rope); footer(); } static void test_append() { header(); struct rope *rope = test_rope_new(); test_rope_insert(rope, rope_size(rope), " a "); test_rope_insert(rope, rope_size(rope), " b "); test_rope_insert(rope, rope_size(rope), " c "); rope_delete(rope); footer(); } static void test_insert() { header(); struct rope *rope = test_rope_new(); test_rope_insert(rope, rope_size(rope), " a "); test_rope_insert(rope, rope_size(rope) - 1, "b "); test_rope_insert(rope, rope_size(rope) - 2, "c "); test_rope_insert(rope, 1, " "); test_rope_insert(rope, rope_size(rope) - 1, " "); test_rope_insert(rope, 4, "*"); test_rope_insert(rope, 8, "*"); rope_delete(rope); footer(); } static void test_erase() { header(); struct rope *rope = test_rope_new(); rope_insert(rope, rope_size(rope), "a", 1); test_rope_erase(rope, 0); rope_insert(rope, rope_size(rope), "a", 1); rope_insert(rope, rope_size(rope), "b", 1); test_rope_erase(rope, 0); rope_delete(rope); footer(); } int main() { test_empty_rope(); test_append(); test_prepend(); test_insert(); test_erase(); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/unit/guard.result0000664000000000000000000000003013306560010020471 0ustar rootroot# signal handler called tarantool_1.9.1.26.g63eb81e3c/test/unit/unit.h0000664000000000000000000000776713306560010017307 0ustar rootroot#ifndef INCLUDES_TARANTOOL_TEST_UNIT_H #define INCLUDES_TARANTOOL_TEST_UNIT_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include /* exit() */ #include #define header() printf("\t*** %s ***\n", __func__) #define footer() printf("\t*** %s: done ***\n", __func__) #define fail(expr, result) do { \ fprintf(stderr, "Test failed: %s is %s at %s:%d, in function '%s'\n",\ expr, result, __FILE__, __LINE__, __func__); \ exit(-1); \ } while (0) #define fail_if(expr) if (expr) fail(#expr, "true") #define fail_unless(expr) if (!(expr)) fail(#expr, "false") #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @brief example @code #include "unit.h" int main(void) { plan(3); // count of test You planned to check ok(1, "Test name 1"); is(4, 2 * 2, "2 * 2 == 4"); isnt(5, 2 * 2, "2 * 2 != 5); return check_plan(); // print resume } @endcode */ /* private function, use ok(...) instead */ int _ok(int condition, const char *fmt, ...); /* private function, use note(...) or diag(...) instead */ void _space(FILE *stream); #define msg(stream, ...) 
({ _space(stream); fprintf(stream, "# "); \ fprintf(stream, __VA_ARGS__); fprintf(stream, "\n"); }) #define note(...) msg(stdout, __VA_ARGS__) #define diag(...) msg(stderr, __VA_ARGS__) /** @brief set and print plan @param count Before anything else, you need a testing plan. This basically declares how many tests your program is going to run to protect against premature failure. */ void plan(int count); /** @brief check if plan is reached and print report */ int check_plan(void); #define ok(condition, fmt, args...) { \ int res = _ok(condition, fmt, ##args); \ if (!res) { \ _space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ _space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define is(a, b, fmt, args...) { \ int res = _ok((a) == (b), fmt, ##args); \ if (!res) { \ _space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ _space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define isnt(a, b, fmt, args...) 
{ \ int res = _ok((a) != (b), fmt, ##args); \ if (!res) { \ _space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ _space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_TEST_UNIT_H */ tarantool_1.9.1.26.g63eb81e3c/test/box-py/0000775000000000000000000000000013306565107016411 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/box-py/suite.ini0000664000000000000000000000024013306560010020223 0ustar rootroot[default] core = tarantool description = legacy python tests script = box.lua lua_libs = lua/fiber.lua lua/fifo.lua use_unix_sockets = True is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/box-py/iproto.result0000664000000000000000000000672213306560010021160 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... # # iproto packages test # # Test bug #899343 (server assertion failure on incorrect packet) # send the package with invalid length 12 # check that is server alive True # Test gh-206 "Segfault if sending IPROTO package without `KEY` field" IPROTO_SELECT query {'IPROTO_CODE': 1} {'IPROTO_SPACE_ID': 280} True IPROTO_DELETE query {'IPROTO_CODE': 5} {'IPROTO_SPACE_ID': 280} True IPROTO_UPDATE query {'IPROTO_CODE': 4} {'IPROTO_SPACE_ID': 280} True query {'IPROTO_CODE': 4} {'IPROTO_SPACE_ID': 280, 'IPROTO_KEY': (1,)} True IPROTO_REPLACE query {'IPROTO_CODE': 3} {'IPROTO_SPACE_ID': 280} True IPROTO_CALL query {'IPROTO_CODE': 6} {} True query {'IPROTO_CODE': 6} {'IPROTO_KEY': ('procname',)} True box.cfg.wal_mode --- - write ... space = box.schema.space.create('test', { id = 567 }) --- ... index = space:create_index('primary', { type = 'hash' }) --- ... box.schema.user.grant('guest', 'read,write,execute', 'space', 'test') --- ... 
- [1, baobab] - [2, obbaba] - [1, baobab] - [3, occama] - [2, obbaba] - [4, ockham] - [1, baobab] - [2, obbaba] space:drop() --- ... space = box.schema.space.create('test') --- ... index = space:create_index('primary', { type = 'hash', parts = {1, 'string'}}) --- ... STR 1 -- 0xa1 => ok ok ok ok ok ok 0xd901 => ok ok ok ok ok ok 0xda0001 => ok ok ok ok ok ok 0xdb00000001 => ok ok ok ok ok ok STR 31 -- 0xbf => ok ok ok ok ok ok 0xd91f => ok ok ok ok ok ok 0xda001f => ok ok ok ok ok ok 0xdb0000001f => ok ok ok ok ok ok STR 32 -- 0xd920 => ok ok ok ok ok 0xda0020 => ok ok ok ok ok 0xdb00000020 => ok ok ok ok ok STR 255 -- 0xd9ff => ok ok ok ok ok 0xda00ff => ok ok ok ok ok 0xdb000000ff => ok ok ok ok ok STR 256 -- 0xda0100 => ok ok ok ok 0xdb00000100 => ok ok ok ok STR 65535 -- 0xdaffff => ok ok ok ok 0xdb0000ffff => ok ok ok ok STR 65536 -- 0xdb00010000 => ok ok ok Test of schema_id in iproto. Normal connect done w/o errors: True Got schema_id: True Zero-schema_id connect done w/o errors: True Same schema_id: True Normal connect done w/o errors: True Same schema_id: True Wrong schema_id leads to error: True Same schema_id: True space2 = box.schema.create_space('test2') --- ... Schema changed -> error: True Got another schema_id: True Sync ok Sync on error is ok space:drop() --- ... space2:drop() --- ... space = box.schema.create_space('gh1280', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1}) --- - [1] ... space:insert({2, 'Music'}) --- - [2, 'Music'] ... space:insert({3, 'Length', 93}) --- - [3, 'Length', 93] ... space:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... 
# Test bugs gh-272, gh-1654 if the packet was incorrect, respond with # an error code and do not close connection sync=0, {49: 'Invalid MsgPack - packet header'} sync=1234, {49: "Missing mandatory field 'space id' in request"} sync=5678, {49: "Read access to space '_user' is denied for user 'guest'"} space = box.schema.space.create('test_index_base', { id = 568 }) --- ... index = space:create_index('primary', { type = 'hash' }) --- ... box.schema.user.grant('guest', 'read,write,execute', 'space', 'test_index_base') --- ... - [1, 0, 0, 0] - [1, 0, 1, -1] - [1, 0, 2, -2] function kek() return 'kek' end --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... Sync: 100 Retcode: [['kek']] box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box-py/snapshot.result0000664000000000000000000000172513306560010021501 0ustar rootrootspace = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... # # A test case for: http://bugs.launchpad.net/bugs/686411 # Check that 'box.snapshot()' does not overwrite a snapshot # file that already exists. Verify also that any other # error that happens when saving snapshot is propagated # to the caller. space:insert{1, 'first tuple'} --- - [1, 'first tuple'] ... box.snapshot() --- - ok ... space:insert{2, 'second tuple'} --- - [2, 'second tuple'] ... _, e = pcall(box.snapshot) --- ... e.type --- - SystemError ... e.errno --- - 17 ... space:delete{1} --- - [1, 'first tuple'] ... space:delete{2} --- - [2, 'second tuple'] ... # # A test case for http://bugs.launchpad.net/bugs/727174 # "tarantool_box crashes when saving snapshot on SIGUSR1" # # Increment the lsn number, to make sure there is no such snapshot yet # space:insert{1, 'Test tuple'} --- - [1, 'Test tuple'] ... Snapshot exists. space:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box-py/args.result0000664000000000000000000000713213306565107020610 0ustar rootroottarantool --help Tarantool - a Lua application server Usage: tarantool script.lua [OPTIONS] [SCRIPT [ARGS]] All command line options are passed to the interpreted script. When no script name is provided, the server responds to: -h, --help display this help and exit -v, --version print program version and exit -e EXPR execute string 'EXPR' -l NAME require library 'NAME' -i enter interactive mode after executing 'SCRIPT' -- stop handling options - execute stdin and stop handling options Please visit project home page at http://tarantool.org to see online documentation, submit bugs or contribute a patch. tarantool -h Tarantool - a Lua application server Usage: tarantool script.lua [OPTIONS] [SCRIPT [ARGS]] All command line options are passed to the interpreted script. When no script name is provided, the server responds to: -h, --help display this help and exit -v, --version print program version and exit -e EXPR execute string 'EXPR' -l NAME require library 'NAME' -i enter interactive mode after executing 'SCRIPT' -- stop handling options - execute stdin and stop handling options Please visit project home page at http://tarantool.org to see online documentation, submit bugs or contribute a patch. 
tarantool -Z tarantool: invalid option tarantool --no-such-option tarantool: unrecognized option tarantool --no-such-option --version tarantool: unrecognized option tarantool --version Tarantool 1.minor.patch-- Target: platform Build options: flags Compiler: cc C_FLAGS: flags CXX_FLAGS: flags tarantool -v Tarantool 1.minor.patch-- Target: platform Build options: flags Compiler: cc C_FLAGS: flags CXX_FLAGS: flags tarantool -V Tarantool 1.minor.patch-- Target: platform Build options: flags Compiler: cc C_FLAGS: flags CXX_FLAGS: flags tarantool ${SOURCEDIR}/test/box-py/args.lua arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua tarantool ${SOURCEDIR}/test/box-py/args.lua 1 2 3 arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => 1 arg[2] => 2 arg[3] => 3 tarantool ${SOURCEDIR}/test/box-py/args.lua 1 2 3 -V arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => 1 arg[2] => 2 arg[3] => 3 arg[4] => -V tarantool ${SOURCEDIR}/test/box-py/args.lua -V 1 2 3 arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => -V arg[2] => 1 arg[3] => 2 arg[4] => 3 tarantool ${SOURCEDIR}/test/box-py/args.lua 1 2 3 --help arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => 1 arg[2] => 2 arg[3] => 3 arg[4] => --help tarantool ${SOURCEDIR}/test/box-py/args.lua --help 1 2 3 arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => --help arg[2] => 1 arg[3] => 2 arg[4] => 3 tarantool -V ${SOURCEDIR}/test/box-py/args.lua 1 2 3 Tarantool 1.minor.patch-- Target: platform Build options: flags Compiler: cc C_FLAGS: flags CXX_FLAGS: flags tarantool -e print('Hello') ${SOURCEDIR}/test/box-py/args.lua 1 2 3 Hello arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => 1 arg[2] => 2 arg[3] => 3 tarantool -e a = 10 -e print(a) ${SOURCEDIR}/test/box-py/args.lua 1 2 3 --help 10 arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => 1 arg[2] => 2 arg[3] => 
3 arg[4] => --help tarantool -e print(rawget(_G, 'log') == nil) -e io.flush() -l log -e print(log.info('Hello')) ${SOURCEDIR}/test/box-py/args.lua 1 2 3 --help true Hello arg[-1] => tarantool arg[0] => ${SOURCEDIR}/test/box-py/args.lua arg[1] => 1 arg[2] => 2 arg[3] => 3 arg[4] => --help tarantool_1.9.1.26.g63eb81e3c/test/box-py/args.lua0000775000000000000000000000014413306560010020036 0ustar rootroot#!/usr/bin/env tarantool for i=-1,#arg do print(string.format("arg[%d] => %s", i, arg[i])) end tarantool_1.9.1.26.g63eb81e3c/test/box-py/box.lua0000664000000000000000000000044313306560010017671 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", force_recovery = true, rows_per_wal = 10 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/box-py/print.result0000664000000000000000000000051513306560010020772 0ustar rootrootprint("Hello, world") --- ... io = require('io') --- ... local f = require('fiber').create( function() print('Ehllo, world') io.flush() end ) --- ... require('fiber').sleep(0.01) --- ... Check log line (Hello): --- - "logfile contains "Hello"" ... Check log line (Ehllo): --- - "logfile contains "Ehllo"" ... tarantool_1.9.1.26.g63eb81e3c/test/box-py/bootstrap.result0000664000000000000000000001730613306565107021675 0ustar rootrootbox.internal.bootstrap() --- ... box.space._schema:select{} --- - - ['cluster', ''] - ['max_id', 511] - ['version', 1, 7, 7] ... box.space._cluster:select{} --- - - [1, ''] ... 
box.space._space:select{} --- - - [272, 1, '_schema', 'memtx', 0, {}, [{'type': 'string', 'name': 'key'}]] - [276, 1, '_collation', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, { 'name': 'name', 'type': 'string'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'type', 'type': 'string'}, {'name': 'locale', 'type': 'string'}, { 'name': 'opts', 'type': 'map'}]] - [280, 1, '_space', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] - [281, 1, '_vspace', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] - [284, 1, '_sequence', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'step', 'type': 'integer'}, {'name': 'min', 'type': 'integer'}, {'name': 'max', 'type': 'integer'}, {'name': 'start', 'type': 'integer'}, {'name': 'cache', 'type': 'integer'}, {'name': 'cycle', 'type': 'boolean'}]] - [285, 1, '_sequence_data', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'value', 'type': 'integer'}]] - [288, 1, '_index', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'iid', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'opts', 'type': 'map'}, {'name': 'parts', 'type': 'array'}]] - [289, 1, '_vindex', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'iid', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'opts', 'type': 'map'}, {'name': 'parts', 'type': 'array'}]] - [296, 1, 
'_func', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'setuid', 'type': 'unsigned'}]] - [297, 1, '_vfunc', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'setuid', 'type': 'unsigned'}]] - [304, 1, '_user', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'auth', 'type': 'map'}]] - [305, 1, '_vuser', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'auth', 'type': 'map'}]] - [312, 1, '_priv', 'memtx', 0, {}, [{'name': 'grantor', 'type': 'unsigned'}, { 'name': 'grantee', 'type': 'unsigned'}, {'name': 'object_type', 'type': 'string'}, {'name': 'object_id', 'type': 'unsigned'}, {'name': 'privilege', 'type': 'unsigned'}]] - [313, 1, '_vpriv', 'sysview', 0, {}, [{'name': 'grantor', 'type': 'unsigned'}, {'name': 'grantee', 'type': 'unsigned'}, {'name': 'object_type', 'type': 'string'}, {'name': 'object_id', 'type': 'unsigned'}, {'name': 'privilege', 'type': 'unsigned'}]] - [320, 1, '_cluster', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'uuid', 'type': 'string'}]] - [330, 1, '_truncate', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'count', 'type': 'unsigned'}]] - [340, 1, '_space_sequence', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'sequence_id', 'type': 'unsigned'}, {'name': 'is_generated', 'type': 'boolean'}]] ... 
box.space._index:select{} --- - - [272, 0, 'primary', 'tree', {'unique': true}, [[0, 'string']]] - [276, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [276, 1, 'name', 'tree', {'unique': true}, [[1, 'string']]] - [280, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [280, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [280, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [281, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [281, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [281, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [284, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [284, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [284, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [285, 0, 'primary', 'hash', {'unique': true}, [[0, 'unsigned']]] - [288, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]] - [288, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]] - [289, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]] - [289, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]] - [296, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [296, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [296, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [297, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [297, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [297, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [304, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [304, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [304, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [305, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [305, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [305, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [312, 0, 
'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]] - [312, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]] - [312, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]] - [313, 0, 'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]] - [313, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]] - [313, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]] - [320, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [320, 1, 'uuid', 'tree', {'unique': true}, [[1, 'string']]] - [330, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [340, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [340, 1, 'sequence', 'tree', {'unique': false}, [[1, 'unsigned']]] ... box.space._user:select{} --- - - [0, 1, 'guest', 'user', {'chap-sha1': 'vhvewKp0tNyweZQ+cFKAlsyphfg='}] - [1, 1, 'admin', 'user', {}] - [2, 1, 'public', 'role', {}] - [3, 1, 'replication', 'role', {}] - [31, 1, 'super', 'role', {}] ... box.space._func:select{} --- - - [1, 1, 'box.schema.user.info', 1, 'LUA'] ... box.space._priv:select{} --- - - [1, 0, 'role', 2, 4] - [1, 0, 'universe', 0, 24] - [1, 1, 'universe', 0, 4294967295] - [1, 2, 'function', 1, 4] - [1, 2, 'space', 276, 2] - [1, 2, 'space', 281, 1] - [1, 2, 'space', 289, 1] - [1, 2, 'space', 297, 1] - [1, 2, 'space', 305, 1] - [1, 2, 'space', 313, 1] - [1, 2, 'space', 330, 2] - [1, 3, 'space', 320, 2] - [1, 3, 'universe', 0, 1] - [1, 31, 'universe', 0, 4294967295] ... tarantool_1.9.1.26.g63eb81e3c/test/box-py/call.result0000664000000000000000000002646413306560010020564 0ustar rootrootbox.schema.user.create('test', { password = 'test' }) --- ... box.schema.user.grant('test', 'execute,read,write', 'universe') --- ... exp_notation = 1e123 --- ... function f1() return 'testing', 1, false, -1, 1.123, math.abs(exp_notation - 1e123) < 0.1, nil end --- ... 
f1() --- - testing - 1 - false - -1 - 1.123 - true - null ... call f1 () - [testing] - [1] - [false] - [-1] - [1.123] - [true] - [null] f1=nil --- ... call f1 () error: {code: ER_NO_SUCH_PROC, reason: Procedure 'f1' is not defined} function f1() return f1 end --- ... call f1 () error: {code: ER_PROC_LUA, reason: unsupported Lua type 'function'} call box.error (33333, 'Hey!') error: {code: U, reason: Unknown error} # A test case for Bug#103491 # server CALL processing bug with name path longer than two # https://bugs.launchpad.net/tarantool/+bug/1034912 f = function() return 'OK' end --- ... test = {} --- ... test.f = f --- ... test.test = {} --- ... test.test.f = f --- ... call f () - [OK] call test.f () - [OK] call test.test.f () - [OK] # Test for Bug #955226 # Lua Numbers are passed back wrongly as strings # function foo() return 1, 2, '1', '2' end --- ... call foo () - [1] - [2] - ['1'] - ['2'] function f1(...) return {...} end --- ... function f2(...) return f1({...}) end --- ... call f1 ('test_', 'test_') - [test_, test_] call f2 ('test_', 'test_') - [test_, test_] call f1 () - [] call f2 () - [] function f3() return {{'hello'}, {'world'}} end --- ... call f3 () - [hello] - [world] function f3() return {'hello', {'world'}} end --- ... call f3 () - - hello - [world] function f3() return 'hello', {{'world'}, {'canada'}} end --- ... call f3 () - [hello] - - [world] - [canada] function f3() return {}, '123', {{}, {}} end --- ... call f3 () - [] - ['123'] - - [] - [] function f3() return { {{'hello'}} } end --- ... call f3 () - - [hello] function f3() return { box.tuple.new('hello'), {'world'} } end --- ... call f3 () - [hello] - [world] function f3() return { {'world'}, box.tuple.new('hello') } end --- ... call f3 () - [world] - [hello] function f3() return { { test={1,2,3} }, { test2={1,2,3} } } end --- ... 
call f3 () - - test: [1, 2, 3] - test2: [1, 2, 3] call f1 ('jason',) - [jason] call f1 ('jason', 1, 'test', 2, 'stewart') - [jason, 1, test, 2, stewart] space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... function myreplace(...) return space:replace{...} end --- ... function myinsert(...) return space:insert{...} end --- ... call myinsert (1, 'test box delete') - [1, test box delete] call space:delete (1,) - [1, test box delete] call myinsert (1, 'test box delete') - [1, test box delete] call space:delete (1,) - [1, test box delete] call space:delete (1,) [] call myinsert (2, 'test box delete') - [2, test box delete] call space:delete (1,) [] call space:delete (2,) - [2, test box delete] call space:delete (2,) [] space:delete{2} --- ... call myinsert (2, 'test box delete') - [2, test box delete] call space:get (2,) - [2, test box delete] space:delete{2} --- - [2, 'test box delete'] ... call space:get (2,) [] call myinsert (2, 'test box.select()') - [2, test box.select()] call space:get (2,) - [2, test box.select()] call space:select (2,) - [2, test box.select()] space:get{2} --- - [2, 'test box.select()'] ... space:select{2} --- - - [2, 'test box.select()'] ... space:get{1} --- ... space:select{1} --- - [] ... call myreplace (2, 'hello', 'world') - [2, hello, world] call myreplace (2, 'goodbye', 'universe') - [2, goodbye, universe] call space:get (2,) - [2, goodbye, universe] call space:select (2,) - [2, goodbye, universe] space:get{2} --- - [2, 'goodbye', 'universe'] ... space:select{2} --- - - [2, 'goodbye', 'universe'] ... 
call myreplace (2,) - [2] call space:get (2,) - [2] call space:select (2,) - [2] call space:delete (2,) - [2] call space:delete (2,) [] call myinsert (3, 'old', 2) - [3, old, 2] call myinsert (3, 'old', 2) error: {code: ER_TUPLE_FOUND, reason: Duplicate key exists in unique index 'primary' in space 'tweedledum'} space:update({3}, {{'=', 1, 4}, {'=', 2, 'new'}}) --- - error: Attempt to modify a tuple field which is part of index 'primary' in space 'tweedledum' ... space:insert(space:get{3}:update{{'=', 1, 4}, {'=', 2, 'new'}}) space:delete{3} --- ... call space:get (4,) - [4, new, 2] call space:select (4,) - [4, new, 2] space:update({4}, {{'+', 3, 1}}) --- - [4, 'new', 3] ... space:update({4}, {{'-', 3, 1}}) --- - [4, 'new', 2] ... call space:get (4,) - [4, new, 2] call space:select (4,) - [4, new, 2] function field_x(key, field_index) return space:get(key)[field_index] end --- ... call field_x (4, 1) - [4] call field_x (4, 2) - [new] call space:delete (4,) - [4, new, 2] space:drop() --- ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'tree' }) --- ... eval (return 1)() --- [1] function f(...) return 1 end --- ... call f() --- - [1] eval (return 1, 2, 3)() --- [1, 2, 3] function f(...) return 1, 2, 3 end --- ... call f() --- - [1] - [2] - [3] eval (return true)() --- [true] function f(...) return true end --- ... call f() --- - [true] eval (return nil)() --- [null] function f(...) return nil end --- ... call f() --- - [null] eval (return )() --- [] function f(...) return end --- ... call f() --- [] eval (return {})() --- - [] function f(...) return {} end --- ... call f() --- - [] eval (return {1})() --- - [1] function f(...) return {1} end --- ... call f() --- - [1] eval (return {1, 2, 3})() --- - [1, 2, 3] function f(...) return {1, 2, 3} end --- ... call f() --- - [1, 2, 3] eval (return {k1 = 'v1', k2 = 'v2'})() --- - {k1: v1, k2: v2} function f(...) return {k1 = 'v1', k2 = 'v2'} end --- ... 
call f() --- - - {k1: v1, k2: v2} eval (return {k1 = 'v1', k2 = 'v2'})() --- - {k1: v1, k2: v2} function f(...) return {k1 = 'v1', k2 = 'v2'} end --- ... call f() --- - - {k1: v1, k2: v2} eval (return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}})() --- - c: '106': [1, 1428578535] '2': [1, 1428578535] pc: '106': [1, 1428578535, 9243] '2': [1, 1428578535, 9243] s: [1, 1428578535] u: 1428578535 v: [] function f(...) return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}} end --- ... call f() --- - - c: '106': [1, 1428578535] '2': [1, 1428578535] pc: '106': [1, 1428578535, 9243] '2': [1, 1428578535, 9243] s: [1, 1428578535] u: 1428578535 v: [] eval (return true, {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}})() --- - true - c: '106': [1, 1428578535] '2': [1, 1428578535] pc: '106': [1, 1428578535, 9243] '2': [1, 1428578535, 9243] s: [1, 1428578535] u: 1428578535 v: [] function f(...) return true, {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}} end --- ... 
call f() --- - [true] - - c: '106': [1, 1428578535] '2': [1, 1428578535] pc: '106': [1, 1428578535, 9243] '2': [1, 1428578535, 9243] s: [1, 1428578535] u: 1428578535 v: [] eval (return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}, true)() --- - c: '106': [1, 1428578535] '2': [1, 1428578535] pc: '106': [1, 1428578535, 9243] '2': [1, 1428578535, 9243] s: [1, 1428578535] u: 1428578535 v: [] - true function f(...) return {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}, true end --- ... call f() --- - - c: '106': [1, 1428578535] '2': [1, 1428578535] pc: '106': [1, 1428578535, 9243] '2': [1, 1428578535, 9243] s: [1, 1428578535] u: 1428578535 v: [] - [true] t = box.tuple.new('tuple', {1, 2, 3}, { k1 = 'v', k2 = 'v2'}) --- ... eval (return t)() --- - - tuple - [1, 2, 3] - {k1: v, k2: v2} function f(...) return t end --- ... call f() --- - - tuple - [1, 2, 3] - {k1: v, k2: v2} eval (return t, t, t)() --- - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} function f(...) return t, t, t end --- ... call f() --- - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} eval (return {t})() --- - - - tuple - [1, 2, 3] - {k1: v, k2: v2} function f(...) return {t} end --- ... call f() --- - - tuple - [1, 2, 3] - {k1: v, k2: v2} eval (return {t, t, t})() --- - - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} function f(...) return {t, t, t} end --- ... 
call f() --- - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} - - tuple - [1, 2, 3] - {k1: v, k2: v2} eval (return error('exception'))() --- error: {code: ER_PROC_LUA, reason: exception} function f(...) return error('exception') end --- ... call f() --- error: {code: ER_PROC_LUA, reason: exception} eval (return box.error(0))() --- error: {code: ER_OK, reason: Unknown error} function f(...) return box.error(0) end --- ... call f() --- error: {code: ER_OK, reason: Unknown error} eval (return ...)() --- [] function f(...) return ... end --- ... call f() --- [] eval (return ...)(1,2,3) --- [1, 2, 3] function f(...) return ... end --- ... call f(1,2,3) --- - [1] - [2] - [3] eval (return ...)(None,None,None) --- [null, null, null] function f(...) return ... end --- ... call f(None,None,None) --- - [null] - [null] - [null] eval (return ...)({'k2': 'v2', 'k1': 'v1'}) --- - {k1: v1, k2: v2} function f(...) return ... end --- ... call f({'k2': 'v2', 'k1': 'v1'}) --- - - {k1: v1, k2: v2} eval (return space:auto_increment({"transaction"}))() --- - [1, transaction] function f(...) return space:auto_increment({"transaction"}) end --- ... call f() --- - [2, transaction] eval (return space:select{})() --- - - [1, transaction] - [2, transaction] function f(...) return space:select{} end --- ... call f() --- - [1, transaction] - [2, transaction] eval (return box.begin(), space:auto_increment({"failed"}), box.rollback())() --- - null - [3, failed] function f(...) return box.begin(), space:auto_increment({"failed"}), box.rollback() end --- ... call f() --- - [null] - [3, failed] eval (return space:select{})() --- - - [1, transaction] - [2, transaction] function f(...) return space:select{} end --- ... call f() --- - [1, transaction] - [2, transaction] eval (return require("fiber").sleep(0))() --- [] function f(...) return require("fiber").sleep(0) end --- ... 
call f() --- [] eval (!invalid expression)() --- error: {code: ER_PROC_LUA, reason: 'eval:1: unexpected symbol near ''!'''} space:drop() --- ... box.schema.user.drop('test') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box-py/bad_trigger.result0000664000000000000000000000064713306560010022115 0ustar rootroot # # if on_connect() trigger raises an exception, the connection is dropped # nosuchfunction = nil --- ... function f1() nosuchfunction() end --- ... type(box.session.on_connect(f1)) --- - function ... greeting: True fixheader: True error code 32 error message: [string "function f1() nosuchfunction() end"]:1: attempt to call global 'nosuchfunction' (a nil value) eof: True box.session.on_connect(nil, f1) --- ... tarantool_1.9.1.26.g63eb81e3c/test/box-py/snapshot.test.py0000664000000000000000000000361313306560010021567 0ustar rootrootimport os import sys import yaml import time from signal import SIGUSR1 sys.stdout.push_filter(server.vardir, "") admin("space = box.schema.space.create('tweedledum')") admin("index = space:create_index('primary', { type = 'hash' })") print """# # A test case for: http://bugs.launchpad.net/bugs/686411 # Check that 'box.snapshot()' does not overwrite a snapshot # file that already exists. Verify also that any other # error that happens when saving snapshot is propagated # to the caller. """ admin("space:insert{1, 'first tuple'}") admin("box.snapshot()") # # Increment LSN admin("space:insert{2, 'second tuple'}") # # Check for other errors, e.g. "Permission denied". 
lsn = int(yaml.load(admin("box.info.lsn", silent=True))[0]) snapshot = str(lsn).zfill(20) + ".snap" snapshot = os.path.join(os.path.join(server.vardir, server.name), snapshot) # Make snapshot path unwritable snapshot os.mkdir(snapshot) admin("_, e = pcall(box.snapshot)") admin("e.type") admin("e.errno") # Cleanup os.rmdir(snapshot) admin("space:delete{1}") admin("space:delete{2}") print """# # A test case for http://bugs.launchpad.net/bugs/727174 # "tarantool_box crashes when saving snapshot on SIGUSR1" #""" print """ # Increment the lsn number, to make sure there is no such snapshot yet #""" admin("space:insert{1, 'Test tuple'}") pid = int(yaml.load(admin("box.info.pid", silent=True))[0]) lsn = int(yaml.load(admin("box.info.lsn", silent=True))[0]) snapshot = str(lsn).zfill(20) + ".snap" snapshot = os.path.join(os.path.join(server.vardir, server.name), snapshot) iteration = 0 MAX_ITERATIONS = 100 while not os.access(snapshot, os.F_OK) and iteration < MAX_ITERATIONS: if iteration % 10 == 0: os.kill(pid, SIGUSR1) time.sleep(0.01) iteration = iteration + 1 if iteration == 0 or iteration >= MAX_ITERATIONS: print "Snapshot is missing." else: print "Snapshot exists." 
admin("space:drop()") sys.stdout.pop_filter() tarantool_1.9.1.26.g63eb81e3c/test/box-py/args.test.py0000664000000000000000000000441213306560010020662 0ustar rootrootimport sys import os import re # mask BFD warnings: https://bugs.launchpad.net/tarantool/+bug/1018356 sys.stdout.push_filter("unable to read unknown load command 0x2\d+", "") server.test_option("--help") server.test_option("-h") # Replace with the same value for case when builddir inside source dir sys.stdout.push_filter(re.escape(os.getenv("BUILDDIR")+'/src/tarantool'), "tarantool") sys.stdout.push_filter(re.escape(os.getenv("BUILDDIR")), "${SOURCEDIR}") sys.stdout.push_filter(re.escape(os.getenv("SOURCEDIR")+'/src/tarantool'), "tarantool") sys.stdout.push_filter(re.escape(os.getenv("SOURCEDIR")), "${SOURCEDIR}") sys.stdout.push_filter("invalid option.*", "invalid option") sys.stdout.push_filter("unrecognized option.*", "unrecognized option") server.test_option("-Z") server.test_option("--no-such-option") server.test_option("--no-such-option --version") sys.stdout.push_filter(".* (\d+)\.\d+\.\d(-\d+-\w+)?", "Tarantool \\1.minor.patch--") sys.stdout.push_filter("Target: .*", "Target: platform ") sys.stdout.push_filter(".*Disable shared arena since.*\n", "") sys.stdout.push_filter("Build options: .*", "Build options: flags") sys.stdout.push_filter("C_FLAGS:.*", "C_FLAGS: flags") sys.stdout.push_filter("CXX_FLAGS:.*", "CXX_FLAGS: flags") sys.stdout.push_filter("Compiler: .*", "Compiler: cc") server.test_option("--version") server.test_option("-v") server.test_option("-V ") script = os.getenv("SOURCEDIR") + "/test/box-py/args.lua" server.test_option(script) server.test_option(script + " 1 2 3") server.test_option(script + " 1 2 3 -V") server.test_option(script + " -V 1 2 3") server.test_option(script + " 1 2 3 --help") server.test_option(script + " --help 1 2 3") server.test_option("-V " + script + " 1 2 3") server.test_option("-e \"print('Hello')\" " + script + " 1 2 3") server.test_option("-e \"a = 10\" 
" + \ "-e print(a) " + \ script + \ " 1 2 3 --help") server.test_option("-e \"print(rawget(_G, 'log') == nil)\" " + \ "-e io.flush() " + \ "-l log " + \ "-e \"print(log.info('Hello'))\" " + \ script + \ " 1 2 3 --help") sys.stdout.clear_all_filters() # Args filter cleanup # vim: syntax=python tarantool_1.9.1.26.g63eb81e3c/test/box-py/iproto.test.py0000664000000000000000000003120713306560010021244 0ustar rootrootimport os import sys import struct import socket import msgpack from tarantool.const import * from tarantool import Connection from tarantool.request import Request, RequestInsert, RequestSelect, RequestUpdate, RequestUpsert from tarantool.response import Response from lib.tarantool_connection import TarantoolConnection admin("box.schema.user.grant('guest', 'read,write,execute', 'universe')") print """ # # iproto packages test # """ # opeing new connection to tarantool/box conn = TarantoolConnection(server.iproto.host, server.iproto.port) conn.connect() s = conn.socket print """ # Test bug #899343 (server assertion failure on incorrect packet) """ print "# send the package with invalid length" invalid_request = struct.pack(' 0 # closing connection s.close() key_names = {} for (k,v) in globals().items(): if type(k) == str and k.startswith('IPROTO_') and type(v) == int: key_names[v] = k def repr_dict(todump): d = {} for (k, v) in todump.items(): k_name = key_names.get(k, k) d[k_name] = v return repr(d) def test(header, body): # Connect and authenticate c = Connection('localhost', server.iproto.port) c.connect() print 'query', repr_dict(header), repr_dict(body) header = msgpack.dumps(header) body = msgpack.dumps(body) query = msgpack.dumps(len(header) + len(body)) + header + body # Send raw request using connectred socket s = c._socket try: s.send(query) except OSError as e: print ' => ', 'Failed to send request' c.close() print iproto.py_con.ping() > 0 print """ # Test gh-206 "Segfault if sending IPROTO package without `KEY` field" """ print "IPROTO_SELECT" 
test({ IPROTO_CODE : REQUEST_TYPE_SELECT }, { IPROTO_SPACE_ID: 280 }) print "\n" print "IPROTO_DELETE" test({ IPROTO_CODE : REQUEST_TYPE_DELETE }, { IPROTO_SPACE_ID: 280 }) print "\n" print "IPROTO_UPDATE" test({ IPROTO_CODE : REQUEST_TYPE_UPDATE }, { IPROTO_SPACE_ID: 280 }) test({ IPROTO_CODE : REQUEST_TYPE_UPDATE }, { IPROTO_SPACE_ID: 280, IPROTO_KEY: (1, )}) print "\n" print "IPROTO_REPLACE" test({ IPROTO_CODE : REQUEST_TYPE_REPLACE }, { IPROTO_SPACE_ID: 280 }) print "\n" print "IPROTO_CALL" test({ IPROTO_CODE : REQUEST_TYPE_CALL }, {}) test({ IPROTO_CODE : REQUEST_TYPE_CALL }, { IPROTO_KEY: ('procname', )}) print "\n" # gh-434 Tarantool crashes on multiple iproto requests with WAL enabled admin("box.cfg.wal_mode") admin("space = box.schema.space.create('test', { id = 567 })") admin("index = space:create_index('primary', { type = 'hash' })") admin("box.schema.user.grant('guest', 'read,write,execute', 'space', 'test')") c = Connection('localhost', server.iproto.port) c.connect() request1 = RequestInsert(c, 567, [1, "baobab"]) request2 = RequestInsert(c, 567, [2, "obbaba"]) s = c._socket try: s.send(bytes(request1) + bytes(request2)) except OSError as e: print ' => ', 'Failed to send request' response1 = Response(c, c._read_response()) response2 = Response(c, c._read_response()) print response1.__str__() print response2.__str__() request1 = RequestInsert(c, 567, [3, "occama"]) request2 = RequestSelect(c, 567, 0, [1], 0, 1, 0) s = c._socket try: s.send(bytes(request1) + bytes(request2)) except OSError as e: print ' => ', 'Failed to send request' response1 = Response(c, c._read_response()) response2 = Response(c, c._read_response()) print response1.__str__() print response2.__str__() request1 = RequestSelect(c, 567, 0, [2], 0, 1, 0) request2 = RequestInsert(c, 567, [4, "ockham"]) s = c._socket try: s.send(bytes(request1) + bytes(request2)) except OSError as e: print ' => ', 'Failed to send request' response1 = Response(c, c._read_response()) response2 = Response(c, 
c._read_response()) print response1.__str__() print response2.__str__() request1 = RequestSelect(c, 567, 0, [1], 0, 1, 0) request2 = RequestSelect(c, 567, 0, [2], 0, 1, 0) s = c._socket try: s.send(bytes(request1) + bytes(request2)) except OSError as e: print ' => ', 'Failed to send request' response1 = Response(c, c._read_response()) response2 = Response(c, c._read_response()) print response1.__str__() print response2.__str__() c.close() admin("space:drop()") # # gh-522: Broken compatibility with msgpack-python for strings of size 33..255 # admin("space = box.schema.space.create('test')") admin("index = space:create_index('primary', { type = 'hash', parts = {1, 'string'}})") class RawInsert(Request): request_type = REQUEST_TYPE_INSERT def __init__(self, conn, space_no, blob): super(RawInsert, self).__init__(conn) request_body = "\x82" + msgpack.dumps(IPROTO_SPACE_ID) + \ msgpack.dumps(space_id) + msgpack.dumps(IPROTO_TUPLE) + blob self._bytes = self.header(len(request_body)) + request_body class RawSelect(Request): request_type = REQUEST_TYPE_SELECT def __init__(self, conn, space_no, blob): super(RawSelect, self).__init__(conn) request_body = "\x83" + msgpack.dumps(IPROTO_SPACE_ID) + \ msgpack.dumps(space_id) + msgpack.dumps(IPROTO_KEY) + blob + \ msgpack.dumps(IPROTO_LIMIT) + msgpack.dumps(100); self._bytes = self.header(len(request_body)) + request_body c = iproto.py_con space = c.space('test') space_id = space.space_no TESTS = [ (1, "\xa1", "\xd9\x01", "\xda\x00\x01", "\xdb\x00\x00\x00\x01"), (31, "\xbf", "\xd9\x1f", "\xda\x00\x1f", "\xdb\x00\x00\x00\x1f"), (32, "\xd9\x20", "\xda\x00\x20", "\xdb\x00\x00\x00\x20"), (255, "\xd9\xff", "\xda\x00\xff", "\xdb\x00\x00\x00\xff"), (256, "\xda\x01\x00", "\xdb\x00\x00\x01\x00"), (65535, "\xda\xff\xff", "\xdb\x00\x00\xff\xff"), (65536, "\xdb\x00\x01\x00\x00"), ] for test in TESTS: it = iter(test) size = next(it) print 'STR', size print '--' for fmt in it: print '0x' + fmt.encode('hex'), '=>', field = '*' * size 
c._send_request(RawInsert(c, space_id, "\x91" + fmt + field)) tuple = space.select(field)[0] print len(tuple[0])== size and 'ok' or 'fail', it2 = iter(test) next(it2) for fmt2 in it2: tuple = c._send_request(RawSelect(c, space_id, "\x91" + fmt2 + field))[0] print len(tuple[0]) == size and 'ok' or 'fail', tuple = space.delete(field)[0] print len(tuple[0]) == size and 'ok' or 'fail', print print print 'Test of schema_id in iproto.' c = Connection('localhost', server.iproto.port) c.connect() s = c._socket def receive_response(): resp_len = '' resp_headerbody = '' resp_header = {} resp_body = {} try: resp_len = s.recv(5) resp_len = msgpack.loads(resp_len) resp_headerbody = s.recv(resp_len) unpacker = msgpack.Unpacker(use_list = True) unpacker.feed(resp_headerbody) resp_header = unpacker.unpack() resp_body = unpacker.unpack() except OSError as e: print ' => ', 'Failed to recv response' res = {} res['header'] = resp_header res['body'] = resp_body return res def test_request(req_header, req_body): query_header = msgpack.dumps(req_header) query_body = msgpack.dumps(req_body) packet_len = len(query_header) + len(query_body) query = msgpack.dumps(packet_len) + query_header + query_body try: s.send(query) except OSError as e: print ' => ', 'Failed to send request' return receive_response() header = { IPROTO_CODE : REQUEST_TYPE_SELECT} body = { IPROTO_SPACE_ID: space_id, IPROTO_INDEX_ID: 0, IPROTO_KEY: [], IPROTO_ITERATOR: 2, IPROTO_OFFSET: 0, IPROTO_LIMIT: 1 } resp = test_request(header, body) print 'Normal connect done w/o errors:', resp['header'][0] == 0 print 'Got schema_id:', resp['header'][5] > 0 schema_id = resp['header'][5] header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : 0 } resp = test_request(header, body) print 'Zero-schema_id connect done w/o errors:', resp['header'][0] == 0 print 'Same schema_id:', resp['header'][5] == schema_id header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id } resp = test_request(header, body) print 'Normal connect done w/o 
errors:', resp['header'][0] == 0 print 'Same schema_id:', resp['header'][5] == schema_id header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id + 1 } resp = test_request(header, body) print 'Wrong schema_id leads to error:', resp['header'][0] != 0 print 'Same schema_id:', resp['header'][5] == schema_id admin("space2 = box.schema.create_space('test2')") header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id } resp = test_request(header, body) print 'Schema changed -> error:', resp['header'][0] != 0 print 'Got another schema_id:', resp['header'][5] != schema_id # # gh-2334 Lost SYNC in JOIN response. # uuid = '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95' header = { IPROTO_CODE: REQUEST_TYPE_JOIN, IPROTO_SYNC: 2334 } body = { IPROTO_SERVER_UUID: uuid } resp = test_request(header, body) if resp['header'][IPROTO_SYNC] == 2334: i = 1 while i < 3: resp = receive_response() if resp['header'][IPROTO_SYNC] != 2334: print 'Bad sync on response with number ', i break if resp['header'][IPROTO_CODE] == REQUEST_TYPE_OK: i += 1 else: print 'Sync ok' else: print 'Bad first sync' # # Try incorrect JOIN. SYNC must be also returned. 
# body[IPROTO_SERVER_UUID] = 'unknown' resp = test_request(header, body) if resp['header'][IPROTO_SYNC] == 2334: print('Sync on error is ok') else: print('Sync on error is not ok') c.close() admin("space:drop()") admin("space2:drop()") # # gh-1280 Segmentation fault on space.select(tuple()) or space.select([2]) # admin("space = box.schema.create_space('gh1280', { engine = 'vinyl' })") admin("index = space:create_index('primary')") admin("space:insert({1})") admin("space:insert({2, 'Music'})") admin("space:insert({3, 'Length', 93})") iproto.py_con.space('gh1280').select([]) iproto.py_con.space('gh1280').select(list()) admin("space:drop()") admin("box.schema.user.revoke('guest', 'read,write,execute', 'universe')") # # gh-272 if the packet was incorrect, respond with an error code # gh-1654 do not close connnection on invalid request # print """ # Test bugs gh-272, gh-1654 if the packet was incorrect, respond with # an error code and do not close connection """ c = Connection('localhost', server.iproto.port) c.connect() s = c._socket header = { "hello": "world"} body = { "bug": 272 } resp = test_request(header, body) print 'sync=%d, %s' % (resp['header'][IPROTO_SYNC], resp['body']) header = { IPROTO_CODE : REQUEST_TYPE_SELECT } header[IPROTO_SYNC] = 1234 resp = test_request(header, body) print 'sync=%d, %s' % (resp['header'][IPROTO_SYNC], resp['body']) header[IPROTO_SYNC] = 5678 body = { IPROTO_SPACE_ID: 304, IPROTO_KEY: [], IPROTO_LIMIT: 1 } resp = test_request(header, body) print 'sync=%d, %s' % (resp['header'][IPROTO_SYNC], resp['body']) c.close() admin("space = box.schema.space.create('test_index_base', { id = 568 })") admin("index = space:create_index('primary', { type = 'hash' })") admin("box.schema.user.grant('guest', 'read,write,execute', 'space', 'test_index_base')") c = Connection('localhost', server.iproto.port) c.connect() s = c._socket request = RequestInsert(c, 568, [1, 0, 0, 0]) try: s.send(bytes(request)) except OSError as e: print ' => ', 'Failed to 
send request' response = Response(c, c._read_response()) print response.__str__() request = RequestUpdate(c, 568, 0, [1], [['+', 2, 1], ['-', 3, 1]]) try: s.send(bytes(request)) except OSError as e: print ' => ', 'Failed to send request' response = Response(c, c._read_response()) print response.__str__() request = RequestUpsert(c, 568, 0, [1, 0, 0, 0], [['+', 2, 1], ['-', 3, 1]]) try: s.send(bytes(request)) except OSError as e: print ' => ', 'Failed to send request' response = Response(c, c._read_response()) request = RequestSelect(c, 568, 0, [1], 0, 1, 0) try: s.send(bytes(request)) except OSError as e: print ' => ', 'Failed to send request' response = Response(c, c._read_response()) print response.__str__() c.close() # # gh-2619 follow up: allow empty args for call/eval. # admin("function kek() return 'kek' end") admin("box.schema.user.grant('guest', 'read,write,execute', 'universe')") c = Connection('localhost', server.iproto.port) c.connect() s = c._socket header = { IPROTO_CODE: REQUEST_TYPE_CALL, IPROTO_SYNC: 100 } body = { IPROTO_FUNCTION_NAME: 'kek' } resp = test_request(header, body) print "Sync: ", resp['header'][IPROTO_SYNC] print "Retcode: ", resp['body'][IPROTO_DATA] c.close() admin("box.schema.user.revoke('guest', 'read,write,execute', 'universe')") admin("space:drop()") tarantool_1.9.1.26.g63eb81e3c/test/box-py/bad_trigger.test.py0000664000000000000000000000244413306560010022202 0ustar rootrootfrom lib.box_connection import BoxConnection from lib.tarantool_connection import TarantoolConnection from tarantool import NetworkError from tarantool.const import IPROTO_GREETING_SIZE, IPROTO_CODE, IPROTO_ERROR, \ REQUEST_TYPE_ERROR import socket import msgpack print """ # # if on_connect() trigger raises an exception, the connection is dropped # """ # silence possible error of strict mode server.admin("nosuchfunction = nil") server.admin("function f1() nosuchfunction() end") server.admin("type(box.session.on_connect(f1))") unpacker = 
msgpack.Unpacker(use_list = False) conn = TarantoolConnection(server.iproto.host, server.iproto.port) conn.connect() s = conn.socket # Read greeting print 'greeting: ', len(s.recv(IPROTO_GREETING_SIZE)) == IPROTO_GREETING_SIZE # Read error packet IPROTO_FIXHEADER_SIZE = 5 fixheader = s.recv(IPROTO_FIXHEADER_SIZE) print 'fixheader: ', len(fixheader) == IPROTO_FIXHEADER_SIZE unpacker.feed(fixheader) packet_len = unpacker.unpack() packet = s.recv(packet_len) unpacker.feed(packet) # Parse packet header = unpacker.unpack() body = unpacker.unpack() print 'error code', (header[IPROTO_CODE] & (REQUEST_TYPE_ERROR - 1)) print 'error message: ', body[IPROTO_ERROR] print 'eof:', len(s.recv(1024)) == 0 s.close() server.admin("box.session.on_connect(nil, f1)") tarantool_1.9.1.26.g63eb81e3c/test/box-py/call.test.py0000664000000000000000000001322413306560010020642 0ustar rootrootimport os import sys def call(name, *args): return iproto.call(name, *args) admin("box.schema.user.create('test', { password = 'test' })") admin("box.schema.user.grant('test', 'execute,read,write', 'universe')") iproto.authenticate('test', 'test') # workaround for gh-770 centos 6 float representation admin('exp_notation = 1e123') admin("function f1() return 'testing', 1, false, -1, 1.123, math.abs(exp_notation - 1e123) < 0.1, nil end") admin("f1()") call("f1") admin("f1=nil") call("f1") admin("function f1() return f1 end") call("f1") # A test case for https://github.com/tarantool/tarantool/issues/44 # IPROTO required! 
call("box.error", 33333, 'Hey!') print """ # A test case for Bug#103491 # server CALL processing bug with name path longer than two # https://bugs.launchpad.net/tarantool/+bug/1034912 """ admin("f = function() return 'OK' end") admin("test = {}") admin("test.f = f") admin("test.test = {}") admin("test.test.f = f") call("f") call("test.f") call("test.test.f") print """ # Test for Bug #955226 # Lua Numbers are passed back wrongly as strings # """ admin("function foo() return 1, 2, '1', '2' end") call("foo") # # check how well we can return tables # admin("function f1(...) return {...} end") admin("function f2(...) return f1({...}) end") call("f1", 'test_', 'test_') call("f2", 'test_', 'test_') call("f1") call("f2") # # check multi-tuple return # admin("function f3() return {{'hello'}, {'world'}} end") call("f3") admin("function f3() return {'hello', {'world'}} end") call("f3") admin("function f3() return 'hello', {{'world'}, {'canada'}} end") call("f3") admin("function f3() return {}, '123', {{}, {}} end") call("f3") admin("function f3() return { {{'hello'}} } end") call("f3") admin("function f3() return { box.tuple.new('hello'), {'world'} } end") call("f3") admin("function f3() return { {'world'}, box.tuple.new('hello') } end") call("f3") admin("function f3() return { { test={1,2,3} }, { test2={1,2,3} } } end") call("f3") call("f1", 'jason') call("f1", 'jason', 1, 'test', 2, 'stewart') admin("space = box.schema.space.create('tweedledum')") admin("index = space:create_index('primary', { type = 'hash' })") admin("function myreplace(...) return space:replace{...} end") admin("function myinsert(...) 
return space:insert{...} end") call("myinsert", 1, 'test box delete') call("space:delete", 1) call("myinsert", 1, 'test box delete') call("space:delete", 1) call("space:delete", 1) call("myinsert", 2, 'test box delete') call("space:delete", 1) call("space:delete", 2) call("space:delete", 2) admin("space:delete{2}") call("myinsert", 2, 'test box delete') call("space:get", 2) admin("space:delete{2}") call("space:get", 2) call("myinsert", 2, 'test box.select()') call("space:get", 2) call("space:select", 2) admin("space:get{2}") admin("space:select{2}") admin("space:get{1}") admin("space:select{1}") call("myreplace", 2, 'hello', 'world') call("myreplace", 2, 'goodbye', 'universe') call("space:get", 2) call("space:select", 2) admin("space:get{2}") admin("space:select{2}") call("myreplace", 2) call("space:get", 2) call("space:select", 2) call("space:delete", 2) call("space:delete", 2) call("myinsert", 3, 'old', 2) # test that insert produces a duplicate key error call("myinsert", 3, 'old', 2) admin("space:update({3}, {{'=', 1, 4}, {'=', 2, 'new'}})") admin("space:insert(space:get{3}:update{{'=', 1, 4}, {'=', 2, 'new'}}) space:delete{3}") call("space:get", 4) call("space:select", 4) admin("space:update({4}, {{'+', 3, 1}})") admin("space:update({4}, {{'-', 3, 1}})") call("space:get", 4) call("space:select", 4) admin("function field_x(key, field_index) return space:get(key)[field_index] end") call("field_x", 4, 1) call("field_x", 4, 2) call("space:delete", 4) admin("space:drop()") admin("space = box.schema.space.create('tweedledum')") admin("index = space:create_index('primary', { type = 'tree' })") def lua_eval(name, *args): print 'eval (%s)(%s)' % (name, ','.join([ str(arg) for arg in args])) print '---' print iproto.py_con.eval(name, args) def lua_call(name, *args): print 'call %s(%s)' % (name, ','.join([ str(arg) for arg in args])) print '---' print iproto.py_con.call(name, args) def test(expr, *args): lua_eval('return ' + expr, *args) admin('function f(...) 
return ' + expr + ' end') lua_call('f', *args) # Return values test("1") test("1, 2, 3") test("true") test("nil") test("") test("{}") test("{1}") test("{1, 2, 3}") test("{k1 = 'v1', k2 = 'v2'}") test("{k1 = 'v1', k2 = 'v2'}") # gh-791: maps are wrongly assumed to be arrays test("{s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}") test("true, {s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}") test("{s = {1, 1428578535}, u = 1428578535, v = {}, c = {['2'] = {1, 1428578535}, ['106'] = { 1, 1428578535} }, pc = {['2'] = {1, 1428578535, 9243}, ['106'] = {1, 1428578535, 9243}}}, true") admin("t = box.tuple.new('tuple', {1, 2, 3}, { k1 = 'v', k2 = 'v2'})") test("t") test("t, t, t") test("{t}") test("{t, t, t}") test("error('exception')") test("box.error(0)") test('...') test('...', 1, 2, 3) test('...', None, None, None) test('...', { 'k1': 'v1', 'k2': 'v2'}) # Transactions test('space:auto_increment({"transaction"})') test('space:select{}') test('box.begin(), space:auto_increment({"failed"}), box.rollback()') test('space:select{}') test('require("fiber").sleep(0)') # Other lua_eval('!invalid expression') admin("space:drop()") admin("box.schema.user.drop('test')") # Re-connect after removing user iproto.py_con.close() tarantool_1.9.1.26.g63eb81e3c/test/box-py/bootstrap.test.py0000664000000000000000000000122513306560010021742 0ustar rootroot import sys import yaml server_uuid = server.get_param('uuid') sys.stdout.push_filter(server_uuid, '') cluster_uuid = yaml.load(server.admin('box.space._schema:get("cluster")', silent = True))[0][1] sys.stdout.push_filter(cluster_uuid, '') server.admin('box.internal.bootstrap()') server.restart() server.admin('box.space._schema:select{}') server.admin('box.space._cluster:select{}') 
server.admin('box.space._space:select{}') server.admin('box.space._index:select{}') server.admin('box.space._user:select{}') server.admin('box.space._func:select{}') server.admin('box.space._priv:select{}') # Cleanup sys.stdout.pop_filter() tarantool_1.9.1.26.g63eb81e3c/test/box-py/print.test.py0000664000000000000000000000124213306560010021060 0ustar rootrootimport tarantool import sys import os import re log = server.get_log() admin('print("Hello, world")') admin("io = require('io')") admin("""local f = require('fiber').create( function() print('Ehllo, world') io.flush() end )""") admin("require('fiber').sleep(0.01)") print("Check log line (Hello):") print('---') if log.seek_once('Hello') >= 0: print('- "logfile contains "Hello""') else: print('- "logfile does not contain "Hello""') print('...') print("Check log line (Ehllo):") print('---') if log.seek_once('Ehllo') >= 0: print('- "logfile contains "Ehllo""') else: print('- "logfile does not contain "Ehllo""') print('...') tarantool_1.9.1.26.g63eb81e3c/test/.gitattributes0000664000000000000000000000003213306560010020045 0ustar rootroot*.result diff merge=text tarantool_1.9.1.26.g63eb81e3c/test/app/0000775000000000000000000000000013306565107015753 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/app/fiber_channel.result0000664000000000000000000001457713306560010021774 0ustar rootrootfiber = require('fiber') --- ... env = require('test_run') --- ... test_run = env.new() --- ... -- channel methods ignore extra arguments, as regular Lua functions do ignored_args = {'Extra', 'arguments', 'are', 'ignored'} --- ... ch = fiber.channel(1) --- ... ch:size() --- - 1 ... ch:count() --- - 0 ... ch:is_full() --- - false ... ch:is_empty() --- - true ... ch:size(unpack(ignored_args)) --- - 1 ... ch:count(unpack(ignored_args)) --- - 0 ... ch:is_full(unpack(ignored_args)) --- - false ... ch:is_empty(unpack(ignored_args)) --- - true ... ch:get(.1) --- - null ... ch:get(.1, nil) --- - null ... 
ch:get(.1, nil, unpack(ignored_args)) --- - null ... tostring(ch) --- - 'channel: 0' ... ch:put() --- - error: 'usage: channel:put(var [, timeout])' ... ch:count() --- - 0 ... ch:put('test') --- - true ... tostring(ch) --- - 'channel: 1' ... ch:get() --- - test ... ch:put('test', nil), ch:get() --- - true - test ... ch:put('test', nil, unpack(ignored_args)), ch:get() --- - true - test ... ch:get('wrong timeout') --- - error: 'usage: channel:get([timeout])' ... ch:get(-10) --- - error: 'usage: channel:get([timeout])' ... ch:put(234) --- - true ... ch:put(345, .1) --- - false ... ch:count() --- - 1 ... ch:is_full() --- - true ... ch:is_empty() --- - false ... buffer = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... tfbr = fiber.create( function() while true do table.insert(buffer, ch:get()) end end ); --- ... t = {}; --- ... for i = 1, 10 do table.insert(t, {i, ch:put(i, 0.1)}) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t --- - - - 1 - true - - 2 - true - - 3 - true - - 4 - true - - 5 - true - - 6 - true - - 7 - true - - 8 - true - - 9 - true - - 10 - true ... ch:has_readers() --- - true ... ch:has_writers() --- - false ... fiber.cancel(tfbr) --- ... ch:has_readers() --- - false ... ch:has_writers() --- - false ... ch:count() --- - 0 ... ch:put(box.info.pid) --- - true ... ch:count() --- - 1 ... ch:is_full() --- - true ... ch:is_empty() --- - false ... ch:get(box.info.pid) == box.info.pid --- - true ... buffer --- - - 234 - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 ... ch:is_empty() --- - true ... ch:is_full() --- - false ... ch:is_empty() --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... tfbr = fiber.create( function() while true do local v = ch:get() table.insert(buffer, v) end end ); --- ... tfbr2 = fiber.create( function() while true do local v = ch:get() table.insert(buffer, v) end end ); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... buffer = {} --- ... buffer --- - [] ... 
ch:is_full() --- - false ... ch:is_empty() --- - true ... ch:put(1) --- - true ... ch:put(2) --- - true ... ch:put(3) --- - true ... ch:put(4) --- - true ... ch:put(5) --- - true ... t = {} --- ... for i = 35, 45 do table.insert(t, ch:put(i)) end --- ... t --- - - true - true - true - true - true - true - true - true - true - true - true ... while #buffer < 15 do fiber.sleep(0.001) end --- ... table.sort(buffer) --- ... buffer --- - - 1 - 2 - 3 - 4 - 5 - 35 - 36 - 37 - 38 - 39 - 40 - 41 - 42 - 43 - 44 - 45 ... ch = fiber.channel(1) --- ... ch:is_closed() --- - false ... passed = false --- ... type(fiber.create(function() if ch:get() == nil then passed = true end end)) --- - userdata ... ch:close() --- ... fiber.yield() --- ... passed --- - true ... ch:get() --- - null ... ch:get() --- - null ... ch:put(10) --- - false ... ch:is_closed() --- - true ... tostring(ch) --- - 'channel: closed' ... ch = fiber.channel(1) --- ... ch:put(true) --- - true ... ch:is_closed() --- - false ... passed = false --- ... type(fiber.create(function() if ch:put(true) == false then passed = true end end)) --- - userdata ... ch:close() --- ... fiber.yield() --- ... passed --- - true ... ch:get() --- - null ... ch:get() --- - null ... ch:put(10) --- - false ... ch:is_closed() --- - true ... -- race conditions chs, test_res, count = {}, {}, 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 10 do table.insert(chs, fiber.channel()) end; --- ... fibers = {} for i = 1, 10 do table.insert(fibers, fiber.create(function(no) fiber.self():name('pusher') while true do chs[no]:put({no}) fiber.sleep(0.001 * math.random()) end end, i) ) end; --- ... for i = 1, 10 do table.insert(fibers, fiber.create(function(no) fiber.self():name('receiver') while true do local r = chs[no]:get(math.random() * .001) if r ~= nil and r[1] == no then test_res[no] = true elseif r ~= nil then break end fiber.sleep(0.001 * math.random()) count = count + 1 end test_res[no] = false end, i) ) end; --- ... 
for i = 1, 100 do fiber.sleep(0.01) if count > 2000 then break end end; --- ... count > 2000, #test_res, test_res; --- - true - 10 - - true - true - true - true - true - true - true - true - true - true ... for _, fiber in ipairs(fibers) do fiber:cancel() end; --- ... -- -- gh-756: channel:close() leaks memory -- ffi = require('ffi'); --- ... do stat, err = pcall(ffi.cdef, [[struct gh756 { int k; }]]) if not stat and not err:match('attempt to redefine') then error(err) end stat, err = pcall(ffi.metatype, 'struct gh756', { __gc = function() refs = refs - 1; end }) if not stat and not err:match('cannot change a protected metatable') then error(err) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... ct = ffi.typeof('struct gh756') --- ... -- create 10 objects and put they to a channel refs = 10 --- ... ch = fiber.channel(refs) --- ... for i=1,refs do ch:put(ffi.new(ct, i)) end --- ... -- get an object from the channel, run GC and check the number of objects ch:get().k == 1 --- - true ... collectgarbage('collect') --- - 0 ... refs --- - 9 ... ch:get().k == 2 --- - true ... collectgarbage('collect') --- - 0 ... refs --- - 8 ... -- close the channel and check the number of objects ch:close() --- ... collectgarbage('collect') --- - 0 ... refs -- must be zero --- - 0 ... tarantool_1.9.1.26.g63eb81e3c/test/app/argparse.result0000664000000000000000000000556313306560010021014 0ustar rootroot-- internal argparse test argparse = require('internal.argparse').parse --- ... -- test with empty arguments and options argparse() --- - [] ... -- test with command name (should be excluded) argparse({[0] = 'tarantoolctl', 'start', 'instance'}) --- - - start - instance ... -- test long option argparse({'tarantoolctl', 'start', 'instance', '--start'}) --- - 1: tarantoolctl 2: start 3: instance start: true ... argparse({'tarantoolctl', 'start', 'instance', '--start', '--stop'}) --- - 1: tarantoolctl 2: start 3: instance start: true stop: true ... 
argparse({'tarantoolctl', 'start', 'instance', '--start', '--stop', '--stop'}) --- - 1: tarantoolctl 2: start 3: instance start: true stop: - true - true ... argparse({'tarantoolctl', 'start', 'instance', '--start', '--stop', '--stop'}) --- - 1: tarantoolctl 2: start 3: instance start: true stop: - true - true ... argparse({'tarantoolctl', 'start', 'instance', '-baobab'}) --- - 1: tarantoolctl 2: start 3: instance b: - true - true - true o: true a: - true - true ... argparse({'tarantoolctl', 'start', 'instance', '-vovov'}) --- - 1: tarantoolctl 2: start 3: instance o: - true - true v: - true - true - true ... argparse({'tarantoolctl', 'start', 'instance', '--start=lalochka'}) --- - 1: tarantoolctl 2: start 3: instance start: lalochka ... argparse({'tarantoolctl', 'start', 'instance', '--start', 'lalochka'}) --- - 1: tarantoolctl 2: start 3: instance start: lalochka ... argparse({'tarantoolctl', 'start', 'instance', '--start', '--', 'lalochka'}) --- - error: 'builtin/internal.argparse.lua:105: bad argument #5: ID not valid' ... argparse({'tarantoolctl', 'start', 'instance', '--start', '-', 'lalochka'}) --- - 1: tarantoolctl 2: start 3: instance 4: lalochka start: true ... argparse({'--verh=42'}, {{'verh', 'number'}}) --- - verh: 42 ... argparse({'--verh=42'}, {{'verh', 'number+'}}) --- - verh: - 42 ... argparse({'--verh=42'}, {{'verh', 'string'}}) --- - verh: '42' ... argparse({'--verh=42'}, {{'verh', 'string+'}}) --- - verh: - '42' ... argparse({'--verh=42'}, {{'verh'}}) --- - verh: '42' ... argparse({'--verh=42'}, {'verh'}) --- - verh: '42' ... argparse({'--verh=42'}, {{'verh', 'boolean'}}) --- - error: 'builtin/internal.argparse.lua:35: Bad input for parameter "verh". Expected boolean, got "42"' ... argparse({'--verh=42'}, {{'verh', 'boolean+'}}) --- - error: 'builtin/internal.argparse.lua:35: Bad input for parameter "verh". Expected boolean, got "42"' ... argparse({'--verh=42'}, {'niz'}) --- - error: 'builtin/internal.argparse.lua:142: unknown options: verh' ... 
argparse({'--super-option'}) --- - super-option: true ... argparse({'tarantoolctl', 'start', 'instance', '--start=lalochka', 'option', '-', 'another option'}) --- - 1: tarantoolctl 2: start 3: instance 4: option 5: '-' 6: another option start: lalochka ... tarantool_1.9.1.26.g63eb81e3c/test/app/loaders.test.lua0000664000000000000000000000351313306560010021053 0ustar rootrootfio = require('fio') env = require('test_run') test_run = env.new() source_dir = os.getenv("SOURCEDIR") .. "/test/app/" build_dir = os.getenv("BUILDDIR") .. "/test/app/" -- -- Check . loader -- orig_cwd = fio.cwd() fio.chdir(source_dir) cwd_loader = package.loaders[2] f = cwd_loader("loaders") type(f) f() fio.chdir(orig_cwd) -- -- Check .rocks loader -- tmp_dir = fio.tempdir() work_dir = fio.pathjoin(tmp_dir, "pr") fio.mkdir(work_dir) pr1_dir = fio.pathjoin(work_dir, "pr1") fio.mkdir(pr1_dir) pr2_dir = fio.pathjoin(pr1_dir, "pr2") fio.mkdir(pr2_dir) lua_dir = ".rocks/share/tarantool" lib_dir = ".rocks/lib/tarantool" test_run:cmd("setopt delimiter ';'"); function create_dirs(name) fio.mkdir(name) fio.mkdir(name .. "/.rocks") fio.mkdir(name .. "/.rocks/share") fio.mkdir(name .. "/.rocks/lib") fio.mkdir(name .. "/.rocks/share/tarantool") fio.mkdir(name .. 
"/.rocks/lib/tarantool") end; test_run:cmd("setopt delimiter ''"); create_dirs(work_dir) create_dirs(pr1_dir) create_dirs(pr2_dir) soext = (jit.os == "OSX" and "dylib" or "so") loaders_path = fio.pathjoin(source_dir, "loaders.lua") loaderslib_path = fio.pathjoin(build_dir, "loaderslib."..soext) fio.symlink(loaders_path, fio.pathjoin(work_dir, lua_dir, "loaders.lua")) fio.symlink(loaderslib_path, fio.pathjoin(pr1_dir, lib_dir, "loaderslib."..soext)) orig_cwd = fio.cwd() fio.chdir(pr2_dir) rocks_loader = package.loaders[4] rocks_loader_dyn = package.loaders[5] f = rocks_loader("loaders") type(f) f() f = rocks_loader_dyn("loaderslib") type(f) f() f = rocks_loader("loaders1") type(f) -- error package.loaded.loaders = nil package.loaded.loaders1 = nil package.loaded.loaderslib = nil fio.chdir(work_dir) f = rocks_loader("loaders") type(f) f() f = rocks_loader("loaders1") type(f) -- error f = rocks_loader_dyn("loaderslib") type(f) -- error fio.chdir(orig_cwd) tarantool_1.9.1.26.g63eb81e3c/test/app/crypto_hmac.result0000664000000000000000000000603513306560010021513 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua:\"]: '") --- - true ... crypto = require('crypto') --- ... type(crypto) --- - table ... -- -- Invalid arguments -- crypto.hmac.md4() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.md4(key, string)' ... crypto.hmac.md5() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.md5(key, string)' ... crypto.hmac.sha1() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha1(key, string)' ... crypto.hmac.sha224() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha224(key, string)' ... crypto.hmac.sha256() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha256(key, string)' ... crypto.hmac.sha384() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha384(key, string)' ... crypto.hmac.sha512() --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha512(key, string)' ... 
crypto.hmac.nodigest --- - error: '[string "return crypto.hmac.nodigest "]:1: HMAC method "nodigest" is not supported' ... crypto.hmac.sha1('012345678', 'fred') --- - !!binary H35BJij7GZ0Rag9c+HvsTFden3c= ... key = '012345678' --- ... message = 'fred' --- ... crypto.hmac.sha1(key, nil) --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha1(key, string)' ... crypto.hmac.sha1(nil, message) --- - error: 'builtin/crypto.lua:"]: Key should be specified for HMAC operations' ... crypto.hmac.sha1(nil, nil) --- - error: 'builtin/crypto.lua:"]: Usage: hmac.sha1(key, string)' ... crypto.hmac.md4(key, message) --- - !!binary O62dNTQcfTuiyXfa/MKlig== ... crypto.hmac.md5(key, message) --- - !!binary s5FptfcQK37Bfh0R40qDPw== ... crypto.hmac.sha1(key, message) --- - !!binary H35BJij7GZ0Rag9c+HvsTFden3c= ... crypto.hmac.sha224(key, message) --- - !!binary JjzvWsIRDqIdEKKaDCILc3ybETuxj6LSkBJudw== ... crypto.hmac.sha256(key, message) --- - !!binary cIBSEwca3aliz6WGAYXiKK1+kU1ldzUk49s/b86AVxQ= ... crypto.hmac.sha384(key, message) --- - !!binary 1LC7zV1riyvdAjxUAhSYRXVLGUjsEZvLbvnbqJCqJPq3X117YfklFki++JWPUB8G ... crypto.hmac.sha512(key, message) --- - !!binary Q4PL+6f9bpLtXmGBaoq2aT4arwCoA0YmcOZ612jzZ0FgZ63CRMIa6JZ92t4cj+PQ8wojXj8jbo658ir/5BvPOg== ... -- -- Incremental update -- hmac_sha1 = crypto.hmac.sha1.new(key) --- ... hmac_sha1:update('abc') --- ... hmac_sha1:update('cde') --- ... hmac_sha1:result() == crypto.hmac.sha1(key, 'abccde') --- - true ... -- -- Empty string -- crypto.hmac.md4(key, '') --- - !!binary JntcBTt7gh45TdtdxuS6Fw== ... crypto.hmac.md5(key, '') --- - !!binary dIgsXw3Q8VV7D3I+s3kOPg== ... crypto.hmac.sha1(key, '') --- - !!binary eM9i/oncUFbfzncL5OQ2ZnUpWCY= ... crypto.hmac.sha224(key, '') --- - !!binary WC5mv2A+l1Y5/CEkxLMrRbmb/5temFsNXQ3xoQ== ... crypto.hmac.sha256(key, '') --- - !!binary lJeYNw6OtpHZCw0WUd+XvfwLcZM6za2O8/LJ48YQ+tQ= ... crypto.hmac.sha384(key, '') --- - !!binary z7E/+NqRq/Kkzk9+ijvCuyo8KgU57LoEJIx3ysgJfUDxJtCAWsHDqY/6GmJO1Slo ... 
crypto.hmac.sha512(key, '') --- - !!binary yqXDqloZTz1F312gTvXxod+2Rdd1O48FPI2h/2tux90XIemkz5xGMRs2sKajmAe7817TFWjnjHfvToDQ0Pvq3w== ... test_run:cmd("clear filter") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/app/suite.ini0000664000000000000000000000022713306560010017572 0ustar rootroot[default] core = tarantool description = application server tests script = app.lua lua_libs = lua/fiber.lua use_unix_sockets = True is_parallel = True tarantool_1.9.1.26.g63eb81e3c/test/app/app.lua0000664000000000000000000000036013306560010017221 0ustar rootroot#!/usr/bin/env tarantool box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", rows_per_wal = 50 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/app/socket.test.lua0000664000000000000000000004220713306560010020715 0ustar rootrootjson = require 'json' yaml = require 'yaml' pickle = require 'pickle' socket = require 'socket' fiber = require 'fiber' msgpack = require 'msgpack' log = require 'log' errno = require 'errno' fio = require 'fio' ffi = require('ffi') type(socket) env = require('test_run') test_run = env.new() test_run:cmd("push filter '(error: .builtin/.*[.]lua):[0-9]+' to '\\1'") socket('PF_INET', 'SOCK_STREAM', 'tcp121222'); s = socket('PF_INET', 'SOCK_STREAM', 'tcp') type(s) -- Invalid arguments test_run:cmd("setopt delimiter ';'") for k in pairs(getmetatable(s).__index) do local r, msg = pcall(s[k]) if not msg:match('Usage:') then error("Arguments is not checked for "..k) end end; s:close(); test_run:cmd("setopt delimiter ''"); LISTEN = require('uri').parse(box.cfg.listen) LISTEN ~= nil s = socket.tcp_connect(LISTEN.host, LISTEN.service) s:nonblock(true) s:nonblock() s:nonblock(false) s:nonblock() s:nonblock(true) s:readable(.01) s:wait(.01) socket.iowait(s:fd(), 'RW') socket.iowait(s:fd(), 3) socket.iowait(s:fd(), 'R') socket.iowait(s:fd(), 'r') socket.iowait(s:fd(), 1) socket.iowait(s:fd(), 'W') socket.iowait(s:fd(), 'w') 
socket.iowait(s:fd(), 2) socket.iowait(s:fd(), '') socket.iowait(s:fd(), -1) socket.iowait(s:fd(), 'RW') socket.iowait(s:fd(), 'RW', -100500) s:readable(0) s:errno() > 0 s:error() s:writable(.00000000000001) s:writable(0) s:wait(.01) socket.iowait(nil, nil, -1) socket.iowait(nil, nil, 0.0001) socket.iowait(-1, nil, 0.0001) socket.iowait(nil, 'RW') socket.iowait(0, nil) handshake = ffi.new('char[128]') -- test sysread with char * s:sysread(handshake, 128) ffi.string(handshake, 9) ping = msgpack.encode({ [0] = 64, [1] = 0 }) ping = msgpack.encode(string.len(ping)) .. ping -- test syswrite with char * s:syswrite(ffi.cast('const char *', ping), #ping) s:readable(1) s:wait(.01) pong = s:sysread() string.len(pong) msgpack.decode(pong) function remove_schema_id(t, x) if t[5] then t[5] = 'XXX' end return t, x end remove_schema_id(msgpack.decode(pong, 6)) s:close() s = socket('PF_INET', 'SOCK_STREAM', 'tcp') s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) s:error() s:bind('127.0.0.1', 0) s:error() s:listen(128) sevres = {} type(require('fiber').create(function() s:readable() do local sc = s:accept() table.insert(sevres, sc) sc:syswrite('ok') sc:close() end end)) #sevres sc = socket('PF_INET', 'SOCK_STREAM', 'tcp') sc:nonblock(false) sc:sysconnect('127.0.0.1', s:name().port) sc:nonblock(true) sc:readable(.5) sc:sysread() string.match(tostring(sc), ', peer') ~= nil #sevres sevres[1].host s:setsockopt('SOL_SOCKET', 'SO_BROADCAST', false) s:getsockopt('socket', 'SO_TYPE') s:error() s:setsockopt('SOL_SOCKET', 'SO_DEBUG', false) s:getsockopt('socket', 'SO_DEBUG') s:setsockopt('SOL_SOCKET', 'SO_ACCEPTCONN', 1) s:getsockopt('SOL_SOCKET', 'SO_RCVBUF') > 32 s:error() s:setsockopt('IPPROTO_TCP', 'TCP_NODELAY', true) s:getsockopt('IPPROTO_TCP', 'TCP_NODELAY') > 0 s:setsockopt('SOL_TCP', 'TCP_NODELAY', false) s:getsockopt('SOL_TCP', 'TCP_NODELAY') == 0 s:setsockopt('tcp', 'TCP_NODELAY', true) s:getsockopt('tcp', 'TCP_NODELAY') > 0 s:setsockopt(6, 'TCP_NODELAY', false) s:getsockopt(6, 
'TCP_NODELAY') == 0 not s:setsockopt(nil, 'TCP_NODELAY', true) and errno() == errno.EINVAL not s:getsockopt(nil, 'TCP_NODELAY') and errno() == errno.EINVAL s:linger() s:linger(true, 1) s:linger() s:linger(false, 1) s:linger() s:close() s = socket('PF_INET', 'SOCK_STREAM', 'tcp') s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) s:bind('127.0.0.1', 0) s:listen(128) sc = socket('PF_INET', 'SOCK_STREAM', 'tcp') sc:sysconnect('127.0.0.1', s:name().port) or errno() == errno.EINPROGRESS sc:writable(10) sc:write('Hello, world') sa, addr = s:accept() addr2 = sa:name() addr2.host == addr.host addr2.family == addr.family sa:nonblock(1) sa:read(8) sa:read(3) sc:writable() sc:write(', again') sa:read(8) sa:error() string.len(sa:read(0)) type(sa:read(0)) sa:read(1, .01) sc:writable() sc:send('abc') sa:read(3) sc:send('Hello') sa:readable() sa:recv() sa:recv() sc:send('Hello') sc:send(', world') sc:send("\\nnew line") sa:read('\\n', 1) sa:read({ chunk = 1, delimiter = 'ine'}, 1) sa:read('ine', 1) sa:read('ine', 0.1) sc:send('Hello, world') sa:read(',', 1) sc:shutdown('W') sa:close() sc:close() s = socket('PF_UNIX', 'SOCK_STREAM', 0) s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) s ~= nil s:nonblock() s:nonblock(true) s:nonblock() os.remove('/tmp/tarantool-test-socket') s:bind('unix/', '/tmp/tarantool-test-socket') sc ~= nil s:listen(1234) sc = socket('PF_UNIX', 'SOCK_STREAM', 0) sc:nonblock(true) sc:sysconnect('unix/', '/tmp/tarantool-test-socket') sc:error() s:readable() sa = s:accept() sa:nonblock(true) sa:send('Hello, world') sc:recv() sc:close() sa:close() s:close() _ = os.remove('/tmp/tarantool-test-socket') test_run:cmd("setopt delimiter ';'") function aexitst(ai, hostnames, port) for i, a in pairs(ai) do for j, host in pairs(hostnames) do if a.host == host and a.port == port then return true end end end return ai end; aexitst( socket.getaddrinfo('localhost', 'http', { protocol = 'tcp', type = 'SOCK_STREAM'}), {'127.0.0.1', '::1'}, 80 ); test_run:cmd("setopt delimiter 
''"); wrong_addr = socket.getaddrinfo('non-existing-domain-name-12211alklkl.com', 'http', {}) wrong_addr == nil or #wrong_addr == 0 sc = socket('PF_INET', 'SOCK_STREAM', 'tcp') sc ~= nil sc:getsockopt('SOL_SOCKET', 'SO_ERROR') sc:nonblock(true) sc:sysconnect('127.0.0.1', 3458) or errno() == errno.EINPROGRESS or errno() == errno.ECONNREFUSED string.match(tostring(sc), ', peer') == nil sc:writable() string.match(tostring(sc), ', peer') == nil socket_error = sc:getsockopt('SOL_SOCKET', 'SO_ERROR') socket_error == errno.ECONNREFUSED or socket_error == 0 test_run:cmd("setopt delimiter ';'") inf = socket.getaddrinfo('127.0.0.1', '80', { type = 'SOCK_DGRAM', flags = { 'AI_NUMERICSERV', 'AI_NUMERICHOST', } }); -- musl libc https://github.com/tarantool/tarantool/issues/1249 inf[1].canonname = nil; inf; test_run:cmd("setopt delimiter ''"); sc = socket('AF_INET', 'SOCK_STREAM', 'tcp') json.encode(sc:name()) sc:name() sc:nonblock(true) sc:close() s = socket('AF_INET', 'SOCK_DGRAM', 'udp') s:bind('127.0.0.1', 0) sc = socket('AF_INET', 'SOCK_DGRAM', 'udp') sc:sendto('127.0.0.1', s:name().port, 'Hello, world') s:readable(10) s:recv() sc:sendto('127.0.0.1', s:name().port, 'Hello, world, 2') s:readable(10) d, from = s:recvfrom() from.port > 0 from.port = 'Random port' json.encode{d, from} s:close() sc:close() s = socket('AF_INET', 'SOCK_DGRAM', 'udp') s:nonblock(true) s:bind('127.0.0.1') s:name().port > 0 sc = socket('AF_INET', 'SOCK_DGRAM', 'udp') sc:nonblock(true) sc:sendto('127.0.0.1', s:name().port) sc:sendto('127.0.0.1', s:name().port, 'Hello, World!') s:readable(1) data, from = s:recvfrom(10) data s:sendto(from.host, from.port, 'Hello, hello!') sc:readable(1) data_r, from_r = sc:recvfrom() data_r from_r.host from_r.port == s:name().port s:close() sc:close() -- tcp_connect -- test timeout socket.tcp_connect('127.0.0.1', 80, 0.00000000001) -- AF_INET s = socket('AF_INET', 'SOCK_STREAM', 'tcp') s:bind('127.0.0.1', 0) port = s:name().port s:listen() sc, e = 
socket.tcp_connect('127.0.0.1', port), errno() sc ~= nil e == 0 sc:close() s:close() socket.tcp_connect('127.0.0.1', port), errno() == errno.ECONNREFUSED -- AF_UNIX path = '/tmp/tarantool-test-socket' _ = os.remove(path) s = socket('AF_UNIX', 'SOCK_STREAM', 0) s:bind('unix/', path) socket.tcp_connect('unix/', path), errno() == errno.ECONNREFUSED s:listen() sc, e = socket.tcp_connect('unix/', path), errno() sc ~= nil e sc:close() s:close() socket.tcp_connect('unix/', path), errno() == errno.ECONNREFUSED _ = os.remove(path) socket.tcp_connect('unix/', path), errno() == errno.ENOENT -- invalid fd / tampering s = socket('AF_INET', 'SOCK_STREAM', 'tcp') s:read(9) s:close() s._gc_socket.fd = 512 s._gc_socket = nil tostring(s) s = nil -- close serv = socket('AF_INET', 'SOCK_STREAM', 'tcp') serv:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) serv:bind('127.0.0.1', port) port = serv:name().port serv:listen() test_run:cmd("setopt delimiter ';'") f = fiber.create(function(serv) serv:readable() sc = serv:accept() sc:write("Tarantool test server") sc:shutdown() sc:close() serv:close() end, serv); test_run:cmd("setopt delimiter ''"); s = socket.tcp_connect('127.0.0.1', port) ch = fiber.channel() f = fiber.create(function() s:read(12) ch:put(true) end) s:close() ch:get(1) s:error() -- random port master = socket('PF_INET', 'SOCK_STREAM', 'tcp') master:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) port = 32768 + math.random(32768) attempt = 0 test_run:cmd("setopt delimiter ';'") while attempt < 10 do if not master:bind('127.0.0.1', port) then port = 32768 + math.random(32768) attempt = attempt + 1 else break end end; master:listen(); function gh361() local s = socket('PF_INET', 'SOCK_STREAM', 'tcp') s:sysconnect('127.0.0.1', port) s:wait() res = s:read(1200) end; test_run:cmd("setopt delimiter ''"); f = fiber.create(gh361) fiber.cancel(f) while f:status() ~= 'dead' do fiber.sleep(0.001) end master:close() f = nil path = '/tmp/tarantool-test-socket' s = socket('PF_UNIX', 
'SOCK_STREAM', 0) s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) s:error() s:bind('unix/', path) s:error() s:listen(128) test_run:cmd("setopt delimiter ';'") f = fiber.create(function() for i=1,2 do s:readable() local sc = s:accept() sc:write('ok!') sc:shutdown() sc:close() end end); test_run:cmd("setopt delimiter ''"); c = socket.tcp_connect('unix/', path) c:error() x = c:read('!') x, type(x), #x x = c:read('!') c:error() x, type(x), #x x = c:read('!') c:error() x, type(x), #x c:close() c = socket.tcp_connect('unix/', path) c:error() x = c:read(3) c:error() x, type(x), #x x = c:read(1) c:error() x, type(x), #x x = c:read(1) c:error() x, type(x), #x x = c:sysread(1) c:error() x, type(x), #x c:close() s:close() _ = os.remove(path) server, addr = socket.tcp_server('unix/', path, function(s) s:write('Hello, world') end) type(addr) server ~= nil fiber.sleep(.1) client = socket.tcp_connect('unix/', path) client ~= nil client:read(123) server:close() -- unix socket automatically removed while fio.stat(path) ~= nil do fiber.sleep(0.001) end test_run:cmd("setopt delimiter ';'") server, addr = socket.tcp_server('localhost', 0, { handler = function(s) s:read(2) s:write('Hello, world') end, name = 'testserv'}); test_run:cmd("setopt delimiter ''"); type(addr) server ~= nil addr2 = server:name() addr.host == addr2.host addr.family == addr2.family fiber.sleep(.1) client = socket.tcp_connect(addr2.host, addr2.port) client ~= nil -- Check that listen and client fibers have appropriate names cnt = 0 test_run:cmd("setopt delimiter ';'") for _, f in pairs(fiber.info()) do if f.name:match('^testserv/') then cnt = cnt + 1 end end; test_run:cmd("setopt delimiter ''"); cnt client:write('hi') client:read(123) client:close() server:close() longstring = string.rep("abc", 65535) server = socket.tcp_server('unix/', path, function(s) s:write(longstring) end) client = socket.tcp_connect('unix/', path) client:read(#longstring) == longstring client = socket.tcp_connect('unix/', path) 
client:read(#longstring + 1) == longstring client = socket.tcp_connect('unix/', path) client:read(#longstring - 1) == string.sub(longstring, 1, #longstring - 1) longstring = "Hello\r\n\r\nworld\n\n" client = socket.tcp_connect('unix/', path) client:read{ line = { "\n\n", "\r\n\r\n" } } server:close() -- gh-658: socket:read() incorrectly handles size and delimiter together body = "a 10\nb 15\nabc" remaining = #body test_run:cmd("setopt delimiter ';'") server = socket.tcp_server('unix/', path, function(s) s:write(body) s:read(100500) end); test_run:cmd("setopt delimiter ''"); client = socket.tcp_connect('unix/', path) buf = client:read({ size = remaining, delimiter = "\n"}) buf == "a 10\n" remaining = remaining - #buf buf = client:read({ size = remaining, delimiter = "\n"}) buf == "b 15\n" remaining = remaining - #buf buf = client:read({ size = remaining, delimiter = "\n"}) buf == "abc" remaining = remaining - #buf remaining == 0 buf = client:read({ size = remaining, delimiter = "\n"}) buf == "" buf = client:read({ size = remaining, delimiter = "\n"}) buf == "" client:close() server:close() _ = os.remove(path) -- Test that socket is closed on GC s = socket('AF_UNIX', 'SOCK_STREAM', 0) s:bind('unix/', path) s:listen() s = nil while socket.tcp_connect('unix/', path) do collectgarbage('collect') end _ = os.remove(path) -- Test serializers with sockets s = socket('AF_UNIX', 'SOCK_STREAM', 0) -- check __serialize hook json.decode(json.encode(s)).fd == s:fd() yaml.decode(yaml.encode(s)).fd == s:fd() s = nil -- start AF_UNIX server with dead socket exists path = '/tmp/tarantool-test-socket' s = socket('AF_UNIX', 'SOCK_STREAM', 0) s:bind('unix/', path) s:close() s = socket('AF_UNIX', 'SOCK_STREAM', 0) { s:bind('unix/', path), errno() == errno.EADDRINUSE } s:close() s = socket.tcp_server('unix/', path, function() end) s ~= nil s:close() fio.stat(path) == nil { socket.tcp_connect('abrakadabra#123') == nil, errno.strerror() } -- wrong options for getaddrinfo 
socket.getaddrinfo('host', 'port', { type = 'WRONG' }) == nil and errno() == errno.EINVAL socket.getaddrinfo('host', 'port', { family = 'WRONG' }) == nil and errno() == errno.EINVAL socket.getaddrinfo('host', 'port', { protocol = 'WRONG' }) == nil and errno() == errno.EPROTOTYPE socket.getaddrinfo('host', 'port', { flags = 'WRONG' }) == nil and errno() == errno.EINVAL -- gh-574: check that fiber with getaddrinfo can be safely cancelled test_run:cmd("setopt delimiter ';'") f = fiber.create(function() while true do local result = socket.getaddrinfo('localhost', '80') fiber.sleep(0) end end); test_run:cmd("setopt delimiter ''"); f:cancel() -------------------------------------------------------------------------------- -- Lua Socket Emulation -------------------------------------------------------------------------------- test_run:cmd("push filter 'fd=([0-9]+)' to 'fd='") s = socket.tcp() s s:close() -- Sic: incompatible with Lua Socket s:close() s = socket.tcp() host, port, family = s:getsockname() host == '0.0.0.0', port == '0', family == 'inet' status, reason = s:getpeername() status == nil, type(reason) == 'string' s:settimeout(100500) s:setoption('keepalive', true) s:setoption('linger', { on = true }) s:setoption('linger', true) s:setoption('reuseaddr', true) s:setoption('tcp-nodelay', true) s:setoption('unknown', true) s:bind('127.0.0.1', 0) s:bind('127.0.0.1', 0) -- error handling s:listen(10) s -- transformed to tcp{server} socket host, port, family = s:getsockname() host == '127.0.0.1', type(port) == 'string', family == 'inet' status, reason = s:getpeername() status == nil, type(reason) == 'string' s:settimeout(0) status, reason = s:accept() status == nil, type(reason) == 'string' s:settimeout(0.001) status, reason = s:accept() status == nil, type(reason) == 'string' s:settimeout(100500) rch, wch = fiber.channel(1), fiber.channel(1) sc = socket.connect(host, port) test_run:cmd("setopt delimiter ';'") cfiber = fiber.create(function(sc, rch, wch) while 
sc:send(wch:get()) and rch:put(sc:receive("*l")) do end end, sc, rch, wch); test_run:cmd("setopt delimiter ''"); c = s:accept() c chost, cport, cfamily = c:getsockname() chost == '127.0.0.1', type(cport) == 'string', cfamily == 'inet' chost, cport, cfamily = c:getpeername() chost == '127.0.0.1', type(cport) == 'string', cfamily == 'inet' wch:put("Ping\n") c:receive("*l") c:send("Pong\n") rch:get() wch:put("HELO lua\nMAIL FROM: \n") c:receive("*l") c:receive("*l") c:send("250 Welcome to Lua Universe\n") c:send("$$$250 OK\n$$$", 4, 11) rch:get() wch:put("RCPT TO: \n") c:receive() c:send("250") c:send(" ") c:send("OK") c:send("\n") rch:get() wch:put("DATA\n") c:receive(4) c:receive("*l") wch:put("Fu") c:send("354 Please type your message\n") sc:close() c:receive("*l", "Line: ") c:receive() c:receive(10) c:receive("*a") c:close() -- eof with bytes sc = socket.connect(host, port) sc c = s:accept() c _ = fiber.create(function() sc:send("Po") end) sc:close() c:receive(100500, "Message:") c:close() -- eof with '*l' sc = socket.connect(host, port) sc c = s:accept() c _ = fiber.create(function() sc:send("Pong\nPo") end) sc:close() c:receive("*l", "Message:") c:receive("*l", "Message: ") c:receive("*l", "Message: ") c:close() -- eof with '*a' sc = socket.connect(host, port) sc c = s:accept() c _ = fiber.create(function() sc:send("Pong\n") end) sc:close() c:receive("*a", "Message: ") c:receive("*a", "Message: ") c:close() -- shutdown sc = socket.connect(host, port) sc c = s:accept() c _ = fiber.create(function() sc:send("Pong\n") end) sc:shutdown("send") c:receive() c:shutdown("send") status, reason = c:shutdown("recv") status == nil, type(reason) == 'string' status, reason = c:shutdown("recv") status == nil, type(reason) == 'string' status, reason = c:shutdown("both") status == nil, type(reason) == 'string' c:close() sc:close() s:close() -- socket.bind / socket.connect s = socket.bind('0.0.0.0', 0) s host, port, family = s:getsockname() sc = socket.connect(host, port) sc 
sc:close() sc = socket.tcp() sc:connect(host, port) sc:close() s:close() test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/app/cmdline.result0000664000000000000000000000054613306560010020617 0ustar rootrootarg[-1] ~= nil --- - true ... arg[0] ~= nil --- - true ... string.match(arg[-1], '^/') ~= nil --- - true ... string.match(arg[0], '^/') == nil --- - true ... string.match(arg[-1], '/tarantool$') ~= nil --- - true ... string.match(arg[2], 'app%.lua$') ~= nil --- - true ... io.type( io.open(arg[-1]) ) --- - file ... io.type( io.open(arg[0]) ) --- - file ... tarantool_1.9.1.26.g63eb81e3c/test/app/crypto.test.lua0000664000000000000000000000234013306560010020737 0ustar rootroottest_run = require('test_run').new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua:\"]: '") crypto = require('crypto') type(crypto) ciph = crypto.cipher.aes128.cbc pass = '1234567887654321' iv = 'abcdefghijklmnop' enc = ciph.encrypt('test', pass, iv) enc ciph.decrypt(enc, pass, iv) --Failing scenaries crypto.cipher.aes128.cbc.encrypt('a') crypto.cipher.aes128.cbc.encrypt('a', '123456', '435') crypto.cipher.aes128.cbc.encrypt('a', '1234567887654321') crypto.cipher.aes128.cbc.encrypt('a', '1234567887654321', '12') crypto.cipher.aes256.cbc.decrypt('a') crypto.cipher.aes256.cbc.decrypt('a', '123456', '435') crypto.cipher.aes256.cbc.decrypt('a', '12345678876543211234567887654321') crypto.cipher.aes256.cbc.decrypt('12', '12345678876543211234567887654321', '12') crypto.cipher.aes192.cbc.encrypt.new() crypto.cipher.aes192.cbc.encrypt.new('123321') crypto.cipher.aes192.cbc.decrypt.new('123456788765432112345678') crypto.cipher.aes192.cbc.decrypt.new('123456788765432112345678', '12345') crypto.cipher.aes100.efb crypto.cipher.aes256.nomode crypto.digest.nodigest bad_pass = '8765432112345678' bad_iv = '123456abcdefghij' ciph.decrypt(enc, bad_pass, iv) ciph.decrypt(enc, pass, bad_iv) test_run:cmd("clear filter") 
tarantool_1.9.1.26.g63eb81e3c/test/app/pack.result0000664000000000000000000000262113306560010020116 0ustar rootroot-- Test pickle.pack() pickle = require('pickle') --- ... pickle.pack() --- - error: 'bad argument #1 to ''?'' (string expected, got no value)' ... pickle.pack(1) --- - error: 'pickle.pack: argument count does not match the format' ... pickle.pack('abc') --- - error: 'pickle.pack: argument count does not match the format' ... pickle.pack('a', ' - hello') --- - ' - hello' ... pickle.pack('Aa', ' - hello', ' world') --- - ' - hello world' ... pickle.pack('s', 0x4d) --- - "M\0" ... pickle.pack('ssss', 25940, 29811, 28448, 11883) --- - Test ok. ... pickle.pack('SSSS', 25940, 29811, 28448, 11883) --- - Test ok. ... pickle.pack('SSSSSSSS', 28493, 29550, 27680, 27497, 29541, 20512, 29285, 8556) --- - Mons likes Perl! ... pickle.pack('bsil', 84, 29541, 1802444916, 2338318684567380014ULL) --- - 'Test ok. Let`s ' ... pickle.unpack('b', 'T') --- - 84 ... pickle.unpack('s', 'Te') --- - 25940 ... pickle.unpack('i', 'Test') --- - 1953719636 ... pickle.unpack('l', 'Test ok.') --- - 3344889333436081492 ... pickle.unpack('bsil', pickle.pack('bsil', 255, 65535, 4294967295, tonumber64('18446744073709551615'))) --- - 255 - 65535 - 4294967295 - 18446744073709551615 ... pickle.unpack('','') --- ... pickle.unpack('ii', pickle.pack('i', 1)) --- - error: 'pickle.unpack(''i''): got 4 bytes (expected: 8+)' ... pickle.unpack('i', pickle.pack('ii', 1, 1)) --- - error: 'pickle.unpack(''i''): too many bytes: unpacked 4, total 8' ... 
tarantool_1.9.1.26.g63eb81e3c/test/app/loaders.lua0000664000000000000000000000002113306560010020064 0ustar rootrootreturn "success" tarantool_1.9.1.26.g63eb81e3c/test/app/cmdline.test.lua0000664000000000000000000000036113306560010021033 0ustar rootrootarg[-1] ~= nil arg[0] ~= nil string.match(arg[-1], '^/') ~= nil string.match(arg[0], '^/') == nil string.match(arg[-1], '/tarantool$') ~= nil string.match(arg[2], 'app%.lua$') ~= nil io.type( io.open(arg[-1]) ) io.type( io.open(arg[0]) ) tarantool_1.9.1.26.g63eb81e3c/test/app/pack.test.lua0000664000000000000000000000135413306560010020341 0ustar rootroot-- Test pickle.pack() pickle = require('pickle') pickle.pack() pickle.pack(1) pickle.pack('abc') pickle.pack('a', ' - hello') pickle.pack('Aa', ' - hello', ' world') pickle.pack('s', 0x4d) pickle.pack('ssss', 25940, 29811, 28448, 11883) pickle.pack('SSSS', 25940, 29811, 28448, 11883) pickle.pack('SSSSSSSS', 28493, 29550, 27680, 27497, 29541, 20512, 29285, 8556) pickle.pack('bsil', 84, 29541, 1802444916, 2338318684567380014ULL) pickle.unpack('b', 'T') pickle.unpack('s', 'Te') pickle.unpack('i', 'Test') pickle.unpack('l', 'Test ok.') pickle.unpack('bsil', pickle.pack('bsil', 255, 65535, 4294967295, tonumber64('18446744073709551615'))) pickle.unpack('','') pickle.unpack('ii', pickle.pack('i', 1)) pickle.unpack('i', pickle.pack('ii', 1, 1)) tarantool_1.9.1.26.g63eb81e3c/test/app/fiber_channel.test.lua0000664000000000000000000001040013306560010022172 0ustar rootrootfiber = require('fiber') env = require('test_run') test_run = env.new() -- channel methods ignore extra arguments, as regular Lua functions do ignored_args = {'Extra', 'arguments', 'are', 'ignored'} ch = fiber.channel(1) ch:size() ch:count() ch:is_full() ch:is_empty() ch:size(unpack(ignored_args)) ch:count(unpack(ignored_args)) ch:is_full(unpack(ignored_args)) ch:is_empty(unpack(ignored_args)) ch:get(.1) ch:get(.1, nil) ch:get(.1, nil, unpack(ignored_args)) tostring(ch) ch:put() ch:count() ch:put('test') 
tostring(ch) ch:get() ch:put('test', nil), ch:get() ch:put('test', nil, unpack(ignored_args)), ch:get() ch:get('wrong timeout') ch:get(-10) ch:put(234) ch:put(345, .1) ch:count() ch:is_full() ch:is_empty() buffer = {} test_run:cmd("setopt delimiter ';'") tfbr = fiber.create( function() while true do table.insert(buffer, ch:get()) end end ); t = {}; for i = 1, 10 do table.insert(t, {i, ch:put(i, 0.1)}) end; test_run:cmd("setopt delimiter ''"); t ch:has_readers() ch:has_writers() fiber.cancel(tfbr) ch:has_readers() ch:has_writers() ch:count() ch:put(box.info.pid) ch:count() ch:is_full() ch:is_empty() ch:get(box.info.pid) == box.info.pid buffer ch:is_empty() ch:is_full() ch:is_empty() test_run:cmd("setopt delimiter ';'") tfbr = fiber.create( function() while true do local v = ch:get() table.insert(buffer, v) end end ); tfbr2 = fiber.create( function() while true do local v = ch:get() table.insert(buffer, v) end end ); test_run:cmd("setopt delimiter ''"); buffer = {} buffer ch:is_full() ch:is_empty() ch:put(1) ch:put(2) ch:put(3) ch:put(4) ch:put(5) t = {} for i = 35, 45 do table.insert(t, ch:put(i)) end t while #buffer < 15 do fiber.sleep(0.001) end table.sort(buffer) buffer ch = fiber.channel(1) ch:is_closed() passed = false type(fiber.create(function() if ch:get() == nil then passed = true end end)) ch:close() fiber.yield() passed ch:get() ch:get() ch:put(10) ch:is_closed() tostring(ch) ch = fiber.channel(1) ch:put(true) ch:is_closed() passed = false type(fiber.create(function() if ch:put(true) == false then passed = true end end)) ch:close() fiber.yield() passed ch:get() ch:get() ch:put(10) ch:is_closed() -- race conditions chs, test_res, count = {}, {}, 0 test_run:cmd("setopt delimiter ';'") for i = 1, 10 do table.insert(chs, fiber.channel()) end; fibers = {} for i = 1, 10 do table.insert(fibers, fiber.create(function(no) fiber.self():name('pusher') while true do chs[no]:put({no}) fiber.sleep(0.001 * math.random()) end end, i) ) end; for i = 1, 10 do 
table.insert(fibers, fiber.create(function(no) fiber.self():name('receiver') while true do local r = chs[no]:get(math.random() * .001) if r ~= nil and r[1] == no then test_res[no] = true elseif r ~= nil then break end fiber.sleep(0.001 * math.random()) count = count + 1 end test_res[no] = false end, i) ) end; for i = 1, 100 do fiber.sleep(0.01) if count > 2000 then break end end; count > 2000, #test_res, test_res; for _, fiber in ipairs(fibers) do fiber:cancel() end; -- -- gh-756: channel:close() leaks memory -- ffi = require('ffi'); do stat, err = pcall(ffi.cdef, [[struct gh756 { int k; }]]) if not stat and not err:match('attempt to redefine') then error(err) end stat, err = pcall(ffi.metatype, 'struct gh756', { __gc = function() refs = refs - 1; end }) if not stat and not err:match('cannot change a protected metatable') then error(err) end end; test_run:cmd("setopt delimiter ''"); ct = ffi.typeof('struct gh756') -- create 10 objects and put they to a channel refs = 10 ch = fiber.channel(refs) for i=1,refs do ch:put(ffi.new(ct, i)) end -- get an object from the channel, run GC and check the number of objects ch:get().k == 1 collectgarbage('collect') refs ch:get().k == 2 collectgarbage('collect') refs -- close the channel and check the number of objects ch:close() collectgarbage('collect') refs -- must be zero tarantool_1.9.1.26.g63eb81e3c/test/app/socket.skipcond0000664000000000000000000000053213306560010020763 0ustar rootroot # vim: set ft=python : import re import os.path import socket import os test_path = '/tmp/tarantool-test-socket' if os.path.exists(test_path): os.remove(test_path) s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: s.bind(test_path) except: self.skip = 1 s.close() if os.path.exists(test_path): os.remove(test_path) tarantool_1.9.1.26.g63eb81e3c/test/app/strict.result0000664000000000000000000000033213306560010020505 0ustar rootrootstrict = require('strict') --- ... strict.on() --- ... 
if a then a = true end --- - error: '[string "if a then a = true end "]:1: variable ''a'' is not declared' ... strict.off() --- ... if a then a = true end --- ... tarantool_1.9.1.26.g63eb81e3c/test/app/fio.test.lua0000664000000000000000000002036213306560010020200 0ustar rootrootfio = require 'fio' ffi = require 'ffi' buffer = require 'buffer' test_run = require('test_run').new() -- umask type(fio.umask(0)) fio.umask() -- pathjoin st, err = pcall(fio.basename, nil, nil) st err:match("basename") ~= nil fio.pathjoin('abc', 'cde') fio.pathjoin('/', 'abc') fio.pathjoin('abc/', '/cde') fio.pathjoin('/', '/cde') fio.pathjoin('/a', '/') fio.pathjoin('abc', 'awdeq///qweqwqwe///', "//asda//") -- basename st, err = pcall(fio.basename, nil) st err:match("basename") ~= nil fio.basename('/') fio.basename('abc') fio.basename('abc.cde', '.cde') fio.basename('abc^cde', '.cde') fio.basename('/path/to/file.cde', '.cde') -- other tests tmpdir = fio.tempdir() file1 = fio.pathjoin(tmpdir, 'file.1') file2 = fio.pathjoin(tmpdir, 'file.2') file3 = fio.pathjoin(tmpdir, 'file.3') file4 = fio.pathjoin(tmpdir, 'file.4') st, err = pcall(fio.open, nil) st err:match("open") ~= nil fh1 = fio.open(file1, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) fh1 ~= nil f1s = fh1:stat() f1s.size f1s.is_reg() f1s:is_reg() f1s:is_dir() f1s:is_link() f1s:is_sock() f1s:is_fifo() f1s:is_chr() f1s:is_blk() fh1:seek(121) fh1:stat().size fh1:write(nil) fh1:write("Hello, world") fh1:stat().size fh1:fsync() fh1:fdatasync() fio.sync() fh1:pread(512, 121) fh1:pread(5, 121) fh1:write("; Ehllo, again") fh1:seek(121) fh1:read(13) fh1:read(512) fh1:pread(512, 14 + 121) fh1:pwrite("He", 14 + 121) fh1:pread(512, 14 + 121) { fh1:stat().size, fio.stat(file1).size } fh1:seek(121) fh1:read(512) fio.link(nil, nil) fio.link(file1, file2) fio.glob(nil) glob = fio.glob(fio.pathjoin(tmpdir, '*')) #glob { string.match(glob[1], '^.*/(.*)'), string.match(glob[2], '^.*/(.*)') } fio.stat(file1).inode == fio.stat(file2).inode fh3 = 
fio.open(file3, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0x1FD) fh1:stat().inode ~= fh3:stat().inode 0775 bit.band(fh3:stat().mode, 0x1FF) == 0x1FD fh3:write("abc") fio.rename(nil, nil) fio.rename(file3, file4) fio.symlink(nil, nil) fio.symlink(file4, file3) fio.stat(nil) fio.stat(file3).size fio.lstat(file3).size ~= fio.stat(file3).size fio.lstat(file3).mode ~= fio.stat(file3).mode fio.basename(fio.readlink(file3)) bit.band(fio.stat(file4).mode, 0x1FF) == 0x1FD fio.chmod(nil, 0x1F8) fio.chmod(file4, 0x1F8) -- 0x770 bit.band(fh3:stat().mode, 0x1FF) == 0x1F8 bit.band(fio.stat(file4).mode, 0x1FF) == 0x1F8 dir1 = fio.pathjoin(tmpdir, 'dir1') dir2 = fio.pathjoin(tmpdir, 'dir2') fio.mkdir(nil) fio.mkdir(dir1) -- standard mode fio.mkdir(dir2, 1) -- custom mode string.format('%04o', bit.band(fio.stat(dir1).mode, 0x1FF)) string.format('%04o', bit.band(fio.stat(dir2).mode, 0x1FF)) -- cleanup directories { fh1:close(), fh3:close() } fh1:close() fh3:close() fio.rmdir(nil) fio.rmdir(dir1) fio.rmdir(dir2) { fio.unlink(file1), fio.unlink(file2), fio.unlink(file3), fio.unlink(file4) } { fio.unlink(file1), fio.unlink(file2), fio.unlink(file3), fio.unlink(file4) } fio.rmdir(tmpdir) fio.rmdir(tmpdir) fio.unlink() fio.unlink(nil) -- gh-1211 use 0777 if mode omitted in open fh4 = fio.open('newfile', {'O_RDWR','O_CREAT','O_EXCL'}) bit.band(fh4:stat().mode, 0x1FF) == bit.band(fio.umask(), 0x1ff) fh4:close() fio.unlink('newfile') -- dirname st, err = pcall(fio.dirname, nil) st err:match("dirname") ~= nil fio.dirname('abc') fio.dirname('/abc') fio.dirname('/abc/cde') fio.dirname('/abc/cde/') fio.dirname('/') -- abspath st, err = pcall(fio.abspath, nil) st err:match("abspath") ~= nil fio.abspath("/") fio.abspath("/tmp") fio.abspath("/tmp/test/../") fio.abspath("/tmp/test/../abc") fio.abspath("/tmp/./test") fio.abspath("/tmp///test//abc") fio.abspath("/../") fio.abspath("/../tmp") type(string.find(fio.abspath("tmp"), "tmp")) -- chdir old_cwd = fio.cwd() st, err = pcall(fio.chdir, nil) st 
err:match("chdir") ~= nil st, err = pcall(fio.chdir, 42) st err:match("chdir") ~= nil fio.chdir('/no/such/file/or/directory') fio.chdir('/') fio.cwd() fio.chdir(old_cwd) fio.cwd() == old_cwd -- listdir tmpdir = fio.tempdir() dir3 = fio.pathjoin(tmpdir, "dir3") st, err = pcall(fio.mkdir, nil) st err:match("mkdir") ~= nil fio.mkdir(dir3) fio.mkdir(fio.pathjoin(dir3, "1")) fio.mkdir(fio.pathjoin(dir3, "2")) fio.mkdir(fio.pathjoin(dir3, "3")) fio.listdir("/no/such/directory/") ls = fio.listdir(dir3) table.sort(ls, function(a, b) return tonumber(a) < tonumber(b) end) ls -- rmtree fio.stat(dir3) ~= nil fio.rmtree(dir3) fio.stat(dir3) == nil st, err = fio.rmtree(dir3) st err:match("No such") ~= nil -- mktree tmp1 = fio.pathjoin(tmpdir, "1") tmp2 = fio.pathjoin(tmp1, "2") tree = fio.pathjoin(tmp2, "3") tree2 = fio.pathjoin(tmpdir, "4") st, err = pcall(fio.mktree, nil) st err:match("mktree") ~= nil fio.mktree(tree) fio.stat(tree) ~= nil fio.stat(tmp2) ~= nil fio.mktree(tree2, 1) -- copy and copytree file1 = fio.pathjoin(tmp1, 'file.1') file2 = fio.pathjoin(tmp2, 'file.2') file3 = fio.pathjoin(tree, 'file.3') fh1 = fio.open(file1, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) fh1:write("gogo") fh1:close() fh1 = fio.open(file2, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) fh1:write("lolo") fh1:close() fio.symlink(file1, file3) fio.copyfile(file1, tmp2) fio.stat(fio.pathjoin(tmp2, "file.1")) ~= nil res, err = fio.copyfile(fio.pathjoin(tmp1, 'not_exists.txt'), tmp1) res err:match("failed to copy") ~= nil newdir = fio.pathjoin(tmpdir, "newdir") fio.copytree(fio.pathjoin(tmpdir, "1"), newdir) fio.stat(fio.pathjoin(newdir, "file.1")) ~= nil fio.stat(fio.pathjoin(newdir, "2", "file.2")) ~= nil fio.stat(fio.pathjoin(newdir, "2", "3", "file.3")) ~= nil fio.readlink(fio.pathjoin(newdir, "2", "3", "file.3")) == file1 fio.copytree("/no/such/dir", "/some/where") -- ibuf read/write buf = buffer.ibuf() tmpdir = fio.tempdir() tmpfile = fio.pathjoin(tmpdir, "test1") fh = fio.open(tmpfile, { 
'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) fh:write('helloworld!') fh:seek(0) fh:read() fh:close() fh:read() fio.unlink(tmpfile) tmpfile = fio.pathjoin(tmpdir, "test") fh = fio.open(tmpfile, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) fh:write('helloworld!') fh:seek(0) len = fh:read(buf:reserve(12)) ffi.string(buf:alloc(len), len) fh:seek(0) len = fh:read(buf:reserve(5), 5) ffi.string(buf:alloc(len), len) len = fh:read(buf:reserve(5), 5) ffi.string(buf:alloc(len), len) len = fh:read(buf:reserve(5), 5) ffi.string(buf:alloc(len), len) buf:reset() len = fh:pread(buf:reserve(5), 5, 5) ffi.string(buf:alloc(len), len) len = fh:pread(buf:reserve(5), 5) ffi.string(buf:alloc(len), len) fh:seek(0) fh:write(buf.rpos, buf:size()) fh:seek(0) fh:read(64) fh:pwrite(buf:read(5), 5, 5) fh:pwrite(buf:read(5), 5) fh:seek(0) fh:read(64) buf:recycle() fh:close() -- gh-2924 -- fio.path.exists lexists is_file, etc -- fio.path.is_file(tmpfile) fio.path.is_dir(tmpfile) fio.path.is_link(tmpfile) fio.path.exists(tmpfile) fio.path.lexists(tmpfile) non_existing_file = "/no/such/file" fio.path.is_file(non_existing_file) fio.path.is_dir(non_existing_file) fio.path.is_link(non_existing_file) fio.path.exists(non_existing_file) fio.path.lexists(non_existing_file) fio.path.is_file(tmpdir) fio.path.is_dir(tmpdir) fio.path.is_link(tmpdir) fio.path.exists(tmpdir) fio.path.lexists(tmpdir) link = fio.pathjoin(tmpdir, "link") fio.symlink(tmpfile, link) fio.path.is_file(link) fio.path.is_dir(link) fio.path.is_link(link) fio.path.exists(link) fio.path.lexists(link) fio.unlink(link) fio.symlink(non_existing_file, link) fio.path.is_file(link) fio.path.is_dir(link) fio.path.is_link(link) fio.path.exists(link) fio.path.lexists(link) fio.unlink(link) fio.symlink(tmpdir, link) fio.path.is_file(link) fio.path.is_dir(link) fio.path.is_link(link) fio.path.exists(link) fio.path.lexists(link) fio.unlink(link) fio.unlink(tmpfile) tmp1 = fio.pathjoin(tmpdir, "tmp1") tmp2= fio.pathjoin(tmpdir, "tmp2") test_run:cmd("setopt 
delimiter ';'") function write_file(name, odd) local fh = fio.open(name, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) if odd then fh:write(string.rep('1', 100)) else fh:write(string.rep('2', 100)) end fh:write(name) fh:seek(0) return fh end; test_run:cmd("setopt delimiter ''"); fh1 = write_file(tmp1) fh2 = write_file(tmp2) fiber = require('fiber') digest = require('digest') str = fh1:read() fh1:seek(0) hash = digest.crc32(str) ch = fiber.channel(1) f1 = fiber.create(function() str = fh1:read() ch:put(digest.crc32(str)) end) f2 = fiber.create(function() str = fh2:read() end) ch:get() == hash fio.unlink(tmp1) fio.unlink(tmp2) fio.rmdir(tmpdir) tarantool_1.9.1.26.g63eb81e3c/test/app/fiber.test.lua0000664000000000000000000002520013306565107020522 0ustar rootrootfiber = require('fiber') space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) env = require('test_run') test_run = env.new() -- A test case for a race condition between ev_schedule -- and wal_schedule fiber schedulers. -- The same fiber should not be scheduled by ev_schedule (e.g. -- due to cancellation) if it is within th wal_schedule queue. 
-- The test case is dependent on rows_per_wal, since this is when -- we reopen the .xlog file and thus wal_scheduler takes a long -- pause box.cfg.rows_per_wal space:insert{1, 'testing', 'lua rocks'} space:delete{1} space:insert{1, 'testing', 'lua rocks'} space:delete{1} space:insert{1, 'test box delete'} space:delete{1} space:insert{1, 'test box delete'} space:delete{1} space:insert{1684234849, 'test box delete'} space:delete{1684234849} space:insert{1684234849, 'test box delete'} space:delete{1684234849} space:insert{1684234849, 'test box.select()'} space:replace{1684234849, 'hello', 'world'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1667655012, 'goodbye', 'universe'} space:replace{1684234849} space:delete{1684234849} space:delete{1667655012} space:insert{1953719668, 'old', 1684234849} -- test that insert produces a duplicate key error space:insert{1953719668, 'old', 1684234849} space:update(1953719668, {{'=', 1, 1953719668}, {'=', 2, 'new'}}) space:update(1234567890, {{'+', 3, 1}}) space:update(1953719668, {{'+', 3, 1}}) space:update(1953719668, {{'-', 3, 1}}) space:update(1953719668, {{'-', 3, 1}}) space:update(1953719668, {{'+', 3, 1}}) space:delete{1953719668} -- must be read-only space:insert{1953719668} space:insert{1684234849} space:delete{1953719668} space:delete{1684234849} space:insert{1953719668, 'hello world'} space:update(1953719668, {{'=', 2, 'bye, world'}}) space:delete{1953719668} -- test tuple iterators t = space:insert{1953719668} t = 
space:replace{1953719668, 'another field'} t = space:replace{1953719668, 'another field', 'one more'} space:truncate() -- test passing arguments in and out created fiber test_run:cmd("setopt delimiter ';'") function y() space = box.space['tweedledum'] while true do space:replace{1953719668, os.time()} fiber.sleep(0.001) end end; f = fiber.create(y); fiber.sleep(0.002); fiber.cancel(f); -- fiber garbage collection n = 1000; ch = fiber.channel(n); for k = 1, n, 1 do fiber.create( function() fiber.sleep(0) ch:put(k) end ) end; for k = 1, n, 1 do ch:get() end; test_run:cmd("setopt delimiter ''"); collectgarbage('collect') -- check that these newly created fibers are garbage collected fiber.find(900) fiber.find(910) fiber.find(920) fiber.find() fiber.find('test') -- https://github.com/tarantool/tarantool/issues/131 -- fiber.resume(fiber.cancel()) -- hang f = fiber.create(function() fiber.cancel(fiber.self()) end) f = nil -- https://github.com/tarantool/tarantool/issues/119 ftest = function() fiber.sleep(0.0001 * math.random() ) return true end test_run:cmd("setopt delimiter ';'") result = 0; for i = 1, 10 do local res = {} for j = 1, 300 do fiber.create(function() table.insert(res, ftest()) end) end while #res < 300 do fiber.sleep(0) end result = result + #res end; test_run:cmd("setopt delimiter ''"); result -- -- -- Test fiber.create() -- -- This should try to infinitely create fibers, -- but hit the fiber stack size limit and fail -- with an error. 
-- -- 2016-03-25 kostja -- -- fiber call stack depth was removed, we should -- use runtime memory limit control instead; the -- old limit was easy to circument with only -- slightly more complicated fork bomb code -- -- f = function() fiber.create(f) end -- f() -- -- Test argument passing -- f = function(a, b) fiber.create(function(arg) result = arg end, a..b) end f('hello ', 'world') result f('bye ', 'world') result -- -- Test that the created fiber is detached -- local f = fiber.create(function() result = fiber.status() end) result -- A test case for Bug#933487 -- tarantool crashed during shutdown if non running LUA fiber -- was created f = fiber.create(function () fiber.sleep(1) return true end) box.snapshot() _, e = pcall(box.snapshot) e _, e = pcall(box.snapshot) e f = fiber.create(function () fiber.sleep(1) end) -- Test fiber.sleep() fiber.sleep(0) fiber.sleep(0.01) fiber.sleep(0.0001) fiber.sleep('hello') fiber.sleep(box, 0.001) -- test fiber.self() f = fiber.self() old_id = f:id() fiber.self():id() - old_id < 3 fiber.self():id() - old_id < 5 g = fiber.self() f==g -- arguments to fiber.create f = fiber.create(print('hello')) -- test passing arguments in and out created fiber res = {} function r(a, b) res = { a, b } end f=fiber.create(r) while f:status() == 'running' do fiber.sleep(0) end res f=fiber.create(r, 'hello') while f:status() == 'running' do fiber.sleep(0) end res f=fiber.create(r, 'hello, world') while f:status() == 'running' do fiber.sleep(0) end res f=fiber.create(r, 'hello', 'world', 'wide') while f:status() == 'running' do fiber.sleep(0) end res -- test fiber.status functions: invalid arguments fiber.status(1) fiber.status('fafa-gaga') fiber.status(nil) -- test fiber.cancel function r() fiber.sleep(1000) end f = fiber.create(r) fiber.cancel(f) while f:status() ~= 'dead' do fiber.sleep(0) end f:status() -- Test fiber.name() old_name = fiber.name() fiber.name() == old_name fiber.self():name() == old_name fiber.name('hello fiber') fiber.name() 
fiber.self():name('bye fiber') fiber.self():name() fiber.self():name(old_name) space:drop() -- box.fiber test (create, resume, yield, status) dofile("fiber.lua") -- print run fiber's test box_fiber_run_test() -- various... function testfun() while true do fiber.sleep(10) end end f = fiber.create(testfun) f:cancel() fib_id = fiber.create(testfun):id() fiber.find(fib_id):cancel() while fiber.find(fib_id) ~= nil do fiber.sleep(0) end fiber.find(fib_id) -- -- Test local storage -- type(fiber.self().storage) fiber.self().storage.key = 48 fiber.self().storage.key test_run:cmd("setopt delimiter ';'") function testfun(mgmt, ch) mgmt:get() ch:put(fiber.self().storage.key) end; test_run:cmd("setopt delimiter ''"); mgmt = fiber.channel() ch = fiber.channel() f = fiber.create(testfun, mgmt, ch) f.storage.key = 'some value' mgmt:put("wakeup plz") ch:get() ch:close() mgmt:close() ch = nil mgmt = nil fiber.self().storage.key -- our local storage is not affected by f -- attempt to access local storage of dead fiber raises error pcall(function(f) return f.storage end, f) -- -- Test that local storage is garbage collected when fiber is died -- ffi = require('ffi') ch = fiber.channel(1) test_run:cmd("setopt delimiter ';'") function testfun() fiber.self().storage.x = ffi.gc(ffi.new('char[1]'), function() ch:put('gc ok') end) end; test_run:cmd("setopt delimiter ''"); f = fiber.create(testfun) collectgarbage('collect') ch:get() ch:close() ch = nil -- -- Test that local storage is not garbage collected with fiber object -- test_run:cmd("setopt delimiter ';'") function testfun(ch) fiber.self().storage.x = 'ok' collectgarbage('collect') ch:put(fiber.self().storage.x or 'failed') end; test_run:cmd("setopt delimiter ''"); ch = fiber.channel(1) fiber.create(testfun, ch):status() ch:get() ch:close() ch = nil -- # gh-125 box.fiber.cancel() by numeric id -- function y() while true do fiber.sleep(0.001) end end f = fiber.create(y) fiber.kill(f:id()) while f:status() ~= 'dead' do fiber.sleep(0.01) 
end -- # gh-420 fiber.cancel() assertion `!(f->flags & (1 << 2))' failed -- done = false test_run:cmd("setopt delimiter ';'") function test() fiber.name('gh-420') local fun, errmsg = loadstring('fiber.cancel(fiber.self())') xpcall(fun, function() end) xpcall(fun, function() end) done = true fun() end; test_run:cmd("setopt delimiter ''"); f = fiber.create(test) done -- # gh-536: fiber.info() doesn't list fibers with default names -- function loop() while true do fiber.sleep(10) end end f1 = fiber.create(loop) f2 = fiber.create(loop) f3 = fiber.create(loop) info = fiber.info() info[f1:id()] ~= nil info[f2:id()] ~= nil info[f3:id()] ~= nil info = fiber.info({bt = false}) info[f1:id()].backtrace == nil info = fiber.info({backtrace = false}) info[f1:id()].backtrace == nil f1:cancel() f2:cancel() f3:cancel() -- # gh-666: nulls in output -- getmetatable(fiber.info()) zombie = false for fid, i in pairs(fiber.info()) do if i.name == 'zombie' then zombie = true end end zombie -- test case for gh-778 - fiber.id() on a dead fiber f = fiber.create(function() end) id = f:id() fiber.sleep(0) f:status() id == f:id() -- -- gh-1238: log error if a fiber terminates due to uncaught Lua error -- -- must show in the log _ = fiber.create(function() error('gh-1238') end) test_run:grep_log("default", "gh%-1238") ~= nil -- must NOT show in the log _ = fiber.create(function() fiber.self():cancel() end) fiber.sleep(0.001) test_run:grep_log("default", "FiberIsCancelled") == nil -- must show in the log _ = fiber.create(function() box.error(box.error.ILLEGAL_PARAMS, 'oh my') end) test_run:grep_log("default", "ER_ILLEGAL_PARAMS:[^\n]*") -- #1734 fiber.name irt dead fibers fiber.create(function()end):name() -- -- gh-1926 -- fiber.create(function() fiber.wakeup(fiber.self()) end) -- -- gh-2066 test for fiber wakeup -- _ = box.schema.space.create('test2066', {if_not_exists = true}) _ = box.space.test2066:create_index('pk', {if_not_exists = true}) function fn2() fiber.sleep(60) 
box.space.test2066:replace({1}) end f2 = fiber.create(fn2) function fn1() fiber.sleep(60) f2:wakeup() end f1 = fiber.create(fn1) -- push two fibers to ready list f1:wakeup() f2:wakeup() fiber.sleep(0.01) box.space.test2066:drop() -- -- gh-2642 box.session.type() -- session_type = "" function fn1() session_type = box.session.type() return end _ = fiber.create(fn1) session_type session_type = nil fiber = nil -- -- gh-2622 fiber.name() truncates new name -- fiber = require('fiber') long_name = string.rep('a', 300) fiber.name() fiber.name('new_name') fiber.name(long_name) fiber.name() fiber.name(long_name, {truncate = true}) fiber.name() f = fiber.self() fiber.name(f) fiber.name(f, 'new_name') fiber.name(f, long_name) fiber.name(f) fiber.name(f, long_name, {truncate = true}) fiber.name(f) test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/app/digest.test.lua0000664000000000000000000001153513306560010020704 0ustar rootroottest_run = require('test_run').new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua:\"]: '") fiber = require('fiber') digest = require('digest') type(digest) -- -- Invalid arguments -- digest.md4() digest.md5() digest.sha1() digest.sha224() digest.sha256() digest.sha384() digest.sha512() digest.md4_hex() digest.md5_hex() digest.sha1_hex() digest.sha224_hex() digest.sha256_hex() digest.sha384_hex() digest.sha512_hex() -- -- gh-1561: Bad checksum on non-string types -- digest.md4(12345LL) digest.md5(12345LL) digest.sha1(12345LL) digest.sha224(12345LL) digest.sha256(12345LL) digest.sha384(12345LL) digest.sha512(12345LL) -- -- Empty string -- digest.md4('') digest.md5('') digest.sha1('') digest.sha224('') digest.sha256('') digest.sha384('') digest.sha512('') digest.md4_hex('') digest.md5_hex('') digest.sha1_hex('') digest.sha224_hex('') digest.sha256_hex('') digest.sha384_hex('') digest.sha512_hex('') -- -- Non-empty string -- digest.md4('tarantool') digest.md5('tarantool') digest.sha1('tarantool') digest.sha224('tarantool') 
digest.sha256('tarantool') digest.sha384('tarantool') digest.sha512('tarantool') digest.md4_hex('tarantool') digest.md5_hex('tarantool') digest.sha1_hex('tarantool') digest.sha224_hex('tarantool') digest.sha256_hex('tarantool') digest.sha384_hex('tarantool') digest.sha512_hex('tarantool') digest.md5_hex(123) digest.md5_hex('123') digest.md5_hex(true) digest.md5_hex('true') digest.md5_hex(nil) digest.md5_hex() digest.crc32() digest.crc32_update(4294967295, '') digest.crc32('abc') digest.crc32_update(4294967295, 'abc') digest.crc32('abccde') digest.crc32_update(digest.crc32('abc'), 'cde') crc = digest.crc32.new() crc:update('abc') crc2 = crc:copy() crc:update('cde') crc:result() == digest.crc32('abccde') crc2:update('def') crc2:result() == digest.crc32('abcdef') crc, crc2 = nil, nil digest.base64_encode('12345') digest.base64_decode('MTIzNDU=') digest.base64_encode('asdfl asdf adfa zxc vzxcvz llll') digest.base64_decode('YXNkZmwgYXNkZiBhZGZhIHp4YyB2enhjdnogbGxsbA==') digest.base64_encode('11 00 11 00 abcdef ABCDEF 00 11 00 11') digest.base64_decode('MTEgMDAgMTEgMDAgYWJjZGVmIEFCQ0RFRiAwMCAxMSAwMCAxMQ==') s = string.rep('a', 54 * 2) -- two lines in base64 b = digest.base64_encode(s) b digest.base64_decode(b) == s digest.base64_decode(nil) digest.base64_encode(nil) digest.base64_encode(123) digest.base64_decode(123) digest.guava('hello', 0) digest.guava(1, 'nope_') digest.guava(10863919174838991, 11) digest.guava(2016238256797177309, 11) digest.guava(1673758223894951030, 11) digest.urandom() #digest.urandom(0) #digest.urandom(1) #digest.urandom(16) digest.murmur('1234') mur = digest.murmur.new{seed=13} nulldigest = mur:result() mur:update('1234') mur:result() mur_new = mur:copy() mur_new:update('1234') mur_new:result() ~= mur:result() mur:clear() nulldigest == mur:result() mur = digest.murmur.new{seed=14} mur:update('1234') mur:result() mur, mur_new, nulldigest = nil, nil, nil digest.aes256cbc.encrypt('test123', 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh') 
digest.aes256cbc.decrypt(digest.aes256cbc.encrypt('test123', 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh'), 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh') digest.aes256cbc.decrypt(digest.aes256cbc.encrypt('test123', 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh'), 'nosspasspasspasspasspasspasspass', 'iv12tras8712cvbh') -- -- Test base64 options. (gh-2479, gh-2478, gh-2777). -- b = digest.base64_encode('123', { urlsafe = true }) b digest.base64_decode(b) b = digest.base64_encode('1234567', { urlsafe = true }) b digest.base64_decode(b) b = digest.base64_encode('12345678', { urlsafe = true }) b digest.base64_decode(b) b = digest.base64_encode('1234567', { nopad = true }) b digest.base64_decode(b) b = digest.base64_encode(string.rep('a', 100), { nowrap = true }) b digest.base64_decode(b) -- -- gh-3358: any option makes base64 work like urlsafe. -- s = digest.base64_encode('?>>>', {nowrap = true}) -- Check for '+' - it is not urlsafe. s:find('+') ~= nil s = digest.base64_encode('?>>>', {nopad = true}) s:find('+') ~= nil digest.pbkdf2("password", "salt", 4096, 32) digest.pbkdf2_hex("password", "salt", 4096, 32) digest.pbkdf2_hex("password", "salt") s, err = pcall(digest.pbkdf2, 12, "salt") s err:match("Usage") s, err = pcall(digest.pbkdf2_hex, 12, "salt") s err:match("Usage") s, err = pcall(digest.pbkdf2_hex, "password", "salt", "lol", "lol") s err:match("number") digest = nil test_run:cmd("clear filter") -- gh-3396: fiber-safe pbkdf2 res = {} sentry = fiber.channel() _ = test_run:cmd("setopt delimiter ';'") function test_pbkdf2() local digest = require('digest') for i = 1, 10 do table.insert(res, digest.pbkdf2('', 'salt', 100, 32):hex()) end sentry:put(fiber.id()) end; _ = test_run:cmd("setopt delimiter ''"); _ = fiber.create(test_pbkdf2) _ = fiber.create(test_pbkdf2) _ = sentry:get() _ = sentry:get() res tarantool_1.9.1.26.g63eb81e3c/test/app/uuid.skipcond0000664000000000000000000000026413306560010020443 0ustar rootrootimport re uuid = 
admin("lua box.uuid_hex()") if not re.search(re.compile('^\s+-\s+[a-f0-9]{32}\s*$', re.M), uuid): if re.search('box.uuid\(\):', uuid): self.skip = 1 tarantool_1.9.1.26.g63eb81e3c/test/app/strict.test.lua0000664000000000000000000000014213306560010020725 0ustar rootrootstrict = require('strict') strict.on() if a then a = true end strict.off() if a then a = true end tarantool_1.9.1.26.g63eb81e3c/test/app/loaders.result0000664000000000000000000000470313306560010020634 0ustar rootrootfio = require('fio') --- ... env = require('test_run') --- ... test_run = env.new() --- ... source_dir = os.getenv("SOURCEDIR") .. "/test/app/" --- ... build_dir = os.getenv("BUILDDIR") .. "/test/app/" --- ... -- -- Check . loader -- orig_cwd = fio.cwd() --- ... fio.chdir(source_dir) --- - true ... cwd_loader = package.loaders[2] --- ... f = cwd_loader("loaders") --- ... type(f) --- - function ... f() --- - success ... fio.chdir(orig_cwd) --- - true ... -- -- Check .rocks loader -- tmp_dir = fio.tempdir() --- ... work_dir = fio.pathjoin(tmp_dir, "pr") --- ... fio.mkdir(work_dir) --- - true ... pr1_dir = fio.pathjoin(work_dir, "pr1") --- ... fio.mkdir(pr1_dir) --- - true ... pr2_dir = fio.pathjoin(pr1_dir, "pr2") --- ... fio.mkdir(pr2_dir) --- - true ... lua_dir = ".rocks/share/tarantool" --- ... lib_dir = ".rocks/lib/tarantool" --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... function create_dirs(name) fio.mkdir(name) fio.mkdir(name .. "/.rocks") fio.mkdir(name .. "/.rocks/share") fio.mkdir(name .. "/.rocks/lib") fio.mkdir(name .. "/.rocks/share/tarantool") fio.mkdir(name .. "/.rocks/lib/tarantool") end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... create_dirs(work_dir) --- ... create_dirs(pr1_dir) --- ... create_dirs(pr2_dir) --- ... soext = (jit.os == "OSX" and "dylib" or "so") --- ... loaders_path = fio.pathjoin(source_dir, "loaders.lua") --- ... loaderslib_path = fio.pathjoin(build_dir, "loaderslib."..soext) --- ... 
fio.symlink(loaders_path, fio.pathjoin(work_dir, lua_dir, "loaders.lua")) --- - true ... fio.symlink(loaderslib_path, fio.pathjoin(pr1_dir, lib_dir, "loaderslib."..soext)) --- - true ... orig_cwd = fio.cwd() --- ... fio.chdir(pr2_dir) --- - true ... rocks_loader = package.loaders[4] --- ... rocks_loader_dyn = package.loaders[5] --- ... f = rocks_loader("loaders") --- ... type(f) --- - function ... f() --- - success ... f = rocks_loader_dyn("loaderslib") --- ... type(f) --- - function ... f() --- - success ... f = rocks_loader("loaders1") --- ... type(f) -- error --- - string ... package.loaded.loaders = nil --- ... package.loaded.loaders1 = nil --- ... package.loaded.loaderslib = nil --- ... fio.chdir(work_dir) --- - true ... f = rocks_loader("loaders") --- ... type(f) --- - function ... f() --- - success ... f = rocks_loader("loaders1") --- ... type(f) -- error --- - string ... f = rocks_loader_dyn("loaderslib") --- ... type(f) -- error --- - string ... fio.chdir(orig_cwd) --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/app/msgpack.test.lua0000664000000000000000000000301113306560010021040 0ustar rootrootbuffer = require 'buffer' msgpack = require 'msgpack' -- Arguments check. buf = buffer.ibuf() msgpack.encode() msgpack.encode('test', 'str') msgpack.encode('test', buf.buf) msgpack.decode() msgpack.decode(123) msgpack.decode(buf) msgpack.decode(buf.buf, 'size') msgpack.decode('test', 0) msgpack.decode('test', 5) msgpack.decode('test', 'offset') msgpack.decode_unchecked() msgpack.decode_unchecked(123) msgpack.decode_unchecked(buf) msgpack.decode_unchecked('test', 0) msgpack.decode_unchecked('test', 5) msgpack.decode_unchecked('test', 'offset') -- Encode/decode a string. s = msgpack.encode({1, 2, 3}) .. msgpack.encode({4, 5, 6}) obj, offset = msgpack.decode(s) obj obj, offset = msgpack.decode(s, offset) obj offset == #s + 1 obj, offset = msgpack.decode_unchecked(s) obj obj, offset = msgpack.decode_unchecked(s, offset) obj offset == #s + 1 -- Encode/decode a buffer. 
buf = buffer.ibuf() len = msgpack.encode({1, 2, 3}, buf) len = msgpack.encode({4, 5, 6}, buf) + len buf:size() == len orig_rpos = buf.rpos obj, rpos = msgpack.decode(buf.rpos, buf:size()) obj buf.rpos = rpos obj, rpos = msgpack.decode(buf.rpos, buf:size()) obj buf.rpos = rpos buf:size() == 0 buf.rpos = orig_rpos obj, rpos = msgpack.decode_unchecked(buf.rpos, buf:size()) obj buf.rpos = rpos obj, rpos = msgpack.decode_unchecked(buf.rpos, buf:size()) obj buf.rpos = rpos buf:size() == 0 -- Invalid msgpack. s = msgpack.encode({1, 2, 3}) s = s:sub(1, -2) msgpack.decode(s) buf = buffer.ibuf() msgpack.encode({1, 2, 3}, buf) msgpack.decode(buf.rpos, buf:size() - 1) tarantool_1.9.1.26.g63eb81e3c/test/app/socket.result0000664000000000000000000006330113306560010020472 0ustar rootrootjson = require 'json' --- ... yaml = require 'yaml' --- ... pickle = require 'pickle' --- ... socket = require 'socket' --- ... fiber = require 'fiber' --- ... msgpack = require 'msgpack' --- ... log = require 'log' --- ... errno = require 'errno' --- ... fio = require 'fio' --- ... ffi = require('ffi') --- ... type(socket) --- - table ... env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter '(error: .builtin/.*[.]lua):[0-9]+' to '\\1'") --- - true ... socket('PF_INET', 'SOCK_STREAM', 'tcp121222'); --- - null ... s = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... type(s) --- - table ... -- Invalid arguments test_run:cmd("setopt delimiter ';'") --- - true ... for k in pairs(getmetatable(s).__index) do local r, msg = pcall(s[k]) if not msg:match('Usage:') then error("Arguments is not checked for "..k) end end; --- ... s:close(); --- - true ... test_run:cmd("setopt delimiter ''"); --- - true ... LISTEN = require('uri').parse(box.cfg.listen) --- ... LISTEN ~= nil --- - true ... s = socket.tcp_connect(LISTEN.host, LISTEN.service) --- ... s:nonblock(true) --- - true ... s:nonblock() --- - true ... s:nonblock(false) --- - false ... s:nonblock() --- - false ... 
s:nonblock(true) --- - true ... s:readable(.01) --- - true ... s:wait(.01) --- - RW ... socket.iowait(s:fd(), 'RW') --- - RW ... socket.iowait(s:fd(), 3) --- - 3 ... socket.iowait(s:fd(), 'R') --- - R ... socket.iowait(s:fd(), 'r') --- - R ... socket.iowait(s:fd(), 1) --- - 1 ... socket.iowait(s:fd(), 'W') --- - W ... socket.iowait(s:fd(), 'w') --- - W ... socket.iowait(s:fd(), 2) --- - 2 ... socket.iowait(s:fd(), '') --- - error: 'Usage: iowait(fd, 1 | ''r'' | 2 | ''w'' | 3 | ''rw'' [, timeout])' ... socket.iowait(s:fd(), -1) --- - error: 'Usage: iowait(fd, 1 | ''r'' | 2 | ''w'' | 3 | ''rw'' [, timeout])' ... socket.iowait(s:fd(), 'RW') --- - RW ... socket.iowait(s:fd(), 'RW', -100500) --- - RW ... s:readable(0) --- - true ... s:errno() > 0 --- - false ... s:error() --- - null ... s:writable(.00000000000001) --- - true ... s:writable(0) --- - true ... s:wait(.01) --- - RW ... socket.iowait(nil, nil, -1) --- ... socket.iowait(nil, nil, 0.0001) --- ... socket.iowait(-1, nil, 0.0001) --- ... socket.iowait(nil, 'RW') --- - error: 'Usage: iowait(fd, 1 | ''r'' | 2 | ''w'' | 3 | ''rw'' [, timeout])' ... socket.iowait(0, nil) --- - error: 'Usage: iowait(fd, 1 | ''r'' | 2 | ''w'' | 3 | ''rw'' [, timeout])' ... handshake = ffi.new('char[128]') --- ... -- test sysread with char * s:sysread(handshake, 128) --- - 128 ... ffi.string(handshake, 9) --- - Tarantool ... ping = msgpack.encode({ [0] = 64, [1] = 0 }) --- ... ping = msgpack.encode(string.len(ping)) .. ping --- ... -- test syswrite with char * s:syswrite(ffi.cast('const char *', ping), #ping) --- - 6 ... s:readable(1) --- - true ... s:wait(.01) --- - RW ... pong = s:sysread() --- ... string.len(pong) --- - 29 ... msgpack.decode(pong) --- - 24 - 6 ... function remove_schema_id(t, x) if t[5] then t[5] = 'XXX' end return t, x end --- ... remove_schema_id(msgpack.decode(pong, 6)) --- - {0: 0, 1: 0, 5: 'XXX'} - 29 ... s:close() --- - true ... s = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... 
s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) --- - true ... s:error() --- - null ... s:bind('127.0.0.1', 0) --- - true ... s:error() --- - null ... s:listen(128) --- - true ... sevres = {} --- ... type(require('fiber').create(function() s:readable() do local sc = s:accept() table.insert(sevres, sc) sc:syswrite('ok') sc:close() end end)) --- - userdata ... #sevres --- - 0 ... sc = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... sc:nonblock(false) --- - false ... sc:sysconnect('127.0.0.1', s:name().port) --- - true ... sc:nonblock(true) --- - true ... sc:readable(.5) --- - true ... sc:sysread() --- - ok ... string.match(tostring(sc), ', peer') ~= nil --- - true ... #sevres --- - 1 ... sevres[1].host --- - null ... s:setsockopt('SOL_SOCKET', 'SO_BROADCAST', false) --- - true ... s:getsockopt('socket', 'SO_TYPE') --- - 1 ... s:error() --- - null ... s:setsockopt('SOL_SOCKET', 'SO_DEBUG', false) --- - true ... s:getsockopt('socket', 'SO_DEBUG') --- - 0 ... s:setsockopt('SOL_SOCKET', 'SO_ACCEPTCONN', 1) --- - error: 'builtin/socket.lua: Socket option SO_ACCEPTCONN is read only' ... s:getsockopt('SOL_SOCKET', 'SO_RCVBUF') > 32 --- - true ... s:error() --- - null ... s:setsockopt('IPPROTO_TCP', 'TCP_NODELAY', true) --- - true ... s:getsockopt('IPPROTO_TCP', 'TCP_NODELAY') > 0 --- - true ... s:setsockopt('SOL_TCP', 'TCP_NODELAY', false) --- - true ... s:getsockopt('SOL_TCP', 'TCP_NODELAY') == 0 --- - true ... s:setsockopt('tcp', 'TCP_NODELAY', true) --- - true ... s:getsockopt('tcp', 'TCP_NODELAY') > 0 --- - true ... s:setsockopt(6, 'TCP_NODELAY', false) --- - true ... s:getsockopt(6, 'TCP_NODELAY') == 0 --- - true ... not s:setsockopt(nil, 'TCP_NODELAY', true) and errno() == errno.EINVAL --- - true ... not s:getsockopt(nil, 'TCP_NODELAY') and errno() == errno.EINVAL --- - true ... s:linger() --- - false - 0 ... s:linger(true, 1) --- - true - 1 ... s:linger() --- - true - 1 ... s:linger(false, 1) --- - false - 1 ... s:linger() --- - false - 1 ... 
s:close() --- - true ... s = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) --- - true ... s:bind('127.0.0.1', 0) --- - true ... s:listen(128) --- - true ... sc = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... sc:sysconnect('127.0.0.1', s:name().port) or errno() == errno.EINPROGRESS --- - true ... sc:writable(10) --- - true ... sc:write('Hello, world') --- - 12 ... sa, addr = s:accept() --- ... addr2 = sa:name() --- ... addr2.host == addr.host --- - true ... addr2.family == addr.family --- - true ... sa:nonblock(1) --- - true ... sa:read(8) --- - Hello, w ... sa:read(3) --- - orl ... sc:writable() --- - true ... sc:write(', again') --- - 7 ... sa:read(8) --- - d, again ... sa:error() --- - null ... string.len(sa:read(0)) --- - 0 ... type(sa:read(0)) --- - string ... sa:read(1, .01) --- - null ... sc:writable() --- - true ... sc:send('abc') --- - 3 ... sa:read(3) --- - abc ... sc:send('Hello') --- - 5 ... sa:readable() --- - true ... sa:recv() --- - Hello ... sa:recv() --- - null ... sc:send('Hello') --- - 5 ... sc:send(', world') --- - 7 ... sc:send("\\nnew line") --- - 10 ... sa:read('\\n', 1) --- - Hello, world\n ... sa:read({ chunk = 1, delimiter = 'ine'}, 1) --- - n ... sa:read('ine', 1) --- - ew line ... sa:read('ine', 0.1) --- - null ... sc:send('Hello, world') --- - 12 ... sa:read(',', 1) --- - Hello, ... sc:shutdown('W') --- - true ... sa:close() --- - true ... sc:close() --- - true ... s = socket('PF_UNIX', 'SOCK_STREAM', 0) --- ... s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) --- - true ... s ~= nil --- - true ... s:nonblock() --- - true ... s:nonblock(true) --- - true ... s:nonblock() --- - true ... os.remove('/tmp/tarantool-test-socket') --- - null - '/tmp/tarantool-test-socket: No such file or directory' - 2 ... s:bind('unix/', '/tmp/tarantool-test-socket') --- - true ... sc ~= nil --- - true ... s:listen(1234) --- - true ... sc = socket('PF_UNIX', 'SOCK_STREAM', 0) --- ... 
sc:nonblock(true) --- - true ... sc:sysconnect('unix/', '/tmp/tarantool-test-socket') --- - true ... sc:error() --- - null ... s:readable() --- - true ... sa = s:accept() --- ... sa:nonblock(true) --- - true ... sa:send('Hello, world') --- - 12 ... sc:recv() --- - Hello, world ... sc:close() --- - true ... sa:close() --- - true ... s:close() --- - true ... _ = os.remove('/tmp/tarantool-test-socket') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function aexitst(ai, hostnames, port) for i, a in pairs(ai) do for j, host in pairs(hostnames) do if a.host == host and a.port == port then return true end end end return ai end; --- ... aexitst( socket.getaddrinfo('localhost', 'http', { protocol = 'tcp', type = 'SOCK_STREAM'}), {'127.0.0.1', '::1'}, 80 ); --- - true ... test_run:cmd("setopt delimiter ''"); --- - true ... wrong_addr = socket.getaddrinfo('non-existing-domain-name-12211alklkl.com', 'http', {}) --- ... wrong_addr == nil or #wrong_addr == 0 --- - true ... sc = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... sc ~= nil --- - true ... sc:getsockopt('SOL_SOCKET', 'SO_ERROR') --- - 0 ... sc:nonblock(true) --- - true ... sc:sysconnect('127.0.0.1', 3458) or errno() == errno.EINPROGRESS or errno() == errno.ECONNREFUSED --- - true ... string.match(tostring(sc), ', peer') == nil --- - true ... sc:writable() --- - true ... string.match(tostring(sc), ', peer') == nil --- - true ... socket_error = sc:getsockopt('SOL_SOCKET', 'SO_ERROR') --- ... socket_error == errno.ECONNREFUSED or socket_error == 0 --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... inf = socket.getaddrinfo('127.0.0.1', '80', { type = 'SOCK_DGRAM', flags = { 'AI_NUMERICSERV', 'AI_NUMERICHOST', } }); --- ... -- musl libc https://github.com/tarantool/tarantool/issues/1249 inf[1].canonname = nil; --- ... inf; --- - - host: 127.0.0.1 family: AF_INET type: SOCK_DGRAM protocol: udp port: 80 ... test_run:cmd("setopt delimiter ''"); --- - true ... 
sc = socket('AF_INET', 'SOCK_STREAM', 'tcp') --- ... json.encode(sc:name()) --- - '{"host":"0.0.0.0","family":"AF_INET","type":"SOCK_STREAM","protocol":"tcp","port":0}' ... sc:name() --- - host: 0.0.0.0 family: AF_INET type: SOCK_STREAM protocol: tcp port: 0 ... sc:nonblock(true) --- - true ... sc:close() --- - true ... s = socket('AF_INET', 'SOCK_DGRAM', 'udp') --- ... s:bind('127.0.0.1', 0) --- - true ... sc = socket('AF_INET', 'SOCK_DGRAM', 'udp') --- ... sc:sendto('127.0.0.1', s:name().port, 'Hello, world') --- - 12 ... s:readable(10) --- - true ... s:recv() --- - Hello, world ... sc:sendto('127.0.0.1', s:name().port, 'Hello, world, 2') --- - 15 ... s:readable(10) --- - true ... d, from = s:recvfrom() --- ... from.port > 0 --- - true ... from.port = 'Random port' --- ... json.encode{d, from} --- - '["Hello, world, 2",{"host":"127.0.0.1","family":"AF_INET","port":"Random port"}]' ... s:close() --- - true ... sc:close() --- - true ... s = socket('AF_INET', 'SOCK_DGRAM', 'udp') --- ... s:nonblock(true) --- - true ... s:bind('127.0.0.1') --- - true ... s:name().port > 0 --- - true ... sc = socket('AF_INET', 'SOCK_DGRAM', 'udp') --- ... sc:nonblock(true) --- - true ... sc:sendto('127.0.0.1', s:name().port) --- - true ... sc:sendto('127.0.0.1', s:name().port, 'Hello, World!') --- - 13 ... s:readable(1) --- - true ... data, from = s:recvfrom(10) --- ... data --- - Hello, Wor ... s:sendto(from.host, from.port, 'Hello, hello!') --- - 13 ... sc:readable(1) --- - true ... data_r, from_r = sc:recvfrom() --- ... data_r --- - Hello, hello! ... from_r.host --- - 127.0.0.1 ... from_r.port == s:name().port --- - true ... s:close() --- - true ... sc:close() --- - true ... -- tcp_connect -- test timeout socket.tcp_connect('127.0.0.1', 80, 0.00000000001) --- - null ... -- AF_INET s = socket('AF_INET', 'SOCK_STREAM', 'tcp') --- ... s:bind('127.0.0.1', 0) --- - true ... port = s:name().port --- ... s:listen() --- - true ... 
sc, e = socket.tcp_connect('127.0.0.1', port), errno() --- ... sc ~= nil --- - true ... e == 0 --- - true ... sc:close() --- - true ... s:close() --- - true ... socket.tcp_connect('127.0.0.1', port), errno() == errno.ECONNREFUSED --- - null - true ... -- AF_UNIX path = '/tmp/tarantool-test-socket' --- ... _ = os.remove(path) --- ... s = socket('AF_UNIX', 'SOCK_STREAM', 0) --- ... s:bind('unix/', path) --- - true ... socket.tcp_connect('unix/', path), errno() == errno.ECONNREFUSED --- - null - true ... s:listen() --- - true ... sc, e = socket.tcp_connect('unix/', path), errno() --- ... sc ~= nil --- - true ... e --- - 0 ... sc:close() --- - true ... s:close() --- - true ... socket.tcp_connect('unix/', path), errno() == errno.ECONNREFUSED --- - null - true ... _ = os.remove(path) --- ... socket.tcp_connect('unix/', path), errno() == errno.ENOENT --- - null - true ... -- invalid fd / tampering s = socket('AF_INET', 'SOCK_STREAM', 'tcp') --- ... s:read(9) --- - null ... s:close() --- - true ... s._gc_socket.fd = 512 --- - error: '[string "s._gc_socket.fd = 512 "]:1: attempt to write to constant location' ... s._gc_socket = nil --- ... tostring(s) --- - error: 'builtin/socket.lua: Usage: socket:method(), called with non-socket' ... s = nil --- ... -- close serv = socket('AF_INET', 'SOCK_STREAM', 'tcp') --- ... serv:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) --- - true ... serv:bind('127.0.0.1', port) --- - true ... port = serv:name().port --- ... serv:listen() --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... f = fiber.create(function(serv) serv:readable() sc = serv:accept() sc:write("Tarantool test server") sc:shutdown() sc:close() serv:close() end, serv); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s = socket.tcp_connect('127.0.0.1', port) --- ... ch = fiber.channel() --- ... f = fiber.create(function() s:read(12) ch:put(true) end) --- ... s:close() --- - true ... ch:get(1) --- - true ... 
s:error() --- - error: 'builtin/socket.lua: attempt to use closed socket' ... -- random port master = socket('PF_INET', 'SOCK_STREAM', 'tcp') --- ... master:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) --- - true ... port = 32768 + math.random(32768) --- ... attempt = 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... while attempt < 10 do if not master:bind('127.0.0.1', port) then port = 32768 + math.random(32768) attempt = attempt + 1 else break end end; --- ... master:listen(); --- - true ... function gh361() local s = socket('PF_INET', 'SOCK_STREAM', 'tcp') s:sysconnect('127.0.0.1', port) s:wait() res = s:read(1200) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... f = fiber.create(gh361) --- ... fiber.cancel(f) --- ... while f:status() ~= 'dead' do fiber.sleep(0.001) end --- ... master:close() --- - true ... f = nil --- ... path = '/tmp/tarantool-test-socket' --- ... s = socket('PF_UNIX', 'SOCK_STREAM', 0) --- ... s:setsockopt('SOL_SOCKET', 'SO_REUSEADDR', true) --- - true ... s:error() --- - null ... s:bind('unix/', path) --- - true ... s:error() --- - null ... s:listen(128) --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... f = fiber.create(function() for i=1,2 do s:readable() local sc = s:accept() sc:write('ok!') sc:shutdown() sc:close() end end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... c = socket.tcp_connect('unix/', path) --- ... c:error() --- - null ... x = c:read('!') --- ... x, type(x), #x --- - ok! - string - 3 ... x = c:read('!') --- ... c:error() --- - null ... x, type(x), #x --- - - string - 0 ... x = c:read('!') --- ... c:error() --- - null ... x, type(x), #x --- - - string - 0 ... c:close() --- - true ... c = socket.tcp_connect('unix/', path) --- ... c:error() --- - null ... x = c:read(3) --- ... c:error() --- - null ... x, type(x), #x --- - ok! - string - 3 ... x = c:read(1) --- ... c:error() --- - null ... x, type(x), #x --- - - string - 0 ... x = c:read(1) --- ... 
c:error() --- - null ... x, type(x), #x --- - - string - 0 ... x = c:sysread(1) --- ... c:error() --- - null ... x, type(x), #x --- - - string - 0 ... c:close() --- - true ... s:close() --- - true ... _ = os.remove(path) --- ... server, addr = socket.tcp_server('unix/', path, function(s) s:write('Hello, world') end) --- ... type(addr) --- - table ... server ~= nil --- - true ... fiber.sleep(.1) --- ... client = socket.tcp_connect('unix/', path) --- ... client ~= nil --- - true ... client:read(123) --- - Hello, world ... server:close() --- - true ... -- unix socket automatically removed while fio.stat(path) ~= nil do fiber.sleep(0.001) end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... server, addr = socket.tcp_server('localhost', 0, { handler = function(s) s:read(2) s:write('Hello, world') end, name = 'testserv'}); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... type(addr) --- - table ... server ~= nil --- - true ... addr2 = server:name() --- ... addr.host == addr2.host --- - true ... addr.family == addr2.family --- - true ... fiber.sleep(.1) --- ... client = socket.tcp_connect(addr2.host, addr2.port) --- ... client ~= nil --- - true ... -- Check that listen and client fibers have appropriate names cnt = 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for _, f in pairs(fiber.info()) do if f.name:match('^testserv/') then cnt = cnt + 1 end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... cnt --- - 2 ... client:write('hi') --- - 2 ... client:read(123) --- - Hello, world ... client:close() --- - true ... server:close() --- - true ... longstring = string.rep("abc", 65535) --- ... server = socket.tcp_server('unix/', path, function(s) s:write(longstring) end) --- ... client = socket.tcp_connect('unix/', path) --- ... client:read(#longstring) == longstring --- - true ... client = socket.tcp_connect('unix/', path) --- ... client:read(#longstring + 1) == longstring --- - true ... 
client = socket.tcp_connect('unix/', path) --- ... client:read(#longstring - 1) == string.sub(longstring, 1, #longstring - 1) --- - true ... longstring = "Hello\r\n\r\nworld\n\n" --- ... client = socket.tcp_connect('unix/', path) --- ... client:read{ line = { "\n\n", "\r\n\r\n" } } --- - "Hello\r\n\r\n" ... server:close() --- - true ... -- gh-658: socket:read() incorrectly handles size and delimiter together body = "a 10\nb 15\nabc" --- ... remaining = #body --- ... test_run:cmd("setopt delimiter ';'") --- - true ... server = socket.tcp_server('unix/', path, function(s) s:write(body) s:read(100500) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... client = socket.tcp_connect('unix/', path) --- ... buf = client:read({ size = remaining, delimiter = "\n"}) --- ... buf == "a 10\n" --- - true ... remaining = remaining - #buf --- ... buf = client:read({ size = remaining, delimiter = "\n"}) --- ... buf == "b 15\n" --- - true ... remaining = remaining - #buf --- ... buf = client:read({ size = remaining, delimiter = "\n"}) --- ... buf == "abc" --- - true ... remaining = remaining - #buf --- ... remaining == 0 --- - true ... buf = client:read({ size = remaining, delimiter = "\n"}) --- ... buf == "" --- - true ... buf = client:read({ size = remaining, delimiter = "\n"}) --- ... buf == "" --- - true ... client:close() --- - true ... server:close() --- - true ... _ = os.remove(path) --- ... -- Test that socket is closed on GC s = socket('AF_UNIX', 'SOCK_STREAM', 0) --- ... s:bind('unix/', path) --- - true ... s:listen() --- - true ... s = nil --- ... while socket.tcp_connect('unix/', path) do collectgarbage('collect') end --- ... _ = os.remove(path) --- ... -- Test serializers with sockets s = socket('AF_UNIX', 'SOCK_STREAM', 0) --- ... -- check __serialize hook json.decode(json.encode(s)).fd == s:fd() --- - true ... yaml.decode(yaml.encode(s)).fd == s:fd() --- - true ... s = nil --- ... 
-- start AF_UNIX server with dead socket exists path = '/tmp/tarantool-test-socket' --- ... s = socket('AF_UNIX', 'SOCK_STREAM', 0) --- ... s:bind('unix/', path) --- - true ... s:close() --- - true ... s = socket('AF_UNIX', 'SOCK_STREAM', 0) --- ... { s:bind('unix/', path), errno() == errno.EADDRINUSE } --- - - false - true ... s:close() --- - true ... s = socket.tcp_server('unix/', path, function() end) --- ... s ~= nil --- - true ... s:close() --- - true ... fio.stat(path) == nil --- - true ... { socket.tcp_connect('abrakadabra#123') == nil, errno.strerror() } --- - - true - Invalid argument ... -- wrong options for getaddrinfo socket.getaddrinfo('host', 'port', { type = 'WRONG' }) == nil and errno() == errno.EINVAL --- - true ... socket.getaddrinfo('host', 'port', { family = 'WRONG' }) == nil and errno() == errno.EINVAL --- - true ... socket.getaddrinfo('host', 'port', { protocol = 'WRONG' }) == nil and errno() == errno.EPROTOTYPE --- - true ... socket.getaddrinfo('host', 'port', { flags = 'WRONG' }) == nil and errno() == errno.EINVAL --- - true ... -- gh-574: check that fiber with getaddrinfo can be safely cancelled test_run:cmd("setopt delimiter ';'") --- - true ... f = fiber.create(function() while true do local result = socket.getaddrinfo('localhost', '80') fiber.sleep(0) end end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... f:cancel() --- ... -------------------------------------------------------------------------------- -- Lua Socket Emulation -------------------------------------------------------------------------------- test_run:cmd("push filter 'fd=([0-9]+)' to 'fd='") --- - true ... s = socket.tcp() --- ... s --- - 'tcp{master}: fd=' ... s:close() --- - 1 ... -- Sic: incompatible with Lua Socket s:close() --- - error: 'builtin/socket.lua: attempt to use closed socket' ... s = socket.tcp() --- ... host, port, family = s:getsockname() --- ... host == '0.0.0.0', port == '0', family == 'inet' --- - true - true - true ... 
status, reason = s:getpeername() --- ... status == nil, type(reason) == 'string' --- - true - true ... s:settimeout(100500) --- - 1 ... s:setoption('keepalive', true) --- - 1 ... s:setoption('linger', { on = true }) --- - 1 ... s:setoption('linger', true) --- - 1 ... s:setoption('reuseaddr', true) --- - 1 ... s:setoption('tcp-nodelay', true) --- - 1 ... s:setoption('unknown', true) --- - error: 'builtin/socket.lua: Unknown socket option name: unknown' ... s:bind('127.0.0.1', 0) --- - 1 ... s:bind('127.0.0.1', 0) -- error handling --- - null - Invalid argument ... s:listen(10) --- - 1 ... s -- transformed to tcp{server} socket --- - 'tcp{server}: fd=' ... host, port, family = s:getsockname() --- ... host == '127.0.0.1', type(port) == 'string', family == 'inet' --- - true - true - true ... status, reason = s:getpeername() --- ... status == nil, type(reason) == 'string' --- - true - true ... s:settimeout(0) --- - 1 ... status, reason = s:accept() --- ... status == nil, type(reason) == 'string' --- - true - true ... s:settimeout(0.001) --- - 1 ... status, reason = s:accept() --- ... status == nil, type(reason) == 'string' --- - true - true ... s:settimeout(100500) --- - 1 ... rch, wch = fiber.channel(1), fiber.channel(1) --- ... sc = socket.connect(host, port) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... cfiber = fiber.create(function(sc, rch, wch) while sc:send(wch:get()) and rch:put(sc:receive("*l")) do end end, sc, rch, wch); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... c = s:accept() --- ... c --- - 'tcp{client}: fd=' ... chost, cport, cfamily = c:getsockname() --- ... chost == '127.0.0.1', type(cport) == 'string', cfamily == 'inet' --- - true - true - true ... chost, cport, cfamily = c:getpeername() --- ... chost == '127.0.0.1', type(cport) == 'string', cfamily == 'inet' --- - true - true - true ... wch:put("Ping\n") --- - true ... c:receive("*l") --- - Ping ... c:send("Pong\n") --- - 5 ... rch:get() --- - Pong ... 
wch:put("HELO lua\nMAIL FROM: \n") --- - true ... c:receive("*l") --- - HELO lua ... c:receive("*l") --- - 'MAIL FROM: ' ... c:send("250 Welcome to Lua Universe\n") --- - 28 ... c:send("$$$250 OK\n$$$", 4, 11) --- - 11 ... rch:get() --- - 250 Welcome to Lua Universe ... wch:put("RCPT TO: \n") --- - true ... c:receive() --- - 'RCPT TO: ' ... c:send("250") --- - 3 ... c:send(" ") --- - 1 ... c:send("OK") --- - 2 ... c:send("\n") --- - 1 ... rch:get() --- - 250 OK ... wch:put("DATA\n") --- - true ... c:receive(4) --- - DATA ... c:receive("*l") --- - ... wch:put("Fu") --- - true ... c:send("354 Please type your message\n") --- - 29 ... sc:close() --- - 1 ... c:receive("*l", "Line: ") --- - null - closed - 'Line: Fu' ... c:receive() --- - null - closed - ... c:receive(10) --- - null - closed - ... c:receive("*a") --- - null - closed - ... c:close() --- - 1 ... -- eof with bytes sc = socket.connect(host, port) --- ... sc --- - 'tcp{client}: fd=' ... c = s:accept() --- ... c --- - 'tcp{client}: fd=' ... _ = fiber.create(function() sc:send("Po") end) --- ... sc:close() --- - 1 ... c:receive(100500, "Message:") --- - null - closed - Message:Po ... c:close() --- - 1 ... -- eof with '*l' sc = socket.connect(host, port) --- ... sc --- - 'tcp{client}: fd=' ... c = s:accept() --- ... c --- - 'tcp{client}: fd=' ... _ = fiber.create(function() sc:send("Pong\nPo") end) --- ... sc:close() --- - 1 ... c:receive("*l", "Message:") --- - Message:Pong ... c:receive("*l", "Message: ") --- - null - closed - 'Message: Po' ... c:receive("*l", "Message: ") --- - null - closed - 'Message: ' ... c:close() --- - 1 ... -- eof with '*a' sc = socket.connect(host, port) --- ... sc --- - 'tcp{client}: fd=' ... c = s:accept() --- ... c --- - 'tcp{client}: fd=' ... _ = fiber.create(function() sc:send("Pong\n") end) --- ... sc:close() --- - 1 ... c:receive("*a", "Message: ") --- - 'Message: Pong ' ... c:receive("*a", "Message: ") --- - null - closed - 'Message: ' ... c:close() --- - 1 ... 
-- shutdown sc = socket.connect(host, port) --- ... sc --- - 'tcp{client}: fd=' ... c = s:accept() --- ... c --- - 'tcp{client}: fd=' ... _ = fiber.create(function() sc:send("Pong\n") end) --- ... sc:shutdown("send") --- - 1 ... c:receive() --- - Pong ... c:shutdown("send") --- - 1 ... status, reason = c:shutdown("recv") --- ... status == nil, type(reason) == 'string' --- - true - true ... status, reason = c:shutdown("recv") --- ... status == nil, type(reason) == 'string' --- - true - true ... status, reason = c:shutdown("both") --- ... status == nil, type(reason) == 'string' --- - true - true ... c:close() --- - 1 ... sc:close() --- - 1 ... s:close() --- - 1 ... -- socket.bind / socket.connect s = socket.bind('0.0.0.0', 0) --- ... s --- - 'tcp{server}: fd=' ... host, port, family = s:getsockname() --- ... sc = socket.connect(host, port) --- ... sc --- - 'tcp{client}: fd=' ... sc:close() --- - 1 ... sc = socket.tcp() --- ... sc:connect(host, port) --- - 1 ... sc:close() --- - 1 ... s:close() --- - 1 ... test_run:cmd("clear filter") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/app/fio.result0000664000000000000000000003154713306560010017766 0ustar rootrootfio = require 'fio' --- ... ffi = require 'ffi' --- ... buffer = require 'buffer' --- ... test_run = require('test_run').new() --- ... -- umask type(fio.umask(0)) --- - number ... fio.umask() --- - 0 ... -- pathjoin st, err = pcall(fio.basename, nil, nil) --- ... st --- - false ... err:match("basename") ~= nil --- - true ... fio.pathjoin('abc', 'cde') --- - abc/cde ... fio.pathjoin('/', 'abc') --- - /abc ... fio.pathjoin('abc/', '/cde') --- - abc/cde ... fio.pathjoin('/', '/cde') --- - /cde ... fio.pathjoin('/a', '/') --- - /a ... fio.pathjoin('abc', 'awdeq///qweqwqwe///', "//asda//") --- - abc/awdeq/qweqwqwe/asda ... -- basename st, err = pcall(fio.basename, nil) --- ... st --- - false ... err:match("basename") ~= nil --- - true ... fio.basename('/') --- - ... fio.basename('abc') --- - abc ... 
fio.basename('abc.cde', '.cde') --- - abc ... fio.basename('abc^cde', '.cde') --- - abc^cde ... fio.basename('/path/to/file.cde', '.cde') --- - file ... -- other tests tmpdir = fio.tempdir() --- ... file1 = fio.pathjoin(tmpdir, 'file.1') --- ... file2 = fio.pathjoin(tmpdir, 'file.2') --- ... file3 = fio.pathjoin(tmpdir, 'file.3') --- ... file4 = fio.pathjoin(tmpdir, 'file.4') --- ... st, err = pcall(fio.open, nil) --- ... st --- - false ... err:match("open") ~= nil --- - true ... fh1 = fio.open(file1, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) --- ... fh1 ~= nil --- - true ... f1s = fh1:stat() --- ... f1s.size --- - 0 ... f1s.is_reg() --- - error: 'usage: stat:is_reg()' ... f1s:is_reg() --- - true ... f1s:is_dir() --- - false ... f1s:is_link() --- - false ... f1s:is_sock() --- - false ... f1s:is_fifo() --- - false ... f1s:is_chr() --- - false ... f1s:is_blk() --- - false ... fh1:seek(121) --- - 121 ... fh1:stat().size --- - 0 ... fh1:write(nil) --- - true ... fh1:write("Hello, world") --- - true ... fh1:stat().size --- - 136 ... fh1:fsync() --- - true ... fh1:fdatasync() --- - true ... fio.sync() --- - true ... fh1:pread(512, 121) --- - nilHello, world ... fh1:pread(5, 121) --- - nilHe ... fh1:write("; Ehllo, again") --- - true ... fh1:seek(121) --- - 121 ... fh1:read(13) --- - nilHello, wor ... fh1:read(512) --- - ld; Ehllo, again ... fh1:pread(512, 14 + 121) --- - d; Ehllo, again ... fh1:pwrite("He", 14 + 121) --- - true ... fh1:pread(512, 14 + 121) --- - He Ehllo, again ... { fh1:stat().size, fio.stat(file1).size } --- - - 150 - 150 ... fh1:seek(121) --- - 121 ... fh1:read(512) --- - nilHello, worlHe Ehllo, again ... fio.link(nil, nil) --- - error: 'Usage: fio.link(target, linkpath)' ... fio.link(file1, file2) --- - true ... fio.glob(nil) --- - error: 'Usage: fio.glob(pattern)' ... glob = fio.glob(fio.pathjoin(tmpdir, '*')) --- ... #glob --- - 2 ... { string.match(glob[1], '^.*/(.*)'), string.match(glob[2], '^.*/(.*)') } --- - - file.1 - file.2 ... 
fio.stat(file1).inode == fio.stat(file2).inode --- - true ... fh3 = fio.open(file3, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0x1FD) --- ... fh1:stat().inode ~= fh3:stat().inode --- - true ... 0775 --- - 775 ... bit.band(fh3:stat().mode, 0x1FF) == 0x1FD --- - true ... fh3:write("abc") --- - true ... fio.rename(nil, nil) --- - error: 'Usage: fio.rename(oldpath, newpath)' ... fio.rename(file3, file4) --- - true ... fio.symlink(nil, nil) --- - error: 'Usage: fio.symlink(target, linkpath)' ... fio.symlink(file4, file3) --- - true ... fio.stat(nil) --- - error: 'Usage: fio.stat(pathname)' ... fio.stat(file3).size --- - 3 ... fio.lstat(file3).size ~= fio.stat(file3).size --- - true ... fio.lstat(file3).mode ~= fio.stat(file3).mode --- - true ... fio.basename(fio.readlink(file3)) --- - file.4 ... bit.band(fio.stat(file4).mode, 0x1FF) == 0x1FD --- - true ... fio.chmod(nil, 0x1F8) --- - error: 'Usage: fio.chmod(pathname, mode)' ... fio.chmod(file4, 0x1F8) -- 0x770 --- - true ... bit.band(fh3:stat().mode, 0x1FF) == 0x1F8 --- - true ... bit.band(fio.stat(file4).mode, 0x1FF) == 0x1F8 --- - true ... dir1 = fio.pathjoin(tmpdir, 'dir1') --- ... dir2 = fio.pathjoin(tmpdir, 'dir2') --- ... fio.mkdir(nil) --- - error: Usage fio.mkdir(pathname[, mode]) ... fio.mkdir(dir1) -- standard mode --- - true ... fio.mkdir(dir2, 1) -- custom mode --- - true ... string.format('%04o', bit.band(fio.stat(dir1).mode, 0x1FF)) --- - '0777' ... string.format('%04o', bit.band(fio.stat(dir2).mode, 0x1FF)) --- - '0001' ... -- cleanup directories { fh1:close(), fh3:close() } --- - - true - true ... fh1:close() --- - false - 'fio: Bad file descriptor' ... fh3:close() --- - false - 'fio: Bad file descriptor' ... fio.rmdir(nil) --- - error: 'Usage: fio.rmdir(pathname)' ... fio.rmdir(dir1) --- - true ... fio.rmdir(dir2) --- - true ... { fio.unlink(file1), fio.unlink(file2), fio.unlink(file3), fio.unlink(file4) } --- - - true - true - true - true ... 
{ fio.unlink(file1), fio.unlink(file2), fio.unlink(file3), fio.unlink(file4) } --- - - false - false - false - false - 'fio: No such file or directory' ... fio.rmdir(tmpdir) --- - true ... fio.rmdir(tmpdir) --- - false - 'fio: No such file or directory' ... fio.unlink() --- - error: 'Usage: fio.unlink(pathname)' ... fio.unlink(nil) --- - error: 'Usage: fio.unlink(pathname)' ... -- gh-1211 use 0777 if mode omitted in open fh4 = fio.open('newfile', {'O_RDWR','O_CREAT','O_EXCL'}) --- ... bit.band(fh4:stat().mode, 0x1FF) == bit.band(fio.umask(), 0x1ff) --- - true ... fh4:close() --- - true ... fio.unlink('newfile') --- - true ... -- dirname st, err = pcall(fio.dirname, nil) --- ... st --- - false ... err:match("dirname") ~= nil --- - true ... fio.dirname('abc') --- - . ... fio.dirname('/abc') --- - / ... fio.dirname('/abc/cde') --- - /abc ... fio.dirname('/abc/cde/') --- - /abc ... fio.dirname('/') --- - / ... -- abspath st, err = pcall(fio.abspath, nil) --- ... st --- - false ... err:match("abspath") ~= nil --- - true ... fio.abspath("/") --- - / ... fio.abspath("/tmp") --- - /tmp ... fio.abspath("/tmp/test/../") --- - /tmp ... fio.abspath("/tmp/test/../abc") --- - /tmp/abc ... fio.abspath("/tmp/./test") --- - /tmp/test ... fio.abspath("/tmp///test//abc") --- - /tmp/test/abc ... fio.abspath("/../") --- - / ... fio.abspath("/../tmp") --- - /tmp ... type(string.find(fio.abspath("tmp"), "tmp")) --- - number ... -- chdir old_cwd = fio.cwd() --- ... st, err = pcall(fio.chdir, nil) --- ... st --- - false ... err:match("chdir") ~= nil --- - true ... st, err = pcall(fio.chdir, 42) --- ... st --- - false ... err:match("chdir") ~= nil --- - true ... fio.chdir('/no/such/file/or/directory') --- - false ... fio.chdir('/') --- - true ... fio.cwd() --- - / ... fio.chdir(old_cwd) --- - true ... fio.cwd() == old_cwd --- - true ... -- listdir tmpdir = fio.tempdir() --- ... dir3 = fio.pathjoin(tmpdir, "dir3") --- ... st, err = pcall(fio.mkdir, nil) --- ... st --- - false ... 
err:match("mkdir") ~= nil --- - true ... fio.mkdir(dir3) --- - true ... fio.mkdir(fio.pathjoin(dir3, "1")) --- - true ... fio.mkdir(fio.pathjoin(dir3, "2")) --- - true ... fio.mkdir(fio.pathjoin(dir3, "3")) --- - true ... fio.listdir("/no/such/directory/") --- - null - 'can''t listdir /no/such/directory/: fio: No such file or directory' ... ls = fio.listdir(dir3) --- ... table.sort(ls, function(a, b) return tonumber(a) < tonumber(b) end) --- ... ls --- - - '1' - '2' - '3' ... -- rmtree fio.stat(dir3) ~= nil --- - true ... fio.rmtree(dir3) --- - true ... fio.stat(dir3) == nil --- - true ... st, err = fio.rmtree(dir3) --- ... st --- - null ... err:match("No such") ~= nil --- - true ... -- mktree tmp1 = fio.pathjoin(tmpdir, "1") --- ... tmp2 = fio.pathjoin(tmp1, "2") --- ... tree = fio.pathjoin(tmp2, "3") --- ... tree2 = fio.pathjoin(tmpdir, "4") --- ... st, err = pcall(fio.mktree, nil) --- ... st --- - false ... err:match("mktree") ~= nil --- - true ... fio.mktree(tree) --- - true ... fio.stat(tree) ~= nil --- - true ... fio.stat(tmp2) ~= nil --- - true ... fio.mktree(tree2, 1) --- - true ... -- copy and copytree file1 = fio.pathjoin(tmp1, 'file.1') --- ... file2 = fio.pathjoin(tmp2, 'file.2') --- ... file3 = fio.pathjoin(tree, 'file.3') --- ... fh1 = fio.open(file1, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) --- ... fh1:write("gogo") --- - true ... fh1:close() --- - true ... fh1 = fio.open(file2, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) --- ... fh1:write("lolo") --- - true ... fh1:close() --- - true ... fio.symlink(file1, file3) --- - true ... fio.copyfile(file1, tmp2) --- - true ... fio.stat(fio.pathjoin(tmp2, "file.1")) ~= nil --- - true ... res, err = fio.copyfile(fio.pathjoin(tmp1, 'not_exists.txt'), tmp1) --- ... res --- - false ... err:match("failed to copy") ~= nil --- - true ... newdir = fio.pathjoin(tmpdir, "newdir") --- ... fio.copytree(fio.pathjoin(tmpdir, "1"), newdir) --- - true ... fio.stat(fio.pathjoin(newdir, "file.1")) ~= nil --- - true ... 
fio.stat(fio.pathjoin(newdir, "2", "file.2")) ~= nil --- - true ... fio.stat(fio.pathjoin(newdir, "2", "3", "file.3")) ~= nil --- - true ... fio.readlink(fio.pathjoin(newdir, "2", "3", "file.3")) == file1 --- - true ... fio.copytree("/no/such/dir", "/some/where") --- - false - Directory /no/such/dir does not exist ... -- ibuf read/write buf = buffer.ibuf() --- ... tmpdir = fio.tempdir() --- ... tmpfile = fio.pathjoin(tmpdir, "test1") --- ... fh = fio.open(tmpfile, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) --- ... fh:write('helloworld!') --- - true ... fh:seek(0) --- - 0 ... fh:read() --- - helloworld! ... fh:close() --- - true ... fh:read() --- - null - 'fio: Bad file descriptor' ... fio.unlink(tmpfile) --- - true ... tmpfile = fio.pathjoin(tmpdir, "test") --- ... fh = fio.open(tmpfile, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) --- ... fh:write('helloworld!') --- - true ... fh:seek(0) --- - 0 ... len = fh:read(buf:reserve(12)) --- ... ffi.string(buf:alloc(len), len) --- - helloworld! ... fh:seek(0) --- - 0 ... len = fh:read(buf:reserve(5), 5) --- ... ffi.string(buf:alloc(len), len) --- - hello ... len = fh:read(buf:reserve(5), 5) --- ... ffi.string(buf:alloc(len), len) --- - world ... len = fh:read(buf:reserve(5), 5) --- ... ffi.string(buf:alloc(len), len) --- - '!' ... buf:reset() --- ... len = fh:pread(buf:reserve(5), 5, 5) --- ... ffi.string(buf:alloc(len), len) --- - world ... len = fh:pread(buf:reserve(5), 5) --- ... ffi.string(buf:alloc(len), len) --- - hello ... fh:seek(0) --- - 0 ... fh:write(buf.rpos, buf:size()) --- - true ... fh:seek(0) --- - 0 ... fh:read(64) --- - worldhello! ... fh:pwrite(buf:read(5), 5, 5) --- - true ... fh:pwrite(buf:read(5), 5) --- - true ... fh:seek(0) --- - 0 ... fh:read(64) --- - helloworld! ... buf:recycle() --- ... fh:close() --- - true ... -- gh-2924 -- fio.path.exists lexists is_file, etc -- fio.path.is_file(tmpfile) --- - true ... fio.path.is_dir(tmpfile) --- - false ... fio.path.is_link(tmpfile) --- - false ... 
fio.path.exists(tmpfile) --- - true ... fio.path.lexists(tmpfile) --- - true ... non_existing_file = "/no/such/file" --- ... fio.path.is_file(non_existing_file) --- - false ... fio.path.is_dir(non_existing_file) --- - false ... fio.path.is_link(non_existing_file) --- - false ... fio.path.exists(non_existing_file) --- - false ... fio.path.lexists(non_existing_file) --- - false ... fio.path.is_file(tmpdir) --- - false ... fio.path.is_dir(tmpdir) --- - true ... fio.path.is_link(tmpdir) --- - false ... fio.path.exists(tmpdir) --- - true ... fio.path.lexists(tmpdir) --- - true ... link = fio.pathjoin(tmpdir, "link") --- ... fio.symlink(tmpfile, link) --- - true ... fio.path.is_file(link) --- - true ... fio.path.is_dir(link) --- - false ... fio.path.is_link(link) --- - true ... fio.path.exists(link) --- - true ... fio.path.lexists(link) --- - true ... fio.unlink(link) --- - true ... fio.symlink(non_existing_file, link) --- - true ... fio.path.is_file(link) --- - false ... fio.path.is_dir(link) --- - false ... fio.path.is_link(link) --- - true ... fio.path.exists(link) --- - false ... fio.path.lexists(link) --- - true ... fio.unlink(link) --- - true ... fio.symlink(tmpdir, link) --- - true ... fio.path.is_file(link) --- - false ... fio.path.is_dir(link) --- - true ... fio.path.is_link(link) --- - true ... fio.path.exists(link) --- - true ... fio.path.lexists(link) --- - true ... fio.unlink(link) --- - true ... fio.unlink(tmpfile) --- - true ... tmp1 = fio.pathjoin(tmpdir, "tmp1") --- ... tmp2= fio.pathjoin(tmpdir, "tmp2") --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function write_file(name, odd) local fh = fio.open(name, { 'O_RDWR', 'O_TRUNC', 'O_CREAT' }, 0777) if odd then fh:write(string.rep('1', 100)) else fh:write(string.rep('2', 100)) end fh:write(name) fh:seek(0) return fh end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... fh1 = write_file(tmp1) --- ... fh2 = write_file(tmp2) --- ... fiber = require('fiber') --- ... 
digest = require('digest') --- ... str = fh1:read() --- ... fh1:seek(0) --- - 0 ... hash = digest.crc32(str) --- ... ch = fiber.channel(1) --- ... f1 = fiber.create(function() str = fh1:read() ch:put(digest.crc32(str)) end) --- ... f2 = fiber.create(function() str = fh2:read() end) --- ... ch:get() == hash --- - true ... fio.unlink(tmp1) --- - true ... fio.unlink(tmp2) --- - true ... fio.rmdir(tmpdir) --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/app/crypto_hmac.test.lua0000664000000000000000000000217013306560010021730 0ustar rootroottest_run = require('test_run').new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua:\"]: '") crypto = require('crypto') type(crypto) -- -- Invalid arguments -- crypto.hmac.md4() crypto.hmac.md5() crypto.hmac.sha1() crypto.hmac.sha224() crypto.hmac.sha256() crypto.hmac.sha384() crypto.hmac.sha512() crypto.hmac.nodigest crypto.hmac.sha1('012345678', 'fred') key = '012345678' message = 'fred' crypto.hmac.sha1(key, nil) crypto.hmac.sha1(nil, message) crypto.hmac.sha1(nil, nil) crypto.hmac.md4(key, message) crypto.hmac.md5(key, message) crypto.hmac.sha1(key, message) crypto.hmac.sha224(key, message) crypto.hmac.sha256(key, message) crypto.hmac.sha384(key, message) crypto.hmac.sha512(key, message) -- -- Incremental update -- hmac_sha1 = crypto.hmac.sha1.new(key) hmac_sha1:update('abc') hmac_sha1:update('cde') hmac_sha1:result() == crypto.hmac.sha1(key, 'abccde') -- -- Empty string -- crypto.hmac.md4(key, '') crypto.hmac.md5(key, '') crypto.hmac.sha1(key, '') crypto.hmac.sha224(key, '') crypto.hmac.sha256(key, '') crypto.hmac.sha384(key, '') crypto.hmac.sha512(key, '') test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/app/CMakeLists.txt0000664000000000000000000000004613306560010020477 0ustar rootrootbuild_module(loaderslib loaderslib.c) tarantool_1.9.1.26.g63eb81e3c/test/app/env.result0000664000000000000000000000101413306560010017763 0ustar rootrootos = require('os') --- ... 
test_run = require('test_run').new() --- ... os.setenv('location', 'Hell_Hotel') --- ... os.getenv('location') --- - Hell_Hotel ... os.setenv('location', nil) --- ... do os.getenv('location') end --- ... env_dict = os.environ() --- ... type(env_dict) --- - table ... test_run:cmd("setopt delimiter ';'") --- - true ... do for k, v in pairs(env_dict) do if type(k) ~= 'string' or type(v) ~= 'string' then return false end end return true end; --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/app/digest.result0000664000000000000000000003164113306560010020463 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua:\"]: '") --- - true ... fiber = require('fiber') --- ... digest = require('digest') --- ... type(digest) --- - table ... -- -- Invalid arguments -- digest.md4() --- - error: 'builtin/crypto.lua:"]: Usage: digest.md4(string)' ... digest.md5() --- - error: 'builtin/crypto.lua:"]: Usage: digest.md5(string)' ... digest.sha1() --- - error: 'builtin/digest.lua:"]: Usage: digest.sha1(string)' ... digest.sha224() --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha224(string)' ... digest.sha256() --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha256(string)' ... digest.sha384() --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha384(string)' ... digest.sha512() --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha512(string)' ... digest.md4_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.md4_hex(string)' ... digest.md5_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.md5_hex(string)' ... digest.sha1_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.sha1_hex(string)' ... digest.sha224_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.sha224_hex(string)' ... digest.sha256_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.sha256_hex(string)' ... digest.sha384_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.sha384_hex(string)' ... 
digest.sha512_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.sha512_hex(string)' ... -- -- gh-1561: Bad checksum on non-string types -- digest.md4(12345LL) --- - error: 'builtin/crypto.lua:"]: Usage: digest.md4(string)' ... digest.md5(12345LL) --- - error: 'builtin/crypto.lua:"]: Usage: digest.md5(string)' ... digest.sha1(12345LL) --- - error: 'builtin/digest.lua:"]: Usage: digest.sha1(string)' ... digest.sha224(12345LL) --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha224(string)' ... digest.sha256(12345LL) --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha256(string)' ... digest.sha384(12345LL) --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha384(string)' ... digest.sha512(12345LL) --- - error: 'builtin/crypto.lua:"]: Usage: digest.sha512(string)' ... -- -- Empty string -- digest.md4('') --- - !!binary MdbP4NFq6TG3PFnX4MCJwA== ... digest.md5('') --- - !!binary 1B2M2Y8AsgTpgAmY7PhCfg== ... digest.sha1('') --- - !!binary 2jmj7l5rSw0yVb/vlWAYkK/YBwk= ... digest.sha224('') --- - !!binary 0UoCjCo6K8lHYQK7KII0xBWisB+CjqYqxbPkLw== ... digest.sha256('') --- - !!binary 47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU= ... digest.sha384('') --- - !!binary OLBgp1GsljhM2TJ+sbHjaiH9txEUvgdDTAzHv2P24donTt6/529l+9Ua0vFImLlb ... digest.sha512('') --- - !!binary z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXcg/SpIdNs6c5H0NE8XYXysP+DGNKHfuwvY7kxvUdBeoGlODJ6+SfaPg== ... digest.md4_hex('') --- - 31d6cfe0d16ae931b73c59d7e0c089c0 ... digest.md5_hex('') --- - d41d8cd98f00b204e9800998ecf8427e ... digest.sha1_hex('') --- - da39a3ee5e6b4b0d3255bfef95601890afd80709 ... digest.sha224_hex('') --- - d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f ... digest.sha256_hex('') --- - e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 ... digest.sha384_hex('') --- - 38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b ... 
digest.sha512_hex('') --- - cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e ... -- -- Non-empty string -- digest.md4('tarantool') --- - !!binary x+tS5t7XznGO6NpI2d5cSQ== ... digest.md5('tarantool') --- - !!binary nGmFYyNwcg5O9nMw42PD+w== ... digest.sha1('tarantool') --- - !!binary 0g7s+pB5mf+fu7NCqXJEg+z2x94= ... digest.sha224('tarantool') --- - !!binary gzVFDH9GRbhgAH1F/YJWaWX/KWy7SGsELk8hbQ== ... digest.sha256('tarantool') --- - !!binary 2TqjwZwQz+V10Y7RlpTVE90LNrzuU5NP9FT7ptKvxC0= ... digest.sha384('tarantool') --- - !!binary yiixxVa4HJTDQecIhiFiB2S0VeAKO0X5AurR5UeVVtArv7Ymt0/WCiGEQ0VYO62L ... digest.sha512('tarantool') --- - !!binary 3YAs+oTeL2V3n8mEVDLy8+GylBX65B0219kFEES9Tku4x7G8+W0Eh3QfGolEtMSJVne7k082Exxpk4Ggf5vNug== ... digest.md4_hex('tarantool') --- - c7eb52e6ded7ce718ee8da48d9de5c49 ... digest.md5_hex('tarantool') --- - 9c6985632370720e4ef67330e363c3fb ... digest.sha1_hex('tarantool') --- - d20eecfa907999ff9fbbb342a9724483ecf6c7de ... digest.sha224_hex('tarantool') --- - 8335450c7f4645b860007d45fd82566965ff296cbb486b042e4f216d ... digest.sha256_hex('tarantool') --- - d93aa3c19c10cfe575d18ed19694d513dd0b36bcee53934ff454fba6d2afc42d ... digest.sha384_hex('tarantool') --- - ca28b1c556b81c94c341e7088621620764b455e00a3b45f902ead1e5479556d02bbfb626b74fd60a21844345583bad8b ... digest.sha512_hex('tarantool') --- - dd802cfa84de2f65779fc9845432f2f3e1b29415fae41d36d7d9051044bd4e4bb8c7b1bcf96d0487741f1a8944b4c4895677bb934f36131c699381a07f9bcdba ... digest.md5_hex(123) --- - error: 'builtin/digest.lua:"]: Usage: digest.md5_hex(string)' ... digest.md5_hex('123') --- - 202cb962ac59075b964b07152d234b70 ... digest.md5_hex(true) --- - error: 'builtin/digest.lua:"]: Usage: digest.md5_hex(string)' ... digest.md5_hex('true') --- - b326b5062b2f0e69046810717534cb09 ... digest.md5_hex(nil) --- - error: 'builtin/digest.lua:"]: Usage: digest.md5_hex(string)' ... 
digest.md5_hex() --- - error: 'builtin/digest.lua:"]: Usage: digest.md5_hex(string)' ... digest.crc32() --- - error: 'builtin/digest.lua:"]: Usage digest.crc32(string)' ... digest.crc32_update(4294967295, '') --- - 4294967295 ... digest.crc32('abc') --- - 3384066120 ... digest.crc32_update(4294967295, 'abc') --- - 3384066120 ... digest.crc32('abccde') --- - 3628146660 ... digest.crc32_update(digest.crc32('abc'), 'cde') --- - 3628146660 ... crc = digest.crc32.new() --- ... crc:update('abc') --- ... crc2 = crc:copy() --- ... crc:update('cde') --- ... crc:result() == digest.crc32('abccde') --- - true ... crc2:update('def') --- ... crc2:result() == digest.crc32('abcdef') --- - true ... crc, crc2 = nil, nil --- ... digest.base64_encode('12345') --- - MTIzNDU= ... digest.base64_decode('MTIzNDU=') --- - '12345' ... digest.base64_encode('asdfl asdf adfa zxc vzxcvz llll') --- - YXNkZmwgYXNkZiBhZGZhIHp4YyB2enhjdnogbGxsbA== ... digest.base64_decode('YXNkZmwgYXNkZiBhZGZhIHp4YyB2enhjdnogbGxsbA==') --- - asdfl asdf adfa zxc vzxcvz llll ... digest.base64_encode('11 00 11 00 abcdef ABCDEF 00 11 00 11') --- - MTEgMDAgMTEgMDAgYWJjZGVmIEFCQ0RFRiAwMCAxMSAwMCAxMQ== ... digest.base64_decode('MTEgMDAgMTEgMDAgYWJjZGVmIEFCQ0RFRiAwMCAxMSAwMCAxMQ==') --- - 11 00 11 00 abcdef ABCDEF 00 11 00 11 ... s = string.rep('a', 54 * 2) -- two lines in base64 --- ... b = digest.base64_encode(s) --- ... b --- - 'YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFh YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFh ' ... digest.base64_decode(b) == s --- - true ... digest.base64_decode(nil) --- - error: 'builtin/digest.lua:"]: Usage: digest.base64_decode(string)' ... digest.base64_encode(nil) --- - error: 'builtin/digest.lua:"]: Usage: digest.base64_encode(string[, table])' ... digest.base64_encode(123) --- - error: 'builtin/digest.lua:"]: Usage: digest.base64_encode(string[, table])' ... 
digest.base64_decode(123) --- - error: 'builtin/digest.lua:"]: Usage: digest.base64_decode(string)' ... digest.guava('hello', 0) --- - error: 'bad argument #1 to ''?'' (cannot convert ''string'' to ''int64_t'')' ... digest.guava(1, 'nope_') --- - error: 'bad argument #2 to ''?'' (cannot convert ''string'' to ''int'')' ... digest.guava(10863919174838991, 11) --- - 8 ... digest.guava(2016238256797177309, 11) --- - 7 ... digest.guava(1673758223894951030, 11) --- - 7 ... digest.urandom() --- - error: 'builtin/digest.lua:"]: Usage: digest.urandom(len)' ... #digest.urandom(0) --- - 0 ... #digest.urandom(1) --- - 1 ... #digest.urandom(16) --- - 16 ... digest.murmur('1234') --- - 1859914009 ... mur = digest.murmur.new{seed=13} --- ... nulldigest = mur:result() --- ... mur:update('1234') --- ... mur:result() --- - 1859914009 ... mur_new = mur:copy() --- ... mur_new:update('1234') --- ... mur_new:result() ~= mur:result() --- - true ... mur:clear() --- ... nulldigest == mur:result() --- - true ... mur = digest.murmur.new{seed=14} --- ... mur:update('1234') --- ... mur:result() --- - 1689834281 ... mur, mur_new, nulldigest = nil, nil, nil --- ... digest.aes256cbc.encrypt('test123', 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh') --- - !!binary ynO0qTTrTLNpNm3GrTnjng== ... digest.aes256cbc.decrypt(digest.aes256cbc.encrypt('test123', 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh'), 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh') --- - test123 ... digest.aes256cbc.decrypt(digest.aes256cbc.encrypt('test123', 'passpasspasspasspasspasspasspass', 'iv12tras8712cvbh'), 'nosspasspasspasspasspasspasspass', 'iv12tras8712cvbh') --- - error: 'builtin/crypto.lua:"]: Can''t finalize cipher:error:06065064:digital envelope routines:EVP_DecryptFinal_ex:bad decrypt' ... -- -- Test base64 options. (gh-2479, gh-2478, gh-2777). -- b = digest.base64_encode('123', { urlsafe = true }) --- ... b --- - MTIz ... digest.base64_decode(b) --- - '123' ... 
b = digest.base64_encode('1234567', { urlsafe = true }) --- ... b --- - MTIzNDU2Nw ... digest.base64_decode(b) --- - '1234567' ... b = digest.base64_encode('12345678', { urlsafe = true }) --- ... b --- - MTIzNDU2Nzg ... digest.base64_decode(b) --- - '12345678' ... b = digest.base64_encode('1234567', { nopad = true }) --- ... b --- - MTIzNDU2Nw ... digest.base64_decode(b) --- - '1234567' ... b = digest.base64_encode(string.rep('a', 100), { nowrap = true }) --- ... b --- - YWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYQ== ... digest.base64_decode(b) --- - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ... -- -- gh-3358: any option makes base64 work like urlsafe. -- s = digest.base64_encode('?>>>', {nowrap = true}) --- ... -- Check for '+' - it is not urlsafe. s:find('+') ~= nil --- - true ... s = digest.base64_encode('?>>>', {nopad = true}) --- ... s:find('+') ~= nil --- - true ... digest.pbkdf2("password", "salt", 4096, 32) --- - !!binary xeR41ZKIyEGqUw22hFxMjZYok6ABzk4RpJY4c6qYE0o= ... digest.pbkdf2_hex("password", "salt", 4096, 32) --- - c5e478d59288c841aa530db6845c4c8d962893a001ce4e11a4963873aa98134a ... digest.pbkdf2_hex("password", "salt") --- - 0394a2ede332c9a13eb82e9b24631604c31df978b4e2f0fbd2c549944f9d79a536ceea9b92c6170cbbf0153ef33a4ff57321e17b7a5fadc33f7023ddd325da4744753b6d61571dcba34ae96099068cea39a07a4303263cf3749b5fbc93222946a3987f75f2d6aeea024acc4f95a0d6e7141cdb0b1f12065030ac169507f91b32 ... s, err = pcall(digest.pbkdf2, 12, "salt") --- ... s --- - false ... err:match("Usage") --- - Usage ... s, err = pcall(digest.pbkdf2_hex, 12, "salt") --- ... s --- - false ... err:match("Usage") --- - Usage ... s, err = pcall(digest.pbkdf2_hex, "password", "salt", "lol", "lol") --- ... s --- - false ... err:match("number") --- - number ... digest = nil --- ... test_run:cmd("clear filter") --- - true ... 
-- gh-3396: fiber-safe pbkdf2 res = {} --- ... sentry = fiber.channel() --- ... _ = test_run:cmd("setopt delimiter ';'") --- ... function test_pbkdf2() local digest = require('digest') for i = 1, 10 do table.insert(res, digest.pbkdf2('', 'salt', 100, 32):hex()) end sentry:put(fiber.id()) end; --- ... _ = test_run:cmd("setopt delimiter ''"); --- ... _ = fiber.create(test_pbkdf2) --- ... _ = fiber.create(test_pbkdf2) --- ... _ = sentry:get() --- ... _ = sentry:get() --- ... res --- - - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 - bafac115a0022b2894f2983b5b5102455bdd3ba7cfbeb09f219a9fde8f3ee6a9 ... tarantool_1.9.1.26.g63eb81e3c/test/app/fiber.result0000664000000000000000000003712413306565107020311 0ustar rootrootfiber = require('fiber') --- ... 
space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... env = require('test_run') --- ... test_run = env.new() --- ... -- A test case for a race condition between ev_schedule -- and wal_schedule fiber schedulers. -- The same fiber should not be scheduled by ev_schedule (e.g. -- due to cancellation) if it is within th wal_schedule queue. -- The test case is dependent on rows_per_wal, since this is when -- we reopen the .xlog file and thus wal_scheduler takes a long -- pause box.cfg.rows_per_wal --- - 50 ... space:insert{1, 'testing', 'lua rocks'} --- - [1, 'testing', 'lua rocks'] ... space:delete{1} --- - [1, 'testing', 'lua rocks'] ... space:insert{1, 'testing', 'lua rocks'} --- - [1, 'testing', 'lua rocks'] ... space:delete{1} --- - [1, 'testing', 'lua rocks'] ... space:insert{1, 'test box delete'} --- - [1, 'test box delete'] ... space:delete{1} --- - [1, 'test box delete'] ... space:insert{1, 'test box delete'} --- - [1, 'test box delete'] ... space:delete{1} --- - [1, 'test box delete'] ... space:insert{1684234849, 'test box delete'} --- - [1684234849, 'test box delete'] ... space:delete{1684234849} --- - [1684234849, 'test box delete'] ... space:insert{1684234849, 'test box delete'} --- - [1684234849, 'test box delete'] ... space:delete{1684234849} --- - [1684234849, 'test box delete'] ... space:insert{1684234849, 'test box.select()'} --- - [1684234849, 'test box.select()'] ... space:replace{1684234849, 'hello', 'world'} --- - [1684234849, 'hello', 'world'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... 
space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1667655012, 'goodbye', 'universe'} --- - [1667655012, 'goodbye', 'universe'] ... space:replace{1684234849} --- - [1684234849] ... space:delete{1684234849} --- - [1684234849] ... space:delete{1667655012} --- - [1667655012, 'goodbye', 'universe'] ... space:insert{1953719668, 'old', 1684234849} --- - [1953719668, 'old', 1684234849] ... -- test that insert produces a duplicate key error space:insert{1953719668, 'old', 1684234849} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... space:update(1953719668, {{'=', 1, 1953719668}, {'=', 2, 'new'}}) --- - [1953719668, 'new', 1684234849] ... space:update(1234567890, {{'+', 3, 1}}) --- ... space:update(1953719668, {{'+', 3, 1}}) --- - [1953719668, 'new', 1684234850] ... space:update(1953719668, {{'-', 3, 1}}) --- - [1953719668, 'new', 1684234849] ... space:update(1953719668, {{'-', 3, 1}}) --- - [1953719668, 'new', 1684234848] ... space:update(1953719668, {{'+', 3, 1}}) --- - [1953719668, 'new', 1684234849] ... space:delete{1953719668} --- - [1953719668, 'new', 1684234849] ... -- must be read-only space:insert{1953719668} --- - [1953719668] ... space:insert{1684234849} --- - [1684234849] ... space:delete{1953719668} --- - [1953719668] ... space:delete{1684234849} --- - [1684234849] ... 
space:insert{1953719668, 'hello world'} --- - [1953719668, 'hello world'] ... space:update(1953719668, {{'=', 2, 'bye, world'}}) --- - [1953719668, 'bye, world'] ... space:delete{1953719668} --- - [1953719668, 'bye, world'] ... -- test tuple iterators t = space:insert{1953719668} --- ... t = space:replace{1953719668, 'another field'} --- ... t = space:replace{1953719668, 'another field', 'one more'} --- ... space:truncate() --- ... -- test passing arguments in and out created fiber test_run:cmd("setopt delimiter ';'") --- - true ... function y() space = box.space['tweedledum'] while true do space:replace{1953719668, os.time()} fiber.sleep(0.001) end end; --- ... f = fiber.create(y); --- ... fiber.sleep(0.002); --- ... fiber.cancel(f); --- ... -- fiber garbage collection n = 1000; --- ... ch = fiber.channel(n); --- ... for k = 1, n, 1 do fiber.create( function() fiber.sleep(0) ch:put(k) end ) end; --- ... for k = 1, n, 1 do ch:get() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... collectgarbage('collect') --- - 0 ... -- check that these newly created fibers are garbage collected fiber.find(900) --- - null ... fiber.find(910) --- - null ... fiber.find(920) --- - null ... fiber.find() --- - error: 'fiber.find(id): bad arguments' ... fiber.find('test') --- - null ... -- https://github.com/tarantool/tarantool/issues/131 -- fiber.resume(fiber.cancel()) -- hang f = fiber.create(function() fiber.cancel(fiber.self()) end) --- ... f = nil --- ... -- https://github.com/tarantool/tarantool/issues/119 ftest = function() fiber.sleep(0.0001 * math.random() ) return true end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... result = 0; --- ... for i = 1, 10 do local res = {} for j = 1, 300 do fiber.create(function() table.insert(res, ftest()) end) end while #res < 300 do fiber.sleep(0) end result = result + #res end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... result --- - 3000 ... 
-- -- -- Test fiber.create() -- -- This should try to infinitely create fibers, -- but hit the fiber stack size limit and fail -- with an error. -- -- 2016-03-25 kostja -- -- fiber call stack depth was removed, we should -- use runtime memory limit control instead; the -- old limit was easy to circument with only -- slightly more complicated fork bomb code -- -- f = function() fiber.create(f) end -- f() -- -- Test argument passing -- f = function(a, b) fiber.create(function(arg) result = arg end, a..b) end --- ... f('hello ', 'world') --- ... result --- - hello world ... f('bye ', 'world') --- ... result --- - bye world ... -- -- Test that the created fiber is detached -- local f = fiber.create(function() result = fiber.status() end) --- ... result --- - running ... -- A test case for Bug#933487 -- tarantool crashed during shutdown if non running LUA fiber -- was created f = fiber.create(function () fiber.sleep(1) return true end) --- ... box.snapshot() --- - ok ... _, e = pcall(box.snapshot) --- ... e --- - ok ... _, e = pcall(box.snapshot) --- ... e --- - ok ... f = fiber.create(function () fiber.sleep(1) end) --- ... -- Test fiber.sleep() fiber.sleep(0) --- ... fiber.sleep(0.01) --- ... fiber.sleep(0.0001) --- ... fiber.sleep('hello') --- - error: 'fiber.sleep(delay): bad arguments' ... fiber.sleep(box, 0.001) --- - error: 'fiber.sleep(delay): bad arguments' ... -- test fiber.self() f = fiber.self() --- ... old_id = f:id() --- ... fiber.self():id() - old_id < 3 --- - true ... fiber.self():id() - old_id < 5 --- - true ... g = fiber.self() --- ... f==g --- - true ... -- arguments to fiber.create f = fiber.create(print('hello')) --- - error: '[string "f = fiber.create(print(''hello'')) "]:1: fiber.create(function, ...): bad arguments' ... -- test passing arguments in and out created fiber res = {} --- ... function r(a, b) res = { a, b } end --- ... f=fiber.create(r) --- ... while f:status() == 'running' do fiber.sleep(0) end --- ... res --- - [] ... 
f=fiber.create(r, 'hello') --- ... while f:status() == 'running' do fiber.sleep(0) end --- ... res --- - - hello ... f=fiber.create(r, 'hello, world') --- ... while f:status() == 'running' do fiber.sleep(0) end --- ... res --- - - hello, world ... f=fiber.create(r, 'hello', 'world', 'wide') --- ... while f:status() == 'running' do fiber.sleep(0) end --- ... res --- - - hello - world ... -- test fiber.status functions: invalid arguments fiber.status(1) --- - error: 'bad argument #1 to ''?'' (fiber expected, got number)' ... fiber.status('fafa-gaga') --- - error: 'bad argument #1 to ''?'' (fiber expected, got string)' ... fiber.status(nil) --- - error: 'bad argument #1 to ''?'' (fiber expected, got nil)' ... -- test fiber.cancel function r() fiber.sleep(1000) end --- ... f = fiber.create(r) --- ... fiber.cancel(f) --- ... while f:status() ~= 'dead' do fiber.sleep(0) end --- ... f:status() --- - dead ... -- Test fiber.name() old_name = fiber.name() --- ... fiber.name() == old_name --- - true ... fiber.self():name() == old_name --- - true ... fiber.name('hello fiber') --- ... fiber.name() --- - hello fiber ... fiber.self():name('bye fiber') --- ... fiber.self():name() --- - bye fiber ... fiber.self():name(old_name) --- ... space:drop() --- ... -- box.fiber test (create, resume, yield, status) dofile("fiber.lua") --- ... -- print run fiber's test box_fiber_run_test() --- - - 'A: odd 1' - 'tester: status(printer) = suspended' - 'count: 1' - 'status: suspended' - 'B: odd 1' - 'C: event 2' - 'count: 2' - 'status: suspended' - 'A: odd 3' - 'count: 3' - 'status: suspended' - 'B: odd 3' - 'C: event 4' - 'D: event 4' - 'A: odd 5' - 'count: 4' - 'status: suspended' - 'B: odd 5' ... -- various... function testfun() while true do fiber.sleep(10) end end --- ... f = fiber.create(testfun) --- ... f:cancel() --- ... fib_id = fiber.create(testfun):id() --- ... fiber.find(fib_id):cancel() --- ... while fiber.find(fib_id) ~= nil do fiber.sleep(0) end --- ... 
fiber.find(fib_id) --- - null ... -- -- Test local storage -- type(fiber.self().storage) --- - table ... fiber.self().storage.key = 48 --- ... fiber.self().storage.key --- - 48 ... test_run:cmd("setopt delimiter ';'") --- - true ... function testfun(mgmt, ch) mgmt:get() ch:put(fiber.self().storage.key) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... mgmt = fiber.channel() --- ... ch = fiber.channel() --- ... f = fiber.create(testfun, mgmt, ch) --- ... f.storage.key = 'some value' --- ... mgmt:put("wakeup plz") --- - true ... ch:get() --- - some value ... ch:close() --- ... mgmt:close() --- ... ch = nil --- ... mgmt = nil --- ... fiber.self().storage.key -- our local storage is not affected by f --- - 48 ... -- attempt to access local storage of dead fiber raises error pcall(function(f) return f.storage end, f) --- - false - '[string "return pcall(function(f) return f.storage end..."]:1: the fiber is dead' ... -- -- Test that local storage is garbage collected when fiber is died -- ffi = require('ffi') --- ... ch = fiber.channel(1) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function testfun() fiber.self().storage.x = ffi.gc(ffi.new('char[1]'), function() ch:put('gc ok') end) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... f = fiber.create(testfun) --- ... collectgarbage('collect') --- - 0 ... ch:get() --- - gc ok ... ch:close() --- ... ch = nil --- ... -- -- Test that local storage is not garbage collected with fiber object -- test_run:cmd("setopt delimiter ';'") --- - true ... function testfun(ch) fiber.self().storage.x = 'ok' collectgarbage('collect') ch:put(fiber.self().storage.x or 'failed') end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... ch = fiber.channel(1) --- ... fiber.create(testfun, ch):status() --- - dead ... ch:get() --- - ok ... ch:close() --- ... ch = nil --- ... -- # gh-125 box.fiber.cancel() by numeric id -- function y() while true do fiber.sleep(0.001) end end --- ... 
f = fiber.create(y) --- ... fiber.kill(f:id()) --- ... while f:status() ~= 'dead' do fiber.sleep(0.01) end --- ... -- # gh-420 fiber.cancel() assertion `!(f->flags & (1 << 2))' failed -- done = false --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function test() fiber.name('gh-420') local fun, errmsg = loadstring('fiber.cancel(fiber.self())') xpcall(fun, function() end) xpcall(fun, function() end) done = true fun() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... f = fiber.create(test) --- ... done --- - true ... -- # gh-536: fiber.info() doesn't list fibers with default names -- function loop() while true do fiber.sleep(10) end end --- ... f1 = fiber.create(loop) --- ... f2 = fiber.create(loop) --- ... f3 = fiber.create(loop) --- ... info = fiber.info() --- ... info[f1:id()] ~= nil --- - true ... info[f2:id()] ~= nil --- - true ... info[f3:id()] ~= nil --- - true ... info = fiber.info({bt = false}) --- ... info[f1:id()].backtrace == nil --- - true ... info = fiber.info({backtrace = false}) --- ... info[f1:id()].backtrace == nil --- - true ... f1:cancel() --- ... f2:cancel() --- ... f3:cancel() --- ... -- # gh-666: nulls in output -- getmetatable(fiber.info()) --- - __serialize: mapping ... zombie = false --- ... for fid, i in pairs(fiber.info()) do if i.name == 'zombie' then zombie = true end end --- ... zombie --- - false ... -- test case for gh-778 - fiber.id() on a dead fiber f = fiber.create(function() end) --- ... id = f:id() --- ... fiber.sleep(0) --- ... f:status() --- - dead ... id == f:id() --- - true ... -- -- gh-1238: log error if a fiber terminates due to uncaught Lua error -- -- must show in the log _ = fiber.create(function() error('gh-1238') end) --- ... test_run:grep_log("default", "gh%-1238") ~= nil --- - true ... -- must NOT show in the log _ = fiber.create(function() fiber.self():cancel() end) --- ... fiber.sleep(0.001) --- ... test_run:grep_log("default", "FiberIsCancelled") == nil --- - true ... 
-- must show in the log _ = fiber.create(function() box.error(box.error.ILLEGAL_PARAMS, 'oh my') end) --- ... test_run:grep_log("default", "ER_ILLEGAL_PARAMS:[^\n]*") --- - 'ER_ILLEGAL_PARAMS: Illegal parameters, oh my' ... -- #1734 fiber.name irt dead fibers fiber.create(function()end):name() --- - error: the fiber is dead ... -- -- gh-1926 -- fiber.create(function() fiber.wakeup(fiber.self()) end) --- - the fiber is dead ... -- -- gh-2066 test for fiber wakeup -- _ = box.schema.space.create('test2066', {if_not_exists = true}) --- ... _ = box.space.test2066:create_index('pk', {if_not_exists = true}) --- ... function fn2() fiber.sleep(60) box.space.test2066:replace({1}) end --- ... f2 = fiber.create(fn2) --- ... function fn1() fiber.sleep(60) f2:wakeup() end --- ... f1 = fiber.create(fn1) --- ... -- push two fibers to ready list f1:wakeup() f2:wakeup() --- ... fiber.sleep(0.01) --- ... box.space.test2066:drop() --- ... -- -- gh-2642 box.session.type() -- session_type = "" --- ... function fn1() session_type = box.session.type() return end --- ... _ = fiber.create(fn1) --- ... session_type --- - background ... session_type = nil --- ... fiber = nil --- ... -- -- gh-2622 fiber.name() truncates new name -- fiber = require('fiber') --- ... long_name = string.rep('a', 300) --- ... fiber.name() --- - 'console/unix/:' ... fiber.name('new_name') --- ... fiber.name(long_name) --- - error: Fiber name is too long ... fiber.name() --- - new_name ... fiber.name(long_name, {truncate = true}) --- ... fiber.name() --- - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ... f = fiber.self() --- ... fiber.name(f) --- - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ... fiber.name(f, 'new_name') --- ... fiber.name(f, long_name) --- - error: Fiber name is too long ... fiber.name(f) --- - new_name ... fiber.name(f, long_name, {truncate = true}) --- ... fiber.name(f) --- - aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ... test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/app/env.test.lua0000664000000000000000000000070013306560010020205 0ustar rootrootos = require('os') test_run = require('test_run').new() os.setenv('location', 'Hell_Hotel') os.getenv('location') os.setenv('location', nil) do os.getenv('location') end env_dict = os.environ() type(env_dict) test_run:cmd("setopt delimiter ';'") do for k, v in pairs(env_dict) do if type(k) ~= 'string' or type(v) ~= 'string' then return false end end return true end; test_run:cmd("setopt delimiter ''") tarantool_1.9.1.26.g63eb81e3c/test/app/crypto.result0000664000000000000000000000627213306560010020526 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua:\"]: '") --- - true ... crypto = require('crypto') --- ... type(crypto) --- - table ... ciph = crypto.cipher.aes128.cbc --- ... pass = '1234567887654321' --- ... iv = 'abcdefghijklmnop' --- ... enc = ciph.encrypt('test', pass, iv) --- ... enc --- - !!binary WpJJu6l6oziZcyvND8KueA== ... ciph.decrypt(enc, pass, iv) --- - test ... --Failing scenaries crypto.cipher.aes128.cbc.encrypt('a') --- - error: 'builtin/crypto.lua:"]: Key length should be equal to cipher key length (16 bytes)' ... crypto.cipher.aes128.cbc.encrypt('a', '123456', '435') --- - error: 'builtin/crypto.lua:"]: Key length should be equal to cipher key length (16 bytes)' ... crypto.cipher.aes128.cbc.encrypt('a', '1234567887654321') --- - error: 'builtin/crypto.lua:"]: Initial vector length should be equal to cipher iv length (16 bytes)' ... crypto.cipher.aes128.cbc.encrypt('a', '1234567887654321', '12') --- - error: 'builtin/crypto.lua:"]: Initial vector length should be equal to cipher iv length (16 bytes)' ... crypto.cipher.aes256.cbc.decrypt('a') --- - error: 'builtin/crypto.lua:"]: Key length should be equal to cipher key length (32 bytes)' ... 
crypto.cipher.aes256.cbc.decrypt('a', '123456', '435') --- - error: 'builtin/crypto.lua:"]: Key length should be equal to cipher key length (32 bytes)' ... crypto.cipher.aes256.cbc.decrypt('a', '12345678876543211234567887654321') --- - error: 'builtin/crypto.lua:"]: Initial vector length should be equal to cipher iv length (16 bytes)' ... crypto.cipher.aes256.cbc.decrypt('12', '12345678876543211234567887654321', '12') --- - error: 'builtin/crypto.lua:"]: Initial vector length should be equal to cipher iv length (16 bytes)' ... crypto.cipher.aes192.cbc.encrypt.new() --- - error: Key length should be equal to cipher key length (24 bytes) ... crypto.cipher.aes192.cbc.encrypt.new('123321') --- - error: Key length should be equal to cipher key length (24 bytes) ... crypto.cipher.aes192.cbc.decrypt.new('123456788765432112345678') --- - error: Initial vector length should be equal to cipher iv length (16 bytes) ... crypto.cipher.aes192.cbc.decrypt.new('123456788765432112345678', '12345') --- - error: Initial vector length should be equal to cipher iv length (16 bytes) ... crypto.cipher.aes100.efb --- - error: '[string "return crypto.cipher.aes100.efb "]:1: Cipher method "aes100" is not supported' ... crypto.cipher.aes256.nomode --- - error: 'builtin/crypto.lua:"]: Cipher mode nomode is not supported' ... crypto.digest.nodigest --- - error: '[string "return crypto.digest.nodigest "]:1: Digest method "nodigest" is not supported' ... bad_pass = '8765432112345678' --- ... bad_iv = '123456abcdefghij' --- ... ciph.decrypt(enc, bad_pass, iv) --- - error: 'builtin/crypto.lua:"]: Can''t finalize cipher:error:06065064:digital envelope routines:EVP_DecryptFinal_ex:bad decrypt' ... ciph.decrypt(enc, pass, bad_iv) --- - error: 'builtin/crypto.lua:"]: Can''t finalize cipher:error:06065064:digital envelope routines:EVP_DecryptFinal_ex:bad decrypt' ... test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/app/luafun.test.lua0000664000000000000000000000270313306560010020714 0ustar rootroot-------------------------------------------------------------------------------- -- # luafun integration -------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) for i = 1,5,1 do space:replace({i, i}) end fun = require('fun') env = require('test_run') test_run = env.new() -- print all methods from metatable methods = fun.iter(getmetatable(fun.range(5)).__index):totable() table.sort(methods) methods -- iter on arrays fun.iter({1, 2, 3}):totable() fun.iter({2, 4, 6, 8}):all(function(x) return x % 2 == 1 end) -- iter on hashes fun.iter({a = 1, b = 2, c = 3}):tomap() -- iter on tuple fun.iter(box.tuple.new({1, 2, 3}):pairs()):totable() -- iter on space (using __ipairs) function pred(t) return t[1] % 2 == 0 end fun.iter(space):totable() fun.iter(space:pairs()):totable() space:pairs():filter(pred):drop(2):take(3):totable() -- iter on index (using __ipairs) fun.iter(space.index[0]):totable() fun.iter(space.index[0]:pairs()):totable() space.index[0]:pairs():drop(2):take(3):totable() -- test global functions test_run:cmd("setopt delimiter ';'") fun.reduce(function(acc, val) return acc + val end, 0, fun.filter(function(x) return x % 11 == 0 end, fun.map(function(x) return 2 * x end, fun.range(1000)))); test_run:cmd("setopt delimiter ''"); t = {} fun.foreach(function(x) table.insert(t, x) end, "abcde") t space:drop() tarantool_1.9.1.26.g63eb81e3c/test/app/argparse.test.lua0000664000000000000000000000266013306560010021230 0ustar rootroot-- internal argparse test argparse = require('internal.argparse').parse -- test with empty arguments and options argparse() -- test with command name (should be excluded) argparse({[0] = 'tarantoolctl', 'start', 'instance'}) -- test long option argparse({'tarantoolctl', 'start', 'instance', '--start'}) 
argparse({'tarantoolctl', 'start', 'instance', '--start', '--stop'}) argparse({'tarantoolctl', 'start', 'instance', '--start', '--stop', '--stop'}) argparse({'tarantoolctl', 'start', 'instance', '--start', '--stop', '--stop'}) argparse({'tarantoolctl', 'start', 'instance', '-baobab'}) argparse({'tarantoolctl', 'start', 'instance', '-vovov'}) argparse({'tarantoolctl', 'start', 'instance', '--start=lalochka'}) argparse({'tarantoolctl', 'start', 'instance', '--start', 'lalochka'}) argparse({'tarantoolctl', 'start', 'instance', '--start', '--', 'lalochka'}) argparse({'tarantoolctl', 'start', 'instance', '--start', '-', 'lalochka'}) argparse({'--verh=42'}, {{'verh', 'number'}}) argparse({'--verh=42'}, {{'verh', 'number+'}}) argparse({'--verh=42'}, {{'verh', 'string'}}) argparse({'--verh=42'}, {{'verh', 'string+'}}) argparse({'--verh=42'}, {{'verh'}}) argparse({'--verh=42'}, {'verh'}) argparse({'--verh=42'}, {{'verh', 'boolean'}}) argparse({'--verh=42'}, {{'verh', 'boolean+'}}) argparse({'--verh=42'}, {'niz'}) argparse({'--super-option'}) argparse({'tarantoolctl', 'start', 'instance', '--start=lalochka', 'option', '-', 'another option'}) tarantool_1.9.1.26.g63eb81e3c/test/app/uuid.result0000664000000000000000000000527713306560010020160 0ustar rootroot-- box.uuid uuid = require('uuid') --- ... -- -- RFC4122 compliance -- uu = uuid.new() --- ... -- new()always generates RFC4122 variant bit.band(uu.clock_seq_hi_and_reserved, 0xc0) == 0x80 --- - true ... vsn = bit.rshift(uu.time_hi_and_version, 12) --- ... -- new() generates time-based or random-based version vsn == 1 or vsn == 4 --- - true ... -- -- to/from string -- uu = uuid() --- ... #uu:str() --- - 36 ... string.match(uu:str(), '^[a-f0-9%-]+$') ~= nil --- - true ... uu == uuid.fromstr(uu:str()) --- - true ... uu = uuid.fromstr('ba90d815-14e0-431d-80c0-ce587885bb78') --- ... uu:str() --- - ba90d815-14e0-431d-80c0-ce587885bb78 ... tostring(uu) --- - ba90d815-14e0-431d-80c0-ce587885bb78 ... 
tostring(uu) == uu:str() --- - true ... uu.time_low; --- - 3130054677 ... uu.time_mid; --- - 5344 ... uu.time_hi_and_version; --- - 17181 ... uu.clock_seq_hi_and_reserved; --- - 128 ... uu.clock_seq_low; --- - 192 ... uu.node[0] --- - 206 ... uu.node[1] --- - 88 ... uu.node[2] --- - 120 ... uu.node[3] --- - 133 ... uu.node[4] --- - 187 ... uu.node[5] --- - 120 ... -- aliases #uuid.str() --- - 36 ... -- invalid values uuid.fromstr(nil) --- - error: 'builtin/uuid.lua:47: fromstr(str)' ... uuid.fromstr('') --- - null ... uuid.fromstr('blablabla') --- - null ... uuid.fromstr(string.rep(' ', 36)) --- - null ... uuid.fromstr('ba90d81514e0431d80c0ce587885bb78') --- - null ... uuid.fromstr('ba90d815-14e0-431d-80c0') --- - null ... uuid.fromstr('ba90d815-14e0-431d-80c0-tt587885bb7') --- - null ... -- -- to/from binary -- uu = uuid() --- ... #uu:bin() --- - 16 ... #uu:bin('h') --- - 16 ... #uu:bin('l') --- - 16 ... #uu:bin('n') --- - 16 ... #uu:bin('b') --- - 16 ... uu:bin() == uu:bin('h') --- - true ... uu:bin('n') ~= uu:bin('h') --- - true ... uu:bin('b') ~= uu:bin('l') --- - true ... uu == uuid.frombin(uu:bin()) --- - true ... uu == uuid.frombin(uu:bin('b'), 'b') --- - true ... uu == uuid.frombin(uu:bin('l'), 'l') --- - true ... uu = uuid.fromstr('adf9d02e-0756-11e4-b5cf-525400123456') --- ... uu:bin('l') --- - !!binary LtD5rVYH5BG1z1JUABI0Vg== ... uu:bin('b') --- - !!binary rfnQLgdWEeS1z1JUABI0Vg== ... -- aliases #uuid.bin() --- - 16 ... #uuid.bin('l') --- - 16 ... -- -- eq and nil -- uu = uuid.new() --- ... uuid.NULL --- - 00000000-0000-0000-0000-000000000000 ... uuid.NULL:isnil() --- - true ... uuid.NULL ~= uu --- - true ... uu:isnil() --- - false ... uu == uu --- - true ... uu == uu --- - true ... uu == nil --- - false ... uu == 12345 --- - false ... uu == "blablabla" --- - false ... -- -- invalid usage -- uu = uuid.new() --- ... uu.isnil() --- - error: 'Usage: uuid:isnil()' ... uu.bin() --- - error: 'Usage: uuid:bin([byteorder])' ... 
uu.str() --- - error: 'Usage: uuid:str()' ... uu = nil --- ... uuid = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/app/uuid.test.lua0000664000000000000000000000311213306560010020363 0ustar rootroot-- box.uuid uuid = require('uuid') -- -- RFC4122 compliance -- uu = uuid.new() -- new()always generates RFC4122 variant bit.band(uu.clock_seq_hi_and_reserved, 0xc0) == 0x80 vsn = bit.rshift(uu.time_hi_and_version, 12) -- new() generates time-based or random-based version vsn == 1 or vsn == 4 -- -- to/from string -- uu = uuid() #uu:str() string.match(uu:str(), '^[a-f0-9%-]+$') ~= nil uu == uuid.fromstr(uu:str()) uu = uuid.fromstr('ba90d815-14e0-431d-80c0-ce587885bb78') uu:str() tostring(uu) tostring(uu) == uu:str() uu.time_low; uu.time_mid; uu.time_hi_and_version; uu.clock_seq_hi_and_reserved; uu.clock_seq_low; uu.node[0] uu.node[1] uu.node[2] uu.node[3] uu.node[4] uu.node[5] -- aliases #uuid.str() -- invalid values uuid.fromstr(nil) uuid.fromstr('') uuid.fromstr('blablabla') uuid.fromstr(string.rep(' ', 36)) uuid.fromstr('ba90d81514e0431d80c0ce587885bb78') uuid.fromstr('ba90d815-14e0-431d-80c0') uuid.fromstr('ba90d815-14e0-431d-80c0-tt587885bb7') -- -- to/from binary -- uu = uuid() #uu:bin() #uu:bin('h') #uu:bin('l') #uu:bin('n') #uu:bin('b') uu:bin() == uu:bin('h') uu:bin('n') ~= uu:bin('h') uu:bin('b') ~= uu:bin('l') uu == uuid.frombin(uu:bin()) uu == uuid.frombin(uu:bin('b'), 'b') uu == uuid.frombin(uu:bin('l'), 'l') uu = uuid.fromstr('adf9d02e-0756-11e4-b5cf-525400123456') uu:bin('l') uu:bin('b') -- aliases #uuid.bin() #uuid.bin('l') -- -- eq and nil -- uu = uuid.new() uuid.NULL uuid.NULL:isnil() uuid.NULL ~= uu uu:isnil() uu == uu uu == uu uu == nil uu == 12345 uu == "blablabla" -- -- invalid usage -- uu = uuid.new() uu.isnil() uu.bin() uu.str() uu = nil uuid = nil tarantool_1.9.1.26.g63eb81e3c/test/app/lua/0000775000000000000000000000000013306560010016520 5ustar 
rootroottarantool_1.9.1.26.g63eb81e3c/test/app/lua/fiber.lua0000664000000000000000000000412013306560010020307 0ustar rootrootlocal fiber = require('fiber') -- -------------------------------------------------------------------------- -- -- Local functions -- -------------------------------------------------------------------------- -- -- printer task fiber local printer_task -- tester task fiber local tester_task -- test log local result = {} -- -------------------------------------------------------------------------- -- -- printer task routines -- -------------------------------------------------------------------------- -- -- odd printer local function odd(x) table.insert(result,'A: odd '..tostring(x)) fiber.sleep(0.0) table.insert(result,'B: odd '..tostring(x)) end -- even printer local function even(x) table.insert(result,'C: event '..tostring(x)) if x == 2 then return end table.insert(result,'D: event '..tostring(x)) end -- printer task routine main function local function printer_task_routine(x) for i = 1, x do if i == 3 then fiber.sleep(0) end if i % 2 == 0 then even(i) else odd(i) end end end ------------------------------------------------------------------------ -- tester task routines ------------------------------------------------------------------------ -- tester task routine main function local function tester_task_routine() printer_task = fiber.create(printer_task_routine, 5) table.insert(result, "tester: status(printer) = " .. printer_task:status()) local count = 1 while printer_task:status() ~= "dead" do table.insert(result, "count: " .. tostring(count)) table.insert(result, "status: " .. 
printer_task:status()) count = count + 1 fiber.sleep(0) end end -- -------------------------------------------------------------------------- -- -- Test functions -- -------------------------------------------------------------------------- -- -- run fiber test function box_fiber_run_test() -- run tester tester_task = fiber.create(tester_task_routine) while tester_task:status() ~= 'dead' do fiber.sleep(0) end return result end tarantool_1.9.1.26.g63eb81e3c/test/app/loaderslib.c0000664000000000000000000000015613306560010020225 0ustar rootroot#include LUA_API int luaopen_loaderslib(lua_State *L) { lua_pushliteral(L, "success"); return 1; } tarantool_1.9.1.26.g63eb81e3c/test/app/fiber_cond.result0000664000000000000000000000140313306560010021267 0ustar rootrootfiber = require('fiber') --- ... -- fiber.cond c = fiber.cond() --- ... tostring(c) --- - cond ... -- args validation c.wait() --- - error: 'usage: cond:wait([timeout])' ... c.wait('1') --- - error: 'bad argument #1 to ''?'' (fiber.cond expected, got string)' ... c:wait('1') --- - false ... c:wait(-1) --- - error: 'usage: cond:wait([timeout])' ... -- timeout c:wait(0.1) --- - false ... -- wait success fiber.create(function() fiber.sleep(.5); c:broadcast() end) and c:wait(.6) --- - true ... -- signal t = {} --- ... for i = 1,4 do fiber.create(function() c:wait(); table.insert(t, '#') end) end --- ... c:signal() --- ... fiber.sleep(0.1) --- ... t --- - - '#' ... -- broadcast c:broadcast() --- ... fiber.sleep(0.1) --- ... t --- - - '#' - '#' - '#' - '#' ... tarantool_1.9.1.26.g63eb81e3c/test/app/msgpack.result0000664000000000000000000000611313306560010020625 0ustar rootrootbuffer = require 'buffer' --- ... msgpack = require 'msgpack' --- ... -- Arguments check. buf = buffer.ibuf() --- ... msgpack.encode() --- - error: 'msgpack.encode: a Lua object expected' ... msgpack.encode('test', 'str') --- - error: expected cdata as 2 argument ... 
msgpack.encode('test', buf.buf) --- - error: 'msgpack.encode: argument 2 must be of type ''struct ibuf''' ... msgpack.decode() --- - error: 'msgpack.decode: a Lua string or ''char *'' expected' ... msgpack.decode(123) --- - error: 'msgpack.decode: a Lua string or ''char *'' expected' ... msgpack.decode(buf) --- - error: 'msgpack.decode: a Lua string or ''char *'' expected' ... msgpack.decode(buf.buf, 'size') --- - error: 'bad argument #2 to ''?'' (number expected, got string)' ... msgpack.decode('test', 0) --- - error: 'msgpack.decode: offset is out of bounds' ... msgpack.decode('test', 5) --- - error: 'msgpack.decode: offset is out of bounds' ... msgpack.decode('test', 'offset') --- - error: 'bad argument #2 to ''?'' (number expected, got string)' ... msgpack.decode_unchecked() --- - error: 'msgpack.decode: a Lua string or ''char *'' expected' ... msgpack.decode_unchecked(123) --- - error: 'msgpack.decode: a Lua string or ''char *'' expected' ... msgpack.decode_unchecked(buf) --- - error: 'msgpack.decode: a Lua string or ''char *'' expected' ... msgpack.decode_unchecked('test', 0) --- - error: 'msgpack.decode: offset is out of bounds' ... msgpack.decode_unchecked('test', 5) --- - error: 'msgpack.decode: offset is out of bounds' ... msgpack.decode_unchecked('test', 'offset') --- - error: 'bad argument #2 to ''?'' (number expected, got string)' ... -- Encode/decode a string. s = msgpack.encode({1, 2, 3}) .. msgpack.encode({4, 5, 6}) --- ... obj, offset = msgpack.decode(s) --- ... obj --- - [1, 2, 3] ... obj, offset = msgpack.decode(s, offset) --- ... obj --- - [4, 5, 6] ... offset == #s + 1 --- - true ... obj, offset = msgpack.decode_unchecked(s) --- ... obj --- - [1, 2, 3] ... obj, offset = msgpack.decode_unchecked(s, offset) --- ... obj --- - [4, 5, 6] ... offset == #s + 1 --- - true ... -- Encode/decode a buffer. buf = buffer.ibuf() --- ... len = msgpack.encode({1, 2, 3}, buf) --- ... len = msgpack.encode({4, 5, 6}, buf) + len --- ... 
buf:size() == len --- - true ... orig_rpos = buf.rpos --- ... obj, rpos = msgpack.decode(buf.rpos, buf:size()) --- ... obj --- - [1, 2, 3] ... buf.rpos = rpos --- ... obj, rpos = msgpack.decode(buf.rpos, buf:size()) --- ... obj --- - [4, 5, 6] ... buf.rpos = rpos --- ... buf:size() == 0 --- - true ... buf.rpos = orig_rpos --- ... obj, rpos = msgpack.decode_unchecked(buf.rpos, buf:size()) --- ... obj --- - [1, 2, 3] ... buf.rpos = rpos --- ... obj, rpos = msgpack.decode_unchecked(buf.rpos, buf:size()) --- ... obj --- - [4, 5, 6] ... buf.rpos = rpos --- ... buf:size() == 0 --- - true ... -- Invalid msgpack. s = msgpack.encode({1, 2, 3}) --- ... s = s:sub(1, -2) --- ... msgpack.decode(s) --- - error: 'msgpack.decode: invalid MsgPack' ... buf = buffer.ibuf() --- ... msgpack.encode({1, 2, 3}, buf) --- - 4 ... msgpack.decode(buf.rpos, buf:size() - 1) --- - error: 'msgpack.decode: invalid MsgPack' ... tarantool_1.9.1.26.g63eb81e3c/test/app/luafun.result0000664000000000000000000000520013306560010020466 0ustar rootroot-------------------------------------------------------------------------------- -- # luafun integration -------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... for i = 1,5,1 do space:replace({i, i}) end --- ... fun = require('fun') --- ... env = require('test_run') --- ... test_run = env.new() --- ... -- print all methods from metatable methods = fun.iter(getmetatable(fun.range(5)).__index):totable() --- ... table.sort(methods) --- ... 
methods --- - - all - any - car - cdr - chain - cycle - drop - drop_n - drop_while - each - elem_index - elem_indexes - elem_indices - enumerate - every - filter - foldl - for_each - foreach - grep - head - index - index_of - indexes - indices - intersperse - is_null - is_prefix_of - length - map - max - max_by - maximum - min - min_by - minimum - minimum_by - nth - op - operator - partition - product - reduce - remove_if - some - span - split - split_at - sum - tail - take - take_n - take_while - tomap - totable - unwrap - zip ... -- iter on arrays fun.iter({1, 2, 3}):totable() --- - - 1 - 2 - 3 ... fun.iter({2, 4, 6, 8}):all(function(x) return x % 2 == 1 end) --- - false ... -- iter on hashes fun.iter({a = 1, b = 2, c = 3}):tomap() --- - b: 2 a: 1 c: 3 ... -- iter on tuple fun.iter(box.tuple.new({1, 2, 3}):pairs()):totable() --- - - 1 - 2 - 3 ... -- iter on space (using __ipairs) function pred(t) return t[1] % 2 == 0 end --- ... fun.iter(space):totable() --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] ... fun.iter(space:pairs()):totable() --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] ... space:pairs():filter(pred):drop(2):take(3):totable() --- - [] ... -- iter on index (using __ipairs) fun.iter(space.index[0]):totable() --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] ... fun.iter(space.index[0]:pairs()):totable() --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] ... space.index[0]:pairs():drop(2):take(3):totable() --- - - [3, 3] - [4, 4] - [5, 5] ... -- test global functions test_run:cmd("setopt delimiter ';'") --- - true ... fun.reduce(function(acc, val) return acc + val end, 0, fun.filter(function(x) return x % 11 == 0 end, fun.map(function(x) return 2 * x end, fun.range(1000)))); --- - 90090 ... test_run:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... fun.foreach(function(x) table.insert(t, x) end, "abcde") --- ... t --- - - a - b - c - d - e ... space:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/app/fiber_cond.test.lua0000664000000000000000000000064213306560010021514 0ustar rootrootfiber = require('fiber') -- fiber.cond c = fiber.cond() tostring(c) -- args validation c.wait() c.wait('1') c:wait('1') c:wait(-1) -- timeout c:wait(0.1) -- wait success fiber.create(function() fiber.sleep(.5); c:broadcast() end) and c:wait(.6) -- signal t = {} for i = 1,4 do fiber.create(function() c:wait(); table.insert(t, '#') end) end c:signal() fiber.sleep(0.1) t -- broadcast c:broadcast() fiber.sleep(0.1) t tarantool_1.9.1.26.g63eb81e3c/test/replication-py/0000775000000000000000000000000013306560010020116 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/replication-py/failed.lua0000664000000000000000000000033113306560010022042 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication-py/suite.ini0000664000000000000000000000015513306560010021751 0ustar rootroot[default] core = tarantool script = master.lua description = tarantool/box, replication is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/replication-py/conflict.test.py0000664000000000000000000000654013306560010023254 0ustar rootrootfrom lib.tarantool_server import TarantoolServer from time import sleep import yaml def check_replication(nodes, select_args=''): for node in nodes: node.admin('box.space.test:select{%s}' % select_args) master = server master.admin("box.schema.user.grant('guest', 'replication')") replica = TarantoolServer(server.ini) replica.script = 'replication-py/replica.lua' replica.vardir = server.vardir replica.rpl_master = master replica.deploy() def parallel_run(cmd1, cmd2, compare): print 'parallel send: %s' % cmd1 print 'parallel send: %s' % cmd2 master.admin.socket.sendall('%s\n' % cmd1) replica.admin.socket.sendall('%s\n' % cmd2) master.admin.socket.recv(2048) 
replica.admin.socket.recv(2048) # wait for status changing in tarantool master_status = yaml.load(master.admin( 'box.info().replication[2].upstream.status', silent=True ))[0] replica_status = yaml.load(replica.admin( 'box.info().replication[1].upstream.status', silent=True ))[0] # wait for status results = [f(master_status, replica_status) for f in compare] while True: sleep(0.01) if any(results): print 'replication state is correct' break def prepare_cluster(): print 'reset master-master replication' master.stop() master.cleanup(True) master.start() master.admin("box.schema.user.grant('guest', 'replication')", silent=True) replica.stop() replica.cleanup(True) replica.start() master.admin("box.cfg{replication='%s'}" % replica.iproto.uri, silent=True) r1_id = replica.get_param('id') r2_id = master.get_param('id') master.admin("space = box.schema.space.create('test')", silent=True) master.admin("index = space:create_index('primary', { type = 'tree'})", silent=True) master.admin('for k = 1, 9 do space:insert{k, k*k} end', silent=True) # wait lsn replica.wait_lsn(r2_id, master.get_lsn(r2_id)) master.wait_lsn(r1_id, replica.get_lsn(r1_id)) # test1: double update in master and replica prepare_cluster() parallel_run( "box.space.test:update(1, {{'#', 2, 1}})", "box.space.test:update(1, {{'#', 2, 1}})", [ lambda x,y: x == 'stopped' or y == 'stopped', lambda x,y: x == 'follow' and y == 'follow', ] ) check_replication([master, replica], '1') # test2: insert different values with single id prepare_cluster() parallel_run( 'box.space.test:insert{20, 1}', 'box.space.test:insert{20, 2}', [ lambda x,y: x == 'stopped' or y == 'stopped', lambda x,y: x == 'follow' and y == 'follow', ] ) # test3: update different values prepare_cluster() parallel_run( "box.space.test:update(2, {{'=', 2, 1}})", "box.space.test:update(2, {{'=', 2, 2}})", [lambda x,y: x == 'follow' and y == 'follow',] ) # test4: CRDT increment with update prepare_cluster() parallel_run( "box.space.test:update(1, {{'+', 2, 
1}})", "box.space.test:update(1, {{'+', 2, 2}})", [lambda x,y: x == 'follow' and y == 'follow',] ) check_replication([master, replica], '1') # test5: delete not existing key prepare_cluster() parallel_run( "box.space.test:delete(999)", "box.space.test:delete(999)", [lambda x,y: x == 'follow' and y == 'follow',] ) check_replication([master, replica]) # cleanup replica.stop() replica.cleanup(True) server.stop() server.cleanup(True) server.deploy() tarantool_1.9.1.26.g63eb81e3c/test/replication-py/replica.lua0000664000000000000000000000040313306560010022235 0ustar rootroot#!/usr/bin/env tarantool box_cfg_done = false require('console').listen(os.getenv('ADMIN')) box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, }) box_cfg_done = true tarantool_1.9.1.26.g63eb81e3c/test/replication-py/multi.result0000664000000000000000000000261113306560010022510 0ustar rootrootfiber = require('fiber') --- ... box.schema.user.grant('guest', 'replication') --- ... box.schema.user.grant('guest', 'execute', 'universe') --- ... ---------------------------------------------------------------------- Bootstrap replicas ---------------------------------------------------------------------- done ---------------------------------------------------------------------- Make a full mesh ---------------------------------------------------------------------- server 1 connected server 1 connected server 1 connected box.info.vclock --- - {1: 4} ... server 2 connected server 2 connected server 2 connected box.info.vclock --- - {1: 4} ... server 3 connected server 3 connected server 3 connected box.info.vclock --- - {1: 4} ... done ---------------------------------------------------------------------- Test inserts ---------------------------------------------------------------------- Create a test space _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary') --- ... 
server 1 is ok server 2 is ok server 3 is ok Insert records inserted 60 records Synchronize server 3 done server 3 done server 3 done done Check data server 1 is ok server 2 is ok server 3 is ok Done ---------------------------------------------------------------------- Cleanup ---------------------------------------------------------------------- server 1 done server 2 done server 3 done tarantool_1.9.1.26.g63eb81e3c/test/replication-py/conflict.result0000664000000000000000000000243113306560010023157 0ustar rootrootbox.schema.user.grant('guest', 'replication') --- ... reset master-master replication parallel send: box.space.test:update(1, {{'#', 2, 1}}) parallel send: box.space.test:update(1, {{'#', 2, 1}}) replication state is correct box.space.test:select{1} --- - - [1] ... box.space.test:select{1} --- - - [1] ... reset master-master replication parallel send: box.space.test:insert{20, 1} parallel send: box.space.test:insert{20, 2} replication state is correct reset master-master replication parallel send: box.space.test:update(2, {{'=', 2, 1}}) parallel send: box.space.test:update(2, {{'=', 2, 2}}) replication state is correct reset master-master replication parallel send: box.space.test:update(1, {{'+', 2, 1}}) parallel send: box.space.test:update(1, {{'+', 2, 2}}) replication state is correct box.space.test:select{1} --- - - [1, 4] ... box.space.test:select{1} --- - - [1, 4] ... reset master-master replication parallel send: box.space.test:delete(999) parallel send: box.space.test:delete(999) replication state is correct box.space.test:select{} --- - - [1, 1] - [2, 4] - [3, 9] - [4, 16] - [5, 25] - [6, 36] - [7, 49] - [8, 64] - [9, 81] ... box.space.test:select{} --- - - [1, 1] - [2, 4] - [3, 9] - [4, 16] - [5, 25] - [6, 36] - [7, 49] - [8, 64] - [9, 81] ... 
tarantool_1.9.1.26.g63eb81e3c/test/replication-py/swap.test.py0000664000000000000000000000766113306560010022432 0ustar rootrootimport os import tarantool from lib.tarantool_server import TarantoolServer import re import yaml REPEAT = 20 ID_BEGIN = 0 ID_STEP = 5 LOGIN = 'test' PASSWORD = 'pass123456' engines = ['memtx', 'vinyl'] def insert_tuples(_server, begin, end, msg = "tuple"): for engine in engines: for i in range(begin, end): print 'box.space.%s:insert{%d, "%s %d"}' % (engine, i, msg, i) print '-' space = _server.iproto.py_con.space(engine) print space.insert((i, '%s %d' % (msg, i))) def select_tuples(_server, begin, end): for engine in engines: for i in range(begin, end): print 'box.space.%s:select{%d}' % (engine, i) print '-' space = _server.iproto.py_con.space(engine) print space.select(i) # master server master = server # Re-deploy server to cleanup Phia data master.stop() master.cleanup() master.deploy() master.admin("box.schema.user.create('%s', { password = '%s'})" % (LOGIN, PASSWORD)) master.admin("box.schema.user.grant('%s', 'read,write,execute', 'universe')" % LOGIN) master.iproto.py_con.authenticate(LOGIN, PASSWORD) master.uri = '%s:%s@%s' % (LOGIN, PASSWORD, master.iproto.uri) os.putenv('MASTER', master.uri) # replica server replica = TarantoolServer() replica.script = "replication-py/replica.lua" replica.vardir = server.vardir #os.path.join(server.vardir, 'replica') replica.deploy() replica.admin("while box.info.id == 0 do require('fiber').sleep(0.01) end") replica.uri = '%s:%s@%s' % (LOGIN, PASSWORD, replica.iproto.uri) replica.admin("while box.space['_priv']:len() < 1 do require('fiber').sleep(0.01) end") replica.iproto.py_con.authenticate(LOGIN, PASSWORD) for engine in engines: master.admin("s = box.schema.space.create('%s', { engine = '%s'})" % (engine, engine)) master.admin("index = s:create_index('primary', {type = 'tree'})") ### gh-343: replica.cc must not add login and password to proc title #status = replica.get_param("status") 
#host_port = "%s:%s" % master.iproto.uri #m = re.search(r'replica/(.*)/.*', status) #if not m or m.group(1) != host_port: # print 'invalid box.info.status', status, 'expected host:port', host_port master_id = master.get_param('id') replica_id = replica.get_param('id') id = ID_BEGIN for i in range(REPEAT): print "test %d iteration" % i # insert to master insert_tuples(master, id, id + ID_STEP) # select from replica replica.wait_lsn(master_id, master.get_lsn(master_id)) select_tuples(replica, id, id + ID_STEP) id += ID_STEP # insert to master insert_tuples(master, id, id + ID_STEP) # select from replica replica.wait_lsn(master_id, master.get_lsn(master_id)) select_tuples(replica, id, id + ID_STEP) id += ID_STEP print "swap servers" # reconfigure replica to master replica.rpl_master = None print("switch replica to master") replica.admin("box.cfg{replication=''}") # reconfigure master to replica master.rpl_master = replica print("switch master to replica") master.admin("box.cfg{replication='%s'}" % replica.uri, silent=True) # insert to replica insert_tuples(replica, id, id + ID_STEP) # select from master master.wait_lsn(replica_id, replica.get_lsn(replica_id)) select_tuples(master, id, id + ID_STEP) id += ID_STEP # insert to replica insert_tuples(replica, id, id + ID_STEP) # select from master master.wait_lsn(replica_id, replica.get_lsn(replica_id)) select_tuples(master, id, id + ID_STEP) id += ID_STEP print "rollback servers configuration" # reconfigure replica to master master.rpl_master = None print("switch master to master") master.admin("box.cfg{replication=''}") # reconfigure master to replica replica.rpl_master = master print("switch replica to replica") replica.admin("box.cfg{replication='%s'}" % master.uri, silent=True) # Cleanup. 
replica.stop() replica.cleanup(True) server.stop() server.deploy() tarantool_1.9.1.26.g63eb81e3c/test/replication-py/swap.result0000664000000000000000000026732113306560010022343 0ustar rootrootbox.schema.user.create('test', { password = 'pass123456'}) --- ... box.schema.user.grant('test', 'read,write,execute', 'universe') --- ... while box.info.id == 0 do require('fiber').sleep(0.01) end --- ... while box.space['_priv']:len() < 1 do require('fiber').sleep(0.01) end --- ... s = box.schema.space.create('memtx', { engine = 'memtx'}) --- ... index = s:create_index('primary', {type = 'tree'}) --- ... s = box.schema.space.create('vinyl', { engine = 'vinyl'}) --- ... index = s:create_index('primary', {type = 'tree'}) --- ... test 0 iteration box.space.memtx:insert{0, "tuple 0"} - - [0, tuple 0] box.space.memtx:insert{1, "tuple 1"} - - [1, tuple 1] box.space.memtx:insert{2, "tuple 2"} - - [2, tuple 2] box.space.memtx:insert{3, "tuple 3"} - - [3, tuple 3] box.space.memtx:insert{4, "tuple 4"} - - [4, tuple 4] box.space.vinyl:insert{0, "tuple 0"} - - [0, tuple 0] box.space.vinyl:insert{1, "tuple 1"} - - [1, tuple 1] box.space.vinyl:insert{2, "tuple 2"} - - [2, tuple 2] box.space.vinyl:insert{3, "tuple 3"} - - [3, tuple 3] box.space.vinyl:insert{4, "tuple 4"} - - [4, tuple 4] box.space.memtx:select{0} - - [0, tuple 0] box.space.memtx:select{1} - - [1, tuple 1] box.space.memtx:select{2} - - [2, tuple 2] box.space.memtx:select{3} - - [3, tuple 3] box.space.memtx:select{4} - - [4, tuple 4] box.space.vinyl:select{0} - - [0, tuple 0] box.space.vinyl:select{1} - - [1, tuple 1] box.space.vinyl:select{2} - - [2, tuple 2] box.space.vinyl:select{3} - - [3, tuple 3] box.space.vinyl:select{4} - - [4, tuple 4] box.space.memtx:insert{5, "tuple 5"} - - [5, tuple 5] box.space.memtx:insert{6, "tuple 6"} - - [6, tuple 6] box.space.memtx:insert{7, "tuple 7"} - - [7, tuple 7] box.space.memtx:insert{8, "tuple 8"} - - [8, tuple 8] box.space.memtx:insert{9, "tuple 9"} - - [9, tuple 9] 
box.space.vinyl:insert{5, "tuple 5"} - - [5, tuple 5] box.space.vinyl:insert{6, "tuple 6"} - - [6, tuple 6] box.space.vinyl:insert{7, "tuple 7"} - - [7, tuple 7] box.space.vinyl:insert{8, "tuple 8"} - - [8, tuple 8] box.space.vinyl:insert{9, "tuple 9"} - - [9, tuple 9] box.space.memtx:select{5} - - [5, tuple 5] box.space.memtx:select{6} - - [6, tuple 6] box.space.memtx:select{7} - - [7, tuple 7] box.space.memtx:select{8} - - [8, tuple 8] box.space.memtx:select{9} - - [9, tuple 9] box.space.vinyl:select{5} - - [5, tuple 5] box.space.vinyl:select{6} - - [6, tuple 6] box.space.vinyl:select{7} - - [7, tuple 7] box.space.vinyl:select{8} - - [8, tuple 8] box.space.vinyl:select{9} - - [9, tuple 9] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{10, "tuple 10"} - - [10, tuple 10] box.space.memtx:insert{11, "tuple 11"} - - [11, tuple 11] box.space.memtx:insert{12, "tuple 12"} - - [12, tuple 12] box.space.memtx:insert{13, "tuple 13"} - - [13, tuple 13] box.space.memtx:insert{14, "tuple 14"} - - [14, tuple 14] box.space.vinyl:insert{10, "tuple 10"} - - [10, tuple 10] box.space.vinyl:insert{11, "tuple 11"} - - [11, tuple 11] box.space.vinyl:insert{12, "tuple 12"} - - [12, tuple 12] box.space.vinyl:insert{13, "tuple 13"} - - [13, tuple 13] box.space.vinyl:insert{14, "tuple 14"} - - [14, tuple 14] box.space.memtx:select{10} - - [10, tuple 10] box.space.memtx:select{11} - - [11, tuple 11] box.space.memtx:select{12} - - [12, tuple 12] box.space.memtx:select{13} - - [13, tuple 13] box.space.memtx:select{14} - - [14, tuple 14] box.space.vinyl:select{10} - - [10, tuple 10] box.space.vinyl:select{11} - - [11, tuple 11] box.space.vinyl:select{12} - - [12, tuple 12] box.space.vinyl:select{13} - - [13, tuple 13] box.space.vinyl:select{14} - - [14, tuple 14] box.space.memtx:insert{15, "tuple 15"} - - [15, tuple 15] box.space.memtx:insert{16, "tuple 16"} - - [16, tuple 16] box.space.memtx:insert{17, "tuple 17"} - - [17, 
tuple 17] box.space.memtx:insert{18, "tuple 18"} - - [18, tuple 18] box.space.memtx:insert{19, "tuple 19"} - - [19, tuple 19] box.space.vinyl:insert{15, "tuple 15"} - - [15, tuple 15] box.space.vinyl:insert{16, "tuple 16"} - - [16, tuple 16] box.space.vinyl:insert{17, "tuple 17"} - - [17, tuple 17] box.space.vinyl:insert{18, "tuple 18"} - - [18, tuple 18] box.space.vinyl:insert{19, "tuple 19"} - - [19, tuple 19] box.space.memtx:select{15} - - [15, tuple 15] box.space.memtx:select{16} - - [16, tuple 16] box.space.memtx:select{17} - - [17, tuple 17] box.space.memtx:select{18} - - [18, tuple 18] box.space.memtx:select{19} - - [19, tuple 19] box.space.vinyl:select{15} - - [15, tuple 15] box.space.vinyl:select{16} - - [16, tuple 16] box.space.vinyl:select{17} - - [17, tuple 17] box.space.vinyl:select{18} - - [18, tuple 18] box.space.vinyl:select{19} - - [19, tuple 19] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 1 iteration box.space.memtx:insert{20, "tuple 20"} - - [20, tuple 20] box.space.memtx:insert{21, "tuple 21"} - - [21, tuple 21] box.space.memtx:insert{22, "tuple 22"} - - [22, tuple 22] box.space.memtx:insert{23, "tuple 23"} - - [23, tuple 23] box.space.memtx:insert{24, "tuple 24"} - - [24, tuple 24] box.space.vinyl:insert{20, "tuple 20"} - - [20, tuple 20] box.space.vinyl:insert{21, "tuple 21"} - - [21, tuple 21] box.space.vinyl:insert{22, "tuple 22"} - - [22, tuple 22] box.space.vinyl:insert{23, "tuple 23"} - - [23, tuple 23] box.space.vinyl:insert{24, "tuple 24"} - - [24, tuple 24] box.space.memtx:select{20} - - [20, tuple 20] box.space.memtx:select{21} - - [21, tuple 21] box.space.memtx:select{22} - - [22, tuple 22] box.space.memtx:select{23} - - [23, tuple 23] box.space.memtx:select{24} - - [24, tuple 24] box.space.vinyl:select{20} - - [20, tuple 20] box.space.vinyl:select{21} - - [21, tuple 21] box.space.vinyl:select{22} - - [22, tuple 22] box.space.vinyl:select{23} - - [23, tuple 23] 
box.space.vinyl:select{24} - - [24, tuple 24] box.space.memtx:insert{25, "tuple 25"} - - [25, tuple 25] box.space.memtx:insert{26, "tuple 26"} - - [26, tuple 26] box.space.memtx:insert{27, "tuple 27"} - - [27, tuple 27] box.space.memtx:insert{28, "tuple 28"} - - [28, tuple 28] box.space.memtx:insert{29, "tuple 29"} - - [29, tuple 29] box.space.vinyl:insert{25, "tuple 25"} - - [25, tuple 25] box.space.vinyl:insert{26, "tuple 26"} - - [26, tuple 26] box.space.vinyl:insert{27, "tuple 27"} - - [27, tuple 27] box.space.vinyl:insert{28, "tuple 28"} - - [28, tuple 28] box.space.vinyl:insert{29, "tuple 29"} - - [29, tuple 29] box.space.memtx:select{25} - - [25, tuple 25] box.space.memtx:select{26} - - [26, tuple 26] box.space.memtx:select{27} - - [27, tuple 27] box.space.memtx:select{28} - - [28, tuple 28] box.space.memtx:select{29} - - [29, tuple 29] box.space.vinyl:select{25} - - [25, tuple 25] box.space.vinyl:select{26} - - [26, tuple 26] box.space.vinyl:select{27} - - [27, tuple 27] box.space.vinyl:select{28} - - [28, tuple 28] box.space.vinyl:select{29} - - [29, tuple 29] swap servers switch replica to master box.cfg{replication=''} --- ... 
switch master to replica box.space.memtx:insert{30, "tuple 30"} - - [30, tuple 30] box.space.memtx:insert{31, "tuple 31"} - - [31, tuple 31] box.space.memtx:insert{32, "tuple 32"} - - [32, tuple 32] box.space.memtx:insert{33, "tuple 33"} - - [33, tuple 33] box.space.memtx:insert{34, "tuple 34"} - - [34, tuple 34] box.space.vinyl:insert{30, "tuple 30"} - - [30, tuple 30] box.space.vinyl:insert{31, "tuple 31"} - - [31, tuple 31] box.space.vinyl:insert{32, "tuple 32"} - - [32, tuple 32] box.space.vinyl:insert{33, "tuple 33"} - - [33, tuple 33] box.space.vinyl:insert{34, "tuple 34"} - - [34, tuple 34] box.space.memtx:select{30} - - [30, tuple 30] box.space.memtx:select{31} - - [31, tuple 31] box.space.memtx:select{32} - - [32, tuple 32] box.space.memtx:select{33} - - [33, tuple 33] box.space.memtx:select{34} - - [34, tuple 34] box.space.vinyl:select{30} - - [30, tuple 30] box.space.vinyl:select{31} - - [31, tuple 31] box.space.vinyl:select{32} - - [32, tuple 32] box.space.vinyl:select{33} - - [33, tuple 33] box.space.vinyl:select{34} - - [34, tuple 34] box.space.memtx:insert{35, "tuple 35"} - - [35, tuple 35] box.space.memtx:insert{36, "tuple 36"} - - [36, tuple 36] box.space.memtx:insert{37, "tuple 37"} - - [37, tuple 37] box.space.memtx:insert{38, "tuple 38"} - - [38, tuple 38] box.space.memtx:insert{39, "tuple 39"} - - [39, tuple 39] box.space.vinyl:insert{35, "tuple 35"} - - [35, tuple 35] box.space.vinyl:insert{36, "tuple 36"} - - [36, tuple 36] box.space.vinyl:insert{37, "tuple 37"} - - [37, tuple 37] box.space.vinyl:insert{38, "tuple 38"} - - [38, tuple 38] box.space.vinyl:insert{39, "tuple 39"} - - [39, tuple 39] box.space.memtx:select{35} - - [35, tuple 35] box.space.memtx:select{36} - - [36, tuple 36] box.space.memtx:select{37} - - [37, tuple 37] box.space.memtx:select{38} - - [38, tuple 38] box.space.memtx:select{39} - - [39, tuple 39] box.space.vinyl:select{35} - - [35, tuple 35] box.space.vinyl:select{36} - - [36, tuple 36] box.space.vinyl:select{37} - - 
[37, tuple 37] box.space.vinyl:select{38} - - [38, tuple 38] box.space.vinyl:select{39} - - [39, tuple 39] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 2 iteration box.space.memtx:insert{40, "tuple 40"} - - [40, tuple 40] box.space.memtx:insert{41, "tuple 41"} - - [41, tuple 41] box.space.memtx:insert{42, "tuple 42"} - - [42, tuple 42] box.space.memtx:insert{43, "tuple 43"} - - [43, tuple 43] box.space.memtx:insert{44, "tuple 44"} - - [44, tuple 44] box.space.vinyl:insert{40, "tuple 40"} - - [40, tuple 40] box.space.vinyl:insert{41, "tuple 41"} - - [41, tuple 41] box.space.vinyl:insert{42, "tuple 42"} - - [42, tuple 42] box.space.vinyl:insert{43, "tuple 43"} - - [43, tuple 43] box.space.vinyl:insert{44, "tuple 44"} - - [44, tuple 44] box.space.memtx:select{40} - - [40, tuple 40] box.space.memtx:select{41} - - [41, tuple 41] box.space.memtx:select{42} - - [42, tuple 42] box.space.memtx:select{43} - - [43, tuple 43] box.space.memtx:select{44} - - [44, tuple 44] box.space.vinyl:select{40} - - [40, tuple 40] box.space.vinyl:select{41} - - [41, tuple 41] box.space.vinyl:select{42} - - [42, tuple 42] box.space.vinyl:select{43} - - [43, tuple 43] box.space.vinyl:select{44} - - [44, tuple 44] box.space.memtx:insert{45, "tuple 45"} - - [45, tuple 45] box.space.memtx:insert{46, "tuple 46"} - - [46, tuple 46] box.space.memtx:insert{47, "tuple 47"} - - [47, tuple 47] box.space.memtx:insert{48, "tuple 48"} - - [48, tuple 48] box.space.memtx:insert{49, "tuple 49"} - - [49, tuple 49] box.space.vinyl:insert{45, "tuple 45"} - - [45, tuple 45] box.space.vinyl:insert{46, "tuple 46"} - - [46, tuple 46] box.space.vinyl:insert{47, "tuple 47"} - - [47, tuple 47] box.space.vinyl:insert{48, "tuple 48"} - - [48, tuple 48] box.space.vinyl:insert{49, "tuple 49"} - - [49, tuple 49] box.space.memtx:select{45} - - [45, tuple 45] box.space.memtx:select{46} - - [46, tuple 46] box.space.memtx:select{47} - - [47, tuple 47] 
box.space.memtx:select{48} - - [48, tuple 48] box.space.memtx:select{49} - - [49, tuple 49] box.space.vinyl:select{45} - - [45, tuple 45] box.space.vinyl:select{46} - - [46, tuple 46] box.space.vinyl:select{47} - - [47, tuple 47] box.space.vinyl:select{48} - - [48, tuple 48] box.space.vinyl:select{49} - - [49, tuple 49] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{50, "tuple 50"} - - [50, tuple 50] box.space.memtx:insert{51, "tuple 51"} - - [51, tuple 51] box.space.memtx:insert{52, "tuple 52"} - - [52, tuple 52] box.space.memtx:insert{53, "tuple 53"} - - [53, tuple 53] box.space.memtx:insert{54, "tuple 54"} - - [54, tuple 54] box.space.vinyl:insert{50, "tuple 50"} - - [50, tuple 50] box.space.vinyl:insert{51, "tuple 51"} - - [51, tuple 51] box.space.vinyl:insert{52, "tuple 52"} - - [52, tuple 52] box.space.vinyl:insert{53, "tuple 53"} - - [53, tuple 53] box.space.vinyl:insert{54, "tuple 54"} - - [54, tuple 54] box.space.memtx:select{50} - - [50, tuple 50] box.space.memtx:select{51} - - [51, tuple 51] box.space.memtx:select{52} - - [52, tuple 52] box.space.memtx:select{53} - - [53, tuple 53] box.space.memtx:select{54} - - [54, tuple 54] box.space.vinyl:select{50} - - [50, tuple 50] box.space.vinyl:select{51} - - [51, tuple 51] box.space.vinyl:select{52} - - [52, tuple 52] box.space.vinyl:select{53} - - [53, tuple 53] box.space.vinyl:select{54} - - [54, tuple 54] box.space.memtx:insert{55, "tuple 55"} - - [55, tuple 55] box.space.memtx:insert{56, "tuple 56"} - - [56, tuple 56] box.space.memtx:insert{57, "tuple 57"} - - [57, tuple 57] box.space.memtx:insert{58, "tuple 58"} - - [58, tuple 58] box.space.memtx:insert{59, "tuple 59"} - - [59, tuple 59] box.space.vinyl:insert{55, "tuple 55"} - - [55, tuple 55] box.space.vinyl:insert{56, "tuple 56"} - - [56, tuple 56] box.space.vinyl:insert{57, "tuple 57"} - - [57, tuple 57] box.space.vinyl:insert{58, "tuple 58"} - - [58, tuple 58] 
box.space.vinyl:insert{59, "tuple 59"} - - [59, tuple 59] box.space.memtx:select{55} - - [55, tuple 55] box.space.memtx:select{56} - - [56, tuple 56] box.space.memtx:select{57} - - [57, tuple 57] box.space.memtx:select{58} - - [58, tuple 58] box.space.memtx:select{59} - - [59, tuple 59] box.space.vinyl:select{55} - - [55, tuple 55] box.space.vinyl:select{56} - - [56, tuple 56] box.space.vinyl:select{57} - - [57, tuple 57] box.space.vinyl:select{58} - - [58, tuple 58] box.space.vinyl:select{59} - - [59, tuple 59] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 3 iteration box.space.memtx:insert{60, "tuple 60"} - - [60, tuple 60] box.space.memtx:insert{61, "tuple 61"} - - [61, tuple 61] box.space.memtx:insert{62, "tuple 62"} - - [62, tuple 62] box.space.memtx:insert{63, "tuple 63"} - - [63, tuple 63] box.space.memtx:insert{64, "tuple 64"} - - [64, tuple 64] box.space.vinyl:insert{60, "tuple 60"} - - [60, tuple 60] box.space.vinyl:insert{61, "tuple 61"} - - [61, tuple 61] box.space.vinyl:insert{62, "tuple 62"} - - [62, tuple 62] box.space.vinyl:insert{63, "tuple 63"} - - [63, tuple 63] box.space.vinyl:insert{64, "tuple 64"} - - [64, tuple 64] box.space.memtx:select{60} - - [60, tuple 60] box.space.memtx:select{61} - - [61, tuple 61] box.space.memtx:select{62} - - [62, tuple 62] box.space.memtx:select{63} - - [63, tuple 63] box.space.memtx:select{64} - - [64, tuple 64] box.space.vinyl:select{60} - - [60, tuple 60] box.space.vinyl:select{61} - - [61, tuple 61] box.space.vinyl:select{62} - - [62, tuple 62] box.space.vinyl:select{63} - - [63, tuple 63] box.space.vinyl:select{64} - - [64, tuple 64] box.space.memtx:insert{65, "tuple 65"} - - [65, tuple 65] box.space.memtx:insert{66, "tuple 66"} - - [66, tuple 66] box.space.memtx:insert{67, "tuple 67"} - - [67, tuple 67] box.space.memtx:insert{68, "tuple 68"} - - [68, tuple 68] box.space.memtx:insert{69, "tuple 69"} - - [69, tuple 69] 
box.space.vinyl:insert{65, "tuple 65"} - - [65, tuple 65] box.space.vinyl:insert{66, "tuple 66"} - - [66, tuple 66] box.space.vinyl:insert{67, "tuple 67"} - - [67, tuple 67] box.space.vinyl:insert{68, "tuple 68"} - - [68, tuple 68] box.space.vinyl:insert{69, "tuple 69"} - - [69, tuple 69] box.space.memtx:select{65} - - [65, tuple 65] box.space.memtx:select{66} - - [66, tuple 66] box.space.memtx:select{67} - - [67, tuple 67] box.space.memtx:select{68} - - [68, tuple 68] box.space.memtx:select{69} - - [69, tuple 69] box.space.vinyl:select{65} - - [65, tuple 65] box.space.vinyl:select{66} - - [66, tuple 66] box.space.vinyl:select{67} - - [67, tuple 67] box.space.vinyl:select{68} - - [68, tuple 68] box.space.vinyl:select{69} - - [69, tuple 69] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{70, "tuple 70"} - - [70, tuple 70] box.space.memtx:insert{71, "tuple 71"} - - [71, tuple 71] box.space.memtx:insert{72, "tuple 72"} - - [72, tuple 72] box.space.memtx:insert{73, "tuple 73"} - - [73, tuple 73] box.space.memtx:insert{74, "tuple 74"} - - [74, tuple 74] box.space.vinyl:insert{70, "tuple 70"} - - [70, tuple 70] box.space.vinyl:insert{71, "tuple 71"} - - [71, tuple 71] box.space.vinyl:insert{72, "tuple 72"} - - [72, tuple 72] box.space.vinyl:insert{73, "tuple 73"} - - [73, tuple 73] box.space.vinyl:insert{74, "tuple 74"} - - [74, tuple 74] box.space.memtx:select{70} - - [70, tuple 70] box.space.memtx:select{71} - - [71, tuple 71] box.space.memtx:select{72} - - [72, tuple 72] box.space.memtx:select{73} - - [73, tuple 73] box.space.memtx:select{74} - - [74, tuple 74] box.space.vinyl:select{70} - - [70, tuple 70] box.space.vinyl:select{71} - - [71, tuple 71] box.space.vinyl:select{72} - - [72, tuple 72] box.space.vinyl:select{73} - - [73, tuple 73] box.space.vinyl:select{74} - - [74, tuple 74] box.space.memtx:insert{75, "tuple 75"} - - [75, tuple 75] box.space.memtx:insert{76, "tuple 76"} - - [76, tuple 
76] box.space.memtx:insert{77, "tuple 77"} - - [77, tuple 77] box.space.memtx:insert{78, "tuple 78"} - - [78, tuple 78] box.space.memtx:insert{79, "tuple 79"} - - [79, tuple 79] box.space.vinyl:insert{75, "tuple 75"} - - [75, tuple 75] box.space.vinyl:insert{76, "tuple 76"} - - [76, tuple 76] box.space.vinyl:insert{77, "tuple 77"} - - [77, tuple 77] box.space.vinyl:insert{78, "tuple 78"} - - [78, tuple 78] box.space.vinyl:insert{79, "tuple 79"} - - [79, tuple 79] box.space.memtx:select{75} - - [75, tuple 75] box.space.memtx:select{76} - - [76, tuple 76] box.space.memtx:select{77} - - [77, tuple 77] box.space.memtx:select{78} - - [78, tuple 78] box.space.memtx:select{79} - - [79, tuple 79] box.space.vinyl:select{75} - - [75, tuple 75] box.space.vinyl:select{76} - - [76, tuple 76] box.space.vinyl:select{77} - - [77, tuple 77] box.space.vinyl:select{78} - - [78, tuple 78] box.space.vinyl:select{79} - - [79, tuple 79] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 4 iteration box.space.memtx:insert{80, "tuple 80"} - - [80, tuple 80] box.space.memtx:insert{81, "tuple 81"} - - [81, tuple 81] box.space.memtx:insert{82, "tuple 82"} - - [82, tuple 82] box.space.memtx:insert{83, "tuple 83"} - - [83, tuple 83] box.space.memtx:insert{84, "tuple 84"} - - [84, tuple 84] box.space.vinyl:insert{80, "tuple 80"} - - [80, tuple 80] box.space.vinyl:insert{81, "tuple 81"} - - [81, tuple 81] box.space.vinyl:insert{82, "tuple 82"} - - [82, tuple 82] box.space.vinyl:insert{83, "tuple 83"} - - [83, tuple 83] box.space.vinyl:insert{84, "tuple 84"} - - [84, tuple 84] box.space.memtx:select{80} - - [80, tuple 80] box.space.memtx:select{81} - - [81, tuple 81] box.space.memtx:select{82} - - [82, tuple 82] box.space.memtx:select{83} - - [83, tuple 83] box.space.memtx:select{84} - - [84, tuple 84] box.space.vinyl:select{80} - - [80, tuple 80] box.space.vinyl:select{81} - - [81, tuple 81] box.space.vinyl:select{82} - - [82, tuple 82] box.space.vinyl:select{83} - - [83, tuple 83] box.space.vinyl:select{84} - - [84, tuple 84] box.space.memtx:insert{85, "tuple 85"} - - [85, tuple 85] box.space.memtx:insert{86, "tuple 86"} - - [86, tuple 86] box.space.memtx:insert{87, "tuple 87"} - - [87, tuple 87] box.space.memtx:insert{88, "tuple 88"} - - [88, tuple 88] box.space.memtx:insert{89, "tuple 89"} - - [89, tuple 89] box.space.vinyl:insert{85, "tuple 85"} - - [85, tuple 85] box.space.vinyl:insert{86, "tuple 86"} - - [86, tuple 86] box.space.vinyl:insert{87, "tuple 87"} - - [87, tuple 87] box.space.vinyl:insert{88, "tuple 88"} - - [88, tuple 88] box.space.vinyl:insert{89, "tuple 89"} - - [89, tuple 89] box.space.memtx:select{85} - - [85, tuple 85] box.space.memtx:select{86} - - [86, tuple 86] box.space.memtx:select{87} - - [87, tuple 87] box.space.memtx:select{88} - - [88, tuple 88] box.space.memtx:select{89} - - [89, tuple 89] box.space.vinyl:select{85} - - [85, tuple 85] box.space.vinyl:select{86} - - [86, tuple 86] 
box.space.vinyl:select{87} - - [87, tuple 87] box.space.vinyl:select{88} - - [88, tuple 88] box.space.vinyl:select{89} - - [89, tuple 89] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{90, "tuple 90"} - - [90, tuple 90] box.space.memtx:insert{91, "tuple 91"} - - [91, tuple 91] box.space.memtx:insert{92, "tuple 92"} - - [92, tuple 92] box.space.memtx:insert{93, "tuple 93"} - - [93, tuple 93] box.space.memtx:insert{94, "tuple 94"} - - [94, tuple 94] box.space.vinyl:insert{90, "tuple 90"} - - [90, tuple 90] box.space.vinyl:insert{91, "tuple 91"} - - [91, tuple 91] box.space.vinyl:insert{92, "tuple 92"} - - [92, tuple 92] box.space.vinyl:insert{93, "tuple 93"} - - [93, tuple 93] box.space.vinyl:insert{94, "tuple 94"} - - [94, tuple 94] box.space.memtx:select{90} - - [90, tuple 90] box.space.memtx:select{91} - - [91, tuple 91] box.space.memtx:select{92} - - [92, tuple 92] box.space.memtx:select{93} - - [93, tuple 93] box.space.memtx:select{94} - - [94, tuple 94] box.space.vinyl:select{90} - - [90, tuple 90] box.space.vinyl:select{91} - - [91, tuple 91] box.space.vinyl:select{92} - - [92, tuple 92] box.space.vinyl:select{93} - - [93, tuple 93] box.space.vinyl:select{94} - - [94, tuple 94] box.space.memtx:insert{95, "tuple 95"} - - [95, tuple 95] box.space.memtx:insert{96, "tuple 96"} - - [96, tuple 96] box.space.memtx:insert{97, "tuple 97"} - - [97, tuple 97] box.space.memtx:insert{98, "tuple 98"} - - [98, tuple 98] box.space.memtx:insert{99, "tuple 99"} - - [99, tuple 99] box.space.vinyl:insert{95, "tuple 95"} - - [95, tuple 95] box.space.vinyl:insert{96, "tuple 96"} - - [96, tuple 96] box.space.vinyl:insert{97, "tuple 97"} - - [97, tuple 97] box.space.vinyl:insert{98, "tuple 98"} - - [98, tuple 98] box.space.vinyl:insert{99, "tuple 99"} - - [99, tuple 99] box.space.memtx:select{95} - - [95, tuple 95] box.space.memtx:select{96} - - [96, tuple 96] box.space.memtx:select{97} - - [97, tuple 97] 
box.space.memtx:select{98} - - [98, tuple 98] box.space.memtx:select{99} - - [99, tuple 99] box.space.vinyl:select{95} - - [95, tuple 95] box.space.vinyl:select{96} - - [96, tuple 96] box.space.vinyl:select{97} - - [97, tuple 97] box.space.vinyl:select{98} - - [98, tuple 98] box.space.vinyl:select{99} - - [99, tuple 99] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 5 iteration box.space.memtx:insert{100, "tuple 100"} - - [100, tuple 100] box.space.memtx:insert{101, "tuple 101"} - - [101, tuple 101] box.space.memtx:insert{102, "tuple 102"} - - [102, tuple 102] box.space.memtx:insert{103, "tuple 103"} - - [103, tuple 103] box.space.memtx:insert{104, "tuple 104"} - - [104, tuple 104] box.space.vinyl:insert{100, "tuple 100"} - - [100, tuple 100] box.space.vinyl:insert{101, "tuple 101"} - - [101, tuple 101] box.space.vinyl:insert{102, "tuple 102"} - - [102, tuple 102] box.space.vinyl:insert{103, "tuple 103"} - - [103, tuple 103] box.space.vinyl:insert{104, "tuple 104"} - - [104, tuple 104] box.space.memtx:select{100} - - [100, tuple 100] box.space.memtx:select{101} - - [101, tuple 101] box.space.memtx:select{102} - - [102, tuple 102] box.space.memtx:select{103} - - [103, tuple 103] box.space.memtx:select{104} - - [104, tuple 104] box.space.vinyl:select{100} - - [100, tuple 100] box.space.vinyl:select{101} - - [101, tuple 101] box.space.vinyl:select{102} - - [102, tuple 102] box.space.vinyl:select{103} - - [103, tuple 103] box.space.vinyl:select{104} - - [104, tuple 104] box.space.memtx:insert{105, "tuple 105"} - - [105, tuple 105] box.space.memtx:insert{106, "tuple 106"} - - [106, tuple 106] box.space.memtx:insert{107, "tuple 107"} - - [107, tuple 107] box.space.memtx:insert{108, "tuple 108"} - - [108, tuple 108] box.space.memtx:insert{109, "tuple 109"} - - [109, tuple 109] box.space.vinyl:insert{105, "tuple 105"} - - [105, tuple 105] box.space.vinyl:insert{106, "tuple 106"} - - [106, tuple 106] 
box.space.vinyl:insert{107, "tuple 107"} - - [107, tuple 107] box.space.vinyl:insert{108, "tuple 108"} - - [108, tuple 108] box.space.vinyl:insert{109, "tuple 109"} - - [109, tuple 109] box.space.memtx:select{105} - - [105, tuple 105] box.space.memtx:select{106} - - [106, tuple 106] box.space.memtx:select{107} - - [107, tuple 107] box.space.memtx:select{108} - - [108, tuple 108] box.space.memtx:select{109} - - [109, tuple 109] box.space.vinyl:select{105} - - [105, tuple 105] box.space.vinyl:select{106} - - [106, tuple 106] box.space.vinyl:select{107} - - [107, tuple 107] box.space.vinyl:select{108} - - [108, tuple 108] box.space.vinyl:select{109} - - [109, tuple 109] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{110, "tuple 110"} - - [110, tuple 110] box.space.memtx:insert{111, "tuple 111"} - - [111, tuple 111] box.space.memtx:insert{112, "tuple 112"} - - [112, tuple 112] box.space.memtx:insert{113, "tuple 113"} - - [113, tuple 113] box.space.memtx:insert{114, "tuple 114"} - - [114, tuple 114] box.space.vinyl:insert{110, "tuple 110"} - - [110, tuple 110] box.space.vinyl:insert{111, "tuple 111"} - - [111, tuple 111] box.space.vinyl:insert{112, "tuple 112"} - - [112, tuple 112] box.space.vinyl:insert{113, "tuple 113"} - - [113, tuple 113] box.space.vinyl:insert{114, "tuple 114"} - - [114, tuple 114] box.space.memtx:select{110} - - [110, tuple 110] box.space.memtx:select{111} - - [111, tuple 111] box.space.memtx:select{112} - - [112, tuple 112] box.space.memtx:select{113} - - [113, tuple 113] box.space.memtx:select{114} - - [114, tuple 114] box.space.vinyl:select{110} - - [110, tuple 110] box.space.vinyl:select{111} - - [111, tuple 111] box.space.vinyl:select{112} - - [112, tuple 112] box.space.vinyl:select{113} - - [113, tuple 113] box.space.vinyl:select{114} - - [114, tuple 114] box.space.memtx:insert{115, "tuple 115"} - - [115, tuple 115] box.space.memtx:insert{116, "tuple 116"} - - [116, tuple 
116] box.space.memtx:insert{117, "tuple 117"} - - [117, tuple 117] box.space.memtx:insert{118, "tuple 118"} - - [118, tuple 118] box.space.memtx:insert{119, "tuple 119"} - - [119, tuple 119] box.space.vinyl:insert{115, "tuple 115"} - - [115, tuple 115] box.space.vinyl:insert{116, "tuple 116"} - - [116, tuple 116] box.space.vinyl:insert{117, "tuple 117"} - - [117, tuple 117] box.space.vinyl:insert{118, "tuple 118"} - - [118, tuple 118] box.space.vinyl:insert{119, "tuple 119"} - - [119, tuple 119] box.space.memtx:select{115} - - [115, tuple 115] box.space.memtx:select{116} - - [116, tuple 116] box.space.memtx:select{117} - - [117, tuple 117] box.space.memtx:select{118} - - [118, tuple 118] box.space.memtx:select{119} - - [119, tuple 119] box.space.vinyl:select{115} - - [115, tuple 115] box.space.vinyl:select{116} - - [116, tuple 116] box.space.vinyl:select{117} - - [117, tuple 117] box.space.vinyl:select{118} - - [118, tuple 118] box.space.vinyl:select{119} - - [119, tuple 119] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 6 iteration box.space.memtx:insert{120, "tuple 120"} - - [120, tuple 120] box.space.memtx:insert{121, "tuple 121"} - - [121, tuple 121] box.space.memtx:insert{122, "tuple 122"} - - [122, tuple 122] box.space.memtx:insert{123, "tuple 123"} - - [123, tuple 123] box.space.memtx:insert{124, "tuple 124"} - - [124, tuple 124] box.space.vinyl:insert{120, "tuple 120"} - - [120, tuple 120] box.space.vinyl:insert{121, "tuple 121"} - - [121, tuple 121] box.space.vinyl:insert{122, "tuple 122"} - - [122, tuple 122] box.space.vinyl:insert{123, "tuple 123"} - - [123, tuple 123] box.space.vinyl:insert{124, "tuple 124"} - - [124, tuple 124] box.space.memtx:select{120} - - [120, tuple 120] box.space.memtx:select{121} - - [121, tuple 121] box.space.memtx:select{122} - - [122, tuple 122] box.space.memtx:select{123} - - [123, tuple 123] box.space.memtx:select{124} - - [124, tuple 124] box.space.vinyl:select{120} - - [120, tuple 120] box.space.vinyl:select{121} - - [121, tuple 121] box.space.vinyl:select{122} - - [122, tuple 122] box.space.vinyl:select{123} - - [123, tuple 123] box.space.vinyl:select{124} - - [124, tuple 124] box.space.memtx:insert{125, "tuple 125"} - - [125, tuple 125] box.space.memtx:insert{126, "tuple 126"} - - [126, tuple 126] box.space.memtx:insert{127, "tuple 127"} - - [127, tuple 127] box.space.memtx:insert{128, "tuple 128"} - - [128, tuple 128] box.space.memtx:insert{129, "tuple 129"} - - [129, tuple 129] box.space.vinyl:insert{125, "tuple 125"} - - [125, tuple 125] box.space.vinyl:insert{126, "tuple 126"} - - [126, tuple 126] box.space.vinyl:insert{127, "tuple 127"} - - [127, tuple 127] box.space.vinyl:insert{128, "tuple 128"} - - [128, tuple 128] box.space.vinyl:insert{129, "tuple 129"} - - [129, tuple 129] box.space.memtx:select{125} - - [125, tuple 125] box.space.memtx:select{126} - - [126, tuple 126] box.space.memtx:select{127} - - [127, tuple 127] box.space.memtx:select{128} - - [128, tuple 128] box.space.memtx:select{129} - 
- [129, tuple 129] box.space.vinyl:select{125} - - [125, tuple 125] box.space.vinyl:select{126} - - [126, tuple 126] box.space.vinyl:select{127} - - [127, tuple 127] box.space.vinyl:select{128} - - [128, tuple 128] box.space.vinyl:select{129} - - [129, tuple 129] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{130, "tuple 130"} - - [130, tuple 130] box.space.memtx:insert{131, "tuple 131"} - - [131, tuple 131] box.space.memtx:insert{132, "tuple 132"} - - [132, tuple 132] box.space.memtx:insert{133, "tuple 133"} - - [133, tuple 133] box.space.memtx:insert{134, "tuple 134"} - - [134, tuple 134] box.space.vinyl:insert{130, "tuple 130"} - - [130, tuple 130] box.space.vinyl:insert{131, "tuple 131"} - - [131, tuple 131] box.space.vinyl:insert{132, "tuple 132"} - - [132, tuple 132] box.space.vinyl:insert{133, "tuple 133"} - - [133, tuple 133] box.space.vinyl:insert{134, "tuple 134"} - - [134, tuple 134] box.space.memtx:select{130} - - [130, tuple 130] box.space.memtx:select{131} - - [131, tuple 131] box.space.memtx:select{132} - - [132, tuple 132] box.space.memtx:select{133} - - [133, tuple 133] box.space.memtx:select{134} - - [134, tuple 134] box.space.vinyl:select{130} - - [130, tuple 130] box.space.vinyl:select{131} - - [131, tuple 131] box.space.vinyl:select{132} - - [132, tuple 132] box.space.vinyl:select{133} - - [133, tuple 133] box.space.vinyl:select{134} - - [134, tuple 134] box.space.memtx:insert{135, "tuple 135"} - - [135, tuple 135] box.space.memtx:insert{136, "tuple 136"} - - [136, tuple 136] box.space.memtx:insert{137, "tuple 137"} - - [137, tuple 137] box.space.memtx:insert{138, "tuple 138"} - - [138, tuple 138] box.space.memtx:insert{139, "tuple 139"} - - [139, tuple 139] box.space.vinyl:insert{135, "tuple 135"} - - [135, tuple 135] box.space.vinyl:insert{136, "tuple 136"} - - [136, tuple 136] box.space.vinyl:insert{137, "tuple 137"} - - [137, tuple 137] box.space.vinyl:insert{138, "tuple 
138"} - - [138, tuple 138] box.space.vinyl:insert{139, "tuple 139"} - - [139, tuple 139] box.space.memtx:select{135} - - [135, tuple 135] box.space.memtx:select{136} - - [136, tuple 136] box.space.memtx:select{137} - - [137, tuple 137] box.space.memtx:select{138} - - [138, tuple 138] box.space.memtx:select{139} - - [139, tuple 139] box.space.vinyl:select{135} - - [135, tuple 135] box.space.vinyl:select{136} - - [136, tuple 136] box.space.vinyl:select{137} - - [137, tuple 137] box.space.vinyl:select{138} - - [138, tuple 138] box.space.vinyl:select{139} - - [139, tuple 139] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 7 iteration box.space.memtx:insert{140, "tuple 140"} - - [140, tuple 140] box.space.memtx:insert{141, "tuple 141"} - - [141, tuple 141] box.space.memtx:insert{142, "tuple 142"} - - [142, tuple 142] box.space.memtx:insert{143, "tuple 143"} - - [143, tuple 143] box.space.memtx:insert{144, "tuple 144"} - - [144, tuple 144] box.space.vinyl:insert{140, "tuple 140"} - - [140, tuple 140] box.space.vinyl:insert{141, "tuple 141"} - - [141, tuple 141] box.space.vinyl:insert{142, "tuple 142"} - - [142, tuple 142] box.space.vinyl:insert{143, "tuple 143"} - - [143, tuple 143] box.space.vinyl:insert{144, "tuple 144"} - - [144, tuple 144] box.space.memtx:select{140} - - [140, tuple 140] box.space.memtx:select{141} - - [141, tuple 141] box.space.memtx:select{142} - - [142, tuple 142] box.space.memtx:select{143} - - [143, tuple 143] box.space.memtx:select{144} - - [144, tuple 144] box.space.vinyl:select{140} - - [140, tuple 140] box.space.vinyl:select{141} - - [141, tuple 141] box.space.vinyl:select{142} - - [142, tuple 142] box.space.vinyl:select{143} - - [143, tuple 143] box.space.vinyl:select{144} - - [144, tuple 144] box.space.memtx:insert{145, "tuple 145"} - - [145, tuple 145] box.space.memtx:insert{146, "tuple 146"} - - [146, tuple 146] box.space.memtx:insert{147, "tuple 147"} - - [147, tuple 
147] box.space.memtx:insert{148, "tuple 148"} - - [148, tuple 148] box.space.memtx:insert{149, "tuple 149"} - - [149, tuple 149] box.space.vinyl:insert{145, "tuple 145"} - - [145, tuple 145] box.space.vinyl:insert{146, "tuple 146"} - - [146, tuple 146] box.space.vinyl:insert{147, "tuple 147"} - - [147, tuple 147] box.space.vinyl:insert{148, "tuple 148"} - - [148, tuple 148] box.space.vinyl:insert{149, "tuple 149"} - - [149, tuple 149] box.space.memtx:select{145} - - [145, tuple 145] box.space.memtx:select{146} - - [146, tuple 146] box.space.memtx:select{147} - - [147, tuple 147] box.space.memtx:select{148} - - [148, tuple 148] box.space.memtx:select{149} - - [149, tuple 149] box.space.vinyl:select{145} - - [145, tuple 145] box.space.vinyl:select{146} - - [146, tuple 146] box.space.vinyl:select{147} - - [147, tuple 147] box.space.vinyl:select{148} - - [148, tuple 148] box.space.vinyl:select{149} - - [149, tuple 149] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{150, "tuple 150"} - - [150, tuple 150] box.space.memtx:insert{151, "tuple 151"} - - [151, tuple 151] box.space.memtx:insert{152, "tuple 152"} - - [152, tuple 152] box.space.memtx:insert{153, "tuple 153"} - - [153, tuple 153] box.space.memtx:insert{154, "tuple 154"} - - [154, tuple 154] box.space.vinyl:insert{150, "tuple 150"} - - [150, tuple 150] box.space.vinyl:insert{151, "tuple 151"} - - [151, tuple 151] box.space.vinyl:insert{152, "tuple 152"} - - [152, tuple 152] box.space.vinyl:insert{153, "tuple 153"} - - [153, tuple 153] box.space.vinyl:insert{154, "tuple 154"} - - [154, tuple 154] box.space.memtx:select{150} - - [150, tuple 150] box.space.memtx:select{151} - - [151, tuple 151] box.space.memtx:select{152} - - [152, tuple 152] box.space.memtx:select{153} - - [153, tuple 153] box.space.memtx:select{154} - - [154, tuple 154] box.space.vinyl:select{150} - - [150, tuple 150] box.space.vinyl:select{151} - - [151, tuple 151] 
box.space.vinyl:select{152} - - [152, tuple 152] box.space.vinyl:select{153} - - [153, tuple 153] box.space.vinyl:select{154} - - [154, tuple 154] box.space.memtx:insert{155, "tuple 155"} - - [155, tuple 155] box.space.memtx:insert{156, "tuple 156"} - - [156, tuple 156] box.space.memtx:insert{157, "tuple 157"} - - [157, tuple 157] box.space.memtx:insert{158, "tuple 158"} - - [158, tuple 158] box.space.memtx:insert{159, "tuple 159"} - - [159, tuple 159] box.space.vinyl:insert{155, "tuple 155"} - - [155, tuple 155] box.space.vinyl:insert{156, "tuple 156"} - - [156, tuple 156] box.space.vinyl:insert{157, "tuple 157"} - - [157, tuple 157] box.space.vinyl:insert{158, "tuple 158"} - - [158, tuple 158] box.space.vinyl:insert{159, "tuple 159"} - - [159, tuple 159] box.space.memtx:select{155} - - [155, tuple 155] box.space.memtx:select{156} - - [156, tuple 156] box.space.memtx:select{157} - - [157, tuple 157] box.space.memtx:select{158} - - [158, tuple 158] box.space.memtx:select{159} - - [159, tuple 159] box.space.vinyl:select{155} - - [155, tuple 155] box.space.vinyl:select{156} - - [156, tuple 156] box.space.vinyl:select{157} - - [157, tuple 157] box.space.vinyl:select{158} - - [158, tuple 158] box.space.vinyl:select{159} - - [159, tuple 159] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 8 iteration box.space.memtx:insert{160, "tuple 160"} - - [160, tuple 160] box.space.memtx:insert{161, "tuple 161"} - - [161, tuple 161] box.space.memtx:insert{162, "tuple 162"} - - [162, tuple 162] box.space.memtx:insert{163, "tuple 163"} - - [163, tuple 163] box.space.memtx:insert{164, "tuple 164"} - - [164, tuple 164] box.space.vinyl:insert{160, "tuple 160"} - - [160, tuple 160] box.space.vinyl:insert{161, "tuple 161"} - - [161, tuple 161] box.space.vinyl:insert{162, "tuple 162"} - - [162, tuple 162] box.space.vinyl:insert{163, "tuple 163"} - - [163, tuple 163] box.space.vinyl:insert{164, "tuple 164"} - - [164, tuple 164] box.space.memtx:select{160} - - [160, tuple 160] box.space.memtx:select{161} - - [161, tuple 161] box.space.memtx:select{162} - - [162, tuple 162] box.space.memtx:select{163} - - [163, tuple 163] box.space.memtx:select{164} - - [164, tuple 164] box.space.vinyl:select{160} - - [160, tuple 160] box.space.vinyl:select{161} - - [161, tuple 161] box.space.vinyl:select{162} - - [162, tuple 162] box.space.vinyl:select{163} - - [163, tuple 163] box.space.vinyl:select{164} - - [164, tuple 164] box.space.memtx:insert{165, "tuple 165"} - - [165, tuple 165] box.space.memtx:insert{166, "tuple 166"} - - [166, tuple 166] box.space.memtx:insert{167, "tuple 167"} - - [167, tuple 167] box.space.memtx:insert{168, "tuple 168"} - - [168, tuple 168] box.space.memtx:insert{169, "tuple 169"} - - [169, tuple 169] box.space.vinyl:insert{165, "tuple 165"} - - [165, tuple 165] box.space.vinyl:insert{166, "tuple 166"} - - [166, tuple 166] box.space.vinyl:insert{167, "tuple 167"} - - [167, tuple 167] box.space.vinyl:insert{168, "tuple 168"} - - [168, tuple 168] box.space.vinyl:insert{169, "tuple 169"} - - [169, tuple 169] box.space.memtx:select{165} - - [165, tuple 165] box.space.memtx:select{166} - - [166, tuple 166] box.space.memtx:select{167} - - [167, tuple 167] box.space.memtx:select{168} - - [168, tuple 168] box.space.memtx:select{169} - 
- [169, tuple 169] box.space.vinyl:select{165} - - [165, tuple 165] box.space.vinyl:select{166} - - [166, tuple 166] box.space.vinyl:select{167} - - [167, tuple 167] box.space.vinyl:select{168} - - [168, tuple 168] box.space.vinyl:select{169} - - [169, tuple 169] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{170, "tuple 170"} - - [170, tuple 170] box.space.memtx:insert{171, "tuple 171"} - - [171, tuple 171] box.space.memtx:insert{172, "tuple 172"} - - [172, tuple 172] box.space.memtx:insert{173, "tuple 173"} - - [173, tuple 173] box.space.memtx:insert{174, "tuple 174"} - - [174, tuple 174] box.space.vinyl:insert{170, "tuple 170"} - - [170, tuple 170] box.space.vinyl:insert{171, "tuple 171"} - - [171, tuple 171] box.space.vinyl:insert{172, "tuple 172"} - - [172, tuple 172] box.space.vinyl:insert{173, "tuple 173"} - - [173, tuple 173] box.space.vinyl:insert{174, "tuple 174"} - - [174, tuple 174] box.space.memtx:select{170} - - [170, tuple 170] box.space.memtx:select{171} - - [171, tuple 171] box.space.memtx:select{172} - - [172, tuple 172] box.space.memtx:select{173} - - [173, tuple 173] box.space.memtx:select{174} - - [174, tuple 174] box.space.vinyl:select{170} - - [170, tuple 170] box.space.vinyl:select{171} - - [171, tuple 171] box.space.vinyl:select{172} - - [172, tuple 172] box.space.vinyl:select{173} - - [173, tuple 173] box.space.vinyl:select{174} - - [174, tuple 174] box.space.memtx:insert{175, "tuple 175"} - - [175, tuple 175] box.space.memtx:insert{176, "tuple 176"} - - [176, tuple 176] box.space.memtx:insert{177, "tuple 177"} - - [177, tuple 177] box.space.memtx:insert{178, "tuple 178"} - - [178, tuple 178] box.space.memtx:insert{179, "tuple 179"} - - [179, tuple 179] box.space.vinyl:insert{175, "tuple 175"} - - [175, tuple 175] box.space.vinyl:insert{176, "tuple 176"} - - [176, tuple 176] box.space.vinyl:insert{177, "tuple 177"} - - [177, tuple 177] box.space.vinyl:insert{178, "tuple 
178"} - - [178, tuple 178] box.space.vinyl:insert{179, "tuple 179"} - - [179, tuple 179] box.space.memtx:select{175} - - [175, tuple 175] box.space.memtx:select{176} - - [176, tuple 176] box.space.memtx:select{177} - - [177, tuple 177] box.space.memtx:select{178} - - [178, tuple 178] box.space.memtx:select{179} - - [179, tuple 179] box.space.vinyl:select{175} - - [175, tuple 175] box.space.vinyl:select{176} - - [176, tuple 176] box.space.vinyl:select{177} - - [177, tuple 177] box.space.vinyl:select{178} - - [178, tuple 178] box.space.vinyl:select{179} - - [179, tuple 179] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 9 iteration box.space.memtx:insert{180, "tuple 180"} - - [180, tuple 180] box.space.memtx:insert{181, "tuple 181"} - - [181, tuple 181] box.space.memtx:insert{182, "tuple 182"} - - [182, tuple 182] box.space.memtx:insert{183, "tuple 183"} - - [183, tuple 183] box.space.memtx:insert{184, "tuple 184"} - - [184, tuple 184] box.space.vinyl:insert{180, "tuple 180"} - - [180, tuple 180] box.space.vinyl:insert{181, "tuple 181"} - - [181, tuple 181] box.space.vinyl:insert{182, "tuple 182"} - - [182, tuple 182] box.space.vinyl:insert{183, "tuple 183"} - - [183, tuple 183] box.space.vinyl:insert{184, "tuple 184"} - - [184, tuple 184] box.space.memtx:select{180} - - [180, tuple 180] box.space.memtx:select{181} - - [181, tuple 181] box.space.memtx:select{182} - - [182, tuple 182] box.space.memtx:select{183} - - [183, tuple 183] box.space.memtx:select{184} - - [184, tuple 184] box.space.vinyl:select{180} - - [180, tuple 180] box.space.vinyl:select{181} - - [181, tuple 181] box.space.vinyl:select{182} - - [182, tuple 182] box.space.vinyl:select{183} - - [183, tuple 183] box.space.vinyl:select{184} - - [184, tuple 184] box.space.memtx:insert{185, "tuple 185"} - - [185, tuple 185] box.space.memtx:insert{186, "tuple 186"} - - [186, tuple 186] box.space.memtx:insert{187, "tuple 187"} - - [187, tuple 
187] box.space.memtx:insert{188, "tuple 188"} - - [188, tuple 188] box.space.memtx:insert{189, "tuple 189"} - - [189, tuple 189] box.space.vinyl:insert{185, "tuple 185"} - - [185, tuple 185] box.space.vinyl:insert{186, "tuple 186"} - - [186, tuple 186] box.space.vinyl:insert{187, "tuple 187"} - - [187, tuple 187] box.space.vinyl:insert{188, "tuple 188"} - - [188, tuple 188] box.space.vinyl:insert{189, "tuple 189"} - - [189, tuple 189] box.space.memtx:select{185} - - [185, tuple 185] box.space.memtx:select{186} - - [186, tuple 186] box.space.memtx:select{187} - - [187, tuple 187] box.space.memtx:select{188} - - [188, tuple 188] box.space.memtx:select{189} - - [189, tuple 189] box.space.vinyl:select{185} - - [185, tuple 185] box.space.vinyl:select{186} - - [186, tuple 186] box.space.vinyl:select{187} - - [187, tuple 187] box.space.vinyl:select{188} - - [188, tuple 188] box.space.vinyl:select{189} - - [189, tuple 189] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{190, "tuple 190"} - - [190, tuple 190] box.space.memtx:insert{191, "tuple 191"} - - [191, tuple 191] box.space.memtx:insert{192, "tuple 192"} - - [192, tuple 192] box.space.memtx:insert{193, "tuple 193"} - - [193, tuple 193] box.space.memtx:insert{194, "tuple 194"} - - [194, tuple 194] box.space.vinyl:insert{190, "tuple 190"} - - [190, tuple 190] box.space.vinyl:insert{191, "tuple 191"} - - [191, tuple 191] box.space.vinyl:insert{192, "tuple 192"} - - [192, tuple 192] box.space.vinyl:insert{193, "tuple 193"} - - [193, tuple 193] box.space.vinyl:insert{194, "tuple 194"} - - [194, tuple 194] box.space.memtx:select{190} - - [190, tuple 190] box.space.memtx:select{191} - - [191, tuple 191] box.space.memtx:select{192} - - [192, tuple 192] box.space.memtx:select{193} - - [193, tuple 193] box.space.memtx:select{194} - - [194, tuple 194] box.space.vinyl:select{190} - - [190, tuple 190] box.space.vinyl:select{191} - - [191, tuple 191] 
box.space.vinyl:select{192} - - [192, tuple 192] box.space.vinyl:select{193} - - [193, tuple 193] box.space.vinyl:select{194} - - [194, tuple 194] box.space.memtx:insert{195, "tuple 195"} - - [195, tuple 195] box.space.memtx:insert{196, "tuple 196"} - - [196, tuple 196] box.space.memtx:insert{197, "tuple 197"} - - [197, tuple 197] box.space.memtx:insert{198, "tuple 198"} - - [198, tuple 198] box.space.memtx:insert{199, "tuple 199"} - - [199, tuple 199] box.space.vinyl:insert{195, "tuple 195"} - - [195, tuple 195] box.space.vinyl:insert{196, "tuple 196"} - - [196, tuple 196] box.space.vinyl:insert{197, "tuple 197"} - - [197, tuple 197] box.space.vinyl:insert{198, "tuple 198"} - - [198, tuple 198] box.space.vinyl:insert{199, "tuple 199"} - - [199, tuple 199] box.space.memtx:select{195} - - [195, tuple 195] box.space.memtx:select{196} - - [196, tuple 196] box.space.memtx:select{197} - - [197, tuple 197] box.space.memtx:select{198} - - [198, tuple 198] box.space.memtx:select{199} - - [199, tuple 199] box.space.vinyl:select{195} - - [195, tuple 195] box.space.vinyl:select{196} - - [196, tuple 196] box.space.vinyl:select{197} - - [197, tuple 197] box.space.vinyl:select{198} - - [198, tuple 198] box.space.vinyl:select{199} - - [199, tuple 199] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 10 iteration box.space.memtx:insert{200, "tuple 200"} - - [200, tuple 200] box.space.memtx:insert{201, "tuple 201"} - - [201, tuple 201] box.space.memtx:insert{202, "tuple 202"} - - [202, tuple 202] box.space.memtx:insert{203, "tuple 203"} - - [203, tuple 203] box.space.memtx:insert{204, "tuple 204"} - - [204, tuple 204] box.space.vinyl:insert{200, "tuple 200"} - - [200, tuple 200] box.space.vinyl:insert{201, "tuple 201"} - - [201, tuple 201] box.space.vinyl:insert{202, "tuple 202"} - - [202, tuple 202] box.space.vinyl:insert{203, "tuple 203"} - - [203, tuple 203] box.space.vinyl:insert{204, "tuple 204"} - - [204, tuple 204] box.space.memtx:select{200} - - [200, tuple 200] box.space.memtx:select{201} - - [201, tuple 201] box.space.memtx:select{202} - - [202, tuple 202] box.space.memtx:select{203} - - [203, tuple 203] box.space.memtx:select{204} - - [204, tuple 204] box.space.vinyl:select{200} - - [200, tuple 200] box.space.vinyl:select{201} - - [201, tuple 201] box.space.vinyl:select{202} - - [202, tuple 202] box.space.vinyl:select{203} - - [203, tuple 203] box.space.vinyl:select{204} - - [204, tuple 204] box.space.memtx:insert{205, "tuple 205"} - - [205, tuple 205] box.space.memtx:insert{206, "tuple 206"} - - [206, tuple 206] box.space.memtx:insert{207, "tuple 207"} - - [207, tuple 207] box.space.memtx:insert{208, "tuple 208"} - - [208, tuple 208] box.space.memtx:insert{209, "tuple 209"} - - [209, tuple 209] box.space.vinyl:insert{205, "tuple 205"} - - [205, tuple 205] box.space.vinyl:insert{206, "tuple 206"} - - [206, tuple 206] box.space.vinyl:insert{207, "tuple 207"} - - [207, tuple 207] box.space.vinyl:insert{208, "tuple 208"} - - [208, tuple 208] box.space.vinyl:insert{209, "tuple 209"} - - [209, tuple 209] box.space.memtx:select{205} - - [205, tuple 205] box.space.memtx:select{206} - - [206, tuple 206] box.space.memtx:select{207} - - [207, tuple 207] box.space.memtx:select{208} - - [208, tuple 208] box.space.memtx:select{209} - 
- [209, tuple 209] box.space.vinyl:select{205} - - [205, tuple 205] box.space.vinyl:select{206} - - [206, tuple 206] box.space.vinyl:select{207} - - [207, tuple 207] box.space.vinyl:select{208} - - [208, tuple 208] box.space.vinyl:select{209} - - [209, tuple 209] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{210, "tuple 210"} - - [210, tuple 210] box.space.memtx:insert{211, "tuple 211"} - - [211, tuple 211] box.space.memtx:insert{212, "tuple 212"} - - [212, tuple 212] box.space.memtx:insert{213, "tuple 213"} - - [213, tuple 213] box.space.memtx:insert{214, "tuple 214"} - - [214, tuple 214] box.space.vinyl:insert{210, "tuple 210"} - - [210, tuple 210] box.space.vinyl:insert{211, "tuple 211"} - - [211, tuple 211] box.space.vinyl:insert{212, "tuple 212"} - - [212, tuple 212] box.space.vinyl:insert{213, "tuple 213"} - - [213, tuple 213] box.space.vinyl:insert{214, "tuple 214"} - - [214, tuple 214] box.space.memtx:select{210} - - [210, tuple 210] box.space.memtx:select{211} - - [211, tuple 211] box.space.memtx:select{212} - - [212, tuple 212] box.space.memtx:select{213} - - [213, tuple 213] box.space.memtx:select{214} - - [214, tuple 214] box.space.vinyl:select{210} - - [210, tuple 210] box.space.vinyl:select{211} - - [211, tuple 211] box.space.vinyl:select{212} - - [212, tuple 212] box.space.vinyl:select{213} - - [213, tuple 213] box.space.vinyl:select{214} - - [214, tuple 214] box.space.memtx:insert{215, "tuple 215"} - - [215, tuple 215] box.space.memtx:insert{216, "tuple 216"} - - [216, tuple 216] box.space.memtx:insert{217, "tuple 217"} - - [217, tuple 217] box.space.memtx:insert{218, "tuple 218"} - - [218, tuple 218] box.space.memtx:insert{219, "tuple 219"} - - [219, tuple 219] box.space.vinyl:insert{215, "tuple 215"} - - [215, tuple 215] box.space.vinyl:insert{216, "tuple 216"} - - [216, tuple 216] box.space.vinyl:insert{217, "tuple 217"} - - [217, tuple 217] box.space.vinyl:insert{218, "tuple 
218"} - - [218, tuple 218] box.space.vinyl:insert{219, "tuple 219"} - - [219, tuple 219] box.space.memtx:select{215} - - [215, tuple 215] box.space.memtx:select{216} - - [216, tuple 216] box.space.memtx:select{217} - - [217, tuple 217] box.space.memtx:select{218} - - [218, tuple 218] box.space.memtx:select{219} - - [219, tuple 219] box.space.vinyl:select{215} - - [215, tuple 215] box.space.vinyl:select{216} - - [216, tuple 216] box.space.vinyl:select{217} - - [217, tuple 217] box.space.vinyl:select{218} - - [218, tuple 218] box.space.vinyl:select{219} - - [219, tuple 219] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 11 iteration box.space.memtx:insert{220, "tuple 220"} - - [220, tuple 220] box.space.memtx:insert{221, "tuple 221"} - - [221, tuple 221] box.space.memtx:insert{222, "tuple 222"} - - [222, tuple 222] box.space.memtx:insert{223, "tuple 223"} - - [223, tuple 223] box.space.memtx:insert{224, "tuple 224"} - - [224, tuple 224] box.space.vinyl:insert{220, "tuple 220"} - - [220, tuple 220] box.space.vinyl:insert{221, "tuple 221"} - - [221, tuple 221] box.space.vinyl:insert{222, "tuple 222"} - - [222, tuple 222] box.space.vinyl:insert{223, "tuple 223"} - - [223, tuple 223] box.space.vinyl:insert{224, "tuple 224"} - - [224, tuple 224] box.space.memtx:select{220} - - [220, tuple 220] box.space.memtx:select{221} - - [221, tuple 221] box.space.memtx:select{222} - - [222, tuple 222] box.space.memtx:select{223} - - [223, tuple 223] box.space.memtx:select{224} - - [224, tuple 224] box.space.vinyl:select{220} - - [220, tuple 220] box.space.vinyl:select{221} - - [221, tuple 221] box.space.vinyl:select{222} - - [222, tuple 222] box.space.vinyl:select{223} - - [223, tuple 223] box.space.vinyl:select{224} - - [224, tuple 224] box.space.memtx:insert{225, "tuple 225"} - - [225, tuple 225] box.space.memtx:insert{226, "tuple 226"} - - [226, tuple 226] box.space.memtx:insert{227, "tuple 227"} - - [227, 
tuple 227] box.space.memtx:insert{228, "tuple 228"} - - [228, tuple 228] box.space.memtx:insert{229, "tuple 229"} - - [229, tuple 229] box.space.vinyl:insert{225, "tuple 225"} - - [225, tuple 225] box.space.vinyl:insert{226, "tuple 226"} - - [226, tuple 226] box.space.vinyl:insert{227, "tuple 227"} - - [227, tuple 227] box.space.vinyl:insert{228, "tuple 228"} - - [228, tuple 228] box.space.vinyl:insert{229, "tuple 229"} - - [229, tuple 229] box.space.memtx:select{225} - - [225, tuple 225] box.space.memtx:select{226} - - [226, tuple 226] box.space.memtx:select{227} - - [227, tuple 227] box.space.memtx:select{228} - - [228, tuple 228] box.space.memtx:select{229} - - [229, tuple 229] box.space.vinyl:select{225} - - [225, tuple 225] box.space.vinyl:select{226} - - [226, tuple 226] box.space.vinyl:select{227} - - [227, tuple 227] box.space.vinyl:select{228} - - [228, tuple 228] box.space.vinyl:select{229} - - [229, tuple 229] swap servers switch replica to master box.cfg{replication=''} --- ... 
switch master to replica box.space.memtx:insert{230, "tuple 230"} - - [230, tuple 230] box.space.memtx:insert{231, "tuple 231"} - - [231, tuple 231] box.space.memtx:insert{232, "tuple 232"} - - [232, tuple 232] box.space.memtx:insert{233, "tuple 233"} - - [233, tuple 233] box.space.memtx:insert{234, "tuple 234"} - - [234, tuple 234] box.space.vinyl:insert{230, "tuple 230"} - - [230, tuple 230] box.space.vinyl:insert{231, "tuple 231"} - - [231, tuple 231] box.space.vinyl:insert{232, "tuple 232"} - - [232, tuple 232] box.space.vinyl:insert{233, "tuple 233"} - - [233, tuple 233] box.space.vinyl:insert{234, "tuple 234"} - - [234, tuple 234] box.space.memtx:select{230} - - [230, tuple 230] box.space.memtx:select{231} - - [231, tuple 231] box.space.memtx:select{232} - - [232, tuple 232] box.space.memtx:select{233} - - [233, tuple 233] box.space.memtx:select{234} - - [234, tuple 234] box.space.vinyl:select{230} - - [230, tuple 230] box.space.vinyl:select{231} - - [231, tuple 231] box.space.vinyl:select{232} - - [232, tuple 232] box.space.vinyl:select{233} - - [233, tuple 233] box.space.vinyl:select{234} - - [234, tuple 234] box.space.memtx:insert{235, "tuple 235"} - - [235, tuple 235] box.space.memtx:insert{236, "tuple 236"} - - [236, tuple 236] box.space.memtx:insert{237, "tuple 237"} - - [237, tuple 237] box.space.memtx:insert{238, "tuple 238"} - - [238, tuple 238] box.space.memtx:insert{239, "tuple 239"} - - [239, tuple 239] box.space.vinyl:insert{235, "tuple 235"} - - [235, tuple 235] box.space.vinyl:insert{236, "tuple 236"} - - [236, tuple 236] box.space.vinyl:insert{237, "tuple 237"} - - [237, tuple 237] box.space.vinyl:insert{238, "tuple 238"} - - [238, tuple 238] box.space.vinyl:insert{239, "tuple 239"} - - [239, tuple 239] box.space.memtx:select{235} - - [235, tuple 235] box.space.memtx:select{236} - - [236, tuple 236] box.space.memtx:select{237} - - [237, tuple 237] box.space.memtx:select{238} - - [238, tuple 238] box.space.memtx:select{239} - - [239, tuple 239] 
box.space.vinyl:select{235} - - [235, tuple 235] box.space.vinyl:select{236} - - [236, tuple 236] box.space.vinyl:select{237} - - [237, tuple 237] box.space.vinyl:select{238} - - [238, tuple 238] box.space.vinyl:select{239} - - [239, tuple 239] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 12 iteration box.space.memtx:insert{240, "tuple 240"} - - [240, tuple 240] box.space.memtx:insert{241, "tuple 241"} - - [241, tuple 241] box.space.memtx:insert{242, "tuple 242"} - - [242, tuple 242] box.space.memtx:insert{243, "tuple 243"} - - [243, tuple 243] box.space.memtx:insert{244, "tuple 244"} - - [244, tuple 244] box.space.vinyl:insert{240, "tuple 240"} - - [240, tuple 240] box.space.vinyl:insert{241, "tuple 241"} - - [241, tuple 241] box.space.vinyl:insert{242, "tuple 242"} - - [242, tuple 242] box.space.vinyl:insert{243, "tuple 243"} - - [243, tuple 243] box.space.vinyl:insert{244, "tuple 244"} - - [244, tuple 244] box.space.memtx:select{240} - - [240, tuple 240] box.space.memtx:select{241} - - [241, tuple 241] box.space.memtx:select{242} - - [242, tuple 242] box.space.memtx:select{243} - - [243, tuple 243] box.space.memtx:select{244} - - [244, tuple 244] box.space.vinyl:select{240} - - [240, tuple 240] box.space.vinyl:select{241} - - [241, tuple 241] box.space.vinyl:select{242} - - [242, tuple 242] box.space.vinyl:select{243} - - [243, tuple 243] box.space.vinyl:select{244} - - [244, tuple 244] box.space.memtx:insert{245, "tuple 245"} - - [245, tuple 245] box.space.memtx:insert{246, "tuple 246"} - - [246, tuple 246] box.space.memtx:insert{247, "tuple 247"} - - [247, tuple 247] box.space.memtx:insert{248, "tuple 248"} - - [248, tuple 248] box.space.memtx:insert{249, "tuple 249"} - - [249, tuple 249] box.space.vinyl:insert{245, "tuple 245"} - - [245, tuple 245] box.space.vinyl:insert{246, "tuple 246"} - - [246, tuple 246] box.space.vinyl:insert{247, "tuple 247"} - - [247, tuple 247] 
box.space.vinyl:insert{248, "tuple 248"} - - [248, tuple 248] box.space.vinyl:insert{249, "tuple 249"} - - [249, tuple 249] box.space.memtx:select{245} - - [245, tuple 245] box.space.memtx:select{246} - - [246, tuple 246] box.space.memtx:select{247} - - [247, tuple 247] box.space.memtx:select{248} - - [248, tuple 248] box.space.memtx:select{249} - - [249, tuple 249] box.space.vinyl:select{245} - - [245, tuple 245] box.space.vinyl:select{246} - - [246, tuple 246] box.space.vinyl:select{247} - - [247, tuple 247] box.space.vinyl:select{248} - - [248, tuple 248] box.space.vinyl:select{249} - - [249, tuple 249] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{250, "tuple 250"} - - [250, tuple 250] box.space.memtx:insert{251, "tuple 251"} - - [251, tuple 251] box.space.memtx:insert{252, "tuple 252"} - - [252, tuple 252] box.space.memtx:insert{253, "tuple 253"} - - [253, tuple 253] box.space.memtx:insert{254, "tuple 254"} - - [254, tuple 254] box.space.vinyl:insert{250, "tuple 250"} - - [250, tuple 250] box.space.vinyl:insert{251, "tuple 251"} - - [251, tuple 251] box.space.vinyl:insert{252, "tuple 252"} - - [252, tuple 252] box.space.vinyl:insert{253, "tuple 253"} - - [253, tuple 253] box.space.vinyl:insert{254, "tuple 254"} - - [254, tuple 254] box.space.memtx:select{250} - - [250, tuple 250] box.space.memtx:select{251} - - [251, tuple 251] box.space.memtx:select{252} - - [252, tuple 252] box.space.memtx:select{253} - - [253, tuple 253] box.space.memtx:select{254} - - [254, tuple 254] box.space.vinyl:select{250} - - [250, tuple 250] box.space.vinyl:select{251} - - [251, tuple 251] box.space.vinyl:select{252} - - [252, tuple 252] box.space.vinyl:select{253} - - [253, tuple 253] box.space.vinyl:select{254} - - [254, tuple 254] box.space.memtx:insert{255, "tuple 255"} - - [255, tuple 255] box.space.memtx:insert{256, "tuple 256"} - - [256, tuple 256] box.space.memtx:insert{257, "tuple 257"} - - [257, tuple 
257] box.space.memtx:insert{258, "tuple 258"} - - [258, tuple 258] box.space.memtx:insert{259, "tuple 259"} - - [259, tuple 259] box.space.vinyl:insert{255, "tuple 255"} - - [255, tuple 255] box.space.vinyl:insert{256, "tuple 256"} - - [256, tuple 256] box.space.vinyl:insert{257, "tuple 257"} - - [257, tuple 257] box.space.vinyl:insert{258, "tuple 258"} - - [258, tuple 258] box.space.vinyl:insert{259, "tuple 259"} - - [259, tuple 259] box.space.memtx:select{255} - - [255, tuple 255] box.space.memtx:select{256} - - [256, tuple 256] box.space.memtx:select{257} - - [257, tuple 257] box.space.memtx:select{258} - - [258, tuple 258] box.space.memtx:select{259} - - [259, tuple 259] box.space.vinyl:select{255} - - [255, tuple 255] box.space.vinyl:select{256} - - [256, tuple 256] box.space.vinyl:select{257} - - [257, tuple 257] box.space.vinyl:select{258} - - [258, tuple 258] box.space.vinyl:select{259} - - [259, tuple 259] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 13 iteration box.space.memtx:insert{260, "tuple 260"} - - [260, tuple 260] box.space.memtx:insert{261, "tuple 261"} - - [261, tuple 261] box.space.memtx:insert{262, "tuple 262"} - - [262, tuple 262] box.space.memtx:insert{263, "tuple 263"} - - [263, tuple 263] box.space.memtx:insert{264, "tuple 264"} - - [264, tuple 264] box.space.vinyl:insert{260, "tuple 260"} - - [260, tuple 260] box.space.vinyl:insert{261, "tuple 261"} - - [261, tuple 261] box.space.vinyl:insert{262, "tuple 262"} - - [262, tuple 262] box.space.vinyl:insert{263, "tuple 263"} - - [263, tuple 263] box.space.vinyl:insert{264, "tuple 264"} - - [264, tuple 264] box.space.memtx:select{260} - - [260, tuple 260] box.space.memtx:select{261} - - [261, tuple 261] box.space.memtx:select{262} - - [262, tuple 262] box.space.memtx:select{263} - - [263, tuple 263] box.space.memtx:select{264} - - [264, tuple 264] box.space.vinyl:select{260} - - [260, tuple 260] box.space.vinyl:select{261} - - [261, tuple 261] box.space.vinyl:select{262} - - [262, tuple 262] box.space.vinyl:select{263} - - [263, tuple 263] box.space.vinyl:select{264} - - [264, tuple 264] box.space.memtx:insert{265, "tuple 265"} - - [265, tuple 265] box.space.memtx:insert{266, "tuple 266"} - - [266, tuple 266] box.space.memtx:insert{267, "tuple 267"} - - [267, tuple 267] box.space.memtx:insert{268, "tuple 268"} - - [268, tuple 268] box.space.memtx:insert{269, "tuple 269"} - - [269, tuple 269] box.space.vinyl:insert{265, "tuple 265"} - - [265, tuple 265] box.space.vinyl:insert{266, "tuple 266"} - - [266, tuple 266] box.space.vinyl:insert{267, "tuple 267"} - - [267, tuple 267] box.space.vinyl:insert{268, "tuple 268"} - - [268, tuple 268] box.space.vinyl:insert{269, "tuple 269"} - - [269, tuple 269] box.space.memtx:select{265} - - [265, tuple 265] box.space.memtx:select{266} - - [266, tuple 266] box.space.memtx:select{267} - - [267, tuple 267] box.space.memtx:select{268} - - [268, tuple 268] box.space.memtx:select{269} - 
- [269, tuple 269] box.space.vinyl:select{265} - - [265, tuple 265] box.space.vinyl:select{266} - - [266, tuple 266] box.space.vinyl:select{267} - - [267, tuple 267] box.space.vinyl:select{268} - - [268, tuple 268] box.space.vinyl:select{269} - - [269, tuple 269] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{270, "tuple 270"} - - [270, tuple 270] box.space.memtx:insert{271, "tuple 271"} - - [271, tuple 271] box.space.memtx:insert{272, "tuple 272"} - - [272, tuple 272] box.space.memtx:insert{273, "tuple 273"} - - [273, tuple 273] box.space.memtx:insert{274, "tuple 274"} - - [274, tuple 274] box.space.vinyl:insert{270, "tuple 270"} - - [270, tuple 270] box.space.vinyl:insert{271, "tuple 271"} - - [271, tuple 271] box.space.vinyl:insert{272, "tuple 272"} - - [272, tuple 272] box.space.vinyl:insert{273, "tuple 273"} - - [273, tuple 273] box.space.vinyl:insert{274, "tuple 274"} - - [274, tuple 274] box.space.memtx:select{270} - - [270, tuple 270] box.space.memtx:select{271} - - [271, tuple 271] box.space.memtx:select{272} - - [272, tuple 272] box.space.memtx:select{273} - - [273, tuple 273] box.space.memtx:select{274} - - [274, tuple 274] box.space.vinyl:select{270} - - [270, tuple 270] box.space.vinyl:select{271} - - [271, tuple 271] box.space.vinyl:select{272} - - [272, tuple 272] box.space.vinyl:select{273} - - [273, tuple 273] box.space.vinyl:select{274} - - [274, tuple 274] box.space.memtx:insert{275, "tuple 275"} - - [275, tuple 275] box.space.memtx:insert{276, "tuple 276"} - - [276, tuple 276] box.space.memtx:insert{277, "tuple 277"} - - [277, tuple 277] box.space.memtx:insert{278, "tuple 278"} - - [278, tuple 278] box.space.memtx:insert{279, "tuple 279"} - - [279, tuple 279] box.space.vinyl:insert{275, "tuple 275"} - - [275, tuple 275] box.space.vinyl:insert{276, "tuple 276"} - - [276, tuple 276] box.space.vinyl:insert{277, "tuple 277"} - - [277, tuple 277] box.space.vinyl:insert{278, "tuple 
278"} - - [278, tuple 278] box.space.vinyl:insert{279, "tuple 279"} - - [279, tuple 279] box.space.memtx:select{275} - - [275, tuple 275] box.space.memtx:select{276} - - [276, tuple 276] box.space.memtx:select{277} - - [277, tuple 277] box.space.memtx:select{278} - - [278, tuple 278] box.space.memtx:select{279} - - [279, tuple 279] box.space.vinyl:select{275} - - [275, tuple 275] box.space.vinyl:select{276} - - [276, tuple 276] box.space.vinyl:select{277} - - [277, tuple 277] box.space.vinyl:select{278} - - [278, tuple 278] box.space.vinyl:select{279} - - [279, tuple 279] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 14 iteration box.space.memtx:insert{280, "tuple 280"} - - [280, tuple 280] box.space.memtx:insert{281, "tuple 281"} - - [281, tuple 281] box.space.memtx:insert{282, "tuple 282"} - - [282, tuple 282] box.space.memtx:insert{283, "tuple 283"} - - [283, tuple 283] box.space.memtx:insert{284, "tuple 284"} - - [284, tuple 284] box.space.vinyl:insert{280, "tuple 280"} - - [280, tuple 280] box.space.vinyl:insert{281, "tuple 281"} - - [281, tuple 281] box.space.vinyl:insert{282, "tuple 282"} - - [282, tuple 282] box.space.vinyl:insert{283, "tuple 283"} - - [283, tuple 283] box.space.vinyl:insert{284, "tuple 284"} - - [284, tuple 284] box.space.memtx:select{280} - - [280, tuple 280] box.space.memtx:select{281} - - [281, tuple 281] box.space.memtx:select{282} - - [282, tuple 282] box.space.memtx:select{283} - - [283, tuple 283] box.space.memtx:select{284} - - [284, tuple 284] box.space.vinyl:select{280} - - [280, tuple 280] box.space.vinyl:select{281} - - [281, tuple 281] box.space.vinyl:select{282} - - [282, tuple 282] box.space.vinyl:select{283} - - [283, tuple 283] box.space.vinyl:select{284} - - [284, tuple 284] box.space.memtx:insert{285, "tuple 285"} - - [285, tuple 285] box.space.memtx:insert{286, "tuple 286"} - - [286, tuple 286] box.space.memtx:insert{287, "tuple 287"} - - [287, 
tuple 287] box.space.memtx:insert{288, "tuple 288"} - - [288, tuple 288] box.space.memtx:insert{289, "tuple 289"} - - [289, tuple 289] box.space.vinyl:insert{285, "tuple 285"} - - [285, tuple 285] box.space.vinyl:insert{286, "tuple 286"} - - [286, tuple 286] box.space.vinyl:insert{287, "tuple 287"} - - [287, tuple 287] box.space.vinyl:insert{288, "tuple 288"} - - [288, tuple 288] box.space.vinyl:insert{289, "tuple 289"} - - [289, tuple 289] box.space.memtx:select{285} - - [285, tuple 285] box.space.memtx:select{286} - - [286, tuple 286] box.space.memtx:select{287} - - [287, tuple 287] box.space.memtx:select{288} - - [288, tuple 288] box.space.memtx:select{289} - - [289, tuple 289] box.space.vinyl:select{285} - - [285, tuple 285] box.space.vinyl:select{286} - - [286, tuple 286] box.space.vinyl:select{287} - - [287, tuple 287] box.space.vinyl:select{288} - - [288, tuple 288] box.space.vinyl:select{289} - - [289, tuple 289] swap servers switch replica to master box.cfg{replication=''} --- ... 
switch master to replica box.space.memtx:insert{290, "tuple 290"} - - [290, tuple 290] box.space.memtx:insert{291, "tuple 291"} - - [291, tuple 291] box.space.memtx:insert{292, "tuple 292"} - - [292, tuple 292] box.space.memtx:insert{293, "tuple 293"} - - [293, tuple 293] box.space.memtx:insert{294, "tuple 294"} - - [294, tuple 294] box.space.vinyl:insert{290, "tuple 290"} - - [290, tuple 290] box.space.vinyl:insert{291, "tuple 291"} - - [291, tuple 291] box.space.vinyl:insert{292, "tuple 292"} - - [292, tuple 292] box.space.vinyl:insert{293, "tuple 293"} - - [293, tuple 293] box.space.vinyl:insert{294, "tuple 294"} - - [294, tuple 294] box.space.memtx:select{290} - - [290, tuple 290] box.space.memtx:select{291} - - [291, tuple 291] box.space.memtx:select{292} - - [292, tuple 292] box.space.memtx:select{293} - - [293, tuple 293] box.space.memtx:select{294} - - [294, tuple 294] box.space.vinyl:select{290} - - [290, tuple 290] box.space.vinyl:select{291} - - [291, tuple 291] box.space.vinyl:select{292} - - [292, tuple 292] box.space.vinyl:select{293} - - [293, tuple 293] box.space.vinyl:select{294} - - [294, tuple 294] box.space.memtx:insert{295, "tuple 295"} - - [295, tuple 295] box.space.memtx:insert{296, "tuple 296"} - - [296, tuple 296] box.space.memtx:insert{297, "tuple 297"} - - [297, tuple 297] box.space.memtx:insert{298, "tuple 298"} - - [298, tuple 298] box.space.memtx:insert{299, "tuple 299"} - - [299, tuple 299] box.space.vinyl:insert{295, "tuple 295"} - - [295, tuple 295] box.space.vinyl:insert{296, "tuple 296"} - - [296, tuple 296] box.space.vinyl:insert{297, "tuple 297"} - - [297, tuple 297] box.space.vinyl:insert{298, "tuple 298"} - - [298, tuple 298] box.space.vinyl:insert{299, "tuple 299"} - - [299, tuple 299] box.space.memtx:select{295} - - [295, tuple 295] box.space.memtx:select{296} - - [296, tuple 296] box.space.memtx:select{297} - - [297, tuple 297] box.space.memtx:select{298} - - [298, tuple 298] box.space.memtx:select{299} - - [299, tuple 299] 
box.space.vinyl:select{295} - - [295, tuple 295] box.space.vinyl:select{296} - - [296, tuple 296] box.space.vinyl:select{297} - - [297, tuple 297] box.space.vinyl:select{298} - - [298, tuple 298] box.space.vinyl:select{299} - - [299, tuple 299] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 15 iteration box.space.memtx:insert{300, "tuple 300"} - - [300, tuple 300] box.space.memtx:insert{301, "tuple 301"} - - [301, tuple 301] box.space.memtx:insert{302, "tuple 302"} - - [302, tuple 302] box.space.memtx:insert{303, "tuple 303"} - - [303, tuple 303] box.space.memtx:insert{304, "tuple 304"} - - [304, tuple 304] box.space.vinyl:insert{300, "tuple 300"} - - [300, tuple 300] box.space.vinyl:insert{301, "tuple 301"} - - [301, tuple 301] box.space.vinyl:insert{302, "tuple 302"} - - [302, tuple 302] box.space.vinyl:insert{303, "tuple 303"} - - [303, tuple 303] box.space.vinyl:insert{304, "tuple 304"} - - [304, tuple 304] box.space.memtx:select{300} - - [300, tuple 300] box.space.memtx:select{301} - - [301, tuple 301] box.space.memtx:select{302} - - [302, tuple 302] box.space.memtx:select{303} - - [303, tuple 303] box.space.memtx:select{304} - - [304, tuple 304] box.space.vinyl:select{300} - - [300, tuple 300] box.space.vinyl:select{301} - - [301, tuple 301] box.space.vinyl:select{302} - - [302, tuple 302] box.space.vinyl:select{303} - - [303, tuple 303] box.space.vinyl:select{304} - - [304, tuple 304] box.space.memtx:insert{305, "tuple 305"} - - [305, tuple 305] box.space.memtx:insert{306, "tuple 306"} - - [306, tuple 306] box.space.memtx:insert{307, "tuple 307"} - - [307, tuple 307] box.space.memtx:insert{308, "tuple 308"} - - [308, tuple 308] box.space.memtx:insert{309, "tuple 309"} - - [309, tuple 309] box.space.vinyl:insert{305, "tuple 305"} - - [305, tuple 305] box.space.vinyl:insert{306, "tuple 306"} - - [306, tuple 306] box.space.vinyl:insert{307, "tuple 307"} - - [307, tuple 307] 
box.space.vinyl:insert{308, "tuple 308"} - - [308, tuple 308] box.space.vinyl:insert{309, "tuple 309"} - - [309, tuple 309] box.space.memtx:select{305} - - [305, tuple 305] box.space.memtx:select{306} - - [306, tuple 306] box.space.memtx:select{307} - - [307, tuple 307] box.space.memtx:select{308} - - [308, tuple 308] box.space.memtx:select{309} - - [309, tuple 309] box.space.vinyl:select{305} - - [305, tuple 305] box.space.vinyl:select{306} - - [306, tuple 306] box.space.vinyl:select{307} - - [307, tuple 307] box.space.vinyl:select{308} - - [308, tuple 308] box.space.vinyl:select{309} - - [309, tuple 309] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{310, "tuple 310"} - - [310, tuple 310] box.space.memtx:insert{311, "tuple 311"} - - [311, tuple 311] box.space.memtx:insert{312, "tuple 312"} - - [312, tuple 312] box.space.memtx:insert{313, "tuple 313"} - - [313, tuple 313] box.space.memtx:insert{314, "tuple 314"} - - [314, tuple 314] box.space.vinyl:insert{310, "tuple 310"} - - [310, tuple 310] box.space.vinyl:insert{311, "tuple 311"} - - [311, tuple 311] box.space.vinyl:insert{312, "tuple 312"} - - [312, tuple 312] box.space.vinyl:insert{313, "tuple 313"} - - [313, tuple 313] box.space.vinyl:insert{314, "tuple 314"} - - [314, tuple 314] box.space.memtx:select{310} - - [310, tuple 310] box.space.memtx:select{311} - - [311, tuple 311] box.space.memtx:select{312} - - [312, tuple 312] box.space.memtx:select{313} - - [313, tuple 313] box.space.memtx:select{314} - - [314, tuple 314] box.space.vinyl:select{310} - - [310, tuple 310] box.space.vinyl:select{311} - - [311, tuple 311] box.space.vinyl:select{312} - - [312, tuple 312] box.space.vinyl:select{313} - - [313, tuple 313] box.space.vinyl:select{314} - - [314, tuple 314] box.space.memtx:insert{315, "tuple 315"} - - [315, tuple 315] box.space.memtx:insert{316, "tuple 316"} - - [316, tuple 316] box.space.memtx:insert{317, "tuple 317"} - - [317, tuple 
317] box.space.memtx:insert{318, "tuple 318"} - - [318, tuple 318] box.space.memtx:insert{319, "tuple 319"} - - [319, tuple 319] box.space.vinyl:insert{315, "tuple 315"} - - [315, tuple 315] box.space.vinyl:insert{316, "tuple 316"} - - [316, tuple 316] box.space.vinyl:insert{317, "tuple 317"} - - [317, tuple 317] box.space.vinyl:insert{318, "tuple 318"} - - [318, tuple 318] box.space.vinyl:insert{319, "tuple 319"} - - [319, tuple 319] box.space.memtx:select{315} - - [315, tuple 315] box.space.memtx:select{316} - - [316, tuple 316] box.space.memtx:select{317} - - [317, tuple 317] box.space.memtx:select{318} - - [318, tuple 318] box.space.memtx:select{319} - - [319, tuple 319] box.space.vinyl:select{315} - - [315, tuple 315] box.space.vinyl:select{316} - - [316, tuple 316] box.space.vinyl:select{317} - - [317, tuple 317] box.space.vinyl:select{318} - - [318, tuple 318] box.space.vinyl:select{319} - - [319, tuple 319] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 16 iteration box.space.memtx:insert{320, "tuple 320"} - - [320, tuple 320] box.space.memtx:insert{321, "tuple 321"} - - [321, tuple 321] box.space.memtx:insert{322, "tuple 322"} - - [322, tuple 322] box.space.memtx:insert{323, "tuple 323"} - - [323, tuple 323] box.space.memtx:insert{324, "tuple 324"} - - [324, tuple 324] box.space.vinyl:insert{320, "tuple 320"} - - [320, tuple 320] box.space.vinyl:insert{321, "tuple 321"} - - [321, tuple 321] box.space.vinyl:insert{322, "tuple 322"} - - [322, tuple 322] box.space.vinyl:insert{323, "tuple 323"} - - [323, tuple 323] box.space.vinyl:insert{324, "tuple 324"} - - [324, tuple 324] box.space.memtx:select{320} - - [320, tuple 320] box.space.memtx:select{321} - - [321, tuple 321] box.space.memtx:select{322} - - [322, tuple 322] box.space.memtx:select{323} - - [323, tuple 323] box.space.memtx:select{324} - - [324, tuple 324] box.space.vinyl:select{320} - - [320, tuple 320] box.space.vinyl:select{321} - - [321, tuple 321] box.space.vinyl:select{322} - - [322, tuple 322] box.space.vinyl:select{323} - - [323, tuple 323] box.space.vinyl:select{324} - - [324, tuple 324] box.space.memtx:insert{325, "tuple 325"} - - [325, tuple 325] box.space.memtx:insert{326, "tuple 326"} - - [326, tuple 326] box.space.memtx:insert{327, "tuple 327"} - - [327, tuple 327] box.space.memtx:insert{328, "tuple 328"} - - [328, tuple 328] box.space.memtx:insert{329, "tuple 329"} - - [329, tuple 329] box.space.vinyl:insert{325, "tuple 325"} - - [325, tuple 325] box.space.vinyl:insert{326, "tuple 326"} - - [326, tuple 326] box.space.vinyl:insert{327, "tuple 327"} - - [327, tuple 327] box.space.vinyl:insert{328, "tuple 328"} - - [328, tuple 328] box.space.vinyl:insert{329, "tuple 329"} - - [329, tuple 329] box.space.memtx:select{325} - - [325, tuple 325] box.space.memtx:select{326} - - [326, tuple 326] box.space.memtx:select{327} - - [327, tuple 327] box.space.memtx:select{328} - - [328, tuple 328] box.space.memtx:select{329} - 
- [329, tuple 329] box.space.vinyl:select{325} - - [325, tuple 325] box.space.vinyl:select{326} - - [326, tuple 326] box.space.vinyl:select{327} - - [327, tuple 327] box.space.vinyl:select{328} - - [328, tuple 328] box.space.vinyl:select{329} - - [329, tuple 329] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{330, "tuple 330"} - - [330, tuple 330] box.space.memtx:insert{331, "tuple 331"} - - [331, tuple 331] box.space.memtx:insert{332, "tuple 332"} - - [332, tuple 332] box.space.memtx:insert{333, "tuple 333"} - - [333, tuple 333] box.space.memtx:insert{334, "tuple 334"} - - [334, tuple 334] box.space.vinyl:insert{330, "tuple 330"} - - [330, tuple 330] box.space.vinyl:insert{331, "tuple 331"} - - [331, tuple 331] box.space.vinyl:insert{332, "tuple 332"} - - [332, tuple 332] box.space.vinyl:insert{333, "tuple 333"} - - [333, tuple 333] box.space.vinyl:insert{334, "tuple 334"} - - [334, tuple 334] box.space.memtx:select{330} - - [330, tuple 330] box.space.memtx:select{331} - - [331, tuple 331] box.space.memtx:select{332} - - [332, tuple 332] box.space.memtx:select{333} - - [333, tuple 333] box.space.memtx:select{334} - - [334, tuple 334] box.space.vinyl:select{330} - - [330, tuple 330] box.space.vinyl:select{331} - - [331, tuple 331] box.space.vinyl:select{332} - - [332, tuple 332] box.space.vinyl:select{333} - - [333, tuple 333] box.space.vinyl:select{334} - - [334, tuple 334] box.space.memtx:insert{335, "tuple 335"} - - [335, tuple 335] box.space.memtx:insert{336, "tuple 336"} - - [336, tuple 336] box.space.memtx:insert{337, "tuple 337"} - - [337, tuple 337] box.space.memtx:insert{338, "tuple 338"} - - [338, tuple 338] box.space.memtx:insert{339, "tuple 339"} - - [339, tuple 339] box.space.vinyl:insert{335, "tuple 335"} - - [335, tuple 335] box.space.vinyl:insert{336, "tuple 336"} - - [336, tuple 336] box.space.vinyl:insert{337, "tuple 337"} - - [337, tuple 337] box.space.vinyl:insert{338, "tuple 
338"} - - [338, tuple 338] box.space.vinyl:insert{339, "tuple 339"} - - [339, tuple 339] box.space.memtx:select{335} - - [335, tuple 335] box.space.memtx:select{336} - - [336, tuple 336] box.space.memtx:select{337} - - [337, tuple 337] box.space.memtx:select{338} - - [338, tuple 338] box.space.memtx:select{339} - - [339, tuple 339] box.space.vinyl:select{335} - - [335, tuple 335] box.space.vinyl:select{336} - - [336, tuple 336] box.space.vinyl:select{337} - - [337, tuple 337] box.space.vinyl:select{338} - - [338, tuple 338] box.space.vinyl:select{339} - - [339, tuple 339] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 17 iteration box.space.memtx:insert{340, "tuple 340"} - - [340, tuple 340] box.space.memtx:insert{341, "tuple 341"} - - [341, tuple 341] box.space.memtx:insert{342, "tuple 342"} - - [342, tuple 342] box.space.memtx:insert{343, "tuple 343"} - - [343, tuple 343] box.space.memtx:insert{344, "tuple 344"} - - [344, tuple 344] box.space.vinyl:insert{340, "tuple 340"} - - [340, tuple 340] box.space.vinyl:insert{341, "tuple 341"} - - [341, tuple 341] box.space.vinyl:insert{342, "tuple 342"} - - [342, tuple 342] box.space.vinyl:insert{343, "tuple 343"} - - [343, tuple 343] box.space.vinyl:insert{344, "tuple 344"} - - [344, tuple 344] box.space.memtx:select{340} - - [340, tuple 340] box.space.memtx:select{341} - - [341, tuple 341] box.space.memtx:select{342} - - [342, tuple 342] box.space.memtx:select{343} - - [343, tuple 343] box.space.memtx:select{344} - - [344, tuple 344] box.space.vinyl:select{340} - - [340, tuple 340] box.space.vinyl:select{341} - - [341, tuple 341] box.space.vinyl:select{342} - - [342, tuple 342] box.space.vinyl:select{343} - - [343, tuple 343] box.space.vinyl:select{344} - - [344, tuple 344] box.space.memtx:insert{345, "tuple 345"} - - [345, tuple 345] box.space.memtx:insert{346, "tuple 346"} - - [346, tuple 346] box.space.memtx:insert{347, "tuple 347"} - - [347, 
tuple 347] box.space.memtx:insert{348, "tuple 348"} - - [348, tuple 348] box.space.memtx:insert{349, "tuple 349"} - - [349, tuple 349] box.space.vinyl:insert{345, "tuple 345"} - - [345, tuple 345] box.space.vinyl:insert{346, "tuple 346"} - - [346, tuple 346] box.space.vinyl:insert{347, "tuple 347"} - - [347, tuple 347] box.space.vinyl:insert{348, "tuple 348"} - - [348, tuple 348] box.space.vinyl:insert{349, "tuple 349"} - - [349, tuple 349] box.space.memtx:select{345} - - [345, tuple 345] box.space.memtx:select{346} - - [346, tuple 346] box.space.memtx:select{347} - - [347, tuple 347] box.space.memtx:select{348} - - [348, tuple 348] box.space.memtx:select{349} - - [349, tuple 349] box.space.vinyl:select{345} - - [345, tuple 345] box.space.vinyl:select{346} - - [346, tuple 346] box.space.vinyl:select{347} - - [347, tuple 347] box.space.vinyl:select{348} - - [348, tuple 348] box.space.vinyl:select{349} - - [349, tuple 349] swap servers switch replica to master box.cfg{replication=''} --- ... 
switch master to replica box.space.memtx:insert{350, "tuple 350"} - - [350, tuple 350] box.space.memtx:insert{351, "tuple 351"} - - [351, tuple 351] box.space.memtx:insert{352, "tuple 352"} - - [352, tuple 352] box.space.memtx:insert{353, "tuple 353"} - - [353, tuple 353] box.space.memtx:insert{354, "tuple 354"} - - [354, tuple 354] box.space.vinyl:insert{350, "tuple 350"} - - [350, tuple 350] box.space.vinyl:insert{351, "tuple 351"} - - [351, tuple 351] box.space.vinyl:insert{352, "tuple 352"} - - [352, tuple 352] box.space.vinyl:insert{353, "tuple 353"} - - [353, tuple 353] box.space.vinyl:insert{354, "tuple 354"} - - [354, tuple 354] box.space.memtx:select{350} - - [350, tuple 350] box.space.memtx:select{351} - - [351, tuple 351] box.space.memtx:select{352} - - [352, tuple 352] box.space.memtx:select{353} - - [353, tuple 353] box.space.memtx:select{354} - - [354, tuple 354] box.space.vinyl:select{350} - - [350, tuple 350] box.space.vinyl:select{351} - - [351, tuple 351] box.space.vinyl:select{352} - - [352, tuple 352] box.space.vinyl:select{353} - - [353, tuple 353] box.space.vinyl:select{354} - - [354, tuple 354] box.space.memtx:insert{355, "tuple 355"} - - [355, tuple 355] box.space.memtx:insert{356, "tuple 356"} - - [356, tuple 356] box.space.memtx:insert{357, "tuple 357"} - - [357, tuple 357] box.space.memtx:insert{358, "tuple 358"} - - [358, tuple 358] box.space.memtx:insert{359, "tuple 359"} - - [359, tuple 359] box.space.vinyl:insert{355, "tuple 355"} - - [355, tuple 355] box.space.vinyl:insert{356, "tuple 356"} - - [356, tuple 356] box.space.vinyl:insert{357, "tuple 357"} - - [357, tuple 357] box.space.vinyl:insert{358, "tuple 358"} - - [358, tuple 358] box.space.vinyl:insert{359, "tuple 359"} - - [359, tuple 359] box.space.memtx:select{355} - - [355, tuple 355] box.space.memtx:select{356} - - [356, tuple 356] box.space.memtx:select{357} - - [357, tuple 357] box.space.memtx:select{358} - - [358, tuple 358] box.space.memtx:select{359} - - [359, tuple 359] 
box.space.vinyl:select{355} - - [355, tuple 355] box.space.vinyl:select{356} - - [356, tuple 356] box.space.vinyl:select{357} - - [357, tuple 357] box.space.vinyl:select{358} - - [358, tuple 358] box.space.vinyl:select{359} - - [359, tuple 359] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica test 18 iteration box.space.memtx:insert{360, "tuple 360"} - - [360, tuple 360] box.space.memtx:insert{361, "tuple 361"} - - [361, tuple 361] box.space.memtx:insert{362, "tuple 362"} - - [362, tuple 362] box.space.memtx:insert{363, "tuple 363"} - - [363, tuple 363] box.space.memtx:insert{364, "tuple 364"} - - [364, tuple 364] box.space.vinyl:insert{360, "tuple 360"} - - [360, tuple 360] box.space.vinyl:insert{361, "tuple 361"} - - [361, tuple 361] box.space.vinyl:insert{362, "tuple 362"} - - [362, tuple 362] box.space.vinyl:insert{363, "tuple 363"} - - [363, tuple 363] box.space.vinyl:insert{364, "tuple 364"} - - [364, tuple 364] box.space.memtx:select{360} - - [360, tuple 360] box.space.memtx:select{361} - - [361, tuple 361] box.space.memtx:select{362} - - [362, tuple 362] box.space.memtx:select{363} - - [363, tuple 363] box.space.memtx:select{364} - - [364, tuple 364] box.space.vinyl:select{360} - - [360, tuple 360] box.space.vinyl:select{361} - - [361, tuple 361] box.space.vinyl:select{362} - - [362, tuple 362] box.space.vinyl:select{363} - - [363, tuple 363] box.space.vinyl:select{364} - - [364, tuple 364] box.space.memtx:insert{365, "tuple 365"} - - [365, tuple 365] box.space.memtx:insert{366, "tuple 366"} - - [366, tuple 366] box.space.memtx:insert{367, "tuple 367"} - - [367, tuple 367] box.space.memtx:insert{368, "tuple 368"} - - [368, tuple 368] box.space.memtx:insert{369, "tuple 369"} - - [369, tuple 369] box.space.vinyl:insert{365, "tuple 365"} - - [365, tuple 365] box.space.vinyl:insert{366, "tuple 366"} - - [366, tuple 366] box.space.vinyl:insert{367, "tuple 367"} - - [367, tuple 367] 
box.space.vinyl:insert{368, "tuple 368"} - - [368, tuple 368] box.space.vinyl:insert{369, "tuple 369"} - - [369, tuple 369] box.space.memtx:select{365} - - [365, tuple 365] box.space.memtx:select{366} - - [366, tuple 366] box.space.memtx:select{367} - - [367, tuple 367] box.space.memtx:select{368} - - [368, tuple 368] box.space.memtx:select{369} - - [369, tuple 369] box.space.vinyl:select{365} - - [365, tuple 365] box.space.vinyl:select{366} - - [366, tuple 366] box.space.vinyl:select{367} - - [367, tuple 367] box.space.vinyl:select{368} - - [368, tuple 368] box.space.vinyl:select{369} - - [369, tuple 369] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{370, "tuple 370"} - - [370, tuple 370] box.space.memtx:insert{371, "tuple 371"} - - [371, tuple 371] box.space.memtx:insert{372, "tuple 372"} - - [372, tuple 372] box.space.memtx:insert{373, "tuple 373"} - - [373, tuple 373] box.space.memtx:insert{374, "tuple 374"} - - [374, tuple 374] box.space.vinyl:insert{370, "tuple 370"} - - [370, tuple 370] box.space.vinyl:insert{371, "tuple 371"} - - [371, tuple 371] box.space.vinyl:insert{372, "tuple 372"} - - [372, tuple 372] box.space.vinyl:insert{373, "tuple 373"} - - [373, tuple 373] box.space.vinyl:insert{374, "tuple 374"} - - [374, tuple 374] box.space.memtx:select{370} - - [370, tuple 370] box.space.memtx:select{371} - - [371, tuple 371] box.space.memtx:select{372} - - [372, tuple 372] box.space.memtx:select{373} - - [373, tuple 373] box.space.memtx:select{374} - - [374, tuple 374] box.space.vinyl:select{370} - - [370, tuple 370] box.space.vinyl:select{371} - - [371, tuple 371] box.space.vinyl:select{372} - - [372, tuple 372] box.space.vinyl:select{373} - - [373, tuple 373] box.space.vinyl:select{374} - - [374, tuple 374] box.space.memtx:insert{375, "tuple 375"} - - [375, tuple 375] box.space.memtx:insert{376, "tuple 376"} - - [376, tuple 376] box.space.memtx:insert{377, "tuple 377"} - - [377, tuple 
377] box.space.memtx:insert{378, "tuple 378"} - - [378, tuple 378] box.space.memtx:insert{379, "tuple 379"} - - [379, tuple 379] box.space.vinyl:insert{375, "tuple 375"} - - [375, tuple 375] box.space.vinyl:insert{376, "tuple 376"} - - [376, tuple 376] box.space.vinyl:insert{377, "tuple 377"} - - [377, tuple 377] box.space.vinyl:insert{378, "tuple 378"} - - [378, tuple 378] box.space.vinyl:insert{379, "tuple 379"} - - [379, tuple 379] box.space.memtx:select{375} - - [375, tuple 375] box.space.memtx:select{376} - - [376, tuple 376] box.space.memtx:select{377} - - [377, tuple 377] box.space.memtx:select{378} - - [378, tuple 378] box.space.memtx:select{379} - - [379, tuple 379] box.space.vinyl:select{375} - - [375, tuple 375] box.space.vinyl:select{376} - - [376, tuple 376] box.space.vinyl:select{377} - - [377, tuple 377] box.space.vinyl:select{378} - - [378, tuple 378] box.space.vinyl:select{379} - - [379, tuple 379] rollback servers configuration switch master to master box.cfg{replication=''} --- ... 
switch replica to replica test 19 iteration box.space.memtx:insert{380, "tuple 380"} - - [380, tuple 380] box.space.memtx:insert{381, "tuple 381"} - - [381, tuple 381] box.space.memtx:insert{382, "tuple 382"} - - [382, tuple 382] box.space.memtx:insert{383, "tuple 383"} - - [383, tuple 383] box.space.memtx:insert{384, "tuple 384"} - - [384, tuple 384] box.space.vinyl:insert{380, "tuple 380"} - - [380, tuple 380] box.space.vinyl:insert{381, "tuple 381"} - - [381, tuple 381] box.space.vinyl:insert{382, "tuple 382"} - - [382, tuple 382] box.space.vinyl:insert{383, "tuple 383"} - - [383, tuple 383] box.space.vinyl:insert{384, "tuple 384"} - - [384, tuple 384] box.space.memtx:select{380} - - [380, tuple 380] box.space.memtx:select{381} - - [381, tuple 381] box.space.memtx:select{382} - - [382, tuple 382] box.space.memtx:select{383} - - [383, tuple 383] box.space.memtx:select{384} - - [384, tuple 384] box.space.vinyl:select{380} - - [380, tuple 380] box.space.vinyl:select{381} - - [381, tuple 381] box.space.vinyl:select{382} - - [382, tuple 382] box.space.vinyl:select{383} - - [383, tuple 383] box.space.vinyl:select{384} - - [384, tuple 384] box.space.memtx:insert{385, "tuple 385"} - - [385, tuple 385] box.space.memtx:insert{386, "tuple 386"} - - [386, tuple 386] box.space.memtx:insert{387, "tuple 387"} - - [387, tuple 387] box.space.memtx:insert{388, "tuple 388"} - - [388, tuple 388] box.space.memtx:insert{389, "tuple 389"} - - [389, tuple 389] box.space.vinyl:insert{385, "tuple 385"} - - [385, tuple 385] box.space.vinyl:insert{386, "tuple 386"} - - [386, tuple 386] box.space.vinyl:insert{387, "tuple 387"} - - [387, tuple 387] box.space.vinyl:insert{388, "tuple 388"} - - [388, tuple 388] box.space.vinyl:insert{389, "tuple 389"} - - [389, tuple 389] box.space.memtx:select{385} - - [385, tuple 385] box.space.memtx:select{386} - - [386, tuple 386] box.space.memtx:select{387} - - [387, tuple 387] box.space.memtx:select{388} - - [388, tuple 388] box.space.memtx:select{389} - 
- [389, tuple 389] box.space.vinyl:select{385} - - [385, tuple 385] box.space.vinyl:select{386} - - [386, tuple 386] box.space.vinyl:select{387} - - [387, tuple 387] box.space.vinyl:select{388} - - [388, tuple 388] box.space.vinyl:select{389} - - [389, tuple 389] swap servers switch replica to master box.cfg{replication=''} --- ... switch master to replica box.space.memtx:insert{390, "tuple 390"} - - [390, tuple 390] box.space.memtx:insert{391, "tuple 391"} - - [391, tuple 391] box.space.memtx:insert{392, "tuple 392"} - - [392, tuple 392] box.space.memtx:insert{393, "tuple 393"} - - [393, tuple 393] box.space.memtx:insert{394, "tuple 394"} - - [394, tuple 394] box.space.vinyl:insert{390, "tuple 390"} - - [390, tuple 390] box.space.vinyl:insert{391, "tuple 391"} - - [391, tuple 391] box.space.vinyl:insert{392, "tuple 392"} - - [392, tuple 392] box.space.vinyl:insert{393, "tuple 393"} - - [393, tuple 393] box.space.vinyl:insert{394, "tuple 394"} - - [394, tuple 394] box.space.memtx:select{390} - - [390, tuple 390] box.space.memtx:select{391} - - [391, tuple 391] box.space.memtx:select{392} - - [392, tuple 392] box.space.memtx:select{393} - - [393, tuple 393] box.space.memtx:select{394} - - [394, tuple 394] box.space.vinyl:select{390} - - [390, tuple 390] box.space.vinyl:select{391} - - [391, tuple 391] box.space.vinyl:select{392} - - [392, tuple 392] box.space.vinyl:select{393} - - [393, tuple 393] box.space.vinyl:select{394} - - [394, tuple 394] box.space.memtx:insert{395, "tuple 395"} - - [395, tuple 395] box.space.memtx:insert{396, "tuple 396"} - - [396, tuple 396] box.space.memtx:insert{397, "tuple 397"} - - [397, tuple 397] box.space.memtx:insert{398, "tuple 398"} - - [398, tuple 398] box.space.memtx:insert{399, "tuple 399"} - - [399, tuple 399] box.space.vinyl:insert{395, "tuple 395"} - - [395, tuple 395] box.space.vinyl:insert{396, "tuple 396"} - - [396, tuple 396] box.space.vinyl:insert{397, "tuple 397"} - - [397, tuple 397] box.space.vinyl:insert{398, "tuple 
398"} - - [398, tuple 398] box.space.vinyl:insert{399, "tuple 399"} - - [399, tuple 399] box.space.memtx:select{395} - - [395, tuple 395] box.space.memtx:select{396} - - [396, tuple 396] box.space.memtx:select{397} - - [397, tuple 397] box.space.memtx:select{398} - - [398, tuple 398] box.space.memtx:select{399} - - [399, tuple 399] box.space.vinyl:select{395} - - [395, tuple 395] box.space.vinyl:select{396} - - [396, tuple 396] box.space.vinyl:select{397} - - [397, tuple 397] box.space.vinyl:select{398} - - [398, tuple 398] box.space.vinyl:select{399} - - [399, tuple 399] rollback servers configuration switch master to master box.cfg{replication=''} --- ... switch replica to replica tarantool_1.9.1.26.g63eb81e3c/test/replication-py/panic.lua0000664000000000000000000000033413306560010021713 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg({ listen = os.getenv("LISTEN"), memtx_memory = 107374182, force_recovery = true, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication-py/init_storage.test.py0000664000000000000000000000454313306560010024143 0ustar rootrootimport os import glob from lib.tarantool_server import TarantoolServer # master server master = server master_id = master.get_param('id') master.admin("box.schema.user.grant('guest', 'replication')") print '-------------------------------------------------------------' print 'gh-484: JOIN doesn\'t save data to snapshot with TREE index' print '-------------------------------------------------------------' master.admin("space = box.schema.space.create('test', {id = 42})") master.admin("index = space:create_index('primary', { type = 'tree'})") master.admin('for k = 1, 9 do space:insert{k, k*k} end') replica = TarantoolServer(server.ini) replica.script = 'replication-py/replica.lua' replica.vardir = server.vardir #os.path.join(server.vardir, 'replica') replica.rpl_master = master replica.deploy() replica.admin('box.space.test:select()') replica.restart() 
replica.admin('box.space.test:select()') replica.stop() replica.cleanup(True) print '-------------------------------------------------------------' print 'replica test 2 (must be ok)' print '-------------------------------------------------------------' master.restart() master.admin('for k = 10, 19 do box.space[42]:insert{k, k*k*k} end') master.admin("for k = 20, 29 do box.space[42]:upsert({k}, {}) end") lsn = master.get_lsn(master_id) replica = TarantoolServer(server.ini) replica.script = 'replication-py/replica.lua' replica.vardir = server.vardir #os.path.join(server.vardir, 'replica') replica.rpl_master = master replica.deploy() replica.admin('space = box.space.test'); replica.wait_lsn(master_id, lsn) for i in range(1, 20): replica.admin('space:get{%d}' % i) replica.stop() replica.cleanup(True) print '-------------------------------------------------------------' print 'reconnect on JOIN/SUBSCRIBE' print '-------------------------------------------------------------' server.stop() replica = TarantoolServer(server.ini) replica.script = 'replication/replica.lua' replica.vardir = server.vardir #os.path.join(server.vardir, 'replica') replica.rpl_master = master replica.deploy(wait=False) print 'waiting reconnect on JOIN...' server.start() replica.wait_until_started() print 'ok' replica.stop() server.stop() print 'waiting reconnect on SUBSCRIBE...' 
replica.start(wait=False) server.start() replica.wait_until_started() print 'ok' replica.stop() replica.cleanup(True) server.stop() server.deploy() tarantool_1.9.1.26.g63eb81e3c/test/replication-py/cluster.test.py0000664000000000000000000002571213306560010023136 0ustar rootrootimport os import sys import re import yaml import uuid import glob from lib.tarantool_server import TarantoolServer ## Get cluster uuid cluster_uuid = '' try: cluster_uuid = yaml.load(server.admin("box.space._schema:get('cluster')", silent = True))[0][1] uuid.UUID('{' + cluster_uuid + '}') print 'ok - cluster uuid' except Exception as e: print 'not ok - invalid cluster uuid', e server.iproto.reconnect() # re-connect with new permissions print '-------------------------------------------------------------' print ' gh-696: Check global READ permissions for replication' print '-------------------------------------------------------------' # Generate replica cluster UUID replica_uuid = str(uuid.uuid4()) ## Universal read permission is required to perform JOIN/SUBSCRIBE rows = list(server.iproto.py_con.join(replica_uuid)) print len(rows) == 1 and rows[0].return_message.find('Read access') >= 0 and \ 'ok' or 'not ok', '-', 'join without read permissions on universe' rows = list(server.iproto.py_con.subscribe(cluster_uuid, replica_uuid)) print len(rows) == 1 and rows[0].return_message.find('Read access') >= 0 and \ 'ok' or 'not ok', '-', 'subscribe without read permissions on universe' ## Write permission to space `_cluster` is required to perform JOIN server.admin("box.schema.user.grant('guest', 'read', 'universe')") server.iproto.reconnect() # re-connect with new permissions rows = list(server.iproto.py_con.join(replica_uuid)) print len(rows) == 1 and rows[0].return_message.find('Write access') >= 0 and \ 'ok' or 'not ok', '-', 'join without write permissions to _cluster' def check_join(msg): ok = True for resp in server.iproto.py_con.join(replica_uuid): if resp.completion_status != 0: print 'not 
ok', '-', msg, resp.return_message ok = False server.iproto.reconnect() # the only way to stop JOIN if not ok: return tuples = server.iproto.py_con.space('_cluster').select(replica_uuid, index = 1) if len(tuples) == 0: print 'not ok', '-', msg, 'missing entry in _cluster' return server_id = tuples[0][0] print 'ok', '-', msg return server_id ## JOIN with permissions server.admin("box.schema.user.grant('guest', 'write', 'space', '_cluster')") server.iproto.reconnect() # re-connect with new permissions server_id = check_join('join with granted permissions') server.iproto.py_con.space('_cluster').delete(server_id) # JOIN with granted role server.admin("box.schema.user.revoke('guest', 'read', 'universe')") server.admin("box.schema.user.revoke('guest', 'write', 'space', '_cluster')") server.admin("box.schema.user.grant('guest', 'replication')") server.iproto.reconnect() # re-connect with new permissions server_id = check_join('join with granted role') server.iproto.py_con.space('_cluster').delete(server_id) print '-------------------------------------------------------------' print 'gh-707: Master crashes on JOIN if it does not have snapshot files' print 'gh-480: If socket is closed while JOIN, replica wont reconnect' print '-------------------------------------------------------------' data_dir = os.path.join(server.vardir, server.name) for k in glob.glob(os.path.join(data_dir, '*.snap')): os.unlink(k) # remember the number of servers in _cluster table server_count = len(server.iproto.py_con.space('_cluster').select(())) rows = list(server.iproto.py_con.join(replica_uuid)) print len(rows) > 0 and rows[-1].return_message.find('.snap') >= 0 and \ 'ok' or 'not ok', '-', 'join without snapshots' res = server.iproto.py_con.space('_cluster').select(()) if server_count <= len(res): print 'ok - _cluster did not change after unsuccessful JOIN' else: print 'not ok - _cluster did change after unsuccessful JOIN' print res server.admin("box.schema.user.revoke('guest', 
'replication')") server.admin('box.snapshot()') print '-------------------------------------------------------------' print 'gh-434: Assertion if replace _cluster tuple for local server' print '-------------------------------------------------------------' master_uuid = server.get_param('uuid') sys.stdout.push_filter(master_uuid, '') # Invalid UUID server.admin("box.space._cluster:replace{1, require('uuid').NULL:str()}") # Update of UUID is not OK server.admin("box.space._cluster:replace{1, require('uuid').str()}") # Update of tail is OK server.admin("box.space._cluster:update(1, {{'=', 3, 'test'}})") print '-------------------------------------------------------------' print 'gh-1140: Assertion if replace _cluster tuple for remote server' print '-------------------------------------------------------------' # Test that insert is OK new_uuid = '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95' server.admin("box.space._cluster:insert{{5, '{0}'}}".format(new_uuid)) server.admin("box.info.vclock[5] == nil") # Replace with the same UUID is OK server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid)) # Replace with a new UUID is not OK new_uuid = 'a48a19a3-26c0-4f8c-a5b5-77377bab389b' server.admin("box.space._cluster:replace{{5, '{0}'}}".format(new_uuid)) # Update of tail is OK server.admin("box.space._cluster:update(5, {{'=', 3, 'test'}})") # Delete is OK server.admin("box.space._cluster:delete(5)") # gh-1219: LSN must not be removed from vclock on unregister server.admin("box.info.vclock[5] == nil") # Cleanup server.stop() server.deploy() print '-------------------------------------------------------------' print 'Start a new replica and check box.info on the start' print '-------------------------------------------------------------' # master server master = server master_id = master.get_param('id') master.admin("box.schema.user.grant('guest', 'replication')") replica = TarantoolServer(server.ini) replica.script = 'replication-py/replica.lua' replica.vardir = 
server.vardir replica.rpl_master = master replica.deploy() replica_id = replica.get_param('id') replica_uuid = replica.get_param('uuid') sys.stdout.push_filter(replica_uuid, '') replica.admin('box.info.id == %d' % replica_id) replica.admin('not box.info.ro') replica.admin('box.info.lsn == 0') replica.admin('box.info.vclock[%d] == nil' % replica_id) print '-------------------------------------------------------------' print 'Modify data to bump LSN and check box.info' print '-------------------------------------------------------------' replica.admin('box.space._schema:insert{"test", 48}') replica.admin('box.info.lsn == 1') replica.admin('box.info.vclock[%d] == 1' % replica_id) print '-------------------------------------------------------------' print 'Connect master to replica' print '-------------------------------------------------------------' replication_source = yaml.load(replica.admin('box.cfg.listen', silent = True))[0] sys.stdout.push_filter(replication_source, '') master.admin("box.cfg{ replication_source = '%s' }" % replication_source) master.wait_lsn(replica_id, replica.get_lsn(replica_id)) print '-------------------------------------------------------------' print 'Disconnect replica from master' print '-------------------------------------------------------------' replica.admin('box.cfg { replication_source = "" }') print '-------------------------------------------------------------' print 'Unregister replica' print '-------------------------------------------------------------' master.admin('box.space._cluster:delete{%d} ~= nil' % replica_id) # gh-1219: LSN must not be removed from vclock on unregister master.admin('box.info.vclock[%d] == 1' % replica_id) print '-------------------------------------------------------------' print 'Modify data to bump LSN on replica' print '-------------------------------------------------------------' replica.admin('box.space._schema:insert{"tost", 49}') replica.admin('box.info.lsn == 2') 
replica.admin('box.info.vclock[%d] == 2' % replica_id) print '-------------------------------------------------------------' print 'Master must not crash then receives orphan rows from replica' print '-------------------------------------------------------------' replication_source = yaml.load(replica.admin('box.cfg.listen', silent = True))[0] sys.stdout.push_filter(replication_source, '') master.admin("box.cfg{ replication = '%s' }" % replication_source) master.wait_lsn(replica_id, replica.get_lsn(replica_id)) master.admin('box.info.vclock[%d] == 2' % replica_id) master.admin("box.cfg{ replication = '' }") replica.stop() replica.cleanup(True) print '-------------------------------------------------------------' print 'Start a new replica and check that server_id, LSN is re-used' print '-------------------------------------------------------------' # # gh-1219: Proper removal of servers with non-zero LSN from _cluster # # Snapshot is required. Otherwise a relay will skip records made by previous # replica with the re-used id. master.admin("box.snapshot()") master.admin('box.info.vclock[%d] == 2' % replica_id) replica = TarantoolServer(server.ini) replica.script = 'replication-py/replica.lua' replica.vardir = server.vardir replica.rpl_master = master replica.deploy() replica.wait_lsn(master_id, master.get_lsn(master_id)) # Check that replica_id was re-used replica.admin('box.info.id == %d' % replica_id) replica.admin('not box.info.ro') # All records were succesfully recovered. # Replica should have the same vclock as master. 
master.admin('box.info.vclock[%d] == 2' % replica_id) replica.admin('box.info.vclock[%d] == 2' % replica_id) replica.stop() replica.cleanup(True) print '-------------------------------------------------------------' print 'JOIN replica to read-only master' print '-------------------------------------------------------------' # master server master = server master.admin('box.cfg { read_only = true }') #gh-1230 Assertion vclock_has on attempt to JOIN read-only master failed = TarantoolServer(server.ini) failed.script = 'replication-py/failed.lua' failed.vardir = server.vardir failed.rpl_master = master failed.name = "failed" failed.crash_expected = True try: failed.deploy() except Exception as e: line = "ER_READONLY" if failed.logfile_pos.seek_once(line) >= 0: print "'%s' exists in server log" % line master.admin('box.cfg { read_only = false }') print '-------------------------------------------------------------' print 'JOIN replica with different replica set UUID' print '-------------------------------------------------------------' failed = TarantoolServer(server.ini) failed.script = 'replication-py/uuid_mismatch.lua' failed.vardir = server.vardir failed.rpl_master = master failed.name = "uuid_mismatch" failed.crash_expected = True try: failed.deploy() except Exception as e: line = "ER_REPLICASET_UUID_MISMATCH" if failed.logfile_pos.seek_once(line) >= 0: print "'%s' exists in server log" % line failed.cleanup() print '-------------------------------------------------------------' print 'Cleanup' print '-------------------------------------------------------------' # Cleanup sys.stdout.pop_filter() master.admin("box.schema.user.revoke('guest', 'replication')") tarantool_1.9.1.26.g63eb81e3c/test/replication-py/multi.test.py0000664000000000000000000000625213306560010022605 0ustar rootrootimport sys import os from lib.tarantool_server import TarantoolServer import yaml REPLICA_N = 3 ROW_N = REPLICA_N * 20 ## # master server master = server master.admin("fiber = 
require('fiber')") master.admin("box.schema.user.grant('guest', 'replication')") master.admin("box.schema.user.grant('guest', 'execute', 'universe')") print '----------------------------------------------------------------------' print 'Bootstrap replicas' print '----------------------------------------------------------------------' # Start replicas master.id = master.get_param('id') cluster = [ master ] for i in range(REPLICA_N - 1): server = TarantoolServer(server.ini) server.script = 'replication-py/replica.lua' server.vardir = os.path.join(server.vardir, 'replica', str(master.id + i)) server.rpl_master = master server.deploy() # Wait replica to fully bootstrap. # Otherwise can get ACCESS_DENIED error. cluster.append(server) # Make a list of servers sources = [] for server in cluster: sources.append(yaml.load(server.admin('box.cfg.listen', silent = True))[0]) server.id = server.get_param('id') print 'done' print '----------------------------------------------------------------------' print 'Make a full mesh' print '----------------------------------------------------------------------' # Connect each server to each other to make full mesh for server in cluster: server.iproto.py_con.eval("box.cfg { replication = ... }", [sources]) # Wait connections to establish for server in cluster: for server2 in cluster: server.iproto.py_con.eval(""" while #box.info.vclock[...] 
~= nil do fiber.sleep(0.01) end;""", server2.id) print 'server', server.id, "connected" server.admin("box.info.vclock") print 'done' print '----------------------------------------------------------------------' print 'Test inserts' print '----------------------------------------------------------------------' print 'Create a test space' master.admin("_ = box.schema.space.create('test')") master.admin("_ = box.space.test:create_index('primary')") master_lsn = master.get_lsn(master.id) # Wait changes to propagate to replicas for server in cluster: server.wait_lsn(master.id, master_lsn) print 'server', server.id, 'is ok' print print 'Insert records' for i in range(ROW_N): server = cluster[i % REPLICA_N] server.admin("box.space.test:insert{%d, %s}" % (i, server.id), silent = True) print 'inserted %d records' % ROW_N print print 'Synchronize' for server1 in cluster: for server2 in cluster: server1.wait_lsn(server2.id, server2.get_lsn(server2.id)) print 'server', server.id, 'done' print 'done' print print 'Check data' for server in cluster: cnt = yaml.load(server.admin("box.space.test:len()", silent = True))[0] print 'server', server.id, 'is', cnt == ROW_N and 'ok' or 'not ok' print 'Done' print print print '----------------------------------------------------------------------' print 'Cleanup' print '----------------------------------------------------------------------' for server in cluster: server.stop() print 'server', server.id, 'done' print master.cleanup() master.deploy() tarantool_1.9.1.26.g63eb81e3c/test/replication-py/master.lua0000664000000000000000000000027413306560010022117 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg({ listen = os.getenv("LISTEN"), memtx_memory = 107374182, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication-py/init_storage.result0000664000000000000000000000345313306560010024052 0ustar rootrootbox.schema.user.grant('guest', 'replication') --- ... 
------------------------------------------------------------- gh-484: JOIN doesn't save data to snapshot with TREE index ------------------------------------------------------------- space = box.schema.space.create('test', {id = 42}) --- ... index = space:create_index('primary', { type = 'tree'}) --- ... for k = 1, 9 do space:insert{k, k*k} end --- ... box.space.test:select() --- - - [1, 1] - [2, 4] - [3, 9] - [4, 16] - [5, 25] - [6, 36] - [7, 49] - [8, 64] - [9, 81] ... box.space.test:select() --- - - [1, 1] - [2, 4] - [3, 9] - [4, 16] - [5, 25] - [6, 36] - [7, 49] - [8, 64] - [9, 81] ... ------------------------------------------------------------- replica test 2 (must be ok) ------------------------------------------------------------- for k = 10, 19 do box.space[42]:insert{k, k*k*k} end --- ... for k = 20, 29 do box.space[42]:upsert({k}, {}) end --- ... space = box.space.test --- ... space:get{1} --- - [1, 1] ... space:get{2} --- - [2, 4] ... space:get{3} --- - [3, 9] ... space:get{4} --- - [4, 16] ... space:get{5} --- - [5, 25] ... space:get{6} --- - [6, 36] ... space:get{7} --- - [7, 49] ... space:get{8} --- - [8, 64] ... space:get{9} --- - [9, 81] ... space:get{10} --- - [10, 1000] ... space:get{11} --- - [11, 1331] ... space:get{12} --- - [12, 1728] ... space:get{13} --- - [13, 2197] ... space:get{14} --- - [14, 2744] ... space:get{15} --- - [15, 3375] ... space:get{16} --- - [16, 4096] ... space:get{17} --- - [17, 4913] ... space:get{18} --- - [18, 5832] ... space:get{19} --- - [19, 6859] ... ------------------------------------------------------------- reconnect on JOIN/SUBSCRIBE ------------------------------------------------------------- waiting reconnect on JOIN... ok waiting reconnect on SUBSCRIBE... 
ok tarantool_1.9.1.26.g63eb81e3c/test/replication-py/uuid_mismatch.lua0000664000000000000000000000036613306560010023461 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), replicaset_uuid = "12345678-abcd-abcd-abcd-123456789000", }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/replication-py/cluster.result0000664000000000000000000001256213306560010023045 0ustar rootrootok - cluster uuid ------------------------------------------------------------- gh-696: Check global READ permissions for replication ------------------------------------------------------------- ok - join without read permissions on universe ok - subscribe without read permissions on universe box.schema.user.grant('guest', 'read', 'universe') --- ... ok - join without write permissions to _cluster box.schema.user.grant('guest', 'write', 'space', '_cluster') --- ... ok - join with granted permissions box.schema.user.revoke('guest', 'read', 'universe') --- ... box.schema.user.revoke('guest', 'write', 'space', '_cluster') --- ... box.schema.user.grant('guest', 'replication') --- ... ok - join with granted role ------------------------------------------------------------- gh-707: Master crashes on JOIN if it does not have snapshot files gh-480: If socket is closed while JOIN, replica wont reconnect ------------------------------------------------------------- ok - join without snapshots ok - _cluster did not change after unsuccessful JOIN box.schema.user.revoke('guest', 'replication') --- ... box.snapshot() --- - ok ... ------------------------------------------------------------- gh-434: Assertion if replace _cluster tuple for local server ------------------------------------------------------------- box.space._cluster:replace{1, require('uuid').NULL:str()} --- - error: The local instance id 1 is read-only ... 
box.space._cluster:replace{1, require('uuid').str()} --- - error: The local instance id 1 is read-only ... box.space._cluster:update(1, {{'=', 3, 'test'}}) --- - error: The local instance id 1 is read-only ... ------------------------------------------------------------- gh-1140: Assertion if replace _cluster tuple for remote server ------------------------------------------------------------- box.space._cluster:insert{5, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95'} --- - [5, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95'] ... box.info.vclock[5] == nil --- - true ... box.space._cluster:replace{5, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95'} --- - [5, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95'] ... box.space._cluster:replace{5, 'a48a19a3-26c0-4f8c-a5b5-77377bab389b'} --- - error: Space _cluster does not support updates of instance uuid ... box.space._cluster:update(5, {{'=', 3, 'test'}}) --- - [5, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95', 'test'] ... box.space._cluster:delete(5) --- - [5, '0d5bd431-7f3e-4695-a5c2-82de0a9cbc95', 'test'] ... box.info.vclock[5] == nil --- - true ... ------------------------------------------------------------- Start a new replica and check box.info on the start ------------------------------------------------------------- box.schema.user.grant('guest', 'replication') --- ... box.info.id == 2 --- - true ... not box.info.ro --- - true ... box.info.lsn == 0 --- - true ... box.info.vclock[2] == nil --- - true ... ------------------------------------------------------------- Modify data to bump LSN and check box.info ------------------------------------------------------------- box.space._schema:insert{"test", 48} --- - ['test', 48] ... box.info.lsn == 1 --- - true ... box.info.vclock[2] == 1 --- - true ... ------------------------------------------------------------- Connect master to replica ------------------------------------------------------------- box.cfg{ replication_source = '' } --- ... 
------------------------------------------------------------- Disconnect replica from master ------------------------------------------------------------- box.cfg { replication_source = "" } --- ... ------------------------------------------------------------- Unregister replica ------------------------------------------------------------- box.space._cluster:delete{2} ~= nil --- - true ... box.info.vclock[2] == 1 --- - true ... ------------------------------------------------------------- Modify data to bump LSN on replica ------------------------------------------------------------- box.space._schema:insert{"tost", 49} --- - ['tost', 49] ... box.info.lsn == 2 --- - true ... box.info.vclock[2] == 2 --- - true ... ------------------------------------------------------------- Master must not crash then receives orphan rows from replica ------------------------------------------------------------- box.cfg{ replication = '' } --- ... box.info.vclock[2] == 2 --- - true ... box.cfg{ replication = '' } --- ... ------------------------------------------------------------- Start a new replica and check that server_id, LSN is re-used ------------------------------------------------------------- box.snapshot() --- - ok ... box.info.vclock[2] == 2 --- - true ... box.info.id == 2 --- - true ... not box.info.ro --- - true ... box.info.vclock[2] == 2 --- - true ... box.info.vclock[2] == 2 --- - true ... ------------------------------------------------------------- JOIN replica to read-only master ------------------------------------------------------------- box.cfg { read_only = true } --- ... 'ER_READONLY' exists in server log box.cfg { read_only = false } --- ... 
------------------------------------------------------------- JOIN replica with different replica set UUID ------------------------------------------------------------- 'ER_REPLICASET_UUID_MISMATCH' exists in server log ------------------------------------------------------------- Cleanup ------------------------------------------------------------- box.schema.user.revoke('guest', 'replication') --- ... tarantool_1.9.1.26.g63eb81e3c/test/.tarantoolctl0000664000000000000000000000045213306560010017667 0ustar rootroot-- Options for test-run tarantoolctl local workdir = os.getenv('TEST_WORKDIR') default_cfg = { pid_file = workdir, wal_dir = workdir, memtx_dir = workdir, vinyl_dir = workdir, log = workdir, background = false, } instance_dir = workdir -- vim: set ft=lua : tarantool_1.9.1.26.g63eb81e3c/test/box/0000775000000000000000000000000013306565107015763 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/box/tuple_bench.test.lua0000664000000000000000000000164313306560010021724 0ustar rootrootbuild_path = os.getenv("BUILDDIR") package.cpath = build_path..'/test/box/?.so;'..build_path..'/test/box/?.dylib;'..package.cpath net = require('net.box') c = net:new(os.getenv("LISTEN")) box.schema.func.create('tuple_bench', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'tuple_bench') space = box.schema.space.create('tester') key_parts = {1, 'unsigned', 2, 'string'} _ = space:create_index('primary', {type = 'TREE', parts = key_parts}) box.schema.user.grant('guest', 'read,write', 'space', 'tester') box.space.tester:insert({1, "abc", 100}) box.space.tester:insert({2, "bcd", 200}) box.space.tester:insert({3, "ccd", 200}) prof = require('gperftools.cpu') prof.start('tuple.prof') key_types = {} for i = 1, #key_parts, 2 do table.insert(key_types, key_parts[i + 1]) end c:call('tuple_bench', key_types) prof.flush() prof.stop() box.schema.func.drop("tuple_bench") box.space.tester:drop() 
tarantool_1.9.1.26.g63eb81e3c/test/box/function1.test.lua0000664000000000000000000000735713306560010021352 0ustar rootrootbuild_path = os.getenv("BUILDDIR") package.cpath = build_path..'/test/box/?.so;'..build_path..'/test/box/?.dylib;'..package.cpath log = require('log') net = require('net.box') c = net.connect(os.getenv("LISTEN")) box.schema.func.create('function1', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'function1') _ = box.schema.space.create('test') _ = box.space.test:create_index('primary') box.schema.user.grant('guest', 'read,write', 'space', 'test') c:call('function1') box.schema.func.drop("function1") box.schema.func.create('function1.args', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'function1.args') c:call('function1.args') c:call('function1.args', { "xx" }) c:call('function1.args', { 15 }) box.schema.func.drop("function1.args") box.schema.func.create('function1.multi_inc', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'function1.multi_inc') c:call('function1.multi_inc') box.space.test:select{} c:call('function1.multi_inc', { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }) box.space.test:select{} c:call('function1.multi_inc', { 2, 4, 6, 8, 10 }) box.space.test:select{} c:call('function1.multi_inc', { 0, 2, 4 }) box.space.test:select{} box.schema.func.drop("function1.multi_inc") box.schema.func.create('function1.errors', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'function1.errors') c:call('function1.errors') box.schema.func.drop("function1.errors") box.schema.func.create('xxx', {language = 'invalid'}) -- language normalization function func_lang(name) return (box.space._func.index[2]:select{name}[1] or {})[5] end box.schema.func.create('f11'), func_lang('f11') box.schema.func.create('f12', {language = 'Lua'}), func_lang('f12') box.schema.func.create('f13', {language = 'lua'}), func_lang('f13') box.schema.func.create('f14', {language = 'lUa'}), 
func_lang('f14') box.schema.func.create('f15', {language = 'c'}), func_lang('f15') box.schema.func.create('f16', {language = 'C'}), func_lang('f16') box.schema.func.drop("f11") box.schema.func.drop("f12") box.schema.func.drop("f13") box.schema.func.drop("f14") box.schema.func.drop("f15") box.schema.func.drop("f16") box.space.test:drop() -- Missing shared library name = 'unkownmod.unknownfunc' box.schema.func.create(name, {language = 'C'}) box.schema.user.grant('guest', 'execute', 'function', name) c:call(name) box.schema.func.drop(name) -- Drop function while executing gh-910 box.schema.func.create('function1.test_yield', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'function1.test_yield') s = box.schema.space.create('test_yield') _ = s:create_index('pk') box.schema.user.grant('guest', 'read,write', 'space', 'test_yield') fiber = require('fiber') ch = fiber.channel(1) _ = fiber.create(function() c:call('function1.test_yield') ch:put(true) end) while s:get({1}) == nil do fiber.yield(0.0001) end box.schema.func.drop('function1.test_yield') ch:get() s:drop() -- gh-2914: check identifier constraints. test_run = require('test_run').new() identifier = require("identifier") test_run:cmd("setopt delimiter ';'") -- -- '.' in func name is used to point out path therefore '.' in name -- itself is prohibited -- -- identifier.run_test( function (identifier) if identifier == "." then return end box.schema.func.create(identifier, {language = "lua"}) box.schema.user.grant('guest', 'execute', 'function', identifier) rawset(_G, identifier, function () return 1 end) local res = pcall(c.call, c, identifier) if c:call(identifier) ~= 1 then error("Should not fire") end rawset(_G, identifier, nil) end, function (identifier) if identifier == "." 
then return end box.schema.func.drop(identifier) end ); test_run:cmd("setopt delimiter ''"); c:close() tarantool_1.9.1.26.g63eb81e3c/test/box/reconfigure.result0000664000000000000000000000356513306565107021544 0ustar rootroottoo_long_threshold_default = box.cfg.too_long_threshold --- ... io_collect_interval_default = box.cfg.io_collect_interval --- ... box.cfg.too_long_threshold --- - 0.5 ... -- good box.cfg{too_long_threshold=0.2} --- ... box.cfg.too_long_threshold --- - 0.2 ... -- good box.cfg{snap_io_rate_limit=10} --- ... box.cfg.snap_io_rate_limit --- - 10 ... box.cfg.io_collect_interval --- - null ... box.cfg{io_collect_interval=0.001} --- ... box.cfg.io_collect_interval --- - 0.001 ... -- A test case for http://bugs.launchpad.net/bugs/712447: -- Valgrind reports use of not initialized memory after 'reload -- configuration' -- space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary') --- ... space:insert{1, 'tuple'} --- - [1, 'tuple'] ... box.snapshot() --- - ok ... box.cfg{} --- ... space:insert{2, 'tuple2'} --- - [2, 'tuple2'] ... box.snapshot() --- - ok ... space:insert{3, 'tuple3'} --- - [3, 'tuple3'] ... box.snapshot() --- - ok ... -- A test case for https://github.com/tarantool/tarantool/issues/112: -- Tarantool crashes with SIGSEGV during reload configuration -- -- log level box.cfg{log_level=5} --- ... -- constants box.cfg{wal_dir="dynamic"} --- - error: Can't set option 'wal_dir' dynamically ... box.cfg{memtx_dir="dynamic"} --- - error: Can't set option 'memtx_dir' dynamically ... box.cfg{log="new logger"} --- - error: Can't set option 'log' dynamically ... -- bad1 box.cfg{memtx_memory=53687091} --- - error: Can't set option 'memtx_memory' dynamically ... box.cfg.memtx_memory --- - 107374182 ... space:drop() --- ... box.cfg{snap_io_rate_limit=0} --- ... box.cfg{io_collect_interval=0} --- ... box.cfg{too_long_threshold=0.5} --- ... box.cfg.snap_io_rate_limit = nil --- ... box.cfg.io_collect_interval = nil --- ... 
box.cfg { too_long_threshold = too_long_threshold_default } --- ... box.cfg { io_collect_interval = io_collect_interval_default } --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/crossjoin.result0000664000000000000000000000337213306560010021225 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'tree' }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function crossjoin(space0, space1, limit) local result = {} for _,v0 in space0:pairs() do for _,v1 in space1:pairs() do if limit <= 0 then return result end local newtuple = v0:totable() for _, v in v1:pairs() do table.insert(newtuple, v) end table.insert(result, newtuple) limit = limit - 1 end end return result end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... crossjoin(space, space, 0) --- - [] ... crossjoin(space, space, 10000) --- - [] ... space:insert{1} --- - [1] ... crossjoin(space, space, 10000) --- - - [1, 1] ... space:insert{2} --- - [2] ... crossjoin(space, space, 10000) --- - - [1, 1] - [1, 2] - [2, 1] - [2, 2] ... space:insert{3, 'hello'} --- - [3, 'hello'] ... crossjoin(space, space, 10000) --- - - [1, 1] - [1, 2] - [1, 3, 'hello'] - [2, 1] - [2, 2] - [2, 3, 'hello'] - [3, 'hello', 1] - [3, 'hello', 2] - [3, 'hello', 3, 'hello'] ... space:insert{4, 'world'} --- - [4, 'world'] ... space[0]:insert{5, 'hello world'} --- - error: '[string "return space[0]:insert{5, ''hello world''} "]:1: attempt to index a nil value' ... crossjoin(space, space, 10000) --- - - [1, 1] - [1, 2] - [1, 3, 'hello'] - [1, 4, 'world'] - [2, 1] - [2, 2] - [2, 3, 'hello'] - [2, 4, 'world'] - [3, 'hello', 1] - [3, 'hello', 2] - [3, 'hello', 3, 'hello'] - [3, 'hello', 4, 'world'] - [4, 'world', 1] - [4, 'world', 2] - [4, 'world', 3, 'hello'] - [4, 'world', 4, 'world'] ... space:drop() --- ... crossjoin = nil --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/access_sysview.test.lua0000664000000000000000000001574713306565107022514 0ustar rootrootsession = box.session -- -- Basic tests -- #box.space._vspace:select{} == #box.space._space:select{} #box.space._vindex:select{} == #box.space._index:select{} #box.space._vuser:select{} == #box.space._user:select{} #box.space._vpriv:select{} == #box.space._priv:select{} #box.space._vfunc:select{} == #box.space._func:select{} -- gh-1042: bad error message for _vspace, _vuser, _vindex, etc. -- Space '_vspace' (sysview) does not support replace box.space._vspace:replace({1, 1, 'test'}) box.space._vspace:delete(1) box.space._vspace:update(1, {{'=', 2, 48}}) -- error: Index 'primary' of space '_vspace' (sysview) does not support xxx() box.space._vspace.index.primary:len() box.space._vspace.index.primary:random(48) session.su('guest') -- -- _vspace + _vindex -- -- _vXXXX views are visible for 'public' role #box.space._vspace.index[2]:select('_vspace') ~= 0 #box.space._vspace.index[2]:select('_vindex') ~= 0 #box.space._vspace.index[2]:select('_vuser') ~= 0 #box.space._vspace.index[2]:select('_vfunc') ~= 0 #box.space._vspace.index[2]:select('_vpriv') ~= 0 #box.space._vindex:select(box.space._vspace.id) > 0 #box.space._vindex:select(box.space._vindex.id) > 0 #box.space._vindex:select(box.space._vuser.id) > 0 #box.space._vindex:select(box.space._vfunc.id) > 0 #box.space._vindex:select(box.space._vpriv.id) > 0 box.session.su('admin') box.schema.user.revoke('guest', 'public') box.session.su('guest') #box.space._vspace:select{} #box.space._vindex:select{} #box.space._vuser:select{} #box.space._vpriv:select{} #box.space._vfunc:select{} box.session.su('admin') box.schema.user.grant('guest', 'public') box.session.su('guest') #box.space._vspace:select{} #box.space._vindex:select{} box.session.su('admin') s = box.schema.space.create('test') s = box.space.test:create_index('primary') box.schema.role.grant('public', 'read', 'space', 'test') 
box.session.su('guest') box.space._vspace.index[2]:get('test') ~= nil #box.space._vindex:select(box.space.test.id) == 1 box.session.su('admin') box.schema.role.revoke('public', 'read', 'space', 'test') box.session.su('guest') box.space._vspace.index[2]:get('test') == nil #box.space._vindex:select(box.space.test.id) == 0 box.session.su('admin') box.schema.user.grant('guest', 'read', 'space', 'test') box.session.su('guest') box.space._vspace.index[2]:get('test') ~= nil #box.space._vindex:select(box.space.test.id) == 1 box.session.su('admin') box.schema.user.revoke('guest', 'read', 'space', 'test') box.session.su('guest') box.space._vspace.index[2]:get('test') == nil #box.space._vindex:select(box.space.test.id) == 0 -- check universe permissions box.session.su('admin') box.schema.user.grant('guest', 'read', 'universe') box.session.su('guest') #box.space._vspace:select{} #box.space._vindex:select{} #box.space._vuser:select{} #box.space._vpriv:select{} #box.space._vfunc:select{} box.session.su('admin') box.schema.user.revoke('guest', 'read', 'universe') box.schema.user.grant('guest', 'write', 'universe') box.session.su('guest') #box.space._vindex:select{} #box.space._vuser:select{} #box.space._vpriv:select{} #box.space._vfunc:select{} box.session.su('admin') box.schema.user.revoke('guest', 'write', 'universe') box.space.test:drop() box.session.su('guest') -- read access to original space also allow to read a view box.session.su('admin') space_cnt = #box.space._space:select{} index_cnt = #box.space._index:select{} box.schema.user.grant('guest', 'read', 'space', '_space') box.schema.user.grant('guest', 'read', 'space', '_index') box.session.su('guest') #box.space._vspace:select{} == space_cnt #box.space._vindex:select{} == index_cnt box.session.su('admin') box.schema.user.revoke('guest', 'read', 'space', '_space') box.schema.user.revoke('guest', 'read', 'space', '_index') box.session.su('guest') #box.space._vspace:select{} < space_cnt #box.space._vindex:select{} < 
index_cnt -- -- _vuser -- -- a guest user can read information about itself t = box.space._vuser:select(); return #t == 1 and t[1][3] == 'guest' -- read access to original space also allow to read a view box.session.su('admin') user_cnt = #box.space._user:select{} box.schema.user.grant('guest', 'read', 'space', '_user') box.session.su('guest') #box.space._vuser:select{} == user_cnt box.session.su('admin') box.schema.user.revoke('guest', 'read', 'space', '_user') box.session.su('guest') #box.space._vuser:select{} < user_cnt box.session.su('admin') box.schema.user.grant('guest', 'read,write,create', 'universe') box.session.su('guest') box.schema.user.create('tester') box.session.su('admin') box.schema.user.revoke('guest', 'read,write,create', 'universe') box.session.su('guest') #box.space._vuser.index[2]:select('tester') > 0 box.session.su('admin') box.schema.user.drop('tester') box.session.su('guest') -- -- _vpriv -- -- a guest user can see granted 'public' role box.space._vpriv.index[2]:select('role')[1][2] == session.uid() -- read access to original space also allow to read a view box.session.su('admin') box.schema.user.grant('guest', 'read', 'space', '_priv') priv_cnt = #box.space._priv:select{} box.session.su('guest') #box.space._vpriv:select{} == priv_cnt box.session.su('admin') box.schema.user.revoke('guest', 'read', 'space', '_priv') box.session.su('guest') cnt = #box.space._vpriv:select{} cnt < priv_cnt box.session.su('admin') box.schema.user.grant('guest', 'read,write', 'space', '_schema') box.session.su('guest') #box.space._vpriv:select{} == cnt + 1 box.session.su('admin') box.schema.user.revoke('guest', 'read,write', 'space', '_schema') box.session.su('guest') #box.space._vpriv:select{} == cnt -- -- _vfunc -- box.session.su('admin') box.schema.func.create('test') -- read access to original space also allow to read a view func_cnt = #box.space._func:select{} box.schema.user.grant('guest', 'read', 'space', '_func') box.session.su('guest') 
#box.space._vfunc:select{} == func_cnt box.session.su('admin') box.schema.user.revoke('guest', 'read', 'space', '_func') box.session.su('guest') cnt = #box.space._vfunc:select{} cnt < func_cnt box.session.su('admin') box.schema.user.grant('guest', 'execute', 'function', 'test') box.session.su('guest') #box.space._vfunc:select{} = cnt + 1 box.session.su('admin') box.schema.user.revoke('guest', 'execute', 'function', 'test') box.session.su('guest') #box.space._vfunc:select{} == cnt box.session.su('admin') box.schema.user.grant('guest', 'execute', 'universe') box.session.su('guest') #box.space._vfunc:select{} == cnt + 1 box.session.su('admin') box.schema.user.revoke('guest', 'execute', 'universe') box.schema.func.drop('test') box.session.su('guest') #box.space._vfunc:select{} == cnt -- -- view:alter() tests -- session.su('admin') box.space._vspace.index[1]:alter({parts = { 2, 'string' }}) box.space._vspace.index[1]:select('xxx') box.space._vspace.index[1]:select(1) box.space._vspace.index[1]:alter({parts = { 2, 'unsigned' }}) box.space._space.index[1]:drop() box.space._vspace.index[1]:select(1) s = box.space._space:create_index('owner', {parts = { 2, 'unsigned' }, id = 1, unique = false}) #box.space._vspace.index[1]:select(1) > 0 session = nil tarantool_1.9.1.26.g63eb81e3c/test/box/reload1.c0000664000000000000000000000454113306560010017446 0ustar rootroot#include "module.h" #include #include int foo(box_function_ctx_t *ctx, const char *args, const char *args_end) { static const char *SPACE_TEST_NAME = "test"; static const char *INDEX_NAME = "primary"; uint32_t space_test_id = box_space_id_by_name(SPACE_TEST_NAME, strlen(SPACE_TEST_NAME)); uint32_t index_id = box_index_id_by_name(space_test_id, INDEX_NAME, strlen(INDEX_NAME)); if (space_test_id == BOX_ID_NIL || index_id == BOX_ID_NIL) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't find index %s in space %s", INDEX_NAME, SPACE_TEST_NAME); } mp_decode_array(&args); uint32_t num = mp_decode_uint(&args); char 
buf[16]; char *end = buf; end = mp_encode_array(end, 1); end = mp_encode_uint(end, num); if (box_insert(space_test_id, buf, end, NULL) < 0) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't insert in space %s", SPACE_TEST_NAME); } end = buf; end = mp_encode_array(end, 1); end = mp_encode_uint(end, 0); while (box_index_count(space_test_id, index_id, ITER_EQ, buf, end) <= 0) { fiber_sleep(0.001); } end = buf; end = mp_encode_array(end, 1); end = mp_encode_int(end, -((int)num)); if (box_insert(space_test_id, buf, end, NULL) < 0) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't insert in space %s", SPACE_TEST_NAME); } return 0; } int test_reload(box_function_ctx_t *ctx, const char *args, const char *args_end) { static const char *SPACE_NAME = "test_reload"; uint32_t space_id = box_space_id_by_name(SPACE_NAME, strlen(SPACE_NAME)); if (space_id == BOX_ID_NIL) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't find space %s", SPACE_NAME); } assert(!box_txn()); box_txn_begin(); assert(box_txn()); /* Replace value */ char tuple_buf[16]; char *tuple_end = tuple_buf; tuple_end = mp_encode_array(tuple_end, 2); tuple_end = mp_encode_uint(tuple_end, 1); tuple_end = mp_encode_uint(tuple_end, 2); /* counter */ assert(tuple_end <= tuple_buf + sizeof(tuple_buf)); if (box_replace(space_id, tuple_buf, tuple_end, NULL) != 0) return -1; box_txn_commit(); assert(!box_txn()); fiber_sleep(0.001); tuple_end = tuple_buf; tuple_end = mp_encode_array(tuple_end, 1); tuple_end = mp_encode_uint(tuple_end, 1); struct tuple *tuple = box_tuple_new(box_tuple_format_default(), tuple_buf, tuple_end); return box_return_tuple(ctx, tuple); } tarantool_1.9.1.26.g63eb81e3c/test/box/iterator.test.lua0000664000000000000000000001113013306560010021255 0ustar rootrootiterate = dofile('utils.lua').iterate test_run = require('test_run').new() test_run:cmd("push filter '(error: .builtin/.*[.]lua):[0-9]+' to '\\1'") # Tree single-part unique space = 
box.schema.space.create('tweedledum') idx1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true}) -- Hash single-part unique idx5 = space:create_index('i4', { type = 'hash', parts = {1, 'string'}, unique = true}) -- Hash multi-part unique idx6 = space:create_index('i5', { type = 'hash', parts = {2, 'string', 3, 'string'}, unique = true}) space:insert{'pid_001', 'sid_001', 'tid_998', 'a'} space:insert{'pid_002', 'sid_001', 'tid_997', 'a'} space:insert{'pid_003', 'sid_002', 'tid_997', 'b'} space:insert{'pid_005', 'sid_002', 'tid_996', 'b'} space:insert{'pid_007', 'sid_003', 'tid_996', 'a'} space:insert{'pid_011', 'sid_004', 'tid_996', 'c'} space:insert{'pid_013', 'sid_005', 'tid_996', 'b'} space:insert{'pid_017', 'sid_006', 'tid_996', 'a'} space:insert{'pid_019', 'sid_005', 'tid_995', 'a'} space:insert{'pid_023', 'sid_005', 'tid_994', 'a'} ------------------------------------------------------------------------------- -- Iterator: hash single-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i4', 0, 1) iterate('tweedledum', 'i4', 0, 1, box.index.ALL) iterate('tweedledum', 'i4', 0, 1, box.index.EQ) iterate('tweedledum', 'i4', 0, 1, box.index.EQ, 'pid_003') iterate('tweedledum', 'i4', 0, 1, box.index.EQ, 'pid_666') ------------------------------------------------------------------------------- -- Iterator: hash multi-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i5', 1, 3, box.index.ALL) iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005') iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005', 'tid_995') iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005', 'tid_999') iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005', 'tid_995', 'a') space:drop() ------------------------------------------------------------------------------- -- Iterator: 
https://github.com/tarantool/tarantool/issues/464 -- Iterator safety after changing schema ------------------------------------------------------------------------------- space = box.schema.space.create('test', {temporary=true}) idx1 = space:create_index('primary', {type='HASH',unique=true}) idx2 = space:create_index('t1', {type='TREE',unique=true}) idx3 = space:create_index('t2', {type='TREE',unique=true}) box.space.test:insert{0} box.space.test:insert{1} gen1, param1, state1 = space.index.t1:pairs({}, {iterator = box.index.ALL}) gen1(param1, state1) gen2, param2, state2 = space.index.t2:pairs({}, {iterator = box.index.ALL}) gen2(param2, state2) id = space.index.t1.id box.schema.index.drop(space.id, id) gen1(param1, state1) gen2(param2, state2) gen2, param2, state2 = space.index.t2:pairs({}, {iterator = box.index.ALL}) gen2(param2, state2) gen2(param2, state2) space:drop() ------------------------------------------------------------------------------- -- Iterator: https://github.com/tarantool/tarantool/issues/498 -- Iterator is not checked for wrong type; accept lowercase iterator ------------------------------------------------------------------------------- space = box.schema.space.create('test', {temporary=true}) idx1 = space:create_index('primary', {type='TREE',unique=true}) space:insert{0} space:insert{1} gen, param, state = space.index.primary:pairs({}, {iterator = 'ALL'}) gen(param, state) gen(param, state) gen(param, state) gen, param, state = space.index.primary:pairs({}, {iterator = 'all'}) gen(param, state) gen(param, state) gen, param, state = space.index.primary:pairs({}, {iterator = 'mistake'}) space:select({}, {iterator = box.index.ALL}) space:select({}, {iterator = 'all'}) space:select({}, {iterator = 'mistake'}) space:drop() ------------------------------------------------------------------------------- -- Restore GE iterator for HASH https://github.com/tarantool/tarantool/issues/836 
------------------------------------------------------------------------------- space = box.schema.space.create('test', {temporary=true}) idx1 = space:create_index('primary', {type='hash',unique=true}) for i = 0,5 do space:insert{i} end space:select(2) space:select(5, {iterator="GE"}) space:select(nil, {iterator="GE"}) space:select(5, {iterator="GT"}) l = space:select(nil, {limit=2, iterator="GT"}) l l = space:select(l[#l][1], {limit=2, iterator="GT"}) l l = space:select(l[#l][1], {limit=2, iterator="GT"}) l l = space:select(l[#l][1], {limit=2, iterator="GT"}) l space:drop() iterate = nil tarantool_1.9.1.26.g63eb81e3c/test/box/suite.ini0000664000000000000000000000067013306560010017604 0ustar rootroot[default] core = tarantool description = Database tests script = box.lua disabled = rtree_errinj.test.lua tuple_bench.test.lua release_disabled = errinj.test.lua errinj_index.test.lua rtree_errinj.test.lua upsert_errinj.test.lua iproto_stress.test.lua lua_libs = lua/fifo.lua lua/utils.lua lua/bitset.lua lua/index_random_test.lua lua/push.lua lua/identifier.lua use_unix_sockets = True long_run = iproto_stress.test.lua is_parallel = True tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_array.result0000664000000000000000000001324013306560010021526 0ustar rootroots = box.schema.space.create('spatial') --- ... _ = s:create_index('primary') --- ... spatial = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) --- ... spatial.type --- - RTREE ... s:insert{1,{0.0,0.0}} --- - [1, [0, 0]] ... s:insert{2,{0.0,10.0}} --- - [2, [0, 10]] ... s:insert{3,{0.0,50.0}} --- - [3, [0, 50]] ... s:insert{4,{10.0,0.0}} --- - [4, [10, 0]] ... s:insert{5,{50.0,0.0}} --- - [5, [50, 0]] ... s:insert{6,{10.0,10.0}} --- - [6, [10, 10]] ... s:insert{7,{10.0,50.0}} --- - [7, [10, 50]] ... s:insert{8,{50.0,10.0}} --- - [8, [50, 10]] ... s:insert{9,{50.0,50.0}} --- - [9, [50, 50]] ... 
-- select all records s.index.spatial:select({}, {iterator = 'ALL'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [3, [0, 50]] - [4, [10, 0]] - [5, [50, 0]] - [6, [10, 10]] - [7, [10, 50]] - [8, [50, 10]] - [9, [50, 50]] ... -- select records belonging to rectangle (0,0,10,10) s.index.spatial:select({0.0,0.0,10.0,10.0}, {iterator = 'LE'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [4, [10, 0]] - [6, [10, 10]] ... -- select records with coordinates (10,10) s.index.spatial:select({10.0,10.0}, {iterator = 'EQ'}) --- - - [6, [10, 10]] ... -- select neighbors of point (5,5) s.index.spatial:select({5.0,5.0}, {iterator = 'NEIGHBOR'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [4, [10, 0]] - [6, [10, 10]] - [3, [0, 50]] - [5, [50, 0]] - [7, [10, 50]] - [8, [50, 10]] - [9, [50, 50]] ... s:drop() --- ... s = box.schema.space.create('spatial') --- ... _ = s:create_index('primary') --- ... spatial = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}, dimension = 8}) --- ... spatial.type --- - RTREE ... s:insert{ 1,{0, 0, 0, 0, 0, 0, 0, 0}} --- - [1, [0, 0, 0, 0, 0, 0, 0, 0]] ... s:insert{ 2,{10, 0, 0, 0, 0, 0, 0, 0}} --- - [2, [10, 0, 0, 0, 0, 0, 0, 0]] ... s:insert{ 3,{0, 10, 0, 0, 0, 0, 0, 0}} --- - [3, [0, 10, 0, 0, 0, 0, 0, 0]] ... s:insert{ 4,{0, 0, 10, 0, 0, 0, 0, 0}} --- - [4, [0, 0, 10, 0, 0, 0, 0, 0]] ... s:insert{ 5,{0, 0, 0, 10, 0, 0, 0, 0}} --- - [5, [0, 0, 0, 10, 0, 0, 0, 0]] ... s:insert{ 6,{0, 0, 0, 0, 10, 0, 0, 0}} --- - [6, [0, 0, 0, 0, 10, 0, 0, 0]] ... s:insert{ 7,{0, 0, 0, 0, 0, 10, 0, 0}} --- - [7, [0, 0, 0, 0, 0, 10, 0, 0]] ... s:insert{ 8,{0, 0, 0, 0, 0, 0, 10, 0}} --- - [8, [0, 0, 0, 0, 0, 0, 10, 0]] ... s:insert{ 9,{0, 0, 0, 0, 0, 0, 0, 10}} --- - [9, [0, 0, 0, 0, 0, 0, 0, 10]] ... s:insert{10,{50, 0, 0, 0, 0, 0, 0, 0}} --- - [10, [50, 0, 0, 0, 0, 0, 0, 0]] ... s:insert{11,{0, 50, 0, 0, 0, 0, 0, 0}} --- - [11, [0, 50, 0, 0, 0, 0, 0, 0]] ... s:insert{12,{0, 0, 50, 0, 0, 0, 0, 0}} --- - [12, [0, 0, 50, 0, 0, 0, 0, 0]] ... 
s:insert{13,{0, 0, 0, 50, 0, 0, 0, 0}} --- - [13, [0, 0, 0, 50, 0, 0, 0, 0]] ... s:insert{14,{0, 0, 0, 0, 50, 0, 0, 0}} --- - [14, [0, 0, 0, 0, 50, 0, 0, 0]] ... s:insert{15,{0, 0, 0, 0, 0, 50, 0, 0}} --- - [15, [0, 0, 0, 0, 0, 50, 0, 0]] ... s:insert{16,{0, 0, 0, 0, 0, 0, 50, 0}} --- - [16, [0, 0, 0, 0, 0, 0, 50, 0]] ... s:insert{17,{0, 0, 0, 0, 0, 0, 0, 50}} --- - [17, [0, 0, 0, 0, 0, 0, 0, 50]] ... s:insert{18,{10, 10, 10, 10, 10, 10, 10, 10}} --- - [18, [10, 10, 10, 10, 10, 10, 10, 10]] ... s:insert{19,{10, 50, 10, 50, 10, 50, 10, 50}} --- - [19, [10, 50, 10, 50, 10, 50, 10, 50]] ... s:insert{20,{0, 10, 50, 0, 10, 50, 0, 10}} --- - [20, [0, 10, 50, 0, 10, 50, 0, 10]] ... p0 = {0, 0, 0, 0, 0, 0, 0, 0} --- ... p5 = {5, 5, 5, 5, 5, 5, 5, 5} --- ... p10 = {10, 10, 10, 10, 10, 10, 10, 10 } --- ... rt0_10 = {0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10 } --- ... -- select all records s.index.spatial:select({}, {iterator = 'ALL'}) --- - - [1, [0, 0, 0, 0, 0, 0, 0, 0]] - [2, [10, 0, 0, 0, 0, 0, 0, 0]] - [3, [0, 10, 0, 0, 0, 0, 0, 0]] - [4, [0, 0, 10, 0, 0, 0, 0, 0]] - [5, [0, 0, 0, 10, 0, 0, 0, 0]] - [6, [0, 0, 0, 0, 10, 0, 0, 0]] - [7, [0, 0, 0, 0, 0, 10, 0, 0]] - [8, [0, 0, 0, 0, 0, 0, 10, 0]] - [9, [0, 0, 0, 0, 0, 0, 0, 10]] - [10, [50, 0, 0, 0, 0, 0, 0, 0]] - [11, [0, 50, 0, 0, 0, 0, 0, 0]] - [12, [0, 0, 50, 0, 0, 0, 0, 0]] - [13, [0, 0, 0, 50, 0, 0, 0, 0]] - [14, [0, 0, 0, 0, 50, 0, 0, 0]] - [15, [0, 0, 0, 0, 0, 50, 0, 0]] - [16, [0, 0, 0, 0, 0, 0, 50, 0]] - [17, [0, 0, 0, 0, 0, 0, 0, 50]] - [18, [10, 10, 10, 10, 10, 10, 10, 10]] - [19, [10, 50, 10, 50, 10, 50, 10, 50]] - [20, [0, 10, 50, 0, 10, 50, 0, 10]] ... -- select records belonging to rectangle (0,0,..10,10,..) 
s.index.spatial:select(rt0_10, {iterator = 'LE'}) --- - - [1, [0, 0, 0, 0, 0, 0, 0, 0]] - [2, [10, 0, 0, 0, 0, 0, 0, 0]] - [3, [0, 10, 0, 0, 0, 0, 0, 0]] - [4, [0, 0, 10, 0, 0, 0, 0, 0]] - [5, [0, 0, 0, 10, 0, 0, 0, 0]] - [6, [0, 0, 0, 0, 10, 0, 0, 0]] - [7, [0, 0, 0, 0, 0, 10, 0, 0]] - [8, [0, 0, 0, 0, 0, 0, 10, 0]] - [9, [0, 0, 0, 0, 0, 0, 0, 10]] - [18, [10, 10, 10, 10, 10, 10, 10, 10]] ... -- select records with coordinates (10,10) s.index.spatial:select(p10, {iterator = 'EQ'}) --- - - [18, [10, 10, 10, 10, 10, 10, 10, 10]] ... -- select neighbors of point (5,5) s.index.spatial:select(p5, {iterator = 'NEIGHBOR'}) --- - - [1, [0, 0, 0, 0, 0, 0, 0, 0]] - [2, [10, 0, 0, 0, 0, 0, 0, 0]] - [3, [0, 10, 0, 0, 0, 0, 0, 0]] - [4, [0, 0, 10, 0, 0, 0, 0, 0]] - [5, [0, 0, 0, 10, 0, 0, 0, 0]] - [6, [0, 0, 0, 0, 10, 0, 0, 0]] - [7, [0, 0, 0, 0, 0, 10, 0, 0]] - [8, [0, 0, 0, 0, 0, 0, 10, 0]] - [9, [0, 0, 0, 0, 0, 0, 0, 10]] - [18, [10, 10, 10, 10, 10, 10, 10, 10]] - [10, [50, 0, 0, 0, 0, 0, 0, 0]] - [11, [0, 50, 0, 0, 0, 0, 0, 0]] - [12, [0, 0, 50, 0, 0, 0, 0, 0]] - [13, [0, 0, 0, 50, 0, 0, 0, 0]] - [14, [0, 0, 0, 0, 50, 0, 0, 0]] - [15, [0, 0, 0, 0, 0, 50, 0, 0]] - [16, [0, 0, 0, 0, 0, 0, 50, 0]] - [17, [0, 0, 0, 0, 0, 0, 0, 50]] - [20, [0, 10, 50, 0, 10, 50, 0, 10]] - [19, [10, 50, 10, 50, 10, 50, 10, 50]] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/transaction.result0000664000000000000000000001663513306565107021563 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... -- empty transaction - ok box.begin() box.commit(); --- ... -- double begin box.begin() box.begin(); --- - error: 'Operation is not permitted when there is an active transaction ' ... -- no active transaction since exception rolled it back box.commit(); --- ... -- double commit - implicit start of transaction box.begin() box.commit() box.commit(); --- ... 
-- commit if not started - implicit start of transaction box.commit(); --- ... -- rollback if not started - ok -- double rollback - ok box.rollback() box.begin() box.rollback() box.rollback(); --- ... -- rollback of an empty trans - ends transaction box.begin() box.rollback(); --- ... -- no current transaction - implicit begin box.commit(); --- ... fiber = require('fiber'); --- ... function sloppy() box.begin() end; --- ... f = fiber.create(sloppy); --- ... -- when the sloppy fiber ends, its session has an active transction -- ensure it's rolled back automatically while f:status() ~= 'dead' do fiber.sleep(0) end; --- ... -- transactions and system spaces box.begin() box.schema.space.create('test'); --- - error: Space _schema does not support multi-statement transactions ... box.rollback(); --- ... box.begin() box.schema.func.create('test'); --- - error: Space _func does not support multi-statement transactions ... box.rollback(); --- ... box.begin() box.schema.user.create('test'); --- - error: Space _user does not support multi-statement transactions ... box.rollback(); --- ... box.begin() box.schema.user.grant('guest', 'read', 'space', '_priv'); --- - error: Space _priv does not support multi-statement transactions ... box.rollback(); --- ... box.begin() box.space._schema:insert{'test'}; --- - error: Space _schema does not support multi-statement transactions ... box.rollback(); --- ... box.begin() box.space._cluster:insert{123456789, 'abc'}; --- - error: Space _cluster does not support multi-statement transactions ... box.rollback(); --- ... s = box.schema.space.create('test'); --- ... box.begin() index = s:create_index('primary'); --- - error: DDL does not support multi-statement transactions ... box.rollback(); --- ... index = s:create_index('primary'); --- ... t = nil function multi() box.begin() s:auto_increment{'first row'} s:auto_increment{'second row'} t = s:select{} box.commit() end; --- ... multi(); --- ... 
t; --- - - [1, 'first row'] - [2, 'second row'] ... s:select{}; --- - - [1, 'first row'] - [2, 'second row'] ... s:truncate(); --- ... function multi() box.begin() s:auto_increment{'first row'} s:auto_increment{'second row'} t = s:select{} box.rollback() end; --- ... multi(); --- ... t; --- - - [1, 'first row'] - [2, 'second row'] ... s:select{}; --- - [] ... function multi() box.begin() s:insert{1, 'first row'} pcall(s.insert, s, {1, 'duplicate'}) t = s:select{} box.commit() end; --- ... multi(); --- ... t; --- - - [1, 'first row'] ... s:select{}; --- - - [1, 'first row'] ... s:truncate(); --- ... -- -- Test that fiber yield causes a transaction rollback -- but only if the transaction has changed any data -- -- Test admin console box.begin(); --- ... -- should be ok - active transaction, and we don't -- know, maybe it will use vinyl engine, which -- may support yield() in the future, so we don't roll -- back a transction with no statements. box.commit(); --- ... box.begin() s:insert{1, 'Must be rolled back'}; --- ... -- nothing - the transaction was rolled back -- nothing to commit because of yield while s:get{1} ~= nil do fiber.sleep(0) end box.commit(); --- ... -- Test background fiber -- function sloppy() box.begin() s:insert{1, 'From background fiber'} end; --- ... f = fiber.create(sloppy); --- ... while f:status() == 'running' do fiber.sleep(0) end; --- ... -- When the sloppy fiber ends, its session has an active transction -- It's rolled back automatically s:select{}; --- - [] ... t = nil; --- ... function sloppy() box.begin() s:insert{1, 'From background fiber'} fiber.sleep(0) pcall(box.commit) t = s:select{} end; --- ... f = fiber.create(sloppy); --- ... while f:status() ~= 'dead' do fiber.sleep(0) end; --- ... t; --- - [] ... s:select{}; --- - [] ... s:drop(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test = box.schema.space.create('test') --- ... tindex = test:create_index('primary') --- ... 
box.begin() test:insert{1} box.rollback() --- ... test:select{1} --- - [] ... box.begin() test:insert{1} box.commit() --- ... test:select{1} --- - - [1] ... -- -- Test statement-level rollback -- box.space.test:truncate() --- ... function insert(a) box.space.test:insert(a) end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function dup_key() box.begin() box.space.test:insert{1} local status, _ = pcall(insert, {1}) if not status then if box.error.last().code ~= box.error.TUPLE_FOUND then box.error.raise() end box.space.test:insert{2} end box.commit() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... dup_key() --- ... box.space.test:select{} --- - - [1] - [2] ... -- -- transaction which uses a non-existing space (used to crash in -- rollbackStatement) -- test = box.space.test --- ... box.space.test:drop() --- ... status, message = pcall(function() box.begin() test:put{1} test:put{2} box.commit() end) --- ... status --- - false ... message:match('does not exist') --- - does not exist ... if not status then box.rollback() end --- ... test = nil --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function tx_limit(n) box.begin() for i=0,n do box.space.test:insert{i} end box.commit() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... _ = box.schema.space.create('test'); --- ... _ = box.space.test:create_index('primary'); --- ... tx_limit(10000) --- ... box.space.test:len() --- - 10001 ... box.space.test:drop() --- ... -- -- gh-1638: box.rollback on a JIT-ed code path crashes LuaJIT -- (ffi call + yield don't mix well, rollback started to yield recently) -- Note: don't remove gh_1638(), it's necessary to trigger JIT-compilation. -- function gh_1638() box.begin(); box.rollback() end --- ... for i = 1, 1000 do fiber.create(function() gh_1638() end) end --- ... -- --gh-818 add atomic() -- space = box.schema.space.create('atomic') --- ... index = space:create_index('primary') --- ... 
test_run:cmd("setopt delimiter ';'") --- - true ... function args(...) return 'args', ... end; --- ... box.atomic(args, 1, 2, 3, 4, 5); --- - args - 1 - 2 - 3 - 4 - 5 ... function tx() space:auto_increment{'first row'} space:auto_increment{'second row'} return space:select{} end; --- ... box.atomic(tx); --- - - [1, 'first row'] - [2, 'second row'] ... function tx_error(space) space:auto_increment{'third'} space:auto_increment{'fourth'} error("some error") end; --- ... box.atomic(tx_error, space); --- - error: '[string "function tx_error(space) space:auto_incre..."]:1: some error' ... function nested(space) box.begin() end; --- ... box.atomic(nested, space); --- - error: 'Operation is not permitted when there is an active transaction ' ... function rollback(space) space:auto_increment{'fifth'} box.rollback() end; --- ... box.atomic(rollback, space); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... space:select{} --- - - [1, 'first row'] - [2, 'second row'] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/admin.result0000664000000000000000000001302113306565107020310 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary') --- ... help() --- - - To get help, see the Tarantool manual at http://tarantool.org/doc/ - To start the interactive Tarantool tutorial, type 'tutorial()' ... 
cfg_filter(box.cfg) --- - - - background - false - - checkpoint_count - 2 - - checkpoint_interval - 3600 - - coredump - false - - force_recovery - false - - hot_standby - false - - listen - - - log - - - log_format - plain - - log_level - 5 - - log_nonblock - true - - memtx_dir - - - memtx_max_tuple_size - - - memtx_memory - 107374182 - - memtx_min_tuple_size - - - pid_file - - - read_only - false - - readahead - 16320 - - replication_connect_timeout - 30 - - replication_sync_lag - 10 - - replication_timeout - 1 - - rows_per_wal - 500000 - - slab_alloc_factor - 1.05 - - too_long_threshold - 0.5 - - vinyl_bloom_fpr - 0.05 - - vinyl_cache - 134217728 - - vinyl_dir - - - vinyl_max_tuple_size - 1048576 - - vinyl_memory - 134217728 - - vinyl_page_size - 8192 - - vinyl_range_size - 1073741824 - - vinyl_read_threads - 1 - - vinyl_run_count_per_level - 2 - - vinyl_run_size_ratio - 3.5 - - vinyl_timeout - 60 - - vinyl_write_threads - 2 - - wal_dir - - - wal_dir_rescan_delay - 2 - - wal_max_size - 268435456 - - wal_mode - write - - worker_pool_threads - 4 ... space:insert{1, 'tuple'} --- - [1, 'tuple'] ... box.snapshot() --- - ok ... space:delete{1} --- - [1, 'tuple'] ... test_run:cmd("setopt delimiter ';'") --- - true ... function check_type(arg, typeof) return type(arg) == typeof end; --- ... function test_box_info() local tmp = box.info() local num = {'pid', 'uptime'} local str = {'version', 'status' } local failed = {} if check_type(tmp.replication, 'table') == false then table.insert(failed, 'box.info().replication') else tmp.replication = nil end for k, v in ipairs(num) do if check_type(tmp[v], 'number') == false then table.insert(failed, 'box.info().'..v) else tmp[v] = nil end end for k, v in ipairs(str) do if check_type(tmp[v], 'string') == false then table.insert(failed, 'box.info().'..v) else tmp[v] = nil end end if #tmp > 0 or #failed > 0 then return 'box.info() is not ok.', 'failed: ', failed, tmp else return 'box.info() is ok.' end end; --- ... 
function test_slab(tbl) local num = {'item_size', 'item_count', 'slab_size', 'slab_count', 'mem_used', 'mem_free'} local failed = {} for k, v in ipairs(num) do if check_type(tbl[v], 'number') == false then table.insert(failed, 'box.slab.info()..'..v) else tbl[v] = nil end end if #tbl > 0 or #failed > 0 then return false, failed else return true, {} end end; --- ... function test_box_slab_info() local tmp = box.slab.info() local tmp_slabs = box.slab.stats() local cdata = {'arena_size', 'arena_used'} local failed = {} if type(tmp_slabs) == 'table' then for name, tbl in ipairs(tmp_slabs) do local bl, fld = test_slab(tbl) if bl == true then tmp[name] = nil else for k, v in ipairs(fld) do table.insert(failed, v) end end end else table.insert(failed, 'box.slab.info().slabs is not ok') end if #tmp_slabs == 0 then tmp_slabs = nil end for k, v in ipairs(cdata) do if check_type(tmp[v], 'number') == false then table.insert(failed, 'box.slab.info().'..v) else tmp[v] = nil end end if #tmp > 0 or #failed > 0 then return "box.slab.info() is not ok", tmp, failed else return "box.slab.info() is ok" end end; --- ... function test_fiber(tbl) local num = {'fid', 'csw'} for k, v in ipairs(num) do if check_type(tmp[v], 'number') == false then table.insert(failed, "require('fiber').info().."..v) else tmp[v] = nil end end if type(tbl.backtrace) == 'table' and #tbl.backtrace > 0 then tbl.backtrace = nil else table.insert(failed, 'backtrace') end if #tbl > 0 or #failed > 0 then return false, failed else return true, {} end end; --- ... function test_box_fiber_info() local tmp = require('fiber').info() local failed = {} for name, tbl in ipairs(tmp) do local bl, fld = test_fiber(tbl) if bl == true then tmp[name] = nil else for k, v in ipairs(fld) do table.insert(failed, v) end end end if #tmp > 0 or #failed > 0 then return "require('fiber').info is not ok. failed: ", tmp, failed else return "require('fiber').info() is ok" end end; --- ... test_box_info(); --- - box.info() is ok. ... 
test_box_slab_info(); --- - box.slab.info() is ok ... test_box_fiber_info(); --- - require('fiber').info() is ok ... space:drop(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/box/errinj.test.lua0000664000000000000000000002353713306565107020747 0ustar rootrooterrinj = box.error.injection net_box = require('net.box') space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) errinj.info() errinj.set("some-injection", true) errinj.set("some-injection") -- check error space:select{222444} errinj.set("ERRINJ_TESTING", true) space:select{222444} errinj.set("ERRINJ_TESTING", false) -- Check how well we handle a failed log write errinj.set("ERRINJ_WAL_IO", true) space:insert{1} space:get{1} errinj.set("ERRINJ_WAL_IO", false) space:insert{1} errinj.set("ERRINJ_WAL_IO", true) space:update(1, {{'=', 2, 2}}) space:get{1} space:get{2} errinj.set("ERRINJ_WAL_IO", false) space:truncate() -- Check a failed log rotation errinj.set("ERRINJ_WAL_ROTATE", true) space:insert{1} space:get{1} errinj.set("ERRINJ_WAL_ROTATE", false) space:insert{1} errinj.set("ERRINJ_WAL_ROTATE", true) space:update(1, {{'=', 2, 2}}) space:get{1} space:get{2} errinj.set("ERRINJ_WAL_ROTATE", false) space:update(1, {{'=', 2, 2}}) space:get{1} space:get{2} errinj.set("ERRINJ_WAL_ROTATE", true) space:truncate() errinj.set("ERRINJ_WAL_ROTATE", false) space:truncate() space:drop() -- Check how well we handle a failed log write in DDL s_disabled = box.schema.space.create('disabled') s_withindex = box.schema.space.create('withindex') index1 = s_withindex:create_index('primary', { type = 'hash' }) s_withdata = box.schema.space.create('withdata') index2 = s_withdata:create_index('primary', { type = 'tree' }) s_withdata:insert{1, 2, 3, 4, 5} s_withdata:insert{4, 5, 6, 7, 8} index3 = s_withdata:create_index('secondary', { type = 'hash', parts = {2, 'unsigned', 3, 'unsigned' }}) errinj.set("ERRINJ_WAL_IO", true) test = 
box.schema.space.create('test') s_disabled:create_index('primary', { type = 'hash' }) s_disabled.enabled s_disabled:insert{0} s_withindex:create_index('secondary', { type = 'tree', parts = { 2, 'unsigned'} }) s_withindex.index.secondary s_withdata.index.secondary:drop() s_withdata.index.secondary.unique s_withdata:drop() box.space['withdata'].enabled index4 = s_withdata:create_index('another', { type = 'tree', parts = { 5, 'unsigned' }, unique = false}) s_withdata.index.another errinj.set("ERRINJ_WAL_IO", false) test = box.schema.space.create('test') index5 = s_disabled:create_index('primary', { type = 'hash' }) s_disabled.enabled s_disabled:insert{0} index6 = s_withindex:create_index('secondary', { type = 'tree', parts = { 2, 'unsigned'} }) s_withindex.index.secondary.unique s_withdata.index.secondary:drop() s_withdata.index.secondary s_withdata:drop() box.space['withdata'] index7 = s_withdata:create_index('another', { type = 'tree', parts = { 5, 'unsigned' }, unique = false}) s_withdata.index.another test:drop() s_disabled:drop() s_withindex:drop() -- Check transaction rollback when out of memory env = require('test_run') test_run = env.new() s = box.schema.space.create('s') _ = s:create_index('pk') errinj.set("ERRINJ_TUPLE_ALLOC", true) s:auto_increment{} s:select{} s:auto_increment{} s:select{} s:auto_increment{} s:select{} test_run:cmd("setopt delimiter ';'") box.begin() s:insert{1} box.commit(); box.rollback(); s:select{}; box.begin() s:insert{1} s:insert{2} box.commit(); s:select{}; box.rollback(); box.begin() pcall(s.insert, s, {1}) s:insert{2} box.commit(); s:select{}; box.rollback(); errinj.set("ERRINJ_TUPLE_ALLOC", false); box.begin() s:insert{1} errinj.set("ERRINJ_TUPLE_ALLOC", true) s:insert{2} box.commit(); errinj.set("ERRINJ_TUPLE_ALLOC", false); box.rollback(); s:select{}; box.begin() s:insert{1} errinj.set("ERRINJ_TUPLE_ALLOC", true) pcall(s.insert, s, {2}) box.commit(); s:select{}; box.rollback(); test_run:cmd("setopt delimiter ''"); 
errinj.set("ERRINJ_TUPLE_ALLOC", false) s:drop() s = box.schema.space.create('test') _ = s:create_index('test', {parts = {1, 'unsigned', 3, 'unsigned', 5, 'unsigned'}}) s:insert{1, 2, 3, 4, 5, 6} t = s:select{}[1] errinj.set("ERRINJ_TUPLE_FIELD", true) tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) errinj.set("ERRINJ_TUPLE_FIELD", false) tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) s:drop() s = box.schema.space.create('test') _ = s:create_index('test', {parts = {2, 'unsigned', 4, 'unsigned', 6, 'unsigned'}}) s:insert{1, 2, 3, 4, 5, 6} t = s:select{}[1] errinj.set("ERRINJ_TUPLE_FIELD", true) tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) errinj.set("ERRINJ_TUPLE_FIELD", false) tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) -- Cleanup s:drop() -- -- gh-2046: don't store offsets for sequential multi-parts keys -- s = box.schema.space.create('test') _ = s:create_index('seq2', { parts = { 1, 'unsigned', 2, 'unsigned' }}) _ = s:create_index('seq3', { parts = { 1, 'unsigned', 2, 'unsigned', 3, 'unsigned' }}) _ = s:create_index('seq5', { parts = { 1, 'unsigned', 2, 'unsigned', 3, 'unsigned', 4, 'scalar', 5, 'number' }}) _ = s:create_index('rnd1', { parts = { 3, 'unsigned' }}) errinj.set("ERRINJ_TUPLE_FIELD", true) tuple = s:insert({1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) tuple tuple[1] -- not-null, always accessible tuple[2] -- null, doesn't have offset tuple[3] -- not null, has offset tuple[4] -- null, doesn't have offset tuple[5] -- null, doesn't have offset s.index.seq2:select({1}) s.index.seq2:select({1, 2}) s.index.seq3:select({1}) s.index.seq3:select({1, 2, 3}) s.index.seq5:select({1}) s.index.seq5:select({1, 2, 3, 4, 5}) s.index.rnd1:select({3}) errinj.set("ERRINJ_TUPLE_FIELD", false) s:drop() space = box.schema.space.create('test') _ = 
space:create_index('pk') errinj.set("ERRINJ_WAL_WRITE", true) space:insert{1} errinj.set("ERRINJ_WAL_WRITE", false) errinj.set("ERRINJ_WAL_WRITE_DISK", true) _ = space:insert{1, require'digest'.urandom(192 * 1024)} errinj.set("ERRINJ_WAL_WRITE_DISK", false) _ = space:insert{1} errinj.set("ERRINJ_WAL_WRITE", true) box.snapshot() errinj.set("ERRINJ_WAL_WRITE", false) space:drop() --test space:bsize() in case of memory error utils = dofile('utils.lua') s = box.schema.space.create('space_bsize') idx = s:create_index('primary') for i = 1, 13 do s:insert{ i, string.rep('x', i) } end s:bsize() utils.space_bsize(s) errinj.set("ERRINJ_TUPLE_ALLOC", true) s:replace{1, "test"} s:bsize() utils.space_bsize(s) s:update({1}, {{'=', 3, '!'}}) s:bsize() utils.space_bsize(s) errinj.set("ERRINJ_TUPLE_ALLOC", false) s:drop() space = box.schema.space.create('test') index1 = space:create_index('primary') fiber = require'fiber' ch = fiber.channel(1) test_run:cmd('setopt delimiter ";"') function test() errinj.set('ERRINJ_WAL_WRITE_DISK', true) pcall(box.space.test.replace, box.space.test, {1, 1}) errinj.set('ERRINJ_WAL_WRITE_DISK', false) ch:put(true) end ; function run() fiber.create(test) box.snapshot() end ; test_run:cmd('setopt delimiter ""'); -- Port_dump can fail. 
box.schema.user.grant('guest', 'read,write,execute', 'universe') cn = net_box.connect(box.cfg.listen) cn:ping() errinj.set('ERRINJ_PORT_DUMP', true) ok, ret = pcall(cn.space._space.select, cn.space._space) assert(not ok) assert(string.match(tostring(ret), 'Failed to allocate')) errinj.set('ERRINJ_PORT_DUMP', false) cn:close() box.schema.user.revoke('guest', 'read, write, execute', 'universe') run() ch:get() box.space.test:select() test_run:cmd('restart server default') box.space.test:select() box.space.test:drop() errinj = box.error.injection net_box = require('net.box') fiber = require'fiber' s = box.schema.space.create('test') _ = s:create_index('pk') ch = fiber.channel(2) test_run:cmd("setopt delimiter ';'") function test(tuple) ch:put({pcall(s.replace, s, tuple)}) end; test_run:cmd("setopt delimiter ''"); errinj.set("ERRINJ_WAL_WRITE", true) _ = {fiber.create(test, {1, 2, 3}), fiber.create(test, {3, 4, 5})} {ch:get(), ch:get()} errinj.set("ERRINJ_WAL_WRITE", false) s:drop() -- rebuild some secondary indexes if the primary was changed s = box.schema.space.create('test') i1 = s:create_index('i1', {parts = {1, 'unsigned'}}) --i2 = s:create_index('i2', {parts = {5, 'unsigned'}, unique = false}) --i3 = s:create_index('i3', {parts = {6, 'unsigned'}, unique = false}) i2 = i1 i3 = i1 _ = s:insert{1, 4, 3, 4, 10, 10} _ = s:insert{2, 3, 1, 2, 10, 10} _ = s:insert{3, 2, 2, 1, 10, 10} _ = s:insert{4, 1, 4, 3, 10, 10} i1:select{} i2:select{} i3:select{} i1:alter({parts={2, 'unsigned'}}) _ = collectgarbage('collect') i1:select{} i2:select{} i3:select{} box.error.injection.set('ERRINJ_BUILD_SECONDARY', i2.id) i1:alter{parts = {3, "unsigned"}} _ = collectgarbage('collect') i1:select{} i2:select{} i3:select{} box.error.injection.set('ERRINJ_BUILD_SECONDARY', i3.id) i1:alter{parts = {4, "unsigned"}} _ = collectgarbage('collect') i1:select{} i2:select{} i3:select{} box.error.injection.set('ERRINJ_BUILD_SECONDARY', -1) s:drop() -- -- Do not rebuild index if the only change is a 
key part type -- compatible change. -- s = box.schema.space.create('test') pk = s:create_index('pk') sk = s:create_index('sk', {parts = {2, 'unsigned'}}) s:replace{1, 1} box.error.injection.set('ERRINJ_BUILD_SECONDARY', sk.id) sk:alter({parts = {2, 'number'}}) box.error.injection.set('ERRINJ_BUILD_SECONDARY', -1) s:drop() -- -- gh-3255: iproto can crash and discard responses, if a network -- is saturated, and DML yields too long on commit. -- box.schema.user.grant('guest', 'read,write,execute', 'universe') s = box.schema.space.create('test') _ = s:create_index('pk') c = net_box.connect(box.cfg.listen) ch = fiber.channel(200) errinj.set("ERRINJ_IPROTO_TX_DELAY", true) for i = 1, 100 do fiber.create(function() for j = 1, 10 do c.space.test:replace{1} end ch:put(true) end) end for i = 1, 100 do fiber.create(function() for j = 1, 10 do c.space.test:select() end ch:put(true) end) end for i = 1, 200 do ch:get() end errinj.set("ERRINJ_IPROTO_TX_DELAY", false) s:drop() box.schema.user.revoke('guest', 'read,write,execute','universe') tarantool_1.9.1.26.g63eb81e3c/test/box/func_reload.test.lua0000664000000000000000000000607313306560010021717 0ustar rootrootfio = require('fio') net = require('net.box') fiber = require('fiber') ext = (jit.os == "OSX" and "dylib" or "so") build_path = os.getenv("BUILDDIR") reload1_path = build_path..'/test/box/reload1.'..ext reload2_path = build_path..'/test/box/reload2.'..ext reload_path = "reload."..ext _ = fio.unlink(reload_path) c = net.connect(os.getenv("LISTEN")) box.schema.func.create('reload.foo', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'reload.foo') _ = box.schema.space.create('test') _ = box.space.test:create_index('primary', {parts = {1, "integer"}}) box.schema.user.grant('guest', 'read,write', 'space', 'test') _ = fio.unlink(reload_path) fio.symlink(reload1_path, reload_path) --check not fail on non-load func box.schema.func.reload("reload.foo") -- test of usual case reload. 
No hanging calls box.space.test:insert{0} c:call("reload.foo", {1}) box.space.test:delete{0} _ = fio.unlink(reload_path) fio.symlink(reload2_path, reload_path) box.schema.func.reload("reload.foo") c:call("reload.foo") box.space.test:select{} box.space.test:truncate() -- test case with hanging calls _ = fio.unlink(reload_path) fio.symlink(reload1_path, reload_path) box.schema.func.reload("reload.foo") fibers = 10 for i = 1, fibers do fiber.create(function() c:call("reload.foo", {i}) end) end while box.space.test:count() < fibers do fiber.sleep(0.001) end -- double reload doesn't fail waiting functions box.schema.func.reload("reload.foo") _ = fio.unlink(reload_path) fio.symlink(reload2_path, reload_path) box.schema.func.reload("reload.foo") c:call("reload.foo") while box.space.test:count() < 2 * fibers + 1 do fiber.sleep(0.001) end box.space.test:select{} box.schema.func.drop("reload.foo") box.space.test:drop() _ = fio.unlink(reload_path) fio.symlink(reload1_path, reload_path) box.schema.func.create('reload.test_reload', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'reload.test_reload') s = box.schema.space.create('test_reload') _ = s:create_index('pk') box.schema.user.grant('guest', 'read,write', 'space', 'test_reload') ch = fiber.channel(2) -- call first time to load function c:call("reload.test_reload") s:delete({1}) _ = fio.unlink(reload_path) fio.symlink(reload2_path, reload_path) _ = fiber.create(function() ch:put(c:call("reload.test_reload")) end) while s:get({1}) == nil do fiber.yield(0.0001) end box.schema.func.reload("reload.test_reload") _ = fiber.create(function() ch:put(c:call("reload.test_reload")) end) ch:get() ch:get() s:drop() box.schema.func.create('reload.test_reload_fail', {language = "C"}) box.schema.user.grant('guest', 'execute', 'function', 'reload.test_reload_fail') c:call("reload.test_reload_fail") _ = fio.unlink(reload_path) fio.symlink(reload1_path, reload_path) s, e = pcall(box.schema.func.reload, 
"reload.test_reload") s, string.find(tostring(e), 'test_reload_fail') ~= nil c:call("reload.test_reload") c:call("reload.test_reload_fail") box.schema.func.drop("reload.test_reload") box.schema.func.drop("reload.test_reload_fail") _ = fio.unlink(reload_path) box.schema.func.reload() box.schema.func.reload("non-existing") tarantool_1.9.1.26.g63eb81e3c/test/box/sql.test.lua0000664000000000000000000002057313306560010020236 0ustar rootrootenv = require('test_run') test_run = env.new() net_box = require('net.box') s = box.schema.space.create('test') _ = box.schema.space.create('test1', { id = 555 }) box.schema.user.create('test', { password = 'test' }) box.schema.user.grant('test', 'execute,read,write', 'universe') conn = net_box.connect('test:test@' .. box.cfg.listen) space = conn.space.test index = box.space.test:create_index('primary', { type = 'hash' }) _ = box.space.test1:create_index('primary', { type = 'hash' }) _ = box.space.test1:create_index('secondary', { type = 'hash', parts = {2, 'string'}}) -- send request to remote server to force schema reloading conn:reload_schema() space:select{} space:insert{1, 'I am a tuple'} space:select{1} space:select{0} space:select{2} test_run:cmd('restart server default') net_box = require('net.box') conn = net_box.connect('test:test@' .. box.cfg.listen) space = conn.space.test space:select{1} box.snapshot() space:select{1} test_run:cmd('restart server default') net_box = require('net.box') conn = net_box.connect('test:test@' .. box.cfg.listen) space = conn.space.test space:select{1} space:delete{1} space:select{1} -- xxx: update comes through, returns 0 rows affected space:update(1, {{'=', 2, 'I am a new tuple'}}) -- nothing is selected, since nothing was there space:select{1} space:insert{1, 'I am a new tuple'} space:select{1} space:update(1, {{'=', 2, 'I am the newest tuple'}}) space:select{1} -- this is correct, can append field to tuple space:update(1, {{'=', 2, 'Huh'}, {'=', 3, 'I am a new field! 
I was added via append'}}) space:select{1} -- this is illegal space:update(1, {{'=', 2, 'Huh'}, {'=', 1001, 'invalid field'}}) space:select{1} space:replace{1, 'I am a new tuple', 'stub'} space:update(1, {{'=', 2, 'Huh'}, {'=', 3, 'Oh-ho-ho'}}) space:select{1} -- check empty strings space:update(1, {{'=', 2, ''}, {'=', 3, ''}}) space:select{1} -- check type change space:update(1, {{'=', 2, 2}, {'=', 3, 3}}) space:select{1} -- check limits space:insert{0} space:select{0} space:select{4294967295} -- check update delete be secondary index conn.space.test1:insert{0, "hello", 1} conn.space.test1.index.secondary:update("hello", {{'=', 3, 2}}) conn.space.test1.index.secondary:delete("hello") -- cleanup space:delete(0) space:delete(4294967295) box.space.test:drop() box.space.test1:drop() box.schema.user.drop('test') space = nil net_box = require('net.box') -- Prepare spaces box.schema.user.create('test', { password = 'test' }) box.schema.user.grant('test', 'execute,read,write', 'universe') s = box.schema.space.create('tweedledum') index1 = s:create_index('primary', { type = 'tree', parts = { 1, 'string'} }) index2 = s:create_index('secondary', { type = 'tree', unique = false, parts = {2, 'string'}}) function compare(a,b) return a[1] < b[1] end conn = net_box.connect('test:test@' .. 
box.cfg.listen) space = conn.space.tweedledum -- A test case for Bug#729758 -- "SELECT fails with a disjunct and small LIMIT" -- https://bugs.launchpad.net/tarantool/+bug/729758 space:insert{'Doe', 'Richard'} space:insert{'Roe', 'Richard'} space:insert{'Woe', 'Richard'} space:insert{'Major', 'Tomas'} space:insert{'Kytes', 'Tomas'} sorted(space.index.secondary:select('Richard')) -- A test case for Bug#729879 -- "Zero limit is treated the same as no limit" -- https://bugs.launchpad.net/tarantool/+bug/729879 sorted(space.index.secondary:select('Richard', { limit = 0 })) s:truncate() -- A test case for Bug#730593 -- "Bad data if incomplete tuple" -- https://bugs.launchpad.net/tarantool/+bug/730593 -- Verify that if there is an index on, say, field 2, -- we can't insert tuples with cardinality 1 and -- get away with it. space:insert{'Britney'} sorted(space.index.secondary:select('Anything')) space:insert{'Stephanie'} sorted(space.index.secondary:select('Anything')) space:insert{'Spears', 'Britney'} space:select{'Spears'} sorted(space.index.secondary:select('Anything')) sorted(space.index.secondary:select('Britney')) s.index[0]:select('Spears', { limit = 100, iterator = 'GE' }) s.index[1]:select('Britney', { limit = 100, iterator = 'GE' }) space:delete('Spears') -- Cleanup s:truncate() -- Test composite keys with trees -- Redefine the second key to be composite s.index.secondary:alter{unique = true, parts = { 2, 'string', 3, 'string'}} space:insert{'key1', 'part1', 'part2'} -- Test a duplicate insert on unique index that once resulted in a crash (bug 926080) space:replace{'key1', 'part1', 'part2'} space:insert{'key2', 'part1', 'part2_a'} space:insert{'key3', 'part1', 'part2_b'} s.index[1]:select{} space:select{'key1'} space:select{'key2'} space:select{'key3'} sorted(space.index.secondary:select('part1')) s.index[1]:select('part1', { limit = 100, iterator = 'GE' }) s.index[0]:select('key2', { limit = 100, iterator = 'GE' }) s.index[1]:select({ 'part1', 'part2_a' }, { 
limit = 1, iterator = 'GE' }) space:select{'key1'} space:select{'key2'} space:select{'key3'} sorted(space.index.secondary:select('part1')) space:delete('key1') space:delete('key2') space:delete('key3') s:truncate() -- check non-unique multipart keys s.index.primary:alter{type = 'tree', parts = { 1, 'unsigned'}} s.index.secondary:alter{unique = false} space:insert{1234567, 'part1', 'part2'} space:insert{11234567, 'part1', 'part2'} space:insert{21234567, 'part1', 'part2_a'} space:insert{31234567, 'part1_a', 'part2'} space:insert{41234567, 'part1_a', 'part2_a'} l = {} for state, v in s:pairs() do table.insert(l, v) end l space:select{1234567} space:select{11234567} space:select{21234567} sorted(space.index.secondary:select('part1')) sorted(space.index.secondary:select('part1_a')) sorted(space.index.secondary:select('part_none')) sorted(s.index[1]:select({'part1', 'part2'})) sorted(space.index.secondary:select('part1')) sorted(space.index.secondary:select('part2')) -- cleanup space:delete(1234567) space:delete(11234567) space:delete(21234567) space:delete(31234567) space:delete(41234567) s:select{} s:truncate() s.index.primary:alter{type = 'hash'} s.index.secondary:alter{type = 'hash', unique = true, parts = { 2, 'string' }} space:insert{1, 'hello'} space:insert{2, 'brave'} space:insert{3, 'new'} space:insert{4, 'world'} env = require('test_run') test_run = env.new() -- Check how build_indexes() works test_run:cmd('restart server default') net_box = require('net.box') conn = net_box.connect('test:test@' .. box.cfg.listen) space = conn.space.tweedledum s = box.space.tweedledum -- Bug#929654 - secondary hash index is not built with build_indexes() sorted(space.index.secondary:select('hello')) sorted(space.index.secondary:select('brave')) sorted(space.index.secondary:select('new')) sorted(space.index.secondary:select('world')) s:truncate() -- A test case for: http://bugs.launchpad.net/bugs/735140 -- Partial REPLACE corrupts index. 
-- clean data and restart with appropriate config s.index.primary:alter{parts = {1, 'string'}} s.index.secondary:alter{type = 'tree', unique = false} space:insert{'Spears', 'Britney'} space:select{'Spears'} sorted(space.index.secondary:select('Britney')) -- try to insert the incoplete tuple space:replace{'Spears'} -- check that nothing has been updated space:select{'Spears'} -- cleanup space:delete('Spears') -- Test retrieval of duplicates via a secondary key s.index.primary:alter{parts = { 1, 'unsigned'}} space:insert{1, 'duplicate one'} space:insert{2, 'duplicate one'} space:insert{3, 'duplicate one'} space:insert{4, 'duplicate one'} space:insert{5, 'duplicate one'} space:insert{6, 'duplicate two'} space:insert{7, 'duplicate two'} space:insert{8, 'duplicate two'} space:insert{9, 'duplicate two'} space:insert{10, 'duplicate two'} space:insert{11, 'duplicate three'} space:insert{12, 'duplicate three'} space:insert{13, 'duplicate three'} space:insert{14, 'duplicate three'} space:insert{15, 'duplicate three'} sorted(space.index.secondary:select('duplicate one')) sorted(space.index.secondary:select('duplicate two')) sorted(space.index.secondary:select('duplicate three')) space:delete(1) space:delete(2) space:delete(3) space:delete(4) space:delete(5) space:delete(6) space:delete(7) space:delete(8) space:delete(9) space:delete(10) space:delete(11) space:delete(12) space:delete(13) space:delete(14) space:delete(15) -- Check min() and max() functions space:insert{1, 'Aardvark '} space:insert{2, 'Bilimbi'} space:insert{3, 'Creature '} s.index[1]:select{} s.index[0]:min() s.index[0]:max() s.index[1]:min() s.index[1]:max() space:delete(1) space:delete(2) space:delete(3) box.schema.user.drop('test') s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/iproto_stress.test.lua0000664000000000000000000000175113306560010022353 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') net_box = require('net.box') box.schema.user.grant('guest', 'read,write,execute', 
'universe') s = box.schema.space.create('test') _ = s:create_index('primary', {unique=true, parts={1, 'unsigned', 2, 'unsigned', 3, 'unsigned'}}) n_workers = 0 test_run:cmd("setopt delimiter ';'") function worker(i) n_workers = n_workers + 1 for j = 1,2 do local conn = net_box.connect(box.cfg.listen) for k = 1,10 do conn.space.test:insert{i, j, k} end conn:close() fiber.sleep(1) end n_workers = n_workers - 1 end; test_run:cmd("setopt delimiter ''"); for i = 1,5000 do fiber.create(worker, i) end fiber.sleep(0.1) -- check that iproto doesn't deplete tx fiber pool on wal stall (see gh-1892) box.error.injection.set("ERRINJ_WAL_DELAY", true) fiber.sleep(1.0) box.error.injection.set("ERRINJ_WAL_DELAY", false) repeat fiber.sleep(1) until n_workers == 0 box.schema.user.revoke('guest', 'read,write,execute', 'universe') s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/net_msg_max.result0000664000000000000000000000313213306565107021523 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... net_box = require('net.box') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... conn = net_box.connect(box.cfg.listen) --- ... conn2 = net_box.connect(box.cfg.listen) --- ... active = 0 --- ... finished = 0 --- ... continue = false --- ... limit = 768 --- ... run_max = (limit - 100) / 2 --- ... old_readahead = box.cfg.readahead --- ... box.cfg{readahead = 9000} --- ... long_str = string.rep('a', 1000) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function do_long_f(...) active = active + 1 while not continue do fiber.sleep(0.01) end active = active - 1 finished = finished + 1 end; --- ... function do_long(c) c:call('do_long_f', {long_str}) end; --- ... function run_workers(c) finished = 0 continue = false for i = 1, run_max do fiber.create(do_long, c) end end; --- ... -- Wait until 'active' stops growing - it means, that the input -- is blocked. 
function wait_active(value) while value ~= active do fiber.sleep(0.01) end end; --- ... function wait_finished(needed) continue = true while finished ~= needed do fiber.sleep(0.01) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- -- Test that message count limit is reachable. -- run_workers(conn) --- ... run_workers(conn2) --- ... wait_active(run_max * 2) --- ... active == run_max * 2 or active --- - true ... wait_finished(active) --- ... conn2:close() --- ... conn:close() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... box.cfg{readahead = old_readahead} --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/reconfigure.test.lua0000664000000000000000000000250613306565107021757 0ustar rootroottoo_long_threshold_default = box.cfg.too_long_threshold io_collect_interval_default = box.cfg.io_collect_interval box.cfg.too_long_threshold -- good box.cfg{too_long_threshold=0.2} box.cfg.too_long_threshold -- good box.cfg{snap_io_rate_limit=10} box.cfg.snap_io_rate_limit box.cfg.io_collect_interval box.cfg{io_collect_interval=0.001} box.cfg.io_collect_interval -- A test case for http://bugs.launchpad.net/bugs/712447: -- Valgrind reports use of not initialized memory after 'reload -- configuration' -- space = box.schema.space.create('tweedledum') index = space:create_index('primary') space:insert{1, 'tuple'} box.snapshot() box.cfg{} space:insert{2, 'tuple2'} box.snapshot() space:insert{3, 'tuple3'} box.snapshot() -- A test case for https://github.com/tarantool/tarantool/issues/112: -- Tarantool crashes with SIGSEGV during reload configuration -- -- log level box.cfg{log_level=5} -- constants box.cfg{wal_dir="dynamic"} box.cfg{memtx_dir="dynamic"} box.cfg{log="new logger"} -- bad1 box.cfg{memtx_memory=53687091} box.cfg.memtx_memory space:drop() box.cfg{snap_io_rate_limit=0} box.cfg{io_collect_interval=0} box.cfg{too_long_threshold=0.5} box.cfg.snap_io_rate_limit = nil box.cfg.io_collect_interval = nil box.cfg { too_long_threshold 
= too_long_threshold_default } box.cfg { io_collect_interval = io_collect_interval_default } tarantool_1.9.1.26.g63eb81e3c/test/box/access_misc.test.lua0000664000000000000000000001525213306565107021725 0ustar rootrootsession = box.session utils = require('utils') EMPTY_MAP = utils.setmap({}) -- -- Check a double create space -- s = box.schema.space.create('test') s = box.schema.space.create('test') -- -- Check a double drop space -- s:drop() s:drop() -- -- Check double create user -- box.schema.user.create('testus') box.schema.user.create('testus') s = box.schema.space.create('admin_space') index = s:create_index('primary', {type = 'hash', parts = {1, 'unsigned'}}) s:insert({1}) s:insert({2}) -- -- Check double grant and read access -- box.schema.user.grant('testus', 'read', 'space', 'admin_space') box.schema.user.grant('testus', 'read', 'space', 'admin_space') session.su('testus') s:select(1) s:insert({3}) s:delete(1) s:drop() -- -- Check double revoke -- session.su('admin') box.schema.user.revoke('testus', 'read', 'space', 'admin_space') box.schema.user.revoke('testus', 'read', 'space', 'admin_space') session.su('testus') s:select(1) session.su('admin') -- -- Check write access on space -- box.schema.user.grant('testus', 'write', 'space', 'admin_space') session.su('testus') s:select(1) s:delete(1) s:insert({3}) s:drop() session.su('admin') -- -- Check double drop user -- box.schema.user.drop('testus') box.schema.user.drop('testus') -- -- Check 'guest' user -- session.su('guest') session.uid() box.space._user:select(1) s:select(1) s:insert({4}) s:delete({3}) s:drop() gs = box.schema.space.create('guest_space') box.schema.func.create('guest_func') session.su('admin') s:select() -- -- Create user with universe read&write grants -- and create this user session -- box.schema.user.create('uniuser') box.schema.user.grant('uniuser', 'read, write, execute', 'universe') session.su('uniuser') uid = session.uid() -- -- Check universal user -- Check delete currently 
authenticated user -- box.schema.user.drop('uniuser') -- --Check create, call and drop function -- box.schema.func.create('uniuser_func') function uniuser_func() return 'hello' end uniuser_func() box.schema.func.drop('uniuser_func') -- -- Check create and drop space -- us = box.schema.space.create('uniuser_space') us:drop() -- -- Check create and drop user -- box.schema.user.create('uniuser_testus') box.schema.user.drop('uniuser_testus') -- -- Check access system and any spaces -- box.space.admin_space:select() box.space._user:select(1) box.space._space:select(280) us = box.schema.space.create('uniuser_space') box.schema.func.create('uniuser_func') session.su('admin') box.schema.user.create('someuser') box.schema.user.grant('someuser', 'read, write, execute', 'universe') session.su('someuser') -- -- Check drop objects of another user -- s:drop() us:drop() box.schema.func.drop('uniuser_func') box.schema.user.drop('uniuser_testus') session.su('admin') box.schema.func.drop('uniuser_func') box.schema.user.drop('someuser') box.schema.user.drop('uniuser_testus') box.schema.user.drop('uniuser') _ = box.space._user:delete(uid) s:drop() -- -- Check write grant on _user -- box.schema.user.create('testuser') maxuid = box.space._user.index.primary:max()[1] box.schema.user.grant('testuser', 'write', 'space', '_user') session.su('testuser') testuser_uid = session.uid() _ = box.space._user:delete(2) box.space._user:select(1) uid = box.space._user:insert{maxuid+1, session.uid(), 'someone', 'user', EMPTY_MAP}[1] _ = box.space._user:delete(uid) session.su('admin') box.space._user:select(1) _ = box.space._user:delete(testuser_uid) box.schema.user.revoke('testuser', 'write', 'space', '_user') -- -- Check read grant on _user -- box.schema.user.grant('testuser', 'read', 'space', '_user') session.su('testuser') _ = box.space._user:delete(2) box.space._user:select(1) box.space._user:insert{uid, session.uid(), 'someone2', 'user'} session.su('admin') -- -- Check read grant on _index -- 
box.schema.user.grant('testuser', 'read', 'space', '_index') session.su('testuser') box.space._index:select(272) box.space._index:insert{512, 1,'owner','tree', 1, 1, 0,'unsigned'} session.su('admin') box.schema.user.revoke('testuser', 'usage,session', 'universe') box.schema.user.revoke('testuser', 'read, write, execute', 'universe') box.schema.user.grant('testuser', 'usage,session', 'universe') -- -- Check that itertors check privileges -- s = box.schema.space.create('glade') box.schema.user.grant('testuser', 'read', 'space', 'glade') index = s:create_index('primary', {unique = true, parts = {1, 'unsigned', 2, 'string'}}) s:insert({1, 'A'}) s:insert({2, 'B'}) s:insert({3, 'C'}) s:insert({4, 'D'}) t = {} for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end t t = {} session.su('testuser') s:select() for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end t t = {} session.su('admin') box.schema.user.revoke('testuser', 'read', 'space', 'glade') box.schema.user.grant('testuser', 'write', 'space', 'glade') session.su('testuser') s:select() for key, v in s.index.primary:pairs(1, {iterator = 'GE'}) do table.insert (t, v) end t t = {} session.su('admin') box.schema.user.grant('testuser', 'read, write, execute', 'space', 'glade') session.su('testuser') s:select() for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end t t = {} session.su('guest') s:select() for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end t t = {} session.su('guest') s:select() for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end t -- -- Check that alter and truncate do not affect space access control. 
-- session.su('admin') _ = s:create_index('secondary', {unique = false, parts = {2, 'string'}}) session.su('testuser') s:select() session.su('admin') s:truncate() s:insert({1234, 'ABCD'}) session.su('testuser') s:select() session.su('admin') box.schema.user.drop('testuser') s:drop() -- -- gh-3089 usage access is not applied to owner -- box.schema.user.grant("guest","read, write, execute, create", "universe") box.session.su("guest") s = box.schema.space.create("test") _ = s:create_index("prim") test_func = function() end box.schema.func.create('test_func') sq = box.schema.sequence.create("test") box.session.su("admin") box.schema.user.revoke("guest", "usage", "universe") box.session.su("guest") s:select{} s:drop() sq:set(100) sq:drop() c = require("net.box").connect(os.getenv("LISTEN")) c:call("test_func") box.session.su("admin") box.schema.user.revoke("guest","read, write, execute, create", "universe") box.session.su("guest") s:select{} s:drop() sq:set(100) sq:drop() c = require("net.box").connect(os.getenv("LISTEN")) c:call("test_func") box.session.su("admin") box.schema.user.grant("guest","usage", "universe") box.schema.func.drop("test_func") s:drop() sq:drop() box.space._user:select() box.space._space:select() box.space._func:select() session = nil tarantool_1.9.1.26.g63eb81e3c/test/box/schema_reload.result0000664000000000000000000000764613306565107022026 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... net_box = require('net.box') --- ... fiber = require('fiber') --- ... LISTEN = require('uri').parse(box.cfg.listen) --- ... -- create first space s = box.schema.create_space('test') --- ... i = s:create_index('primary') --- ... cn = net_box.connect(LISTEN.host, LISTEN.service) --- ... -- check that schema is correct cn.space.test ~= nil --- - true ... old_schema_version = cn.schema_version --- ... -- create one more space s2 = box.schema.create_space('test2') --- ... i2 = s2:create_index('primary') --- ... 
---------------------------------- -- TEST #1 simple reload ---------------------------------- -- check that schema is not fresh cn.space.test2 == nil --- - true ... cn.schema_version == old_schema_version --- - true ... -- exec request with reload cn.space.test:select{} --- - [] ... cn.schema_version > old_schema_version --- - true ... ---------------------------------- -- TEST #2 parallel select/reload ---------------------------------- env = require('test_run') --- ... test_run = env.new() --- ... requests = 0 --- ... reloads = 0 --- ... test_run:cmd('setopt delimiter ";"') --- - true ... function selector() while true do cn.space.test:select{} requests = requests + 1 fiber.sleep(0.01) end end function reloader() while true do cn:reload_schema() reloads = reloads + 1 fiber.sleep(0.001) end end; --- ... test_run:cmd('setopt delimiter ""'); --- - true ... request_fiber = fiber.create(selector) --- ... reload_fiber = fiber.create(reloader) --- ... -- Check that each fiber works while requests < 10 or reloads < 10 do fiber.sleep(0.01) end --- ... requests < reloads --- - true ... -- cleanup request_fiber:cancel() --- ... reload_fiber:cancel() --- ... s:drop() --- ... s2:drop() --- ... -------------------------------------------------------------------------------- -- gh-1808: support schema_version in CALL, EVAL and PING -------------------------------------------------------------------------------- test_run:cmd('setopt delimiter ";"') --- - true ... function bump_schema_version() if box.space.bump_schema_version == nil then box.schema.create_space('bump_schema_version') else box.space.bump_schema_version:drop() end end; --- ... test_run:cmd('setopt delimiter ""'); --- - true ... cn = net_box.connect(box.cfg.listen) --- ... -- ping schema_version = cn.schema_version --- ... bump_schema_version() --- ... cn:ping() --- - true ... 
-- Sic: net.box returns true on :ping() even on ER_WRONG_SCHEMA_VERSION while cn.schema_version == schema_version do fiber.sleep(0.0001) end --- ... cn.schema_version == schema_version + 1 --- - true ... -- call schema_version = cn.schema_version --- ... bump_schema_version() --- ... function somefunc() return true end --- ... cn:call('somefunc') --- - true ... cn.schema_version == schema_version + 1 --- - true ... somefunc = nil --- ... -- failed call schema_version = cn.schema_version --- ... bump_schema_version() --- ... cn:call('somefunc') --- - error: Procedure 'somefunc' is not defined ... cn.schema_version == schema_version + 1 --- - true ... -- eval schema_version = cn.schema_version --- ... bump_schema_version() --- ... cn:eval('return') --- ... cn.schema_version == schema_version + 1 --- - true ... somefunc = nil --- ... -- failed eval schema_version = cn.schema_version --- ... bump_schema_version() --- ... cn:eval('error("xx")') --- - error: 'eval:1: xx' ... cn.schema_version == schema_version + 1 --- - true ... somefunc = nil --- ... cn:close() --- ... -- box.internal.schema_version() schema_version = box.internal.schema_version() --- ... schema_version > 0 --- - true ... bump_schema_version() --- ... box.internal.schema_version() == schema_version + 1 --- - true ... if box.space.bump_schema_version ~= nil then box.space.bump_schema_version:drop() end --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/alter.test.lua0000664000000000000000000006257013306565107020565 0ustar rootroot_space = box.space[box.schema.SPACE_ID] _index = box.space[box.schema.INDEX_ID] ADMIN = 1 env = require('test_run') test_run = env.new() test_run:cmd("push filter ', .lsn.: [0-9]+' to ''") utils = require('utils') EMPTY_MAP = utils.setmap({}) -- -- Test insertion into a system space - verify that -- mandatory fields are required. 
-- _space:insert{_space.id, ADMIN, 'test', 'memtx', 0, EMPTY_MAP, {}} -- -- Bad space id -- _space:insert{'hello', 'world', 'test', 'memtx', 0, EMPTY_MAP, {}} -- -- Can't create a space which has wrong field count - field_count must be NUM -- _space:insert{_space.id, ADMIN, 'test', 'world', 0, EMPTY_MAP, {}} -- -- There is already a tuple for the system space -- _space:insert{_space.id, ADMIN, '_space', 'memtx', 0, EMPTY_MAP, {}} _space:replace{_space.id, ADMIN, '_space', 'memtx', 0, EMPTY_MAP, {}} _space:insert{_index.id, ADMIN, '_index', 'memtx', 0, EMPTY_MAP, {}} _space:replace{_index.id, ADMIN, '_index', 'memtx', 0, EMPTY_MAP, {}} -- -- Can't change properties of a space -- _space:replace{_space.id, ADMIN, '_space', 'memtx', 0, EMPTY_MAP, {}} -- -- Can't drop a system space -- _space:delete{_space.id} _space:delete{_index.id} -- -- Can't change properties of a space -- _space:update({_space.id}, {{'-', 1, 1}}) _space:update({_space.id}, {{'-', 1, 2}}) -- -- Create a space -- t = _space:auto_increment{ADMIN, 'hello', 'memtx', 0, EMPTY_MAP, {}} -- Check that a space exists space = box.space[t[1]] space.id space.field_count space.index[0] -- -- check dml - the space has no indexes yet, but must not crash on DML -- space:select{0} space:insert{0, 0} space:replace{0, 0} space:update({0}, {{'+', 1, 1}}) space:delete{0} t = _space:delete{space.id} space_deleted = box.space[t[1]] space_deleted space:replace{0} _index:insert{_space.id, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'} _index:replace{_space.id, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'} _index:insert{_index.id, 0, 'primary', 'tree', 1, 2, 0, 'unsigned', 1, 'unsigned'} _index:replace{_index.id, 0, 'primary', 'tree', 1, 2, 0, 'unsigned', 1, 'unsigned'} -- access_sysview.test changes output of _index:select{}. -- let's change _index space in such a way that it will be -- uniformn weather access_sysview.test is completed of not. 
box.space._space.index.owner:alter{parts = {2, 'unsigned'}} box.space._vspace.index.owner:alter{parts = {2, 'unsigned'}} _index:select{} -- modify indexes of a system space _index:delete{_index.id, 0} _space:insert{1000, ADMIN, 'hello', 'memtx', 0, EMPTY_MAP, {}} _index:insert{1000, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'} box.space[1000]:insert{0, 'hello, world'} box.space[1000]:drop() box.space[1000] -- test that after disabling triggers on system spaces we still can -- get a correct snapshot _index:run_triggers(false) _space:run_triggers(false) box.snapshot() test_run:cmd("restart server default with cleanup=1") utils = require('utils') EMPTY_MAP = utils.setmap({}) ADMIN = 1 box.space['_space']:insert{1000, ADMIN, 'test', 'memtx', 0, EMPTY_MAP, {}} box.space[1000].id box.space['_space']:delete{1000} box.space[1000] -------------------------------------------------------------------------------- -- #197: box.space.space0:len() returns an error if there is no index -------------------------------------------------------------------------------- space = box.schema.space.create('gh197') space:len() space:truncate() space:pairs():totable() space:drop() -------------------------------------------------------------------------------- -- #198: names like '' and 'x.y' and 5 and 'primary ' are legal -- -- The result of this test is superseded by the change made -- in scope of gh-2914, which allows all printable characters for -- identifiers. 
-- -------------------------------------------------------------------------------- -- invalid identifiers s = box.schema.space.create('invalid.identifier') s.name s:drop() s = box.schema.space.create('invalid identifier') s.name s:drop() s = box.schema.space.create('primary ') '|'..s.name..'|' s:drop() s = box.schema.space.create('5') s.name s:drop() box.schema.space.create('') -- valid identifiers box.schema.space.create('_Abcde'):drop() box.schema.space.create('_5'):drop() box.schema.space.create('valid_identifier'):drop() -- some OS-es ship incomplete locales, breaking ID validation weird_chars='' if jit.os~='OSX' and jit.os~='BSD' then weird_chars='空間' end box.schema.space.create('ынтыпрайзный_'..weird_chars):drop() -- unicode box.schema.space.create('utf8_наше_Фсё'):drop() -- unicode space = box.schema.space.create('test') -- invalid identifiers i = space:create_index('invalid.identifier') i.name i:drop() i = space:create_index('invalid identifier') i.name i:drop() i = space:create_index('primary ') '|'..i.name..'|' i:drop() i = space:create_index('5') i.name i:drop() space:create_index('') space:drop() -- gh-57 Confusing error message when trying to create space with a -- duplicate id auto = box.schema.space.create('auto_original') box.schema.space.create('auto', {id = auto.id}) box.schema.space.drop('auto') box.schema.space.create('auto_original', {id = auto.id}) auto:drop() -- ------------------------------------------------------------------ -- gh-281 Crash after rename + replace + delete with multi-part index -- ------------------------------------------------------------------ s = box.schema.space.create('space') index = s:create_index('primary', {unique = true, parts = {1, 'unsigned', 2, 'string'}}) s:insert{1, 'a'} box.space.space.index.primary:rename('secondary') box.space.space:replace{1,'The rain in Spain'} box.space.space:delete{1,'The rain in Spain'} box.space.space:select{} s:drop() -- 
------------------------------------------------------------------ -- gh-362 Appropriate error messages in create_index -- ------------------------------------------------------------------ s = box.schema.space.create(42) s = box.schema.space.create("test", "bug") s = box.schema.space.create("test", {unknown = 'param'}) s = box.schema.space.create("test") index = s:create_index('primary', {unique = true, parts = {0, 'unsigned', 1, 'string'}}) index = s:create_index('primary', {unique = true, parts = {'unsigned', 1, 'string', 2}}) index = s:create_index('primary', {unique = true, parts = 'bug'}) index = s:create_index('test', {unique = true, parts = {1, 'unsigned'}, mmap = true}) s:drop() -- ------------------------------------------------------------------ -- gh-155 Tarantool failure on simultaneous space:drop() -- ------------------------------------------------------------------ test_run:cmd("setopt delimiter ';'") local fiber = require('fiber') local W = 4 local N = 50 local ch = fiber.channel(W) for i=1,W do fiber.create(function() for k=1,N do local space_id = math.random(2147483647) local space = box.schema.space.create(string.format('space_%d', space_id)) space:create_index('pk', { type = 'tree' }) space:drop() end ch:put(true) end) end for i=1,W do ch:get() end test_run:cmd("setopt delimiter ''"); -- ------------------------------------------------------------------ -- Lower and upper cases -- ------------------------------------------------------------------ space = box.schema.space.create("test") _ = space:create_index('primary', { parts = {1, 'nUmBeR', 2, 'StRinG'}}) space.index.primary.parts[1].type == 'number' space.index.primary.parts[2].type == 'string' box.space._index:get({space.id, 0})[6] space:drop() -- ------------------------------------------------------------------ -- Aliases -- ------------------------------------------------------------------ space = box.schema.space.create("test") _ = space:create_index('primary', { parts = {1, 'uint', 2, 
'int', 3, 'str'}}) space.index.primary.parts[1].type == 'unsigned' space.index.primary.parts[2].type == 'integer' space.index.primary.parts[3].type == 'string' box.space._index:get({space.id, 0})[6] space:drop() -- ------------------------------------------------------------------ -- Tarantool 1.6 compatibility -- ------------------------------------------------------------------ -- gh-1534: deprecate 'num' data type for unsigned integers space = box.schema.space.create("test") _ = space:create_index('primary', { parts = {1, 'num'}}) space.index.primary.parts[1].type == 'unsigned' box.space._index:get({space.id, 0})[6] space:drop() -- data dictionary compatibility is checked by upgrade.test.lua test_run:cmd("clear filter") -- -- create_index() does not modify index options -- s = box.schema.space.create('test', {engine='vinyl'}) opts = {parts={1, 'unsigned'}} _ = s:create_index('pk', opts) opts s:drop() -- -- gh-2074: alter a primary key -- s = box.schema.space.create('test') _ = s:create_index('pk') s:insert{1, 1} s:insert{2, 2} s:insert{3, 3} s.index.pk:alter({parts={1, 'num', 2, 'num'}}) s.index.pk s:select{} _ = s:create_index('secondary', {parts={2, 'num'}}) s.index.pk:alter({parts={1, 'num'}}) s:select{} s.index.pk s.index.secondary s.index.secondary:select{} s:drop() -- -- Forbid explicit space id 0. -- s = box.schema.create_space('test', { id = 0 }) -- -- gh-2660 space:truncate() does not preserve table triggers -- ts = box.schema.space.create('test') ti = ts:create_index('primary') ts:insert{1, 'b', 'c'} ts:insert{2, 'b', 'c'} o = nil n = nil function save_out(told, tnew) o = told n = tnew end _ = ts:on_replace(save_out) ts:replace{2, 'a', 'b', 'c'} o n ts:truncate() ts:replace{2, 'a', 'b'} o n ts:replace{3, 'a', 'b'} o n ts:drop() -- -- gh-2652: validate space format. 
-- s = box.schema.space.create('test', { format = "format" }) format = { { name = 100 } } s = box.schema.space.create('test', { format = format }) long = string.rep('a', box.schema.NAME_MAX + 1) format = { { name = long } } s = box.schema.space.create('test', { format = format }) format = { { name = 'id', type = '100' } } s = box.schema.space.create('test', { format = format }) format = { utils.setmap({}) } s = box.schema.space.create('test', { format = format }) -- Ensure the format is updated after index drop. format = { { name = 'id', type = 'unsigned' } } s = box.schema.space.create('test', { format = format }) pk = s:create_index('pk') sk = s:create_index('sk', { parts = { 2, 'string' } }) s:replace{1, 1} sk:drop() s:replace{1, 1} s:drop() -- Check index parts conflicting with space format. format = { { name='field1', type='unsigned' }, { name='field2', type='string' }, { name='field3', type='scalar' } } s = box.schema.space.create('test', { format = format }) pk = s:create_index('pk') sk1 = s:create_index('sk1', { parts = { 2, 'unsigned' } }) -- Check space format conflicting with index parts. sk3 = s:create_index('sk3', { parts = { 2, 'string' } }) format[2].type = 'unsigned' s:format(format) s:format() s.index.sk3.parts -- Space format can be updated, if conflicted index is deleted. sk3:drop() s:format(format) s:format() -- Check deprecated field types. format[2].type = 'num' format[3].type = 'str' format[4] = { name = 'field4', type = '*' } format s:format(format) s:format() s:replace{1, 2, '3', {4, 4, 4}} -- Check not indexed fields checking. s:truncate() format[2] = {name='field2', type='string'} format[3] = {name='field3', type='array'} format[4] = {name='field4', type='number'} format[5] = {name='field5', type='integer'} format[6] = {name='field6', type='scalar'} format[7] = {name='field7', type='map'} format[8] = {name='field8', type='any'} format[9] = {name='field9'} s:format(format) -- Check incorrect field types. 
format[9] = {name='err', type='any'} s:format(format) s:replace{1, '2', {3, 3}, 4.4, -5, true, {value=7}, 8, 9} s:replace{1, 2, {3, 3}, 4.4, -5, true, {value=7}, 8, 9} s:replace{1, '2', 3, 4.4, -5, true, {value=7}, 8, 9} s:replace{1, '2', {3, 3}, '4', -5, true, {value=7}, 8, 9} s:replace{1, '2', {3, 3}, 4.4, -5.5, true, {value=7}, 8, 9} s:replace{1, '2', {3, 3}, 4.4, -5, {6, 6}, {value=7}, 8, 9} s:replace{1, '2', {3, 3}, 4.4, -5, true, {7}, 8, 9} s:replace{1, '2', {3, 3}, 4.4, -5, true, {value=7}} s:replace{1, '2', {3, 3}, 4.4, -5, true, {value=7}, 8} s:truncate() -- -- gh-1014: field names. -- format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2'} format[3] = {name = 'field1'} s:format(format) s:drop() -- https://github.com/tarantool/tarantool/issues/2815 -- Extend space format definition syntax format = {{name='key',type='unsigned'}, {name='value',type='string'}} s = box.schema.space.create('test', { format = format }) s:format() s:format({'id', 'name'}) s:format() s:format({'id', {'name1'}}) s:format() s:format({'id', {'name2', 'string'}}) s:format() s:format({'id', {'name', type = 'string'}}) s:format() s:drop() format = {'key', {'value',type='string'}} s = box.schema.space.create('test', { format = format }) s:format() s:drop() s = box.schema.space.create('test') s:create_index('test', {parts = {'test'}}) s:create_index('test', {parts = {{'test'}}}) s:create_index('test', {parts = {{field = 'test'}}}) s:create_index('test', {parts = {1}}).parts s:drop() s = box.schema.space.create('test') s:format{{'test1', 'integer'}, 'test2', {'test3', 'integer'}, {'test4','scalar'}} s:create_index('test', {parts = {'test'}}) s:create_index('test', {parts = {{'test'}}}) s:create_index('test', {parts = {{field = 'test'}}}) s:create_index('test1', {parts = {'test1'}}).parts s:create_index('test2', {parts = {'test2'}}).parts s:create_index('test3', {parts = {{'test1', 'integer'}}}).parts s:create_index('test4', {parts = {{'test2', 
'integer'}}}).parts s:create_index('test5', {parts = {{'test2', 'integer'}}}).parts s:create_index('test6', {parts = {1, 3}}).parts s:create_index('test7', {parts = {'test1', 4}}).parts s:create_index('test8', {parts = {{1, 'integer'}, {'test4', 'scalar'}}}).parts s:drop() -- -- gh-2800: space formats checking is broken. -- -- Ensure that vinyl correctly process field count change. s = box.schema.space.create('test', {engine = 'vinyl', field_count = 2}) pk = s:create_index('pk') s:replace{1, 2} t = box.space._space:select{s.id}[1]:totable() t[5] = 1 box.space._space:replace(t) s:drop() -- Check field type changes. format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'any'} format[3] = {name = 'field3', type = 'unsigned'} format[4] = {name = 'field4', type = 'string'} format[5] = {name = 'field5', type = 'number'} format[6] = {name = 'field6', type = 'integer'} format[7] = {name = 'field7', type = 'boolean'} format[8] = {name = 'field8', type = 'scalar'} format[9] = {name = 'field9', type = 'array'} format[10] = {name = 'field10', type = 'map'} s = box.schema.space.create('test', {format = format}) pk = s:create_index('pk') t = s:replace{1, {2}, 3, '4', 5.5, -6, true, -8, {9, 9}, {val = 10}} test_run:cmd("setopt delimiter ';'") function fail_format_change(fieldno, new_type) local old_type = format[fieldno].type format[fieldno].type = new_type local ok, msg = pcall(s.format, s, format) format[fieldno].type = old_type return msg end; function ok_format_change(fieldno, new_type) local old_type = format[fieldno].type format[fieldno].type = new_type s:format(format) s:delete{1} format[fieldno].type = old_type s:format(format) s:replace(t) end; test_run:cmd("setopt delimiter ''"); -- any --X--> unsigned fail_format_change(2, 'unsigned') -- unsigned -----> any ok_format_change(3, 'any') -- unsigned --X--> string fail_format_change(3, 'string') -- unsigned -----> number ok_format_change(3, 'number') -- unsigned -----> integer 
ok_format_change(3, 'integer') -- unsigned -----> scalar ok_format_change(3, 'scalar') -- unsigned --X--> map fail_format_change(3, 'map') -- string -----> any ok_format_change(4, 'any') -- string -----> scalar ok_format_change(4, 'scalar') -- string --X--> boolean fail_format_change(4, 'boolean') -- number -----> any ok_format_change(5, 'any') -- number -----> scalar ok_format_change(5, 'scalar') -- number --X--> integer fail_format_change(5, 'integer') -- integer -----> any ok_format_change(6, 'any') -- integer -----> number ok_format_change(6, 'number') -- integer -----> scalar ok_format_change(6, 'scalar') -- integer --X--> unsigned fail_format_change(6, 'unsigned') -- boolean -----> any ok_format_change(7, 'any') -- boolean -----> scalar ok_format_change(7, 'scalar') -- boolean --X--> string fail_format_change(7, 'string') -- scalar -----> any ok_format_change(8, 'any') -- scalar --X--> unsigned fail_format_change(8, 'unsigned') -- array -----> any ok_format_change(9, 'any') -- array --X--> scalar fail_format_change(9, 'scalar') -- map -----> any ok_format_change(10, 'any') -- map --X--> scalar fail_format_change(10, 'scalar') s:drop() -- Check new fields adding. format = {} s = box.schema.space.create('test') format[1] = {name = 'field1', type = 'unsigned'} s:format(format) -- Ok, no indexes. pk = s:create_index('pk') format[2] = {name = 'field2', type = 'unsigned'} s:format(format) -- Ok, empty space. s:replace{1, 1} format[2] = nil s:format(format) -- Ok, can delete fields with no checks. 
s:delete{1} sk1 = s:create_index('sk1', {parts = {2, 'unsigned'}}) sk2 = s:create_index('sk2', {parts = {3, 'unsigned'}}) sk5 = s:create_index('sk5', {parts = {5, 'unsigned'}}) s:replace{1, 1, 1, 1, 1} format[2] = {name = 'field2', type = 'unsigned'} format[3] = {name = 'field3', type = 'unsigned'} format[4] = {name = 'field4', type = 'any'} format[5] = {name = 'field5', type = 'unsigned'} -- Ok, all new fields are indexed or have type ANY, and new -- field_count <= old field_count. s:format(format) s:replace{1, 1, 1, 1, 1, 1} format[6] = {name = 'field6', type = 'unsigned'} -- Ok, but check existing tuples for a new field[6]. s:format(format) -- Fail, not enough fields. s:replace{2, 2, 2, 2, 2} s:replace{2, 2, 2, 2, 2, 2, 2} format[7] = {name = 'field7', type = 'unsigned'} -- Fail, the tuple {1, ... 1} is invalid for a new format. s:format(format) s:drop() -- Vinyl does not support adding fields to a not empty space. s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk') s:replace{1,1} format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'unsigned'} s:format(format) s:drop() -- -- gh-1557: NULL in indexes. 
-- NULL = require('msgpack').NULL format = {} format[1] = { name = 'field1', type = 'unsigned', is_nullable = true } format[2] = { name = 'field2', type = 'unsigned', is_nullable = true } s = box.schema.space.create('test', { format = format }) s:create_index('primary', { parts = { 'field1' } }) s:create_index('primary', { parts = {{'field1', is_nullable = false}} }) format[1].is_nullable = false s:format(format) s:create_index('primary', { parts = {{'field1', is_nullable = true}} }) s:create_index('primary', { parts = {'field1'} }) -- Check that is_nullable can't be set to false on non-empty space s:insert({1, NULL}) format[1].is_nullable = true s:format(format) format[1].is_nullable = false format[2].is_nullable = false s:format(format) s:delete(1) -- Disable is_nullable on empty space s:format(format) -- Disable is_nullable on a non-empty space. format[2].is_nullable = true s:format(format) s:replace{1, 1} format[2].is_nullable = false s:format(format) -- Enable is_nullable on a non-empty space. 
format[2].is_nullable = true s:format(format) s:replace{1, box.NULL} s:delete{1} s:format({}) s:create_index('secondary', { parts = {{2, 'string', is_nullable = true}} }) s:insert({1, NULL}) s.index.secondary:alter({ parts = {{2, 'string', is_nullable = false} }}) s:delete({1}) s.index.secondary:alter({ parts = {{2, 'string', is_nullable = false} }}) s:insert({1, NULL}) s:insert({2, 'xxx'}) s.index.secondary:alter({ parts = {{2, 'string', is_nullable = true} }}) s:insert({1, NULL}) s:drop() s = box.schema.create_space('test') test_run:cmd("setopt delimiter ';'") s:format({ [1] = { name = 'id1', type = 'unsigned'}, [2] = { name = 'id2', type = 'unsigned'}, [3] = { name = 'id3', type = 'string'}, [4] = { name = 'id4', type = 'string'}, [5] = { name = 'id5', type = 'string'}, [6] = { name = 'id6', type = 'string'}, }); test_run:cmd("setopt delimiter ''"); s:format() _ = s:create_index('primary') s:insert({1, 1, 'a', 'b', 'c', 'd'}) s:drop() s = box.schema.create_space('test') idx = s:create_index('idx') box.space.test == s s:drop() -- -- gh-3000: index modifying must change key_def parts and -- comparators. They can be changed, if there was compatible index -- parts change. For example, a part type was changed from -- unsigned to number. In such a case comparators must be reset -- and part types updated. -- s = box.schema.create_space('test') pk = s:create_index('pk') s:replace{1} pk:alter{parts = {{1, 'integer'}}} s:replace{-2} s:select{} s:drop() -- -- Allow to restrict space format, if corresponding restrictions -- already are defined in indexes. 
-- test_run:cmd("setopt delimiter ';'") function check_format_restriction(engine, name) local s = box.schema.create_space(name, {engine = engine}) local pk = s:create_index('pk') local format = {} format[1] = {name = 'field1'} s:replace{1} s:replace{100} s:replace{0} s:format(format) s:format() format[1].type = 'unsigned' s:format(format) end; test_run:cmd("setopt delimiter ''"); check_format_restriction('memtx', 'test1') check_format_restriction('vinyl', 'test2') box.space.test1:format() box.space.test1:select{} box.space.test2:format() box.space.test2:select{} box.space.test1:drop() box.space.test2:drop() -- -- Allow to change is_nullable in index definition on non-empty -- space. -- s = box.schema.create_space('test') pk = s:create_index('pk') sk1 = s:create_index('sk1', {parts = {{2, 'unsigned', is_nullable = true}}}) sk2 = s:create_index('sk2', {parts = {{3, 'unsigned', is_nullable = false}}}) s:replace{1, box.NULL, 1} sk1:alter({parts = {{2, 'unsigned', is_nullable = false}}}) s:replace{1, 1, 1} sk1:alter({parts = {{2, 'unsigned', is_nullable = false}}}) s:replace{1, 1, box.NULL} sk2:alter({parts = {{3, 'unsigned', is_nullable = true}}}) s:replace{1, 1, box.NULL} s:replace{2, 10, 100} s:replace{3, 0, 20} s:replace{4, 15, 150} s:replace{5, 9, box.NULL} sk1:select{} sk2:select{} s:drop() -- -- gh-3008: allow multiple types on the same field. 
-- format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'scalar'} format[3] = {name = 'field3', type = 'integer'} s = box.schema.create_space('test', {format = format}) pk = s:create_index('pk') sk1 = s:create_index('sk1', {parts = {{2, 'number'}}}) sk2 = s:create_index('sk2', {parts = {{2, 'integer'}}}) sk3 = s:create_index('sk3', {parts = {{2, 'unsigned'}}}) sk4 = s:create_index('sk4', {parts = {{3, 'number'}}}) s:format() s:replace{1, '100', -20.2} s:replace{1, 100, -20.2} s:replace{1, 100, -20} s:replace{2, 50, 0} s:replace{3, 150, -60} s:replace{4, 0, 120} pk:select{} sk1:select{} sk2:select{} sk3:select{} sk4:select{} sk1:alter{parts = {{2, 'unsigned'}}} sk2:alter{parts = {{2, 'unsigned'}}} sk4:alter{parts = {{3, 'integer'}}} s:replace{1, 50.5, 1.5} s:replace{1, 50, 1.5} s:replace{5, 5, 5} sk1:select{} sk2:select{} sk3:select{} sk4:select{} sk1:drop() sk2:drop() sk3:drop() -- Remove 'unsigned' constraints from indexes, and 'scalar' now -- can be inserted in the second field. s:replace{1, true, 100} s:select{} sk4:select{} s:drop() -- -- gh-2914: Allow any space name which consists of printable characters -- identifier = require("identifier") test_run:cmd("setopt delimiter ';'") identifier.run_test( function (identifier) box.schema.space.create(identifier) if box.space[identifier] == nil then error("Cannot query space") end end, function (identifier) box.space[identifier]:drop() end ); s = box.schema.create_space("test"); identifier.run_test( function (identifier) s:create_index(identifier, {parts={1}}) end, function (identifier) s.index[identifier]:drop() end ); s:drop(); -- -- gh-2914: check column name validation. -- Ensure that col names are validated as identifiers. 
-- s = box.schema.create_space('test'); i = s:create_index("primary", {parts={1, "integer"}}); identifier.run_test( function (identifier) s:format({{name=identifier,type="integer"}}) local t = s:replace({1}) if t[identifier] ~= 1 then error("format identifier error") end end, function (identifier) end ); s:drop(); -- gh-2914: check coll name validation. identifier.run_test( function (identifier) box.internal.collation.create(identifier, 'ICU', 'ru-RU', {}) end, function (identifier) box.internal.collation.drop(identifier) end ); test_run:cmd("setopt delimiter ''"); -- -- gh-3011: add new names to old tuple formats. -- s = box.schema.create_space('test') pk = s:create_index('pk') t1 = s:replace{1} t1.field1 format = {} format[1] = {name = 'field1', type = 'unsigned'} s:format(format) t2 = s:replace{2} t2.field1 t1.field1 format[1].name = 'field_1' s:format(format) t3 = s:replace{3} t1.field1 t1.field_1 t2.field1 t2.field_1 t3.field1 t3.field_1 s:drop() -- -- gh-3008. Ensure the change of hash index parts updates hash -- key_def. -- s = box.schema.create_space('test') pk = s:create_index('pk', {type = 'hash'}) pk:alter{parts = {{1, 'string'}}} s:replace{'1', '1'} s:replace{'1', '2'} pk:select{} pk:select{'1'} s:drop() -- -- Ensure that incompatible key parts change validates format. -- s = box.schema.create_space('test') pk = s:create_index('pk') s:replace{1} pk:alter{parts = {{1, 'string'}}} -- Must fail. s:drop() -- -- gh-2895: do not ignore field type in space format, if it is not -- specified via 'type = ...'. -- format = {} format[1] = {name = 'field1', 'unsigned'} format[2] = {name = 'field2', 'unsigned'} s = box.schema.create_space('test', {format = format}) s:format() format[2] = {name = 'field2', 'unsigned', 'unknown'} s:format(format) s:format() s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/tree_pk_multipart.result0000664000000000000000000004132313306560010022744 0ustar rootroot-- -- Insert test -- env = require('test_run') --- ... 
test_run = env.new() --- ... space = box.schema.space.create('tweedledum') --- ... -- Multipart primary key (sender nickname, receiver nickname, message id) i1 = space:create_index('primary', { type = 'tree', parts = {1, 'string', 2, 'string', 3, 'unsigned'}, unique = true }) --- ... space:insert{'Vincent', 'Jules', 0, 'Do you know what they call a - a - a Quarter Pounder with cheese in Paris?'} --- - ['Vincent', 'Jules', 0, 'Do you know what they call a - a - a Quarter Pounder with cheese in Paris?'] ... space:insert{'Jules', 'Vincent', 0, 'They don`t call it a Quarter Pounder with cheese?'} --- - ['Jules', 'Vincent', 0, 'They don`t call it a Quarter Pounder with cheese?'] ... space:insert{'Vincent', 'Jules', 1, 'No man, they got the metric system. They wouldn`t know what the f--k a Quarter Pounder is.'} --- - ['Vincent', 'Jules', 1, 'No man, they got the metric system. They wouldn`t know what the f--k a Quarter Pounder is.'] ... space:insert{'Jules', 'Vincent', 1, 'Then what do they call it?'} --- - ['Jules', 'Vincent', 1, 'Then what do they call it?'] ... space:insert{'Vincent', 'Jules', 2, 'They call it a `Royale` with cheese.'} --- - ['Vincent', 'Jules', 2, 'They call it a `Royale` with cheese.'] ... space:insert{'Jules', 'Vincent', 2, 'A `Royale` with cheese!'} --- - ['Jules', 'Vincent', 2, 'A `Royale` with cheese!'] ... space:insert{'Vincent', 'Jules', 3, 'That`s right.'} --- - ['Vincent', 'Jules', 3, 'That`s right.'] ... space:insert{'Jules', 'Vincent', 3, 'What do they call a Big Mac?'} --- - ['Jules', 'Vincent', 3, 'What do they call a Big Mac?'] ... space:insert{'Vincent', 'Jules', 4, 'A Big Mac`s a Big Mac, but they call it `Le Big Mac.`'} --- - ['Vincent', 'Jules', 4, 'A Big Mac`s a Big Mac, but they call it `Le Big Mac.`'] ... space:insert{'Jules', 'Vincent', 4, '`Le Big Mac!`'} --- - ['Jules', 'Vincent', 4, '`Le Big Mac!`'] ... space:insert{'Vincent', 'Jules', 5, 'Ha, ha, ha.'} --- - ['Vincent', 'Jules', 5, 'Ha, ha, ha.'] ... 
space:insert{'Jules', 'Vincent', 5, 'What do they call a `Whopper`?'} --- - ['Jules', 'Vincent', 5, 'What do they call a `Whopper`?'] ... space:insert{'Vincent', 'Jules', 6, 'I dunno, I didn`t go into Burger King.'} --- - ['Vincent', 'Jules', 6, 'I dunno, I didn`t go into Burger King.'] ... space:insert{'The Wolf!', 'Vincent', 0, 'Jimmie, lead the way. Boys, get to work.'} --- - ['The Wolf!', 'Vincent', 0, 'Jimmie, lead the way. Boys, get to work.'] ... space:insert{'Vincent', 'The Wolf!', 0, 'A please would be nice.'} --- - ['Vincent', 'The Wolf!', 0, 'A please would be nice.'] ... space:insert{'The Wolf!', 'Vincent', 1, 'Come again?'} --- - ['The Wolf!', 'Vincent', 1, 'Come again?'] ... space:insert{'Vincent', 'The Wolf!', 1, 'I said a please would be nice.'} --- - ['Vincent', 'The Wolf!', 1, 'I said a please would be nice.'] ... space:insert{'The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'} --- - ['The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'] ... space:insert{'The Wolf!', 'Vincent', 3, 'I don`t mean any disrespect, I just don`t like people barking orders at me.'} --- - ['The Wolf!', 'Vincent', 3, 'I don`t mean any disrespect, I just don`t like people barking orders at me.'] ... space:insert{'Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. 
Clean the fucking car.'} --- - ['Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'] ... -- -- Select test -- -- Select by one entry space.index['primary']:get{'Vincent', 'Jules', 0} --- - ['Vincent', 'Jules', 0, 'Do you know what they call a - a - a Quarter Pounder with cheese in Paris?'] ... space.index['primary']:get{'Jules', 'Vincent', 0} --- - ['Jules', 'Vincent', 0, 'They don`t call it a Quarter Pounder with cheese?'] ... space.index['primary']:get{'Vincent', 'Jules', 1} --- - ['Vincent', 'Jules', 1, 'No man, they got the metric system. They wouldn`t know what the f--k a Quarter Pounder is.'] ... space.index['primary']:get{'Jules', 'Vincent', 1} --- - ['Jules', 'Vincent', 1, 'Then what do they call it?'] ... space.index['primary']:get{'Vincent', 'Jules', 2} --- - ['Vincent', 'Jules', 2, 'They call it a `Royale` with cheese.'] ... space.index['primary']:get{'Jules', 'Vincent', 2} --- - ['Jules', 'Vincent', 2, 'A `Royale` with cheese!'] ... space.index['primary']:get{'Vincent', 'Jules', 3} --- - ['Vincent', 'Jules', 3, 'That`s right.'] ... space.index['primary']:get{'Jules', 'Vincent', 3} --- - ['Jules', 'Vincent', 3, 'What do they call a Big Mac?'] ... space.index['primary']:get{'Vincent', 'Jules', 4} --- - ['Vincent', 'Jules', 4, 'A Big Mac`s a Big Mac, but they call it `Le Big Mac.`'] ... space.index['primary']:get{'Jules', 'Vincent', 4} --- - ['Jules', 'Vincent', 4, '`Le Big Mac!`'] ... space.index['primary']:get{'Vincent', 'Jules', 5} --- - ['Vincent', 'Jules', 5, 'Ha, ha, ha.'] ... space.index['primary']:get{'Jules', 'Vincent', 5} --- - ['Jules', 'Vincent', 5, 'What do they call a `Whopper`?'] ... space.index['primary']:get{'Vincent', 'Jules', 6} --- - ['Vincent', 'Jules', 6, 'I dunno, I didn`t go into Burger King.'] ... 
space.index['primary']:get{'The Wolf!', 'Vincent', 0} --- - ['The Wolf!', 'Vincent', 0, 'Jimmie, lead the way. Boys, get to work.'] ... space.index['primary']:get{'Vincent', 'The Wolf!', 0} --- - ['Vincent', 'The Wolf!', 0, 'A please would be nice.'] ... space.index['primary']:get{'The Wolf!', 'Vincent', 1} --- - ['The Wolf!', 'Vincent', 1, 'Come again?'] ... space.index['primary']:get{'Vincent', 'The Wolf!', 1} --- - ['Vincent', 'The Wolf!', 1, 'I said a please would be nice.'] ... space.index['primary']:get{'The Wolf!', 'Vincent', 2} --- - ['The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'] ... space.index['primary']:get{'The Wolf!', 'Vincent', 3} --- - ['The Wolf!', 'Vincent', 3, 'I don`t mean any disrespect, I just don`t like people barking orders at me.'] ... space.index['primary']:get{'Vincent', 'The Wolf!', 2} --- - ['Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'] ... -- Select all messages from Vincent to Jules space.index['primary']:select({'Vincent', 'Jules'}) --- - - ['Vincent', 'Jules', 0, 'Do you know what they call a - a - a Quarter Pounder with cheese in Paris?'] - ['Vincent', 'Jules', 1, 'No man, they got the metric system. They wouldn`t know what the f--k a Quarter Pounder is.'] - ['Vincent', 'Jules', 2, 'They call it a `Royale` with cheese.'] - ['Vincent', 'Jules', 3, 'That`s right.'] - ['Vincent', 'Jules', 4, 'A Big Mac`s a Big Mac, but they call it `Le Big Mac.`'] - ['Vincent', 'Jules', 5, 'Ha, ha, ha.'] - ['Vincent', 'Jules', 6, 'I dunno, I didn`t go into Burger King.'] ... 
-- Select all messages from Jules to Vincent space.index['primary']:select({'Jules', 'Vincent'}) --- - - ['Jules', 'Vincent', 0, 'They don`t call it a Quarter Pounder with cheese?'] - ['Jules', 'Vincent', 1, 'Then what do they call it?'] - ['Jules', 'Vincent', 2, 'A `Royale` with cheese!'] - ['Jules', 'Vincent', 3, 'What do they call a Big Mac?'] - ['Jules', 'Vincent', 4, '`Le Big Mac!`'] - ['Jules', 'Vincent', 5, 'What do they call a `Whopper`?'] ... -- Select all messages from Vincent to The Wolf space.index['primary']:select({'Vincent', 'The Wolf!'}) --- - - ['Vincent', 'The Wolf!', 0, 'A please would be nice.'] - ['Vincent', 'The Wolf!', 1, 'I said a please would be nice.'] - ['Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'] ... -- Select all messages from The Wolf to Vincent space.index['primary']:select({'The Wolf!', 'Vincent'}) --- - - ['The Wolf!', 'Vincent', 0, 'Jimmie, lead the way. Boys, get to work.'] - ['The Wolf!', 'Vincent', 1, 'Come again?'] - ['The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'] - ['The Wolf!', 'Vincent', 3, 'I don`t mean any disrespect, I just don`t like people barking orders at me.'] ... -- Select all Vincent messages space.index['primary']:select({'Vincent'}) --- - - ['Vincent', 'Jules', 0, 'Do you know what they call a - a - a Quarter Pounder with cheese in Paris?'] - ['Vincent', 'Jules', 1, 'No man, they got the metric system. 
They wouldn`t know what the f--k a Quarter Pounder is.'] - ['Vincent', 'Jules', 2, 'They call it a `Royale` with cheese.'] - ['Vincent', 'Jules', 3, 'That`s right.'] - ['Vincent', 'Jules', 4, 'A Big Mac`s a Big Mac, but they call it `Le Big Mac.`'] - ['Vincent', 'Jules', 5, 'Ha, ha, ha.'] - ['Vincent', 'Jules', 6, 'I dunno, I didn`t go into Burger King.'] - ['Vincent', 'The Wolf!', 0, 'A please would be nice.'] - ['Vincent', 'The Wolf!', 1, 'I said a please would be nice.'] - ['Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'] ... -- -- Delete test -- -- Delete some messages from the The Wolf and Vincent dialog space:delete{'The Wolf!', 'Vincent', 0} --- - ['The Wolf!', 'Vincent', 0, 'Jimmie, lead the way. Boys, get to work.'] ... space:delete{'The Wolf!', 'Vincent', 3} --- - ['The Wolf!', 'Vincent', 3, 'I don`t mean any disrespect, I just don`t like people barking orders at me.'] ... space:delete{'Vincent', 'The Wolf!', 0} --- - ['Vincent', 'The Wolf!', 0, 'A please would be nice.'] ... space:update({'Vincent', 'The Wolf!', 1}, {{ '=', 1, 'Updated' }, {'=', 5, 'New'}}) --- - error: Attempt to modify a tuple field which is part of index 'primary' in space 'tweedledum' ... space:update({'Updated', 'The Wolf!', 1}, {{ '=', 1, 'Vincent'}, { '#', 5, 1 }}) --- ... -- Checking Vincent's last messages space.index['primary']:select({'Vincent', 'The Wolf!'}) --- - - ['Vincent', 'The Wolf!', 1, 'I said a please would be nice.'] - ['Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'] ... 
-- Checking The Wolf's last messages space.index['primary']:select({'The Wolf!', 'Vincent'}) --- - - ['The Wolf!', 'Vincent', 1, 'Come again?'] - ['The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'] ... -- try to delete nonexistent message space:delete{'Vincent', 'The Wolf!', 3} --- ... -- try to delete patrial defined key space:delete{'Vincent', 'The Wolf!'} --- - error: Invalid key part count in an exact match (expected 3, got 2) ... -- try to delete by invalid key space:delete{'The Wolf!', 'Vincent', 1, 'Come again?'} --- - error: Invalid key part count in an exact match (expected 3, got 4) ... -- -- Update test -- space:update({'The Wolf!', 'Vincent', 1}, {{'=', 4, ''}}) --- - ['The Wolf!', 'Vincent', 1, ''] ... space:update({'Vincent', 'The Wolf!', 1}, {{'=', 4, ''}}) --- - ['Vincent', 'The Wolf!', 1, ''] ... -- Checking Vincent's last messages space.index['primary']:select({'Vincent', 'The Wolf!'}) --- - - ['Vincent', 'The Wolf!', 1, ''] - ['Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'] ... -- Checking The Wolf's last messages space.index['primary']:select({'The Wolf!', 'Vincent'}) --- - - ['The Wolf!', 'Vincent', 1, ''] - ['The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'] ... -- try to update a nonexistent message space:update({'Vincent', 'The Wolf!', 4}, {{'=', 4, ''}}) --- ... 
-- try to update patrial defined key space:update({'Vincent', 'The Wolf!'}, {{'=', 4, ''}}) --- - error: Invalid key part count in an exact match (expected 3, got 2) ... -- try to update by invalid key space:update({'The Wolf!', 'Vincent', 1, 'Come again?'}, {{'=', 4, ''}}) --- - error: Invalid key part count in an exact match (expected 3, got 4) ... space:len() --- - 17 ... space:truncate() --- ... space:len() --- - 0 ... -- A test case for Bug#1051006 Tree iterators return garbage --if an index is modified between calls -- space.index['primary']:drop() --- ... i1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) --- ... i2 = space:create_index('second', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) --- ... space:insert{'a', 'a', 'a'} --- - ['a', 'a', 'a'] ... space:insert{'d', 'd', 'd'} --- - ['d', 'd', 'd'] ... space:insert{'e', 'e', 'e'} --- - ['e', 'e', 'e'] ... space:insert{'b', 'b', 'b'} --- - ['b', 'b', 'b'] ... space:insert{'c', 'c', 'c'} --- - ['c', 'c', 'c'] ... t = {} --- ... gen, param, state = space.index['second']:pairs(nil, { iterator = box.index.GE }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 2 do state, v = gen(param, state) table.insert(t, v) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t --- - - ['a', 'a', 'a'] - ['b', 'b', 'b'] ... space:truncate() --- ... v --- - ['b', 'b', 'b'] ... collectgarbage('collect') --- - 0 ... v --- - ['b', 'b', 'b'] ... param, v = gen(param, state) --- ... v --- - null ... collectgarbage('collect') --- - 0 ... v --- - null ... t = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 3 do param, v = gen(param, state) table.insert(t, v) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t --- - [] ... space:drop() --- ... space = nil --- ... -- Bug #1082356 -- Space #19, https://bugs.launchpad.net/tarantool/+bug/1082356 space = box.schema.space.create('tweedledum') --- ... 
-- Multipart primary key (sender nickname, receiver nickname, message id) i1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 3, 'unsigned'}, unique = true }) --- ... space:insert{1, 1} --- - error: Tuple field count 2 is less than required by space format or defined indexes (expected at least 3) ... space:replace{1, 1} --- - error: Tuple field count 2 is less than required by space format or defined indexes (expected at least 3) ... space:drop() --- ... -- test deletion of data one by one space = box.schema.space.create('test') --- ... i1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) --- ... i2 = space:create_index('second', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 100 do v = tostring(i) space:insert{v, string.rep(v, 2) , string.rep(v, 3)} end; --- ... local pk = space.index[0] while pk:len() > 0 do local state, t for state, t in pk:pairs() do local key = {} for _k2, parts in ipairs(pk.parts) do table.insert(key, t[parts.fieldno]) end space:delete(key) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... space:drop() --- ... space = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/access_bin.result0000664000000000000000000001310513306565107021314 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... -- -- Access control tests which require a binary protocol -- connection to the server -- box.schema.user.grant('guest','read,write,execute','universe') --- ... session = box.session --- ... remote = require('net.box') --- ... c = remote.connect(box.cfg.listen) --- ... c:eval("session.su('admin')") --- ... c:eval("return session.user()") --- - admin ... c:close() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... -- gh-488 suid functions -- setuid_space = box.schema.space.create('setuid_space') --- ... 
index = setuid_space:create_index('primary') --- ... setuid_func = function() return box.space.setuid_space:auto_increment{} end --- ... box.schema.func.create('setuid_func') --- ... box.schema.user.grant('guest', 'execute', 'function', 'setuid_func') --- ... c = remote.connect(box.cfg.listen) --- ... c:call("setuid_func") --- - error: Read access to space 'setuid_space' is denied for user 'guest' ... session.su('guest') --- ... setuid_func() --- - error: Read access to space 'setuid_space' is denied for user 'guest' ... session.su('admin') --- ... box.schema.func.drop('setuid_func') --- ... box.schema.func.create('setuid_func', { setuid = true }) --- ... box.schema.user.grant('guest', 'execute', 'function', 'setuid_func') --- ... c:call("setuid_func") --- - [1] ... session.su('guest') --- ... setuid_func() --- - error: Read access to space 'setuid_space' is denied for user 'guest' ... session.su('admin') --- ... c:close() --- ... -- OPENTAR-84: crash in on_replace_dd_func during recovery -- _func space recovered after _user space, so setuid option can be -- handled incorrectly box.snapshot() --- - ok ... test_run:cmd('restart server default') remote = require('net.box') --- ... session = box.session --- ... setuid_func = function() return box.space.setuid_space:auto_increment{} end --- ... c = remote.connect(box.cfg.listen) --- ... c:call("setuid_func") --- - [2] ... session.su('guest') --- ... setuid_func() --- - error: Read access to space 'setuid_space' is denied for user 'guest' ... session.su('admin') --- ... c:close() --- ... box.schema.func.drop('setuid_func') --- ... box.space.setuid_space:drop() --- ... -- -- gh-530 "assertion failed" -- If a user is dropped, its session should not be usable -- any more -- test = box.schema.space.create('test') --- ... index = test:create_index('primary') --- ... box.schema.user.create('test', {password='test'}) --- ... box.schema.user.grant('test', 'read,write', 'space','test') --- ... 
box.schema.user.grant('test', 'read', 'space', '_space') --- ... box.schema.user.grant('test', 'read', 'space', '_index') --- ... net = require('net.box') --- ... c = net.connect('test:test@'..box.cfg.listen) --- ... c.space.test:insert{1} --- - [1] ... box.schema.user.drop('test') --- ... c.space.test:insert{1} --- - error: User '32' is not found ... c:close() --- ... test:drop() --- ... -- -- gh-575: User loses 'universe' grants after alter -- box.space._priv:get{1} --- - error: Invalid key part count in an exact match (expected 3, got 1) ... u = box.space._user:get{1} --- ... box.session.su('admin') --- ... box.schema.user.passwd('Gx5!') --- ... c = require('net.box').new('admin:Gx5!@'..box.cfg.listen) --- ... c:call('dostring', { 'return 2 + 2' }) --- - 4 ... c:close() --- ... box.space._user:replace(u) --- - [1, 1, 'admin', 'user', {}] ... -- -- Roles: test that universal access of an authenticated -- session is not updated if grant is made from another -- session -- test = box.schema.space.create('test') --- ... _ = test:create_index('primary') --- ... test:insert{1} --- - [1] ... box.schema.user.create('test', {password='test'}) --- ... box.schema.user.grant('test', 'read', 'space', '_space') --- ... box.schema.user.grant('test', 'read', 'space', '_index') --- ... net = require('net.box') --- ... c = net.connect('test:test@'..box.cfg.listen) --- ... c.space.test:select{} --- - error: Read access to space 'test' is denied for user 'test' ... box.schema.role.grant('public', 'read', 'universe') --- ... c.space.test:select{} --- - error: Read access to space 'test' is denied for user 'test' ... c:close() --- ... c = net.connect('test:test@'..box.cfg.listen) --- ... c.space.test:select{} --- - - [1] ... box.schema.role.revoke('public', 'read', 'universe') --- ... c.space.test:select{} --- - - [1] ... box.session.su('test') --- ... test:select{} --- - error: Read access to space 'test' is denied for user 'test' ... box.session.su('admin') --- ... c:close() --- ... 
box.schema.user.drop('test') --- ... test:drop() --- ... -- -- gh-508 - wrong check for universal access of setuid functions -- -- notice that guest can execute stuff, but can't read space _func box.schema.user.grant('guest', 'execute', 'universe') --- ... function f1() return box.space._func:get(1)[4] end --- ... function f2() return box.space._func:get(2)[4] end --- ... box.schema.func.create('f1') --- ... box.schema.func.create('f2',{setuid=true}) --- ... c = net.connect(box.cfg.listen) --- ... -- should return access denied c:call('f1') --- - error: Read access to space '_func' is denied for user 'guest' ... -- should work (used to return access denied, because was not setuid c:call('f2') --- - 0 ... c:close() --- ... box.schema.user.revoke('guest', 'execute', 'universe') --- ... box.schema.func.drop('f1') --- ... box.schema.func.drop('f2') --- ... -- --gh-2063 - improper params to su function -- box.session.su('admin', box.session.user) --- - admin ... box.session.su('admin', box.session.user()) --- - error: 'bad argument #2 to ''?'' (function expected, got string)' ... -- clenaup box.session.su('admin') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/stat_net.result0000664000000000000000000000157713306565107021056 0ustar rootroot-- clear statistics env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') box.stat.net.SENT -- zero --- - total: 0 rps: 0 ... box.stat.net.RECEIVED -- zero --- - total: 0 rps: 0 ... space = box.schema.space.create('tweedledum') --- ... box.schema.user.grant('guest','read,write,execute','universe') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... remote = require 'net.box' --- ... LISTEN = require('uri').parse(box.cfg.listen) --- ... cn = remote.connect(LISTEN.host, LISTEN.service) --- ... cn.space.tweedledum:select() --small request --- - [] ... box.stat.net.SENT.total > 0 --- - true ... box.stat.net.RECEIVED.total > 0 --- - true ... 
-- box.stat.net.EVENTS.total > 0 -- box.stat.net.LOCKS.total > 0 space:drop() --- ... cn:close() --- ... box.schema.user.revoke('guest','read,write,execute','universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_errinj.test.lua0000664000000000000000000000070613306560010022125 0ustar rootrooterrinj = require('box.error.injection') s = box.schema.space.create('spatial') s:create_index('primary') s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) errinj.set("ERRINJ_INDEX_ALLOC", true) s:insert{1,{0,0}} s:insert{2,{0,10}} s:insert{3,{0,50}} s:insert{4,{10,0}} s:insert{5,{50,0}} s:insert{6,{10,10}} s:insert{7,{10,50}} s:insert{8,{50,10}} s:insert{9,{50,50}} errinj.set("ERRINJ_INDEX_ALLOC", false) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/alter.result0000664000000000000000000013142413306565107020337 0ustar rootroot_space = box.space[box.schema.SPACE_ID] --- ... _index = box.space[box.schema.INDEX_ID] --- ... ADMIN = 1 --- ... env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter ', .lsn.: [0-9]+' to ''") --- - true ... utils = require('utils') --- ... EMPTY_MAP = utils.setmap({}) --- ... -- -- Test insertion into a system space - verify that -- mandatory fields are required. -- _space:insert{_space.id, ADMIN, 'test', 'memtx', 0, EMPTY_MAP, {}} --- - error: Duplicate key exists in unique index 'primary' in space '_space' ... -- -- Bad space id -- _space:insert{'hello', 'world', 'test', 'memtx', 0, EMPTY_MAP, {}} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... -- -- Can't create a space which has wrong field count - field_count must be NUM -- _space:insert{_space.id, ADMIN, 'test', 'world', 0, EMPTY_MAP, {}} --- - error: Duplicate key exists in unique index 'primary' in space '_space' ... 
-- -- There is already a tuple for the system space -- _space:insert{_space.id, ADMIN, '_space', 'memtx', 0, EMPTY_MAP, {}} --- - error: Duplicate key exists in unique index 'primary' in space '_space' ... _space:replace{_space.id, ADMIN, '_space', 'memtx', 0, EMPTY_MAP, {}} --- - [280, 1, '_space', 'memtx', 0, {}, []] ... _space:insert{_index.id, ADMIN, '_index', 'memtx', 0, EMPTY_MAP, {}} --- - error: Duplicate key exists in unique index 'primary' in space '_space' ... _space:replace{_index.id, ADMIN, '_index', 'memtx', 0, EMPTY_MAP, {}} --- - [288, 1, '_index', 'memtx', 0, {}, []] ... -- -- Can't change properties of a space -- _space:replace{_space.id, ADMIN, '_space', 'memtx', 0, EMPTY_MAP, {}} --- - [280, 1, '_space', 'memtx', 0, {}, []] ... -- -- Can't drop a system space -- _space:delete{_space.id} --- - error: 'Can''t drop space ''_space'': the space has indexes' ... _space:delete{_index.id} --- - error: 'Can''t drop space ''_index'': the space has indexes' ... -- -- Can't change properties of a space -- _space:update({_space.id}, {{'-', 1, 1}}) --- - error: Attempt to modify a tuple field which is part of index 'primary' in space '_space' ... _space:update({_space.id}, {{'-', 1, 2}}) --- - error: Attempt to modify a tuple field which is part of index 'primary' in space '_space' ... -- -- Create a space -- t = _space:auto_increment{ADMIN, 'hello', 'memtx', 0, EMPTY_MAP, {}} --- ... -- Check that a space exists space = box.space[t[1]] --- ... space.id --- - 341 ... space.field_count --- - 0 ... space.index[0] --- - null ... -- -- check dml - the space has no indexes yet, but must not crash on DML -- space:select{0} --- - error: 'No index #0 is defined in space ''hello''' ... space:insert{0, 0} --- - error: 'No index #0 is defined in space ''hello''' ... space:replace{0, 0} --- - error: 'No index #0 is defined in space ''hello''' ... space:update({0}, {{'+', 1, 1}}) --- - error: 'No index #0 is defined in space ''hello''' ... 
space:delete{0} --- - error: 'No index #0 is defined in space ''hello''' ... t = _space:delete{space.id} --- ... space_deleted = box.space[t[1]] --- ... space_deleted --- - null ... space:replace{0} --- - error: Space '341' does not exist ... _index:insert{_space.id, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'} --- - error: Duplicate key exists in unique index 'primary' in space '_index' ... _index:replace{_space.id, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'} --- - [280, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'] ... _index:insert{_index.id, 0, 'primary', 'tree', 1, 2, 0, 'unsigned', 1, 'unsigned'} --- - error: Duplicate key exists in unique index 'primary' in space '_index' ... _index:replace{_index.id, 0, 'primary', 'tree', 1, 2, 0, 'unsigned', 1, 'unsigned'} --- - [288, 0, 'primary', 'tree', 1, 2, 0, 'unsigned', 1, 'unsigned'] ... -- access_sysview.test changes output of _index:select{}. -- let's change _index space in such a way that it will be -- uniformn weather access_sysview.test is completed of not. box.space._space.index.owner:alter{parts = {2, 'unsigned'}} --- ... box.space._vspace.index.owner:alter{parts = {2, 'unsigned'}} --- ... 
_index:select{} --- - - [272, 0, 'primary', 'tree', {'unique': true}, [[0, 'string']]] - [276, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [276, 1, 'name', 'tree', {'unique': true}, [[1, 'string']]] - [280, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'] - [280, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [280, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [281, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [281, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [281, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [284, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [284, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [284, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [285, 0, 'primary', 'hash', {'unique': true}, [[0, 'unsigned']]] - [288, 0, 'primary', 'tree', 1, 2, 0, 'unsigned', 1, 'unsigned'] - [288, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]] - [289, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]] - [289, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]] - [296, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [296, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [296, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [297, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [297, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [297, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [304, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [304, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [304, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [305, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [305, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [305, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [312, 0, 'primary', 'tree', {'unique': true}, [[1, 
'unsigned'], [2, 'string'], [3, 'unsigned']]] - [312, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]] - [312, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]] - [313, 0, 'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]] - [313, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]] - [313, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]] - [320, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [320, 1, 'uuid', 'tree', {'unique': true}, [[1, 'string']]] - [330, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [340, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [340, 1, 'sequence', 'tree', {'unique': false}, [[1, 'unsigned']]] ... -- modify indexes of a system space _index:delete{_index.id, 0} --- - error: Can't drop the primary key in a system space, space '_index' ... _space:insert{1000, ADMIN, 'hello', 'memtx', 0, EMPTY_MAP, {}} --- - [1000, 1, 'hello', 'memtx', 0, {}, []] ... _index:insert{1000, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'} --- - [1000, 0, 'primary', 'tree', 1, 1, 0, 'unsigned'] ... box.space[1000]:insert{0, 'hello, world'} --- - [0, 'hello, world'] ... box.space[1000]:drop() --- ... box.space[1000] --- - null ... -- test that after disabling triggers on system spaces we still can -- get a correct snapshot _index:run_triggers(false) --- ... _space:run_triggers(false) --- ... box.snapshot() --- - ok ... test_run:cmd("restart server default with cleanup=1") utils = require('utils') --- ... EMPTY_MAP = utils.setmap({}) --- ... ADMIN = 1 --- ... box.space['_space']:insert{1000, ADMIN, 'test', 'memtx', 0, EMPTY_MAP, {}} --- - [1000, 1, 'test', 'memtx', 0, {}, []] ... box.space[1000].id --- - 1000 ... box.space['_space']:delete{1000} --- - [1000, 1, 'test', 'memtx', 0, {}, []] ... box.space[1000] --- - null ... 
-------------------------------------------------------------------------------- -- #197: box.space.space0:len() returns an error if there is no index -------------------------------------------------------------------------------- space = box.schema.space.create('gh197') --- ... space:len() --- - 0 ... space:truncate() --- ... space:pairs():totable() --- - [] ... space:drop() --- ... -------------------------------------------------------------------------------- -- #198: names like '' and 'x.y' and 5 and 'primary ' are legal -- -- The result of this test is superseded by the change made -- in scope of gh-2914, which allows all printable characters for -- identifiers. -- -------------------------------------------------------------------------------- -- invalid identifiers s = box.schema.space.create('invalid.identifier') --- ... s.name --- - invalid.identifier ... s:drop() --- ... s = box.schema.space.create('invalid identifier') --- ... s.name --- - invalid identifier ... s:drop() --- ... s = box.schema.space.create('primary ') --- ... '|'..s.name..'|' --- - '|primary |' ... s:drop() --- ... s = box.schema.space.create('5') --- ... s.name --- - '5' ... s:drop() --- ... box.schema.space.create('') --- - error: Invalid identifier '' (expected printable symbols only) ... -- valid identifiers box.schema.space.create('_Abcde'):drop() --- ... box.schema.space.create('_5'):drop() --- ... box.schema.space.create('valid_identifier'):drop() --- ... -- some OS-es ship incomplete locales, breaking ID validation weird_chars='' --- ... if jit.os~='OSX' and jit.os~='BSD' then weird_chars='空間' end --- ... box.schema.space.create('ынтыпрайзный_'..weird_chars):drop() -- unicode --- ... box.schema.space.create('utf8_наше_Фсё'):drop() -- unicode --- ... space = box.schema.space.create('test') --- ... -- invalid identifiers i = space:create_index('invalid.identifier') --- ... i.name --- - invalid.identifier ... i:drop() --- ... i = space:create_index('invalid identifier') --- ... 
i.name --- - invalid identifier ... i:drop() --- ... i = space:create_index('primary ') --- ... '|'..i.name..'|' --- - '|primary |' ... i:drop() --- ... i = space:create_index('5') --- ... i.name --- - '5' ... i:drop() --- ... space:create_index('') --- - error: Invalid identifier '' (expected printable symbols only) ... space:drop() --- ... -- gh-57 Confusing error message when trying to create space with a -- duplicate id auto = box.schema.space.create('auto_original') --- ... box.schema.space.create('auto', {id = auto.id}) --- - error: Duplicate key exists in unique index 'primary' in space '_space' ... box.schema.space.drop('auto') --- - error: Illegal parameters, space_id should be a number ... box.schema.space.create('auto_original', {id = auto.id}) --- - error: Space 'auto_original' already exists ... auto:drop() --- ... -- ------------------------------------------------------------------ -- gh-281 Crash after rename + replace + delete with multi-part index -- ------------------------------------------------------------------ s = box.schema.space.create('space') --- ... index = s:create_index('primary', {unique = true, parts = {1, 'unsigned', 2, 'string'}}) --- ... s:insert{1, 'a'} --- - [1, 'a'] ... box.space.space.index.primary:rename('secondary') --- ... box.space.space:replace{1,'The rain in Spain'} --- - [1, 'The rain in Spain'] ... box.space.space:delete{1,'The rain in Spain'} --- - [1, 'The rain in Spain'] ... box.space.space:select{} --- - - [1, 'a'] ... s:drop() --- ... -- ------------------------------------------------------------------ -- gh-362 Appropriate error messages in create_index -- ------------------------------------------------------------------ s = box.schema.space.create(42) --- - error: Illegal parameters, name should be a string ... s = box.schema.space.create("test", "bug") --- - error: Illegal parameters, options should be a table ... 
s = box.schema.space.create("test", {unknown = 'param'}) --- - error: Illegal parameters, unexpected option 'unknown' ... s = box.schema.space.create("test") --- ... index = s:create_index('primary', {unique = true, parts = {0, 'unsigned', 1, 'string'}}) --- - error: 'Illegal parameters, invalid index parts: field_no must be one-based' ... index = s:create_index('primary', {unique = true, parts = {'unsigned', 1, 'string', 2}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''unsigned''' ... index = s:create_index('primary', {unique = true, parts = 'bug'}) --- - error: Illegal parameters, options parameter 'parts' should be of type table ... index = s:create_index('test', {unique = true, parts = {1, 'unsigned'}, mmap = true}) --- - error: Illegal parameters, unexpected option 'mmap' ... s:drop() --- ... -- ------------------------------------------------------------------ -- gh-155 Tarantool failure on simultaneous space:drop() -- ------------------------------------------------------------------ test_run:cmd("setopt delimiter ';'") --- - true ... local fiber = require('fiber') local W = 4 local N = 50 local ch = fiber.channel(W) for i=1,W do fiber.create(function() for k=1,N do local space_id = math.random(2147483647) local space = box.schema.space.create(string.format('space_%d', space_id)) space:create_index('pk', { type = 'tree' }) space:drop() end ch:put(true) end) end for i=1,W do ch:get() end test_run:cmd("setopt delimiter ''"); --- ... -- ------------------------------------------------------------------ -- Lower and upper cases -- ------------------------------------------------------------------ space = box.schema.space.create("test") --- ... _ = space:create_index('primary', { parts = {1, 'nUmBeR', 2, 'StRinG'}}) --- ... space.index.primary.parts[1].type == 'number' --- - true ... space.index.primary.parts[2].type == 'string' --- - true ... box.space._index:get({space.id, 0})[6] --- - [[0, 'number'], [1, 'string']] ... 
space:drop() --- ... -- ------------------------------------------------------------------ -- Aliases -- ------------------------------------------------------------------ space = box.schema.space.create("test") --- ... _ = space:create_index('primary', { parts = {1, 'uint', 2, 'int', 3, 'str'}}) --- ... space.index.primary.parts[1].type == 'unsigned' --- - true ... space.index.primary.parts[2].type == 'integer' --- - true ... space.index.primary.parts[3].type == 'string' --- - true ... box.space._index:get({space.id, 0})[6] --- - [[0, 'unsigned'], [1, 'integer'], [2, 'string']] ... space:drop() --- ... -- ------------------------------------------------------------------ -- Tarantool 1.6 compatibility -- ------------------------------------------------------------------ -- gh-1534: deprecate 'num' data type for unsigned integers space = box.schema.space.create("test") --- ... _ = space:create_index('primary', { parts = {1, 'num'}}) --- ... space.index.primary.parts[1].type == 'unsigned' --- - true ... box.space._index:get({space.id, 0})[6] --- - [[0, 'unsigned']] ... space:drop() --- ... -- data dictionary compatibility is checked by upgrade.test.lua test_run:cmd("clear filter") --- - true ... -- -- create_index() does not modify index options -- s = box.schema.space.create('test', {engine='vinyl'}) --- ... opts = {parts={1, 'unsigned'}} --- ... _ = s:create_index('pk', opts) --- ... opts --- - parts: - 1 - unsigned ... s:drop() --- ... -- -- gh-2074: alter a primary key -- s = box.schema.space.create('test') --- ... _ = s:create_index('pk') --- ... s:insert{1, 1} --- - [1, 1] ... s:insert{2, 2} --- - [2, 2] ... s:insert{3, 3} --- - [3, 3] ... s.index.pk:alter({parts={1, 'num', 2, 'num'}}) --- ... s.index.pk --- - unique: true parts: - type: unsigned is_nullable: false fieldno: 1 - type: unsigned is_nullable: false fieldno: 2 id: 0 space_id: 731 name: pk type: TREE ... s:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... 
_ = s:create_index('secondary', {parts={2, 'num'}}) --- ... s.index.pk:alter({parts={1, 'num'}}) --- ... s:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... s.index.pk --- - unique: true parts: - type: unsigned is_nullable: false fieldno: 1 id: 0 space_id: 731 name: pk type: TREE ... s.index.secondary --- - unique: true parts: - type: unsigned is_nullable: false fieldno: 2 id: 1 space_id: 731 name: secondary type: TREE ... s.index.secondary:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... s:drop() --- ... -- -- Forbid explicit space id 0. -- s = box.schema.create_space('test', { id = 0 }) --- - error: 'Failed to create space ''test'': space id 0 is reserved' ... -- -- gh-2660 space:truncate() does not preserve table triggers -- ts = box.schema.space.create('test') --- ... ti = ts:create_index('primary') --- ... ts:insert{1, 'b', 'c'} --- - [1, 'b', 'c'] ... ts:insert{2, 'b', 'c'} --- - [2, 'b', 'c'] ... o = nil --- ... n = nil --- ... function save_out(told, tnew) o = told n = tnew end --- ... _ = ts:on_replace(save_out) --- ... ts:replace{2, 'a', 'b', 'c'} --- - [2, 'a', 'b', 'c'] ... o --- - [2, 'b', 'c'] ... n --- - [2, 'a', 'b', 'c'] ... ts:truncate() --- ... ts:replace{2, 'a', 'b'} --- - [2, 'a', 'b'] ... o --- - null ... n --- - [2, 'a', 'b'] ... ts:replace{3, 'a', 'b'} --- - [3, 'a', 'b'] ... o --- - null ... n --- - [3, 'a', 'b'] ... ts:drop() --- ... -- -- gh-2652: validate space format. -- s = box.schema.space.create('test', { format = "format" }) --- - error: Illegal parameters, options parameter 'format' should be of type table ... format = { { name = 100 } } --- ... s = box.schema.space.create('test', { format = format }) --- - error: 'Illegal parameters, format[1]: name (string) is expected' ... long = string.rep('a', box.schema.NAME_MAX + 1) --- ... format = { { name = long } } --- ... s = box.schema.space.create('test', { format = format }) --- - error: 'Failed to create space ''test'': field 1 name is too long' ... 
format = { { name = 'id', type = '100' } } --- ... s = box.schema.space.create('test', { format = format }) --- - error: 'Failed to create space ''test'': field 1 has unknown field type' ... format = { utils.setmap({}) } --- ... s = box.schema.space.create('test', { format = format }) --- - error: 'Illegal parameters, format[1]: name (string) is expected' ... -- Ensure the format is updated after index drop. format = { { name = 'id', type = 'unsigned' } } --- ... s = box.schema.space.create('test', { format = format }) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', { parts = { 2, 'string' } }) --- ... s:replace{1, 1} --- - error: 'Tuple field 2 type does not match one required by operation: expected string' ... sk:drop() --- ... s:replace{1, 1} --- - [1, 1] ... s:drop() --- ... -- Check index parts conflicting with space format. format = { { name='field1', type='unsigned' }, { name='field2', type='string' }, { name='field3', type='scalar' } } --- ... s = box.schema.space.create('test', { format = format }) --- ... pk = s:create_index('pk') --- ... sk1 = s:create_index('sk1', { parts = { 2, 'unsigned' } }) --- - error: Field 'field2' has type 'string' in space format, but type 'unsigned' in index definition ... -- Check space format conflicting with index parts. sk3 = s:create_index('sk3', { parts = { 2, 'string' } }) --- ... format[2].type = 'unsigned' --- ... s:format(format) --- - error: Field 'field2' has type 'unsigned' in space format, but type 'string' in index definition ... s:format() --- - [{'name': 'field1', 'type': 'unsigned'}, {'name': 'field2', 'type': 'string'}, { 'name': 'field3', 'type': 'scalar'}] ... s.index.sk3.parts --- - - type: string is_nullable: false fieldno: 2 ... -- Space format can be updated, if conflicted index is deleted. sk3:drop() --- ... s:format(format) --- ... s:format() --- - [{'name': 'field1', 'type': 'unsigned'}, {'name': 'field2', 'type': 'unsigned'}, {'name': 'field3', 'type': 'scalar'}] ... 
-- Check deprecated field types. format[2].type = 'num' --- ... format[3].type = 'str' --- ... format[4] = { name = 'field4', type = '*' } --- ... format --- - - name: field1 type: unsigned - name: field2 type: num - name: field3 type: str - name: field4 type: '*' ... s:format(format) --- ... s:format() --- - [{'name': 'field1', 'type': 'unsigned'}, {'name': 'field2', 'type': 'num'}, {'name': 'field3', 'type': 'str'}, {'name': 'field4', 'type': '*'}] ... s:replace{1, 2, '3', {4, 4, 4}} --- - [1, 2, '3', [4, 4, 4]] ... -- Check not indexed fields checking. s:truncate() --- ... format[2] = {name='field2', type='string'} --- ... format[3] = {name='field3', type='array'} --- ... format[4] = {name='field4', type='number'} --- ... format[5] = {name='field5', type='integer'} --- ... format[6] = {name='field6', type='scalar'} --- ... format[7] = {name='field7', type='map'} --- ... format[8] = {name='field8', type='any'} --- ... format[9] = {name='field9'} --- ... s:format(format) --- ... -- Check incorrect field types. format[9] = {name='err', type='any'} --- ... s:format(format) --- ... s:replace{1, '2', {3, 3}, 4.4, -5, true, {value=7}, 8, 9} --- - [1, '2', [3, 3], 4.4, -5, true, {'value': 7}, 8, 9] ... s:replace{1, 2, {3, 3}, 4.4, -5, true, {value=7}, 8, 9} --- - error: 'Tuple field 2 type does not match one required by operation: expected string' ... s:replace{1, '2', 3, 4.4, -5, true, {value=7}, 8, 9} --- - error: 'Tuple field 3 type does not match one required by operation: expected array' ... s:replace{1, '2', {3, 3}, '4', -5, true, {value=7}, 8, 9} --- - error: 'Tuple field 4 type does not match one required by operation: expected number' ... s:replace{1, '2', {3, 3}, 4.4, -5.5, true, {value=7}, 8, 9} --- - error: 'Tuple field 5 type does not match one required by operation: expected integer' ... s:replace{1, '2', {3, 3}, 4.4, -5, {6, 6}, {value=7}, 8, 9} --- - error: 'Tuple field 6 type does not match one required by operation: expected scalar' ... 
s:replace{1, '2', {3, 3}, 4.4, -5, true, {7}, 8, 9} --- - error: 'Tuple field 7 type does not match one required by operation: expected map' ... s:replace{1, '2', {3, 3}, 4.4, -5, true, {value=7}} --- - error: Tuple field count 7 is less than required by space format or defined indexes (expected at least 9) ... s:replace{1, '2', {3, 3}, 4.4, -5, true, {value=7}, 8} --- - error: Tuple field count 8 is less than required by space format or defined indexes (expected at least 9) ... s:truncate() --- ... -- -- gh-1014: field names. -- format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2'} --- ... format[3] = {name = 'field1'} --- ... s:format(format) --- - error: Space field 'field1' is duplicate ... s:drop() --- ... -- https://github.com/tarantool/tarantool/issues/2815 -- Extend space format definition syntax format = {{name='key',type='unsigned'}, {name='value',type='string'}} --- ... s = box.schema.space.create('test', { format = format }) --- ... s:format() --- - [{'name': 'key', 'type': 'unsigned'}, {'name': 'value', 'type': 'string'}] ... s:format({'id', 'name'}) --- ... s:format() --- - [{'name': 'id', 'type': 'any'}, {'name': 'name', 'type': 'any'}] ... s:format({'id', {'name1'}}) --- ... s:format() --- - [{'name': 'id', 'type': 'any'}, {'name': 'name1', 'type': 'any'}] ... s:format({'id', {'name2', 'string'}}) --- ... s:format() --- - [{'name': 'id', 'type': 'any'}, {'name': 'name2', 'type': 'string'}] ... s:format({'id', {'name', type = 'string'}}) --- ... s:format() --- - [{'name': 'id', 'type': 'any'}, {'name': 'name', 'type': 'string'}] ... s:drop() --- ... format = {'key', {'value',type='string'}} --- ... s = box.schema.space.create('test', { format = format }) --- ... s:format() --- - [{'name': 'key', 'type': 'any'}, {'name': 'value', 'type': 'string'}] ... s:drop() --- ... s = box.schema.space.create('test') --- ... 
s:create_index('test', {parts = {'test'}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''test''' ... s:create_index('test', {parts = {{'test'}}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''test''' ... s:create_index('test', {parts = {{field = 'test'}}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''test''' ... s:create_index('test', {parts = {1}}).parts --- - - type: scalar is_nullable: false fieldno: 1 ... s:drop() --- ... s = box.schema.space.create('test') --- ... s:format{{'test1', 'integer'}, 'test2', {'test3', 'integer'}, {'test4','scalar'}} --- ... s:create_index('test', {parts = {'test'}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''test''' ... s:create_index('test', {parts = {{'test'}}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''test''' ... s:create_index('test', {parts = {{field = 'test'}}}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''test''' ... s:create_index('test1', {parts = {'test1'}}).parts --- - - type: integer is_nullable: false fieldno: 1 ... s:create_index('test2', {parts = {'test2'}}).parts --- - error: 'Can''t create or modify index ''test2'' in space ''test'': field type ''any'' is not supported' ... s:create_index('test3', {parts = {{'test1', 'integer'}}}).parts --- - - type: integer is_nullable: false fieldno: 1 ... s:create_index('test4', {parts = {{'test2', 'integer'}}}).parts --- - - type: integer is_nullable: false fieldno: 2 ... s:create_index('test5', {parts = {{'test2', 'integer'}}}).parts --- - - type: integer is_nullable: false fieldno: 2 ... s:create_index('test6', {parts = {1, 3}}).parts --- - - type: integer is_nullable: false fieldno: 1 - type: integer is_nullable: false fieldno: 3 ... 
s:create_index('test7', {parts = {'test1', 4}}).parts --- - - type: integer is_nullable: false fieldno: 1 - type: scalar is_nullable: false fieldno: 4 ... s:create_index('test8', {parts = {{1, 'integer'}, {'test4', 'scalar'}}}).parts --- - - type: integer is_nullable: false fieldno: 1 - type: scalar is_nullable: false fieldno: 4 ... s:drop() --- ... -- -- gh-2800: space formats checking is broken. -- -- Ensure that vinyl correctly process field count change. s = box.schema.space.create('test', {engine = 'vinyl', field_count = 2}) --- ... pk = s:create_index('pk') --- ... s:replace{1, 2} --- - [1, 2] ... t = box.space._space:select{s.id}[1]:totable() --- ... t[5] = 1 --- ... box.space._space:replace(t) --- - error: 'Can''t modify space ''test'': can not change field count on a non-empty space' ... s:drop() --- ... -- Check field type changes. format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'any'} --- ... format[3] = {name = 'field3', type = 'unsigned'} --- ... format[4] = {name = 'field4', type = 'string'} --- ... format[5] = {name = 'field5', type = 'number'} --- ... format[6] = {name = 'field6', type = 'integer'} --- ... format[7] = {name = 'field7', type = 'boolean'} --- ... format[8] = {name = 'field8', type = 'scalar'} --- ... format[9] = {name = 'field9', type = 'array'} --- ... format[10] = {name = 'field10', type = 'map'} --- ... s = box.schema.space.create('test', {format = format}) --- ... pk = s:create_index('pk') --- ... t = s:replace{1, {2}, 3, '4', 5.5, -6, true, -8, {9, 9}, {val = 10}} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function fail_format_change(fieldno, new_type) local old_type = format[fieldno].type format[fieldno].type = new_type local ok, msg = pcall(s.format, s, format) format[fieldno].type = old_type return msg end; --- ... 
function ok_format_change(fieldno, new_type) local old_type = format[fieldno].type format[fieldno].type = new_type s:format(format) s:delete{1} format[fieldno].type = old_type s:format(format) s:replace(t) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- any --X--> unsigned fail_format_change(2, 'unsigned') --- - 'Tuple field 2 type does not match one required by operation: expected unsigned' ... -- unsigned -----> any ok_format_change(3, 'any') --- ... -- unsigned --X--> string fail_format_change(3, 'string') --- - 'Can''t modify space ''test'': Can not change a field type from unsigned to string on a not empty space' ... -- unsigned -----> number ok_format_change(3, 'number') --- ... -- unsigned -----> integer ok_format_change(3, 'integer') --- ... -- unsigned -----> scalar ok_format_change(3, 'scalar') --- ... -- unsigned --X--> map fail_format_change(3, 'map') --- - 'Can''t modify space ''test'': Can not change a field type from unsigned to map on a not empty space' ... -- string -----> any ok_format_change(4, 'any') --- ... -- string -----> scalar ok_format_change(4, 'scalar') --- ... -- string --X--> boolean fail_format_change(4, 'boolean') --- - 'Can''t modify space ''test'': Can not change a field type from string to boolean on a not empty space' ... -- number -----> any ok_format_change(5, 'any') --- ... -- number -----> scalar ok_format_change(5, 'scalar') --- ... -- number --X--> integer fail_format_change(5, 'integer') --- - 'Tuple field 5 type does not match one required by operation: expected integer' ... -- integer -----> any ok_format_change(6, 'any') --- ... -- integer -----> number ok_format_change(6, 'number') --- ... -- integer -----> scalar ok_format_change(6, 'scalar') --- ... -- integer --X--> unsigned fail_format_change(6, 'unsigned') --- - 'Tuple field 6 type does not match one required by operation: expected unsigned' ... -- boolean -----> any ok_format_change(7, 'any') --- ... 
-- boolean -----> scalar ok_format_change(7, 'scalar') --- ... -- boolean --X--> string fail_format_change(7, 'string') --- - 'Can''t modify space ''test'': Can not change a field type from boolean to string on a not empty space' ... -- scalar -----> any ok_format_change(8, 'any') --- ... -- scalar --X--> unsigned fail_format_change(8, 'unsigned') --- - 'Tuple field 8 type does not match one required by operation: expected unsigned' ... -- array -----> any ok_format_change(9, 'any') --- ... -- array --X--> scalar fail_format_change(9, 'scalar') --- - 'Can''t modify space ''test'': Can not change a field type from array to scalar on a not empty space' ... -- map -----> any ok_format_change(10, 'any') --- ... -- map --X--> scalar fail_format_change(10, 'scalar') --- - 'Can''t modify space ''test'': Can not change a field type from map to scalar on a not empty space' ... s:drop() --- ... -- Check new fields adding. format = {} --- ... s = box.schema.space.create('test') --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... s:format(format) -- Ok, no indexes. --- ... pk = s:create_index('pk') --- ... format[2] = {name = 'field2', type = 'unsigned'} --- ... s:format(format) -- Ok, empty space. --- ... s:replace{1, 1} --- - [1, 1] ... format[2] = nil --- ... s:format(format) -- Ok, can delete fields with no checks. --- ... s:delete{1} --- - [1, 1] ... sk1 = s:create_index('sk1', {parts = {2, 'unsigned'}}) --- ... sk2 = s:create_index('sk2', {parts = {3, 'unsigned'}}) --- ... sk5 = s:create_index('sk5', {parts = {5, 'unsigned'}}) --- ... s:replace{1, 1, 1, 1, 1} --- - [1, 1, 1, 1, 1] ... format[2] = {name = 'field2', type = 'unsigned'} --- ... format[3] = {name = 'field3', type = 'unsigned'} --- ... format[4] = {name = 'field4', type = 'any'} --- ... format[5] = {name = 'field5', type = 'unsigned'} --- ... -- Ok, all new fields are indexed or have type ANY, and new -- field_count <= old field_count. s:format(format) --- ... 
s:replace{1, 1, 1, 1, 1, 1} --- - [1, 1, 1, 1, 1, 1] ... format[6] = {name = 'field6', type = 'unsigned'} --- ... -- Ok, but check existing tuples for a new field[6]. s:format(format) --- ... -- Fail, not enough fields. s:replace{2, 2, 2, 2, 2} --- - error: Tuple field count 5 is less than required by space format or defined indexes (expected at least 6) ... s:replace{2, 2, 2, 2, 2, 2, 2} --- - [2, 2, 2, 2, 2, 2, 2] ... format[7] = {name = 'field7', type = 'unsigned'} --- ... -- Fail, the tuple {1, ... 1} is invalid for a new format. s:format(format) --- - error: Tuple field count 6 is less than required by space format or defined indexes (expected at least 7) ... s:drop() --- ... -- Vinyl does not support adding fields to a not empty space. s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... s:replace{1,1} --- - [1, 1] ... format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'unsigned'} --- ... s:format(format) --- - error: Vinyl does not support changing space format of a non-empty space ... s:drop() --- ... -- -- gh-1557: NULL in indexes. -- NULL = require('msgpack').NULL --- ... format = {} --- ... format[1] = { name = 'field1', type = 'unsigned', is_nullable = true } --- ... format[2] = { name = 'field2', type = 'unsigned', is_nullable = true } --- ... s = box.schema.space.create('test', { format = format }) --- ... s:create_index('primary', { parts = { 'field1' } }) --- - error: Primary index of the space 'test' can not contain nullable parts ... s:create_index('primary', { parts = {{'field1', is_nullable = false}} }) --- - error: Field 1 is nullable in space format, but not nullable in index parts ... format[1].is_nullable = false --- ... s:format(format) --- ... s:create_index('primary', { parts = {{'field1', is_nullable = true}} }) --- - error: Primary index of the space 'test' can not contain nullable parts ... 
s:create_index('primary', { parts = {'field1'} }) --- - unique: true parts: - type: unsigned is_nullable: false fieldno: 1 id: 0 space_id: 747 name: primary type: TREE ... -- Check that is_nullable can't be set to false on non-empty space s:insert({1, NULL}) --- - [1, null] ... format[1].is_nullable = true --- ... s:format(format) --- - error: Field 1 is nullable in space format, but not nullable in index parts ... format[1].is_nullable = false --- ... format[2].is_nullable = false --- ... s:format(format) --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... s:delete(1) --- - [1, null] ... -- Disable is_nullable on empty space s:format(format) --- ... -- Disable is_nullable on a non-empty space. format[2].is_nullable = true --- ... s:format(format) --- ... s:replace{1, 1} --- - [1, 1] ... format[2].is_nullable = false --- ... s:format(format) --- ... -- Enable is_nullable on a non-empty space. format[2].is_nullable = true --- ... s:format(format) --- ... s:replace{1, box.NULL} --- - [1, null] ... s:delete{1} --- - [1, null] ... s:format({}) --- ... s:create_index('secondary', { parts = {{2, 'string', is_nullable = true}} }) --- - unique: true parts: - type: string is_nullable: true fieldno: 2 id: 1 space_id: 747 name: secondary type: TREE ... s:insert({1, NULL}) --- - [1, null] ... s.index.secondary:alter({ parts = {{2, 'string', is_nullable = false} }}) --- - error: 'Tuple field 2 type does not match one required by operation: expected string' ... s:delete({1}) --- - [1, null] ... s.index.secondary:alter({ parts = {{2, 'string', is_nullable = false} }}) --- ... s:insert({1, NULL}) --- - error: 'Tuple field 2 type does not match one required by operation: expected string' ... s:insert({2, 'xxx'}) --- - [2, 'xxx'] ... s.index.secondary:alter({ parts = {{2, 'string', is_nullable = true} }}) --- ... s:insert({1, NULL}) --- - [1, null] ... s:drop() --- ... s = box.schema.create_space('test') --- ... 
test_run:cmd("setopt delimiter ';'") --- - true ... s:format({ [1] = { name = 'id1', type = 'unsigned'}, [2] = { name = 'id2', type = 'unsigned'}, [3] = { name = 'id3', type = 'string'}, [4] = { name = 'id4', type = 'string'}, [5] = { name = 'id5', type = 'string'}, [6] = { name = 'id6', type = 'string'}, }); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s:format() --- - [{'name': 'id1', 'type': 'unsigned'}, {'name': 'id2', 'type': 'unsigned'}, {'name': 'id3', 'type': 'string'}, {'name': 'id4', 'type': 'string'}, {'name': 'id5', 'type': 'string'}, {'name': 'id6', 'type': 'string'}] ... _ = s:create_index('primary') --- ... s:insert({1, 1, 'a', 'b', 'c', 'd'}) --- - [1, 1, 'a', 'b', 'c', 'd'] ... s:drop() --- ... s = box.schema.create_space('test') --- ... idx = s:create_index('idx') --- ... box.space.test == s --- - true ... s:drop() --- ... -- -- gh-3000: index modifying must change key_def parts and -- comparators. They can be changed, if there was compatible index -- parts change. For example, a part type was changed from -- unsigned to number. In such a case comparators must be reset -- and part types updated. -- s = box.schema.create_space('test') --- ... pk = s:create_index('pk') --- ... s:replace{1} --- - [1] ... pk:alter{parts = {{1, 'integer'}}} --- ... s:replace{-2} --- - [-2] ... s:select{} --- - - [-2] - [1] ... s:drop() --- ... -- -- Allow to restrict space format, if corresponding restrictions -- already are defined in indexes. -- test_run:cmd("setopt delimiter ';'") --- - true ... function check_format_restriction(engine, name) local s = box.schema.create_space(name, {engine = engine}) local pk = s:create_index('pk') local format = {} format[1] = {name = 'field1'} s:replace{1} s:replace{100} s:replace{0} s:format(format) s:format() format[1].type = 'unsigned' s:format(format) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... check_format_restriction('memtx', 'test1') --- ... 
check_format_restriction('vinyl', 'test2') --- ... box.space.test1:format() --- - [{'name': 'field1', 'type': 'unsigned'}] ... box.space.test1:select{} --- - - [0] - [1] - [100] ... box.space.test2:format() --- - [{'name': 'field1', 'type': 'unsigned'}] ... box.space.test2:select{} --- - - [0] - [1] - [100] ... box.space.test1:drop() --- ... box.space.test2:drop() --- ... -- -- Allow to change is_nullable in index definition on non-empty -- space. -- s = box.schema.create_space('test') --- ... pk = s:create_index('pk') --- ... sk1 = s:create_index('sk1', {parts = {{2, 'unsigned', is_nullable = true}}}) --- ... sk2 = s:create_index('sk2', {parts = {{3, 'unsigned', is_nullable = false}}}) --- ... s:replace{1, box.NULL, 1} --- - [1, null, 1] ... sk1:alter({parts = {{2, 'unsigned', is_nullable = false}}}) --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... s:replace{1, 1, 1} --- - [1, 1, 1] ... sk1:alter({parts = {{2, 'unsigned', is_nullable = false}}}) --- ... s:replace{1, 1, box.NULL} --- - error: 'Tuple field 3 type does not match one required by operation: expected unsigned' ... sk2:alter({parts = {{3, 'unsigned', is_nullable = true}}}) --- ... s:replace{1, 1, box.NULL} --- - [1, 1, null] ... s:replace{2, 10, 100} --- - [2, 10, 100] ... s:replace{3, 0, 20} --- - [3, 0, 20] ... s:replace{4, 15, 150} --- - [4, 15, 150] ... s:replace{5, 9, box.NULL} --- - [5, 9, null] ... sk1:select{} --- - - [3, 0, 20] - [1, 1, null] - [5, 9, null] - [2, 10, 100] - [4, 15, 150] ... sk2:select{} --- - - [1, 1, null] - [5, 9, null] - [3, 0, 20] - [2, 10, 100] - [4, 15, 150] ... s:drop() --- ... -- -- gh-3008: allow multiple types on the same field. -- format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'scalar'} --- ... format[3] = {name = 'field3', type = 'integer'} --- ... s = box.schema.create_space('test', {format = format}) --- ... pk = s:create_index('pk') --- ... 
sk1 = s:create_index('sk1', {parts = {{2, 'number'}}}) --- ... sk2 = s:create_index('sk2', {parts = {{2, 'integer'}}}) --- ... sk3 = s:create_index('sk3', {parts = {{2, 'unsigned'}}}) --- ... sk4 = s:create_index('sk4', {parts = {{3, 'number'}}}) --- ... s:format() --- - [{'name': 'field1', 'type': 'unsigned'}, {'name': 'field2', 'type': 'scalar'}, { 'name': 'field3', 'type': 'integer'}] ... s:replace{1, '100', -20.2} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... s:replace{1, 100, -20.2} --- - error: 'Tuple field 3 type does not match one required by operation: expected integer' ... s:replace{1, 100, -20} --- - [1, 100, -20] ... s:replace{2, 50, 0} --- - [2, 50, 0] ... s:replace{3, 150, -60} --- - [3, 150, -60] ... s:replace{4, 0, 120} --- - [4, 0, 120] ... pk:select{} --- - - [1, 100, -20] - [2, 50, 0] - [3, 150, -60] - [4, 0, 120] ... sk1:select{} --- - - [4, 0, 120] - [2, 50, 0] - [1, 100, -20] - [3, 150, -60] ... sk2:select{} --- - - [4, 0, 120] - [2, 50, 0] - [1, 100, -20] - [3, 150, -60] ... sk3:select{} --- - - [4, 0, 120] - [2, 50, 0] - [1, 100, -20] - [3, 150, -60] ... sk4:select{} --- - - [3, 150, -60] - [1, 100, -20] - [2, 50, 0] - [4, 0, 120] ... sk1:alter{parts = {{2, 'unsigned'}}} --- ... sk2:alter{parts = {{2, 'unsigned'}}} --- ... sk4:alter{parts = {{3, 'integer'}}} --- ... s:replace{1, 50.5, 1.5} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... s:replace{1, 50, 1.5} --- - error: 'Tuple field 3 type does not match one required by operation: expected integer' ... s:replace{5, 5, 5} --- - [5, 5, 5] ... sk1:select{} --- - - [4, 0, 120] - [5, 5, 5] - [2, 50, 0] - [1, 100, -20] - [3, 150, -60] ... sk2:select{} --- - - [4, 0, 120] - [5, 5, 5] - [2, 50, 0] - [1, 100, -20] - [3, 150, -60] ... sk3:select{} --- - - [4, 0, 120] - [5, 5, 5] - [2, 50, 0] - [1, 100, -20] - [3, 150, -60] ... 
sk4:select{} --- - - [3, 150, -60] - [1, 100, -20] - [2, 50, 0] - [5, 5, 5] - [4, 0, 120] ... sk1:drop() --- ... sk2:drop() --- ... sk3:drop() --- ... -- Remove 'unsigned' constraints from indexes, and 'scalar' now -- can be inserted in the second field. s:replace{1, true, 100} --- - [1, true, 100] ... s:select{} --- - - [1, true, 100] - [2, 50, 0] - [3, 150, -60] - [4, 0, 120] - [5, 5, 5] ... sk4:select{} --- - - [3, 150, -60] - [2, 50, 0] - [5, 5, 5] - [1, true, 100] - [4, 0, 120] ... s:drop() --- ... -- -- gh-2914: Allow any space name which consists of printable characters -- identifier = require("identifier") --- ... test_run:cmd("setopt delimiter ';'") --- - true ... identifier.run_test( function (identifier) box.schema.space.create(identifier) if box.space[identifier] == nil then error("Cannot query space") end end, function (identifier) box.space[identifier]:drop() end ); --- - All tests passed ... s = box.schema.create_space("test"); --- ... identifier.run_test( function (identifier) s:create_index(identifier, {parts={1}}) end, function (identifier) s.index[identifier]:drop() end ); --- - All tests passed ... s:drop(); --- ... -- -- gh-2914: check column name validation. -- Ensure that col names are validated as identifiers. -- s = box.schema.create_space('test'); --- ... i = s:create_index("primary", {parts={1, "integer"}}); --- ... identifier.run_test( function (identifier) s:format({{name=identifier,type="integer"}}) local t = s:replace({1}) if t[identifier] ~= 1 then error("format identifier error") end end, function (identifier) end ); --- - All tests passed ... s:drop(); --- ... -- gh-2914: check coll name validation. identifier.run_test( function (identifier) box.internal.collation.create(identifier, 'ICU', 'ru-RU', {}) end, function (identifier) box.internal.collation.drop(identifier) end ); --- - All tests passed ... test_run:cmd("setopt delimiter ''"); --- - true ... -- -- gh-3011: add new names to old tuple formats. 
-- s = box.schema.create_space('test') --- ... pk = s:create_index('pk') --- ... t1 = s:replace{1} --- ... t1.field1 --- - null ... format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... s:format(format) --- ... t2 = s:replace{2} --- ... t2.field1 --- - 2 ... t1.field1 --- - 1 ... format[1].name = 'field_1' --- ... s:format(format) --- ... t3 = s:replace{3} --- ... t1.field1 --- - null ... t1.field_1 --- - 1 ... t2.field1 --- - null ... t2.field_1 --- - 2 ... t3.field1 --- - null ... t3.field_1 --- - 3 ... s:drop() --- ... -- -- gh-3008. Ensure the change of hash index parts updates hash -- key_def. -- s = box.schema.create_space('test') --- ... pk = s:create_index('pk', {type = 'hash'}) --- ... pk:alter{parts = {{1, 'string'}}} --- ... s:replace{'1', '1'} --- - ['1', '1'] ... s:replace{'1', '2'} --- - ['1', '2'] ... pk:select{} --- - - ['1', '2'] ... pk:select{'1'} --- - - ['1', '2'] ... s:drop() --- ... -- -- Ensure that incompatible key parts change validates format. -- s = box.schema.create_space('test') --- ... pk = s:create_index('pk') --- ... s:replace{1} --- - [1] ... pk:alter{parts = {{1, 'string'}}} -- Must fail. --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... s:drop() --- ... -- -- gh-2895: do not ignore field type in space format, if it is not -- specified via 'type = ...'. -- format = {} --- ... format[1] = {name = 'field1', 'unsigned'} --- ... format[2] = {name = 'field2', 'unsigned'} --- ... s = box.schema.create_space('test', {format = format}) --- ... s:format() --- - [{'type': 'unsigned', 'name': 'field1'}, {'type': 'unsigned', 'name': 'field2'}] ... format[2] = {name = 'field2', 'unsigned', 'unknown'} --- ... s:format(format) --- - error: 'Can''t modify space ''test'': field 2 format is not map with string keys' ... s:format() --- - [{'type': 'unsigned', 'name': 'field1'}, {'type': 'unsigned', 'name': 'field2'}] ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/cfg.test.lua0000664000000000000000000000667513306565107020221 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("push filter '(error: .*)\\.lua:[0-9]+: ' to '\\1.lua:: '") box.cfg.nosuchoption = 1 cfg_filter(box.cfg) -- must be read-only box.cfg() cfg_filter(box.cfg) -- check that cfg with unexpected parameter fails. box.cfg{sherlock = 'holmes'} -- check that cfg with unexpected type of parameter failes box.cfg{listen = {}} box.cfg{wal_dir = 0} box.cfg{coredump = 'true'} -- check comment to issue #2191 - bad argument #2 to ''uri_parse'' box.cfg{replication = {}} box.cfg{replication = {}} -------------------------------------------------------------------------------- -- Test of hierarchical cfg type check -------------------------------------------------------------------------------- box.cfg{memtx_memory = "100500"} box.cfg{vinyl = "vinyl"} box.cfg{vinyl_write_threads = "threads"} box.cfg{instance_uuid = box.info.uuid} box.cfg{instance_uuid = '12345678-0123-5678-1234-abcdefabcdef'} box.cfg{replicaset_uuid = box.info.cluster.uuid} box.cfg{replicaset_uuid = '12345678-0123-5678-1234-abcdefabcdef'} -------------------------------------------------------------------------------- -- Test of default cfg options -------------------------------------------------------------------------------- test_run:cmd('create server cfg_tester1 with script = "box/lua/cfg_test1.lua"') test_run:cmd("start server cfg_tester1") test_run:cmd('switch cfg_tester1') box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads test_run:cmd("switch default") test_run:cmd("stop server cfg_tester1") test_run:cmd("cleanup server cfg_tester1") test_run:cmd('create server cfg_tester2 with script = "box/lua/cfg_test2.lua"') test_run:cmd("start server cfg_tester2") test_run:cmd('switch cfg_tester2') box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads test_run:cmd("switch default") test_run:cmd("stop 
server cfg_tester2") test_run:cmd("cleanup server cfg_tester2") test_run:cmd('create server cfg_tester3 with script = "box/lua/cfg_test3.lua"') test_run:cmd("start server cfg_tester3") test_run:cmd('switch cfg_tester3') box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads test_run:cmd("switch default") test_run:cmd("stop server cfg_tester3") test_run:cmd("cleanup server cfg_tester3") test_run:cmd('create server cfg_tester4 with script = "box/lua/cfg_test4.lua"') test_run:cmd("start server cfg_tester4") test_run:cmd('switch cfg_tester4') box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads test_run:cmd("switch default") test_run:cmd("stop server cfg_tester4") test_run:cmd("cleanup server cfg_tester4") -------------------------------------------------------------------------------- -- Check that 'vinyl_dir' cfg option is not checked as long as -- there is no vinyl indexes (issue #2664) -------------------------------------------------------------------------------- test_run:cmd('create server cfg_tester with script = "box/lua/cfg_bad_vinyl_dir.lua"') test_run:cmd("start server cfg_tester") test_run:cmd('switch cfg_tester') _ = box.schema.space.create('test_memtx', {engine = 'memtx'}) _ = box.space.test_memtx:create_index('pk') -- ok _ = box.schema.space.create('test_vinyl', {engine = 'vinyl'}) _ = box.space.test_vinyl:create_index('pk') -- error box.snapshot() test_run:cmd("restart server cfg_tester") test_run:cmd("switch default") test_run:cmd("stop server cfg_tester") test_run:cmd("cleanup server cfg_tester") test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/box/on_replace.result0000664000000000000000000002326313306565107021340 0ustar rootroottest_run = require('test_run').new() --- ... -- test c and lua triggers: must return only lua triggers #box.space._space:on_replace() --- - 0 ... function f() print('test') end --- ... type(box.space._space:on_replace(f)) --- - function ... 
#box.space._space:on_replace() --- - 1 ... ts = box.schema.space.create('test_space') --- ... ti = ts:create_index('primary', { type = 'hash' }) --- ... type(ts.on_replace) --- - function ... ts.on_replace() --- - error: 'usage: space:on_replace(function | nil, [function | nil])' ... ts:on_replace() --- - [] ... ts:on_replace(123) --- - error: 'trigger reset: incorrect arguments' ... function fail(old_tuple, new_tuple) error('test') end --- ... type(ts:on_replace(fail)) --- - function ... ts:insert{1, 'b', 'c'} --- - error: '[string "function fail(old_tuple, new_tuple) error(''te..."]:1: test' ... ts:get{1} --- ... ts:on_replace(nil, fail) --- ... ts:insert{1, 'b', 'c'} --- - [1, 'b', 'c'] ... ts:get{1} --- - [1, 'b', 'c'] ... function fail(old_tuple, new_tuple) error('abc') end --- ... type(ts:on_replace(fail)) --- - function ... ts:insert{2, 'b', 'c'} --- - error: '[string "function fail(old_tuple, new_tuple) error(''ab..."]:1: abc' ... ts:get{2} --- ... o = nil --- ... n = nil --- ... function save_out(told, tnew) o = told n = tnew end --- ... type(ts:on_replace(save_out, fail)) --- - function ... ts:insert{2, 'a', 'b', 'c'} --- - [2, 'a', 'b', 'c'] ... o --- - null ... n --- - [2, 'a', 'b', 'c'] ... ts:replace{2, 'd', 'e', 'f'} --- - [2, 'd', 'e', 'f'] ... o --- - [2, 'a', 'b', 'c'] ... n --- - [2, 'd', 'e', 'f'] ... type(ts:on_replace(function() test = 1 end)) --- - function ... #ts:on_replace() --- - 2 ... ts:drop() --- ... -- test garbage in lua stack #box.space._space:on_replace() --- - 1 ... function f2() print('test2') end --- ... type(box.space._space:on_replace(f2)) --- - function ... #box.space._space:on_replace() --- - 2 ... -- -- gh-587: crash on attempt to modify space from triggers -- first = box.schema.space.create('first') --- ... _= first:create_index('primary') --- ... second = box.schema.space.create('second') --- ... _ = second:create_index('primary') --- ... -- one statement test_run:cmd("setopt delimiter ';'"); --- - true ... 
trigger_id = first:on_replace(function() second:replace({2, first:get(1)[2] .. " from on_replace trigger"}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... first:replace({1, "hello"}) --- - [1, 'hello'] ... first:select() --- - - [1, 'hello'] ... second:select() --- - - [2, 'hello from on_replace trigger'] ... first:on_replace(nil, trigger_id) --- ... first:delete(1) --- - [1, 'hello'] ... second:delete(1) --- ... -- multiple statements test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() second:replace({1}) second:replace({2, first:get(1)[2] .. " in on_replace trigger"}) second:replace({3}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... first:replace({1, "multistatement tx"}) --- - [1, 'multistatement tx'] ... first:select() --- - - [1, 'multistatement tx'] ... second:select() --- - - [1] - [2, 'multistatement tx in on_replace trigger'] - [3] ... first:on_replace(nil, trigger_id) --- ... first:delete(1) --- - [1, 'multistatement tx'] ... second:delete(1) --- - [1] ... second:delete(2) --- - [2, 'multistatement tx in on_replace trigger'] ... second:delete(3) --- - [3] ... -- rollback on error test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() second:replace({1, "discarded"}) second:insert({1}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... first:replace({1, "rollback"}) --- - error: Duplicate key exists in unique index 'primary' in space 'second' ... first:select() --- - [] ... second:select() --- - [] ... first:on_replace(nil, trigger_id) --- ... -- max recursion depth RECURSION_LIMIT = 0 --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() RECURSION_LIMIT = RECURSION_LIMIT + 1 first:auto_increment({"recursive"}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... 
first:replace({1, "recursive"}) --- - error: 'Can not execute a nested statement: nesting limit reached' ... first:select() --- - [] ... second:select() --- - [] ... RECURSION_LIMIT --- - 4 ... first:on_replace(nil, trigger_id) --- ... -- recursion level = 0 --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() level = level + 1 if level >= RECURSION_LIMIT then return end first:auto_increment({"recursive", level}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... first:replace({0, "initial"}) --- - [0, 'initial'] ... first:select() --- - - [0, 'initial'] - [1, 'recursive', 1] - [2, 'recursive', 2] - [3, 'recursive', 3] ... second:select() --- - [] ... RECURSION_LIMIT --- - 4 ... first:on_replace(nil, trigger_id) --- ... first:truncate() --- ... second:truncate() --- ... -- transaction control test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() box.commit() first:auto_increment({"recursive", level}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... first:replace({0, "initial"}) --- - error: Can not commit transaction in a nested statement ... first:select() --- - [] ... second:select() --- - [] ... first:on_replace(nil, trigger_id) --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() box.rollback() first:auto_increment({"recursive", level}) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... first:replace({0, "initial"}) --- - error: Rollback called in a nested statement ... first:select() --- - [] ... second:select() --- - [] ... first:on_replace(nil, trigger_id) --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... trigger_id = first:on_replace(function() box.begin() first:auto_increment({"recursive", level}) box.commit() end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... 
first:replace({0, "initial"}) --- - error: 'Operation is not permitted when there is an active transaction ' ... first:select() --- - [] ... second:select() --- - [] ... first:on_replace(nil, trigger_id) --- ... first:drop() --- ... second:drop() --- ... s = box.schema.space.create('test_on_repl_ddl') --- ... _ = s:create_index('pk') --- ... t = s:on_replace(function () box.schema.space.create('some_space') end) --- ... s:replace({1, 2}) --- - error: Space _schema does not support multi-statement transactions ... t = s:on_replace(function () s:create_index('sec') end, t) --- ... s:replace({2, 3}) --- - error: Space _index does not support multi-statement transactions ... t = s:on_replace(function () box.schema.user.create('newu') end, t) --- ... s:replace({3, 4}) --- - error: Space _user does not support multi-statement transactions ... t = s:on_replace(function () box.schema.role.create('newr') end, t) --- ... s:replace({4, 5}) --- - error: Space _user does not support multi-statement transactions ... t = s:on_replace(function () s:drop() end, t) --- ... s:replace({5, 6}) --- - error: DDL does not support multi-statement transactions ... t = s:on_replace(function () box.schema.func.create('newf') end, t) --- ... s:replace({6, 7}) --- - error: Space _func does not support multi-statement transactions ... t = s:on_replace(function () box.schema.user.grant('guest', 'read,write', 'space', 'test_on_repl_ddl') end, t) --- ... s:replace({7, 8}) --- - error: Space _priv does not support multi-statement transactions ... t = s:on_replace(function () s:rename('newname') end, t) --- ... s:replace({8, 9}) --- - error: Space _space does not support multi-statement transactions ... t = s:on_replace(function () s.index.pk:rename('newname') end, t) --- ... s:replace({9, 10}) --- - error: Space _index does not support multi-statement transactions ... s:select() --- - [] ... s:drop() --- ... -- -- gh-3020: sub-statement rollback -- s1 = box.schema.space.create('test1') --- ... 
_ = s1:create_index('pk') --- ... s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk') --- ... s3 = box.schema.space.create('test3') --- ... _ = s3:create_index('pk') --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... x1 = 1; --- ... x2 = 1; --- ... _ = s1:on_replace(function(old, new) for i = 1, 3 do s2:insert{x1} x1 = x1 + 1 end if new[2] == 'fail' then error('fail') end pcall(s2.insert, s2, {123, 'fail'}) end); --- ... _ = s2:on_replace(function(old, new) for i = 1, 3 do s3:insert{x2} x2 = x2 + 1 end if new[2] == 'fail' then error('fail') end end); --- ... box.begin() s1:insert{1} pcall(s1.insert, s1, {123, 'fail'}) box.commit(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s1:select() --- - - [1] ... s2:select() --- - - [1] - [2] - [3] ... s3:select() --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] ... s1:drop() --- ... s2:drop() --- ... s3:drop() --- ... -- -- gh-3020: trigger chaining -- s1 = box.schema.space.create('test1') --- ... _ = s1:create_index('pk') --- ... s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk') --- ... s3 = box.schema.space.create('test3') --- ... _ = s3:create_index('pk') --- ... x = 1 --- ... _ = s1:on_replace(function(old, new) s2:insert(new:update{{'!', 2, x}}) x = x + 1 end) --- ... _ = s1:on_replace(function(old, new) s3:insert(new:update{{'!', 2, x}}) x = x + 1 end) --- ... box.begin() s1:insert{1} s1:insert{2} s1:insert{3} box.commit() --- ... s1:select() --- - - [1] - [2] - [3] ... s2:select() --- - - [1, 2] - [2, 4] - [3, 6] ... s3:select() --- - - [1, 1] - [2, 3] - [3, 5] ... s1:drop() --- ... s2:drop() --- ... s3:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/errinj_index.test.lua0000664000000000000000000000514513306560010022115 0ustar rootrooterrinj = box.error.injection -- Check a failed realloc in tree index. 
s = box.schema.space.create('tweedledum') index = s:create_index('primary', {type = 'tree'} ) for i = 1,10 do s:insert{i, i, 'test' .. i} end res = {} for i = 1,10 do table.insert(res, s:get{i}) end res res = {} for _, t in s.index[0]:pairs() do table.insert(res, t) end res errinj.set("ERRINJ_INDEX_ALLOC", true) res = {} for i = 1,10 do table.insert(res, s:get{i}) end res res = {} for _, t in s.index[0]:pairs() do table.insert(res, t) end res for i = 501,2500 do s:insert{i, i} end s:delete{1} res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res res = {} for i = 501,510 do table.insert(res, (s:get{i})) end res --count must be exactly 10 function check_iter_and_size() local count = 0 for _, t in s.index[0]:pairs() do count = count + 1 end return count == 10 and "ok" or "fail" end check_iter_and_size() for i = 2501,3500 do s:insert{i, i} end s:delete{2} check_iter_and_size() res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res errinj.set("ERRINJ_INDEX_ALLOC", false) for i = 4501,5500 do s:insert{i, i} end res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res s:delete{8} res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res res = {} for i = 5001,5010 do table.insert(res, (s:get{i})) end res s:drop() ----------------------------------- -- Check a failed realloc in hash index. s = box.schema.space.create('tweedledum') index = s:create_index('primary', {type = 'hash'} ) for i = 1,10 do s:insert{i, i, 'test' .. 
i} end res = {} for i = 1,10 do table.insert(res, s:get{i}) end res res = {} for _, t in s.index[0]:pairs() do table.insert(res, t) end res errinj.set("ERRINJ_INDEX_ALLOC", true) res = {} for i = 1,10 do table.insert(res, s:get{i}) end res res = {} for _, t in s.index[0]:pairs() do table.insert(res, t) end res for i = 501,2500 do s:insert{i, i} end s:delete{1} res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res res = {} for i = 501,510 do table.insert(res, (s:get{i})) end res res = {} for i = 2001,2010 do table.insert(res, (s:get{i})) end res check_iter_and_size() for i = 2501,3500 do s:insert{i, i} end s:delete{2} check_iter_and_size() res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res for i = 3501,4500 do s:insert{i, i} end s:delete{3} check_iter_and_size() errinj.set("ERRINJ_INDEX_ALLOC", false) for i = 4501,5500 do s:insert{i, i} end res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res s:delete{8} res = {} for i = 1,10 do table.insert(res, (s:get{i})) end res res = {} for i = 5001,5010 do table.insert(res, (s:get{i})) end res s:drop() errinj = nil tarantool_1.9.1.26.g63eb81e3c/test/box/reload2.c0000664000000000000000000000260113306560010017442 0ustar rootroot#include "module.h" #include #include int foo(box_function_ctx_t *ctx, const char *args, const char *args_end) { static const char *SPACE_TEST_NAME = "test"; uint32_t space_test_id = box_space_id_by_name(SPACE_TEST_NAME, strlen(SPACE_TEST_NAME)); if (space_test_id == BOX_ID_NIL) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't find space %s", SPACE_TEST_NAME); } char buf[16]; char *end = buf; end = mp_encode_array(end, 1); end = mp_encode_uint(end, 0); if (box_insert(space_test_id, buf, end, NULL) < 0) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't insert in space %s", SPACE_TEST_NAME); } return 0; } int test_reload(box_function_ctx_t *ctx, const char *args, const char *args_end) { fiber_sleep(0.001); char tuple_buf[64]; char *tuple_end = tuple_buf; 
tuple_end = mp_encode_array(tuple_end, 1); tuple_end = mp_encode_uint(tuple_end, 2); struct tuple *tuple = box_tuple_new(box_tuple_format_default(), tuple_buf, tuple_end); return box_return_tuple(ctx, tuple); } int test_reload_fail(box_function_ctx_t *ctx, const char *args, const char *args_end) { char tuple_buf[64]; char *tuple_end = tuple_buf; tuple_end = mp_encode_array(tuple_end, 1); tuple_end = mp_encode_uint(tuple_end, 2); struct tuple *tuple = box_tuple_new(box_tuple_format_default(), tuple_buf, tuple_end); return box_return_tuple(ctx, tuple); } tarantool_1.9.1.26.g63eb81e3c/test/box/update.test.lua0000664000000000000000000002241613306560010020717 0ustar rootroots = box.schema.space.create('tweedledum') index = s:create_index('pk') -- test delete field s:insert{1000001, 1000002, 1000003, 1000004, 1000005} s:update({1000001}, {{'#', 1, 1}}) s:update({1000001}, {{'#', 1, "only one record please"}}) s:truncate() -- test arithmetic s:insert{1, 0} s:update(1, {{'+', 2, 10}}) s:update(1, {{'+', 2, 15}}) s:update(1, {{'-', 2, 5}}) s:update(1, {{'-', 2, 20}}) s:update(1, {{'|', 2, 0x9}}) s:update(1, {{'|', 2, 0x6}}) s:update(1, {{'&', 2, 0xabcde}}) s:update(1, {{'&', 2, 0x2}}) s:update(1, {{'^', 2, 0xa2}}) s:update(1, {{'^', 2, 0xa2}}) s:truncate() -- test delete multiple fields s:insert{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} s:update({0}, {{'#', 42, 1}}) s:update({0}, {{'#', 4, 'abirvalg'}}) s:update({0}, {{'#', 2, 1}, {'#', 4, 2}, {'#', 6, 1}}) s:update({0}, {{'#', 4, 3}}) s:update({0}, {{'#', 5, 123456}}) s:update({0}, {{'#', 3, 4294967295}}) s:update({0}, {{'#', 2, 0}}) s:truncate() -- test insert field s:insert{1, 3, 6, 9} s:update({1}, {{'!', 2, 2}}) s:update({1}, {{'!', 4, 4}, {'!', 4, 5}, {'!', 5, 7}, {'!', 5, 8}}) s:update({1}, {{'!', 10, 10}, {'!', 10, 11}, {'!', 10, 12}}) s:truncate() s:insert{1, 'tuple'} s:update({1}, {{'#', 2, 1}, {'!', 2, 'inserted tuple'}, {'=', 3, 'set tuple'}}) s:truncate() s:insert{1, 'tuple'} s:update({1}, {{'=', 2, 
'set tuple'}, {'!', 2, 'inserted tuple'}, {'#', 3, 1}}) s:update({1}, {{'!', 1, 3}, {'!', 1, 2}}) s:truncate() -- test update's assign opearations s:replace{1, 'field string value'} s:update({1}, {{'=', 2, 'new field string value'}, {'=', 3, 42}, {'=', 4, 0xdeadbeef}}) -- test multiple update opearations on the same field s:update({1}, {{'+', 3, 16}, {'&', 4, 0xffff0000}, {'|', 4, 0x0000a0a0}, {'^', 4, 0xffff00aa}}) -- test update splice operation s:replace{1953719668, 'something to splice'} s:update(1953719668, {{':', 2, 1, 4, 'no'}}) s:update(1953719668, {{':', 2, 1, 2, 'every'}}) -- check an incorrect offset s:update(1953719668, {{':', 2, 100, 2, 'every'}}) s:update(1953719668, {{':', 2, -100, 2, 'every'}}) s:truncate() s:insert{1953719668, 'hello', 'october', '20th'}:unpack() s:truncate() s:insert{1953719668, 'hello world'} s:update(1953719668, {{'=', 2, 'bye, world'}}) s:delete{1953719668} s:replace({10, 'abcde'}) s:update(10, {{':', 2, 0, 0, '!'}}) s:update(10, {{':', 2, 1, 0, '('}}) s:update(10, {{':', 2, 2, 0, '({'}}) s:update(10, {{':', 2, -1, 0, ')'}}) s:update(10, {{':', 2, -2, 0, '})'}}) -- test update delete operations s:update({1}, {{'#', 4, 1}, {'#', 3, 1}}) -- test update insert operations s:update({1}, {{'!', 2, 1}, {'!', 2, 2}, {'!', 2, 3}, {'!', 2, 4}}) -- s:update: zero field s:insert{48} s:update(48, {{'=', 0, 'hello'}}) -- s:update: push/pop fields s:insert{1684234849} s:update({1684234849}, {{'#', 2, 1}}) s:update({1684234849}, {{'!', -1, 'push1'}}) s:update({1684234849}, {{'!', -1, 'push2'}}) s:update({1684234849}, {{'!', -1, 'push3'}}) s:update({1684234849}, {{'#', 2, 1}, {'!', -1, 'swap1'}}) s:update({1684234849}, {{'#', 2, 1}, {'!', -1, 'swap2'}}) s:update({1684234849}, {{'#', 2, 1}, {'!', -1, 'swap3'}}) s:update({1684234849}, {{'#', -1, 1}, {'!', -1, 'noop1'}}) s:update({1684234849}, {{'#', -1, 1}, {'!', -1, 'noop2'}}) s:update({1684234849}, {{'#', -1, 1}, {'!', -1, 'noop3'}}) -- -- negative indexes -- box.tuple.new({1, 2, 3, 4, 
5}):update({{'!', 0, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -1, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -3, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -5, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -6, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -7, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -100500, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'=', 0, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -1, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -3, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -5, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -6, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -100500, 'Test'}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'+', 0, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -1, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -3, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -5, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -6, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -100500, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'|', 0, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -1, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -3, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -5, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -6, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -100500, 100}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'#', 0, 1}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -1, 1}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -3, 1}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -5, 1}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -6, 1}}) box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -100500, 1}}) -- -- #416: UPDATEs from Lua can't be properly restored due to one based indexing -- env = require('test_run') test_run = env.new() test_run:cmd('restart server default') s = box.space.tweedledum s:select{} s:truncate() s:drop() -- #521: 
Cryptic error message in update operation s = box.schema.space.create('tweedledum') index = s:create_index('pk') s:insert{1, 2, 3} s:update({1}) s:update({1}, {'=', 1, 1}) s:drop() -- #528: Different types in arithmetical update, overflow check ffi = require('ffi') s = box.schema.create_space('tweedledum') index = s:create_index('pk') s:insert{0, -1} -- + -- s:update({0}, {{'+', 2, "a"}}) -- err s:update({0}, {{'+', 2, 10}}) -- neg(ative) + pos(itive) = pos(itive) 9 s:update({0}, {{'+', 2, 5}}) -- pos + pos = pos 14 s:update({0}, {{'+', 2, -4}}) -- pos + neg = pos 10 s:update({0}, {{'+', 2, -22}}) -- pos + neg = neg -12 s:update({0}, {{'+', 2, -3}}) -- neg + neg = neg -15 s:update({0}, {{'+', 2, 7}}) -- neg + pos = neg -8 -- - -- s:update({0}, {{'-', 2, "a"}}) -- err s:update({0}, {{'-', 2, 16}}) -- neg(ative) - pos(itive) = neg(ative) -24 s:update({0}, {{'-', 2, -4}}) -- neg - neg = neg 20 s:update({0}, {{'-', 2, -32}}) -- neg - neg = pos 12 s:update({0}, {{'-', 2, 3}}) -- pos - pos = pos 9 s:update({0}, {{'-', 2, -5}}) -- pos - neg = pos 14 s:update({0}, {{'-', 2, 17}}) -- pos - pos = neg -3 -- bit -- s:replace{0, 0} -- 0 s:update({0}, {{'|', 2, 24}}) -- 24 s:update({0}, {{'|', 2, 2}}) -- 26 s:update({0}, {{'&', 2, 50}}) -- 18 s:update({0}, {{'^', 2, 6}}) -- 20 s:update({0}, {{'|', 2, -1}}) -- err s:update({0}, {{'&', 2, -1}}) -- err s:update({0}, {{'^', 2, -1}}) -- err s:replace{0, -1} -- -1 s:update({0}, {{'|', 2, 2}}) -- err s:update({0}, {{'&', 2, 40}}) -- err s:update({0}, {{'^', 2, 6}}) -- err s:replace{0, 1.5} -- 1.5 s:update({0}, {{'|', 2, 2}}) -- err s:update({0}, {{'&', 2, 40}}) -- err s:update({0}, {{'^', 2, 6}}) -- err -- double s:replace{0, 5} -- 5 s:update({0}, {{'+', 2, 1.5}}) -- int + double = double 6.5 s:update({0}, {{'|', 2, 2}}) -- err (double!) 
s:update({0}, {{'-', 2, 0.5}}) -- double - double = double 6 s:update({0}, {{'+', 2, 1.5}}) -- double + double = double 7.5 -- float s:replace{0, ffi.new("float", 1.5)} -- 1.5 s:update({0}, {{'+', 2, 2}}) -- float + int = float 3.5 s:update({0}, {{'+', 2, ffi.new("float", 3.5)}}) -- float + int = float 7 s:update({0}, {{'|', 2, 2}}) -- err (float!) s:update({0}, {{'-', 2, ffi.new("float", 1.5)}}) -- float - float = float 5.5 s:update({0}, {{'+', 2, ffi.new("float", 3.5)}}) -- float + float = float 9 s:update({0}, {{'-', 2, ffi.new("float", 9)}}) -- float + float = float 0 s:update({0}, {{'+', 2, ffi.new("float", 1.2)}}) -- float + float = float 1.2 -- overflow -- s:replace{0, 0xfffffffffffffffeull} s:update({0}, {{'+', 2, 1}}) -- ok s:update({0}, {{'+', 2, 1}}) -- overflow s:update({0}, {{'+', 2, 100500}}) -- overflow s:replace{0, 1} s:update({0}, {{'+', 2, 0xffffffffffffffffull}}) -- overflow s:replace{0, -1} s:update({0}, {{'+', 2, 0xffffffffffffffffull}}) -- ok s:replace{0, 0} s:update({0}, {{'-', 2, 0x7fffffffffffffffull}}) -- ok s:replace{0, -1} s:update({0}, {{'-', 2, 0x7fffffffffffffffull}}) -- ok s:replace{0, -2} s:update({0}, {{'-', 2, 0x7fffffffffffffffull}}) -- overflow s:replace{0, 1} s:update({0}, {{'-', 2, 0xffffffffffffffffull}}) -- overflow s:replace{0, 0xffffffffffffffefull} s:update({0}, {{'-', 2, -16}}) -- ok s:update({0}, {{'-', 2, -16}}) -- overflow s:replace{0, -0x4000000000000000ll} s:update({0}, {{'+', 2, -0x4000000000000000ll}}) -- ok s:replace{0, -0x4000000000000000ll} s:update({0}, {{'+', 2, -0x4000000000000001ll}}) -- overflow -- some wrong updates -- s:update({0}, 0) s:update({0}, {'+', 2, 2}) s:update({0}, {{}}) s:update({0}, {{'+'}}) s:update({0}, {{'+', 0}}) s:update({0}, {{'+', '+', '+'}}) s:update({0}, {{0, 0, 0}}) -- test for https://github.com/tarantool/tarantool/issues/1142 -- broken WAL during upsert ops = {} for i = 1,10 do table.insert(ops, {'=', 2, '1234567890'}) end s:upsert({0}, ops) -- 
https://github.com/tarantool/tarantool/issues/1854 s:get{0} s:update({0}, {}) --#stop server default --#start server default s = box.space.tweedledum -- -- gh-2036: msgpackffi doesn't support __serialize hint -- map = setmetatable({}, { __serialize = 'map' }) t = box.tuple.new({1, 2, 3}) s:replace({1, 2, 3}) t:update({{'=', 3, map}}) s:update(1, {{'=', 3, map}}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/stat.test.lua0000664000000000000000000000156213306565107020423 0ustar rootroot-- clear statistics env = require('test_run') test_run = env.new() test_run:cmd('restart server default with cleanup=1') box.stat.INSERT.total box.stat.DELETE.total box.stat.UPDATE.total box.stat.REPLACE.total box.stat.SELECT.total box.stat.ERROR.total space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) -- check stat_cleanup -- add several tuples for i=1,10 do space:insert{i, 'tuple'..tostring(i)} end box.stat.INSERT.total box.stat.DELETE.total box.stat.UPDATE.total box.stat.REPLACE.total box.stat.SELECT.total -- check exceptions space:get('Impossible value') box.stat.ERROR.total test_run:cmd('restart server default') -- statistics must be zero box.stat.INSERT.total box.stat.DELETE.total box.stat.UPDATE.total box.stat.REPLACE.total box.stat.SELECT.total box.stat.ERROR.total -- cleanup box.space.tweedledum:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/tiny.lua0000664000000000000000000000065413306560010017442 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 50 * 1024 * 1024, pid_file = "tarantool.pid", force_recovery = false, slab_alloc_factor = 1.1, rows_per_wal = 5000000 } require('console').listen(os.getenv('ADMIN')) box.once('init', function() box.schema.user.grant('guest', 'read,write,execute', 'universe') end) tarantool_1.9.1.26.g63eb81e3c/test/box/iterator.result0000664000000000000000000001617413306560010021051 0ustar rootrootiterate = dofile('utils.lua').iterate 
--- ... test_run = require('test_run').new() --- ... test_run:cmd("push filter '(error: .builtin/.*[.]lua):[0-9]+' to '\\1'") --- - true ... # Tree single-part unique --- ... space = box.schema.space.create('tweedledum') --- ... idx1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true}) --- ... -- Hash single-part unique idx5 = space:create_index('i4', { type = 'hash', parts = {1, 'string'}, unique = true}) --- ... -- Hash multi-part unique idx6 = space:create_index('i5', { type = 'hash', parts = {2, 'string', 3, 'string'}, unique = true}) --- ... space:insert{'pid_001', 'sid_001', 'tid_998', 'a'} --- - ['pid_001', 'sid_001', 'tid_998', 'a'] ... space:insert{'pid_002', 'sid_001', 'tid_997', 'a'} --- - ['pid_002', 'sid_001', 'tid_997', 'a'] ... space:insert{'pid_003', 'sid_002', 'tid_997', 'b'} --- - ['pid_003', 'sid_002', 'tid_997', 'b'] ... space:insert{'pid_005', 'sid_002', 'tid_996', 'b'} --- - ['pid_005', 'sid_002', 'tid_996', 'b'] ... space:insert{'pid_007', 'sid_003', 'tid_996', 'a'} --- - ['pid_007', 'sid_003', 'tid_996', 'a'] ... space:insert{'pid_011', 'sid_004', 'tid_996', 'c'} --- - ['pid_011', 'sid_004', 'tid_996', 'c'] ... space:insert{'pid_013', 'sid_005', 'tid_996', 'b'} --- - ['pid_013', 'sid_005', 'tid_996', 'b'] ... space:insert{'pid_017', 'sid_006', 'tid_996', 'a'} --- - ['pid_017', 'sid_006', 'tid_996', 'a'] ... space:insert{'pid_019', 'sid_005', 'tid_995', 'a'} --- - ['pid_019', 'sid_005', 'tid_995', 'a'] ... space:insert{'pid_023', 'sid_005', 'tid_994', 'a'} --- - ['pid_023', 'sid_005', 'tid_994', 'a'] ... ------------------------------------------------------------------------------- -- Iterator: hash single-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i4', 0, 1) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... 
iterate('tweedledum', 'i4', 0, 1, box.index.ALL) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... iterate('tweedledum', 'i4', 0, 1, box.index.EQ) --- - error: HASH index does not support selects via a partial key (expected 1 parts, got 0). Please Consider changing index type to TREE. ... iterate('tweedledum', 'i4', 0, 1, box.index.EQ, 'pid_003') --- - - $pid_003$ ... iterate('tweedledum', 'i4', 0, 1, box.index.EQ, 'pid_666') --- - [] ... ------------------------------------------------------------------------------- -- Iterator: hash multi-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i5', 1, 3, box.index.ALL) --- - - $sid_001$tid_997$ - $sid_001$tid_998$ - $sid_002$tid_996$ - $sid_002$tid_997$ - $sid_003$tid_996$ - $sid_004$tid_996$ - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005') --- - error: HASH index does not support selects via a partial key (expected 2 parts, got 1). Please Consider changing index type to TREE. ... iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005', 'tid_995') --- - - $sid_005$tid_995$ ... iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005', 'tid_999') --- - [] ... iterate('tweedledum', 'i5', 1, 3, box.index.EQ, 'sid_005', 'tid_995', 'a') --- - error: Invalid key part count (expected [0..2], got 3) ... space:drop() --- ... ------------------------------------------------------------------------------- -- Iterator: https://github.com/tarantool/tarantool/issues/464 -- Iterator safety after changing schema ------------------------------------------------------------------------------- space = box.schema.space.create('test', {temporary=true}) --- ... idx1 = space:create_index('primary', {type='HASH',unique=true}) --- ... idx2 = space:create_index('t1', {type='TREE',unique=true}) --- ... 
idx3 = space:create_index('t2', {type='TREE',unique=true}) --- ... box.space.test:insert{0} --- - [0] ... box.space.test:insert{1} --- - [1] ... gen1, param1, state1 = space.index.t1:pairs({}, {iterator = box.index.ALL}) --- ... gen1(param1, state1) --- - - [0] ... gen2, param2, state2 = space.index.t2:pairs({}, {iterator = box.index.ALL}) --- ... gen2(param2, state2) --- - - [0] ... id = space.index.t1.id --- ... box.schema.index.drop(space.id, id) --- ... gen1(param1, state1) --- - null ... gen2(param2, state2) --- - - [1] ... gen2, param2, state2 = space.index.t2:pairs({}, {iterator = box.index.ALL}) --- ... gen2(param2, state2) --- - - [0] ... gen2(param2, state2) --- - - [1] ... space:drop() --- ... ------------------------------------------------------------------------------- -- Iterator: https://github.com/tarantool/tarantool/issues/498 -- Iterator is not checked for wrong type; accept lowercase iterator ------------------------------------------------------------------------------- space = box.schema.space.create('test', {temporary=true}) --- ... idx1 = space:create_index('primary', {type='TREE',unique=true}) --- ... space:insert{0} --- - [0] ... space:insert{1} --- - [1] ... gen, param, state = space.index.primary:pairs({}, {iterator = 'ALL'}) --- ... gen(param, state) --- - - [0] ... gen(param, state) --- - - [1] ... gen(param, state) --- - null ... gen, param, state = space.index.primary:pairs({}, {iterator = 'all'}) --- ... gen(param, state) --- - - [0] ... gen(param, state) --- - - [1] ... gen, param, state = space.index.primary:pairs({}, {iterator = 'mistake'}) --- - error: Unknown iterator type 'mistake' ... space:select({}, {iterator = box.index.ALL}) --- - - [0] - [1] ... space:select({}, {iterator = 'all'}) --- - - [0] - [1] ... space:select({}, {iterator = 'mistake'}) --- - error: Unknown iterator type 'mistake' ... space:drop() --- ... 
------------------------------------------------------------------------------- -- Restore GE iterator for HASH https://github.com/tarantool/tarantool/issues/836 ------------------------------------------------------------------------------- space = box.schema.space.create('test', {temporary=true}) --- ... idx1 = space:create_index('primary', {type='hash',unique=true}) --- ... for i = 0,5 do space:insert{i} end --- ... space:select(2) --- - - [2] ... space:select(5, {iterator="GE"}) --- - error: Index 'primary' (HASH) of space 'test' (memtx) does not support requested iterator type ... space:select(nil, {iterator="GE"}) --- - error: HASH index does not support selects via a partial key (expected 1 parts, got 0). Please Consider changing index type to TREE. ... space:select(5, {iterator="GT"}) --- - [] ... l = space:select(nil, {limit=2, iterator="GT"}) --- ... l --- - - [0] - [1] ... l = space:select(l[#l][1], {limit=2, iterator="GT"}) --- ... l --- - - [2] - [3] ... l = space:select(l[#l][1], {limit=2, iterator="GT"}) --- ... l --- - - [4] - [5] ... l = space:select(l[#l][1], {limit=2, iterator="GT"}) --- ... l --- - [] ... space:drop() --- ... iterate = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/role.result0000664000000000000000000004064113306565107020171 0ustar rootrootbox.schema.role.create('iddqd') --- ... box.schema.role.create('iddqd') --- - error: Role 'iddqd' already exists ... box.schema.role.drop('iddqd') --- ... box.schema.role.drop('iddqd') --- - error: Role 'iddqd' is not found ... box.schema.role.create('iddqd') --- ... -- impossible to su to a role box.session.su('iddqd') --- - error: User 'iddqd' is not found ... -- test granting privilege to a role box.schema.role.grant('iddqd', 'execute', 'universe') --- ... box.schema.role.info('iddqd') --- - - - execute - universe - ... box.schema.role.revoke('iddqd', 'execute', 'universe') --- ... box.schema.role.info('iddqd') --- - [] ... 
-- test granting a role to a user box.schema.user.create('tester') --- ... box.schema.user.info('tester') --- - - - execute - role - public - - session,usage - universe - ... box.schema.user.grant('tester', 'execute', 'role', 'iddqd') --- ... box.schema.user.info('tester') --- - - - execute - role - public - - execute - role - iddqd - - session,usage - universe - ... -- test granting user to a user box.schema.user.grant('tester', 'execute', 'role', 'tester') --- - error: Role 'tester' is not found ... -- test granting a non-execute grant on a role - error box.schema.user.grant('tester', 'write', 'role', 'iddqd') --- - error: Unsupported role privilege 'write' ... box.schema.user.grant('tester', 'read', 'role', 'iddqd') --- - error: Unsupported role privilege 'read' ... -- test granting role to a role box.schema.role.grant('iddqd', 'execute', 'role', 'iddqd') --- - error: Granting role 'iddqd' to role 'iddqd' would create a loop ... box.schema.role.grant('iddqd', 'iddqd') --- - error: Granting role 'iddqd' to role 'iddqd' would create a loop ... box.schema.role.revoke('iddqd', 'iddqd') --- - error: User 'iddqd' does not have role 'iddqd' ... box.schema.user.grant('tester', 'iddqd') --- - error: User 'tester' already has role 'iddqd' ... box.schema.user.revoke('tester', 'iddqd') --- ... box.schema.role.drop('iddqd') --- ... box.schema.user.revoke('tester', 'no-such-role') --- - error: Role 'no-such-role' is not found ... box.schema.user.grant('tester', 'no-such-role') --- - error: Role 'no-such-role' is not found ... box.schema.user.drop('tester') --- ... -- check for loops in role grants box.schema.role.create('a') --- ... box.schema.role.create('b') --- ... box.schema.role.create('c') --- ... box.schema.role.create('d') --- ... box.schema.role.grant('b', 'a') --- ... box.schema.role.grant('c', 'a') --- ... box.schema.role.grant('d', 'b') --- ... box.schema.role.grant('d', 'c') --- ... 
--check user restrictions box.schema.user.grant('a', 'd') --- - error: User 'a' is not found ... box.schema.user.revoke('a', 'd') --- - error: User 'a' is not found ... box.schema.user.drop('a') --- - error: User 'a' is not found ... box.schema.role.grant('a', 'd') --- - error: Granting role 'd' to role 'a' would create a loop ... box.schema.role.drop('d') --- ... box.schema.role.drop('b') --- ... box.schema.role.drop('c') --- ... box.schema.role.drop('a') --- ... -- check that when dropping a role, it's first revoked -- from whoever it is granted box.schema.role.create('a') --- ... box.schema.role.create('b') --- ... box.schema.role.grant('b', 'a') --- ... box.schema.role.drop('a') --- ... box.schema.role.info('b') --- - [] ... box.schema.role.drop('b') --- ... -- check a grant received via a role box.schema.user.create('test') --- ... box.schema.user.create('grantee') --- ... box.schema.role.create('liaison') --- ... --check role restrictions box.schema.role.grant('test', 'liaison') --- - error: Role 'test' is not found ... box.schema.role.revoke('test', 'liaison') --- - error: Role 'test' is not found ... box.schema.role.drop('test') --- - error: Role 'test' is not found ... box.schema.user.grant('grantee', 'liaison') --- ... box.schema.user.grant('test', 'read,write,create', 'universe') --- ... box.session.su('test') --- ... s = box.schema.space.create('test') --- ... _ = s:create_index('i1') --- ... box.schema.role.grant('liaison', 'read,write', 'space', 'test') --- ... box.session.su('grantee') --- ... box.space.test:insert{1} --- - [1] ... box.space.test:select{1} --- - - [1] ... box.session.su('test') --- ... box.schema.role.revoke('liaison', 'read,write', 'space', 'test') --- ... box.session.su('grantee') --- ... box.space.test:insert{1} --- - error: Write access to space 'test' is denied for user 'grantee' ... box.space.test:select{1} --- - error: Read access to space 'test' is denied for user 'grantee' ... box.session.su('admin') --- ... 
box.schema.user.drop('test') --- ... box.schema.user.drop('grantee') --- ... box.schema.role.drop('liaison') --- ... -- -- Test how privileges are propagated through a complex role graph. -- Here's the graph: -- -- role1 ->- role2 -->- role4 -->- role6 ->- user1 -- \ / \ -- \->- role5 ->-/ \->- role9 ->- role10 ->- user -- / \ / -- role3 ->-/ \->- role7 ->-/ -- -- Privilege checks verify that grants/revokes are propagated correctly -- from the role1 to role10. -- box.schema.user.create("user") --- ... box.schema.role.create("role1") --- ... box.schema.role.create("role2") --- ... box.schema.role.create("role3") --- ... box.schema.role.create("role4") --- ... box.schema.role.create("role5") --- ... box.schema.role.create("role6") --- ... box.schema.role.create("role7") --- ... box.schema.user.create("user1") --- ... box.schema.role.create("role9") --- ... box.schema.role.create("role10") --- ... box.schema.role.grant("role2", "role1") --- ... box.schema.role.grant("role4", "role2") --- ... box.schema.role.grant("role5", "role2") --- ... box.schema.role.grant("role5", "role3") --- ... box.schema.role.grant("role6", "role4") --- ... box.schema.role.grant("role6", "role5") --- ... box.schema.role.grant("role7", "role5") --- ... box.schema.user.grant("user1", "role6") --- ... box.schema.role.grant("role9", "role6") --- ... box.schema.role.grant("role9", "role7") --- ... box.schema.role.grant("role10", "role9") --- ... box.schema.user.grant("user", "role10") --- ... -- try to create a cycle box.schema.role.grant("role2", "role10") --- - error: Granting role 'role10' to role 'role2' would create a loop ... -- -- test grant propagation -- box.schema.role.grant("role1", "read", "universe") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - _space ... box.session.su("admin") --- ... box.schema.role.revoke("role1", "read", "universe") --- ... box.session.su("user") --- ... 
box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user' ... box.session.su("admin") --- ... -- -- space-level privileges -- box.schema.role.grant("role1", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user' ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("admin") --- ... box.schema.role.revoke("role1", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user' ... box.space._index:get{288, 0}[3] --- - error: Read access to space '_index' is denied for user 'user' ... box.session.su("admin") --- ... -- -- grant to a non-leaf branch -- box.schema.role.grant("role5", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user' ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("admin") --- ... box.schema.role.revoke("role5", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user' ... box.space._index:get{288, 0}[3] --- - error: Read access to space '_index' is denied for user 'user' ... box.session.su("admin") --- ... -- -- grant via two branches -- box.schema.role.grant("role3", "read", "space", "_index") --- ... box.schema.role.grant("role4", "read", "space", "_index") --- ... box.schema.role.grant("role9", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("user1") --- ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("admin") --- ... 
box.schema.role.revoke("role3", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("user1") --- ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("admin") --- ... box.schema.role.revoke("role4", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._index:get{288, 0}[3] --- - primary ... box.session.su("user1") --- ... box.space._index:get{288, 0}[3] --- - error: Read access to space '_index' is denied for user 'user1' ... box.session.su("admin") --- ... box.schema.role.revoke("role9", "read", "space", "_index") --- ... box.session.su("user") --- ... box.space._index:get{288, 0}[3] --- - error: Read access to space '_index' is denied for user 'user' ... box.session.su("user1") --- ... box.space._index:get{288, 0}[3] --- - error: Read access to space '_index' is denied for user 'user1' ... box.session.su("admin") --- ... -- -- check diamond-shaped grant graph -- box.schema.role.grant("role5", "read", "space", "_space") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - _space ... box.session.su("user1") --- ... box.space._space.index.name:get{"_space"}[3] --- - _space ... box.session.su("admin") --- ... box.schema.role.revoke("role5", "read", "space", "_space") --- ... box.session.su("user") --- ... box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user' ... box.session.su("user1") --- ... box.space._space.index.name:get{"_space"}[3] --- - error: Read access to space '_space' is denied for user 'user1' ... box.session.su("admin") --- ... box.schema.user.drop("user") --- ... box.schema.user.drop("user1") --- ... box.schema.role.drop("role1") --- ... box.schema.role.drop("role2") --- ... box.schema.role.drop("role3") --- ... box.schema.role.drop("role4") --- ... box.schema.role.drop("role5") --- ... box.schema.role.drop("role6") --- ... 
box.schema.role.drop("role7") --- ... box.schema.role.drop("role9") --- ... box.schema.role.drop("role10") --- ... -- -- only the creator of the role can grant it (or a superuser) -- There is no grant option. -- the same applies for privileges -- box.schema.user.create('user') --- ... box.schema.user.create('grantee') --- ... box.schema.user.grant('user', 'read,write,execute,create', 'universe') --- ... box.session.su('user') --- ... box.schema.role.create('role') --- ... box.session.su('admin') --- ... box.schema.user.grant('grantee', 'role') --- ... box.schema.user.revoke('grantee', 'role') --- ... box.schema.user.create('john') --- ... box.session.su('john') --- ... -- error box.schema.user.grant('grantee', 'role') --- - error: Read access to space '_user' is denied for user 'john' ... -- box.session.su('admin') --- ... _ = box.schema.space.create('test') --- ... box.schema.user.grant('john', 'read,write,execute', 'universe') --- ... box.session.su('john') --- ... box.schema.user.grant('grantee', 'role') --- - error: Grant access to role 'role' is denied for user 'john' ... box.schema.user.grant('grantee', 'read', 'space', 'test') --- - error: Grant access to space 'test' is denied for user 'john' ... -- -- granting 'public' is however an exception - everyone -- can grant 'public' role, it's implicitly granted with -- a grant option. -- box.schema.user.grant('grantee', 'public') --- - error: User 'grantee' already has role 'public' ... -- -- revoking role 'public' is another deal - only the -- superuser can do that, and even that would be useless, -- since one can still re-grant it back to oneself. -- box.schema.user.revoke('grantee', 'public') --- - error: Revoke access to role 'public' is denied for user 'john' ... box.session.su('admin') --- ... box.schema.user.drop('john') --- ... box.schema.user.drop('user') --- ... box.schema.user.drop('grantee') --- ... box.schema.role.drop('role') --- ... box.space.test:drop() --- ... 
-- -- grant a privilege through a role, but -- the user has another privilege either granted -- natively (one case) or via another role. -- Check that privileges actually OR, but -- not replace each other. -- _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary') --- ... box.schema.user.create('john') --- ... box.schema.user.grant('john', 'read', 'space', 'test') --- ... box.session.su('john') --- ... box.space.test:select{} --- - [] ... box.space.test:insert{1} --- - error: Write access to space 'test' is denied for user 'john' ... box.session.su('admin') --- ... box.schema.role.grant('public', 'write', 'space', 'test') --- ... box.session.su('john') --- ... box.space.test:select{} --- - [] ... box.space.test:insert{2} --- - [2] ... box.session.su('admin') --- ... box.schema.role.revoke('public', 'write', 'space', 'test') --- ... box.session.su('john') --- ... box.space.test:select{} --- - - [2] ... box.space.test:insert{1} --- - error: Write access to space 'test' is denied for user 'john' ... box.session.su('admin') --- ... box.space.test:drop() --- ... box.schema.user.drop('john') --- ... -- test ER_GRANT box.space._priv:replace{1, 0, 'universe', 0, 0} --- - error: 'Incorrect grant arguments: the grant tuple has no privileges' ... -- role.exists() -- -- true if the role is present box.schema.role.exists('public') --- - true ... -- for if there is no such role box.schema.role.exists('nosuchrole') --- - false ... -- false for users box.schema.role.exists('guest') --- - false ... -- false for users box.schema.role.exists('admin') --- - false ... -- role id is ok box.schema.role.exists(3) --- - true ... -- user id box.schema.role.exists(0) --- - false ... box.schema.role.create('public', { if_not_exists = true}) --- ... box.schema.user.create('admin', { if_not_exists = true}) --- ... box.schema.user.create('guest', { if_not_exists = true}) --- ... box.schema.user.create('test', { if_not_exists = true}) --- ... 
box.schema.user.create('test', { if_not_exists = true}) --- ... box.schema.role.drop('test', { if_not_exists = true}) --- - error: Illegal parameters, unexpected option 'if_not_exists' ... box.schema.role.drop('test', { if_exists = true}) --- ... box.schema.role.create('test', { if_not_exists = true}) --- ... box.schema.role.create('test', { if_not_exists = true}) --- ... box.schema.user.drop('test', { if_not_exists = true}) --- - error: Illegal parameters, unexpected option 'if_not_exists' ... -- gh-664 roles: accepting bad syntax for create box.schema.role.create('role', 'role') --- - error: Illegal parameters, options should be a table ... box.schema.role.drop('role', 'role') --- - error: Illegal parameters, options should be a table ... box.schema.user.drop('test', { if_exists = true}) --- ... -- gh-663: inconsistent roles grant/revoke box.schema.role.create('X1') --- ... box.schema.role.create('X2') --- ... box.schema.role.info('X1') --- - [] ... box.schema.role.grant('X1','read','role','X2') --- - error: Unsupported role privilege 'read' ... box.schema.role.info('X1') --- - [] ... box.schema.role.revoke('X1','read','role','X2') --- - error: Unsupported role privilege 'read' ... box.schema.role.info('X1') --- - [] ... box.schema.role.drop('X1') --- ... box.schema.role.drop('X2') --- ... -- gh-867 inconsistent role/user info box.schema.role.create('test_role') --- ... box.schema.role.info('test_role') --- - [] ... box.schema.user.info('test_role') --- - error: User 'test_role' is not found ... box.schema.role.info('test_not_exist') --- - error: Role 'test_not_exist' is not found ... box.schema.user.create('test_user') --- ... box.schema.user.info('test_user') --- - - - execute - role - public - - session,usage - universe - ... box.schema.role.info('test_user') --- - error: Role 'test_user' is not found ... box.schema.user.info('test_not_exist') --- - error: User 'test_not_exist' is not found ... box.schema.role.drop('test_role') --- ... 
box.schema.user.drop('test_user') --- ... --gh-1266 if_exists for user drop box.schema.user.create('test_1266') --- ... box.schema.user.drop('test_1266') --- ... box.schema.user.drop('test_1266') --- - error: User 'test_1266' is not found ... box.schema.user.drop('test_1266', { if_exists = true}) --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/misc.test.lua0000664000000000000000000002127613306565107020407 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("push filter 'table: .*' to 'table:
'") -- gh-266: box.info() crash on uncofigured box package.loaded['box.space'] == nil package.loaded['box.index'] == nil package.loaded['box.tuple'] == nil package.loaded['box.error'] == nil package.loaded['box.info'] == nil package.loaded['box.stat'] == nil package.loaded['box.session'] == nil space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'hash' }) -- Test Lua from admin console. Whenever producing output, -- make sure it's a valid YAML. ' lua says: hello' -- # What's in the box? t = {} for n in pairs(box) do table.insert(t, tostring(n)) end table.sort(t) t t = nil ---------------- -- # box.error ---------------- test_run:cmd("restart server default") env = require('test_run') test_run = env.new() box.error.last() box.error({code = 123, reason = 'test'}) box.error(box.error.ILLEGAL_PARAMS, "bla bla") box.error() box.error.raise() e = box.error.last() e e:unpack() e.type e.code e.message tostring(e) e = nil box.error.clear() box.error.last() box.error.raise() space = box.space.tweedledum -- -- gh-2080: box.error() crashes with wrong parameters box.error(box.error.UNSUPPORTED, "x", "x%s") box.error(box.error.UNSUPPORTED, "x") box.error(box.error.UNSUPPORTED) ---------------- -- # box.stat ---------------- t = {} test_run:cmd("setopt delimiter ';'") for k, v in pairs(box.stat()) do table.insert(t, k) end; for k, v in pairs(box.stat().DELETE) do table.insert(t, k) end; for k, v in pairs(box.stat.DELETE) do table.insert(t, k) end; t; ---------------- -- # box.space ---------------- type(box); type(box.space); t = {}; for i, v in pairs(space.index[0].parts[1]) do table.insert(t, tostring(i)..' 
: '..tostring(v)) end; t; ---------------- -- # box.slab ---------------- string.match(tostring(box.slab.info()), '^table:') ~= nil; box.slab.info().arena_used >= 0; box.slab.info().arena_size > 0; string.match(tostring(box.slab.stats()), '^table:') ~= nil; t = {}; for k, v in pairs(box.slab.info()) do table.insert(t, k) end; t; box.runtime.info().used > 0; box.runtime.info().maxalloc > 0; -- -- gh-502: box.slab.info() excessively sparse array -- type(require('yaml').encode(box.slab.info())); ---------------- -- # box.error ---------------- t = {} for k,v in pairs(box.error) do table.insert(t, 'box.error.'..tostring(k)..' : '..tostring(v)) end; t; test_run:cmd("setopt delimiter ''"); -- A test case for Bug#901674 -- No way to inspect exceptions from Box in Lua -- function myinsert(tuple) box.space.tweedledum:insert(tuple) end pcall(myinsert, {99, 1, 1953719668}) pcall(myinsert, {1, 'hello'}) pcall(myinsert, {1, 'hello'}) box.space.tweedledum:truncate() myinsert = nil -- A test case for gh-37: print of 64-bit number ffi = require('ffi') 1, 1 tonumber64(1), 1 -- Testing 64bit tonumber64() tonumber64('invalid number') tonumber64(123) tonumber64('123') type(tonumber64('4294967296')) == 'number' tonumber64('9223372036854775807') == tonumber64('9223372036854775807') tonumber64('9223372036854775807') - tonumber64('9223372036854775800') tonumber64('18446744073709551615') == tonumber64('18446744073709551615') tonumber64('18446744073709551615') + 1 tonumber64(-1) tonumber64('184467440737095516155') string.byte(require('msgpack').encode(tonumber64(123))) -- A test case for Bug#1061747 'tonumber64 is not transitive' tonumber64(tonumber64(2)) tostring(tonumber64(tonumber64(3))) -- A test case for Bug#1131108 'tonumber64 from negative int inconsistency' tonumber64(-1) tonumber64(-1LL) tonumber64(-1ULL) -1 -1LL -1ULL tonumber64(-1.0) 6LL - 7LL tostring(tonumber64('1234567890123')) == '1234567890123' tostring(tonumber64('12345678901234')) == '12345678901234' 
tostring(tonumber64('123456789012345')) == '123456789012345ULL' tostring(tonumber64('1234567890123456')) == '1234567890123456ULL' tonumber64('0x12') == 18 tonumber64('0x12', 16) == 18 tonumber64('0x12', 17) == nil tonumber64('0b01') == 1 tonumber64('0b01', 2) == 1 tonumber64('0b01', 3) == nil tonumber64(' 0b1 ') == 1 tonumber64(' 0b1 ', 'badbase') tonumber64(' 0b1 ', 123) -- big base tonumber64('12345', 123) -- big base tonumber64('0xfffff') == 1048575 tonumber64('0b111111111111111111') == 262143 tonumber64('20', 36) tonumber64("", 10) tonumber64("", 32) tonumber64("-1") tonumber64("-0x16") tonumber64("-0b11") tonumber64(" -0x16 ") tonumber64(" -0b11 ") -- numbers/cdata with base = 10 - return as is tonumber64(100) tonumber64(100, 10) tonumber64(100LL) tonumber64(100ULL, 10) tonumber64(-100LL) tonumber64(-100LL, 10) tonumber64(ffi.new('char', 10)) tonumber64(ffi.new('short', 10)) tonumber64(ffi.new('int', 10)) tonumber64(ffi.new('int8_t', 10)) tonumber64(ffi.new('int16_t', 10)) tonumber64(ffi.new('int32_t', 10)) tonumber64(ffi.new('int64_t', 10)) tonumber64(ffi.new('unsigned char', 10)) tonumber64(ffi.new('unsigned short', 10)) tonumber64(ffi.new('unsigned int', 10)) tonumber64(ffi.new('unsigned int', 10)) tonumber64(ffi.new('uint8_t', 10)) tonumber64(ffi.new('uint16_t', 10)) tonumber64(ffi.new('uint32_t', 10)) tonumber64(ffi.new('uint64_t', 10)) tonumber64(ffi.new('float', 10)) tonumber64(ffi.new('double', 10)) -- number/cdata with custom `base` - is not supported tonumber64(100, 2) tonumber64(100LL, 2) tonumber64(-100LL, 2) tonumber64(100ULL, 2) tonumber64(ffi.new('char', 10), 2) tonumber64(ffi.new('short', 10), 2) tonumber64(ffi.new('int', 10), 2) tonumber64(ffi.new('int8_t', 10), 2) tonumber64(ffi.new('int16_t', 10), 2) tonumber64(ffi.new('int32_t', 10), 2) tonumber64(ffi.new('int64_t', 10), 2) tonumber64(ffi.new('unsigned char', 10), 2) tonumber64(ffi.new('unsigned short', 10), 2) tonumber64(ffi.new('unsigned int', 10), 2) tonumber64(ffi.new('unsigned int', 
10), 2) tonumber64(ffi.new('uint8_t', 10), 2) tonumber64(ffi.new('uint16_t', 10), 2) tonumber64(ffi.new('uint32_t', 10), 2) tonumber64(ffi.new('uint64_t', 10), 2) tonumber64(ffi.new('float', 10), 2) tonumber64(ffi.new('double', 10), 2) -- invalid types - return nil ffi.cdef("struct __tonumber64_test {};") tonumber64(ffi.new('struct __tonumber64_test')) tonumber64(nil) tonumber64(function() end) tonumber64({}) collectgarbage('collect') -- dostring() dostring('abc') dostring('abc=2') dostring('return abc') dostring('return ...', 1, 2, 3) -- A test case for Bug#1043804 lua error() -> server crash error() -- A test case for bitwise operations bit.lshift(1, 32) bit.band(1, 3) bit.bor(1, 2) space:truncate() dofile('fifo.lua') fifomax fifo_push(space, 1, 1) fifo_push(space, 1, 2) fifo_push(space, 1, 3) fifo_push(space, 1, 4) fifo_push(space, 1, 5) fifo_push(space, 1, 6) fifo_push(space, 1, 7) fifo_push(space, 1, 8) fifo_top(space, 1) space:delete{1} fifo_top(space, 1) space:delete{1} space:drop() test_run:cmd("clear filter") -- test test_run:grep_log() require('log').info('Incorrect password supplied') test_run:grep_log("default", "password") -- some collation test s = box.schema.space.create('test') not not s:create_index('test1', {parts = {{1, 'string', collation = 'Unicode'}}}) not not s:create_index('test2', {parts = {{2, 'string', collation = 'UNICODE'}}}) not not s:create_index('test3', {parts = {{3, 'string', collation = 'UnIcOdE'}}}) -- I'd prefer to panic on that s:create_index('test4', {parts = {{4, 'string'}}}).parts s:create_index('test5', {parts = {{5, 'string', collation = 'Unicode'}}}).parts s:drop() s = box.schema.space.create('test') not not s:create_index('test1', {parts = {{1, 'scalar', collation = 'unicode_ci'}}}) s:replace{1} s:replace{1.1} s:replace{false} s:replace{'Блин'} s:replace{'Ёж'} s:replace{'ешь'} s:replace{'Же'} s:replace{'Уже'} s:replace{'drop'} s:replace{'table'} s:replace{'users'} s:select{} s:select{'еж'} s:drop() s = 
box.schema.space.create('test') not not s:create_index('test1', {parts = {{1, 'number', collation = 'unicode_ci'}}}) not not s:create_index('test2', {parts = {{2, 'unsigned', collation = 'unicode_ci'}}}) not not s:create_index('test3', {parts = {{3, 'integer', collation = 'unicode_ci'}}}) not not s:create_index('test4', {parts = {{4, 'boolean', collation = 'unicode_ci'}}}) s:drop() -- -- gh-2068 no error for invalid user during space creation -- s = box.schema.space.create('test', {user="no_such_user"}) -- Too long WAL write warning (gh-2743). s = box.schema.space.create('test') _ = s:create_index('pk') too_long_threshold = box.cfg.too_long_threshold box.cfg{too_long_threshold = 0} -- log everything expected_rows = 3 expected_lsn = box.info.lsn + 1 box.begin() for i = 1, expected_rows do s:insert{i} end box.commit() msg = test_run:grep_log('default', 'too long WAL write.*') rows, lsn = string.match(msg, '(%d+) rows at LSN (%d+)') rows = tonumber(rows) lsn = tonumber(lsn) rows == expected_rows lsn == expected_lsn box.cfg{too_long_threshold = too_long_threshold} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/access.test.lua0000664000000000000000000004657013306565107020721 0ustar rootrootenv = require('test_run') test_run = env.new() session = box.session -- user id for a Lua session is admin - 1 session.uid() -- extra arguments are ignored session.uid(nil) -- admin session.user() session.effective_user() -- extra argumentes are ignored session.user(nil) -- password() is a function which returns base64(sha1(sha1(password)) -- a string to store in _user table box.schema.user.password('test') box.schema.user.password('test1') -- admin can create any user box.schema.user.create('test', { password = 'test' }) -- su() let's you change the user of the session -- the user will be unabe to change back unless he/she -- is granted access to 'su' session.su('test') -- you can't create spaces unless you have a write access on -- system space _space -- in future we may introduce 
a separate privilege box.schema.space.create('test') -- su() goes through because called from admin -- console, and it has no access checks -- for functions session.su('admin') box.schema.user.grant('test', 'write', 'space', '_space') test_run:cmd("setopt delimiter ';'") function usermax() local i = 1 while true do box.schema.user.create('user'..i) i = i + 1 end end; usermax(); function usermax() local i = 1 while true do box.schema.user.drop('user'..i) i = i + 1 end end; usermax(); test_run:cmd("setopt delimiter ''"); box.schema.user.create('rich') box.schema.user.grant('rich', 'read,write', 'universe') session.su('rich') uid = session.uid() box.schema.func.create('dummy') session.su('admin') box.space['_user']:delete{uid} box.schema.func.drop('dummy') box.space['_user']:delete{uid} box.schema.user.revoke('rich', 'read,write', 'universe') box.schema.user.revoke('rich', 'public') box.schema.user.disable("rich") -- test double disable is a no op box.schema.user.disable("rich") box.space['_user']:delete{uid} box.schema.user.drop('test') -- gh-944 name is too long name = string.rep('a', box.schema.NAME_MAX - 1) box.schema.user.create(name..'aa') box.schema.user.create(name..'a') box.schema.user.drop(name..'a') box.schema.user.create(name) box.schema.user.drop(name) -- sudo box.schema.user.create('tester') -- admin -> user session.user() session.su('tester', function() return session.user(), session.effective_user() end) session.user() -- user -> admin session.su('tester') session.effective_user() session.su('admin', function() return session.user(), session.effective_user() end) session.user() session.effective_user() -- drop current user session.su('admin', function() return box.schema.user.drop('tester') end) session.user() session.su('admin') box.schema.user.drop('tester') session.user() -------------------------------------------------------------------------------- -- Check if identifiers obey the common constraints 
-------------------------------------------------------------------------------- identifier = require("identifier") test_run:cmd("setopt delimiter ';'") identifier.run_test( function (identifier) box.schema.user.create(identifier) box.schema.user.grant(identifier, 'super') box.session.su(identifier) box.session.su("admin") box.schema.user.revoke(identifier, 'super') end, box.schema.user.drop ); identifier.run_test( function (identifier) box.schema.role.create(identifier) box.schema.role.grant(identifier, 'execute,read,write', 'universe', nil, {if_not_exists = false}) end, box.schema.role.drop ); test_run:cmd("setopt delimiter ''"); -- valid identifiers box.schema.user.create('Петя_Иванов') box.schema.user.drop('Петя_Иванов') -- gh-300: misleading error message if a function does not exist LISTEN = require('uri').parse(box.cfg.listen) LISTEN ~= nil c = (require 'net.box').connect(LISTEN.host, LISTEN.service) c:call('nosuchfunction') function nosuchfunction() end c:call('nosuchfunction') nosuchfunction = nil c:call('nosuchfunction') c:close() -- Dropping a space recursively drops all grants - it's possible to -- restore from a snapshot box.schema.user.create('testus') s = box.schema.space.create('admin_space') index = s:create_index('primary', {type = 'hash', parts = {1, 'unsigned'}}) box.schema.user.grant('testus', 'write', 'space', 'admin_space') s:drop() box.snapshot() test_run:cmd('restart server default') box.schema.user.drop('testus') -- ------------------------------------------------------------ -- a test case for gh-289 -- box.schema.user.drop() with cascade doesn't work -- ------------------------------------------------------------ session = box.session box.schema.user.create('uniuser') box.schema.user.grant('uniuser', 'read, write, execute', 'universe') session.su('uniuser') us = box.schema.space.create('uniuser_space') session.su('admin') box.schema.user.drop('uniuser') -- ------------------------------------------------------------ -- A test case for 
gh-253 -- A user with universal grant has no access to drop oneself -- ------------------------------------------------------------ -- This behaviour is expected, since an object may be destroyed -- only by its creator at the moment -- ------------------------------------------------------------ box.schema.user.create('grantor') box.schema.user.grant('grantor', 'read, write, execute', 'universe') session.su('grantor') box.schema.user.create('grantee') box.schema.user.grant('grantee', 'read, write, execute', 'universe') session.su('grantee') -- fails - can't suicide - ask the creator to kill you box.schema.user.drop('grantee') session.su('grantor') box.schema.user.drop('grantee') -- fails, can't kill oneself box.schema.user.drop('grantor') session.su('admin') box.schema.user.drop('grantor') -- ---------------------------------------------------------- -- A test case for gh-299 -- It appears to be too easy to read all fields in _user -- table -- guest can't read _user table, add a test case -- ---------------------------------------------------------- session.su('guest') box.space._user:select{0} box.space._user:select{1} session.su('admin') -- ---------------------------------------------------------- -- A test case for gh-358 Change user does not work from lua -- Correct the update syntax in schema.lua -- ---------------------------------------------------------- box.schema.user.create('user1') box.space._user.index.name:select{'user1'} session.su('user1') box.schema.user.passwd('new_password') session.su('admin') box.space._user.index.name:select{'user1'} box.schema.user.passwd('user1', 'extra_new_password') box.space._user.index.name:select{'user1'} box.schema.user.passwd('invalid_user', 'some_password') box.schema.user.passwd() session.su('user1') -- permission denied box.schema.user.passwd('admin', 'xxx') session.su('admin') box.schema.user.drop('user1') box.space._user.index.name:select{'user1'} -- ---------------------------------------------------------- -- 
A test case for gh-421 Granting a privilege revokes an -- existing grant -- ---------------------------------------------------------- box.schema.user.create('user') id = box.space._user.index.name:get{'user'}[1] box.schema.user.grant('user', 'read,write', 'universe') box.space._priv:select{id} box.schema.user.grant('user', 'read', 'universe') box.space._priv:select{id} box.schema.user.revoke('user', 'write', 'universe') box.space._priv:select{id} box.schema.user.revoke('user', 'read', 'universe') box.space._priv:select{id} box.schema.user.grant('user', 'write', 'universe') box.space._priv:select{id} box.schema.user.grant('user', 'read', 'universe') box.space._priv:select{id} box.schema.user.drop('user') box.space._priv:select{id} -- ----------------------------------------------------------- -- Be a bit more rigorous in what is accepted in space _user -- ----------------------------------------------------------- utils = require('utils') box.space._user:insert{10, 1, 'name', 'strange-object-type', utils.setmap({})} box.space._user:insert{10, 1, 'name', 'role', utils.setmap{'password'}} session = nil -- ----------------------------------------------------------- -- admin can't manage grants on not owned objects -- ----------------------------------------------------------- box.schema.user.create('twostep') box.schema.user.grant('twostep', 'read,write,execute', 'universe') box.session.su('twostep') twostep = box.schema.space.create('twostep') index2 = twostep:create_index('primary') box.schema.func.create('test') box.session.su('admin') box.schema.user.revoke('twostep', 'execute,read,write', 'universe') box.schema.user.create('twostep_client') box.schema.user.grant('twostep_client', 'execute', 'function', 'test') box.schema.user.drop('twostep') box.schema.user.drop('twostep_client') -- the space is dropped when the user is dropped -- -- box.schema.user.exists() box.schema.user.exists('guest') box.schema.user.exists(nil) box.schema.user.exists(0) 
box.schema.user.exists(1) box.schema.user.exists(100500) box.schema.user.exists('admin') box.schema.user.exists('nosuchuser') box.schema.user.exists{} -- gh-671: box.schema.func.exists() box.schema.func.exists('nosuchfunc') box.schema.func.exists('guest') box.schema.func.exists(1) box.schema.func.exists(2) box.schema.func.exists('box.schema.user.info') box.schema.func.exists() box.schema.func.exists(nil) -- gh-665: user.exists() should nto be true for roles box.schema.user.exists('public') box.schema.role.exists('public') box.schema.role.exists(nil) -- test if_exists/if_not_exists in grant/revoke box.schema.user.grant('guest', 'read,write,execute', 'universe') box.schema.user.grant('guest', 'read,write,execute', 'universe') box.schema.user.grant('guest', 'read,write,execute', 'universe', '', { if_not_exists = true }) box.schema.user.revoke('guest', 'read,write,execute', 'universe') box.schema.user.revoke('guest', 'usage,session', 'universe') box.schema.user.revoke('guest', 'read,write,execute', 'universe') box.schema.user.revoke('guest', 'read,write,execute', 'universe', '', { if_exists = true }) box.schema.user.grant('guest', 'usage,session', 'universe') box.schema.func.create('dummy', { if_not_exists = true }) box.schema.func.create('dummy', { if_not_exists = true }) box.schema.func.drop('dummy') -- gh-664 roles: accepting bad syntax for create box.schema.user.create('user', 'blah') box.schema.user.drop('user', 'blah') -- gh-664 roles: accepting bad syntax for create box.schema.func.create('func', 'blah') box.schema.func.drop('blah', 'blah') -- gh-758 attempt to set password for user guest box.schema.user.passwd('guest', 'sesame') -- gh-1205 box.schema.user.info fails box.schema.user.drop('guest') box.schema.role.drop('guest') box.space._user.index.name:delete{'guest'} box.space._user:delete{box.schema.GUEST_ID} #box.schema.user.info('guest') > 0 box.schema.user.drop('admin') box.schema.role.drop('admin') box.space._user.index.name:delete{'admin'} 
box.space._user:delete{box.schema.ADMIN_ID} #box.schema.user.info('admin') > 0 box.schema.user.drop('public') box.schema.role.drop('public') box.space._user.index.name:delete{'public'} box.space._user:delete{box.schema.PUBLIC_ROLE_ID} #box.schema.role.info('public') > 0 box.schema.role.drop('super') box.schema.user.drop('super') box.space._user.index.name:delete{'super'} box.space._user:delete{box.schema.SUPER_ROLE_ID} #box.schema.role.info('super') > 0 -- gh-944 name is too long name = string.rep('a', box.schema.NAME_MAX - 1) box.schema.func.create(name..'aa') box.schema.func.create(name..'a') box.schema.func.drop(name..'a') box.schema.func.create(name) box.schema.func.drop(name) -- A test case for: http://bugs.launchpad.net/bugs/712456 -- Verify that when trying to access a non-existing or -- very large space id, no crash occurs. LISTEN = require('uri').parse(box.cfg.listen) c = (require 'net.box').connect(LISTEN.host, LISTEN.service) c:_request("select", nil, 1, box.index.EQ, 0, 0, 0xFFFFFFFF, {}) c:_request("select", nil, 65537, box.index.EQ, 0, 0, 0xFFFFFFFF, {}) c:_request("select", nil, 4294967295, box.index.EQ, 0, 0, 0xFFFFFFFF, {}) c:close() session = box.session box.schema.user.create('test') box.schema.user.grant('test', 'read,write', 'universe') session.su('test') box.internal.collation.create('test', 'ICU', 'ru_RU') session.su('admin') box.internal.collation.drop('test') -- success box.internal.collation.create('test', 'ICU', 'ru_RU') session.su('test') box.internal.collation.drop('test') -- fail session.su('admin') box.internal.collation.drop('test') -- success box.schema.user.drop('test') -- -- gh-2710 object drop revokes all associated privileges -- _ = box.schema.space.create('test_space') _ = box.schema.sequence.create('test_sequence') box.schema.func.create('test_function') box.schema.user.create('test_user') box.schema.user.grant('test_user', 'read', 'space', 'test_space') box.schema.user.grant('test_user', 'write', 'sequence', 'test_sequence') 
box.schema.user.grant('test_user', 'execute', 'function', 'test_function') box.schema.role.create('test_role') box.schema.role.grant('test_role', 'read', 'space', 'test_space') box.schema.role.grant('test_role', 'write', 'sequence', 'test_sequence') box.schema.role.grant('test_role', 'execute', 'function', 'test_function') box.schema.user.info('test_user') box.schema.role.info('test_role') box.space.test_space:drop() box.sequence.test_sequence:drop() box.schema.func.drop('test_function') box.schema.user.info('test_user') box.schema.role.info('test_role') box.schema.user.drop('test_user') box.schema.role.drop('test_role') -- gh-3023: box.session.su() changes both authenticated and effective -- user, while should only change the effective user -- function uids() return { uid = box.session.uid(), euid = box.session.euid() } end box.session.su('guest') uids() box.session.su('admin') box.session.su('guest', uids) -- -- gh-2898 System privileges -- s = box.schema.create_space("tweed") _ = s:create_index('primary', {type = 'hash', parts = {1, 'unsigned'}}) box.schema.user.create('test', {password="pass"}) box.schema.user.grant('test', 'read,write', 'universe') -- other users can't disable box.schema.user.create('test1') session.su("test1") box.schema.user.disable("test") session.su("admin") box.schema.user.disable("test") -- test double disable is a no op box.schema.user.disable("test") session.su("test") c = (require 'net.box').connect(LISTEN.host, LISTEN.service, {user="test", password="pass"}) c.state c.error session.su("test1") box.schema.user.grant("test", "usage", "universe") session.su('admin') box.schema.user.grant("test", "session", "universe") session.su("test") s:select{} session.su('admin') box.schema.user.enable("test") -- check enable not fails on double enabling box.schema.user.enable("test") session.su("test") s:select{} session.su("admin") box.schema.user.drop('test') box.schema.user.drop('test1') s:drop() -- -- gh-3022 role 'super' -- s = 
box.schema.space.create("admin_space") box.schema.user.grant('guest', 'super') box.session.su('guest') _ = box.schema.space.create('test') box.space.test:drop() _ = box.schema.user.create('test') box.schema.user.drop('test') _ = box.schema.func.create('test') box.schema.func.drop('test') -- gh-3088 bug: super role lacks drop privileges on other users' spaces s:drop() box.session.su('admin') box.schema.user.revoke('guest', 'super') box.session.su('guest') box.schema.space.create('test') box.schema.user.create('test') box.schema.func.create('test') box.session.su('admin') -- -- gh-2911 on_access_denied trigger -- obj_type = nil obj_name = nil op_type = nil euid = nil auid = nil function access_denied_trigger(op, type, name) obj_type = type; obj_name = name; op_type = op end function uid() euid = box.session.euid(); auid = box.session.uid() end _ = box.session.on_access_denied(access_denied_trigger) _ = box.session.on_access_denied(uid) s = box.schema.space.create('admin_space', {engine="vinyl"}) seq = box.schema.sequence.create('test_sequence') index = s:create_index('primary', {type = 'tree', parts = {1, 'unsigned'}}) box.schema.user.create('test_user', {password="pass"}) box.session.su("test_user") s:select{} obj_type, obj_name, op_type euid, auid seq:set(1) obj_type, obj_name, op_type euid, auid box.session.su("admin") c = (require 'net.box').connect(LISTEN.host, LISTEN.service, {user="test_user", password="pass"}) function func() end st, e = pcall(c.call, c, func) obj_type, op_type euid, auid obj_name:match("function") box.schema.user.revoke("test_user", "usage", "universe") box.session.su("test_user") st, e = pcall(s.select, s, {}) e = e:unpack() e.type, e.access_type, e.object_type, e.message obj_type, obj_name, op_type euid, auid box.session.su("admin") box.schema.user.revoke("test_user", "session", "universe") c = (require 'net.box').connect(LISTEN.host, LISTEN.service, {user="test_user", password="pass"}) obj_type, obj_name, op_type euid, auid 
box.session.on_access_denied(nil, access_denied_trigger) box.session.on_access_denied(nil, uid) box.schema.user.drop("test_user") seq:drop() s:drop() -- -- gh-945 create, drop, alter privileges -- box.schema.user.create("tester") s = box.schema.space.create("test") u = box.schema.user.create("test") f = box.schema.func.create("test") box.schema.user.grant("tester", "read,execute", "universe") -- failed create box.session.su("tester", box.schema.space.create, "test_space") box.session.su("tester", box.schema.user.create, 'test_user') box.session.su("tester", box.schema.func.create, 'test_func') -- -- FIXME 2.0: we still need to grant 'write' on universe -- explicitly since we still use process_rw to write to system -- tables from ddl -- box.schema.user.grant("tester", "create,write", "universe") -- successful create s1 = box.session.su("tester", box.schema.space.create, "test_space") _ = box.session.su("tester", box.schema.user.create, 'test_user') _ = box.session.su("tester", box.schema.func.create, 'test_func') -- successful drop of owned objects _ = box.session.su("tester", s1.drop, s1) _ = box.session.su("tester", box.schema.user.drop, 'test_user') _ = box.session.su("tester", box.schema.func.drop, 'test_func') -- failed alter -- box.session.su("tester", s.format, s, {name="id", type="unsigned"}) -- box.schema.user.grant("tester", "alter", "universe") -- successful alter -- box.session.su("tester", s.format, s, {name="id", type="unsigned"}) -- failed drop -- box.session.su("tester", s.drop, s) -- can't use here sudo -- because drop use sudo inside -- and currently sudo can't be performed nested box.session.su("tester") box.schema.user.drop("test") box.session.su("admin") box.session.su("tester", box.schema.func.drop, "test") box.schema.user.grant("tester", "drop", "universe") -- successful drop box.session.su("tester", s.drop, s) box.session.su("tester", box.schema.user.drop, "test") box.session.su("tester", box.schema.func.drop, "test") box.session.su("admin") 
box.schema.user.drop("tester") -- gh-3146 gotcha for granting universe with options box.schema.user.grant("guest", "read", "universe", {if_not_exists = true}) box.schema.user.grant("guest", "read", "universe", "useless name") box.schema.user.grant("guest", "read", "universe", "useless name", {if_not_exists = true}) box.schema.user.grant("guest", "read", "universe", 0, {if_not_exists = true}) box.schema.user.grant("guest", "read", "universe", nil, {if_not_exists = true}) box.schema.user.grant("guest", "read", "universe", {}, {if_not_exists = true}) box.schema.user.revoke("guest", "read", "universe", {if_exists = true}) box.schema.user.revoke("guest", "read", "universe", "useless name") box.schema.user.revoke("guest", "read", "universe", "useless name", {if_exists = true}) box.schema.user.revoke("guest", "read", "universe", 0, {if_exists = true}) box.schema.user.revoke("guest", "read", "universe", nil, {if_exists = true}) box.schema.user.revoke("guest", "read", "universe", {}, {if_exists = true}) tarantool_1.9.1.26.g63eb81e3c/test/box/access_escalation.test.lua0000664000000000000000000000421113306560010023071 0ustar rootrootfiber = require('fiber') net = require('net.box') log = require('log') json = require('json') os = require('os') -- gh-617: guest access denied because of setuid -- function invocation. -- Test for privilege escalation -- ----------------------------- -- * create a setuid function which changes effective id -- to superuser -- * invoke it via the binary protocol -- * while the function is running, invoke a non-setuid function -- which reads a system space. -- -- The invoked function should get "Access denied" error, -- there should be no privilege escalation. 
-- define functions function setuid() fiber.sleep(1000000) end function escalation() return box.space._space:get{box.schema.SPACE_ID} ~= nil end -- set up grants box.schema.func.create('setuid', {setuid=true}) box.schema.func.create('escalation') box.schema.user.grant('guest', 'execute', 'function', 'setuid') box.schema.user.grant('guest', 'execute', 'function', 'escalation') connection = net:connect(os.getenv("LISTEN")) background = fiber.create(function() connection:call("setuid") end) connection:call("escalation") fiber.cancel(background) -- -- tear down the functions; the grants are dropped recursively -- box.schema.func.drop('setuid') box.schema.func.drop('escalation') connection:close() -- Test for privilege de-escalation -- -------------------------------- -- -- * create a setuid function which runs under a deprived user -- * invoke the function, let it sleep -- * invoke a function which should have privileges -- -- create a deprived user box.schema.user.create('underprivileged') box.schema.user.grant('underprivileged', 'read,write', 'space', '_func') box.session.su('underprivileged') box.schema.func.create('setuid', {setuid=true}) box.session.su('admin') -- -- create a deprived function -- box.schema.user.grant('guest', 'read,write,execute', 'universe') connection = net:connect(os.getenv("LISTEN")) background = fiber.create(function() connection:call("setuid") end) connection:call("escalation") fiber.cancel(background) -- tear down box.schema.user.drop('underprivileged') box.schema.user.revoke('guest', 'read,write,execute', 'universe') connection:close() tarantool_1.9.1.26.g63eb81e3c/test/box/bitset.test.lua0000664000000000000000000001564613306560010020736 0ustar rootrootdofile('bitset.lua') create_space() ------------------------------------------------------------------------------ -- BitsetIndex: insert/delete ------------------------------------------------------------------------------ test_insert_delete(128) 
------------------------------------------------------------------------------ -- BitsetIndex: ALL ------------------------------------------------------------------------------ clear() fill(1, 128) dump(box.index.BITS_ALL) box.space.tweedledum.index.bitset:count() ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_SET (single bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_SET, 0) box.space.tweedledum.index.bitset:count(0, { iterator = box.index.BITS_ALL_SET}) dump(box.index.BITS_ALL_SET, 1) box.space.tweedledum.index.bitset:count(1, { iterator = box.index.BITS_ALL_SET}) dump(box.index.BITS_ALL_SET, 2) box.space.tweedledum.index.bitset:count(2, { iterator = box.index.BITS_ALL_SET}) dump(box.index.BITS_ALL_SET, 8) box.space.tweedledum.index.bitset:count(8, { iterator = box.index.BITS_ALL_SET}) dump(box.index.BITS_ALL_SET, 128) box.space.tweedledum.index.bitset:count(128, { iterator = box.index.BITS_ALL_SET}) dump(box.index.BITS_ALL_SET, 1073741824) box.space.tweedledum.index.bitset:count(1073741824, { iterator = box.index.BITS_ALL_SET}) dump(box.index.BITS_ALL_SET, 2147483648) box.space.tweedledum.index.bitset:count(2147483648, { iterator = box.index.BITS_ALL_SET}) ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_SET (multiple bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_SET, 3) dump(box.index.BITS_ALL_SET, 7) dump(box.index.BITS_ALL_SET, 31) dump(box.index.BITS_ALL_SET, 5) dump(box.index.BITS_ALL_SET, 10) dump(box.index.BITS_ALL_SET, 27) dump(box.index.BITS_ALL_SET, 341) dump(box.index.BITS_ALL_SET, 2147483649) dump(box.index.BITS_ALL_SET, 4294967295) ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_NOT_SET (single bit) 
------------------------------------------------------------------------------ dump(box.index.BITS_ALL_NOT_SET, 0) box.space.tweedledum.index.bitset:count(0, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 2) box.space.tweedledum.index.bitset:count(2, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 8) box.space.tweedledum.index.bitset:count(8, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 128) box.space.tweedledum.index.bitset:count(128, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 1073741824) box.space.tweedledum.index.bitset:count(1073741824, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 2147483648) box.space.tweedledum.index.bitset:count(2147483648, { iterator = box.index.BITS_ALL_NOT_SET}) ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_NOT_SET (multiple bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_NOT_SET, 3) box.space.tweedledum.index.bitset:count(3, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 7) box.space.tweedledum.index.bitset:count(7, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 10) box.space.tweedledum.index.bitset:count(10, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 27) box.space.tweedledum.index.bitset:count(27, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 85) box.space.tweedledum.index.bitset:count(85, { iterator = box.index.BITS_ALL_NOT_SET}) dump(box.index.BITS_ALL_NOT_SET, 4294967295) box.space.tweedledum.index.bitset:count(4294967295, { iterator = box.index.BITS_ALL_NOT_SET}) ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ANY_SET (single bit) 
------------------------------------------------------------------------------ dump(box.index.BITS_ANY_SET, 0) box.space.tweedledum.index.bitset:count(0, { iterator = box.index.BITS_ANY_SET}) dump(box.index.BITS_ANY_SET, 16) box.space.tweedledum.index.bitset:count(16, { iterator = box.index.BITS_ANY_SET}) dump(box.index.BITS_ANY_SET, 128) box.space.tweedledum.index.bitset:count(128, { iterator = box.index.BITS_ANY_SET}) ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ANY_SET (multiple bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ANY_SET, 7) dump(box.index.BITS_ANY_SET, 84) dump(box.index.BITS_ANY_SET, 113) drop_space() ------------------------------------------------------------------------------ -- Misc ------------------------------------------------------------------------------ -- gh-1467: invalid iterator type space = box.schema.space.create('test') _ = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) _ = space:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) space.index.bitset:select({1}, { iterator = 'OVERLAPS'}) space:drop() space = nil -- gh-1549: BITSET index with inappropriate types crashes in debug build space = box.schema.space.create('test') _ = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) _ = space:create_index('bitset', { type = 'bitset', parts = {2, 'number'}, unique = false }) space:drop() space = nil -- https://github.com/tarantool/tarantool/issues/1896 wrong countspace = box.schema.space.create('test') s = box.schema.space.create('test') _ = s:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) i = s:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) s:insert{1, 0} s:insert{2, 0} s:insert{3, 0} s:insert{4, 2} s:insert{5, 2} s:insert{6, 3} s:insert{7, 4} 
s:insert{8, 5} s:insert{9, 8} #i:select(7, {iterator = box.index.BITS_ANY_SET}) i:count(7, {iterator = box.index.BITS_ANY_SET}) s:drop() s = nil -- https://github.com/tarantool/tarantool/issues/1946 BITS_ALL_SET crashes s = box.schema.space.create('test') _ = s:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) i = s:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) for i=1,10 do s:insert{i, math.random(8)} end good = true function is_good(key, opts) return #i:select({key}, opts) == i:count({key}, opts) end function check(key, opts) good = good and is_good(key, opts) end for j=1,100 do check(math.random(9) - 1) end for j=1,100 do check(math.random(9) - 1, {iterator = box.index.BITS_ANY_SET}) end for j=1,100 do check(math.random(9) - 1, {iterator = box.index.BITS_ALL_SET}) end for j=1,100 do check(math.random(9) - 1, {iterator = box.index.BITS_ALL_NOT_SET}) end good s:drop() s = nil tarantool_1.9.1.26.g63eb81e3c/test/box/space_bsize.test.lua0000664000000000000000000000145013306560010021717 0ustar rootrootenv = require('test_run') test_run = env.new() utils = dofile('utils.lua') s = box.schema.space.create('space_bsize') idx = s:create_index('primary') for i = 1, 13 do s:insert{ i, string.rep('x', i) } end s:bsize() utils.space_bsize(s) for i = 1, 13, 2 do s:delete{ i } end s:bsize() utils.space_bsize(s) for i = 2, 13, 2 do s:update( { i }, {{ ":", 2, i, 0, string.rep('y', i) }} ) end s:bsize() utils.space_bsize(s) box.snapshot() test_run:cmd("restart server default") utils = dofile('utils.lua') s = box.space['space_bsize'] s:bsize() utils.space_bsize(s) for i = 1, 13, 2 do s:insert{ i, string.rep('y', i) } end s:bsize() utils.space_bsize(s) s:truncate() s:bsize() utils.space_bsize(s) for i = 1, 13 do s:insert{ i, string.rep('x', i) } end s:bsize() utils.space_bsize(s) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/stat.result0000664000000000000000000000232513306565107020200 0ustar rootroot-- clear 
statistics env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default with cleanup=1') box.stat.INSERT.total --- - 0 ... box.stat.DELETE.total --- - 0 ... box.stat.UPDATE.total --- - 0 ... box.stat.REPLACE.total --- - 0 ... box.stat.SELECT.total --- - 1 ... box.stat.ERROR.total --- - 0 ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... -- check stat_cleanup -- add several tuples for i=1,10 do space:insert{i, 'tuple'..tostring(i)} end --- ... box.stat.INSERT.total --- - 12 ... box.stat.DELETE.total --- - 0 ... box.stat.UPDATE.total --- - 1 ... box.stat.REPLACE.total --- - 0 ... box.stat.SELECT.total --- - 4 ... -- check exceptions space:get('Impossible value') --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... box.stat.ERROR.total --- - 1 ... test_run:cmd('restart server default') -- statistics must be zero box.stat.INSERT.total --- - 0 ... box.stat.DELETE.total --- - 0 ... box.stat.UPDATE.total --- - 0 ... box.stat.REPLACE.total --- - 0 ... box.stat.SELECT.total --- - 1 ... box.stat.ERROR.total --- - 0 ... -- cleanup box.space.tweedledum:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/backup_test.lua0000664000000000000000000000015613306560010020760 0ustar rootroot#!/usr/bin/env tarantool box.cfg{listen = os.getenv("LISTEN")} require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/box/cfg.result0000664000000000000000000001726713306565107017777 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter '(error: .*)\\.lua:[0-9]+: ' to '\\1.lua:: '") --- - true ... box.cfg.nosuchoption = 1 --- - error: 'builtin/box/load_cfg.lua:: Attempt to modify a read-only table' ... 
cfg_filter(box.cfg) --- - - - background - false - - checkpoint_count - 2 - - checkpoint_interval - 3600 - - coredump - false - - force_recovery - false - - hot_standby - false - - listen - - - log - - - log_format - plain - - log_level - 5 - - log_nonblock - true - - memtx_dir - - - memtx_max_tuple_size - - - memtx_memory - 107374182 - - memtx_min_tuple_size - - - pid_file - - - read_only - false - - readahead - 16320 - - replication_connect_timeout - 30 - - replication_sync_lag - 10 - - replication_timeout - 1 - - rows_per_wal - 500000 - - slab_alloc_factor - 1.05 - - too_long_threshold - 0.5 - - vinyl_bloom_fpr - 0.05 - - vinyl_cache - 134217728 - - vinyl_dir - - - vinyl_max_tuple_size - 1048576 - - vinyl_memory - 134217728 - - vinyl_page_size - 8192 - - vinyl_range_size - 1073741824 - - vinyl_read_threads - 1 - - vinyl_run_count_per_level - 2 - - vinyl_run_size_ratio - 3.5 - - vinyl_timeout - 60 - - vinyl_write_threads - 2 - - wal_dir - - - wal_dir_rescan_delay - 2 - - wal_max_size - 268435456 - - wal_mode - write - - worker_pool_threads - 4 ... -- must be read-only box.cfg() --- ... 
cfg_filter(box.cfg) --- - - - background - false - - checkpoint_count - 2 - - checkpoint_interval - 3600 - - coredump - false - - force_recovery - false - - hot_standby - false - - listen - - - log - - - log_format - plain - - log_level - 5 - - log_nonblock - true - - memtx_dir - - - memtx_max_tuple_size - - - memtx_memory - 107374182 - - memtx_min_tuple_size - - - pid_file - - - read_only - false - - readahead - 16320 - - replication_connect_timeout - 30 - - replication_sync_lag - 10 - - replication_timeout - 1 - - rows_per_wal - 500000 - - slab_alloc_factor - 1.05 - - too_long_threshold - 0.5 - - vinyl_bloom_fpr - 0.05 - - vinyl_cache - 134217728 - - vinyl_dir - - - vinyl_max_tuple_size - 1048576 - - vinyl_memory - 134217728 - - vinyl_page_size - 8192 - - vinyl_range_size - 1073741824 - - vinyl_read_threads - 1 - - vinyl_run_count_per_level - 2 - - vinyl_run_size_ratio - 3.5 - - vinyl_timeout - 60 - - vinyl_write_threads - 2 - - wal_dir - - - wal_dir_rescan_delay - 2 - - wal_max_size - 268435456 - - wal_mode - write - - worker_pool_threads - 4 ... -- check that cfg with unexpected parameter fails. box.cfg{sherlock = 'holmes'} --- - error: 'Incorrect value for option ''sherlock'': unexpected option' ... -- check that cfg with unexpected type of parameter failes box.cfg{listen = {}} --- - error: 'Incorrect value for option ''listen'': should be one of types string, number' ... box.cfg{wal_dir = 0} --- - error: 'Incorrect value for option ''wal_dir'': should be of type string' ... box.cfg{coredump = 'true'} --- - error: 'Incorrect value for option ''coredump'': should be of type boolean' ... -- check comment to issue #2191 - bad argument #2 to ''uri_parse'' box.cfg{replication = {}} --- ... box.cfg{replication = {}} --- ... 
-------------------------------------------------------------------------------- -- Test of hierarchical cfg type check -------------------------------------------------------------------------------- box.cfg{memtx_memory = "100500"} --- - error: 'Incorrect value for option ''memtx_memory'': should be of type number' ... box.cfg{vinyl = "vinyl"} --- - error: 'Incorrect value for option ''vinyl'': unexpected option' ... box.cfg{vinyl_write_threads = "threads"} --- - error: 'Incorrect value for option ''vinyl_write_threads'': should be of type number' ... box.cfg{instance_uuid = box.info.uuid} --- ... box.cfg{instance_uuid = '12345678-0123-5678-1234-abcdefabcdef'} --- - error: 'Incorrect value for option ''instance_uuid'': Can''t change instance uuid' ... box.cfg{replicaset_uuid = box.info.cluster.uuid} --- ... box.cfg{replicaset_uuid = '12345678-0123-5678-1234-abcdefabcdef'} --- - error: 'Incorrect value for option ''replicaset_uuid'': Can''t change replicaset uuid' ... -------------------------------------------------------------------------------- -- Test of default cfg options -------------------------------------------------------------------------------- test_run:cmd('create server cfg_tester1 with script = "box/lua/cfg_test1.lua"') --- - true ... test_run:cmd("start server cfg_tester1") --- - true ... test_run:cmd('switch cfg_tester1') --- - true ... box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads --- - 268435456 - 1.05 - 2 ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server cfg_tester1") --- - true ... test_run:cmd("cleanup server cfg_tester1") --- - true ... test_run:cmd('create server cfg_tester2 with script = "box/lua/cfg_test2.lua"') --- - true ... test_run:cmd("start server cfg_tester2") --- - true ... test_run:cmd('switch cfg_tester2') --- - true ... box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads --- - 214748364 - 1.05 - 2 ... test_run:cmd("switch default") --- - true ... 
test_run:cmd("stop server cfg_tester2") --- - true ... test_run:cmd("cleanup server cfg_tester2") --- - true ... test_run:cmd('create server cfg_tester3 with script = "box/lua/cfg_test3.lua"') --- - true ... test_run:cmd("start server cfg_tester3") --- - true ... test_run:cmd('switch cfg_tester3') --- - true ... box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads --- - 214748364 - 1.05 - 10 ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server cfg_tester3") --- - true ... test_run:cmd("cleanup server cfg_tester3") --- - true ... test_run:cmd('create server cfg_tester4 with script = "box/lua/cfg_test4.lua"') --- - true ... test_run:cmd("start server cfg_tester4") --- - true ... test_run:cmd('switch cfg_tester4') --- - true ... box.cfg.memtx_memory, box.cfg.slab_alloc_factor, box.cfg.vinyl_write_threads --- - 268435456 - 3.14 - 2 ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server cfg_tester4") --- - true ... test_run:cmd("cleanup server cfg_tester4") --- - true ... -------------------------------------------------------------------------------- -- Check that 'vinyl_dir' cfg option is not checked as long as -- there is no vinyl indexes (issue #2664) -------------------------------------------------------------------------------- test_run:cmd('create server cfg_tester with script = "box/lua/cfg_bad_vinyl_dir.lua"') --- - true ... test_run:cmd("start server cfg_tester") --- - true ... test_run:cmd('switch cfg_tester') --- - true ... _ = box.schema.space.create('test_memtx', {engine = 'memtx'}) --- ... _ = box.space.test_memtx:create_index('pk') -- ok --- ... _ = box.schema.space.create('test_vinyl', {engine = 'vinyl'}) --- ... _ = box.space.test_vinyl:create_index('pk') -- error --- - error: can not access vinyl data directory ... box.snapshot() --- - ok ... test_run:cmd("restart server cfg_tester") test_run:cmd("switch default") --- - true ... test_run:cmd("stop server cfg_tester") --- - true ... 
test_run:cmd("cleanup server cfg_tester") --- - true ... test_run:cmd("clear filter") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/box/bitset.result0000664000000000000000000007673613306560010020524 0ustar rootrootdofile('bitset.lua') --- ... create_space() --- ... ------------------------------------------------------------------------------ -- BitsetIndex: insert/delete ------------------------------------------------------------------------------ test_insert_delete(128) --- - - $ 1$ ... ------------------------------------------------------------------------------ -- BitsetIndex: ALL ------------------------------------------------------------------------------ clear() --- ... fill(1, 128) --- ... dump(box.index.BITS_ALL) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 8$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ - $ 128$ ... box.space.tweedledum.index.bitset:count() --- - 128 ... 
------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_SET (single bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_SET, 0) --- - [] ... box.space.tweedledum.index.bitset:count(0, { iterator = box.index.BITS_ALL_SET}) --- - 0 ... dump(box.index.BITS_ALL_SET, 1) --- - - $ 1$ - $ 3$ - $ 5$ - $ 7$ - $ 9$ - $ 11$ - $ 13$ - $ 15$ - $ 17$ - $ 19$ - $ 21$ - $ 23$ - $ 25$ - $ 27$ - $ 29$ - $ 31$ - $ 33$ - $ 35$ - $ 37$ - $ 39$ - $ 41$ - $ 43$ - $ 45$ - $ 47$ - $ 49$ - $ 51$ - $ 53$ - $ 55$ - $ 57$ - $ 59$ - $ 61$ - $ 63$ - $ 65$ - $ 67$ - $ 69$ - $ 71$ - $ 73$ - $ 75$ - $ 77$ - $ 79$ - $ 81$ - $ 83$ - $ 85$ - $ 87$ - $ 89$ - $ 91$ - $ 93$ - $ 95$ - $ 97$ - $ 99$ - $ 101$ - $ 103$ - $ 105$ - $ 107$ - $ 109$ - $ 111$ - $ 113$ - $ 115$ - $ 117$ - $ 119$ - $ 121$ - $ 123$ - $ 125$ - $ 127$ ... box.space.tweedledum.index.bitset:count(1, { iterator = box.index.BITS_ALL_SET}) --- - 64 ... dump(box.index.BITS_ALL_SET, 2) --- - - $ 2$ - $ 3$ - $ 6$ - $ 7$ - $ 10$ - $ 11$ - $ 14$ - $ 15$ - $ 18$ - $ 19$ - $ 22$ - $ 23$ - $ 26$ - $ 27$ - $ 30$ - $ 31$ - $ 34$ - $ 35$ - $ 38$ - $ 39$ - $ 42$ - $ 43$ - $ 46$ - $ 47$ - $ 50$ - $ 51$ - $ 54$ - $ 55$ - $ 58$ - $ 59$ - $ 62$ - $ 63$ - $ 66$ - $ 67$ - $ 70$ - $ 71$ - $ 74$ - $ 75$ - $ 78$ - $ 79$ - $ 82$ - $ 83$ - $ 86$ - $ 87$ - $ 90$ - $ 91$ - $ 94$ - $ 95$ - $ 98$ - $ 99$ - $ 102$ - $ 103$ - $ 106$ - $ 107$ - $ 110$ - $ 111$ - $ 114$ - $ 115$ - $ 118$ - $ 119$ - $ 122$ - $ 123$ - $ 126$ - $ 127$ ... box.space.tweedledum.index.bitset:count(2, { iterator = box.index.BITS_ALL_SET}) --- - 64 ... 
dump(box.index.BITS_ALL_SET, 8) --- - - $ 8$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ ... box.space.tweedledum.index.bitset:count(8, { iterator = box.index.BITS_ALL_SET}) --- - 64 ... dump(box.index.BITS_ALL_SET, 128) --- - - $ 128$ ... box.space.tweedledum.index.bitset:count(128, { iterator = box.index.BITS_ALL_SET}) --- - 1 ... dump(box.index.BITS_ALL_SET, 1073741824) --- - [] ... box.space.tweedledum.index.bitset:count(1073741824, { iterator = box.index.BITS_ALL_SET}) --- - 0 ... dump(box.index.BITS_ALL_SET, 2147483648) --- - [] ... box.space.tweedledum.index.bitset:count(2147483648, { iterator = box.index.BITS_ALL_SET}) --- - 0 ... ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_SET (multiple bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_SET, 3) --- - - $ 3$ - $ 7$ - $ 11$ - $ 15$ - $ 19$ - $ 23$ - $ 27$ - $ 31$ - $ 35$ - $ 39$ - $ 43$ - $ 47$ - $ 51$ - $ 55$ - $ 59$ - $ 63$ - $ 67$ - $ 71$ - $ 75$ - $ 79$ - $ 83$ - $ 87$ - $ 91$ - $ 95$ - $ 99$ - $ 103$ - $ 107$ - $ 111$ - $ 115$ - $ 119$ - $ 123$ - $ 127$ ... dump(box.index.BITS_ALL_SET, 7) --- - - $ 7$ - $ 15$ - $ 23$ - $ 31$ - $ 39$ - $ 47$ - $ 55$ - $ 63$ - $ 71$ - $ 79$ - $ 87$ - $ 95$ - $ 103$ - $ 111$ - $ 119$ - $ 127$ ... dump(box.index.BITS_ALL_SET, 31) --- - - $ 31$ - $ 63$ - $ 95$ - $ 127$ ... 
dump(box.index.BITS_ALL_SET, 5) --- - - $ 5$ - $ 7$ - $ 13$ - $ 15$ - $ 21$ - $ 23$ - $ 29$ - $ 31$ - $ 37$ - $ 39$ - $ 45$ - $ 47$ - $ 53$ - $ 55$ - $ 61$ - $ 63$ - $ 69$ - $ 71$ - $ 77$ - $ 79$ - $ 85$ - $ 87$ - $ 93$ - $ 95$ - $ 101$ - $ 103$ - $ 109$ - $ 111$ - $ 117$ - $ 119$ - $ 125$ - $ 127$ ... dump(box.index.BITS_ALL_SET, 10) --- - - $ 10$ - $ 11$ - $ 14$ - $ 15$ - $ 26$ - $ 27$ - $ 30$ - $ 31$ - $ 42$ - $ 43$ - $ 46$ - $ 47$ - $ 58$ - $ 59$ - $ 62$ - $ 63$ - $ 74$ - $ 75$ - $ 78$ - $ 79$ - $ 90$ - $ 91$ - $ 94$ - $ 95$ - $ 106$ - $ 107$ - $ 110$ - $ 111$ - $ 122$ - $ 123$ - $ 126$ - $ 127$ ... dump(box.index.BITS_ALL_SET, 27) --- - - $ 27$ - $ 31$ - $ 59$ - $ 63$ - $ 91$ - $ 95$ - $ 123$ - $ 127$ ... dump(box.index.BITS_ALL_SET, 341) --- - [] ... dump(box.index.BITS_ALL_SET, 2147483649) --- - [] ... dump(box.index.BITS_ALL_SET, 4294967295) --- - [] ... ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_NOT_SET (single bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_NOT_SET, 0) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 8$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 
110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ - $ 128$ ... box.space.tweedledum.index.bitset:count(0, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 128 ... dump(box.index.BITS_ALL_NOT_SET, 2) --- - - $ 1$ - $ 4$ - $ 5$ - $ 8$ - $ 9$ - $ 12$ - $ 13$ - $ 16$ - $ 17$ - $ 20$ - $ 21$ - $ 24$ - $ 25$ - $ 28$ - $ 29$ - $ 32$ - $ 33$ - $ 36$ - $ 37$ - $ 40$ - $ 41$ - $ 44$ - $ 45$ - $ 48$ - $ 49$ - $ 52$ - $ 53$ - $ 56$ - $ 57$ - $ 60$ - $ 61$ - $ 64$ - $ 65$ - $ 68$ - $ 69$ - $ 72$ - $ 73$ - $ 76$ - $ 77$ - $ 80$ - $ 81$ - $ 84$ - $ 85$ - $ 88$ - $ 89$ - $ 92$ - $ 93$ - $ 96$ - $ 97$ - $ 100$ - $ 101$ - $ 104$ - $ 105$ - $ 108$ - $ 109$ - $ 112$ - $ 113$ - $ 116$ - $ 117$ - $ 120$ - $ 121$ - $ 124$ - $ 125$ - $ 128$ ... box.space.tweedledum.index.bitset:count(2, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 64 ... dump(box.index.BITS_ALL_NOT_SET, 8) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 128$ ... box.space.tweedledum.index.bitset:count(8, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 64 ... 
dump(box.index.BITS_ALL_NOT_SET, 128) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 8$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ ... box.space.tweedledum.index.bitset:count(128, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 127 ... 
dump(box.index.BITS_ALL_NOT_SET, 1073741824) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 8$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ - $ 128$ ... box.space.tweedledum.index.bitset:count(1073741824, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 128 ... 
dump(box.index.BITS_ALL_NOT_SET, 2147483648) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 8$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ - $ 128$ ... box.space.tweedledum.index.bitset:count(2147483648, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 128 ... ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ALL_NOT_SET (multiple bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ALL_NOT_SET, 3) --- - - $ 4$ - $ 8$ - $ 12$ - $ 16$ - $ 20$ - $ 24$ - $ 28$ - $ 32$ - $ 36$ - $ 40$ - $ 44$ - $ 48$ - $ 52$ - $ 56$ - $ 60$ - $ 64$ - $ 68$ - $ 72$ - $ 76$ - $ 80$ - $ 84$ - $ 88$ - $ 92$ - $ 96$ - $ 100$ - $ 104$ - $ 108$ - $ 112$ - $ 116$ - $ 120$ - $ 124$ - $ 128$ ... box.space.tweedledum.index.bitset:count(3, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 32 ... dump(box.index.BITS_ALL_NOT_SET, 7) --- - - $ 8$ - $ 16$ - $ 24$ - $ 32$ - $ 40$ - $ 48$ - $ 56$ - $ 64$ - $ 72$ - $ 80$ - $ 88$ - $ 96$ - $ 104$ - $ 112$ - $ 120$ - $ 128$ ... 
box.space.tweedledum.index.bitset:count(7, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 16 ... dump(box.index.BITS_ALL_NOT_SET, 10) --- - - $ 1$ - $ 4$ - $ 5$ - $ 16$ - $ 17$ - $ 20$ - $ 21$ - $ 32$ - $ 33$ - $ 36$ - $ 37$ - $ 48$ - $ 49$ - $ 52$ - $ 53$ - $ 64$ - $ 65$ - $ 68$ - $ 69$ - $ 80$ - $ 81$ - $ 84$ - $ 85$ - $ 96$ - $ 97$ - $ 100$ - $ 101$ - $ 112$ - $ 113$ - $ 116$ - $ 117$ - $ 128$ ... box.space.tweedledum.index.bitset:count(10, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 32 ... dump(box.index.BITS_ALL_NOT_SET, 27) --- - - $ 4$ - $ 32$ - $ 36$ - $ 64$ - $ 68$ - $ 96$ - $ 100$ - $ 128$ ... box.space.tweedledum.index.bitset:count(27, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 8 ... dump(box.index.BITS_ALL_NOT_SET, 85) --- - - $ 2$ - $ 8$ - $ 10$ - $ 32$ - $ 34$ - $ 40$ - $ 42$ - $ 128$ ... box.space.tweedledum.index.bitset:count(85, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 8 ... dump(box.index.BITS_ALL_NOT_SET, 4294967295) --- - [] ... box.space.tweedledum.index.bitset:count(4294967295, { iterator = box.index.BITS_ALL_NOT_SET}) --- - 0 ... ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ANY_SET (single bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ANY_SET, 0) --- - [] ... box.space.tweedledum.index.bitset:count(0, { iterator = box.index.BITS_ANY_SET}) --- - 0 ... dump(box.index.BITS_ANY_SET, 16) --- - - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ ... 
box.space.tweedledum.index.bitset:count(16, { iterator = box.index.BITS_ANY_SET}) --- - 64 ... dump(box.index.BITS_ANY_SET, 128) --- - - $ 128$ ... box.space.tweedledum.index.bitset:count(128, { iterator = box.index.BITS_ANY_SET}) --- - 1 ... ------------------------------------------------------------------------------ -- BitsetIndex: BITS_ANY_SET (multiple bit) ------------------------------------------------------------------------------ dump(box.index.BITS_ANY_SET, 7) --- - - $ 1$ - $ 2$ - $ 3$ - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 9$ - $ 10$ - $ 11$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ ... 
dump(box.index.BITS_ANY_SET, 84) --- - - $ 4$ - $ 5$ - $ 6$ - $ 7$ - $ 12$ - $ 13$ - $ 14$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ ... dump(box.index.BITS_ANY_SET, 113) --- - - $ 1$ - $ 3$ - $ 5$ - $ 7$ - $ 9$ - $ 11$ - $ 13$ - $ 15$ - $ 16$ - $ 17$ - $ 18$ - $ 19$ - $ 20$ - $ 21$ - $ 22$ - $ 23$ - $ 24$ - $ 25$ - $ 26$ - $ 27$ - $ 28$ - $ 29$ - $ 30$ - $ 31$ - $ 32$ - $ 33$ - $ 34$ - $ 35$ - $ 36$ - $ 37$ - $ 38$ - $ 39$ - $ 40$ - $ 41$ - $ 42$ - $ 43$ - $ 44$ - $ 45$ - $ 46$ - $ 47$ - $ 48$ - $ 49$ - $ 50$ - $ 51$ - $ 52$ - $ 53$ - $ 54$ - $ 55$ - $ 56$ - $ 57$ - $ 58$ - $ 59$ - $ 60$ - $ 61$ - $ 62$ - $ 63$ - $ 64$ - $ 65$ - $ 66$ - $ 67$ - $ 68$ - $ 69$ - $ 70$ - $ 71$ - $ 72$ - $ 73$ - $ 74$ - $ 75$ - $ 76$ - $ 77$ - $ 78$ - $ 79$ - $ 80$ - $ 81$ - $ 82$ - $ 83$ - $ 84$ - $ 85$ - $ 86$ - $ 87$ - $ 88$ - $ 89$ - $ 90$ - $ 91$ - $ 92$ - $ 93$ - $ 94$ - $ 95$ - $ 96$ - $ 97$ - $ 98$ - $ 99$ - $ 100$ - $ 101$ - $ 102$ - $ 103$ - $ 104$ - $ 105$ - $ 106$ - $ 107$ - $ 108$ - $ 109$ - $ 110$ - $ 111$ - $ 112$ - $ 113$ - $ 114$ - $ 115$ - $ 116$ - $ 117$ - $ 118$ - $ 119$ - $ 120$ - $ 121$ - $ 122$ - $ 123$ - $ 124$ - $ 125$ - $ 126$ - $ 127$ ... 
drop_space() --- ... ------------------------------------------------------------------------------ -- Misc ------------------------------------------------------------------------------ -- gh-1467: invalid iterator type space = box.schema.space.create('test') --- ... _ = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... _ = space:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) --- ... space.index.bitset:select({1}, { iterator = 'OVERLAPS'}) --- - error: Index 'bitset' (BITSET) of space 'test' (memtx) does not support requested iterator type ... space:drop() --- ... space = nil --- ... -- gh-1549: BITSET index with inappropriate types crashes in debug build space = box.schema.space.create('test') --- ... _ = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... _ = space:create_index('bitset', { type = 'bitset', parts = {2, 'number'}, unique = false }) --- - error: 'Can''t create or modify index ''bitset'' in space ''test'': BITSET index field type must be NUM or STR' ... space:drop() --- ... space = nil --- ... -- https://github.com/tarantool/tarantool/issues/1896 wrong countspace = box.schema.space.create('test') s = box.schema.space.create('test') --- ... _ = s:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... i = s:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) --- ... s:insert{1, 0} --- - [1, 0] ... s:insert{2, 0} --- - [2, 0] ... s:insert{3, 0} --- - [3, 0] ... s:insert{4, 2} --- - [4, 2] ... s:insert{5, 2} --- - [5, 2] ... s:insert{6, 3} --- - [6, 3] ... s:insert{7, 4} --- - [7, 4] ... s:insert{8, 5} --- - [8, 5] ... s:insert{9, 8} --- - [9, 8] ... #i:select(7, {iterator = box.index.BITS_ANY_SET}) --- - 5 ... i:count(7, {iterator = box.index.BITS_ANY_SET}) --- - 5 ... s:drop() --- ... s = nil --- ... 
-- https://github.com/tarantool/tarantool/issues/1946 BITS_ALL_SET crashes s = box.schema.space.create('test') --- ... _ = s:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... i = s:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) --- ... for i=1,10 do s:insert{i, math.random(8)} end --- ... good = true --- ... function is_good(key, opts) return #i:select({key}, opts) == i:count({key}, opts) end --- ... function check(key, opts) good = good and is_good(key, opts) end --- ... for j=1,100 do check(math.random(9) - 1) end --- ... for j=1,100 do check(math.random(9) - 1, {iterator = box.index.BITS_ANY_SET}) end --- ... for j=1,100 do check(math.random(9) - 1, {iterator = box.index.BITS_ALL_SET}) end --- ... for j=1,100 do check(math.random(9) - 1, {iterator = box.index.BITS_ALL_NOT_SET}) end --- ... good --- - true ... s:drop() --- ... s = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/indices_any_type.test.lua0000664000000000000000000000721013306560010022756 0ustar rootroot-- Tests for HASH index type s3 = box.schema.space.create('my_space4') i3_1 = s3:create_index('my_space4_idx1', {type='HASH', parts={1, 'scalar', 2, 'integer', 3, 'number'}, unique=true}) i3_2 = s3:create_index('my_space4_idx2', {type='HASH', parts={4, 'string', 5, 'scalar'}, unique=true}) i3_3 = s3:create_index('my_space4_idx3', {type='TREE', parts={6, 'boolean'}, unique=false}) s3:insert({100.5, 30, 95, "str1", 5, true}) s3:insert({"abc#$23", 1000, -21.542, "namesurname", 99, false}) s3:insert({true, -459, 4000, "foobar", "36.6", true}) s3:select{} i3_1:select({100.5}) i3_1:select({true, -459}) i3_1:select({"abc#$23", 1000, -21.542}) i3_2:select({"str1", 5}) i3_2:select({"str"}) i3_2:select({"str", 5}) i3_2:select({"foobar", "36.6"}) i3_3:select{true} i3_3:select{false} i3_3:select{} s3:drop() -- #2112 int vs. 
double compare s5 = box.schema.space.create('my_space5') _ = s5:create_index('primary', {parts={1, 'scalar'}}) -- small range 1 s5:insert({5}) s5:insert({5.1}) s5:select() s5:truncate() -- small range 2 s5:insert({5.1}) s5:insert({5}) s5:select() s5:truncate() -- small range 3 s5:insert({-5}) s5:insert({-5.1}) s5:select() s5:truncate() -- small range 4 s5:insert({-5.1}) s5:insert({-5}) s5:select() s5:truncate() -- conversion to another type is lossy for both values s5:insert({18446744073709551615ULL}) s5:insert({3.6893488147419103e+19}) s5:select() s5:truncate() -- insert in a different order to excersise another codepath s5:insert({3.6893488147419103e+19}) s5:insert({18446744073709551615ULL}) s5:select() s5:truncate() -- MP_INT vs MP_UINT s5:insert({-9223372036854775808LL}) s5:insert({-3.6893488147419103e+19}) s5:select() s5:truncate() -- insert in a different order to excersise another codepath s5:insert({-3.6893488147419103e+19}) s5:insert({-9223372036854775808LL}) s5:select() s5:truncate() -- different signs 1 s5:insert({9223372036854775807LL}) s5:insert({-3.6893488147419103e+19}) s5:select() s5:truncate() -- different signs 2 s5:insert({-3.6893488147419103e+19}) s5:insert({9223372036854775807LL}) s5:select() s5:truncate() -- different signs 3 s5:insert({-9223372036854775808LL}) s5:insert({3.6893488147419103e+19}) s5:select() s5:truncate() -- different signs 4 s5:insert({3.6893488147419103e+19}) s5:insert({-9223372036854775808LL}) s5:select() s5:truncate() -- different magnitude 1 s5:insert({1.1}) s5:insert({18446744073709551615ULL}) s5:select() s5:truncate() -- different magnitude 2 s5:insert({18446744073709551615ULL}) s5:insert({1.1}) s5:select() s5:truncate() -- Close values ffi = require('ffi') ffi.new('double', 1152921504606846976) == 1152921504606846976ULL ffi.new('double', 1152921504606846977) == 1152921504606846976ULL -- Close values 1 s5:insert({1152921504606846976ULL}) s5:insert({ffi.new('double', 1152921504606846976ULL)}) -- fail s5:select() 
s5:truncate() -- Close values 2 s5:insert({1152921504606846977ULL}) s5:insert({ffi.new('double', 1152921504606846976ULL)}) -- success s5:select() s5:truncate() -- Close values 3 s5:insert({-1152921504606846976LL}) s5:insert({ffi.new('double', -1152921504606846976LL)}) -- fail s5:select() s5:truncate() -- Close values 4 s5:insert({-1152921504606846977LL}) s5:insert({ffi.new('double', -1152921504606846976LL)}) -- success s5:select() s5:truncate() -- Close values 5 ffi.cdef "double exp2(double);" s5:insert({0xFFFFFFFFFFFFFFFFULL}) s5:insert({ffi.new('double', ffi.C.exp2(64))}) -- success s5:select() s5:truncate() -- Close values 6 s5:insert({0x8000000000000000LL}) s5:insert({ffi.new('double', -ffi.C.exp2(63))}) -- duplicate s5:select() s5:truncate() -- Close values 7 s5:insert({0x7FFFFFFFFFFFFFFFLL}) s5:insert({ffi.new('double', ffi.C.exp2(63))}) -- ok s5:select() s5:truncate() s5:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/access_sysview.result0000664000000000000000000002263313306565107022263 0ustar rootrootsession = box.session --- ... -- -- Basic tests -- #box.space._vspace:select{} == #box.space._space:select{} --- - true ... #box.space._vindex:select{} == #box.space._index:select{} --- - true ... #box.space._vuser:select{} == #box.space._user:select{} --- - true ... #box.space._vpriv:select{} == #box.space._priv:select{} --- - true ... #box.space._vfunc:select{} == #box.space._func:select{} --- - true ... -- gh-1042: bad error message for _vspace, _vuser, _vindex, etc. -- Space '_vspace' (sysview) does not support replace box.space._vspace:replace({1, 1, 'test'}) --- - error: View '_vspace' is read-only ... box.space._vspace:delete(1) --- - error: View '_vspace' is read-only ... box.space._vspace:update(1, {{'=', 2, 48}}) --- - error: View '_vspace' is read-only ... 
-- error: Index 'primary' of space '_vspace' (sysview) does not support xxx() box.space._vspace.index.primary:len() --- - error: Index 'primary' (TREE) of space '_vspace' (sysview) does not support size() ... box.space._vspace.index.primary:random(48) --- - error: Index 'primary' (TREE) of space '_vspace' (sysview) does not support random() ... session.su('guest') --- ... -- -- _vspace + _vindex -- -- _vXXXX views are visible for 'public' role #box.space._vspace.index[2]:select('_vspace') ~= 0 --- - true ... #box.space._vspace.index[2]:select('_vindex') ~= 0 --- - true ... #box.space._vspace.index[2]:select('_vuser') ~= 0 --- - true ... #box.space._vspace.index[2]:select('_vfunc') ~= 0 --- - true ... #box.space._vspace.index[2]:select('_vpriv') ~= 0 --- - true ... #box.space._vindex:select(box.space._vspace.id) > 0 --- - true ... #box.space._vindex:select(box.space._vindex.id) > 0 --- - true ... #box.space._vindex:select(box.space._vuser.id) > 0 --- - true ... #box.space._vindex:select(box.space._vfunc.id) > 0 --- - true ... #box.space._vindex:select(box.space._vpriv.id) > 0 --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'public') --- ... box.session.su('guest') --- ... #box.space._vspace:select{} --- - error: Read access to space '_vspace' is denied for user 'guest' ... #box.space._vindex:select{} --- - error: Read access to space '_vindex' is denied for user 'guest' ... #box.space._vuser:select{} --- - error: Read access to space '_vuser' is denied for user 'guest' ... #box.space._vpriv:select{} --- - error: Read access to space '_vpriv' is denied for user 'guest' ... #box.space._vfunc:select{} --- - error: Read access to space '_vfunc' is denied for user 'guest' ... box.session.su('admin') --- ... box.schema.user.grant('guest', 'public') --- ... box.session.su('guest') --- ... #box.space._vspace:select{} --- - 7 ... #box.space._vindex:select{} --- - 17 ... box.session.su('admin') --- ... s = box.schema.space.create('test') --- ... 
s = box.space.test:create_index('primary') --- ... box.schema.role.grant('public', 'read', 'space', 'test') --- ... box.session.su('guest') --- ... box.space._vspace.index[2]:get('test') ~= nil --- - true ... #box.space._vindex:select(box.space.test.id) == 1 --- - true ... box.session.su('admin') --- ... box.schema.role.revoke('public', 'read', 'space', 'test') --- ... box.session.su('guest') --- ... box.space._vspace.index[2]:get('test') == nil --- - true ... #box.space._vindex:select(box.space.test.id) == 0 --- - true ... box.session.su('admin') --- ... box.schema.user.grant('guest', 'read', 'space', 'test') --- ... box.session.su('guest') --- ... box.space._vspace.index[2]:get('test') ~= nil --- - true ... #box.space._vindex:select(box.space.test.id) == 1 --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read', 'space', 'test') --- ... box.session.su('guest') --- ... box.space._vspace.index[2]:get('test') == nil --- - true ... #box.space._vindex:select(box.space.test.id) == 0 --- - true ... -- check universe permissions box.session.su('admin') --- ... box.schema.user.grant('guest', 'read', 'universe') --- ... box.session.su('guest') --- ... #box.space._vspace:select{} --- - 18 ... #box.space._vindex:select{} --- - 41 ... #box.space._vuser:select{} --- - 5 ... #box.space._vpriv:select{} --- - 14 ... #box.space._vfunc:select{} --- - 1 ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read', 'universe') --- ... box.schema.user.grant('guest', 'write', 'universe') --- ... box.session.su('guest') --- ... #box.space._vindex:select{} --- - 41 ... #box.space._vuser:select{} --- - 1 ... #box.space._vpriv:select{} --- - 2 ... #box.space._vfunc:select{} --- - 1 ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'write', 'universe') --- ... box.space.test:drop() --- ... box.session.su('guest') --- ... -- read access to original space also allow to read a view box.session.su('admin') --- ... 
space_cnt = #box.space._space:select{} --- ... index_cnt = #box.space._index:select{} --- ... box.schema.user.grant('guest', 'read', 'space', '_space') --- ... box.schema.user.grant('guest', 'read', 'space', '_index') --- ... box.session.su('guest') --- ... #box.space._vspace:select{} == space_cnt --- - true ... #box.space._vindex:select{} == index_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read', 'space', '_space') --- ... box.schema.user.revoke('guest', 'read', 'space', '_index') --- ... box.session.su('guest') --- ... #box.space._vspace:select{} < space_cnt --- - true ... #box.space._vindex:select{} < index_cnt --- - true ... -- -- _vuser -- -- a guest user can read information about itself t = box.space._vuser:select(); return #t == 1 and t[1][3] == 'guest' --- - true ... -- read access to original space also allow to read a view box.session.su('admin') --- ... user_cnt = #box.space._user:select{} --- ... box.schema.user.grant('guest', 'read', 'space', '_user') --- ... box.session.su('guest') --- ... #box.space._vuser:select{} == user_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read', 'space', '_user') --- ... box.session.su('guest') --- ... #box.space._vuser:select{} < user_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.grant('guest', 'read,write,create', 'universe') --- ... box.session.su('guest') --- ... box.schema.user.create('tester') --- ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read,write,create', 'universe') --- ... box.session.su('guest') --- ... #box.space._vuser.index[2]:select('tester') > 0 --- - true ... box.session.su('admin') --- ... box.schema.user.drop('tester') --- ... box.session.su('guest') --- ... -- -- _vpriv -- -- a guest user can see granted 'public' role box.space._vpriv.index[2]:select('role')[1][2] == session.uid() --- - true ... 
-- read access to original space also allow to read a view box.session.su('admin') --- ... box.schema.user.grant('guest', 'read', 'space', '_priv') --- ... priv_cnt = #box.space._priv:select{} --- ... box.session.su('guest') --- ... #box.space._vpriv:select{} == priv_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read', 'space', '_priv') --- ... box.session.su('guest') --- ... cnt = #box.space._vpriv:select{} --- ... cnt < priv_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.grant('guest', 'read,write', 'space', '_schema') --- ... box.session.su('guest') --- ... #box.space._vpriv:select{} == cnt + 1 --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read,write', 'space', '_schema') --- ... box.session.su('guest') --- ... #box.space._vpriv:select{} == cnt --- - true ... -- -- _vfunc -- box.session.su('admin') --- ... box.schema.func.create('test') --- ... -- read access to original space also allow to read a view func_cnt = #box.space._func:select{} --- ... box.schema.user.grant('guest', 'read', 'space', '_func') --- ... box.session.su('guest') --- ... #box.space._vfunc:select{} == func_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'read', 'space', '_func') --- ... box.session.su('guest') --- ... cnt = #box.space._vfunc:select{} --- ... cnt < func_cnt --- - true ... box.session.su('admin') --- ... box.schema.user.grant('guest', 'execute', 'function', 'test') --- ... box.session.su('guest') --- ... #box.space._vfunc:select{} = cnt + 1 --- ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'execute', 'function', 'test') --- ... box.session.su('guest') --- ... #box.space._vfunc:select{} == cnt --- - true ... box.session.su('admin') --- ... box.schema.user.grant('guest', 'execute', 'universe') --- ... box.session.su('guest') --- ... #box.space._vfunc:select{} == cnt + 1 --- - true ... box.session.su('admin') --- ... 
box.schema.user.revoke('guest', 'execute', 'universe') --- ... box.schema.func.drop('test') --- ... box.session.su('guest') --- ... #box.space._vfunc:select{} == cnt --- - true ... -- -- view:alter() tests -- session.su('admin') --- ... box.space._vspace.index[1]:alter({parts = { 2, 'string' }}) --- ... box.space._vspace.index[1]:select('xxx') --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... box.space._vspace.index[1]:select(1) --- - error: 'Supplied key type of part 0 does not match index part type: expected string' ... box.space._vspace.index[1]:alter({parts = { 2, 'unsigned' }}) --- ... box.space._space.index[1]:drop() --- ... box.space._vspace.index[1]:select(1) --- - error: 'No index #1 is defined in space ''_space''' ... s = box.space._space:create_index('owner', {parts = { 2, 'unsigned' }, id = 1, unique = false}) --- ... #box.space._vspace.index[1]:select(1) > 0 --- - true ... session = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/box.lua0000664000000000000000000000151713306560010017246 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", } require('console').listen(os.getenv('ADMIN')) local _hide = { pid_file=1, log=1, listen=1, vinyl_dir=1, memtx_dir=1, wal_dir=1, memtx_max_tuple_size=1, memtx_min_tuple_size=1 } function cfg_filter(data) if type(data)~='table' then return data end local keys,k,_ = {} for k in pairs(data) do table.insert(keys, k) end table.sort(keys) local result = {} for _,k in pairs(keys) do table.insert(result, {k, _hide[k] and '' or cfg_filter(data[k])}) end return result end function compare(a,b) return a[1] < b[1] end function sorted(data) table.sort(data, compare) return data end tarantool_1.9.1.26.g63eb81e3c/test/box/net.box.test.lua0000664000000000000000000006724313306565107021035 0ustar rootrootremote = require 'net.box' fiber = require 'fiber' log = require 'log' msgpack = 
require 'msgpack' env = require('test_run') test_run = env.new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") test_run:cmd("setopt delimiter ';'") function x_select(cn, space_id, index_id, iterator, offset, limit, key, opts) return cn:_request('select', opts, space_id, index_id, iterator, offset, limit, key) end function x_fatal(cn) cn._transport.perform_request(nil, nil, 'inject', nil, '\x80') end test_run:cmd("setopt delimiter ''"); LISTEN = require('uri').parse(box.cfg.listen) space = box.schema.space.create('net_box_test_space') index = space:create_index('primary', { type = 'tree' }) -- low level connection log.info("create connection") cn = remote.connect(LISTEN.host, LISTEN.service) log.info("state is %s", cn.state) cn:ping() log.info("ping is done") cn:ping() log.info("ping is done") cn:ping() -- check permissions cn:call('unexists_procedure') function test_foo(a,b,c) return { {{ [a] = 1 }}, {{ [b] = 2 }}, c } end cn:call('test_foo', {'a', 'b', 'c'}) cn:eval('return 2+2') cn:close() -- connect and call without usage access box.schema.user.grant('guest','execute','universe') box.schema.user.revoke('guest','usage','universe') box.session.su("guest") cn = remote.connect(LISTEN.host, LISTEN.service) cn:call('test_foo', {'a', 'b', 'c'}) box.session.su("admin") box.schema.user.grant('guest','usage','universe') cn:close() cn = remote.connect(box.cfg.listen) cn:call('unexists_procedure') cn:call('test_foo', {'a', 'b', 'c'}) cn:call(nil, {'a', 'b', 'c'}) cn:eval('return 2+2') cn:eval('return 1, 2, 3') cn:eval('return ...', {1, 2, 3}) cn:eval('return { k = "v1" }, true, { xx = 10, yy = 15 }, nil') cn:eval('return nil') cn:eval('return') cn:eval('error("exception")') cn:eval('box.error(0)') cn:eval('!invalid expression') -- box.commit() missing at return of CALL/EVAL function no_commit() box.begin() fiber.sleep(0.001) end cn:call('no_commit') cn:eval('no_commit()') remote.self:eval('return 1+1, 2+2') remote.self:eval('return') 
remote.self:eval('error("exception")') remote.self:eval('box.error(0)') remote.self:eval('!invalid expression') -- -- gh-822: net.box.call should roll back local transaction on error -- _ = box.schema.space.create('gh822') _ = box.space.gh822:create_index('primary') test_run:cmd("setopt delimiter ';'") -- rollback on invalid function function rollback_on_invalid_function() box.begin() box.space.gh822:insert{1, "netbox_test"} pcall(remote.self.call, remote.self, 'invalid_function') return box.space.gh822:get(1) == nil end; rollback_on_invalid_function(); -- rollback on call error function test_error() error('Some error') end; function rollback_on_call_error() box.begin() box.space.gh822:insert{1, "netbox_test"} pcall(remote.self.call, remote.self, 'test_error') return box.space.gh822:get(1) == nil end; rollback_on_call_error(); -- rollback on eval function rollback_on_eval_error() box.begin() box.space.gh822:insert{1, "netbox_test"} pcall(remote.self.eval, remote.self, "error('Some error')") return box.space.gh822:get(1) == nil end; rollback_on_eval_error(); test_run:cmd("setopt delimiter ''"); box.space.gh822:drop() box.schema.user.revoke('guest','execute','universe') box.schema.user.grant('guest','read,write,execute','universe') cn:close() cn = remote.connect(box.cfg.listen) x_select(cn, space.id, space.index.primary.id, box.index.EQ, 0, 0xFFFFFFFF, 123) space:insert{123, 345} x_select(cn, space.id, space.index.primary.id, box.index.EQ, 0, 0, 123) x_select(cn, space.id, space.index.primary.id, box.index.EQ, 0, 1, 123) x_select(cn, space.id, space.index.primary.id, box.index.EQ, 1, 1, 123) cn.space[space.id] ~= nil cn.space.net_box_test_space ~= nil cn.space.net_box_test_space ~= nil cn.space.net_box_test_space.index ~= nil cn.space.net_box_test_space.index.primary ~= nil cn.space.net_box_test_space.index[space.index.primary.id] ~= nil cn.space.net_box_test_space.index.primary:select(123) cn.space.net_box_test_space.index.primary:select(123, { limit = 0 }) 
cn.space.net_box_test_space.index.primary:select(nil, { limit = 1, }) cn.space.net_box_test_space:insert{234, 1,2,3} cn.space.net_box_test_space:insert{234, 1,2,3} cn.space.net_box_test_space.insert{234, 1,2,3} cn.space.net_box_test_space:replace{354, 1,2,3} cn.space.net_box_test_space:replace{354, 1,2,4} cn.space.net_box_test_space:select{123} space:select({123}, { iterator = 'GE' }) cn.space.net_box_test_space:select({123}, { iterator = 'GE' }) cn.space.net_box_test_space:select({123}, { iterator = 'GT' }) cn.space.net_box_test_space:select({123}, { iterator = 'GT', limit = 1 }) cn.space.net_box_test_space:select({123}, { iterator = 'GT', limit = 1, offset = 1 }) cn.space.net_box_test_space:select{123} cn.space.net_box_test_space:update({123}, { { '+', 2, 1 } }) cn.space.net_box_test_space:update(123, { { '+', 2, 1 } }) cn.space.net_box_test_space:select{123} cn.space.net_box_test_space:insert(cn.space.net_box_test_space:get{123}:update{ { '=', 1, 2 } }) cn.space.net_box_test_space:delete{123} cn.space.net_box_test_space:select{2} cn.space.net_box_test_space:select({234}, { iterator = 'LT' }) cn.space.net_box_test_space:update({1}, { { '+', 2, 2 } }) cn.space.net_box_test_space:delete{1} cn.space.net_box_test_space:delete{2} cn.space.net_box_test_space:delete{2} -- test one-based indexing in splice operation (see update.test.lua) cn.space.net_box_test_space:replace({10, 'abcde'}) cn.space.net_box_test_space:update(10, {{':', 2, 0, 0, '!'}}) cn.space.net_box_test_space:update(10, {{':', 2, 1, 0, '('}}) cn.space.net_box_test_space:update(10, {{':', 2, 2, 0, '({'}}) cn.space.net_box_test_space:update(10, {{':', 2, -1, 0, ')'}}) cn.space.net_box_test_space:update(10, {{':', 2, -2, 0, '})'}}) cn.space.net_box_test_space:delete{10} cn.space.net_box_test_space:select({}, { iterator = 'ALL' }) -- gh-841: net.box uses incorrect iterator type for select with no arguments cn.space.net_box_test_space:select() cn.space.net_box_test_space.index.primary:min() 
cn.space.net_box_test_space.index.primary:min(354) cn.space.net_box_test_space.index.primary:max() cn.space.net_box_test_space.index.primary:max(234) cn.space.net_box_test_space.index.primary:count() cn.space.net_box_test_space.index.primary:count(354) cn.space.net_box_test_space:get(354) -- reconnects after errors -- -- 1. no reconnect x_fatal(cn) cn.state cn:ping() cn:call('test_foo') cn:wait_state('active') -- -- 2 reconnect cn = remote.connect(LISTEN.host, LISTEN.service, { reconnect_after = .1 }) cn.space ~= nil cn.space.net_box_test_space:select({}, { iterator = 'ALL' }) x_fatal(cn) cn:wait_connected() cn:wait_state('active') cn:wait_state({active=true}) cn:ping() cn.state cn.space.net_box_test_space:select({}, { iterator = 'ALL' }) x_fatal(cn) x_select(cn, space.id, 0, box.index.ALL, 0, 0xFFFFFFFF, {}) cn.state cn:ping() -- -- dot-new-method cn1 = remote.new(LISTEN.host, LISTEN.service) x_select(cn1, space.id, 0, box.index.ALL, 0, 0xFFFFFFF, {}) cn1:close() -- -- error while waiting for response type(fiber.create(function() fiber.sleep(.5) x_fatal(cn) end)) function pause() fiber.sleep(10) return true end cn:call('pause') cn:call('test_foo', {'a', 'b', 'c'}) -- call remote.self:call('test_foo', {'a', 'b', 'c'}) cn:call('test_foo', {'a', 'b', 'c'}) -- long replies function long_rep() return { 1, string.rep('a', 5000) } end res = cn:call('long_rep') res[1] == 1 res[2] == string.rep('a', 5000) function long_rep() return { 1, string.rep('a', 50000) } end res = cn:call('long_rep') res[1] == 1 res[2] == string.rep('a', 50000) -- a.b.c.d u = '84F7BCFA-079C-46CC-98B4-F0C821BE833E' X = {} X.X = X function X.fn(x,y) return y or x end cn:call('X.fn', {u}) cn:call('X.X.X.X.X.X.X.fn', {u}) cn:call('X.X.X.X:fn', {u}) -- auth cn = remote.connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = '123', wait_connected = true }) cn:is_connected() cn.error cn.state box.schema.user.create('netbox', { password = 'test' }) box.schema.user.grant('netbox', 'read, write, 
execute', 'universe'); cn = remote.connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = 'test' }) cn.state cn.error cn:ping() function ret_after(to) fiber.sleep(to) return {{to}} end cn:ping({timeout = 1.00}) cn:ping({timeout = 1e-9}) cn:ping() remote_space = cn.space.net_box_test_space remote_pk = remote_space.index.primary remote_space:insert({0}, { timeout = 1.00 }) remote_space:insert({1}, { timeout = 1e-9 }) remote_space:insert({2}) remote_space:replace({0}, { timeout = 1e-9 }) remote_space:replace({1}) remote_space:replace({2}, { timeout = 1.00 }) remote_space:upsert({3}, {}, { timeout = 1e-9 }) remote_space:upsert({4}, {}) remote_space:upsert({5}, {}, { timeout = 1.00 }) remote_space:upsert({3}, {}) remote_space:update({3}, {}, { timeout = 1e-9 }) remote_space:update({4}, {}) remote_space:update({5}, {}, { timeout = 1.00 }) remote_space:update({3}, {}) remote_pk:update({5}, {}, { timeout = 1e-9 }) remote_pk:update({4}, {}) remote_pk:update({3}, {}, { timeout = 1.00 }) remote_pk:update({5}, {}) remote_space:get({0}) remote_space:get({1}, { timeout = 1.00 }) remote_space:get({2}, { timeout = 1e-9 }) remote_pk:get({3}, { timeout = 1e-9 }) remote_pk:get({4}) remote_pk:get({5}, { timeout = 1.00 }) remote_space:select({2}, { timeout = 1e-9 }) remote_space:select({2}, { timeout = 1.00 }) remote_space:select({2}) remote_pk:select({2}, { timeout = 1.00 }) remote_pk:select({2}, { timeout = 1e-9 }) remote_pk:select({2}) remote_space:select({5}, { timeout = 1.00, iterator = 'LE', limit = 5 }) remote_space:select({5}, { iterator = 'LE', limit = 5}) remote_space:select({5}, { timeout = 1e-9, iterator = 'LE', limit = 5 }) remote_pk:select({2}, { timeout = 1.00, iterator = 'LE', limit = 5 }) remote_pk:select({2}, { iterator = 'LE', limit = 5}) remote_pk:select({2}, { timeout = 1e-9, iterator = 'LE', limit = 5 }) remote_pk:count({2}, { timeout = 1.00}) remote_pk:count({2}, { timeout = 1e-9}) remote_pk:count({2}) remote_pk:count({2}, { timeout = 1.00, iterator = 
'LE' }) remote_pk:count({2}, { iterator = 'LE'}) remote_pk:count({2}, { timeout = 1e-9, iterator = 'LE' }) remote_pk:min(nil, { timeout = 1.00 }) remote_pk:min(nil, { timeout = 1e-9 }) remote_pk:min(nil) remote_pk:min({0}, { timeout = 1e-9 }) remote_pk:min({1}) remote_pk:min({2}, { timeout = 1.00 }) remote_pk:max(nil) remote_pk:max(nil, { timeout = 1e-9 }) remote_pk:max(nil, { timeout = 1.00 }) remote_pk:max({0}, { timeout = 1.00 }) remote_pk:max({1}, { timeout = 1e-9 }) remote_pk:max({2}) _ = remote_space:delete({0}, { timeout = 1e-9 }) _ = remote_pk:delete({0}, { timeout = 1.00 }) _ = remote_space:delete({1}, { timeout = 1.00 }) _ = remote_pk:delete({1}, { timeout = 1e-9 }) _ = remote_space:delete({2}, { timeout = 1e-9 }) _ = remote_pk:delete({2}) _ = remote_pk:delete({3}) _ = remote_pk:delete({4}) _ = remote_pk:delete({5}) remote_space:get(0) remote_space:get(1) remote_space:get(2) remote_space = nil cn:call('ret_after', {0.01}, { timeout = 1.00 }) cn:call('ret_after', {1.00}, { timeout = 1e-9 }) cn:eval('return ret_after(...)', {0.01}, { timeout = 1.00 }) cn:eval('return ret_after(...)', {1.00}, { timeout = 1e-9 }) -- -- :timeout() -- @deprecated since 1.7.4 -- cn:timeout(1).space.net_box_test_space.index.primary:select{234} cn:call('ret_after', {.01}) cn:timeout(1):call('ret_after', {.01}) cn:timeout(.01):call('ret_after', {1}) cn = remote:timeout(0.0000000001):connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = '123' }) cn:close() cn = remote:timeout(1):connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = '123' }) remote.self:ping() remote.self.space.net_box_test_space:select{234} remote.self:timeout(123).space.net_box_test_space:select{234} remote.self:is_connected() remote.self:wait_connected() cn:close() -- cleanup database after tests space:drop() -- #1545 empty password cn = remote.connect(LISTEN.host, LISTEN.service, { user = 'test' }) cn ~= nil cn:close() cn = remote.connect(LISTEN.host, LISTEN.service, { password = 'test' 
}) cn ~= nil cn:close() -- #544 usage for remote[point]method cn = remote.connect(LISTEN.host, LISTEN.service) cn:eval('return true') cn.eval('return true') cn.ping() cn:close() remote.self:eval('return true') remote.self.eval('return true') -- uri as the first argument uri = string.format('%s:%s@%s:%s', 'netbox', 'test', LISTEN.host, LISTEN.service) cn = remote.new(uri) cn:ping() cn:close() uri = string.format('%s@%s:%s', 'netbox', LISTEN.host, LISTEN.service) cn = remote.new(uri) cn ~= nil, cn.state, cn.error cn:close() -- don't merge creds from uri & opts remote.new(uri, { password = 'test' }) cn = remote.new(uri, { user = 'netbox', password = 'test' }) cn:ping() cn:close() box.schema.user.revoke('netbox', 'read, write, execute', 'universe'); box.schema.user.drop('netbox') -- #594: bad argument #1 to 'setmetatable' (table expected, got number) test_run:cmd("setopt delimiter ';'") function gh594() local cn = remote.connect(box.cfg.listen) local ping = fiber.create(function() cn:ping() end) cn:call('dostring', {'return 2 + 2'}) cn:close() end; test_run:cmd("setopt delimiter ''"); gh594() -- #636: Reload schema on demand sp = box.schema.space.create('test_old') _ = sp:create_index('primary') sp:insert{1, 2, 3} con = remote.new(box.cfg.listen) con:ping() con.space.test_old:select{} con.space.test == nil sp = box.schema.space.create('test') _ = sp:create_index('primary') sp:insert{2, 3, 4} con.space.test == nil con:reload_schema() con.space.test:select{} box.space.test:drop() box.space.test_old:drop() con:close() name = string.match(arg[0], "([^,]+)%.lua") file_log = require('fio').open(name .. 
'.log', {'O_RDONLY', 'O_NONBLOCK'}) file_log:seek(0, 'SEEK_END') ~= 0 test_run:cmd("setopt delimiter ';'") _ = fiber.create( function() local conn = require('net.box').new(box.cfg.listen) conn:call('no_such_function', {}) conn:close() end ); test_run:cmd("setopt delimiter ''"); test_run:grep_log("default", "ER_NO_SUCH_PROC") -- gh-983 selecting a lot of data crashes the server or hangs the -- connection -- gh-983 test case: iproto connection selecting a lot of data _ = box.schema.space.create('test', { temporary = true }) _ = box.space.test:create_index('primary', {type = 'TREE', parts = {1,'unsigned'}}) data1k = "aaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhh" for i = 0,10000 do box.space.test:insert{i, data1k} end net = require('net.box') c = net:connect(box.cfg.listen) r = c.space.test:select(nil, {limit=5000}) box.space.test:drop() -- gh-970 gh-971 UPSERT over network _ = box.schema.space.create('test') _ = box.space.test:create_index('primary', {type = 'TREE', parts = {1,'unsigned'}}) _ = 
box.space.test:create_index('covering', {type = 'TREE', parts = {1,'unsigned',3,'string',2,'unsigned'}}) _ = box.space.test:insert{1, 2, "string"} c = net:connect(box.cfg.listen) c.space.test:select{} c.space.test:upsert({1, 2, 'nothing'}, {{'+', 2, 1}}) -- common update c.space.test:select{} c.space.test:upsert({2, 4, 'something'}, {{'+', 2, 1}}) -- insert c.space.test:select{} c.space.test:upsert({2, 4, 'nothing'}, {{'+', 3, 100500}}) -- wrong operation c.space.test:select{} -- gh-1729 net.box index metadata incompatible with local metadata c.space.test.index.primary.parts c.space.test.index.covering.parts box.space.test:drop() -- CALL vs CALL_16 in connect options function echo(...) return ... end c = net.connect(box.cfg.listen) c:call('echo', {42}) c:eval('return echo(...)', {42}) -- invalid arguments c:call('echo', 42) c:eval('return echo(...)', 42) c:close() c = net.connect(box.cfg.listen, {call_16 = true}) c:call('echo', 42) c:eval('return echo(...)', 42) c:close() -- -- gh-2195 export pure msgpack from net.box -- space = box.schema.space.create('test') _ = box.space.test:create_index('primary') c = net.connect(box.cfg.listen) ibuf = require('buffer').ibuf() c:ping() c.space.test ~= nil c.space.test:replace({1, 'hello'}) -- replace c.space.test:replace({2}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- insert c.space.test:insert({3}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- update c.space.test:update({3}, {}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result c.space.test.index.primary:update({3}, {}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- upsert c.space.test:upsert({4}, {}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- delete c.space.test:upsert({4}, {}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- select 
c.space.test.index.primary:select({3}, {iterator = 'LE', buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- select len = c.space.test:select({}, {buffer = ibuf}) ibuf.rpos + len == ibuf.wpos result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) ibuf.rpos == ibuf.wpos len result -- call c:call("echo", {1, 2, 3}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result c:call("echo", {}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result c:call("echo", nil, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- eval c:eval("echo(...)", {1, 2, 3}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result c:eval("echo(...)", {}, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result c:eval("echo(...)", nil, {buffer = ibuf}) result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) result -- unsupported methods c.space.test:get({1}, { buffer = ibuf}) c.space.test.index.primary:min({}, { buffer = ibuf}) c.space.test.index.primary:max({}, { buffer = ibuf}) c.space.test.index.primary:count({}, { buffer = ibuf}) c.space.test.index.primary:get({1}, { buffer = ibuf}) -- error handling rpos, wpos = ibuf.rpos, ibuf.wpos c.space.test:insert({1}, {buffer = ibuf}) ibuf.rpos == rpos, ibuf.wpos == wpos ibuf = nil c:close() space:drop() -- gh-1904 net.box hangs in :close() if a fiber was cancelled -- while blocked in :_wait_state() in :_request() options = {user = 'netbox', password = 'badpass', wait_connected = false, reconnect_after = 0.01} c = net:new(box.cfg.listen, options) f = fiber.create(function() c:call("") end) fiber.sleep(0.01) f:cancel(); c:close() -- check for on_schema_reload callback test_run:cmd("setopt delimiter ';'") do local a = 0 function osr_cb() a = a + 1 end local con = net.new(box.cfg.listen, { wait_connected = false }) con:on_schema_reload(osr_cb) con:wait_connected() con.space._schema:select{} 
box.schema.space.create('misisipi') box.space.misisipi:drop() con.space._schema:select{} con:close() con = nil return a end; do local a = 0 function osr_cb() a = a + 1 end local con = net.new(box.cfg.listen, { wait_connected = true }) con:on_schema_reload(osr_cb) con.space._schema:select{} box.schema.space.create('misisipi') box.space.misisipi:drop() con.space._schema:select{} con:close() con = nil return a end; test_run:cmd("setopt delimiter ''"); box.schema.user.revoke('guest', 'read,write,execute', 'universe') -- Tarantool < 1.7.1 compatibility (gh-1533) c = net.new(box.cfg.listen) c:ping() c:close() -- Test for connect_timeout > 0 in netbox connect test_run:cmd("setopt delimiter ';'"); greeting = "Tarantool 1.7.3 (Lua console)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" .. "type 'help' for interactive help~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"; socket = require('socket'); srv = socket.tcp_server('localhost', 3392, { handler = function(fd) local fiber = require('fiber') fiber.sleep(0.1) fd:write(greeting) end }); -- we must get timeout nb = net.new('localhost:3392', { wait_connected = true, console = true, connect_timeout = 0.01 }); nb.error == "Timeout exceeded" or nb.error == "Connection timed out"; nb:close(); -- we must get peer closed nb = net.new('localhost:3392', { wait_connected = true, console = true, connect_timeout = 0.2 }); nb.error ~= "Timeout exceeded"; nb:close(); test_run:cmd("setopt delimiter ''"); srv:close() test_run:cmd("clear filter") -- -- gh-2402 net.box doesn't support space:format() -- space = box.schema.space.create('test', {format={{name="id", type="unsigned"}}}) space ~= nil _ = box.space.test:create_index('primary') box.schema.user.grant('guest','read,write,execute','space', 'test') c = net.connect(box.cfg.listen) c:ping() c.space.test ~= nil format = c.space.test:format() format[1] ~= nil format[1].name == "id" format[1].type == "unsigned" c.space.test:format({}) c:close() space:drop() -- -- Check that it's possible to get connection object 
form net.box space -- space = box.schema.space.create('test', {format={{name="id", type="unsigned"}}}) space ~= nil _ = box.space.test:create_index('primary') box.schema.user.grant('guest','read,write,execute','space', 'test') c = net.connect(box.cfg.listen) c:ping() c.space.test ~= nil c.space.test.connection == c box.schema.user.revoke('guest','read,write,execute','space', 'test') c:close() -- -- gh-2642: box.session.type() -- box.schema.user.grant('guest','read,write,execute','universe') c = net.connect(box.cfg.listen) c:call("box.session.type") c:close() -- -- On_connect/disconnect triggers. -- test_run:cmd('create server connecter with script = "box/proxy.lua"') test_run:cmd('start server connecter') test_run:cmd("set variable connect_to to 'connecter.listen'") conn = net.connect(connect_to, { reconnect_after = 0.1 }) conn.state connected_cnt = 0 disconnected_cnt = 0 function on_connect() connected_cnt = connected_cnt + 1 end function on_disconnect() disconnected_cnt = disconnected_cnt + 1 end conn:on_connect(on_connect) conn:on_disconnect(on_disconnect) test_run:cmd('stop server connecter') test_run:cmd('start server connecter') while conn.state ~= 'active' do fiber.sleep(0.1) end connected_cnt disconnected_cnt conn:close() disconnected_cnt test_run:cmd('stop server connecter') -- -- gh-2401 update pseudo objects not replace them -- space:drop() space = box.schema.space.create('test') c = net.connect(box.cfg.listen) cspace = c.space.test space.index.test_index == nil cspace.index.test_index == nil _ = space:create_index("test_index", {parts={1, 'string'}}) c:reload_schema() space.index.test_index ~= nil cspace.index.test_index ~= nil c.space.test.index.test_index ~= nil -- cleanup box.schema.user.revoke('guest','read,write,execute','universe') space:drop() -- -- gh-946: long polling CALL blocks input -- box.schema.user.grant('guest', 'execute', 'universe') c = net.connect(box.cfg.listen) N = 100 pad = string.rep('x', 1024) long_call_cond = fiber.cond() 
long_call_channel = fiber.channel() fast_call_channel = fiber.channel() function fast_call(x) return x end function long_call(x) long_call_cond:wait() return x * 2 end test_run:cmd("setopt delimiter ';'") for i = 1, N do fiber.create(function() fast_call_channel:put(c:call('fast_call', {i, pad})) end) fiber.create(function() long_call_channel:put(c:call('long_call', {i, pad})) end) end test_run:cmd("setopt delimiter ''"); x = 0 for i = 1, N do x = x + fast_call_channel:get() end x long_call_cond:broadcast() x = 0 for i = 1, N do x = x + long_call_channel:get() end x -- Check that a connection does not leak if there is -- a long CALL in progress when it is closed. disconnected = false function on_disconnect() disconnected = true end box.session.on_disconnect(on_disconnect) == on_disconnect ch1 = fiber.channel(1) ch2 = fiber.channel(1) function wait_signal() ch1:put(true) ch2:get() end _ = fiber.create(function() c:call('wait_signal') end) ch1:get() c:close() fiber.sleep(0) disconnected -- false ch2:put(true) while disconnected == false do fiber.sleep(0.01) end disconnected -- true box.session.on_disconnect(nil, on_disconnect) -- -- gh-2666: check that netbox.call is not repeated on schema -- change. -- box.schema.user.grant('guest', 'write', 'space', '_space') box.schema.user.grant('guest', 'write', 'space', '_schema') count = 0 function create_space(name) count = count + 1 box.schema.create_space(name) return true end c = net.connect(box.cfg.listen) c:call('create_space', {'test1'}) count c:call('create_space', {'test2'}) count c:call('create_space', {'test3'}) count box.space.test1:drop() box.space.test2:drop() box.space.test3:drop() box.schema.user.revoke('guest', 'write', 'space', '_space') box.schema.user.revoke('guest', 'write', 'space', '_schema') c:close() -- -- gh-3164: netbox connection is not closed and garbage collected -- ever, if reconnect_after is set. 
-- test_run:cmd('start server connecter') test_run:cmd("set variable connect_to to 'connecter.listen'") weak = setmetatable({}, {__mode = 'v'}) -- Create strong and weak reference. Weak is valid until strong -- is valid too. strong = net.connect(connect_to, {reconnect_after = 0.1}) weak.c = strong weak.c:ping() test_run:cmd('stop server connecter') test_run:cmd('cleanup server connecter') -- Check the connection tries to reconnect at least two times. -- 'Cannot assign requested address' is the crutch for running the -- tests in a docker. This error emits instead of -- 'Connection refused' inside a docker. old_log_level = box.cfg.log_level box.cfg{log_level = 6} log.info(string.rep('a', 1000)) test_run:cmd("setopt delimiter ';'") while test_run:grep_log('default', 'Connection refused', 1000) == nil and test_run:grep_log('default', 'Cannot assign requested address', 1000) == nil do fiber.sleep(0.1) end; log.info(string.rep('a', 1000)); while test_run:grep_log('default', 'Connection refused', 1000) == nil and test_run:grep_log('default', 'Cannot assign requested address', 1000) == nil do fiber.sleep(0.1) end; test_run:cmd("setopt delimiter ''"); box.cfg{log_level = old_log_level} collectgarbage('collect') strong.state strong == weak.c -- Remove single strong reference. Now connection must be garbage -- collected. strong = nil collectgarbage('collect') -- Now weak.c is null, because it was weak reference, and the -- connection is deleted by 'collect'. 
weak.c box.schema.user.revoke('guest', 'execute', 'universe') c:close() c = nil -- -- gh-3256 net.box is_nullable and collation options output -- space = box.schema.create_space('test') box.schema.user.grant('guest', 'read,write,execute', 'universe') _ = space:create_index('pk') _ = space:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) c = net:connect(box.cfg.listen) c.space.test.index.sk.parts space:drop() space = box.schema.create_space('test') box.internal.collation.create('test', 'ICU', 'ru-RU') _ = space:create_index('sk', { type = 'tree', parts = {{1, 'str', collation = 'test'}}, unique = true }) c:reload_schema() c.space.test.index.sk.parts c:close() box.internal.collation.drop('test') space:drop() box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/upsert_errinj.test.lua0000664000000000000000000000045713306560010022331 0ustar rootrootenv = require('test_run') test_run = env.new() s = box.schema.create_space('tweedledum') index = s:create_index('pk') errinj = box.error.injection errinj.set("ERRINJ_TUPLE_ALLOC", true) s:upsert({111, '111', 222, '222'}, {{'!', 5, '!'}}) errinj.set("ERRINJ_TUPLE_ALLOC", false) s:select{111} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/on_replace.test.lua0000664000000000000000000001431713306565107021561 0ustar rootroottest_run = require('test_run').new() -- test c and lua triggers: must return only lua triggers #box.space._space:on_replace() function f() print('test') end type(box.space._space:on_replace(f)) #box.space._space:on_replace() ts = box.schema.space.create('test_space') ti = ts:create_index('primary', { type = 'hash' }) type(ts.on_replace) ts.on_replace() ts:on_replace() ts:on_replace(123) function fail(old_tuple, new_tuple) error('test') end type(ts:on_replace(fail)) ts:insert{1, 'b', 'c'} ts:get{1} ts:on_replace(nil, fail) ts:insert{1, 'b', 'c'} ts:get{1} function fail(old_tuple, new_tuple) error('abc') end type(ts:on_replace(fail)) 
ts:insert{2, 'b', 'c'} ts:get{2} o = nil n = nil function save_out(told, tnew) o = told n = tnew end type(ts:on_replace(save_out, fail)) ts:insert{2, 'a', 'b', 'c'} o n ts:replace{2, 'd', 'e', 'f'} o n type(ts:on_replace(function() test = 1 end)) #ts:on_replace() ts:drop() -- test garbage in lua stack #box.space._space:on_replace() function f2() print('test2') end type(box.space._space:on_replace(f2)) #box.space._space:on_replace() -- -- gh-587: crash on attempt to modify space from triggers -- first = box.schema.space.create('first') _= first:create_index('primary') second = box.schema.space.create('second') _ = second:create_index('primary') -- one statement test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() second:replace({2, first:get(1)[2] .. " from on_replace trigger"}) end); test_run:cmd("setopt delimiter ''"); first:replace({1, "hello"}) first:select() second:select() first:on_replace(nil, trigger_id) first:delete(1) second:delete(1) -- multiple statements test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() second:replace({1}) second:replace({2, first:get(1)[2] .. 
" in on_replace trigger"}) second:replace({3}) end); test_run:cmd("setopt delimiter ''"); first:replace({1, "multistatement tx"}) first:select() second:select() first:on_replace(nil, trigger_id) first:delete(1) second:delete(1) second:delete(2) second:delete(3) -- rollback on error test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() second:replace({1, "discarded"}) second:insert({1}) end); test_run:cmd("setopt delimiter ''"); first:replace({1, "rollback"}) first:select() second:select() first:on_replace(nil, trigger_id) -- max recursion depth RECURSION_LIMIT = 0 test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() RECURSION_LIMIT = RECURSION_LIMIT + 1 first:auto_increment({"recursive"}) end); test_run:cmd("setopt delimiter ''"); first:replace({1, "recursive"}) first:select() second:select() RECURSION_LIMIT first:on_replace(nil, trigger_id) -- recursion level = 0 test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() level = level + 1 if level >= RECURSION_LIMIT then return end first:auto_increment({"recursive", level}) end); test_run:cmd("setopt delimiter ''"); first:replace({0, "initial"}) first:select() second:select() RECURSION_LIMIT first:on_replace(nil, trigger_id) first:truncate() second:truncate() -- transaction control test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() box.commit() first:auto_increment({"recursive", level}) end); test_run:cmd("setopt delimiter ''"); first:replace({0, "initial"}) first:select() second:select() first:on_replace(nil, trigger_id) test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() box.rollback() first:auto_increment({"recursive", level}) end); test_run:cmd("setopt delimiter ''"); first:replace({0, "initial"}) first:select() second:select() first:on_replace(nil, trigger_id) test_run:cmd("setopt delimiter ';'"); trigger_id = first:on_replace(function() box.begin() first:auto_increment({"recursive", 
level}) box.commit() end); test_run:cmd("setopt delimiter ''"); first:replace({0, "initial"}) first:select() second:select() first:on_replace(nil, trigger_id) first:drop() second:drop() s = box.schema.space.create('test_on_repl_ddl') _ = s:create_index('pk') t = s:on_replace(function () box.schema.space.create('some_space') end) s:replace({1, 2}) t = s:on_replace(function () s:create_index('sec') end, t) s:replace({2, 3}) t = s:on_replace(function () box.schema.user.create('newu') end, t) s:replace({3, 4}) t = s:on_replace(function () box.schema.role.create('newr') end, t) s:replace({4, 5}) t = s:on_replace(function () s:drop() end, t) s:replace({5, 6}) t = s:on_replace(function () box.schema.func.create('newf') end, t) s:replace({6, 7}) t = s:on_replace(function () box.schema.user.grant('guest', 'read,write', 'space', 'test_on_repl_ddl') end, t) s:replace({7, 8}) t = s:on_replace(function () s:rename('newname') end, t) s:replace({8, 9}) t = s:on_replace(function () s.index.pk:rename('newname') end, t) s:replace({9, 10}) s:select() s:drop() -- -- gh-3020: sub-statement rollback -- s1 = box.schema.space.create('test1') _ = s1:create_index('pk') s2 = box.schema.space.create('test2') _ = s2:create_index('pk') s3 = box.schema.space.create('test3') _ = s3:create_index('pk') test_run:cmd("setopt delimiter ';'"); x1 = 1; x2 = 1; _ = s1:on_replace(function(old, new) for i = 1, 3 do s2:insert{x1} x1 = x1 + 1 end if new[2] == 'fail' then error('fail') end pcall(s2.insert, s2, {123, 'fail'}) end); _ = s2:on_replace(function(old, new) for i = 1, 3 do s3:insert{x2} x2 = x2 + 1 end if new[2] == 'fail' then error('fail') end end); box.begin() s1:insert{1} pcall(s1.insert, s1, {123, 'fail'}) box.commit(); test_run:cmd("setopt delimiter ''"); s1:select() s2:select() s3:select() s1:drop() s2:drop() s3:drop() -- -- gh-3020: trigger chaining -- s1 = box.schema.space.create('test1') _ = s1:create_index('pk') s2 = box.schema.space.create('test2') _ = s2:create_index('pk') s3 = 
box.schema.space.create('test3') _ = s3:create_index('pk') x = 1 _ = s1:on_replace(function(old, new) s2:insert(new:update{{'!', 2, x}}) x = x + 1 end) _ = s1:on_replace(function(old, new) s3:insert(new:update{{'!', 2, x}}) x = x + 1 end) box.begin() s1:insert{1} s1:insert{2} s1:insert{3} box.commit() s1:select() s2:select() s3:select() s1:drop() s2:drop() s3:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/hash_multipart.test.lua0000664000000000000000000000321713306560010022457 0ustar rootrootutils = dofile('utils.lua') hash = box.schema.space.create('tweedledum') tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned', 2, 'string', 3, 'unsigned'}, unique = true }) tmp = hash:create_index('unique', { type = 'hash', parts = {3, 'unsigned', 5, 'unsigned'}, unique = true }) -- insert rows hash:insert{0, 'foo', 0, '', 1} hash:insert{0, 'foo', 1, '', 1} hash:insert{1, 'foo', 0, '', 2} hash:insert{1, 'foo', 1, '', 2} hash:insert{0, 'bar', 0, '', 3} hash:insert{0, 'bar', 1, '', 3} hash:insert{1, 'bar', 0, '', 4} hash:insert{1, 'bar', 1, '', 4} -- try to insert a row with a duplicate key hash:insert{1, 'bar', 1, '', 5} -- output all rows env = require('test_run') test_run = env.new() test_run:cmd("setopt delimiter ';'") function select_all() local result = {} local tuple, v for tuple, v in hash:pairs() do table.insert(result, v) end return result end; test_run:cmd("setopt delimiter ''"); utils.sort(select_all()) select_all = nil -- primary index select hash.index['primary']:get{1, 'foo', 0} hash.index['primary']:get{1, 'bar', 0} -- primary index select with missing part hash.index['primary']:get{1, 'foo'} -- primary index select with extra part hash.index['primary']:get{1, 'foo', 0, 0} -- primary index select with wrong type hash.index['primary']:get{1, 'foo', 'baz'} -- secondary index select hash.index['unique']:get{1, 4} -- secondary index select with no such key hash.index['unique']:get{1, 5} -- secondary index select with missing part 
hash.index['unique']:get{1} -- secondary index select with wrong type hash.index['unique']:select{1, 'baz'} -- cleanup hash:truncate() hash:len() hash:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/net.box.result0000664000000000000000000011447213306565107020611 0ustar rootrootremote = require 'net.box' --- ... fiber = require 'fiber' --- ... log = require 'log' --- ... msgpack = require 'msgpack' --- ... env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... function x_select(cn, space_id, index_id, iterator, offset, limit, key, opts) return cn:_request('select', opts, space_id, index_id, iterator, offset, limit, key) end function x_fatal(cn) cn._transport.perform_request(nil, nil, 'inject', nil, '\x80') end test_run:cmd("setopt delimiter ''"); --- ... LISTEN = require('uri').parse(box.cfg.listen) --- ... space = box.schema.space.create('net_box_test_space') --- ... index = space:create_index('primary', { type = 'tree' }) --- ... -- low level connection log.info("create connection") --- ... cn = remote.connect(LISTEN.host, LISTEN.service) --- ... log.info("state is %s", cn.state) --- ... cn:ping() --- - true ... log.info("ping is done") --- ... cn:ping() --- - true ... log.info("ping is done") --- ... cn:ping() --- - true ... -- check permissions cn:call('unexists_procedure') --- - error: Execute access to function 'unexists_procedure' is denied for user 'guest' ... function test_foo(a,b,c) return { {{ [a] = 1 }}, {{ [b] = 2 }}, c } end --- ... cn:call('test_foo', {'a', 'b', 'c'}) --- - error: Execute access to function 'test_foo' is denied for user 'guest' ... cn:eval('return 2+2') --- - error: Execute access to universe '' is denied for user 'guest' ... cn:close() --- ... -- connect and call without usage access box.schema.user.grant('guest','execute','universe') --- ... 
box.schema.user.revoke('guest','usage','universe') --- ... box.session.su("guest") --- ... cn = remote.connect(LISTEN.host, LISTEN.service) --- ... cn:call('test_foo', {'a', 'b', 'c'}) --- - error: Usage access to universe '' is denied for user 'guest' ... box.session.su("admin") --- ... box.schema.user.grant('guest','usage','universe') --- ... cn:close() --- ... cn = remote.connect(box.cfg.listen) --- ... cn:call('unexists_procedure') --- - error: Procedure 'unexists_procedure' is not defined ... cn:call('test_foo', {'a', 'b', 'c'}) --- - [[{'a': 1}], [{'b': 2}], 'c'] ... cn:call(nil, {'a', 'b', 'c'}) --- - error: Procedure 'nil' is not defined ... cn:eval('return 2+2') --- - 4 ... cn:eval('return 1, 2, 3') --- - 1 - 2 - 3 ... cn:eval('return ...', {1, 2, 3}) --- - 1 - 2 - 3 ... cn:eval('return { k = "v1" }, true, { xx = 10, yy = 15 }, nil') --- - {'k': 'v1'} - true - {'yy': 15, 'xx': 10} - null ... cn:eval('return nil') --- - null ... cn:eval('return') --- ... cn:eval('error("exception")') --- - error: 'eval:1: exception' ... cn:eval('box.error(0)') --- - error: Unknown error ... cn:eval('!invalid expression') --- - error: 'eval:1: unexpected symbol near ''!''' ... -- box.commit() missing at return of CALL/EVAL function no_commit() box.begin() fiber.sleep(0.001) end --- ... cn:call('no_commit') --- - error: Transaction is active at return from function ... cn:eval('no_commit()') --- - error: Transaction is active at return from function ... remote.self:eval('return 1+1, 2+2') --- - 2 - 4 ... remote.self:eval('return') --- ... remote.self:eval('error("exception")') --- - error: '[string "error("exception")"]:1: exception' ... remote.self:eval('box.error(0)') --- - error: Unknown error ... remote.self:eval('!invalid expression') --- - error: '[string "return !invalid expression"]:1: unexpected symbol near ''!''' ... -- -- gh-822: net.box.call should roll back local transaction on error -- _ = box.schema.space.create('gh822') --- ... 
_ = box.space.gh822:create_index('primary') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... -- rollback on invalid function function rollback_on_invalid_function() box.begin() box.space.gh822:insert{1, "netbox_test"} pcall(remote.self.call, remote.self, 'invalid_function') return box.space.gh822:get(1) == nil end; --- ... rollback_on_invalid_function(); --- - true ... -- rollback on call error function test_error() error('Some error') end; --- ... function rollback_on_call_error() box.begin() box.space.gh822:insert{1, "netbox_test"} pcall(remote.self.call, remote.self, 'test_error') return box.space.gh822:get(1) == nil end; --- ... rollback_on_call_error(); --- - true ... -- rollback on eval function rollback_on_eval_error() box.begin() box.space.gh822:insert{1, "netbox_test"} pcall(remote.self.eval, remote.self, "error('Some error')") return box.space.gh822:get(1) == nil end; --- ... rollback_on_eval_error(); --- - true ... test_run:cmd("setopt delimiter ''"); --- - true ... box.space.gh822:drop() --- ... box.schema.user.revoke('guest','execute','universe') --- ... box.schema.user.grant('guest','read,write,execute','universe') --- ... cn:close() --- ... cn = remote.connect(box.cfg.listen) --- ... x_select(cn, space.id, space.index.primary.id, box.index.EQ, 0, 0xFFFFFFFF, 123) --- - [] ... space:insert{123, 345} --- - [123, 345] ... x_select(cn, space.id, space.index.primary.id, box.index.EQ, 0, 0, 123) --- - [] ... x_select(cn, space.id, space.index.primary.id, box.index.EQ, 0, 1, 123) --- - - [123, 345] ... x_select(cn, space.id, space.index.primary.id, box.index.EQ, 1, 1, 123) --- - [] ... cn.space[space.id] ~= nil --- - true ... cn.space.net_box_test_space ~= nil --- - true ... cn.space.net_box_test_space ~= nil --- - true ... cn.space.net_box_test_space.index ~= nil --- - true ... cn.space.net_box_test_space.index.primary ~= nil --- - true ... cn.space.net_box_test_space.index[space.index.primary.id] ~= nil --- - true ... 
cn.space.net_box_test_space.index.primary:select(123) --- - - [123, 345] ... cn.space.net_box_test_space.index.primary:select(123, { limit = 0 }) --- - [] ... cn.space.net_box_test_space.index.primary:select(nil, { limit = 1, }) --- - - [123, 345] ... cn.space.net_box_test_space:insert{234, 1,2,3} --- - [234, 1, 2, 3] ... cn.space.net_box_test_space:insert{234, 1,2,3} --- - error: Duplicate key exists in unique index 'primary' in space 'net_box_test_space' ... cn.space.net_box_test_space.insert{234, 1,2,3} --- - error: 'builtin/box/schema.lua..."]:: Use space:insert(...) instead of space.insert(...)' ... cn.space.net_box_test_space:replace{354, 1,2,3} --- - [354, 1, 2, 3] ... cn.space.net_box_test_space:replace{354, 1,2,4} --- - [354, 1, 2, 4] ... cn.space.net_box_test_space:select{123} --- - - [123, 345] ... space:select({123}, { iterator = 'GE' }) --- - - [123, 345] - [234, 1, 2, 3] - [354, 1, 2, 4] ... cn.space.net_box_test_space:select({123}, { iterator = 'GE' }) --- - - [123, 345] - [234, 1, 2, 3] - [354, 1, 2, 4] ... cn.space.net_box_test_space:select({123}, { iterator = 'GT' }) --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... cn.space.net_box_test_space:select({123}, { iterator = 'GT', limit = 1 }) --- - - [234, 1, 2, 3] ... cn.space.net_box_test_space:select({123}, { iterator = 'GT', limit = 1, offset = 1 }) --- - - [354, 1, 2, 4] ... cn.space.net_box_test_space:select{123} --- - - [123, 345] ... cn.space.net_box_test_space:update({123}, { { '+', 2, 1 } }) --- - [123, 346] ... cn.space.net_box_test_space:update(123, { { '+', 2, 1 } }) --- - [123, 347] ... cn.space.net_box_test_space:select{123} --- - - [123, 347] ... cn.space.net_box_test_space:insert(cn.space.net_box_test_space:get{123}:update{ { '=', 1, 2 } }) --- - [2, 347] ... cn.space.net_box_test_space:delete{123} --- - [123, 347] ... cn.space.net_box_test_space:select{2} --- - - [2, 347] ... cn.space.net_box_test_space:select({234}, { iterator = 'LT' }) --- - - [2, 347] ... 
cn.space.net_box_test_space:update({1}, { { '+', 2, 2 } }) --- ... cn.space.net_box_test_space:delete{1} --- ... cn.space.net_box_test_space:delete{2} --- - [2, 347] ... cn.space.net_box_test_space:delete{2} --- ... -- test one-based indexing in splice operation (see update.test.lua) cn.space.net_box_test_space:replace({10, 'abcde'}) --- - [10, 'abcde'] ... cn.space.net_box_test_space:update(10, {{':', 2, 0, 0, '!'}}) --- - error: 'SPLICE error on field 2: offset is out of bound' ... cn.space.net_box_test_space:update(10, {{':', 2, 1, 0, '('}}) --- - [10, '(abcde'] ... cn.space.net_box_test_space:update(10, {{':', 2, 2, 0, '({'}}) --- - [10, '(({abcde'] ... cn.space.net_box_test_space:update(10, {{':', 2, -1, 0, ')'}}) --- - [10, '(({abcde)'] ... cn.space.net_box_test_space:update(10, {{':', 2, -2, 0, '})'}}) --- - [10, '(({abcde}))'] ... cn.space.net_box_test_space:delete{10} --- - [10, '(({abcde}))'] ... cn.space.net_box_test_space:select({}, { iterator = 'ALL' }) --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... -- gh-841: net.box uses incorrect iterator type for select with no arguments cn.space.net_box_test_space:select() --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... cn.space.net_box_test_space.index.primary:min() --- - [234, 1, 2, 3] ... cn.space.net_box_test_space.index.primary:min(354) --- - [354, 1, 2, 4] ... cn.space.net_box_test_space.index.primary:max() --- - [354, 1, 2, 4] ... cn.space.net_box_test_space.index.primary:max(234) --- - [234, 1, 2, 3] ... cn.space.net_box_test_space.index.primary:count() --- - 2 ... cn.space.net_box_test_space.index.primary:count(354) --- - 1 ... cn.space.net_box_test_space:get(354) --- - [354, 1, 2, 4] ... -- reconnects after errors -- -- 1. no reconnect x_fatal(cn) --- ... cn.state --- - error ... cn:ping() --- - false ... cn:call('test_foo') --- - error: Peer closed ... cn:wait_state('active') --- - false ... -- -- 2 reconnect cn = remote.connect(LISTEN.host, LISTEN.service, { reconnect_after = .1 }) --- ... 
cn.space ~= nil --- - true ... cn.space.net_box_test_space:select({}, { iterator = 'ALL' }) --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... x_fatal(cn) --- ... cn:wait_connected() --- - true ... cn:wait_state('active') --- - true ... cn:wait_state({active=true}) --- - true ... cn:ping() --- - true ... cn.state --- - active ... cn.space.net_box_test_space:select({}, { iterator = 'ALL' }) --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... x_fatal(cn) --- ... x_select(cn, space.id, 0, box.index.ALL, 0, 0xFFFFFFFF, {}) --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... cn.state --- - active ... cn:ping() --- - true ... -- -- dot-new-method cn1 = remote.new(LISTEN.host, LISTEN.service) --- ... x_select(cn1, space.id, 0, box.index.ALL, 0, 0xFFFFFFF, {}) --- - - [234, 1, 2, 3] - [354, 1, 2, 4] ... cn1:close() --- ... -- -- error while waiting for response type(fiber.create(function() fiber.sleep(.5) x_fatal(cn) end)) --- - userdata ... function pause() fiber.sleep(10) return true end --- ... cn:call('pause') --- - error: Peer closed ... cn:call('test_foo', {'a', 'b', 'c'}) --- - [[{'a': 1}], [{'b': 2}], 'c'] ... -- call remote.self:call('test_foo', {'a', 'b', 'c'}) --- - - - a: 1 - - b: 2 - c ... cn:call('test_foo', {'a', 'b', 'c'}) --- - [[{'a': 1}], [{'b': 2}], 'c'] ... -- long replies function long_rep() return { 1, string.rep('a', 5000) } end --- ... res = cn:call('long_rep') --- ... res[1] == 1 --- - true ... res[2] == string.rep('a', 5000) --- - true ... function long_rep() return { 1, string.rep('a', 50000) } end --- ... res = cn:call('long_rep') --- ... res[1] == 1 --- - true ... res[2] == string.rep('a', 50000) --- - true ... -- a.b.c.d u = '84F7BCFA-079C-46CC-98B4-F0C821BE833E' --- ... X = {} --- ... X.X = X --- ... function X.fn(x,y) return y or x end --- ... cn:call('X.fn', {u}) --- - 84F7BCFA-079C-46CC-98B4-F0C821BE833E ... cn:call('X.X.X.X.X.X.X.fn', {u}) --- - 84F7BCFA-079C-46CC-98B4-F0C821BE833E ... cn:call('X.X.X.X:fn', {u}) --- - 84F7BCFA-079C-46CC-98B4-F0C821BE833E ... 
-- auth cn = remote.connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = '123', wait_connected = true }) --- ... cn:is_connected() --- - false ... cn.error --- - User 'netbox' is not found ... cn.state --- - error ... box.schema.user.create('netbox', { password = 'test' }) --- ... box.schema.user.grant('netbox', 'read, write, execute', 'universe'); --- ... cn = remote.connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = 'test' }) --- ... cn.state --- - active ... cn.error --- - null ... cn:ping() --- - true ... function ret_after(to) fiber.sleep(to) return {{to}} end --- ... cn:ping({timeout = 1.00}) --- - true ... cn:ping({timeout = 1e-9}) --- - false ... cn:ping() --- - true ... remote_space = cn.space.net_box_test_space --- ... remote_pk = remote_space.index.primary --- ... remote_space:insert({0}, { timeout = 1.00 }) --- - [0] ... remote_space:insert({1}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_space:insert({2}) --- - [2] ... remote_space:replace({0}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_space:replace({1}) --- - [1] ... remote_space:replace({2}, { timeout = 1.00 }) --- - [2] ... remote_space:upsert({3}, {}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_space:upsert({4}, {}) --- ... remote_space:upsert({5}, {}, { timeout = 1.00 }) --- ... remote_space:upsert({3}, {}) --- ... remote_space:update({3}, {}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_space:update({4}, {}) --- - [4] ... remote_space:update({5}, {}, { timeout = 1.00 }) --- - [5] ... remote_space:update({3}, {}) --- - [3] ... remote_pk:update({5}, {}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:update({4}, {}) --- - [4] ... remote_pk:update({3}, {}, { timeout = 1.00 }) --- - [3] ... remote_pk:update({5}, {}) --- - [5] ... remote_space:get({0}) --- - [0] ... remote_space:get({1}, { timeout = 1.00 }) --- - [1] ... 
remote_space:get({2}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:get({3}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:get({4}) --- - [4] ... remote_pk:get({5}, { timeout = 1.00 }) --- - [5] ... remote_space:select({2}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_space:select({2}, { timeout = 1.00 }) --- - - [2] ... remote_space:select({2}) --- - - [2] ... remote_pk:select({2}, { timeout = 1.00 }) --- - - [2] ... remote_pk:select({2}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:select({2}) --- - - [2] ... remote_space:select({5}, { timeout = 1.00, iterator = 'LE', limit = 5 }) --- - - [5] - [4] - [3] - [2] - [1] ... remote_space:select({5}, { iterator = 'LE', limit = 5}) --- - - [5] - [4] - [3] - [2] - [1] ... remote_space:select({5}, { timeout = 1e-9, iterator = 'LE', limit = 5 }) --- - error: Timeout exceeded ... remote_pk:select({2}, { timeout = 1.00, iterator = 'LE', limit = 5 }) --- - - [2] - [1] - [0] ... remote_pk:select({2}, { iterator = 'LE', limit = 5}) --- - - [2] - [1] - [0] ... remote_pk:select({2}, { timeout = 1e-9, iterator = 'LE', limit = 5 }) --- - error: Timeout exceeded ... remote_pk:count({2}, { timeout = 1.00}) --- - 1 ... remote_pk:count({2}, { timeout = 1e-9}) --- - error: Timeout exceeded ... remote_pk:count({2}) --- - 1 ... remote_pk:count({2}, { timeout = 1.00, iterator = 'LE' }) --- - 1 ... remote_pk:count({2}, { iterator = 'LE'}) --- - 1 ... remote_pk:count({2}, { timeout = 1e-9, iterator = 'LE' }) --- - error: Timeout exceeded ... remote_pk:min(nil, { timeout = 1.00 }) --- - [0] ... remote_pk:min(nil, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:min(nil) --- - [0] ... remote_pk:min({0}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:min({1}) --- - [1] ... remote_pk:min({2}, { timeout = 1.00 }) --- - [2] ... remote_pk:max(nil) --- - [354, 1, 2, 4] ... remote_pk:max(nil, { timeout = 1e-9 }) --- - error: Timeout exceeded ... 
remote_pk:max(nil, { timeout = 1.00 }) --- - [354, 1, 2, 4] ... remote_pk:max({0}, { timeout = 1.00 }) --- - [0] ... remote_pk:max({1}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... remote_pk:max({2}) --- - [2] ... _ = remote_space:delete({0}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... _ = remote_pk:delete({0}, { timeout = 1.00 }) --- ... _ = remote_space:delete({1}, { timeout = 1.00 }) --- ... _ = remote_pk:delete({1}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... _ = remote_space:delete({2}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... _ = remote_pk:delete({2}) --- ... _ = remote_pk:delete({3}) --- ... _ = remote_pk:delete({4}) --- ... _ = remote_pk:delete({5}) --- ... remote_space:get(0) --- ... remote_space:get(1) --- ... remote_space:get(2) --- ... remote_space = nil --- ... cn:call('ret_after', {0.01}, { timeout = 1.00 }) --- - [[0.01]] ... cn:call('ret_after', {1.00}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... cn:eval('return ret_after(...)', {0.01}, { timeout = 1.00 }) --- - [[0.01]] ... cn:eval('return ret_after(...)', {1.00}, { timeout = 1e-9 }) --- - error: Timeout exceeded ... -- -- :timeout() -- @deprecated since 1.7.4 -- cn:timeout(1).space.net_box_test_space.index.primary:select{234} --- - - [234, 1, 2, 3] ... cn:call('ret_after', {.01}) --- - [[0.01]] ... cn:timeout(1):call('ret_after', {.01}) --- - [[0.01]] ... cn:timeout(.01):call('ret_after', {1}) --- - error: Timeout exceeded ... cn = remote:timeout(0.0000000001):connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = '123' }) --- ... cn:close() --- ... cn = remote:timeout(1):connect(LISTEN.host, LISTEN.service, { user = 'netbox', password = '123' }) --- ... remote.self:ping() --- - true ... remote.self.space.net_box_test_space:select{234} --- - - [234, 1, 2, 3] ... remote.self:timeout(123).space.net_box_test_space:select{234} --- - - [234, 1, 2, 3] ... remote.self:is_connected() --- - true ... remote.self:wait_connected() --- - true ... 
cn:close() --- ... -- cleanup database after tests space:drop() --- ... -- #1545 empty password cn = remote.connect(LISTEN.host, LISTEN.service, { user = 'test' }) --- ... cn ~= nil --- - true ... cn:close() --- ... cn = remote.connect(LISTEN.host, LISTEN.service, { password = 'test' }) --- - error: 'net.box: user is not defined' ... cn ~= nil --- - true ... cn:close() --- ... -- #544 usage for remote[point]method cn = remote.connect(LISTEN.host, LISTEN.service) --- ... cn:eval('return true') --- - true ... cn.eval('return true') --- - error: 'Use remote:eval(...) instead of remote.eval(...):' ... cn.ping() --- - error: 'Use remote:ping(...) instead of remote.ping(...):' ... cn:close() --- ... remote.self:eval('return true') --- - true ... remote.self.eval('return true') --- - error: 'Use remote:eval(...) instead of remote.eval(...):' ... -- uri as the first argument uri = string.format('%s:%s@%s:%s', 'netbox', 'test', LISTEN.host, LISTEN.service) --- ... cn = remote.new(uri) --- ... cn:ping() --- - true ... cn:close() --- ... uri = string.format('%s@%s:%s', 'netbox', LISTEN.host, LISTEN.service) --- ... cn = remote.new(uri) --- ... cn ~= nil, cn.state, cn.error --- - true - error - Incorrect password supplied for user 'netbox' ... cn:close() --- ... -- don't merge creds from uri & opts remote.new(uri, { password = 'test' }) --- - error: 'net.box: user is not defined' ... cn = remote.new(uri, { user = 'netbox', password = 'test' }) --- ... cn:ping() --- - true ... cn:close() --- ... box.schema.user.revoke('netbox', 'read, write, execute', 'universe'); --- ... box.schema.user.drop('netbox') --- ... -- #594: bad argument #1 to 'setmetatable' (table expected, got number) test_run:cmd("setopt delimiter ';'") --- - true ... function gh594() local cn = remote.connect(box.cfg.listen) local ping = fiber.create(function() cn:ping() end) cn:call('dostring', {'return 2 + 2'}) cn:close() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... gh594() --- ... 
-- #636: Reload schema on demand sp = box.schema.space.create('test_old') --- ... _ = sp:create_index('primary') --- ... sp:insert{1, 2, 3} --- - [1, 2, 3] ... con = remote.new(box.cfg.listen) --- ... con:ping() --- - true ... con.space.test_old:select{} --- - - [1, 2, 3] ... con.space.test == nil --- - true ... sp = box.schema.space.create('test') --- ... _ = sp:create_index('primary') --- ... sp:insert{2, 3, 4} --- - [2, 3, 4] ... con.space.test == nil --- - true ... con:reload_schema() --- ... con.space.test:select{} --- - - [2, 3, 4] ... box.space.test:drop() --- ... box.space.test_old:drop() --- ... con:close() --- ... name = string.match(arg[0], "([^,]+)%.lua") --- ... file_log = require('fio').open(name .. '.log', {'O_RDONLY', 'O_NONBLOCK'}) --- ... file_log:seek(0, 'SEEK_END') ~= 0 --- - true ... test_run:cmd("setopt delimiter ';'") --- - true ... _ = fiber.create( function() local conn = require('net.box').new(box.cfg.listen) conn:call('no_such_function', {}) conn:close() end ); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_run:grep_log("default", "ER_NO_SUCH_PROC") --- - ER_NO_SUCH_PROC ... -- gh-983 selecting a lot of data crashes the server or hangs the -- connection -- gh-983 test case: iproto connection selecting a lot of data _ = box.schema.space.create('test', { temporary = true }) --- ... _ = box.space.test:create_index('primary', {type = 'TREE', parts = {1,'unsigned'}}) --- ... 
data1k = "aaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhhaaaabbbbccccddddeeeeffffgggghhhh" --- ... for i = 0,10000 do box.space.test:insert{i, data1k} end --- ... net = require('net.box') --- ... c = net:connect(box.cfg.listen) --- ... r = c.space.test:select(nil, {limit=5000}) --- ... box.space.test:drop() --- ... -- gh-970 gh-971 UPSERT over network _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary', {type = 'TREE', parts = {1,'unsigned'}}) --- ... _ = box.space.test:create_index('covering', {type = 'TREE', parts = {1,'unsigned',3,'string',2,'unsigned'}}) --- ... _ = box.space.test:insert{1, 2, "string"} --- ... c = net:connect(box.cfg.listen) --- ... c.space.test:select{} --- - - [1, 2, 'string'] ... c.space.test:upsert({1, 2, 'nothing'}, {{'+', 2, 1}}) -- common update --- ... c.space.test:select{} --- - - [1, 3, 'string'] ... c.space.test:upsert({2, 4, 'something'}, {{'+', 2, 1}}) -- insert --- ... c.space.test:select{} --- - - [1, 3, 'string'] - [2, 4, 'something'] ... 
c.space.test:upsert({2, 4, 'nothing'}, {{'+', 3, 100500}}) -- wrong operation --- ... c.space.test:select{} --- - - [1, 3, 'string'] - [2, 4, 'something'] ... -- gh-1729 net.box index metadata incompatible with local metadata c.space.test.index.primary.parts --- - - type: unsigned is_nullable: false fieldno: 1 ... c.space.test.index.covering.parts --- - - type: unsigned is_nullable: false fieldno: 1 - type: string is_nullable: false fieldno: 3 - type: unsigned is_nullable: false fieldno: 2 ... box.space.test:drop() --- ... -- CALL vs CALL_16 in connect options function echo(...) return ... end --- ... c = net.connect(box.cfg.listen) --- ... c:call('echo', {42}) --- - 42 ... c:eval('return echo(...)', {42}) --- - 42 ... -- invalid arguments c:call('echo', 42) --- - error: 'builtin/box/net_box.lua..."]:: Use remote:call(func_name, {arg1, arg2, ...}, opts) instead of remote:call(func_name, arg1, arg2, ...)' ... c:eval('return echo(...)', 42) --- - error: 'builtin/box/net_box.lua..."]:: Use remote:eval(expression, {arg1, arg2, ...}, opts) instead of remote:eval(expression, arg1, arg2, ...)' ... c:close() --- ... c = net.connect(box.cfg.listen, {call_16 = true}) --- ... c:call('echo', 42) --- - - [42] ... c:eval('return echo(...)', 42) --- - 42 ... c:close() --- ... -- -- gh-2195 export pure msgpack from net.box -- space = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary') --- ... c = net.connect(box.cfg.listen) --- ... ibuf = require('buffer').ibuf() --- ... c:ping() --- - true ... c.space.test ~= nil --- - true ... c.space.test:replace({1, 'hello'}) --- - [1, 'hello'] ... -- replace c.space.test:replace({2}, {buffer = ibuf}) --- - 9 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: [[2]]} ... -- insert c.space.test:insert({3}, {buffer = ibuf}) --- - 9 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: [[3]]} ... 
-- update c.space.test:update({3}, {}, {buffer = ibuf}) --- - 9 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: [[3]]} ... c.space.test.index.primary:update({3}, {}, {buffer = ibuf}) --- - 9 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: [[3]]} ... -- upsert c.space.test:upsert({4}, {}, {buffer = ibuf}) --- ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: []} ... -- delete c.space.test:upsert({4}, {}, {buffer = ibuf}) --- ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: []} ... -- select c.space.test.index.primary:select({3}, {iterator = 'LE', buffer = ibuf}) --- - 19 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: [[3], [2], [1, 'hello']]} ... -- select len = c.space.test:select({}, {buffer = ibuf}) --- ... ibuf.rpos + len == ibuf.wpos --- - true ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... ibuf.rpos == ibuf.wpos --- - true ... len --- - 21 ... result --- - {48: [[1, 'hello'], [2], [3], [4]]} ... -- call c:call("echo", {1, 2, 3}, {buffer = ibuf}) --- - 10 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: [1, 2, 3]} ... c:call("echo", {}, {buffer = ibuf}) --- - 7 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: []} ... c:call("echo", nil, {buffer = ibuf}) --- - 7 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: []} ... -- eval c:eval("echo(...)", {1, 2, 3}, {buffer = ibuf}) --- - 7 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: []} ... c:eval("echo(...)", {}, {buffer = ibuf}) --- - 7 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... result --- - {48: []} ... c:eval("echo(...)", nil, {buffer = ibuf}) --- - 7 ... result, ibuf.rpos = msgpack.decode_unchecked(ibuf.rpos) --- ... 
result --- - {48: []} ... -- unsupported methods c.space.test:get({1}, { buffer = ibuf}) --- - error: 'builtin/box/net_box.lua..."]:: index:get() doesn''t support `buffer` argument' ... c.space.test.index.primary:min({}, { buffer = ibuf}) --- - error: 'builtin/box/net_box.lua..."]:: index:min() doesn''t support `buffer` argument' ... c.space.test.index.primary:max({}, { buffer = ibuf}) --- - error: 'builtin/box/net_box.lua..."]:: index:max() doesn''t support `buffer` argument' ... c.space.test.index.primary:count({}, { buffer = ibuf}) --- - error: 'builtin/box/net_box.lua..."]:: index:count() doesn''t support `buffer` argument' ... c.space.test.index.primary:get({1}, { buffer = ibuf}) --- - error: 'builtin/box/net_box.lua..."]:: index:get() doesn''t support `buffer` argument' ... -- error handling rpos, wpos = ibuf.rpos, ibuf.wpos --- ... c.space.test:insert({1}, {buffer = ibuf}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... ibuf.rpos == rpos, ibuf.wpos == wpos --- - true - true ... ibuf = nil --- ... c:close() --- ... space:drop() --- ... -- gh-1904 net.box hangs in :close() if a fiber was cancelled -- while blocked in :_wait_state() in :_request() options = {user = 'netbox', password = 'badpass', wait_connected = false, reconnect_after = 0.01} --- ... c = net:new(box.cfg.listen, options) --- ... f = fiber.create(function() c:call("") end) --- ... fiber.sleep(0.01) --- ... f:cancel(); c:close() --- ... -- check for on_schema_reload callback test_run:cmd("setopt delimiter ';'") --- - true ... do local a = 0 function osr_cb() a = a + 1 end local con = net.new(box.cfg.listen, { wait_connected = false }) con:on_schema_reload(osr_cb) con:wait_connected() con.space._schema:select{} box.schema.space.create('misisipi') box.space.misisipi:drop() con.space._schema:select{} con:close() con = nil return a end; --- - 2 ... 
do local a = 0 function osr_cb() a = a + 1 end local con = net.new(box.cfg.listen, { wait_connected = true }) con:on_schema_reload(osr_cb) con.space._schema:select{} box.schema.space.create('misisipi') box.space.misisipi:drop() con.space._schema:select{} con:close() con = nil return a end; --- - 1 ... test_run:cmd("setopt delimiter ''"); --- - true ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... -- Tarantool < 1.7.1 compatibility (gh-1533) c = net.new(box.cfg.listen) --- ... c:ping() --- - true ... c:close() --- ... -- Test for connect_timeout > 0 in netbox connect test_run:cmd("setopt delimiter ';'"); --- - true ... greeting = "Tarantool 1.7.3 (Lua console)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" .. "type 'help' for interactive help~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"; --- ... socket = require('socket'); --- ... srv = socket.tcp_server('localhost', 3392, { handler = function(fd) local fiber = require('fiber') fiber.sleep(0.1) fd:write(greeting) end }); --- ... -- we must get timeout nb = net.new('localhost:3392', { wait_connected = true, console = true, connect_timeout = 0.01 }); --- ... nb.error == "Timeout exceeded" or nb.error == "Connection timed out"; --- - true ... nb:close(); --- ... -- we must get peer closed nb = net.new('localhost:3392', { wait_connected = true, console = true, connect_timeout = 0.2 }); --- ... nb.error ~= "Timeout exceeded"; --- - true ... nb:close(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... srv:close() --- - true ... test_run:cmd("clear filter") --- - true ... -- -- gh-2402 net.box doesn't support space:format() -- space = box.schema.space.create('test', {format={{name="id", type="unsigned"}}}) --- ... space ~= nil --- - true ... _ = box.space.test:create_index('primary') --- ... box.schema.user.grant('guest','read,write,execute','space', 'test') --- ... c = net.connect(box.cfg.listen) --- ... c:ping() --- - true ... c.space.test ~= nil --- - true ... format = c.space.test:format() --- ... 
format[1] ~= nil --- - true ... format[1].name == "id" --- - true ... format[1].type == "unsigned" --- - true ... c.space.test:format({}) --- - error: net.box does not support setting space format ... c:close() --- ... space:drop() --- ... -- -- Check that it's possible to get connection object form net.box space -- space = box.schema.space.create('test', {format={{name="id", type="unsigned"}}}) --- ... space ~= nil --- - true ... _ = box.space.test:create_index('primary') --- ... box.schema.user.grant('guest','read,write,execute','space', 'test') --- ... c = net.connect(box.cfg.listen) --- ... c:ping() --- - true ... c.space.test ~= nil --- - true ... c.space.test.connection == c --- - true ... box.schema.user.revoke('guest','read,write,execute','space', 'test') --- ... c:close() --- ... -- -- gh-2642: box.session.type() -- box.schema.user.grant('guest','read,write,execute','universe') --- ... c = net.connect(box.cfg.listen) --- ... c:call("box.session.type") --- - binary ... c:close() --- ... -- -- On_connect/disconnect triggers. -- test_run:cmd('create server connecter with script = "box/proxy.lua"') --- - true ... test_run:cmd('start server connecter') --- - true ... test_run:cmd("set variable connect_to to 'connecter.listen'") --- - true ... conn = net.connect(connect_to, { reconnect_after = 0.1 }) --- ... conn.state --- - active ... connected_cnt = 0 --- ... disconnected_cnt = 0 --- ... function on_connect() connected_cnt = connected_cnt + 1 end --- ... function on_disconnect() disconnected_cnt = disconnected_cnt + 1 end --- ... conn:on_connect(on_connect) --- ... conn:on_disconnect(on_disconnect) --- ... test_run:cmd('stop server connecter') --- - true ... test_run:cmd('start server connecter') --- - true ... while conn.state ~= 'active' do fiber.sleep(0.1) end --- ... connected_cnt --- - 1 ... disconnected_cnt --- - 1 ... conn:close() --- ... disconnected_cnt --- - 2 ... test_run:cmd('stop server connecter') --- - true ... 
-- -- gh-2401 update pseudo objects not replace them -- space:drop() --- ... space = box.schema.space.create('test') --- ... c = net.connect(box.cfg.listen) --- ... cspace = c.space.test --- ... space.index.test_index == nil --- - true ... cspace.index.test_index == nil --- - true ... _ = space:create_index("test_index", {parts={1, 'string'}}) --- ... c:reload_schema() --- ... space.index.test_index ~= nil --- - true ... cspace.index.test_index ~= nil --- - true ... c.space.test.index.test_index ~= nil --- - true ... -- cleanup box.schema.user.revoke('guest','read,write,execute','universe') --- ... space:drop() --- ... -- -- gh-946: long polling CALL blocks input -- box.schema.user.grant('guest', 'execute', 'universe') --- ... c = net.connect(box.cfg.listen) --- ... N = 100 --- ... pad = string.rep('x', 1024) --- ... long_call_cond = fiber.cond() --- ... long_call_channel = fiber.channel() --- ... fast_call_channel = fiber.channel() --- ... function fast_call(x) return x end --- ... function long_call(x) long_call_cond:wait() return x * 2 end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, N do fiber.create(function() fast_call_channel:put(c:call('fast_call', {i, pad})) end) fiber.create(function() long_call_channel:put(c:call('long_call', {i, pad})) end) end test_run:cmd("setopt delimiter ''"); --- ... x = 0 --- ... for i = 1, N do x = x + fast_call_channel:get() end --- ... x --- - 5050 ... long_call_cond:broadcast() --- ... x = 0 --- ... for i = 1, N do x = x + long_call_channel:get() end --- ... x --- - 10100 ... -- Check that a connection does not leak if there is -- a long CALL in progress when it is closed. disconnected = false --- ... function on_disconnect() disconnected = true end --- ... box.session.on_disconnect(on_disconnect) == on_disconnect --- - true ... ch1 = fiber.channel(1) --- ... ch2 = fiber.channel(1) --- ... function wait_signal() ch1:put(true) ch2:get() end --- ... 
_ = fiber.create(function() c:call('wait_signal') end) --- ... ch1:get() --- - true ... c:close() --- ... fiber.sleep(0) --- ... disconnected -- false --- - false ... ch2:put(true) --- - true ... while disconnected == false do fiber.sleep(0.01) end --- ... disconnected -- true --- - true ... box.session.on_disconnect(nil, on_disconnect) --- ... -- -- gh-2666: check that netbox.call is not repeated on schema -- change. -- box.schema.user.grant('guest', 'write', 'space', '_space') --- ... box.schema.user.grant('guest', 'write', 'space', '_schema') --- ... count = 0 --- ... function create_space(name) count = count + 1 box.schema.create_space(name) return true end --- ... c = net.connect(box.cfg.listen) --- ... c:call('create_space', {'test1'}) --- - true ... count --- - 1 ... c:call('create_space', {'test2'}) --- - true ... count --- - 2 ... c:call('create_space', {'test3'}) --- - true ... count --- - 3 ... box.space.test1:drop() --- ... box.space.test2:drop() --- ... box.space.test3:drop() --- ... box.schema.user.revoke('guest', 'write', 'space', '_space') --- ... box.schema.user.revoke('guest', 'write', 'space', '_schema') --- ... c:close() --- ... -- -- gh-3164: netbox connection is not closed and garbage collected -- ever, if reconnect_after is set. -- test_run:cmd('start server connecter') --- - true ... test_run:cmd("set variable connect_to to 'connecter.listen'") --- - true ... weak = setmetatable({}, {__mode = 'v'}) --- ... -- Create strong and weak reference. Weak is valid until strong -- is valid too. strong = net.connect(connect_to, {reconnect_after = 0.1}) --- ... weak.c = strong --- ... weak.c:ping() --- - true ... test_run:cmd('stop server connecter') --- - true ... test_run:cmd('cleanup server connecter') --- - true ... -- Check the connection tries to reconnect at least two times. -- 'Cannot assign requested address' is the crutch for running the -- tests in a docker. This error emits instead of -- 'Connection refused' inside a docker. 
old_log_level = box.cfg.log_level --- ... box.cfg{log_level = 6} --- ... log.info(string.rep('a', 1000)) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... while test_run:grep_log('default', 'Connection refused', 1000) == nil and test_run:grep_log('default', 'Cannot assign requested address', 1000) == nil do fiber.sleep(0.1) end; --- ... log.info(string.rep('a', 1000)); --- ... while test_run:grep_log('default', 'Connection refused', 1000) == nil and test_run:grep_log('default', 'Cannot assign requested address', 1000) == nil do fiber.sleep(0.1) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... box.cfg{log_level = old_log_level} --- ... collectgarbage('collect') --- - 0 ... strong.state --- - error_reconnect ... strong == weak.c --- - true ... -- Remove single strong reference. Now connection must be garbage -- collected. strong = nil --- ... collectgarbage('collect') --- - 0 ... -- Now weak.c is null, because it was weak reference, and the -- connection is deleted by 'collect'. weak.c --- - null ... box.schema.user.revoke('guest', 'execute', 'universe') --- ... c:close() --- ... c = nil --- ... -- -- gh-3256 net.box is_nullable and collation options output -- space = box.schema.create_space('test') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... _ = space:create_index('pk') --- ... _ = space:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) --- ... c = net:connect(box.cfg.listen) --- ... c.space.test.index.sk.parts --- - - type: unsigned is_nullable: true fieldno: 2 ... space:drop() --- ... space = box.schema.create_space('test') --- ... box.internal.collation.create('test', 'ICU', 'ru-RU') --- ... _ = space:create_index('sk', { type = 'tree', parts = {{1, 'str', collation = 'test'}}, unique = true }) --- ... c:reload_schema() --- ... c.space.test.index.sk.parts --- - - type: string is_nullable: false collation_id: 3 fieldno: 1 ... c:close() --- ... 
box.internal.collation.drop('test') --- ... space:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_misc.test.lua0000664000000000000000000002021413306565107021577 0ustar rootrootenv = require('test_run') test_run = env.new() s = box.schema.space.create('spatial') -- rtree index as primary key must be forbidden (unique) i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {1, 'array'}}) -- any non-unique index as primary key must be forbidden i = s:create_index('spatial', { type = 'hash', unique = false, parts = {1, 'unsigned'}}) i = s:create_index('spatial', { type = 'tree', unique = false, parts = {1, 'unsigned'}}) i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {1, 'array'}}) -- tree and hash indexes over array field is not possible i = s:create_index('primary', { type = 'tree', parts = {1, 'array'}}) i = s:create_index('primary', { type = 'hash', parts = {1, 'array'}}) -- normal indexes i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) i = s:create_index('secondary', { type = 'hash', parts = {2, 'unsigned'}}) -- adding a tuple with array instead of num will fail i = s:insert{{1, 2, 3}, 4} i = s:insert{1, {2, 3, 4}} -- rtree index must be one-part i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {1, 'array', 2, 'array'}}) -- unique rtree index is not possible i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {3, 'array'}}) -- num rtree index is not possible i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'unsigned'}}) -- str rtree index is not possible i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'string'}}) -- normal rtree index i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}}) -- inserting wrong values (should fail) s:insert{1, 2, 3} s:insert{1, 2, "3"} s:insert{1, 2, nil, 3} 
s:insert{1, 2, {}} s:insert{1, 2, {"3", "4", "5", "6"}} s:insert{1, 2, {nil, 4, 5, 6}} s:insert{1, 2, {3, {4}, 5, 6}} s:insert{1, 2, {3, 4, {}, 6}} s:insert{1, 2, {3, 4, 5, "6"}} s:insert{1, 2, {3}} s:insert{1, 2, {3, 4, 5}} -- inserting good value s:insert{1, 2, {3, 4, 5, 6}} -- invalid alters s.index.spatial:alter({unique = true}) s.index.spatial:alter({type = 'tree'}) box.space[box.schema.SPACE_ID]:update({s.id}, {{"=", 4, 'vinyl'}}) -- chech that truncate works s.index.spatial:select({0, 0, 10, 10}, {iterator = 'le'}) s:truncate() s.index.spatial:select({0, 0, 10, 10}, {iterator = 'le'}) -- inserting lots of equvalent records for i = 1,500 do s:insert{i, i, {3, 4, 5, 6}} end -- and some records for chaos for i = 1,10 do for j = 1,10 do s:insert{500+i+j*20, 500+i*20+j, {i, j, i, j}} end end s.index.spatial:count() #s.index.spatial:select({3, 4, 5, 6}) for i = 1,500,2 do s:delete{i} end s.index.spatial:count() #s.index.spatial:select({3, 4, 5, 6}) s.index.spatial:min() s.index.spatial:max() -- seems that drop can't fail s.index.spatial:drop() s.index.spatial:select({}) s:drop() s = box.schema.space.create('vinyl', {engine = 'vinyl'}) -- rtree indexes are not enabled in vinyl i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {3, 'array'}}) i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) -- ... 
even secondary i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {3, 'array'}}) s:drop() -- rtree in temp space must work fine s = box.schema.space.create('spatial', {temporary = true}) i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}}) s:insert{1, 2, {3, 4, 5, 6}} s.index.spatial:select({0, 0, 10, 10}, {iterator = 'le'}) s:drop() -- snapshot test s = box.schema.space.create('spatial') i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}}) for i = 1,10 do s:insert{i, i, {i, i, i + 1, i + 1}} end box.snapshot() i:select({0, 0}, {iterator = 'neighbor'}) test_run:cmd("restart server default") s = box.space.spatial i = s.index.spatial i:select({0, 0}, {iterator = 'neighbor'}) s:drop() s = box.schema.space.create('spatial') i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}, dimension = 4}) for i = 1,10 do s:insert{i, i, {i, i, i, i, i + 1, i + 1, i + 1, i + 1}} end box.snapshot() i:select({0, 0, 0, 0}, {iterator = 'neighbor'}) test_run:cmd("restart server default") s = box.space.spatial i = s.index.spatial i:select({0, 0, 0, 0}, {iterator = 'neighbor'}) s:drop() -- distance type iopts = { type = 'rtree', unique = false, parts = {2, 'array'} } iopts['distance'] = 'euclid' s = box.schema.space.create('spatial') i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) i = s:create_index('spatial', iopts) s:insert{1, {0, 5}} s:insert{2, {5, 0}} s:insert{3, {5, 5}} s:insert{4, {8, 0}} s:insert{5, {0, 8}} s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) s:drop() iopts = { type = 'rtree', unique = false, parts = {2, 'array'} } iopts['distance'] = 'manhattan' s = box.schema.space.create('spatial') i = 
s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) i = s:create_index('spatial', iopts) s:insert{1, {0, 5}} s:insert{2, {5, 0}} s:insert{3, {5, 5}} s:insert{4, {8, 0}} s:insert{5, {0, 8}} s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) test_run:cmd("restart server default") s = box.space.spatial i = s.index.spatial s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) box.snapshot() test_run:cmd("restart server default") utils = require('utils') s = box.space.spatial i = s.index.spatial s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) s:drop() -- RTREE QA https://github.com/tarantool/tarantool/issues/976 s = box.schema.space.create('s') i = s:create_index('p') -- dimension too big i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}, dimension = 21}) -- dimension too low i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}, dimension = 0}) -- cant be unique i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}, unique = true}) -- wrong parts i = s:create_index('s', {type = 'rtree', parts = {2, 'unsigned'}}) i = s:create_index('s', {type = 'rtree', parts = {2, 'array', 3, 'array'}}) -- defaults test i = s:create_index('s', { type = 'rtree' }) i.dimension i.parts i:drop() -- hide first (id) field of tuple function f(t) local r = {} for i, v in ipairs(t) do r[i] = v end r[1] = 0 return setmetatable (r, {__serialize = 'seq'}) end -- new index through inserting to _index space f(box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{2, 'array'}}}) s.index.s:drop() -- with wrong args box.space._index:insert{s.id, 2, 's', 'rtree', nil, {{2, 'array'}}} box.space._index:insert{s.id, 2, 's', 'rtree', utils.setmap({}), {{2, 'array'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, dimension = 22}, {{2, 'array'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, dimension = 'dimension'}, {{2, 'array'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, 
{{2, 'unsigned'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{2, 'time'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{'no','time'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, distance = 'lobachevsky'}, {{2, 'array'}}} box.space._index:insert{s.id, 2, 's', 'rtee', {unique = false}, {{2, 'array'}}} box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{}}} -- unknown args checked f(box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, holy = 'cow'}, {{2, 'array'}}}) -- unknown part args are no more ignored (#2649) f(box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{field=2, type='array', part = 'opts'}}}) -- alter i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}}) i:alter{type = 'tree' } i:alter{dimension = 3 } s:insert{1, {1, 1} } s:insert{1, {1, 1, 1} } i:alter{dimension = 4 } s:select{} s:insert{2, {2, 0, 0} } i:alter{distance = 'euclid' } i:select({0, 0, 0}, {iterator = 'neighbor'}) i:alter{distance = 'manhattan' } i:select({0, 0, 0}, {iterator = 'neighbor'}) -- gh-1467: invalid iterator type i:select({1, 2, 3, 4, 5, 6}, {iterator = 'BITS_ALL_SET' } ) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/access_bin.test.lua0000664000000000000000000001001613306565107021533 0ustar rootrootenv = require('test_run') test_run = env.new() -- -- Access control tests which require a binary protocol -- connection to the server -- box.schema.user.grant('guest','read,write,execute','universe') session = box.session remote = require('net.box') c = remote.connect(box.cfg.listen) c:eval("session.su('admin')") c:eval("return session.user()") c:close() box.schema.user.revoke('guest', 'read,write,execute', 'universe') -- gh-488 suid functions -- setuid_space = box.schema.space.create('setuid_space') index = setuid_space:create_index('primary') setuid_func = function() return box.space.setuid_space:auto_increment{} end box.schema.func.create('setuid_func') 
box.schema.user.grant('guest', 'execute', 'function', 'setuid_func') c = remote.connect(box.cfg.listen) c:call("setuid_func") session.su('guest') setuid_func() session.su('admin') box.schema.func.drop('setuid_func') box.schema.func.create('setuid_func', { setuid = true }) box.schema.user.grant('guest', 'execute', 'function', 'setuid_func') c:call("setuid_func") session.su('guest') setuid_func() session.su('admin') c:close() -- OPENTAR-84: crash in on_replace_dd_func during recovery -- _func space recovered after _user space, so setuid option can be -- handled incorrectly box.snapshot() test_run:cmd('restart server default') remote = require('net.box') session = box.session setuid_func = function() return box.space.setuid_space:auto_increment{} end c = remote.connect(box.cfg.listen) c:call("setuid_func") session.su('guest') setuid_func() session.su('admin') c:close() box.schema.func.drop('setuid_func') box.space.setuid_space:drop() -- -- gh-530 "assertion failed" -- If a user is dropped, its session should not be usable -- any more -- test = box.schema.space.create('test') index = test:create_index('primary') box.schema.user.create('test', {password='test'}) box.schema.user.grant('test', 'read,write', 'space','test') box.schema.user.grant('test', 'read', 'space', '_space') box.schema.user.grant('test', 'read', 'space', '_index') net = require('net.box') c = net.connect('test:test@'..box.cfg.listen) c.space.test:insert{1} box.schema.user.drop('test') c.space.test:insert{1} c:close() test:drop() -- -- gh-575: User loses 'universe' grants after alter -- box.space._priv:get{1} u = box.space._user:get{1} box.session.su('admin') box.schema.user.passwd('Gx5!') c = require('net.box').new('admin:Gx5!@'..box.cfg.listen) c:call('dostring', { 'return 2 + 2' }) c:close() box.space._user:replace(u) -- -- Roles: test that universal access of an authenticated -- session is not updated if grant is made from another -- session -- test = box.schema.space.create('test') _ = 
test:create_index('primary') test:insert{1} box.schema.user.create('test', {password='test'}) box.schema.user.grant('test', 'read', 'space', '_space') box.schema.user.grant('test', 'read', 'space', '_index') net = require('net.box') c = net.connect('test:test@'..box.cfg.listen) c.space.test:select{} box.schema.role.grant('public', 'read', 'universe') c.space.test:select{} c:close() c = net.connect('test:test@'..box.cfg.listen) c.space.test:select{} box.schema.role.revoke('public', 'read', 'universe') c.space.test:select{} box.session.su('test') test:select{} box.session.su('admin') c:close() box.schema.user.drop('test') test:drop() -- -- gh-508 - wrong check for universal access of setuid functions -- -- notice that guest can execute stuff, but can't read space _func box.schema.user.grant('guest', 'execute', 'universe') function f1() return box.space._func:get(1)[4] end function f2() return box.space._func:get(2)[4] end box.schema.func.create('f1') box.schema.func.create('f2',{setuid=true}) c = net.connect(box.cfg.listen) -- should return access denied c:call('f1') -- should work (used to return access denied, because was not setuid c:call('f2') c:close() box.schema.user.revoke('guest', 'execute', 'universe') box.schema.func.drop('f1') box.schema.func.drop('f2') -- --gh-2063 - improper params to su function -- box.session.su('admin', box.session.user) box.session.su('admin', box.session.user()) -- clenaup box.session.su('admin') tarantool_1.9.1.26.g63eb81e3c/test/box/lua.test.lua0000664000000000000000000002405513306560010020217 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'hash', parts = {1, 'string'}, unique = true }) tmp = space:create_index('minmax', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) space:insert{'brave', 'new', 'world'} space:insert{'hello', 'old', 
'world'} space.index['minmax']:min() space.index['minmax']:max() space.index['minmax']:get{'new', 'world'} -- A test case for Bug #904208 -- "assert failed, when key cardinality is greater than index cardinality" -- https://bugs.launchpad.net/tarantool/+bug/904208 space.index['minmax']:get{'new', 'world', 'order'} space:delete{'brave'} -- A test case for Bug #902091 -- "Positioned iteration over a multipart index doesn't work" -- https://bugs.launchpad.net/tarantool/+bug/902091 space:insert{'item 1', 'alabama', 'song'} space.index['minmax']:get{'alabama'} space:insert{'item 2', 'california', 'dreaming '} space:insert{'item 3', 'california', 'uber alles'} space:insert{'item 4', 'georgia', 'on my mind'} iter, param, state = space.index['minmax']:pairs('california', { iterator = box.index.GE }) state, v = iter(param, state) v state, v = iter(param, state) v space:delete{'item 1'} space:delete{'item 2'} space:delete{'item 3'} space:delete{'item 4'} space:truncate() -- -- Test that we print index number in error ER_INDEX_VIOLATION -- space:insert{'1', 'hello', 'world'} space:insert{'2', 'hello', 'world'} space:drop() -- -- Check range scan over multipart keys -- space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) tmp = space:create_index('minmax', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = false }) space:insert{1234567, 'new', 'world'} space:insert{0, 'of', 'puppets'} space:insert{00000001ULL, 'of', 'might', 'and', 'magic'} space.index['minmax']:select('of', { limit = 2, iterator = 'GE' }) space.index['minmax']:select('of', { limit = 2, iterator = 'LE' }) space:truncate() -- -- A test case for Bug#1060967: truncation of 64-bit numbers -- space:insert{2^51, 'hello', 'world'} space.index['primary']:get{2^51} space:drop() -- -- Lua 64bit numbers support -- space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'tree', parts = {1, 
'unsigned'}, unique = true }) space:insert{tonumber64('18446744073709551615'), 'magic'} tuple = space.index['primary']:get{tonumber64('18446744073709551615')} num = tuple[1] num type(num) == 'cdata' num == tonumber64('18446744073709551615') num = tuple[1] num == tonumber64('18446744073709551615') space:delete{18446744073709551615ULL} space:insert{125ULL, 'magic'} tuple = space.index['primary']:get{125} tuple2 = space.index['primary']:get{125LL} num = tuple[1] num2 = tuple2[1] num, num2 type(num) == 'number' type(num2) == 'number' num == tonumber64('125') num2 == tonumber64('125') space:truncate() -- -- Tests for lua box.auto_increment with NUM keys -- -- lua box.auto_increment() with NUM keys testing space:auto_increment{'a'} space:insert{tonumber64(5)} space:auto_increment{'b'} space:auto_increment{'c'} -- gh-2258: Incomprehensive failure of auto_increment in absence of indices space.index.primary:drop() space:auto_increment{'a'} space:get({1}) space:select() space:update({1}, {}) space:upsert({1}, {}) space:delete({1}) space:bsize() space:count() space:len() space:pairs():totable() space:drop() -- -- Tests for lua idx:count() -- -- https://blueprints.launchpad.net/tarantool/+spec/lua-builtin-size-of-subtree space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) tmp = space:create_index('i1', { type = 'tree', parts = {2, 'unsigned', 3, 'unsigned'}, unique = false }) space:insert{1, 1, 1} space:insert{2, 2, 0} space:insert{3, 2, 1} space:insert{4, 3, 0} space:insert{5, 3, 1} space:insert{6, 3, 2} space.index['i1']:count() space:count() space.index['i1']:count(1) space:count(1) space.index['i1']:count(1) space.index['i1']:count(2, { iterator = 'LE' }) space.index['i1']:count(2, { iterator = 'GE' }) space:count(2, { iterator = 'GE' }) space.index['i1']:count({2, 0}, { iterator = 'LE' }) space.index['i1']:count({2, 1}, { iterator = 'GE' }) space.index['i1']:count(2) 
space.index['i1']:count({2, 1}) space.index['i1']:count({2, 2}) space.index['i1']:count(3) space.index['i1']:count({3, 3}) -- Returns total number of records -- https://github.com/tarantool/tarantool/issues/46 space.index['i1']:count() -- Test cases for #123: box.index.count does not check arguments properly space.index['i1']:count(function() end) space:drop() -- -- Tests for lua tuple:transform() -- space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'hash', parts = {1, 'string'}, unique = true }) t = space:insert{'1', '2', '3', '4', '5', '6', '7'} t:transform(8, 0, '8', '9', '100') t:transform(1, 1) t:transform(2, 4) t:transform(-1, 1) t:transform(-3, 2) t:transform(1, 0, 'A') t:transform(-1, 0, 'A') t:transform(1, 1, 'A') t:transform(-1, 1, 'B') t:transform(1, 2, 'C') t:transform(3, 0, 'hello') t:transform(1, -1, 'C') t:transform(1, 100) t:transform(-100, 1) t:transform(1, 3, 1, 2, 3) t:transform(4, 1, tonumber64(4)) t:transform(1, 1, {}) space:truncate() -- -- Tests for OPENTAR-64 - a limitation for the second argument to tuple:transform -- -- 50K is enough for everyone n = 2000 tab = {}; for i=1,n,1 do table.insert(tab, i) end t = box.tuple.new(tab) t:transform(1, n - 1) t = nil -- -- Tests for lua tuple:find() and tuple:findall() -- -- First space for hash_str tests t = space:insert{'A', '2', '3', '4', '3', '2', '5', '6', '3', '7'} t:find('2') t:find('4') t:find('5') t:find('A') t:find('0') t:findall('A') t:findall('2') t:findall('3') t:findall('0') t:find(2, '2') t:find(89, '2') t:findall(4, '3') t = space:insert{'Z', '2', 2, 3, tonumber64(2)} t:find(2) t:findall(tonumber64(2)) t:find('2') space:drop() -- A test case for Bug #1038784 -- transform returns wrong tuple and put broken reply into socket -- http://bugs.launchpad.net/tarantool/+bug/1038784 -- https://bugs.launchpad.net/tarantool/+bug/1006354 -- lua box.auto_increment() testing space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { 
type = 'tree', parts = {1, 'unsigned'}, unique = true }) dofile('push.lua') push_collection(space, 0, 1038784, 'hello') push_collection(space, 0, 1038784, 'hello') push_collection(space, 0, 1038784, 'hello') push_collection(space, 1, 1038784, 'hi') push_collection(space, 2, 1038784, 'hi') push_collection(space, 2, 1038784, 'hi') push_collection(space, 5, 1038784, 'hey') push_collection(space, 5, 1038784, 'hey') push_collection(space, 5, 1038784, 'hey') push_collection(space, 5, 1038784, 'hey') -- # lua box.auto_increment() testing -- # http://bugs.launchpad.net/tarantool/+bug/1006354 -- -- Tests for lua box.auto_increment -- space:truncate() space:auto_increment{'a'} space:insert{5} space:auto_increment{'b'} space:auto_increment{'c'} space:auto_increment{'d'} space:drop() -- A test case for Bug #1042798 -- Truncate hangs when primary key is not in linear or starts at the first field -- https://bugs.launchpad.net/tarantool/+bug/1042798 -- space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'tree', parts = {3, 'unsigned', 2, 'unsigned'}, unique = true }) -- Print key fields in pk space.index['primary'].parts space:insert{1, 2, 3, 4} space:insert{10, 20, 30, 40} space:insert{20, 30, 40, 50} space.index['primary']:select{} -- Truncate must not hang space:truncate() -- Empty result space.index['primary']:select{} space:drop() -- -- index:random test -- dofile('index_random_test.lua') space = box.schema.space.create('tweedledum') tmp = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) tmp = space:create_index('secondary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) ------------------------------------------------------------------------------- -- TreeIndex::random() ------------------------------------------------------------------------------- index_random_test(space, 'primary') ------------------------------------------------------------------------------- -- 
HashIndex::random() ------------------------------------------------------------------------------- index_random_test(space, 'secondary') space:drop() space = nil ------------------------------------------------------------------------------- -- space:format() ------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') pk = space:create_index('primary') space:format() box.schema.space.format(space.id) box.space._space:get(space.id)[7] space:format({{name = 'id', type = 'unsigned'}}) space:format() box.schema.space.format(space.id) box.space._space:get(space.id)[7] space:format({}) space:format() box.schema.space.format(space.id) box.space._space:get(space.id)[7] space:drop() ------------------------------------------------------------------------------- -- Invalid arguments ------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') pk = space:create_index('primary') space.len() space.count({}, {iterator = 'EQ'}) space.bsize() space.get({1}) space.select({}, {iterator = 'GE'}) space.insert({1, 2, 3}) space.replace({1, 2, 3}) space.put({1, 2, 3}) space.update({1}, {}) space.upsert({1, 2, 3}, {}) space.delete({1}) space.auto_increment({'hello'}) space.pairs({}, {iterator = 'EQ'}) space.truncate() space.format({}) space.drop() space.rename() space.create_index('secondary') space.run_triggers(false) pk.len() pk.bsize() pk.min() pk.min({}) pk.max() pk.max({}) pk.random(42) pk.pairs({}, {iterator = 'EQ'}) pk.count({}, {iterator = 'EQ'}) pk.get({1}) pk.select({}, {iterator = 'GE'}) pk.update({1}, {}) pk.delete({1}) pk.drop() pk.rename("newname") pk.alter({}) space:drop() pk = nil space = nil test_run:cmd("clear filter") -- vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 tarantool_1.9.1.26.g63eb81e3c/test/box/sequence.test.lua0000664000000000000000000004134413306560010021246 0ustar rootroottest_run = require('test_run').new() -- Options 
check on create. box.schema.sequence.create('test', {abc = 'abc'}) box.schema.sequence.create('test', {step = 'a'}) box.schema.sequence.create('test', {min = 'b'}) box.schema.sequence.create('test', {max = 'c'}) box.schema.sequence.create('test', {start = true}) box.schema.sequence.create('test', {cycle = 123}) box.schema.sequence.create('test', {name = 'test'}) box.schema.sequence.create('test', {step = 0}) box.schema.sequence.create('test', {min = 10, max = 1}) box.schema.sequence.create('test', {min = 10, max = 20, start = 1}) -- Options check on alter. _ = box.schema.sequence.create('test') box.schema.sequence.alter('test', {abc = 'abc'}) box.schema.sequence.alter('test', {step = 'a'}) box.schema.sequence.alter('test', {min = 'b'}) box.schema.sequence.alter('test', {max = 'c'}) box.schema.sequence.alter('test', {start = true}) box.schema.sequence.alter('test', {cycle = 123}) box.schema.sequence.alter('test', {name = 'test'}) box.schema.sequence.alter('test', {if_not_exists = false}) box.schema.sequence.alter('test', {step = 0}) box.schema.sequence.alter('test', {min = 10, max = 1}) box.schema.sequence.alter('test', {min = 10, max = 20, start = 1}) box.schema.sequence.drop('test') -- Duplicate name. sq1 = box.schema.sequence.create('test') box.schema.sequence.create('test') sq2, msg = box.schema.sequence.create('test', {if_not_exists = true}) sq1 == sq2, msg _ = box.schema.sequence.create('test2') box.schema.sequence.alter('test2', {name = 'test'}) box.schema.sequence.drop('test2') box.schema.sequence.drop('test') -- Check that box.sequence gets updated. sq = box.schema.sequence.create('test') box.sequence.test == sq sq.step sq:alter{step = 2} box.sequence.test == sq sq.step sq:drop() box.sequence.test == nil -- Attempt to delete a sequence that has a record in _sequence_data. 
sq = box.schema.sequence.create('test') sq:next() box.space._sequence:delete(sq.id) box.space._sequence_data:delete(sq.id) box.space._sequence:delete(sq.id) box.sequence.test == nil -- Default ascending sequence. sq = box.schema.sequence.create('test') sq.step, sq.min, sq.max, sq.start, sq.cycle sq:next() -- 1 sq:next() -- 2 sq:set(100) sq:next() -- 101 sq:next() -- 102 sq:reset() sq:next() -- 1 sq:next() -- 2 sq:drop() -- Default descending sequence. sq = box.schema.sequence.create('test', {step = -1}) sq.step, sq.min, sq.max, sq.start, sq.cycle sq:next() -- -1 sq:next() -- -2 sq:set(-100) sq:next() -- -101 sq:next() -- -102 sq:reset() sq:next() -- -1 sq:next() -- -2 sq:drop() -- Custom min/max. sq = box.schema.sequence.create('test', {min = 10}) sq.step, sq.min, sq.max, sq.start, sq.cycle sq:next() -- 10 sq:next() -- 11 sq:drop() sq = box.schema.sequence.create('test', {step = -1, max = 20}) sq.step, sq.min, sq.max, sq.start, sq.cycle sq:next() -- 20 sq:next() -- 19 sq:drop() -- Custom start value. sq = box.schema.sequence.create('test', {start = 1000}) sq.step, sq.min, sq.max, sq.start, sq.cycle sq:next() -- 1000 sq:next() -- 1001 sq:reset() sq:next() -- 1000 sq:next() -- 1001 sq:drop() -- Overflow and cycle. sq = box.schema.sequence.create('test', {max = 2}) sq:next() -- 1 sq:next() -- 2 sq:next() -- error sq:alter{cycle = true} sq:next() -- 1 sq:next() -- 2 sq:next() -- 1 sq:alter{step = 2} sq:next() -- 1 sq:alter{cycle = false} sq:next() -- error sq:drop() -- Setting sequence value outside boundaries. sq = box.schema.sequence.create('test') sq:alter{step = 1, min = 1, max = 10} sq:set(-100) sq:next() -- 1 sq:set(100) sq:next() -- error sq:reset() sq:next() -- 1 sq:alter{min = 5, start = 5} sq:next() -- 5 sq:reset() sq:alter{step = -1, min = 1, max = 10, start = 10} sq:set(100) sq:next() -- 10 sq:set(-100) sq:next() -- error sq:reset() sq:next() -- 10 sq:alter{max = 5, start = 5} sq:next() -- 5 sq:drop() -- number64 arguments. 
INT64_MIN = tonumber64('-9223372036854775808') INT64_MAX = tonumber64('9223372036854775807') sq = box.schema.sequence.create('test', {step = INT64_MAX, min = INT64_MIN, max = INT64_MAX, start = INT64_MIN}) sq:next() -- -9223372036854775808 sq:next() -- -1 sq:next() -- 9223372036854775806 sq:next() -- error sq:alter{step = INT64_MIN, start = INT64_MAX} sq:reset() sq:next() -- 9223372036854775807 sq:next() -- -1 sq:next() -- error sq:drop() -- Using in a transaction. s = box.schema.space.create('test') _ = s:create_index('pk') sq1 = box.schema.sequence.create('sq1', {step = 1}) sq2 = box.schema.sequence.create('sq2', {step = -1}) test_run:cmd("setopt delimiter ';'") box.begin() s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} box.rollback(); box.begin() s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} box.commit(); test_run:cmd("setopt delimiter ''"); s:select() -- [4, -4], [5, -5], [6, -6] sq1:drop() sq2:drop() s:drop() -- -- Attaching a sequence to a space. -- -- Index create/modify checks. 
s = box.schema.space.create('test') sq = box.schema.sequence.create('test') sq:set(123) s:create_index('pk', {parts = {1, 'string'}, sequence = 'test'}) -- error s:create_index('pk', {parts = {1, 'scalar'}, sequence = 'test'}) -- error s:create_index('pk', {parts = {1, 'number'}, sequence = 'test'}) -- error pk = s:create_index('pk', {parts = {1, 'integer'}, sequence = 'test'}) -- ok pk:drop() pk = s:create_index('pk', {parts = {1, 'unsigned'}, sequence = 'test'}) -- ok pk:drop() pk = s:create_index('pk') -- ok s:create_index('secondary', {parts = {2, 'unsigned'}, sequence = 'test'}) -- error s:create_index('secondary', {parts = {2, 'unsigned'}, sequence = true}) -- error sk = s:create_index('secondary', {parts = {2, 'unsigned'}}) -- ok sk:alter{sequence = 'test'} -- error sk:alter{sequence = true} -- error sk:alter{parts = {2, 'string'}} -- ok sk:alter{sequence = false} -- ok (ignored) pk:alter{sequence = 'test'} -- ok s.index.pk.sequence_id == sq.id sk:alter{sequence = 'test'} -- error sk:alter{sequence = true} -- error sk:alter{parts = {2, 'unsigned'}} -- ok sk:alter{sequence = false} -- ok (ignored) s.index.pk.sequence_id == sq.id sk:drop() s.index.pk.sequence_id == sq.id pk:drop() pk = s:create_index('pk', {parts = {1, 'unsigned'}, sequence = 'test'}) -- ok pk:alter{parts = {1, 'string'}} -- error box.space._index:update({s.id, pk.id}, {{'=', 6, {{0, 'string'}}}}) -- error box.space._index:delete{s.id, pk.id} -- error pk:alter{parts = {1, 'string'}, sequence = false} -- ok sk = s:create_index('sk', {parts = {2, 'unsigned'}}) sk:alter{sequence = 'test'} -- error box.space._space_sequence:insert{s.id, sq.id, false} -- error sk:drop() pk:drop() box.space._space_sequence:insert{s.id, sq.id, false} -- error s:create_index('pk', {sequence = {}}) -- error s:create_index('pk', {sequence = 'abc'}) -- error s:create_index('pk', {sequence = 12345}) -- error pk = s:create_index('pk', {sequence = 'test'}) -- ok s.index.pk.sequence_id == sq.id pk:drop() pk = 
s:create_index('pk', {sequence = sq.id}) -- ok s.index.pk.sequence_id == sq.id pk:drop() pk = s:create_index('pk', {sequence = false}) -- ok s.index.pk.sequence_id == nil pk:alter{sequence = {}} -- error pk:alter{sequence = 'abc'} -- error pk:alter{sequence = 12345} -- error pk:alter{sequence = 'test'} -- ok s.index.pk.sequence_id == sq.id pk:alter{sequence = sq.id} -- ok s.index.pk.sequence_id == sq.id pk:alter{sequence = false} -- ok s.index.pk.sequence_id == nil pk:drop() sq:next() -- 124 sq:drop() s:drop() -- Using a sequence for auto increment. sq = box.schema.sequence.create('test') s1 = box.schema.space.create('test1') _ = s1:create_index('pk', {parts = {1, 'unsigned'}, sequence = 'test'}) s2 = box.schema.space.create('test2') _ = s2:create_index('pk', {parts = {2, 'integer'}, sequence = 'test'}) s3 = box.schema.space.create('test3') _ = s3:create_index('pk', {parts = {2, 'unsigned', 1, 'string'}, sequence = 'test'}) s1:insert(box.tuple.new(nil)) -- 1 s2:insert(box.tuple.new('a', nil)) -- 2 s3:insert(box.tuple.new('b', nil)) -- 3 s1:truncate() s2:truncate() s3:truncate() s1:insert{nil, 123, 456} -- 4 s2:insert{'c', nil, 123} -- 5 s3:insert{'d', nil, 456} -- 6 sq:next() -- 7 sq:reset() s1:insert{nil, nil, 'aa'} -- 1 s2:insert{'bb', nil, nil, 'cc'} -- 2 s3:insert{'dd', nil, nil, 'ee'} -- 3 sq:next() -- 4 sq:set(100) s1:insert{nil, 'aaa', 1} -- 101 s2:insert{'bbb', nil, 2} -- 102 s3:insert{'ccc', nil, 3} -- 103 sq:next() -- 104 s1:insert{1000, 'xxx'} sq:next() -- 1001 s2:insert{'yyy', 2000} sq:next() -- 2001 s3:insert{'zzz', 3000} sq:next() -- 3001 s1:insert{500, 'xxx'} s3:insert{'zzz', 2500} s2:insert{'yyy', 1500} sq:next() -- 3002 sq:drop() -- error s1:drop() sq:drop() -- error s2:drop() sq:drop() -- error s3:drop() sq:drop() -- ok -- Automatically generated sequences. 
s = box.schema.space.create('test') sq = box.schema.sequence.create('test') sq:set(123) pk = s:create_index('pk', {sequence = true}) sk = s:create_index('sk', {parts = {2, 'string'}}) sq = box.sequence.test_seq sq.step, sq.min, sq.max, sq.start, sq.cycle s.index.pk.sequence_id == sq.id s:insert{nil, 'a'} -- 1 s:insert{nil, 'b'} -- 2 s:insert{nil, 'c'} -- 3 sq:next() -- 4 pk:alter{sequence = false} s.index.pk.sequence_id == nil s:insert{nil, 'x'} -- error box.sequence.test_seq == nil pk:alter{sequence = true} sq.step, sq.min, sq.max, sq.start, sq.cycle sq = box.sequence.test_seq s.index.pk.sequence_id == sq.id s:insert{100, 'abc'} s:insert{nil, 'cda'} -- 101 sq:next() -- 102 pk:alter{sequence = 'test'} s.index.pk.sequence_id == box.sequence.test.id box.sequence.test_seq == nil pk:alter{sequence = true} s.index.pk.sequence_id == box.sequence.test_seq.id sk:drop() pk:drop() box.sequence.test_seq == nil pk = s:create_index('pk', {sequence = true}) s.index.pk.sequence_id == box.sequence.test_seq.id s:drop() box.sequence.test_seq == nil sq = box.sequence.test sq:next() -- 124 sq:drop() -- Check that generated sequence cannot be attached to another space. s1 = box.schema.space.create('test1') _ = s1:create_index('pk', {sequence = true}) s2 = box.schema.space.create('test2') _ = s2:create_index('pk', {sequence = 'test1_seq'}) -- error box.space._space_sequence:insert{s2.id, box.sequence.test1_seq.id, false} -- error s1:drop() s2:drop() -- Sequences are compatible with Vinyl spaces. s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {sequence = true}) s:insert{nil, 'a'} -- 1 s:insert{100, 'b'} -- 100 box.begin() s:insert{nil, 'c'} -- 101 s:insert{nil, 'd'} -- 102 box.rollback() box.begin() s:insert{nil, 'e'} -- 103 s:insert{nil, 'f'} -- 104 box.commit() s:select() -- {1, 'a'}, {100, 'b'}, {103, 'e'}, {104, 'f'} s:drop() -- -- Check that sequences are persistent. 
-- s1 = box.schema.space.create('test1') _ = s1:create_index('pk', {sequence = true}) s1:insert{nil, 'a'} -- 1 box.snapshot() s2 = box.schema.space.create('test2') _ = s2:create_index('pk', {sequence = true}) s2:insert{101, 'aaa'} sq = box.schema.sequence.create('test', {step = 2, min = 10, max = 20, start = 15, cycle = true}) sq:next() test_run:cmd('restart server default') sq = box.sequence.test sq.step, sq.min, sq.max, sq.start, sq.cycle sq:next() sq:drop() s1 = box.space.test1 s1.index.pk.sequence_id == box.sequence.test1_seq.id s1:insert{nil, 'b'} -- 2 s1:drop() s2 = box.space.test2 s2.index.pk.sequence_id == box.sequence.test2_seq.id s2:insert{nil, 'bbb'} -- 102 s2:drop() -- -- Test permission checks. -- -- Sanity checks. box.schema.user.create('user') -- Setup read permissions for box.schema.user.info() to work. box.schema.user.grant('user', 'read', 'space', '_priv') box.schema.user.grant('user', 'read', 'space', '_user') box.schema.user.grant('user', 'read', 'space', '_space') box.schema.user.grant('user', 'read', 'space', '_sequence') sq = box.schema.sequence.create('seq') box.schema.user.grant('user', 'write', 'sequence', 'test') -- error: no such sequence box.schema.user.grant('user', 'write', 'sequence', 'seq') -- ok box.space._priv.index.object:select{'sequence'} box.space._sequence:delete(sq.id) -- error: sequence has grants sq:drop() -- ok box.space._priv.index.object:select{'sequence'} -- Access to a standalone sequence is denied unless -- the user has the corresponding privileges. sq = box.schema.sequence.create('seq') box.session.su('user') sq:set(100) -- error sq:next() -- error sq:reset() -- error box.session.su('admin') box.schema.user.grant('user', 'write', 'sequence', 'seq') box.session.su('user') box.schema.user.info() sq:set(100) -- ok sq:next() -- ok sq:reset() -- ok box.session.su('admin') box.schema.user.revoke('user', 'write', 'sequence', 'seq') -- Check that access via role works. 
box.schema.role.create('seq_role') box.schema.role.grant('seq_role', 'write', 'sequence', 'seq') box.schema.user.grant('user', 'execute', 'role', 'seq_role') box.session.su('user') sq:set(100) -- ok sq:next() -- ok sq:reset() -- ok box.session.su('admin') box.schema.role.drop('seq_role') -- Universe access grants access to any sequence. box.schema.user.grant('user', 'write', 'universe') box.session.su('user') sq:set(100) -- ok sq:next() -- ok sq:reset() -- ok box.session.su('admin') -- A sequence is inaccessible after privileges have been revoked. box.schema.user.revoke('user', 'write', 'universe') box.session.su('user') sq:set(100) -- error sq:next() -- error sq:reset() -- error box.session.su('admin') -- A user cannot alter sequences created by other users. box.schema.user.grant('user', 'read,write', 'universe') box.session.su('user') sq:alter{step = 2} -- error sq:drop() -- error box.session.su('admin') sq:drop() -- A user can alter/use sequences that he owns. box.session.su('user') sq = box.schema.sequence.create('seq') sq:alter{step = 2} -- ok sq:drop() -- ok sq = box.schema.sequence.create('seq') box.session.su('admin') box.schema.user.revoke('user', 'read,write', 'universe') box.session.su('user') sq:set(100) -- ok sq:next() -- ok sq:reset() -- ok box.session.su('admin') sq:drop() -- A sequence can be attached to a space only if the user owns both. 
sq1 = box.schema.sequence.create('seq1') s1 = box.schema.space.create('space1') _ = s1:create_index('pk') box.schema.user.grant('user', 'read,write', 'universe') box.session.su('user') sq2 = box.schema.sequence.create('seq2') s2 = box.schema.space.create('space2') _ = s2:create_index('pk', {sequence = 'seq1'}) -- error s1.index.pk:alter({sequence = 'seq1'}) -- error box.space._space_sequence:replace{s1.id, sq1.id, false} -- error box.space._space_sequence:replace{s1.id, sq2.id, false} -- error box.space._space_sequence:replace{s2.id, sq1.id, false} -- error s2.index.pk:alter({sequence = 'seq2'}) -- ok box.session.su('admin') -- If the user owns a sequence attached to a space, -- it can use it for auto increment, otherwise it -- needs privileges. box.schema.user.revoke('user', 'read,write', 'universe') box.session.su('user') s2:insert{nil, 1} -- ok: {1, 1} box.session.su('admin') s2.index.pk:alter{sequence = 'seq1'} box.session.su('user') s2:insert{2, 2} -- error s2:insert{nil, 2} -- error s2:update(1, {{'+', 2, 1}}) -- ok s2:delete(1) -- ok box.session.su('admin') box.schema.user.grant('user', 'write', 'sequence', 'seq1') box.session.su('user') s2:insert{2, 2} -- ok s2:insert{nil, 3} -- ok: {3, 3} box.session.su('admin') s1:drop() s2:drop() sq1:drop() sq2:drop() -- If the user has access to a space, it also has access to -- an automatically generated sequence attached to it. s = box.schema.space.create('test') _ = s:create_index('pk', {sequence = true}) box.schema.user.grant('user', 'read,write', 'space', 'test') box.session.su('user') s:insert{10, 10} -- ok s:insert{nil, 11} -- ok: {11, 11} box.sequence.test_seq:set(100) -- error box.sequence.test_seq:next() -- error box.sequence.test_seq:reset() -- error box.session.su('admin') s:drop() -- When a user is dropped, all his sequences are dropped as well. 
box.schema.user.grant('user', 'read,write', 'universe') box.session.su('user') _ = box.schema.sequence.create('test1') _ = box.schema.sequence.create('test2') box.session.su('admin') box.schema.user.drop('user') box.sequence -- Apart from the admin, only the owner can grant permissions -- to a sequence. box.schema.user.create('user1') box.schema.user.create('user2') box.schema.user.grant('user1', 'read,write', 'universe') box.schema.user.grant('user2', 'read,write', 'universe') box.session.su('user1') sq = box.schema.sequence.create('test') box.session.su('user2') box.schema.user.grant('user2', 'write', 'sequence', 'test') -- error box.session.su('user1') box.schema.user.grant('user2', 'write', 'sequence', 'test') -- ok box.session.su('admin') box.schema.user.drop('user1') box.schema.user.drop('user2') -- gh-2914: check identifier constraints. test_run = require('test_run').new() identifier = require("identifier") test_run:cmd("setopt delimiter ';'") identifier.run_test( function (identifier) box.schema.sequence.create(identifier) if box.sequence[identifier]:next() ~= 1 then error("Cannot access sequence by identifier") end end, function (identifier) box.schema.sequence.drop(identifier) end ); test_run:cmd("setopt delimiter ''"); tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_point_r2.result0000664000000000000000000000262013306560010022144 0ustar rootroots = box.schema.space.create('spatial') --- ... _ = s:create_index('primary') --- ... _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) --- ... s:insert{1,{0.0,0.0}} --- - [1, [0, 0]] ... s:insert{2,{0.0,10.0}} --- - [2, [0, 10]] ... s:insert{3,{0.0,50.0}} --- - [3, [0, 50]] ... s:insert{4,{10.0,0.0}} --- - [4, [10, 0]] ... s:insert{5,{50.0,0.0}} --- - [5, [50, 0]] ... s:insert{6,{10.0,10.0}} --- - [6, [10, 10]] ... s:insert{7,{10.0,50.0}} --- - [7, [10, 50]] ... s:insert{8,{50.0,10.0}} --- - [8, [50, 10]] ... s:insert{9,{50.0,50.0}} --- - [9, [50, 50]] ... 
-- select all records s.index.spatial:select({}, {iterator = 'ALL'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [3, [0, 50]] - [4, [10, 0]] - [5, [50, 0]] - [6, [10, 10]] - [7, [10, 50]] - [8, [50, 10]] - [9, [50, 50]] ... -- select records belonging to rectangle (0,0,10,10) s.index.spatial:select({0.0,0.0,10.0,10.0}, {iterator = 'LE'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [4, [10, 0]] - [6, [10, 10]] ... -- select records with coordinates (10,10) s.index.spatial:select({10.0,10.0}, {iterator = 'EQ'}) --- - - [6, [10, 10]] ... -- select neighbors of point (5,5) s.index.spatial:select({5.0,5.0}, {iterator = 'NEIGHBOR'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [4, [10, 0]] - [6, [10, 10]] - [3, [0, 50]] - [5, [50, 0]] - [7, [10, 50]] - [8, [50, 10]] - [9, [50, 50]] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_point.test.lua0000664000000000000000000000127713306560010021771 0ustar rootroots = box.schema.space.create('spatial') _ = s:create_index('primary') _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) s:insert{1,{0,0}} s:insert{2,{0,10}} s:insert{3,{0,50}} s:insert{4,{10,0}} s:insert{5,{50,0}} s:insert{6,{10,10}} s:insert{7,{10,50}} s:insert{8,{50,10}} s:insert{9,{50,50}} -- select all records s.index.spatial:select({}, {iterator = 'ALL'}) -- select records belonging to rectangle (0,0,10,10) s.index.spatial:select({0,0,10,10}, {iterator = 'LE'}) -- select records with coordinates (10,10) s.index.spatial:select({10,10}, {iterator = 'EQ'}) -- select neighbors of point (5,5) s.index.spatial:select({5,5}, {iterator = 'NEIGHBOR'}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/schema_reload.test.lua0000664000000000000000000000634713306565107022244 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') net_box = require('net.box') fiber = require('fiber') LISTEN = require('uri').parse(box.cfg.listen) -- create first space s = box.schema.create_space('test') i = s:create_index('primary') cn = 
net_box.connect(LISTEN.host, LISTEN.service) -- check that schema is correct cn.space.test ~= nil old_schema_version = cn.schema_version -- create one more space s2 = box.schema.create_space('test2') i2 = s2:create_index('primary') ---------------------------------- -- TEST #1 simple reload ---------------------------------- -- check that schema is not fresh cn.space.test2 == nil cn.schema_version == old_schema_version -- exec request with reload cn.space.test:select{} cn.schema_version > old_schema_version ---------------------------------- -- TEST #2 parallel select/reload ---------------------------------- env = require('test_run') test_run = env.new() requests = 0 reloads = 0 test_run:cmd('setopt delimiter ";"') function selector() while true do cn.space.test:select{} requests = requests + 1 fiber.sleep(0.01) end end function reloader() while true do cn:reload_schema() reloads = reloads + 1 fiber.sleep(0.001) end end; test_run:cmd('setopt delimiter ""'); request_fiber = fiber.create(selector) reload_fiber = fiber.create(reloader) -- Check that each fiber works while requests < 10 or reloads < 10 do fiber.sleep(0.01) end requests < reloads -- cleanup request_fiber:cancel() reload_fiber:cancel() s:drop() s2:drop() -------------------------------------------------------------------------------- -- gh-1808: support schema_version in CALL, EVAL and PING -------------------------------------------------------------------------------- test_run:cmd('setopt delimiter ";"') function bump_schema_version() if box.space.bump_schema_version == nil then box.schema.create_space('bump_schema_version') else box.space.bump_schema_version:drop() end end; test_run:cmd('setopt delimiter ""'); cn = net_box.connect(box.cfg.listen) -- ping schema_version = cn.schema_version bump_schema_version() cn:ping() -- Sic: net.box returns true on :ping() even on ER_WRONG_SCHEMA_VERSION while cn.schema_version == schema_version do fiber.sleep(0.0001) end cn.schema_version == schema_version + 1 -- 
call schema_version = cn.schema_version bump_schema_version() function somefunc() return true end cn:call('somefunc') cn.schema_version == schema_version + 1 somefunc = nil -- failed call schema_version = cn.schema_version bump_schema_version() cn:call('somefunc') cn.schema_version == schema_version + 1 -- eval schema_version = cn.schema_version bump_schema_version() cn:eval('return') cn.schema_version == schema_version + 1 somefunc = nil -- failed eval schema_version = cn.schema_version bump_schema_version() cn:eval('error("xx")') cn.schema_version == schema_version + 1 somefunc = nil cn:close() -- box.internal.schema_version() schema_version = box.internal.schema_version() schema_version > 0 bump_schema_version() box.internal.schema_version() == schema_version + 1 if box.space.bump_schema_version ~= nil then box.space.bump_schema_version:drop() end box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/sql.result0000664000000000000000000003547513306560010020024 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... net_box = require('net.box') --- ... s = box.schema.space.create('test') --- ... _ = box.schema.space.create('test1', { id = 555 }) --- ... box.schema.user.create('test', { password = 'test' }) --- ... box.schema.user.grant('test', 'execute,read,write', 'universe') --- ... conn = net_box.connect('test:test@' .. box.cfg.listen) --- ... space = conn.space.test --- ... index = box.space.test:create_index('primary', { type = 'hash' }) --- ... _ = box.space.test1:create_index('primary', { type = 'hash' }) --- ... _ = box.space.test1:create_index('secondary', { type = 'hash', parts = {2, 'string'}}) --- ... -- send request to remote server to force schema reloading conn:reload_schema() --- ... space:select{} --- - [] ... space:insert{1, 'I am a tuple'} --- - [1, 'I am a tuple'] ... space:select{1} --- - - [1, 'I am a tuple'] ... space:select{0} --- - [] ... space:select{2} --- - [] ... 
test_run:cmd('restart server default') net_box = require('net.box') --- ... conn = net_box.connect('test:test@' .. box.cfg.listen) --- ... space = conn.space.test --- ... space:select{1} --- - - [1, 'I am a tuple'] ... box.snapshot() --- - ok ... space:select{1} --- - - [1, 'I am a tuple'] ... test_run:cmd('restart server default') net_box = require('net.box') --- ... conn = net_box.connect('test:test@' .. box.cfg.listen) --- ... space = conn.space.test --- ... space:select{1} --- - - [1, 'I am a tuple'] ... space:delete{1} --- - [1, 'I am a tuple'] ... space:select{1} --- - [] ... -- xxx: update comes through, returns 0 rows affected space:update(1, {{'=', 2, 'I am a new tuple'}}) --- ... -- nothing is selected, since nothing was there space:select{1} --- - [] ... space:insert{1, 'I am a new tuple'} --- - [1, 'I am a new tuple'] ... space:select{1} --- - - [1, 'I am a new tuple'] ... space:update(1, {{'=', 2, 'I am the newest tuple'}}) --- - [1, 'I am the newest tuple'] ... space:select{1} --- - - [1, 'I am the newest tuple'] ... -- this is correct, can append field to tuple space:update(1, {{'=', 2, 'Huh'}, {'=', 3, 'I am a new field! I was added via append'}}) --- - [1, 'Huh', 'I am a new field! I was added via append'] ... space:select{1} --- - - [1, 'Huh', 'I am a new field! I was added via append'] ... -- this is illegal space:update(1, {{'=', 2, 'Huh'}, {'=', 1001, 'invalid field'}}) --- - error: Field 1001 was not found in the tuple ... space:select{1} --- - - [1, 'Huh', 'I am a new field! I was added via append'] ... space:replace{1, 'I am a new tuple', 'stub'} --- - [1, 'I am a new tuple', 'stub'] ... space:update(1, {{'=', 2, 'Huh'}, {'=', 3, 'Oh-ho-ho'}}) --- - [1, 'Huh', 'Oh-ho-ho'] ... space:select{1} --- - - [1, 'Huh', 'Oh-ho-ho'] ... -- check empty strings space:update(1, {{'=', 2, ''}, {'=', 3, ''}}) --- - [1, '', ''] ... space:select{1} --- - - [1, '', ''] ... -- check type change space:update(1, {{'=', 2, 2}, {'=', 3, 3}}) --- - [1, 2, 3] ... 
space:select{1} --- - - [1, 2, 3] ... -- check limits space:insert{0} --- - [0] ... space:select{0} --- - - [0] ... space:select{4294967295} --- - [] ... -- check update delete be secondary index conn.space.test1:insert{0, "hello", 1} --- - [0, 'hello', 1] ... conn.space.test1.index.secondary:update("hello", {{'=', 3, 2}}) --- - [0, 'hello', 2] ... conn.space.test1.index.secondary:delete("hello") --- - [0, 'hello', 2] ... -- cleanup space:delete(0) --- - [0] ... space:delete(4294967295) --- ... box.space.test:drop() --- ... box.space.test1:drop() --- ... box.schema.user.drop('test') --- ... space = nil --- ... net_box = require('net.box') --- ... -- Prepare spaces box.schema.user.create('test', { password = 'test' }) --- ... box.schema.user.grant('test', 'execute,read,write', 'universe') --- ... s = box.schema.space.create('tweedledum') --- ... index1 = s:create_index('primary', { type = 'tree', parts = { 1, 'string'} }) --- ... index2 = s:create_index('secondary', { type = 'tree', unique = false, parts = {2, 'string'}}) --- ... function compare(a,b) return a[1] < b[1] end --- ... conn = net_box.connect('test:test@' .. box.cfg.listen) --- ... space = conn.space.tweedledum --- ... -- A test case for Bug#729758 -- "SELECT fails with a disjunct and small LIMIT" -- https://bugs.launchpad.net/tarantool/+bug/729758 space:insert{'Doe', 'Richard'} --- - ['Doe', 'Richard'] ... space:insert{'Roe', 'Richard'} --- - ['Roe', 'Richard'] ... space:insert{'Woe', 'Richard'} --- - ['Woe', 'Richard'] ... space:insert{'Major', 'Tomas'} --- - ['Major', 'Tomas'] ... space:insert{'Kytes', 'Tomas'} --- - ['Kytes', 'Tomas'] ... sorted(space.index.secondary:select('Richard')) --- - - ['Doe', 'Richard'] - ['Roe', 'Richard'] - ['Woe', 'Richard'] ... -- A test case for Bug#729879 -- "Zero limit is treated the same as no limit" -- https://bugs.launchpad.net/tarantool/+bug/729879 sorted(space.index.secondary:select('Richard', { limit = 0 })) --- - [] ... s:truncate() --- ... 
-- A test case for Bug#730593 -- "Bad data if incomplete tuple" -- https://bugs.launchpad.net/tarantool/+bug/730593 -- Verify that if there is an index on, say, field 2, -- we can't insert tuples with cardinality 1 and -- get away with it. space:insert{'Britney'} --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... sorted(space.index.secondary:select('Anything')) --- - [] ... space:insert{'Stephanie'} --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... sorted(space.index.secondary:select('Anything')) --- - [] ... space:insert{'Spears', 'Britney'} --- - ['Spears', 'Britney'] ... space:select{'Spears'} --- - - ['Spears', 'Britney'] ... sorted(space.index.secondary:select('Anything')) --- - [] ... sorted(space.index.secondary:select('Britney')) --- - - ['Spears', 'Britney'] ... s.index[0]:select('Spears', { limit = 100, iterator = 'GE' }) --- - - ['Spears', 'Britney'] ... s.index[1]:select('Britney', { limit = 100, iterator = 'GE' }) --- - - ['Spears', 'Britney'] ... space:delete('Spears') --- - ['Spears', 'Britney'] ... -- Cleanup s:truncate() --- ... -- Test composite keys with trees -- Redefine the second key to be composite s.index.secondary:alter{unique = true, parts = { 2, 'string', 3, 'string'}} --- ... space:insert{'key1', 'part1', 'part2'} --- - ['key1', 'part1', 'part2'] ... -- Test a duplicate insert on unique index that once resulted in a crash (bug 926080) space:replace{'key1', 'part1', 'part2'} --- - ['key1', 'part1', 'part2'] ... space:insert{'key2', 'part1', 'part2_a'} --- - ['key2', 'part1', 'part2_a'] ... space:insert{'key3', 'part1', 'part2_b'} --- - ['key3', 'part1', 'part2_b'] ... s.index[1]:select{} --- - - ['key1', 'part1', 'part2'] - ['key2', 'part1', 'part2_a'] - ['key3', 'part1', 'part2_b'] ... space:select{'key1'} --- - - ['key1', 'part1', 'part2'] ... space:select{'key2'} --- - - ['key2', 'part1', 'part2_a'] ... 
space:select{'key3'} --- - - ['key3', 'part1', 'part2_b'] ... sorted(space.index.secondary:select('part1')) --- - - ['key1', 'part1', 'part2'] - ['key2', 'part1', 'part2_a'] - ['key3', 'part1', 'part2_b'] ... s.index[1]:select('part1', { limit = 100, iterator = 'GE' }) --- - - ['key1', 'part1', 'part2'] - ['key2', 'part1', 'part2_a'] - ['key3', 'part1', 'part2_b'] ... s.index[0]:select('key2', { limit = 100, iterator = 'GE' }) --- - - ['key2', 'part1', 'part2_a'] - ['key3', 'part1', 'part2_b'] ... s.index[1]:select({ 'part1', 'part2_a' }, { limit = 1, iterator = 'GE' }) --- - - ['key2', 'part1', 'part2_a'] ... space:select{'key1'} --- - - ['key1', 'part1', 'part2'] ... space:select{'key2'} --- - - ['key2', 'part1', 'part2_a'] ... space:select{'key3'} --- - - ['key3', 'part1', 'part2_b'] ... sorted(space.index.secondary:select('part1')) --- - - ['key1', 'part1', 'part2'] - ['key2', 'part1', 'part2_a'] - ['key3', 'part1', 'part2_b'] ... space:delete('key1') --- - ['key1', 'part1', 'part2'] ... space:delete('key2') --- - ['key2', 'part1', 'part2_a'] ... space:delete('key3') --- - ['key3', 'part1', 'part2_b'] ... s:truncate() --- ... -- check non-unique multipart keys s.index.primary:alter{type = 'tree', parts = { 1, 'unsigned'}} --- ... s.index.secondary:alter{unique = false} --- ... space:insert{1234567, 'part1', 'part2'} --- - [1234567, 'part1', 'part2'] ... space:insert{11234567, 'part1', 'part2'} --- - [11234567, 'part1', 'part2'] ... space:insert{21234567, 'part1', 'part2_a'} --- - [21234567, 'part1', 'part2_a'] ... space:insert{31234567, 'part1_a', 'part2'} --- - [31234567, 'part1_a', 'part2'] ... space:insert{41234567, 'part1_a', 'part2_a'} --- - [41234567, 'part1_a', 'part2_a'] ... l = {} --- ... for state, v in s:pairs() do table.insert(l, v) end --- ... l --- - - [1234567, 'part1', 'part2'] - [11234567, 'part1', 'part2'] - [21234567, 'part1', 'part2_a'] - [31234567, 'part1_a', 'part2'] - [41234567, 'part1_a', 'part2_a'] ... 
space:select{1234567} --- - - [1234567, 'part1', 'part2'] ... space:select{11234567} --- - - [11234567, 'part1', 'part2'] ... space:select{21234567} --- - - [21234567, 'part1', 'part2_a'] ... sorted(space.index.secondary:select('part1')) --- - - [1234567, 'part1', 'part2'] - [11234567, 'part1', 'part2'] - [21234567, 'part1', 'part2_a'] ... sorted(space.index.secondary:select('part1_a')) --- - - [31234567, 'part1_a', 'part2'] - [41234567, 'part1_a', 'part2_a'] ... sorted(space.index.secondary:select('part_none')) --- - [] ... sorted(s.index[1]:select({'part1', 'part2'})) --- - - [1234567, 'part1', 'part2'] - [11234567, 'part1', 'part2'] ... sorted(space.index.secondary:select('part1')) --- - - [1234567, 'part1', 'part2'] - [11234567, 'part1', 'part2'] - [21234567, 'part1', 'part2_a'] ... sorted(space.index.secondary:select('part2')) --- - [] ... -- cleanup space:delete(1234567) --- - [1234567, 'part1', 'part2'] ... space:delete(11234567) --- - [11234567, 'part1', 'part2'] ... space:delete(21234567) --- - [21234567, 'part1', 'part2_a'] ... space:delete(31234567) --- - [31234567, 'part1_a', 'part2'] ... space:delete(41234567) --- - [41234567, 'part1_a', 'part2_a'] ... s:select{} --- - [] ... s:truncate() --- ... s.index.primary:alter{type = 'hash'} --- ... s.index.secondary:alter{type = 'hash', unique = true, parts = { 2, 'string' }} --- ... space:insert{1, 'hello'} --- - [1, 'hello'] ... space:insert{2, 'brave'} --- - [2, 'brave'] ... space:insert{3, 'new'} --- - [3, 'new'] ... space:insert{4, 'world'} --- - [4, 'world'] ... env = require('test_run') --- ... test_run = env.new() --- ... -- Check how build_indexes() works test_run:cmd('restart server default') net_box = require('net.box') --- ... conn = net_box.connect('test:test@' .. box.cfg.listen) --- ... space = conn.space.tweedledum --- ... s = box.space.tweedledum --- ... -- Bug#929654 - secondary hash index is not built with build_indexes() sorted(space.index.secondary:select('hello')) --- - - [1, 'hello'] ... 
sorted(space.index.secondary:select('brave')) --- - - [2, 'brave'] ... sorted(space.index.secondary:select('new')) --- - - [3, 'new'] ... sorted(space.index.secondary:select('world')) --- - - [4, 'world'] ... s:truncate() --- ... -- A test case for: http://bugs.launchpad.net/bugs/735140 -- Partial REPLACE corrupts index. -- clean data and restart with appropriate config s.index.primary:alter{parts = {1, 'string'}} --- ... s.index.secondary:alter{type = 'tree', unique = false} --- ... space:insert{'Spears', 'Britney'} --- - ['Spears', 'Britney'] ... space:select{'Spears'} --- - - ['Spears', 'Britney'] ... sorted(space.index.secondary:select('Britney')) --- - - ['Spears', 'Britney'] ... -- try to insert the incoplete tuple space:replace{'Spears'} --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... -- check that nothing has been updated space:select{'Spears'} --- - - ['Spears', 'Britney'] ... -- cleanup space:delete('Spears') --- - ['Spears', 'Britney'] ... -- Test retrieval of duplicates via a secondary key s.index.primary:alter{parts = { 1, 'unsigned'}} --- ... space:insert{1, 'duplicate one'} --- - [1, 'duplicate one'] ... space:insert{2, 'duplicate one'} --- - [2, 'duplicate one'] ... space:insert{3, 'duplicate one'} --- - [3, 'duplicate one'] ... space:insert{4, 'duplicate one'} --- - [4, 'duplicate one'] ... space:insert{5, 'duplicate one'} --- - [5, 'duplicate one'] ... space:insert{6, 'duplicate two'} --- - [6, 'duplicate two'] ... space:insert{7, 'duplicate two'} --- - [7, 'duplicate two'] ... space:insert{8, 'duplicate two'} --- - [8, 'duplicate two'] ... space:insert{9, 'duplicate two'} --- - [9, 'duplicate two'] ... space:insert{10, 'duplicate two'} --- - [10, 'duplicate two'] ... space:insert{11, 'duplicate three'} --- - [11, 'duplicate three'] ... space:insert{12, 'duplicate three'} --- - [12, 'duplicate three'] ... space:insert{13, 'duplicate three'} --- - [13, 'duplicate three'] ... 
space:insert{14, 'duplicate three'} --- - [14, 'duplicate three'] ... space:insert{15, 'duplicate three'} --- - [15, 'duplicate three'] ... sorted(space.index.secondary:select('duplicate one')) --- - - [1, 'duplicate one'] - [2, 'duplicate one'] - [3, 'duplicate one'] - [4, 'duplicate one'] - [5, 'duplicate one'] ... sorted(space.index.secondary:select('duplicate two')) --- - - [6, 'duplicate two'] - [7, 'duplicate two'] - [8, 'duplicate two'] - [9, 'duplicate two'] - [10, 'duplicate two'] ... sorted(space.index.secondary:select('duplicate three')) --- - - [11, 'duplicate three'] - [12, 'duplicate three'] - [13, 'duplicate three'] - [14, 'duplicate three'] - [15, 'duplicate three'] ... space:delete(1) --- - [1, 'duplicate one'] ... space:delete(2) --- - [2, 'duplicate one'] ... space:delete(3) --- - [3, 'duplicate one'] ... space:delete(4) --- - [4, 'duplicate one'] ... space:delete(5) --- - [5, 'duplicate one'] ... space:delete(6) --- - [6, 'duplicate two'] ... space:delete(7) --- - [7, 'duplicate two'] ... space:delete(8) --- - [8, 'duplicate two'] ... space:delete(9) --- - [9, 'duplicate two'] ... space:delete(10) --- - [10, 'duplicate two'] ... space:delete(11) --- - [11, 'duplicate three'] ... space:delete(12) --- - [12, 'duplicate three'] ... space:delete(13) --- - [13, 'duplicate three'] ... space:delete(14) --- - [14, 'duplicate three'] ... space:delete(15) --- - [15, 'duplicate three'] ... -- Check min() and max() functions space:insert{1, 'Aardvark '} --- - [1, 'Aardvark '] ... space:insert{2, 'Bilimbi'} --- - [2, 'Bilimbi'] ... space:insert{3, 'Creature '} --- - [3, 'Creature '] ... s.index[1]:select{} --- - - [1, 'Aardvark '] - [2, 'Bilimbi'] - [3, 'Creature '] ... s.index[0]:min() --- - error: Index 'primary' (HASH) of space 'tweedledum' (memtx) does not support min() ... s.index[0]:max() --- - error: Index 'primary' (HASH) of space 'tweedledum' (memtx) does not support max() ... s.index[1]:min() --- - [1, 'Aardvark '] ... 
s.index[1]:max() --- - [3, 'Creature '] ... space:delete(1) --- - [1, 'Aardvark '] ... space:delete(2) --- - [2, 'Bilimbi'] ... space:delete(3) --- - [3, 'Creature '] ... box.schema.user.drop('test') --- ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/iproto_stress.result0000664000000000000000000000221013306560010022121 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... net_box = require('net.box') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... s = box.schema.space.create('test') --- ... _ = s:create_index('primary', {unique=true, parts={1, 'unsigned', 2, 'unsigned', 3, 'unsigned'}}) --- ... n_workers = 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function worker(i) n_workers = n_workers + 1 for j = 1,2 do local conn = net_box.connect(box.cfg.listen) for k = 1,10 do conn.space.test:insert{i, j, k} end conn:close() fiber.sleep(1) end n_workers = n_workers - 1 end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... for i = 1,5000 do fiber.create(worker, i) end --- ... fiber.sleep(0.1) --- ... -- check that iproto doesn't deplete tx fiber pool on wal stall (see gh-1892) box.error.injection.set("ERRINJ_WAL_DELAY", true) --- - ok ... fiber.sleep(1.0) --- ... box.error.injection.set("ERRINJ_WAL_DELAY", false) --- - ok ... repeat fiber.sleep(1) until n_workers == 0 --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/update.result0000664000000000000000000004220413306560010020473 0ustar rootroots = box.schema.space.create('tweedledum') --- ... index = s:create_index('pk') --- ... -- test delete field s:insert{1000001, 1000002, 1000003, 1000004, 1000005} --- - [1000001, 1000002, 1000003, 1000004, 1000005] ... s:update({1000001}, {{'#', 1, 1}}) --- - error: Attempt to modify a tuple field which is part of index 'pk' in space 'tweedledum' ... 
s:update({1000001}, {{'#', 1, "only one record please"}}) --- - error: 'Argument type in operation ''#'' on field 1 does not match field type: expected a number of fields to delete' ... s:truncate() --- ... -- test arithmetic s:insert{1, 0} --- - [1, 0] ... s:update(1, {{'+', 2, 10}}) --- - [1, 10] ... s:update(1, {{'+', 2, 15}}) --- - [1, 25] ... s:update(1, {{'-', 2, 5}}) --- - [1, 20] ... s:update(1, {{'-', 2, 20}}) --- - [1, 0] ... s:update(1, {{'|', 2, 0x9}}) --- - [1, 9] ... s:update(1, {{'|', 2, 0x6}}) --- - [1, 15] ... s:update(1, {{'&', 2, 0xabcde}}) --- - [1, 14] ... s:update(1, {{'&', 2, 0x2}}) --- - [1, 2] ... s:update(1, {{'^', 2, 0xa2}}) --- - [1, 160] ... s:update(1, {{'^', 2, 0xa2}}) --- - [1, 2] ... s:truncate() --- ... -- test delete multiple fields s:insert{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} --- - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] ... s:update({0}, {{'#', 42, 1}}) --- - error: Field 42 was not found in the tuple ... s:update({0}, {{'#', 4, 'abirvalg'}}) --- - error: 'Argument type in operation ''#'' on field 4 does not match field type: expected a number of fields to delete' ... s:update({0}, {{'#', 2, 1}, {'#', 4, 2}, {'#', 6, 1}}) --- - [0, 2, 3, 6, 7, 9, 10, 11, 12, 13, 14, 15] ... s:update({0}, {{'#', 4, 3}}) --- - [0, 2, 3, 10, 11, 12, 13, 14, 15] ... s:update({0}, {{'#', 5, 123456}}) --- - [0, 2, 3, 10] ... s:update({0}, {{'#', 3, 4294967295}}) --- - [0, 2] ... s:update({0}, {{'#', 2, 0}}) --- - error: 'Field 2 UPDATE error: cannot delete 0 fields' ... s:truncate() --- ... -- test insert field s:insert{1, 3, 6, 9} --- - [1, 3, 6, 9] ... s:update({1}, {{'!', 2, 2}}) --- - [1, 2, 3, 6, 9] ... s:update({1}, {{'!', 4, 4}, {'!', 4, 5}, {'!', 5, 7}, {'!', 5, 8}}) --- - [1, 2, 3, 5, 8, 7, 4, 6, 9] ... s:update({1}, {{'!', 10, 10}, {'!', 10, 11}, {'!', 10, 12}}) --- - [1, 2, 3, 5, 8, 7, 4, 6, 9, 12, 11, 10] ... s:truncate() --- ... s:insert{1, 'tuple'} --- - [1, 'tuple'] ... 
s:update({1}, {{'#', 2, 1}, {'!', 2, 'inserted tuple'}, {'=', 3, 'set tuple'}}) --- - [1, 'inserted tuple', 'set tuple'] ... s:truncate() --- ... s:insert{1, 'tuple'} --- - [1, 'tuple'] ... s:update({1}, {{'=', 2, 'set tuple'}, {'!', 2, 'inserted tuple'}, {'#', 3, 1}}) --- - [1, 'inserted tuple'] ... s:update({1}, {{'!', 1, 3}, {'!', 1, 2}}) --- - error: Attempt to modify a tuple field which is part of index 'pk' in space 'tweedledum' ... s:truncate() --- ... -- test update's assign opearations s:replace{1, 'field string value'} --- - [1, 'field string value'] ... s:update({1}, {{'=', 2, 'new field string value'}, {'=', 3, 42}, {'=', 4, 0xdeadbeef}}) --- - [1, 'new field string value', 42, 3735928559] ... -- test multiple update opearations on the same field s:update({1}, {{'+', 3, 16}, {'&', 4, 0xffff0000}, {'|', 4, 0x0000a0a0}, {'^', 4, 0xffff00aa}}) --- - error: 'Field 4 UPDATE error: double update of the same field' ... -- test update splice operation s:replace{1953719668, 'something to splice'} --- - [1953719668, 'something to splice'] ... s:update(1953719668, {{':', 2, 1, 4, 'no'}}) --- - [1953719668, 'nothing to splice'] ... s:update(1953719668, {{':', 2, 1, 2, 'every'}}) --- - [1953719668, 'everything to splice'] ... -- check an incorrect offset s:update(1953719668, {{':', 2, 100, 2, 'every'}}) --- - [1953719668, 'everything to spliceevery'] ... s:update(1953719668, {{':', 2, -100, 2, 'every'}}) --- - error: 'SPLICE error on field 2: offset is out of bound' ... s:truncate() --- ... s:insert{1953719668, 'hello', 'october', '20th'}:unpack() --- - 1953719668 - hello - october - 20th ... s:truncate() --- ... s:insert{1953719668, 'hello world'} --- - [1953719668, 'hello world'] ... s:update(1953719668, {{'=', 2, 'bye, world'}}) --- - [1953719668, 'bye, world'] ... s:delete{1953719668} --- - [1953719668, 'bye, world'] ... s:replace({10, 'abcde'}) --- - [10, 'abcde'] ... 
s:update(10, {{':', 2, 0, 0, '!'}}) --- - error: 'SPLICE error on field 2: offset is out of bound' ... s:update(10, {{':', 2, 1, 0, '('}}) --- - [10, '(abcde'] ... s:update(10, {{':', 2, 2, 0, '({'}}) --- - [10, '(({abcde'] ... s:update(10, {{':', 2, -1, 0, ')'}}) --- - [10, '(({abcde)'] ... s:update(10, {{':', 2, -2, 0, '})'}}) --- - [10, '(({abcde}))'] ... -- test update delete operations s:update({1}, {{'#', 4, 1}, {'#', 3, 1}}) --- ... -- test update insert operations s:update({1}, {{'!', 2, 1}, {'!', 2, 2}, {'!', 2, 3}, {'!', 2, 4}}) --- ... -- s:update: zero field s:insert{48} --- - [48] ... s:update(48, {{'=', 0, 'hello'}}) --- - error: Field 0 was not found in the tuple ... -- s:update: push/pop fields s:insert{1684234849} --- - [1684234849] ... s:update({1684234849}, {{'#', 2, 1}}) --- - error: Field 2 was not found in the tuple ... s:update({1684234849}, {{'!', -1, 'push1'}}) --- - [1684234849, 'push1'] ... s:update({1684234849}, {{'!', -1, 'push2'}}) --- - [1684234849, 'push1', 'push2'] ... s:update({1684234849}, {{'!', -1, 'push3'}}) --- - [1684234849, 'push1', 'push2', 'push3'] ... s:update({1684234849}, {{'#', 2, 1}, {'!', -1, 'swap1'}}) --- - [1684234849, 'push2', 'push3', 'swap1'] ... s:update({1684234849}, {{'#', 2, 1}, {'!', -1, 'swap2'}}) --- - [1684234849, 'push3', 'swap1', 'swap2'] ... s:update({1684234849}, {{'#', 2, 1}, {'!', -1, 'swap3'}}) --- - [1684234849, 'swap1', 'swap2', 'swap3'] ... s:update({1684234849}, {{'#', -1, 1}, {'!', -1, 'noop1'}}) --- - [1684234849, 'swap1', 'swap2', 'noop1'] ... s:update({1684234849}, {{'#', -1, 1}, {'!', -1, 'noop2'}}) --- - [1684234849, 'swap1', 'swap2', 'noop2'] ... s:update({1684234849}, {{'#', -1, 1}, {'!', -1, 'noop3'}}) --- - [1684234849, 'swap1', 'swap2', 'noop3'] ... -- -- negative indexes -- box.tuple.new({1, 2, 3, 4, 5}):update({{'!', 0, 'Test'}}) --- - error: Field 0 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -1, 'Test'}}) --- - [1, 2, 3, 4, 5, 'Test'] ... 
box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -3, 'Test'}}) --- - [1, 2, 3, 'Test', 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -5, 'Test'}}) --- - [1, 'Test', 2, 3, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -6, 'Test'}}) --- - ['Test', 1, 2, 3, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -7, 'Test'}}) --- - error: Field -7 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'!', -100500, 'Test'}}) --- - error: Field -100500 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'=', 0, 'Test'}}) --- - error: Field 0 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -1, 'Test'}}) --- - [1, 2, 3, 4, 'Test'] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -3, 'Test'}}) --- - [1, 2, 'Test', 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -5, 'Test'}}) --- - ['Test', 2, 3, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -6, 'Test'}}) --- - error: Field -6 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'=', -100500, 'Test'}}) --- - error: Field -100500 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'+', 0, 100}}) --- - error: Field 0 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -1, 100}}) --- - [1, 2, 3, 4, 105] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -3, 100}}) --- - [1, 2, 103, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -5, 100}}) --- - [101, 2, 3, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -6, 100}}) --- - error: Field -6 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'+', -100500, 100}}) --- - error: Field -100500 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'|', 0, 100}}) --- - error: Field 0 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -1, 100}}) --- - [1, 2, 3, 4, 101] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -3, 100}}) --- - [1, 2, 103, 4, 5] ... 
box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -5, 100}}) --- - [101, 2, 3, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -6, 100}}) --- - error: Field -6 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'|', -100500, 100}}) --- - error: Field -100500 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'#', 0, 1}}) --- - error: Field 0 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -1, 1}}) --- - [1, 2, 3, 4] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -3, 1}}) --- - [1, 2, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -5, 1}}) --- - [2, 3, 4, 5] ... box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -6, 1}}) --- - error: Field -6 was not found in the tuple ... box.tuple.new({1, 2, 3, 4, 5}):update({{'#', -100500, 1}}) --- - error: Field -100500 was not found in the tuple ... -- -- #416: UPDATEs from Lua can't be properly restored due to one based indexing -- env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') s = box.space.tweedledum --- ... s:select{} --- - - [10, '(({abcde}))'] - [48] - [1684234849, 'swap1', 'swap2', 'noop3'] ... s:truncate() --- ... s:drop() --- ... -- #521: Cryptic error message in update operation s = box.schema.space.create('tweedledum') --- ... index = s:create_index('pk') --- ... s:insert{1, 2, 3} --- - [1, 2, 3] ... s:update({1}) --- - error: Usage index:update(key, ops) ... s:update({1}, {'=', 1, 1}) --- - error: Illegal parameters, update operation must be an array {op,..} ... s:drop() --- ... -- #528: Different types in arithmetical update, overflow check ffi = require('ffi') --- ... s = box.schema.create_space('tweedledum') --- ... index = s:create_index('pk') --- ... s:insert{0, -1} --- - [0, -1] ... -- + -- s:update({0}, {{'+', 2, "a"}}) -- err --- - error: 'Argument type in operation ''+'' on field 2 does not match field type: expected a number' ... 
s:update({0}, {{'+', 2, 10}}) -- neg(ative) + pos(itive) = pos(itive) 9 --- - [0, 9] ... s:update({0}, {{'+', 2, 5}}) -- pos + pos = pos 14 --- - [0, 14] ... s:update({0}, {{'+', 2, -4}}) -- pos + neg = pos 10 --- - [0, 10] ... s:update({0}, {{'+', 2, -22}}) -- pos + neg = neg -12 --- - [0, -12] ... s:update({0}, {{'+', 2, -3}}) -- neg + neg = neg -15 --- - [0, -15] ... s:update({0}, {{'+', 2, 7}}) -- neg + pos = neg -8 --- - [0, -8] ... -- - -- s:update({0}, {{'-', 2, "a"}}) -- err --- - error: 'Argument type in operation ''-'' on field 2 does not match field type: expected a number' ... s:update({0}, {{'-', 2, 16}}) -- neg(ative) - pos(itive) = neg(ative) -24 --- - [0, -24] ... s:update({0}, {{'-', 2, -4}}) -- neg - neg = neg 20 --- - [0, -20] ... s:update({0}, {{'-', 2, -32}}) -- neg - neg = pos 12 --- - [0, 12] ... s:update({0}, {{'-', 2, 3}}) -- pos - pos = pos 9 --- - [0, 9] ... s:update({0}, {{'-', 2, -5}}) -- pos - neg = pos 14 --- - [0, 14] ... s:update({0}, {{'-', 2, 17}}) -- pos - pos = neg -3 --- - [0, -3] ... -- bit -- s:replace{0, 0} -- 0 --- - [0, 0] ... s:update({0}, {{'|', 2, 24}}) -- 24 --- - [0, 24] ... s:update({0}, {{'|', 2, 2}}) -- 26 --- - [0, 26] ... s:update({0}, {{'&', 2, 50}}) -- 18 --- - [0, 18] ... s:update({0}, {{'^', 2, 6}}) -- 20 --- - [0, 20] ... s:update({0}, {{'|', 2, -1}}) -- err --- - error: 'Argument type in operation ''|'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'&', 2, -1}}) -- err --- - error: 'Argument type in operation ''&'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'^', 2, -1}}) -- err --- - error: 'Argument type in operation ''^'' on field 2 does not match field type: expected a positive integer' ... s:replace{0, -1} -- -1 --- - [0, -1] ... s:update({0}, {{'|', 2, 2}}) -- err --- - error: 'Argument type in operation ''|'' on field 2 does not match field type: expected a positive integer' ... 
s:update({0}, {{'&', 2, 40}}) -- err --- - error: 'Argument type in operation ''&'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'^', 2, 6}}) -- err --- - error: 'Argument type in operation ''^'' on field 2 does not match field type: expected a positive integer' ... s:replace{0, 1.5} -- 1.5 --- - [0, 1.5] ... s:update({0}, {{'|', 2, 2}}) -- err --- - error: 'Argument type in operation ''|'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'&', 2, 40}}) -- err --- - error: 'Argument type in operation ''&'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'^', 2, 6}}) -- err --- - error: 'Argument type in operation ''^'' on field 2 does not match field type: expected a positive integer' ... -- double s:replace{0, 5} -- 5 --- - [0, 5] ... s:update({0}, {{'+', 2, 1.5}}) -- int + double = double 6.5 --- - [0, 6.5] ... s:update({0}, {{'|', 2, 2}}) -- err (double!) --- - error: 'Argument type in operation ''|'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'-', 2, 0.5}}) -- double - double = double 6 --- - [0, 6] ... s:update({0}, {{'+', 2, 1.5}}) -- double + double = double 7.5 --- - [0, 7.5] ... -- float s:replace{0, ffi.new("float", 1.5)} -- 1.5 --- - [0, 1.5] ... s:update({0}, {{'+', 2, 2}}) -- float + int = float 3.5 --- - [0, 3.5] ... s:update({0}, {{'+', 2, ffi.new("float", 3.5)}}) -- float + int = float 7 --- - [0, 7] ... s:update({0}, {{'|', 2, 2}}) -- err (float!) --- - error: 'Argument type in operation ''|'' on field 2 does not match field type: expected a positive integer' ... s:update({0}, {{'-', 2, ffi.new("float", 1.5)}}) -- float - float = float 5.5 --- - [0, 5.5] ... s:update({0}, {{'+', 2, ffi.new("float", 3.5)}}) -- float + float = float 9 --- - [0, 9] ... s:update({0}, {{'-', 2, ffi.new("float", 9)}}) -- float + float = float 0 --- - [0, 0] ... 
s:update({0}, {{'+', 2, ffi.new("float", 1.2)}}) -- float + float = float 1.2 --- - [0, 1.2000000476837] ... -- overflow -- s:replace{0, 0xfffffffffffffffeull} --- - [0, 18446744073709551614] ... s:update({0}, {{'+', 2, 1}}) -- ok --- - [0, 18446744073709551615] ... s:update({0}, {{'+', 2, 1}}) -- overflow --- - error: Integer overflow when performing '+' operation on field 2 ... s:update({0}, {{'+', 2, 100500}}) -- overflow --- - error: Integer overflow when performing '+' operation on field 2 ... s:replace{0, 1} --- - [0, 1] ... s:update({0}, {{'+', 2, 0xffffffffffffffffull}}) -- overflow --- - error: Integer overflow when performing '+' operation on field 2 ... s:replace{0, -1} --- - [0, -1] ... s:update({0}, {{'+', 2, 0xffffffffffffffffull}}) -- ok --- - [0, 18446744073709551614] ... s:replace{0, 0} --- - [0, 0] ... s:update({0}, {{'-', 2, 0x7fffffffffffffffull}}) -- ok --- - [0, -9223372036854775807] ... s:replace{0, -1} --- - [0, -1] ... s:update({0}, {{'-', 2, 0x7fffffffffffffffull}}) -- ok --- - [0, -9223372036854775808] ... s:replace{0, -2} --- - [0, -2] ... s:update({0}, {{'-', 2, 0x7fffffffffffffffull}}) -- overflow --- - error: Integer overflow when performing '-' operation on field 2 ... s:replace{0, 1} --- - [0, 1] ... s:update({0}, {{'-', 2, 0xffffffffffffffffull}}) -- overflow --- - error: Integer overflow when performing '-' operation on field 2 ... s:replace{0, 0xffffffffffffffefull} --- - [0, 18446744073709551599] ... s:update({0}, {{'-', 2, -16}}) -- ok --- - [0, 18446744073709551615] ... s:update({0}, {{'-', 2, -16}}) -- overflow --- - error: Integer overflow when performing '-' operation on field 2 ... s:replace{0, -0x4000000000000000ll} --- - [0, -4611686018427387904] ... s:update({0}, {{'+', 2, -0x4000000000000000ll}}) -- ok --- - [0, -9223372036854775808] ... s:replace{0, -0x4000000000000000ll} --- - [0, -4611686018427387904] ... 
s:update({0}, {{'+', 2, -0x4000000000000001ll}}) -- overflow --- - error: Integer overflow when performing '+' operation on field 2 ... -- some wrong updates -- s:update({0}, 0) --- - error: Usage index:update(key, ops) ... s:update({0}, {'+', 2, 2}) --- - error: Illegal parameters, update operation must be an array {op,..} ... s:update({0}, {{}}) --- - error: Illegal parameters, update operation must be an array {op,..}, got empty array ... s:update({0}, {{'+'}}) --- - error: Unknown UPDATE operation ... s:update({0}, {{'+', 0}}) --- - error: Unknown UPDATE operation ... s:update({0}, {{'+', '+', '+'}}) --- - error: Illegal parameters, field id must be a number ... s:update({0}, {{0, 0, 0}}) --- - error: Illegal parameters, update operation name must be a string ... -- test for https://github.com/tarantool/tarantool/issues/1142 -- broken WAL during upsert ops = {} --- ... for i = 1,10 do table.insert(ops, {'=', 2, '1234567890'}) end --- ... s:upsert({0}, ops) --- ... -- https://github.com/tarantool/tarantool/issues/1854 s:get{0} --- - [0, '1234567890'] ... s:update({0}, {}) --- - [0, '1234567890'] ... --#stop server default --#start server default s = box.space.tweedledum --- ... -- -- gh-2036: msgpackffi doesn't support __serialize hint -- map = setmetatable({}, { __serialize = 'map' }) --- ... t = box.tuple.new({1, 2, 3}) --- ... s:replace({1, 2, 3}) --- - [1, 2, 3] ... t:update({{'=', 3, map}}) --- - [1, 2, {}] ... s:update(1, {{'=', 3, map}}) --- - [1, 2, {}] ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/alter_limits.test.lua0000664000000000000000000003525413306560010022131 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") -- ---------------------------------------------------------------- -- LIMITS -- ---------------------------------------------------------------- box.schema.SYSTEM_ID_MIN box.schema.FIELD_MAX box.schema.INDEX_FIELD_MAX box.schema.NAME_MAX box.schema.INDEX_ID box.schema.SPACE_ID box.schema.INDEX_MAX box.schema.SPACE_MAX box.schema.SYSTEM_ID_MAX box.schema.SCHEMA_ID box.schema.FORMAT_ID_MAX -- ---------------------------------------------------------------- -- CREATE SPACE -- ---------------------------------------------------------------- s = box.schema.space.create('tweedledum') -- space already exists box.schema.space.create('tweedledum') -- create if not exists s = box.schema.space.create('tweedledum', { if_not_exists = true }) s:drop() -- no such space s:drop() -- no such engine box.schema.space.create('tweedleedee', { engine = 'unknown' }) -- explicit space id s = box.schema.space.create('tweedledum', { id = 3000 }) s.id -- duplicate id box.schema.space.create('tweedledee', { id = 3000 }) -- stupid space id box.schema.space.create('tweedledee', { id = 'tweedledee' }) s:drop() -- too long space name box.schema.space.create(string.rep('t', box.schema.NAME_MAX + 1)) -- too long space engine name box.schema.space.create('tweedleedee', { engine = string.rep('too-long', 100) }) -- space name limit box.schema.space.create(string.rep('t', box.schema.NAME_MAX)..'_') s = box.schema.space.create(string.rep('t', box.schema.NAME_MAX - 1)..'_') s.name:len() s:drop() s = box.schema.space.create(string.rep('t', box.schema.NAME_MAX - 2)..'_') s.name:len() s:drop() -- space with no indexes - test update, delete, select, truncate s = box.schema.space.create('tweedledum') s:insert{0} s:select{} s:delete{0} s:update(0, {{"=", 1, 0}}) 
s:insert{0} s.index[0] s:truncate() s.enabled -- enabled/disabled transition index = s:create_index('primary', { type = 'hash' }) s.enabled -- rename space - same name s:rename('tweedledum') s.name -- rename space - different name s:rename('tweedledee') s.name -- the reference from box.space[] to the space by old name should be gone box.space['tweedledum'] -- rename space - bad name s:rename(string.rep('t', box.schema.NAME_MAX * 2)) s.name -- access to a renamed space s:insert{0} s:delete{0} -- cleanup s:drop() -- check DDL on invalid space object s:create_index('primary') s:rename('xxx') s:drop() -- create a space with reserved id (ok, but warns in the log) s = box.schema.space.create('test', { id = 256 }) s.id s:drop() s = box.schema.space.create('test', { field_count = 2 }) s.field_count index = s:create_index('primary') -- field_count actually works s:insert{1} s:insert{1, 2} s:insert{1, 2, 3} s:select{} FIELD_COUNT = 4 -- increase field_count -- error box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 3}}) s:select{} -- decrease field_count - error box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 1}}) -- remove field_count - ok _ = box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 0}}) s:select{} -- increase field_count - error box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 3}}) s:truncate() s:select{} -- set field_count of an empty space _ = box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 3}}) s:select{} -- field_count actually works s:insert{3, 4} s:insert{3, 4, 5} s:insert{3, 4, 5, 6} s:insert{7, 8, 9} s:select{} -- check transition of space from enabled to disabled on -- deletion of the primary key s.enabled s.index[0]:drop() s.enabled s.index[0] -- "disabled" on -- deletion of primary key s:drop() -- ---------------------------------------------------------------- -- CREATE INDEX -- ---------------------------------------------------------------- -- s = box.schema.space.create('test') test_run:cmd("setopt delimiter 
';'") for k=1, box.schema.INDEX_MAX, 1 do index = s:create_index('i'..k, { type = 'hash' }) end; -- cleanup for k=2, box.schema.INDEX_MAX, 1 do s.index['i'..k]:drop() end; test_run:cmd("setopt delimiter ''"); -- test limits enforced in key_def_check: -- unknown index type index = s:create_index('test', { type = 'nosuchtype' }) -- hash index is not unique index = s:create_index('test', { type = 'hash', unique = false }) -- bitset index is unique index = s:create_index('test', { type = 'bitset', unique = true }) -- bitset index is multipart index = s:create_index('test', { type = 'bitset', parts = {1, 'unsigned', 2, 'unsigned'}}) -- part count must be positive index = s:create_index('test', { type = 'hash', parts = {}}) -- unknown field type index = s:create_index('test', { type = 'hash', parts = { 2, 'nosuchtype' }}) index = s:create_index('test', { type = 'hash', parts = { 2, 'any' }}) index = s:create_index('test', { type = 'hash', parts = { 2, 'array' }}) index = s:create_index('test', { type = 'hash', parts = { 2, 'map' }}) index = s:create_index('test', { type = 'rtree', parts = { 2, 'nosuchtype' }}) index = s:create_index('test', { type = 'rtree', parts = { 2, 'any' }}) index = s:create_index('test', { type = 'rtree', parts = { 2, 'map' }}) -- bad field no index = s:create_index('test', { type = 'hash', parts = { 'qq', 'nosuchtype' }}) -- big field no index = s:create_index('test', { type = 'hash', parts = { box.schema.FIELD_MAX, 'unsigned' }}) index = s:create_index('test', { type = 'hash', parts = { box.schema.FIELD_MAX - 1, 'unsigned' }}) index = s:create_index('test', { type = 'hash', parts = { box.schema.FIELD_MAX + 90, 'unsigned' }}) index = s:create_index('test', { type = 'hash', parts = { box.schema.INDEX_FIELD_MAX + 1, 'unsigned' }}) index = s:create_index('t1', { type = 'hash', parts = { box.schema.INDEX_FIELD_MAX, 'unsigned' }}) index = s:create_index('t2', { type = 'hash', parts = { box.schema.INDEX_FIELD_MAX - 1, 'unsigned' }}) -- cleanup s:drop() 
s = box.schema.space.create('test') -- same part can't be indexed twice index = s:create_index('t1', { type = 'hash', parts = { 1, 'unsigned', 1, 'string' }}) -- a lot of key parts parts = {} test_run:cmd("setopt delimiter ';'") for k=1, box.schema.INDEX_PART_MAX + 1, 1 do table.insert(parts, k) table.insert(parts, 'unsigned') end; #parts; index = s:create_index('t1', { type = 'hash', parts = parts}); parts = {}; for k=1, box.schema.INDEX_PART_MAX, 1 do table.insert(parts, k + 1) table.insert(parts, 'unsigned') end; #parts; index = s:create_index('t1', { type = 'hash', parts = parts}); test_run:cmd("setopt delimiter ''"); -- this is actually incorrect since parts is a lua table -- and length of a lua table which has index 0 set is not correct #s.index[0].parts -- cleanup s:drop() -- check costraints in tuple_format_new() s = box.schema.space.create('test') index = s:create_index('t1', { type = 'hash' }) -- field type contradicts field type of another index index = s:create_index('t2', { type = 'hash', parts = { 1, 'string' }}) -- ok index = s:create_index('t2', { type = 'hash', parts = { 2, 'string' }}) -- don't allow drop of the primary key in presence of other keys s.index[0]:drop() -- cleanup s:drop() -- index name, name manipulation s = box.schema.space.create('test') index = s:create_index('primary', { type = 'hash' }) -- space cache is updated correctly s.index[0].name s.index[0].id s.index[0].type s.index['primary'].name s.index['primary'].id s.index['primary'].type s.index.primary.name s.index.primary.id -- other properties are preserved s.index.primary.type s.index.primary.unique s.index.primary:rename('new') s.index[0].name s.index.primary s.index.new.name -- too long name s.index[0]:rename(string.rep('t', box.schema.NAME_MAX)..'_') s.index[0].name s.index[0]:rename(string.rep('t', box.schema.NAME_MAX - 1)..'_') s.index[0].name:len() s.index[0]:rename(string.rep('t', box.schema.NAME_MAX - 2)..'_') s.index[0].name:len() s.index[0]:rename('primary') 
s.index.primary.name -- cleanup s:drop() -- modify index s = box.schema.space.create('test') index = s:create_index('primary', { type = 'hash' }) -- correct error on misuse of alter s.index.primary.alter({unique=false}) s.index.primary:alter({unique=false}) -- unique -> non-unique, index type s.index.primary:alter({type='tree', unique=false, name='pk'}) s.index.primary.name s.index.primary.id s.index.pk.type s.index.pk.unique s.index.pk:rename('primary') index = s:create_index('second', { type = 'tree', parts = { 2, 'string' } }) s.index.second.id index = s:create_index('third', { type = 'hash', parts = { 3, 'unsigned' } }) s.index.third:rename('second') s.index.third.id s.index.second:drop() s.index.third:alter({name = 'second'}) s.index.third s.index.second.name s.index.second.id s:drop() -- ---------------------------------------------------------------- -- BUILD INDEX: changes of a non-empty index -- ---------------------------------------------------------------- s = box.schema.space.create('full') index = s:create_index('primary', { type = 'tree', parts = { 1, 'string' }}) s:insert{'No such movie', 999} s:insert{'Barbara', 2012} s:insert{'Cloud Atlas', 2012} s:insert{'Almanya - Willkommen in Deutschland', 2011} s:insert{'Halt auf freier Strecke', 2011} s:insert{'Homevideo', 2011} s:insert{'Die Fremde', 2010} -- create index with data index = s:create_index('year', { type = 'tree', unique=false, parts = { 2, 'unsigned'} }) s.index.primary:select{} -- a duplicate in the created index index = s:create_index('nodups', { type = 'tree', unique=true, parts = { 2, 'unsigned'} }) -- change of non-unique index to unique: same effect s.index.year:alter({unique=true}) s.index.primary:select{} -- ambiguous field type index = s:create_index('string', { type = 'tree', unique = false, parts = { 2, 'string'}}) -- create index on a non-existing field index = s:create_index('nosuchfield', { type = 'tree', unique = true, parts = { 3, 'string'}}) s.index.year:drop() s:insert{'Der 
Baader Meinhof Komplex', '2009 '} -- create an index on a field with a wrong type index = s:create_index('year', { type = 'tree', unique = false, parts = { 2, 'unsigned'}}) -- a field is missing s:replace{'Der Baader Meinhof Komplex'} index = s:create_index('year', { type = 'tree', unique = false, parts = { 2, 'unsigned'}}) s:drop() -- unique -> non-unique transition s = box.schema.space.create('test') -- primary key must be unique index = s:create_index('primary', { unique = false }) -- create primary key index = s:create_index('primary', { type = 'hash' }) s:insert{1, 1} index = s:create_index('secondary', { type = 'tree', unique = false, parts = {2, 'unsigned'}}) s:insert{2, 1} s.index.secondary:alter{ unique = true } s:delete{2} s.index.secondary:alter{ unique = true } s:insert{2, 1} s:insert{2, 2} s.index.secondary:alter{ unique = false} s:insert{3, 2} -- changing index id is not allowed s.index.secondary:alter{ id = 10} s:drop() -- ---------------------------------------------------------------- -- SPACE CACHE: what happens to a space cache when an object is gone -- ---------------------------------------------------------------- s = box.schema.space.create('test') s1 = s index = s:create_index('primary') s1.index.primary.id primary = s1.index.primary s.index.primary:drop() primary.id primary:select{} s:drop() -- @todo: add a test case for dangling iterator (currently no checks -- for a dangling iterator in the code -- ---------------------------------------------------------------- -- ---------------------------------------------------------------- -- RECOVERY: check that all indexes are correctly built -- during recovery regardless of when they are created -- ---------------------------------------------------------------- -- primary, secondary keys in a snapshot s_empty = box.schema.space.create('s_empty') indexe1 = s_empty:create_index('primary') indexe2 = s_empty:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) s_full = 
box.schema.space.create('s_full') indexf1 = s_full:create_index('primary') indexf2 = s_full:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) s_full:insert{1, 1, 'a'} s_full:insert{2, 2, 'b'} s_full:insert{3, 3, 'c'} s_full:insert{4, 4, 'd'} s_full:insert{5, 5, 'e'} s_nil = box.schema.space.create('s_nil') s_drop = box.schema.space.create('s_drop') box.snapshot() s_drop:drop() indexn1 = s_nil:create_index('primary', { type = 'hash'}) s_nil:insert{1,2,3,4,5,6} s_nil:insert{7, 8, 9, 10, 11,12} indexn2 = s_nil:create_index('secondary', { type = 'tree', unique=false, parts = {2, 'unsigned', 3, 'unsigned', 4, 'unsigned'}}) s_nil:insert{13, 14, 15, 16, 17} r_empty = box.schema.space.create('r_empty') indexe1 = r_empty:create_index('primary') indexe2 = r_empty:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) r_full = box.schema.space.create('r_full') indexf1 = r_full:create_index('primary', { type = 'tree', unique = true, parts = {1, 'unsigned'}}) indexf2 = r_full:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) r_full:insert{1, 1, 'a'} r_full:insert{2, 2, 'b'} r_full:insert{3, 3, 'c'} r_full:insert{4, 4, 'd'} r_full:insert{5, 5, 'e'} indexf1 = s_full:create_index('multikey', { type = 'tree', unique = true, parts = { 2, 'unsigned', 3, 'string'}}) s_full:insert{6, 6, 'f'} s_full:insert{7, 7, 'g'} s_full:insert{8, 8, 'h'} r_disabled = box.schema.space.create('r_disabled') test_run:cmd("restart server default") s_empty = box.space['s_empty'] s_full = box.space['s_full'] s_nil = box.space['s_nil'] s_drop = box.space['s_drop'] r_empty = box.space['r_empty'] r_full = box.space['r_full'] r_disabled = box.space['r_disabled'] s_drop s_empty.index.primary.type s_full.index.primary.type r_empty.index.primary.type r_full.index.primary.type s_nil.index.primary.type s_empty.index.primary.name s_full.index.primary.name r_empty.index.primary.name r_full.index.primary.name 
s_nil.index.primary.name s_empty.enabled s_full.enabled r_empty.enabled r_full.enabled s_nil.enabled r_disabled.enabled s_empty.index.secondary.name s_full.index.secondary.name r_empty.index.secondary.name r_full.index.secondary.name s_nil.index.secondary.name s_empty.index.primary:count(1) s_full.index.primary:count(1) r_empty.index.primary:count(1) r_full.index.primary:count(1) s_nil.index.primary:count(1) s_empty.index.secondary:count(1) s_full.index.secondary:count(1) r_empty.index.secondary:count(1) r_full.index.secondary:count(1) s_nil.index.secondary:count(1) -- gh-503 if_not_exits option in create index i1 = s_empty:create_index("test") i1:select{} i2 = s_empty:create_index("test") i3 = s_empty:create_index("test", { if_not_exists = true } ) i3:select{} -- cleanup s_empty:drop() s_full:drop() r_empty:drop() r_full:drop() s_nil:drop() r_disabled:drop() -- -- @todo usability -- --------- -- - space name in all error messages! -- error: Duplicate key exists in unique index 1 (ugly) -- -- @todo features -------- -- - ffi function to enable/disable space -- test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/box/tuple.test.lua0000664000000000000000000002013013306565107020571 0ustar rootroot-- box.tuple test env = require('test_run') test_run = env.new() test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") -- Test box.tuple:slice() t=box.tuple.new{'0', '1', '2', '3', '4', '5', '6', '7'} t:slice(0) t:slice(-1) t:slice(1) t:slice(-1, -1) t:slice(-1, 1) t:slice(1, -1) t:slice(1, 3) t:slice(7) t:slice(8) t:slice(9) t:slice(100500) t:slice(9, -1) t:slice(6, -1) t:slice(4, 4) t:slice(6, 4) t:slice(0, 0) t:slice(9, 10) t:slice(-7) t:slice(-8) t:slice(-9) t:slice(-100500) t:slice(500, 700) -- box.tuple.new test box.tuple.new() box.tuple.new(1) box.tuple.new('string') box.tuple.new(tonumber64('18446744073709551615')) box.tuple.new{tonumber64('18446744073709551615'), 'string', 1} -- A test case for Bug#1131108 'incorrect conversion from 
boolean lua value to tarantool tuple' function bug1075677() local range = {} table.insert(range, 1>0) return range end box.tuple.new(bug1075677()) bug1075677=nil -- boolean values in a tuple box.tuple.new(false) box.tuple.new({false}) -- tuple:bsize() t = box.tuple.new('abc') t t:bsize() -- -- Test cases for #106 box.tuple.new fails on multiple items -- box.tuple.new() box.tuple.new{} box.tuple.new(1) box.tuple.new{1} box.tuple.new(1, 2, 3, 4, 5) box.tuple.new{1, 2, 3, 4, 5} box.tuple.new({'a', 'b'}, {'c', 'd'}, {'e', 'f'}) box.tuple.new{{'a', 'b'}, {'c', 'd'}, {'e', 'f'}} box.tuple.new({1, 2}, 'x', 'y', 'z', {c = 3, d = 4}, {e = 5, f = 6}) box.tuple.new{{1, 2}, 'x', 'y', 'z', {c = 3, d = 4}, {e = 5, f = 6}} box.tuple.new('x', 'y', 'z', {1, 2}, {c = 3, d = 4}, {e = 5, f = 6}) box.tuple.new{'x', 'y', 'z', {1, 2}, {c = 3, d = 4}, {e = 5, f = 6}} t=box.tuple.new{'a','b','c'} t:totable() t:unpack() t:totable(1) t:unpack(1) t:totable(2) t:unpack(2) t:totable(1, 3) t:unpack(1, 3) t:totable(2, 3) t:unpack(2, 3) t:totable(2, 4) t:unpack(2, 4) t:totable(nil, 2) t:unpack(nil, 2) t:totable(2, 1) t:unpack(2, 1) t:totable(0) t:totable(1, 0) -- -- Check that tuple:totable correctly sets serializer hints -- box.tuple.new{1, 2, 3}:totable() getmetatable(box.tuple.new{1, 2, 3}:totable()).__serialize -- A test case for the key as an tuple space = box.schema.space.create('tweedledum') index = space:create_index('primary') space:truncate() t=space:insert{0, 777, '0', '1', '2', '3'} t space:replace(t) space:replace{777, { 'a', 'b', 'c', {'d', 'e', t}}} -- A test case for tuple:totable() method t=space:get{777}:totable() t[2], t[3], t[4], t[5] space:truncate() -- A test case for Bug#1119389 '(lbox_tuple_index) crashes on 'nil' argument' t=space:insert{0, 8989} t[nil] -------------------------------------------------------------------------------- -- test tuple:next -------------------------------------------------------------------------------- t = box.tuple.new({'a', 'b', 'c'}) state, 
val = t:next() state, val state, val = t:next(state) state, val state, val = t:next(state) state, val state, val = t:next(state) state, val t:next(nil) t:next(0) t:next(1) t:next(2) t:next(3) t:next(4) t:next(-1) t:next("fdsaf") box.tuple.new({'x', 'y', 'z'}):next() t=space:insert{1953719668} t:next(1684234849) t:next(1) t:next(nil) t:next(t:next()) -------------------------------------------------------------------------------- -- test tuple:pairs -------------------------------------------------------------------------------- ta = {} for k, v in t:pairs() do table.insert(ta, v) end ta t=space:replace{1953719668, 'another field'} ta = {} for k, v in t:pairs() do table.insert(ta, v) end ta t=space:replace{1953719668, 'another field', 'one more'} ta = {} for k, v in t:pairs() do table.insert(ta, v) end ta t=box.tuple.new({'a', 'b', 'c', 'd'}) ta = {} for it,field in t:pairs() do table.insert(ta, field); end ta t = box.tuple.new({'a', 'b', 'c'}) gen, init, state = t:pairs() gen, init, state state, val = gen(init, state) state, val state, val = gen(init, state) state, val state, val = gen(init, state) state, val state, val = gen(init, state) state, val r = {} for _state, val in t:pairs() do table.insert(r, val) end r r = {} for _state, val in t:pairs() do table.insert(r, val) end r r = {} for _state, val in t:pairs(1) do table.insert(r, val) end r r = {} for _state, val in t:pairs(3) do table.insert(r, val) end r r = {} for _state, val in t:pairs(10) do table.insert(r, val) end r r = {} for _state, val in t:pairs(nil) do table.insert(r, val) end r t:pairs(nil) t:pairs("fdsaf") -------------------------------------------------------------------------------- -- test tuple:find -------------------------------------------------------------------------------- env = require('test_run') test_run = env.new() test_run:cmd("setopt delimiter ';'") t = box.tuple.new({'a','b','c','a', -1, 0, 1, 2, true, 9223372036854775807ULL, -9223372036854775807LL}); test_run:cmd("setopt 
delimiter ''"); t:find('a') t:find(1, 'a') t:find('c') t:find('xxxxx') t:find(1, 'xxxxx') t:findall('a') t:findall(1, 'a') t:findall('xxxxx') t:findall(1, 'xxxxx') t:find(100, 'a') t:findall(100, 'a') t:find(100, 'xxxxx') t:findall(100, 'xxxxx') --- -- Lua type coercion --- t:find(2) t:findall(2) t:find(2ULL) t:findall(2ULL) t:find(2LL) t:findall(2LL) t:find(2) t:findall(2) t:find(-1) t:findall(-1) t:find(-1LL) t:findall(-1LL) t:find(true) t:findall(true) t:find(9223372036854775807LL) t:findall(9223372036854775807LL) t:find(9223372036854775807ULL) t:findall(9223372036854775807ULL) t:find(-9223372036854775807LL) t:findall(-9223372036854775807LL) -------------------------------------------------------------------------------- -- test tuple:update -------------------------------------------------------------------------------- -- see box/update.test.lua for more test cases t = box.tuple.new({'a', 'b', 'c', 'd', 'e'}) t:update() t:update(10) t:update({}) t:update({{ '!', -1, 'f'}}) t:update({{ '#', 4, 1}}) t t = nil -- gh-2454 Regression in msgpack t = box.tuple.new(require('yaml').decode("[17711728, {1000: 'xxx'}]")) t:update({{'=', 2, t[2]}}) t t = nil -------------------------------------------------------------------------------- -- test msgpack.encode + tuple -------------------------------------------------------------------------------- msgpack = require('msgpack') encode_load_metatables = msgpack.cfg.encode_load_metatables -- disable __serialize hook to test internal on_encode hook msgpack.cfg{encode_load_metatables = false} msgpackffi = require('msgpackffi') t = box.tuple.new({'a', 'b', 'c'}) msgpack.decode(msgpackffi.encode(t)) msgpack.decode(msgpack.encode(t)) msgpack.decode(msgpackffi.encode({1, {'x', 'y', t, 'z'}, 2, 3})) msgpack.decode(msgpack.encode({1, {'x', 'y', t, 'z'}, 2, 3})) -- restore configuration msgpack.cfg{encode_load_metatables = encode_load_metatables} -- gh-738: Serializer hints are unclear t = box.tuple.new({1, 2, {}}) map = t[3] 
getmetatable(map) ~= nil map map['test'] = 48 map getmetatable(map) == nil -- gh-1189: tuple is not checked as first argument t = box.tuple.new({1, 2, {}}) t.bsize() t.find(9223372036854775807LL) t.findall(9223372036854775807LL) t.update() t.upsert() t = nil space:drop() -- gh-1266: luaL_convertfield crashes on ffi.typeof() ffi = require('ffi') ffi.typeof('struct tuple') -- gh-1345: lbox_tuple_new() didn't check result of box_tuple_new() for NULL -- try to allocate 100Mb tuple and checked that server won't crash box.tuple.new(string.rep('x', 100 * 1024 * 1024)) ~= nil collectgarbage('collect') -- collect huge string -- testing tostring test_run:cmd("setopt delimiter ';'") null = nil t = box.tuple.new({1, -2, 1.2, -1.2}, 'x', 'y', 'z', null, true, false, {bin = "\x08\x5c\xc2\x80\x12\x2f", big_num = tonumber64('18446744073709551615'), map = {key = "value"}, double=1.0000000001, utf8="Кудыкины горы"}); tostring(t); t; test_run:cmd("setopt delimiter ''"); -- -- gh-1014: tuple field names and tuple methods aliases. -- t = box.tuple.new({1, 2, 3}) box.tuple.next == t.next box.tuple.ipairs == t.ipairs box.tuple.pairs == t.pairs box.tuple.slice == t.slice box.tuple.transform == t.transform box.tuple.find == t.find box.tuple.findall == t.findall box.tuple.unpack == t.unpack box.tuple.totable == t.totable box.tuple.update == t.update box.tuple.upsert == t.upsert box.tuple.bsize == t.bsize test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_rect.result0000664000000000000000000000332613306560010021351 0ustar rootroots = box.schema.space.create('spatial') --- ... _ = s:create_index('primary') --- ... _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) --- ... s:insert{1,{0,0,10,10}{ --- - error: '[string "s:insert{1,{0,0,10,10}{ "]:1: ''}'' expected near ''{''' ... s:insert{2,{5,5,10,10}} --- - [2, [5, 5, 10, 10]] ... s:insert{3,{0,0,5,5}} --- - [3, [0, 0, 5, 5]] ... 
-- select all records s.index.spatial:select({}, {iterator = 'ALL'}) --- - - [2, [5, 5, 10, 10]] - [3, [0, 0, 5, 5]] ... -- select records belonging to rectangle (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'LE'}) --- - - [3, [0, 0, 5, 5]] ... -- select records strict belonging to rectangle (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'LT'}) --- - [] ... -- select records strict belonging to rectangle (4,4,11,11) s.index.spatial:select({4,4,11,11}, {iterator = 'LT'}) --- - - [2, [5, 5, 10, 10]] ... -- select records containing point (5,5) s.index.spatial:select({5,5}, {iterator = 'GE'}) --- - - [2, [5, 5, 10, 10]] - [3, [0, 0, 5, 5]] ... -- select records containing rectangle (1,1,2,2) s.index.spatial:select({1,1,2,2}, {iterator = 'GE'}) --- - - [3, [0, 0, 5, 5]] ... -- select records strict containing rectangle (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'GT'}) --- - [] ... -- select records overlapping rectangle (9,4,11,6) s.index.spatial:select({9,4,11,6}, {iterator = 'OVERLAPS'}) --- - - [2, [5, 5, 10, 10]] ... -- select records with coordinates (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'EQ'}) --- - - [3, [0, 0, 5, 5]] ... -- select neighbors of point (1,1) s.index.spatial:select({1,1}, {iterator = 'NEIGHBOR'}) --- - - [3, [0, 0, 5, 5]] - [2, [5, 5, 10, 10]] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/hash.result0000664000000000000000000004142313306560010020136 0ustar rootroot--============================================================================= -- 32-bit hash tests --============================================================================= ------------------------------------------------------------------------------- -- 32-bit hash insert fields tests ------------------------------------------------------------------------------- hash = box.schema.space.create('tweedledum') --- ... tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... 
bsize = tmp:bsize() --- ... -- Insert valid fields hash:insert{0, 'value1 v1.0', 'value2 v1.0'} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{1, 'value1 v1.0', 'value2 v1.0'} --- - [1, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{2, 'value1 v1.0', 'value2 v1.0'} --- - [2, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{3, 'value1 v1.0', 'value2 v1.0'} --- - [3, 'value1 v1.0', 'value2 v1.0'] ... tmp:bsize() > bsize --- - true ... -- Insert invalid fields hash:insert{'invalid key', 'value1 v1.0', 'value2 v1.0'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... ------------------------------------------------------------------------------- -- 32-bit hash replace fields tests ------------------------------------------------------------------------------- -- Replace valid fields hash:replace{3, 'value1 v1.31', 'value2 1.12'} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash:replace{1, 'value1 v1.32', 'value2 1.72'} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash:replace{2, 'value1 v1.43', 'value2 1.92'} --- - [2, 'value1 v1.43', 'value2 1.92'] ... -- Replace invalid fields hash:replace{'invalid key', 'value1 v1.0', 'value2 v1.0'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... ------------------------------------------------------------------------------- -- 32-bit hash select fields test ------------------------------------------------------------------------------- -- select by valid keys hash.index['primary']:get{0} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash.index['primary']:get{1} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash.index['primary']:get{2} --- - [2, 'value1 v1.43', 'value2 1.92'] ... hash.index['primary']:get{3} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash.index['primary']:get{4} --- ... hash.index['primary']:get{5} --- ... 
-- select by invalid keys hash.index['primary']:get{'invalid key'} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... hash.index['primary']:get{1, 2} --- - error: Invalid key part count in an exact match (expected 1, got 2) ... ------------------------------------------------------------------------------- -- 32-bit hash delete fields test ------------------------------------------------------------------------------- -- delete by valid keys hash:delete{0} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash:delete{1} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash:delete{2} --- - [2, 'value1 v1.43', 'value2 1.92'] ... hash:delete{3} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash:delete{4} --- ... hash:delete{5} --- ... -- delete by invalid keys hash:delete{'invalid key'} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... hash:delete{1, 2} --- - error: Invalid key part count in an exact match (expected 1, got 2) ... hash:truncate() --- ... --============================================================================= -- 64-bit hash tests --============================================================================= ------------------------------------------------------------------------------- -- 64-bit hash inset fields tests ------------------------------------------------------------------------------- -- Insert valid fields hash:insert{0ULL, 'value1 v1.0', 'value2 v1.0'} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{1ULL, 'value1 v1.0', 'value2 v1.0'} --- - [1, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{2ULL, 'value1 v1.0', 'value2 v1.0'} --- - [2, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{3ULL, 'value1 v1.0', 'value2 v1.0'} --- - [3, 'value1 v1.0', 'value2 v1.0'] ... -- Insert invalid fields hash:insert{100, 'value1 v1.0', 'value2 v1.0'} --- - [100, 'value1 v1.0', 'value2 v1.0'] ... 
hash:insert{101, 'value1 v1.0', 'value2 v1.0'} --- - [101, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{102, 'value1 v1.0', 'value2 v1.0'} --- - [102, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{103, 'value1 v1.0', 'value2 v1.0'} --- - [103, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{'invalid key', 'value1 v1.0', 'value2 v1.0'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... ------------------------------------------------------------------------------- -- 64-bit hash replace fields tests ------------------------------------------------------------------------------- -- Replace valid fields hash:replace{3ULL, 'value1 v1.31', 'value2 1.12'} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash:replace{1ULL, 'value1 v1.32', 'value2 1.72'} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash:replace{2ULL, 'value1 v1.43', 'value2 1.92'} --- - [2, 'value1 v1.43', 'value2 1.92'] ... -- Replace invalid fields hash:replace{3, 'value1 v1.31', 'value2 1.12'} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash:replace{1, 'value1 v1.32', 'value2 1.72'} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash:replace{2, 'value1 v1.43', 'value2 1.92'} --- - [2, 'value1 v1.43', 'value2 1.92'] ... hash:replace{'invalid key', 'value1 v1.0', 'value2 v1.0'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... ------------------------------------------------------------------------------- -- 64-bit hash select fields test ------------------------------------------------------------------------------- -- select by valid keys hash.index['primary']:get{0ULL} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash.index['primary']:get{1ULL} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash.index['primary']:get{2ULL} --- - [2, 'value1 v1.43', 'value2 1.92'] ... hash.index['primary']:get{3ULL} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash.index['primary']:get{4ULL} --- ... 
hash.index['primary']:get{5ULL} --- ... -- select by valid NUM keys hash.index['primary']:get{0} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash.index['primary']:get{1} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash.index['primary']:get{2} --- - [2, 'value1 v1.43', 'value2 1.92'] ... hash.index['primary']:get{3} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash.index['primary']:get{4} --- ... hash.index['primary']:get{5} --- ... -- select by invalid keys hash.index['primary']:get{'invalid key'} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... hash.index['primary']:get{'00000001', '00000002'} --- - error: Invalid key part count in an exact match (expected 1, got 2) ... ------------------------------------------------------------------------------- -- 64-bit hash delete fields test ------------------------------------------------------------------------------- -- delete by valid keys hash:delete{0ULL} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash:delete{1ULL} --- - [1, 'value1 v1.32', 'value2 1.72'] ... hash:delete{2ULL} --- - [2, 'value1 v1.43', 'value2 1.92'] ... hash:delete{3ULL} --- - [3, 'value1 v1.31', 'value2 1.12'] ... hash:delete{4ULL} --- ... hash:delete{5ULL} --- ... hash:insert{0ULL, 'value1 v1.0', 'value2 v1.0'} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{1ULL, 'value1 v1.0', 'value2 v1.0'} --- - [1, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{2ULL, 'value1 v1.0', 'value2 v1.0'} --- - [2, 'value1 v1.0', 'value2 v1.0'] ... hash:insert{3ULL, 'value1 v1.0', 'value2 v1.0'} --- - [3, 'value1 v1.0', 'value2 v1.0'] ... -- delete by valid NUM keys hash:delete{0} --- - [0, 'value1 v1.0', 'value2 v1.0'] ... hash:delete{1} --- - [1, 'value1 v1.0', 'value2 v1.0'] ... hash:delete{2} --- - [2, 'value1 v1.0', 'value2 v1.0'] ... hash:delete{3} --- - [3, 'value1 v1.0', 'value2 v1.0'] ... hash:delete{4} --- ... hash:delete{5} --- ... 
-- delete by invalid keys hash:delete{'invalid key'} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... hash:delete{'00000001', '00000002'} --- - error: Invalid key part count in an exact match (expected 1, got 2) ... hash:truncate() --- ... --============================================================================= -- String hash tests --============================================================================= ------------------------------------------------------------------------------- -- String hash inset fields tests ------------------------------------------------------------------------------- hash.index['primary']:drop() --- ... tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'string'}, unique = true }) --- ... -- Insert valid fields hash:insert{'key 0', 'value1 v1.0', 'value2 v1.0'} --- - ['key 0', 'value1 v1.0', 'value2 v1.0'] ... hash:insert{'key 1', 'value1 v1.0', 'value2 v1.0'} --- - ['key 1', 'value1 v1.0', 'value2 v1.0'] ... hash:insert{'key 2', 'value1 v1.0', 'value2 v1.0'} --- - ['key 2', 'value1 v1.0', 'value2 v1.0'] ... hash:insert{'key 3', 'value1 v1.0', 'value2 v1.0'} --- - ['key 3', 'value1 v1.0', 'value2 v1.0'] ... ------------------------------------------------------------------------------- -- String hash replace fields tests ------------------------------------------------------------------------------- -- Replace valid fields hash:replace{'key 3', 'value1 v1.31', 'value2 1.12'} --- - ['key 3', 'value1 v1.31', 'value2 1.12'] ... hash:replace{'key 1', 'value1 v1.32', 'value2 1.72'} --- - ['key 1', 'value1 v1.32', 'value2 1.72'] ... hash:replace{'key 2', 'value1 v1.43', 'value2 1.92'} --- - ['key 2', 'value1 v1.43', 'value2 1.92'] ... 
------------------------------------------------------------------------------- -- String hash select fields test ------------------------------------------------------------------------------- -- select by valid keys hash.index['primary']:get{'key 0'} --- - ['key 0', 'value1 v1.0', 'value2 v1.0'] ... hash.index['primary']:get{'key 1'} --- - ['key 1', 'value1 v1.32', 'value2 1.72'] ... hash.index['primary']:get{'key 2'} --- - ['key 2', 'value1 v1.43', 'value2 1.92'] ... hash.index['primary']:get{'key 3'} --- - ['key 3', 'value1 v1.31', 'value2 1.12'] ... hash.index['primary']:get{'key 4'} --- ... hash.index['primary']:get{'key 5'} --- ... -- select by invalid keys hash.index['primary']:get{'key 1', 'key 2'} --- - error: Invalid key part count in an exact match (expected 1, got 2) ... ------------------------------------------------------------------------------- -- String hash delete fields test ------------------------------------------------------------------------------- -- delete by valid keys hash:delete{'key 0'} --- - ['key 0', 'value1 v1.0', 'value2 v1.0'] ... hash:delete{'key 1'} --- - ['key 1', 'value1 v1.32', 'value2 1.72'] ... hash:delete{'key 2'} --- - ['key 2', 'value1 v1.43', 'value2 1.92'] ... hash:delete{'key 3'} --- - ['key 3', 'value1 v1.31', 'value2 1.12'] ... hash:delete{'key 4'} --- ... hash:delete{'key 5'} --- ... -- delete by invalid keys hash:delete{'key 1', 'key 2'} --- - error: Invalid key part count in an exact match (expected 1, got 2) ... hash:truncate() --- ... ------------------------------------------------------------------------------- -- Collation test ------------------------------------------------------------------------------- hash.index['primary']:drop() --- ... tmp = hash:create_index('primary', { type = 'hash', parts = {{1, 'string', collation = 'unicode_ci'}}, unique = true}) --- ... tmp = hash:create_index('secondary', { type = 'hash', parts = {{2, 'scalar', collation = 'unicode_ci'}}, unique = true}) --- ... 
hash:insert{'Ёж', 'Hedgehog'} --- - ['Ёж', 'Hedgehog'] ... hash:insert{'Ёлка', 'Spruce'} --- - ['Ёлка', 'Spruce'] ... hash:insert{'Jogurt', 'Йогурт'} --- - ['Jogurt', 'Йогурт'] ... hash:insert{'Один', 1} --- - ['Один', 1] ... hash.index.primary:get('ёж') --- - ['Ёж', 'Hedgehog'] ... hash.index.primary:get('елка') --- - ['Ёлка', 'Spruce'] ... hash.index.secondary:get('spruce') --- - ['Ёлка', 'Spruce'] ... hash.index.secondary:get('йогурт') --- - ['Jogurt', 'Йогурт'] ... hash.index.secondary:get(1) --- - ['Один', 1] ... hash.index.secondary:get('иогурт') --- ... hash.index.secondary:get(2) --- ... ------------------------ -- hash::replace tests ------------------------ hash.index['secondary']:drop() --- ... hash.index['primary']:drop() --- ... tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... tmp = hash:create_index('field1', { type = 'hash', parts = {2, 'unsigned'}, unique = true }) --- ... tmp = hash:create_index('field2', { type = 'hash', parts = {3, 'unsigned'}, unique = true }) --- ... tmp = hash:create_index('field3', { type = 'hash', parts = {4, 'unsigned'}, unique = true }) --- ... hash:insert{0, 0, 0, 0} --- - [0, 0, 0, 0] ... hash:insert{1, 1, 1, 1} --- - [1, 1, 1, 1] ... hash:insert{2, 2, 2, 2} --- - [2, 2, 2, 2] ... -- OK hash:replace{1, 1, 1, 1} --- - [1, 1, 1, 1] ... hash.index['primary']:get{10} --- ... hash.index['field1']:get{10} --- ... hash.index['field2']:get{10} --- ... hash.index['field3']:get{10} --- ... hash.index['primary']:get{1} --- - [1, 1, 1, 1] ... hash.index['field1']:get{1} --- - [1, 1, 1, 1] ... hash.index['field2']:get{1} --- - [1, 1, 1, 1] ... hash.index['field3']:get{1} --- - [1, 1, 1, 1] ... -- OK hash:insert{10, 10, 10, 10} --- - [10, 10, 10, 10] ... hash:delete{10} --- - [10, 10, 10, 10] ... hash.index['primary']:get{10} --- ... hash.index['field1']:get{10} --- ... hash.index['field2']:get{10} --- ... hash.index['field3']:get{10} --- ... 
-- TupleFound (primary key) hash:insert{1, 10, 10, 10} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... hash.index['primary']:get{10} --- ... hash.index['field1']:get{10} --- ... hash.index['field2']:get{10} --- ... hash.index['field3']:get{10} --- ... hash.index['primary']:get{1} --- - [1, 1, 1, 1] ... -- TupleNotFound (primary key) hash:replace{10, 10, 10, 10} --- - [10, 10, 10, 10] ... hash.index['primary']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{10} --- - [10, 10, 10, 10] ... hash.index['field2']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{10} --- - [10, 10, 10, 10] ... -- TupleFound (key --1) hash:insert{10, 0, 10, 10} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... hash.index['primary']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{10} --- - [10, 10, 10, 10] ... hash.index['field2']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{0} --- - [0, 0, 0, 0] ... -- TupleFound (key --1) -- hash:replace_if_exists(2, 0, 10, 10) hash.index['primary']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{10} --- - [10, 10, 10, 10] ... hash.index['field2']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{0} --- - [0, 0, 0, 0] ... -- TupleFound (key --3) hash:insert{10, 10, 10, 0} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... hash.index['primary']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{10} --- - [10, 10, 10, 10] ... hash.index['field2']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{0} --- - [0, 0, 0, 0] ... -- TupleFound (key --3) -- hash:replace_if_exists(2, 10, 10, 0) hash.index['primary']:get{10} --- - [10, 10, 10, 10] ... hash.index['field1']:get{10} --- - [10, 10, 10, 10] ... 
hash.index['field2']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{10} --- - [10, 10, 10, 10] ... hash.index['field3']:get{0} --- - [0, 0, 0, 0] ... hash:drop() --- ... hash = box.schema.space.create('tweedledum') --- ... hi = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... hash:insert{0} --- - [0] ... hash:insert{16} --- - [16] ... for _, tuple in hi:pairs(nil, {iterator = box.index.ALL}) do hash:delete{tuple[1]} end --- ... hash:drop() --- ... -- -- gh-616 "1-based indexing and 0-based error message -- _ = box.schema.create_space('test') --- ... _ = box.space.test:create_index('i',{parts={1,'string'}}) --- ... box.space.test:insert{1} --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... box.space.test:drop() --- ... -- gh-1467: invalid iterator type space = box.schema.space.create('test') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... space:select({1}, {iterator = 'BITS_ALL_SET' } ) --- - error: Index 'primary' (HASH) of space 'test' (memtx) does not support requested iterator type ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/tree_pk.result0000664000000000000000000003130613306560010020643 0ustar rootrootutils = dofile('utils.lua') --- ... s0 = box.schema.space.create('tweedledum') --- ... i0 = s0:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... bsize = i0:bsize() --- ... -- integer keys s0:insert{1, 'tuple'} --- - [1, 'tuple'] ... box.snapshot() --- - ok ... s0:insert{2, 'tuple 2'} --- - [2, 'tuple 2'] ... box.snapshot() --- - ok ... i0:bsize() > bsize --- - true ... s0:insert{3, 'tuple 3'} --- - [3, 'tuple 3'] ... s0.index['primary']:get{1} --- - [1, 'tuple'] ... s0.index['primary']:get{2} --- - [2, 'tuple 2'] ... s0.index['primary']:get{3} --- - [3, 'tuple 3'] ... -- Cleanup s0:delete{1} --- - [1, 'tuple'] ... s0:delete{2} --- - [2, 'tuple 2'] ... 
s0:delete{3} --- - [3, 'tuple 3'] ... -- Test incorrect keys - supplied key field type does not match index type -- https://bugs.launchpad.net/tarantool/+bug/1072624 s0:insert{'xxxxxxx'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... s0:insert{''} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... s0:insert{'12'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... s1 = box.schema.space.create('tweedledee') --- ... i1 = s1:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) --- ... s2 = box.schema.space.create('alice') --- ... i2 = s2:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) --- ... -- string keys s1:insert{'identifier', 'tuple'} --- - ['identifier', 'tuple'] ... box.snapshot() --- - ok ... s1:insert{'second', 'tuple 2'} --- - ['second', 'tuple 2'] ... box.snapshot() --- - ok ... s1.index['primary']:select('second', { limit = 100, iterator = 'GE' }) --- - - ['second', 'tuple 2'] ... s1.index['primary']:select('identifier', { limit = 100, iterator = 'GE' }) --- - - ['identifier', 'tuple'] - ['second', 'tuple 2'] ... s1:insert{'third', 'tuple 3'} --- - ['third', 'tuple 3'] ... s1.index['primary']:get{'identifier'} --- - ['identifier', 'tuple'] ... s1.index['primary']:get{'second'} --- - ['second', 'tuple 2'] ... s1.index['primary']:get{'third'} --- - ['third', 'tuple 3'] ... -- Cleanup s1:delete{'identifier'} --- - ['identifier', 'tuple'] ... s1:delete{'second'} --- - ['second', 'tuple 2'] ... s1:delete{'third'} --- - ['third', 'tuple 3'] ... env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... 
function crossjoin(space0, space1, limit) local result = {} for state, v0 in space0:pairs() do for state, v1 in space1:pairs() do if limit <= 0 then return result end local newtuple = v0:totable() for _, v in v1:pairs() do table.insert(newtuple, v) end table.insert(result, box.tuple.new(newtuple)) limit = limit - 1 end end return result end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s2:insert{'1', 'tuple'} --- - ['1', 'tuple'] ... s1:insert{'1', 'tuple'} --- - ['1', 'tuple'] ... s1:insert{'2', 'tuple'} --- - ['2', 'tuple'] ... crossjoin(s1, s1, 0) --- - [] ... crossjoin(s1, s1, 5) --- - - ['1', 'tuple', '1', 'tuple'] - ['1', 'tuple', '2', 'tuple'] - ['2', 'tuple', '1', 'tuple'] - ['2', 'tuple', '2', 'tuple'] ... crossjoin(s1, s1, 10000) --- - - ['1', 'tuple', '1', 'tuple'] - ['1', 'tuple', '2', 'tuple'] - ['2', 'tuple', '1', 'tuple'] - ['2', 'tuple', '2', 'tuple'] ... crossjoin(s1, s2, 10000) --- - - ['1', 'tuple', '1', 'tuple'] - ['2', 'tuple', '1', 'tuple'] ... s1:truncate() --- ... s2:truncate() --- ... -- Bug #922520 - select missing keys s0:insert{200, 'select me!'} --- - [200, 'select me!'] ... s0.index['primary']:get{200} --- - [200, 'select me!'] ... s0.index['primary']:get{199} --- ... s0.index['primary']:get{201} --- ... s1:drop() --- ... s1 = nil --- ... s2:drop() --- ... s2 = nil --- ... -- -- tree::replace tests -- s0:truncate() --- ... i1 = s0:create_index('i1', { type = 'tree', parts = {2, 'unsigned'}, unique = true }) --- ... i2 = s0:create_index('i2', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) --- ... i3 = s0:create_index('i3', { type = 'tree', parts = {4, 'unsigned'}, unique = true }) --- ... s0:insert{0, 0, 0, 0} --- - [0, 0, 0, 0] ... s0:insert{1, 1, 1, 1} --- - [1, 1, 1, 1] ... s0:insert{2, 2, 2, 2} --- - [2, 2, 2, 2] ... -- OK s0:replace{1, 1, 1, 1} --- - [1, 1, 1, 1] ... s0:replace{1, 10, 10, 10} --- - [1, 10, 10, 10] ... s0:replace{1, 1, 1, 1} --- - [1, 1, 1, 1] ... s0.index['primary']:get{10} --- ... 
s0.index['i1']:select{10} --- - [] ... s0.index['i2']:select{10} --- - [] ... s0.index['i3']:select{10} --- - [] ... s0.index['primary']:get{1} --- - [1, 1, 1, 1] ... s0.index['i1']:select{1} --- - - [1, 1, 1, 1] ... s0.index['i2']:select{1} --- - - [1, 1, 1, 1] ... s0.index['i3']:select{1} --- - - [1, 1, 1, 1] ... -- OK s0:insert{10, 10, 10, 10} --- - [10, 10, 10, 10] ... s0:delete{10} --- - [10, 10, 10, 10] ... s0.index['primary']:get{10} --- ... s0.index['i1']:select{10} --- - [] ... s0.index['i2']:select{10} --- - [] ... s0.index['i3']:select{10} --- - [] ... -- TupleFound (primary key) s0:insert{1, 10, 10, 10} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... s0.index['primary']:get{10} --- ... s0.index['i1']:select{10} --- - [] ... s0.index['i2']:select{10} --- - [] ... s0.index['i3']:select{10} --- - [] ... s0.index['primary']:get{1} --- - [1, 1, 1, 1] ... -- TupleNotFound (primary key) s0:replace{10, 10, 10, 10} --- - [10, 10, 10, 10] ... s0.index['primary']:get{10} --- - [10, 10, 10, 10] ... s0.index['i1']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i2']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{10} --- - - [10, 10, 10, 10] ... -- TupleFound (key #1) s0:insert{10, 0, 10, 10} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... s0.index['primary']:get{10} --- - [10, 10, 10, 10] ... s0.index['i1']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i2']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i1']:select{0} --- - - [0, 0, 0, 0] ... -- TupleFound (key #1) s0:replace{2, 0, 10, 10} --- - error: Duplicate key exists in unique index 'i1' in space 'tweedledum' ... s0.index['primary']:get{10} --- - [10, 10, 10, 10] ... s0.index['i1']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i2']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{10} --- - - [10, 10, 10, 10] ... 
s0.index['i1']:select{0} --- - - [0, 0, 0, 0] ... -- TupleFound (key #3) s0:insert{10, 10, 10, 0} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... s0.index['primary']:get{10} --- - [10, 10, 10, 10] ... s0.index['i1']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i2']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{0} --- - - [0, 0, 0, 0] ... -- TupleFound (key #3) s0:replace{2, 10, 10, 0} --- - error: Duplicate key exists in unique index 'i1' in space 'tweedledum' ... s0.index['primary']:get{10} --- - [10, 10, 10, 10] ... s0.index['i1']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i2']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{10} --- - - [10, 10, 10, 10] ... s0.index['i3']:select{0} --- - - [0, 0, 0, 0] ... -- Non-Uniq test (key #2) s0:insert{4, 4, 0, 4} --- - [4, 4, 0, 4] ... s0:insert{5, 5, 0, 5} --- - [5, 5, 0, 5] ... s0:insert{6, 6, 0, 6} --- - [6, 6, 0, 6] ... s0:replace{5, 5, 0, 5} --- - [5, 5, 0, 5] ... utils.sort(s0.index['i2']:select(0)) --- - - [0, 0, 0, 0] - [4, 4, 0, 4] - [5, 5, 0, 5] - [6, 6, 0, 6] ... s0:delete{5} --- - [5, 5, 0, 5] ... utils.sort(s0.index['i2']:select(0)) --- - - [0, 0, 0, 0] - [4, 4, 0, 4] - [6, 6, 0, 6] ... s0:drop() --- ... s0 = nil --- ... -- Stable non-unique indexes -- https://github.com/tarantool/tarantool/issues/2476 s = box.schema.space.create('test') --- ... i1 = s:create_index('i1', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... i2 = s:create_index('i2', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) --- ... i3 = s:create_index('i3', { type = 'tree', parts = {{3, 'unsigned', is_nullable = true}}, unique = true }) --- ... _ = s:replace{5, 1, box.NULL, 1} --- ... _ = s:replace{4, 1, box.NULL, 3} --- ... _ = s:replace{6, 1, box.NULL, 2} --- ... _ = s:replace{3, 1, box.NULL, 0} --- ... _ = s:replace{7, 1, box.NULL, 100} --- ... 
_ = s:replace{15, 2, 100, 11} --- ... _ = s:replace{14, 2, 500, 41} --- ... _ = s:replace{16, 2, 200, 31} --- ... _ = s:replace{13, 2, 300, 13} --- ... _ = s:replace{17, 2, 400, 10} --- ... i2:select{1} --- - - [3, 1, null, 0] - [4, 1, null, 3] - [5, 1, null, 1] - [6, 1, null, 2] - [7, 1, null, 100] ... i2:select{2} --- - - [13, 2, 300, 13] - [14, 2, 500, 41] - [15, 2, 100, 11] - [16, 2, 200, 31] - [17, 2, 400, 10] ... i2:select{1, 5} --- - error: Invalid key part count (expected [0..1], got 2) ... i3:select{box.NULL} --- - - [3, 1, null, 0] - [4, 1, null, 3] - [5, 1, null, 1] - [6, 1, null, 2] - [7, 1, null, 100] ... i1:alter{parts = {4, 'unsigned'}} --- ... i2:select{1} --- - - [3, 1, null, 0] - [5, 1, null, 1] - [6, 1, null, 2] - [4, 1, null, 3] - [7, 1, null, 100] ... i2:select{2} --- - - [17, 2, 400, 10] - [15, 2, 100, 11] - [13, 2, 300, 13] - [16, 2, 200, 31] - [14, 2, 500, 41] ... i2:select{1, 1} --- - error: Invalid key part count (expected [0..1], got 2) ... i3:select{box.NULL} --- - - [3, 1, null, 0] - [5, 1, null, 1] - [6, 1, null, 2] - [4, 1, null, 3] - [7, 1, null, 100] ... s:truncate() --- ... i1:alter{parts = {1, 'str'}} --- ... _ = s:replace{"5", 1, box.NULL} --- ... _ = s:replace{"4", 1, box.NULL} --- ... _ = s:replace{"6", 1, box.NULL} --- ... _ = s:replace{"3", 1, box.NULL} --- ... _ = s:replace{"7", 1, box.NULL} --- ... _ = s:replace{"15", 2, 100} --- ... _ = s:replace{"14", 2, 500} --- ... _ = s:replace{"16", 2, 200} --- ... _ = s:replace{"13", 2, 300} --- ... _ = s:replace{"17", 2, 400} --- ... i2:select{1} --- - - ['3', 1, null] - ['4', 1, null] - ['5', 1, null] - ['6', 1, null] - ['7', 1, null] ... i2:select{2} --- - - ['13', 2, 300] - ['14', 2, 500] - ['15', 2, 100] - ['16', 2, 200] - ['17', 2, 400] ... i3:select{box.NULL} --- - - ['3', 1, null] - ['4', 1, null] - ['5', 1, null] - ['6', 1, null] - ['7', 1, null] ... s:drop() --- ... 
--https://github.com/tarantool/tarantool/issues/2649 -- create standart index and alter it to collation index box.internal.collation.create('test', 'ICU', 'ru-RU') --- ... box.internal.collation.create('test-ci', 'ICU', 'ru-RU', {strength = 'secondary'}) --- ... s = box.schema.space.create('test') --- ... i1 = s:create_index('i1', { type = 'tree', parts = {{1, 'str'}}, unique = true }) --- ... _ = s:replace{"ааа"} --- ... _ = s:replace{"еее"} --- ... _ = s:replace{"ёёё"} --- ... _ = s:replace{"жжж"} --- ... _ = s:replace{"яяя"} --- ... _ = s:replace{"ААА"} --- ... _ = s:replace{"ЯЯЯ"} --- ... -- bad output s:select{} --- - - ['ААА'] - ['ЯЯЯ'] - ['ааа'] - ['еее'] - ['жжж'] - ['яяя'] - ['ёёё'] ... i1:alter({parts = {{1, 'str', collation='test'}}}) --- ... -- good output s:select{} --- - - ['ааа'] - ['ААА'] - ['еее'] - ['ёёё'] - ['жжж'] - ['яяя'] - ['ЯЯЯ'] ... i1:alter({parts = {{1, 'str', collation='test-ci'}}}) --- - error: Duplicate key exists in unique index 'i1' in space 'test' ... _ = s:delete{"ААА"} --- ... _ = s:delete{"ЯЯЯ"} --- ... i1:alter({parts = {{1, 'str', collation='test-ci'}}}) --- ... -- good output s:select{} --- - - ['ааа'] - ['еее'] - ['ёёё'] - ['жжж'] - ['яяя'] ... s:insert{"ААА"} --- - error: Duplicate key exists in unique index 'i1' in space 'test' ... s:replace{"ЯЯЯ"} --- - ['ЯЯЯ'] ... -- good output s:select{} --- - - ['ааа'] - ['еее'] - ['ёёё'] - ['жжж'] - ['ЯЯЯ'] ... s:drop() --- ... -- create collation index and alter it to standart index s = box.schema.space.create('test') --- ... i1 = s:create_index('i1', { type = 'tree', parts = {{1, 'str', collation='test'}}, unique = true }) --- ... _ = s:replace{"ааа"} --- ... _ = s:replace{"еее"} --- ... _ = s:replace{"ёёё"} --- ... _ = s:replace{"жжж"} --- ... _ = s:replace{"яяя"} --- ... _ = s:replace{"ААА"} --- ... _ = s:replace{"ЯЯЯ"} --- ... -- good output s:select{} --- - - ['ааа'] - ['ААА'] - ['еее'] - ['ёёё'] - ['жжж'] - ['яяя'] - ['ЯЯЯ'] ... i1:alter({parts = {{1, 'str'}}}) --- ... 
-- bad output s:select{} --- - - ['ААА'] - ['ЯЯЯ'] - ['ааа'] - ['еее'] - ['жжж'] - ['яяя'] - ['ёёё'] ... s:drop() --- ... box.internal.collation.drop('test') --- ... box.internal.collation.drop('test-ci') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/call.result0000664000000000000000000003326313306565107020145 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... conn = require('net.box').connect(box.cfg.listen) --- ... conn:ping() --- - true ... -- -- gh-291: IPROTO: call returns wrong tuple -- function return_none() return end --- ... conn:call("return_none") --- ... conn:eval("return return_none()") --- ... conn:call_16("return_none") --- - [] ... function return_nil() return nil end --- ... conn:call("return_nil") --- - null ... conn:eval("return return_nil()") --- - null ... conn:call_16("return_nil") --- - - [null] ... function return_nils() return nil, nil, nil end --- ... conn:call("return_nils") --- - null - null - null ... conn:eval("return return_nils()") --- - null - null - null ... conn:call_16("return_nils") --- - - [null] - [null] - [null] ... function return_bool() return true end --- ... conn:call("return_bool") --- - true ... conn:eval("return return_bool()") --- - true ... conn:call_16("return_bool") --- - - [true] ... function return_bools() return true, false, true end --- ... conn:call("return_bools") --- - true - false - true ... conn:eval("return return_bools()") --- - true - false - true ... conn:call_16("return_bools") --- - - [true] - [false] - [true] ... function return_number() return 1 end --- ... conn:call("return_number") --- - 1 ... conn:eval("return return_number()") --- - 1 ... conn:call_16("return_number") --- - - [1] ... function return_numbers() return 1, 2, 3 end --- ... conn:call("return_numbers") --- - 1 - 2 - 3 ... conn:eval("return return_numbers()") --- - 1 - 2 - 3 ... conn:call_16("return_numbers") --- - - [1] - [2] - [3] ... function return_string() return "string" end --- ... 
conn:call("return_string") --- - string ... conn:eval("return return_string()") --- - string ... conn:call_16("return_string") --- - - ['string'] ... function return_strings() return "a", "b", "c" end --- ... conn:call("return_strings") --- - a - b - c ... conn:eval("return return_strings()") --- - a - b - c ... conn:call_16("return_strings") --- - - ['a'] - ['b'] - ['c'] ... function return_emptytuple() return box.tuple.new() end --- ... conn:call("return_emptytuple") --- - [] ... conn:eval("return return_emptytuple()") --- - [] ... conn:call_16("return_emptytuple") --- - - [] ... function return_tuple() return box.tuple.new(1, 2, 3) end --- ... conn:call("return_tuple") --- - [1, 2, 3] ... conn:eval("return return_tuple()") --- - [1, 2, 3] ... conn:call_16("return_tuple") --- - - [1, 2, 3] ... function return_tuples() return box.tuple.new(1, 2, 3), box.tuple.new(3, 4, 5) end --- ... conn:call("return_tuples") --- - [1, 2, 3] - [3, 4, 5] ... conn:eval("return return_tuples()") --- - [1, 2, 3] - [3, 4, 5] ... conn:call_16("return_tuples") --- - - [1, 2, 3] - [3, 4, 5] ... function return_map() return { k1 = 'v1', k2 = 'v2'} end --- ... conn:call("return_map") --- - {'k1': 'v1', 'k2': 'v2'} ... conn:eval("return return_map()") --- - {'k1': 'v1', 'k2': 'v2'} ... conn:call_16("return_map") --- - - [{'k1': 'v1', 'k2': 'v2'}] ... function return_emptyarray() return {} end --- ... conn:call("return_emptyarray") --- - [] ... conn:eval("return return_emptyarray()") --- - [] ... conn:call_16("return_emptyarray") --- - - [] ... function return_array1() return {1} end --- ... conn:call("return_array1") --- - [1] ... conn:eval("return return_array1()") --- - [1] ... conn:call_16("return_array1") --- - - [1] ... function return_array2() return {1, 2} end --- ... conn:call("return_array2") --- - [1, 2] ... conn:eval("return return_array2()") --- - [1, 2] ... conn:call_16("return_array2") --- - - [1, 2] ... 
function return_complexarray1() return {1, 2, {k1 = 'v1', k2 = 'v2' }} end --- ... conn:call("return_complexarray1") --- - [1, 2, {'k1': 'v1', 'k2': 'v2'}] ... conn:eval("return return_complexarray1()") --- - [1, 2, {'k1': 'v1', 'k2': 'v2'}] ... conn:call_16("return_complexarray1") --- - - [1, 2, {'k1': 'v1', 'k2': 'v2'}] ... function return_complexarray2() return {{k1 = 'v1', k2 = 'v2' }, 2, 3} end --- ... conn:call("return_complexarray2") --- - [{'k1': 'v1', 'k2': 'v2'}, 2, 3] ... conn:eval("return return_complexarray2()") --- - [{'k1': 'v1', 'k2': 'v2'}, 2, 3] ... conn:call_16("return_complexarray2") --- - - [{'k1': 'v1', 'k2': 'v2'}, 2, 3] ... function return_complexarray3() return {1, {k1 = 'v1', k2 = 'v2' }, 3} end --- ... conn:call("return_complexarray3") --- - [1, {'k1': 'v1', 'k2': 'v2'}, 3] ... conn:eval("return return_complexarray3()") --- - [1, {'k1': 'v1', 'k2': 'v2'}, 3] ... conn:call_16("return_complexarray3") --- - - [1, {'k1': 'v1', 'k2': 'v2'}, 3] ... function return_complexarray4() return {{k1 = 'v1', k2 = 'v2' }} end --- ... conn:call("return_complexarray4") --- - [{'k1': 'v1', 'k2': 'v2'}] ... conn:eval("return return_complexarray4()") --- - [{'k1': 'v1', 'k2': 'v2'}] ... conn:call_16("return_complexarray4") --- - - [{'k1': 'v1', 'k2': 'v2'}] ... function return_tableofarrays1() return {{1}} end --- ... conn:call("return_tableofarrays1") --- - [[1]] ... conn:eval("return return_tableofarrays1()") --- - [[1]] ... conn:call_16("return_tableofarrays1") --- - - [1] ... function return_tableofarrays2() return {{1, 2, 3}} end --- ... conn:call("return_tableofarrays2") --- - [[1, 2, 3]] ... conn:eval("return return_tableofarrays2()") --- - [[1, 2, 3]] ... conn:call_16("return_tableofarrays2") --- - - [1, 2, 3] ... function return_tableofarrays3() return {{1}, {2}, {3}} end --- ... conn:call("return_tableofarrays3") --- - [[1], [2], [3]] ... conn:eval("return return_tableofarrays3()") --- - [[1], [2], [3]] ... 
conn:call_16("return_tableofarrays3") --- - - [1] - [2] - [3] ... function return_tableoftuples1() return {box.tuple.new(1)} end --- ... conn:call("return_tableoftuples1") --- - [[1]] ... conn:eval("return return_tableoftuples1()") --- - [[1]] ... conn:call_16("return_tableoftuples1") --- - - [1] ... function return_tableoftuples2() return {box.tuple.new(1), box.tuple.new(2)} end --- ... conn:call("return_tableoftuples2") --- - [[1], [2]] ... conn:eval("return return_tableoftuples2()") --- - [[1], [2]] ... conn:call_16("return_tableoftuples2") --- - - [1] - [2] ... function return_indecipherable1() return {{1}, 2, 3} end --- ... conn:call("return_indecipherable1") --- - [[1], 2, 3] ... conn:eval("return return_indecipherable1()") --- - [[1], 2, 3] ... conn:call_16("return_indecipherable1") --- - - [1] - [2] - [3] ... function return_indecipherable2() return {box.tuple.new(1), 2, 3} end --- ... conn:call("return_indecipherable2") --- - [[1], 2, 3] ... conn:eval("return return_indecipherable2()") --- - [[1], 2, 3] ... conn:call_16("return_indecipherable2") --- - - [1] - [2] - [3] ... function return_indecipherable3() return {1, {2}, 3} end --- ... conn:call("return_indecipherable3") --- - [1, [2], 3] ... conn:eval("return return_indecipherable3()") --- - [1, [2], 3] ... conn:call_16("return_indecipherable3") --- - - [1, [2], 3] ... function return_indecipherable4() return {1, box.tuple.new(2), 3} end --- ... conn:call("return_indecipherable4") --- - [1, [2], 3] ... conn:eval("return return_indecipherable4()") --- - [1, [2], 3] ... conn:call_16("return_indecipherable4") --- - - [1, [2], 3] ... function toarray(x) return setmetatable(x, { __serialize = 'array' }) end --- ... function tomap(x) return setmetatable(x, { __serialize = 'map' }) end --- ... function return_serialize1() return toarray({ [1] = 1, [20] = 1}) end --- ... 
conn:call("return_serialize1") --- - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:eval("return return_serialize1()") --- - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call_16("return_serialize1") --- - - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... function return_serialize2() return tomap({ 'a', 'b', 'c'}) end --- ... conn:call("return_serialize2") --- - {1: 'a', 2: 'b', 3: 'c'} ... conn:eval("return return_serialize2()") --- - {1: 'a', 2: 'b', 3: 'c'} ... conn:call_16("return_serialize2") --- - - [{1: 'a', 2: 'b', 3: 'c'}] ... function return_serialize3() return {'x', toarray({ [1] = 1, [20] = 1})} end --- ... conn:call("return_serialize3") --- - ['x', [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1]] ... conn:eval("return return_serialize3()") --- - ['x', [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1]] ... conn:call_16("return_serialize3") --- - - ['x', [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1]] ... function return_serialize4() return {'x', tomap({ 'a', 'b', 'c'})} end --- ... conn:call_16("return_serialize4") --- - - ['x', {1: 'a', 2: 'b', 3: 'c'}] ... function return_serialize5() return {toarray({ [1] = 1, [20] = 1}), 'x'} end --- ... conn:call("return_serialize5") --- - [[1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1], 'x'] ... conn:eval("return return_serialize5()") --- - [[1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1], 'x'] ... 
conn:call_16("return_serialize5") --- - - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] - ['x'] ... function return_serialize6() return { tomap({ 'a', 'b', 'c'}), 'x'} end --- ... conn:call("return_serialize6") --- - [{1: 'a', 2: 'b', 3: 'c'}, 'x'] ... conn:eval("return return_serialize6()") --- - [{1: 'a', 2: 'b', 3: 'c'}, 'x'] ... conn:call_16("return_serialize6") --- - - [{1: 'a', 2: 'b', 3: 'c'}, 'x'] ... function return_serialize7() return {toarray({ [1] = 1, [20] = 1})} end --- ... conn:call("return_serialize7") --- - [[1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1]] ... conn:eval("return return_serialize7()") --- - [[1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1]] ... conn:call_16("return_serialize7") --- - - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... function return_serialize8() return { tomap({ 'a', 'b', 'c'})} end --- ... conn:call("return_serialize8") --- - [{1: 'a', 2: 'b', 3: 'c'}] ... conn:eval("return return_serialize8()") --- - [{1: 'a', 2: 'b', 3: 'c'}] ... conn:call_16("return_serialize8") --- - - [{1: 'a', 2: 'b', 3: 'c'}] ... -- -- gh-1167 -- sparse_safe = require('msgpack').cfg.encode_sparse_safe --- ... sparse_safe --- - 10 ... function return_sparse1() local res = {} res[1] = 1 res[20] = 1 return res end --- ... conn:call("return_sparse1") --- - {20: 1, 1: 1} ... conn:eval("return return_sparse1()") --- - {20: 1, 1: 1} ... conn:call_16("return_sparse1") --- - - [{20: 1, 1: 1}] ... function return_sparse2() return { [1] = 1, [20] = 1} end --- ... conn:call("return_sparse2") --- - {20: 1, 1: 1} ... conn:eval("return return_sparse2()") --- - {20: 1, 1: 1} ... conn:call_16("return_sparse2") --- - - [{20: 1, 1: 1}] ... 
function return_sparse3() local res = {} res[5] = 5 res[20] = 1 return res end --- ... conn:call("return_sparse3") --- - {5: 5, 20: 1} ... conn:eval("return return_sparse3()") --- - {5: 5, 20: 1} ... conn:call_16("return_sparse3") --- - - [{5: 5, 20: 1}] ... function return_sparse4() return { [5] = 1, [20] = 1} end --- ... conn:call("return_sparse4") --- - {5: 1, 20: 1} ... conn:eval("return return_sparse4()") --- - {5: 1, 20: 1} ... conn:call_16("return_sparse4") --- - - [{5: 1, 20: 1}] ... require('msgpack').cfg { encode_sparse_safe = 50 } --- ... conn:call("return_sparse1") --- - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:eval("return return_sparse1()") --- - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call_16("return_sparse1") --- - - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call("return_sparse2") --- - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:eval("return return_sparse2()") --- - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call_16("return_sparse2") --- - - [1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call("return_sparse3") --- - [null, null, null, null, 5, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:eval("return return_sparse3()") --- - [null, null, null, null, 5, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call_16("return_sparse3") --- - - [null, null, null, null, 5, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... 
conn:call("return_sparse4") --- - [null, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:eval("return return_sparse4()") --- - [null, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:call_16("return_sparse4") --- - - [null, null, null, null, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 1] ... conn:close() --- ... require('msgpack').cfg { encode_sparse_safe = sparse_safe } --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/info.test.lua0000664000000000000000000000377013306560010020372 0ustar rootrootfiber = require('fiber') -- Test Lua from admin console. Whenever producing output, -- make sure it's a valid YAML box.info.unknown_variable box.info[23] box.info['unknown_variable'] string.match(box.info.version, '^[1-9]') ~= nil string.match(box.info.pid, '^[1-9][0-9]*$') ~= nil box.info.id > 0 box.info.uuid == box.space._cluster:get(box.info.id)[2] box.info.lsn >= 0 box.info.signature >= 0 box.info.ro == false box.info.replication[1].id box.info.status string.len(box.info.uptime) > 0 string.match(box.info.uptime, '^[1-9][0-9]*$') ~= nil box.info.cluster.uuid == box.space._schema:get{'cluster'}[2] t = {} for k, _ in pairs(box.info()) do table.insert(t, k) end table.sort(t) t -- Tarantool 1.6.x compat box.info.server.id == box.info.id box.info.server.uuid == box.info.uuid box.info.server.lsn == box.info.lsn box.info.ro == box.info.server.ro box.info().server.id == box.info.id box.info().server.uuid == box.info.uuid box.info().server.lsn == box.info.lsn box.info().ro == box.info.server.ro -- -- box.ctl.wait_ro and box.ctl.wait_rw -- box.ctl.wait_ro("abc") -- invalid argument box.ctl.wait_rw("def") -- invalid argument box.info.ro -- false box.ctl.wait_rw() -- success box.ctl.wait_ro(0.001) -- timeout box.cfg{read_only = true} 
box.ctl.wait_ro() -- success box.ctl.wait_rw(0.001) -- timeout status, err = nil f = fiber.create(function() status, err = pcall(box.ctl.wait_rw) end) fiber.sleep(0.001) f:cancel() while f:status() ~= 'dead' do fiber.sleep(0.001) end status, err -- fiber is cancelled box.cfg{read_only = false} status, err = nil f = fiber.create(function() status, err = pcall(box.ctl.wait_ro) end) fiber.sleep(0.001) f:cancel() while f:status() ~= 'dead' do fiber.sleep(0.001) end status, err -- fiber is cancelled ch = fiber.channel(1) _ = fiber.create(function() box.ctl.wait_ro() ch:put(box.info.ro) end) fiber.sleep(0.001) box.cfg{read_only = true} ch:get() -- true _ = fiber.create(function() box.ctl.wait_rw() ch:put(box.info.ro) end) fiber.sleep(0.001) box.cfg{read_only = false} ch:get() -- false tarantool_1.9.1.26.g63eb81e3c/test/box/info.result0000664000000000000000000000576213306565107020170 0ustar rootrootfiber = require('fiber') --- ... -- Test Lua from admin console. Whenever producing output, -- make sure it's a valid YAML box.info.unknown_variable --- - null ... box.info[23] --- - null ... box.info['unknown_variable'] --- - null ... string.match(box.info.version, '^[1-9]') ~= nil --- - true ... string.match(box.info.pid, '^[1-9][0-9]*$') ~= nil --- - true ... box.info.id > 0 --- - true ... box.info.uuid == box.space._cluster:get(box.info.id)[2] --- - true ... box.info.lsn >= 0 --- - true ... box.info.signature >= 0 --- - true ... box.info.ro == false --- - true ... box.info.replication[1].id --- - 1 ... box.info.status --- - running ... string.len(box.info.uptime) > 0 --- - true ... string.match(box.info.uptime, '^[1-9][0-9]*$') ~= nil --- - true ... box.info.cluster.uuid == box.space._schema:get{'cluster'}[2] --- - true ... t = {} --- ... for k, _ in pairs(box.info()) do table.insert(t, k) end --- ... table.sort(t) --- ... t --- - - cluster - id - lsn - memory - pid - replication - ro - signature - status - uptime - uuid - vclock - version - vinyl ... 
-- Tarantool 1.6.x compat box.info.server.id == box.info.id --- - true ... box.info.server.uuid == box.info.uuid --- - true ... box.info.server.lsn == box.info.lsn --- - true ... box.info.ro == box.info.server.ro --- - true ... box.info().server.id == box.info.id --- - true ... box.info().server.uuid == box.info.uuid --- - true ... box.info().server.lsn == box.info.lsn --- - true ... box.info().ro == box.info.server.ro --- - true ... -- -- box.ctl.wait_ro and box.ctl.wait_rw -- box.ctl.wait_ro("abc") -- invalid argument --- - error: 'bad argument #1 to ''?'' (number expected, got string)' ... box.ctl.wait_rw("def") -- invalid argument --- - error: 'bad argument #1 to ''?'' (number expected, got string)' ... box.info.ro -- false --- - false ... box.ctl.wait_rw() -- success --- ... box.ctl.wait_ro(0.001) -- timeout --- - error: timed out ... box.cfg{read_only = true} --- ... box.ctl.wait_ro() -- success --- ... box.ctl.wait_rw(0.001) -- timeout --- - error: timed out ... status, err = nil --- ... f = fiber.create(function() status, err = pcall(box.ctl.wait_rw) end) --- ... fiber.sleep(0.001) --- ... f:cancel() --- ... while f:status() ~= 'dead' do fiber.sleep(0.001) end --- ... status, err -- fiber is cancelled --- - false - fiber is cancelled ... box.cfg{read_only = false} --- ... status, err = nil --- ... f = fiber.create(function() status, err = pcall(box.ctl.wait_ro) end) --- ... fiber.sleep(0.001) --- ... f:cancel() --- ... while f:status() ~= 'dead' do fiber.sleep(0.001) end --- ... status, err -- fiber is cancelled --- - false - fiber is cancelled ... ch = fiber.channel(1) --- ... _ = fiber.create(function() box.ctl.wait_ro() ch:put(box.info.ro) end) --- ... fiber.sleep(0.001) --- ... box.cfg{read_only = true} --- ... ch:get() -- true --- - true ... _ = fiber.create(function() box.ctl.wait_rw() ch:put(box.info.ro) end) --- ... fiber.sleep(0.001) --- ... box.cfg{read_only = false} --- ... ch:get() -- false --- - false ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/function1.result0000664000000000000000000001200113306560010021107 0ustar rootrootbuild_path = os.getenv("BUILDDIR") --- ... package.cpath = build_path..'/test/box/?.so;'..build_path..'/test/box/?.dylib;'..package.cpath --- ... log = require('log') --- ... net = require('net.box') --- ... c = net.connect(os.getenv("LISTEN")) --- ... box.schema.func.create('function1', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'function1') --- ... _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary') --- ... box.schema.user.grant('guest', 'read,write', 'space', 'test') --- ... c:call('function1') --- - [] ... box.schema.func.drop("function1") --- ... box.schema.func.create('function1.args', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'function1.args') --- ... c:call('function1.args') --- - error: invalid argument count ... c:call('function1.args', { "xx" }) --- - error: first tuple field must be uint ... c:call('function1.args', { 15 }) --- - [[15, 'hello']] ... box.schema.func.drop("function1.args") --- ... box.schema.func.create('function1.multi_inc', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'function1.multi_inc') --- ... c:call('function1.multi_inc') --- - [] ... box.space.test:select{} --- - [] ... c:call('function1.multi_inc', { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }) --- - [] ... box.space.test:select{} --- - - [1, 0] - [2, 0] - [3, 0] - [4, 0] - [5, 0] - [6, 0] - [7, 0] - [8, 0] - [9, 0] - [10, 0] ... c:call('function1.multi_inc', { 2, 4, 6, 8, 10 }) --- - [] ... box.space.test:select{} --- - - [1, 0] - [2, 1] - [3, 0] - [4, 1] - [5, 0] - [6, 1] - [7, 0] - [8, 1] - [9, 0] - [10, 1] ... c:call('function1.multi_inc', { 0, 2, 4 }) --- - [] ... box.space.test:select{} --- - - [0, 0] - [1, 0] - [2, 2] - [3, 0] - [4, 2] - [5, 0] - [6, 1] - [7, 0] - [8, 1] - [9, 0] - [10, 1] ... 
box.schema.func.drop("function1.multi_inc") --- ... box.schema.func.create('function1.errors', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'function1.errors') --- ... c:call('function1.errors') --- - error: unknown error ... box.schema.func.drop("function1.errors") --- ... box.schema.func.create('xxx', {language = 'invalid'}) --- - error: Unsupported language 'INVALID' specified for function 'xxx' ... -- language normalization function func_lang(name) return (box.space._func.index[2]:select{name}[1] or {})[5] end --- ... box.schema.func.create('f11'), func_lang('f11') --- - null - LUA ... box.schema.func.create('f12', {language = 'Lua'}), func_lang('f12') --- - null - LUA ... box.schema.func.create('f13', {language = 'lua'}), func_lang('f13') --- - null - LUA ... box.schema.func.create('f14', {language = 'lUa'}), func_lang('f14') --- - null - LUA ... box.schema.func.create('f15', {language = 'c'}), func_lang('f15') --- - null - C ... box.schema.func.create('f16', {language = 'C'}), func_lang('f16') --- - null - C ... box.schema.func.drop("f11") --- ... box.schema.func.drop("f12") --- ... box.schema.func.drop("f13") --- ... box.schema.func.drop("f14") --- ... box.schema.func.drop("f15") --- ... box.schema.func.drop("f16") --- ... box.space.test:drop() --- ... -- Missing shared library name = 'unkownmod.unknownfunc' --- ... box.schema.func.create(name, {language = 'C'}) --- ... box.schema.user.grant('guest', 'execute', 'function', name) --- ... c:call(name) --- - error: 'Failed to dynamically load module ''unkownmod'': module not found' ... box.schema.func.drop(name) --- ... -- Drop function while executing gh-910 box.schema.func.create('function1.test_yield', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'function1.test_yield') --- ... s = box.schema.space.create('test_yield') --- ... _ = s:create_index('pk') --- ... box.schema.user.grant('guest', 'read,write', 'space', 'test_yield') --- ... 
fiber = require('fiber') --- ... ch = fiber.channel(1) --- ... _ = fiber.create(function() c:call('function1.test_yield') ch:put(true) end) --- ... while s:get({1}) == nil do fiber.yield(0.0001) end --- ... box.schema.func.drop('function1.test_yield') --- ... ch:get() --- - true ... s:drop() --- ... -- gh-2914: check identifier constraints. test_run = require('test_run').new() --- ... identifier = require("identifier") --- ... test_run:cmd("setopt delimiter ';'") --- - true ... -- -- '.' in func name is used to point out path therefore '.' in name -- itself is prohibited -- -- identifier.run_test( function (identifier) if identifier == "." then return end box.schema.func.create(identifier, {language = "lua"}) box.schema.user.grant('guest', 'execute', 'function', identifier) rawset(_G, identifier, function () return 1 end) local res = pcall(c.call, c, identifier) if c:call(identifier) ~= 1 then error("Should not fire") end rawset(_G, identifier, nil) end, function (identifier) if identifier == "." then return end box.schema.func.drop(identifier) end ); --- - All tests passed ... test_run:cmd("setopt delimiter ''"); --- - true ... c:close() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/select.test.lua0000664000000000000000000001123713306560010020713 0ustar rootrootmsgpack = require('msgpack') env = require('test_run') test_run = env.new() s = box.schema.space.create('select', { temporary = true }) index1 = s:create_index('primary', { type = 'tree' }) index2 = s:create_index('second', { type = 'tree', unique = true, parts = {2, 'unsigned', 1, 'unsigned'}}) for i = 1, 20 do s:insert({ i, 1, 2, 3 }) end test_run:cmd("setopt delimiter ';'") local function test_op(op, idx, ...) local t1 = idx[op .. '_ffi'](idx, ...) local t2 = idx[op .. '_luac'](idx, ...) if msgpack.encode(t1) ~= msgpack.encode(t2) then return 'different result from '..op..'_ffi and '..op..'_luac', t1, t2 end return t1 end test = setmetatable({}, { __index = function(_, op) return function(...) 
return test_op(op, ...) end end }); test_run:cmd("setopt delimiter ''"); -------------------------------------------------------------------------------- -- get tests -------------------------------------------------------------------------------- s.index[0].get == s.index[0].get_ffi or s.index[0].get == s.index[0].get_luac test.get(s.index[0]) test.get(s.index[0], {}) test.get(s.index[0], nil) test.get(s.index[0], 1) test.get(s.index[0], {1}) test.get(s.index[0], {1, 2}) test.get(s.index[0], 0) test.get(s.index[0], {0}) test.get(s.index[0], "0") test.get(s.index[0], {"0"}) test.get(s.index[1], 1) test.get(s.index[1], {1}) test.get(s.index[1], {1, 2}) -------------------------------------------------------------------------------- -- select tests -------------------------------------------------------------------------------- s.index[0].select == s.index[0].select_ffi or s.index[0].select == s.index[0].select_luac test.select(s.index[0]) test.select(s.index[0], {}) test.select(s.index[0], nil) test.select(s.index[0], {}, {iterator = 'ALL'}) test.select(s.index[0], nil, {iterator = box.index.ALL }) test.select(s.index[0], {}, {iterator = box.index.ALL, limit = 10}) test.select(s.index[0], nil, {iterator = box.index.ALL, limit = 0}) test.select(s.index[0], {}, {iterator = 'ALL', limit = 1, offset = 15}) test.select(s.index[0], nil, {iterator = 'ALL', limit = 20, offset = 15}) test.select(s.index[0], nil, {iterator = box.index.EQ}) test.select(s.index[0], {}, {iterator = 'EQ'}) test.select(s.index[0], nil, {iterator = 'REQ'}) test.select(s.index[0], {}, {iterator = box.index.REQ}) test.select(s.index[0], nil, {iterator = 'EQ', limit = 2, offset = 1}) test.select(s.index[0], {}, {iterator = box.index.REQ, limit = 2, offset = 1}) test.select(s.index[0], 1) test.select(s.index[0], {1}) test.select(s.index[0], {1, 2}) test.select(s.index[0], 0) test.select(s.index[0], {0}) test.select(s.index[0], "0") test.select(s.index[0], {"0"}) test.select(s.index[1], 1) 
test.select(s.index[1], {1}) test.select(s.index[1], {1}, {limit = 2}) test.select(s.index[1], 1, {iterator = 'EQ'}) test.select(s.index[1], {1}, {iterator = box.index.EQ, offset = 16, limit = 2}) test.select(s.index[1], {1}, {iterator = box.index.REQ, offset = 16, limit = 2 }) test.select(s.index[1], {1, 2}, {iterator = 'EQ'}) test.select(s.index[1], {1, 2}, {iterator = box.index.REQ}) test.select(s.index[1], {1, 2}) test.select(s.index[0], nil, { iterator = 'ALL', offset = 0, limit = 4294967295 }) test.select(s.index[0], {}, { iterator = 'ALL', offset = 0, limit = 4294967295 }) test.select(s.index[0], 1) test.select(s.index[0], 1, { iterator = box.index.EQ }) test.select(s.index[0], 1, { iterator = 'EQ' }) test.select(s.index[0], 1, { iterator = 'GE' }) test.select(s.index[0], 1, { iterator = 'GE', limit = 2 }) test.select(s.index[0], 1, { iterator = 'LE', limit = 2 }) test.select(s.index[0], 1, { iterator = 'GE', offset = 10, limit = 2 }) s:select(2) -------------------------------------------------------------------------------- -- min/max tests -------------------------------------------------------------------------------- test.min(s.index[1]) test.max(s.index[1]) -------------------------------------------------------------------------------- -- count tests -------------------------------------------------------------------------------- test.count(s.index[1]) test.count(s.index[0], nil) test.count(s.index[0], {}) test.count(s.index[0], 10, { iterator = 'GT'}) -------------------------------------------------------------------------------- -- random tests -------------------------------------------------------------------------------- test.random(s.index[0], 48) s:drop() collectgarbage('collect') s = box.schema.space.create('select', { temporary = true }) index = s:create_index('primary', { type = 'tree' }) a = s:insert{0} lots_of_links = {} ref_count = 0 while (true) do table.insert(lots_of_links, s:get{0}) ref_count = ref_count + 1 end ref_count 
lots_of_links = {} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/lua.result0000664000000000000000000004525113306560010017777 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") --- - true ... space = box.schema.space.create('tweedledum') --- ... tmp = space:create_index('primary', { type = 'hash', parts = {1, 'string'}, unique = true }) --- ... tmp = space:create_index('minmax', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) --- ... space:insert{'brave', 'new', 'world'} --- - ['brave', 'new', 'world'] ... space:insert{'hello', 'old', 'world'} --- - ['hello', 'old', 'world'] ... space.index['minmax']:min() --- - ['brave', 'new', 'world'] ... space.index['minmax']:max() --- - ['hello', 'old', 'world'] ... space.index['minmax']:get{'new', 'world'} --- - ['brave', 'new', 'world'] ... -- A test case for Bug #904208 -- "assert failed, when key cardinality is greater than index cardinality" -- https://bugs.launchpad.net/tarantool/+bug/904208 space.index['minmax']:get{'new', 'world', 'order'} --- - error: Invalid key part count in an exact match (expected 2, got 3) ... space:delete{'brave'} --- - ['brave', 'new', 'world'] ... -- A test case for Bug #902091 -- "Positioned iteration over a multipart index doesn't work" -- https://bugs.launchpad.net/tarantool/+bug/902091 space:insert{'item 1', 'alabama', 'song'} --- - ['item 1', 'alabama', 'song'] ... space.index['minmax']:get{'alabama'} --- - error: Invalid key part count in an exact match (expected 2, got 1) ... space:insert{'item 2', 'california', 'dreaming '} --- - ['item 2', 'california', 'dreaming '] ... space:insert{'item 3', 'california', 'uber alles'} --- - ['item 3', 'california', 'uber alles'] ... space:insert{'item 4', 'georgia', 'on my mind'} --- - ['item 4', 'georgia', 'on my mind'] ... iter, param, state = space.index['minmax']:pairs('california', { iterator = box.index.GE }) --- ... 
state, v = iter(param, state) --- ... v --- - ['item 2', 'california', 'dreaming '] ... state, v = iter(param, state) --- ... v --- - ['item 3', 'california', 'uber alles'] ... space:delete{'item 1'} --- - ['item 1', 'alabama', 'song'] ... space:delete{'item 2'} --- - ['item 2', 'california', 'dreaming '] ... space:delete{'item 3'} --- - ['item 3', 'california', 'uber alles'] ... space:delete{'item 4'} --- - ['item 4', 'georgia', 'on my mind'] ... space:truncate() --- ... -- -- Test that we print index number in error ER_INDEX_VIOLATION -- space:insert{'1', 'hello', 'world'} --- - ['1', 'hello', 'world'] ... space:insert{'2', 'hello', 'world'} --- - error: Duplicate key exists in unique index 'minmax' in space 'tweedledum' ... space:drop() --- ... -- -- Check range scan over multipart keys -- space = box.schema.space.create('tweedledum') --- ... tmp = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... tmp = space:create_index('minmax', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = false }) --- ... space:insert{1234567, 'new', 'world'} --- - [1234567, 'new', 'world'] ... space:insert{0, 'of', 'puppets'} --- - [0, 'of', 'puppets'] ... space:insert{00000001ULL, 'of', 'might', 'and', 'magic'} --- - [1, 'of', 'might', 'and', 'magic'] ... space.index['minmax']:select('of', { limit = 2, iterator = 'GE' }) --- - - [1, 'of', 'might', 'and', 'magic'] - [0, 'of', 'puppets'] ... space.index['minmax']:select('of', { limit = 2, iterator = 'LE' }) --- - - [0, 'of', 'puppets'] - [1, 'of', 'might', 'and', 'magic'] ... space:truncate() --- ... -- -- A test case for Bug#1060967: truncation of 64-bit numbers -- space:insert{2^51, 'hello', 'world'} --- - [2251799813685248, 'hello', 'world'] ... space.index['primary']:get{2^51} --- - [2251799813685248, 'hello', 'world'] ... space:drop() --- ... -- -- Lua 64bit numbers support -- space = box.schema.space.create('tweedledum') --- ... 
tmp = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... space:insert{tonumber64('18446744073709551615'), 'magic'} --- - [18446744073709551615, 'magic'] ... tuple = space.index['primary']:get{tonumber64('18446744073709551615')} --- ... num = tuple[1] --- ... num --- - 18446744073709551615 ... type(num) == 'cdata' --- - true ... num == tonumber64('18446744073709551615') --- - true ... num = tuple[1] --- ... num == tonumber64('18446744073709551615') --- - true ... space:delete{18446744073709551615ULL} --- - [18446744073709551615, 'magic'] ... space:insert{125ULL, 'magic'} --- - [125, 'magic'] ... tuple = space.index['primary']:get{125} --- ... tuple2 = space.index['primary']:get{125LL} --- ... num = tuple[1] --- ... num2 = tuple2[1] --- ... num, num2 --- - 125 - 125 ... type(num) == 'number' --- - true ... type(num2) == 'number' --- - true ... num == tonumber64('125') --- - true ... num2 == tonumber64('125') --- - true ... space:truncate() --- ... -- -- Tests for lua box.auto_increment with NUM keys -- -- lua box.auto_increment() with NUM keys testing space:auto_increment{'a'} --- - [1, 'a'] ... space:insert{tonumber64(5)} --- - [5] ... space:auto_increment{'b'} --- - [6, 'b'] ... space:auto_increment{'c'} --- - [7, 'c'] ... -- gh-2258: Incomprehensive failure of auto_increment in absence of indices space.index.primary:drop() --- ... space:auto_increment{'a'} --- - error: 'No index #0 is defined in space ''tweedledum''' ... space:get({1}) --- - error: 'No index #0 is defined in space ''tweedledum''' ... space:select() --- - error: 'No index #0 is defined in space ''tweedledum''' ... space:update({1}, {}) --- - error: 'No index #0 is defined in space ''tweedledum''' ... space:upsert({1}, {}) --- - error: 'No index #0 is defined in space ''tweedledum''' ... space:delete({1}) --- - error: 'No index #0 is defined in space ''tweedledum''' ... space:bsize() --- - 0 ... space:count() --- - 0 ... space:len() --- - 0 ... 
space:pairs():totable() --- - [] ... space:drop() --- ... -- -- Tests for lua idx:count() -- -- https://blueprints.launchpad.net/tarantool/+spec/lua-builtin-size-of-subtree space = box.schema.space.create('tweedledum') --- ... tmp = space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... tmp = space:create_index('i1', { type = 'tree', parts = {2, 'unsigned', 3, 'unsigned'}, unique = false }) --- ... space:insert{1, 1, 1} --- - [1, 1, 1] ... space:insert{2, 2, 0} --- - [2, 2, 0] ... space:insert{3, 2, 1} --- - [3, 2, 1] ... space:insert{4, 3, 0} --- - [4, 3, 0] ... space:insert{5, 3, 1} --- - [5, 3, 1] ... space:insert{6, 3, 2} --- - [6, 3, 2] ... space.index['i1']:count() --- - 6 ... space:count() --- - 6 ... space.index['i1']:count(1) --- - 1 ... space:count(1) --- - 1 ... space.index['i1']:count(1) --- - 1 ... space.index['i1']:count(2, { iterator = 'LE' }) --- - 3 ... space.index['i1']:count(2, { iterator = 'GE' }) --- - 5 ... space:count(2, { iterator = 'GE' }) --- - error: Index 'primary' (HASH) of space 'tweedledum' (memtx) does not support requested iterator type ... space.index['i1']:count({2, 0}, { iterator = 'LE' }) --- - 2 ... space.index['i1']:count({2, 1}, { iterator = 'GE' }) --- - 4 ... space.index['i1']:count(2) --- - 2 ... space.index['i1']:count({2, 1}) --- - 1 ... space.index['i1']:count({2, 2}) --- - 0 ... space.index['i1']:count(3) --- - 3 ... space.index['i1']:count({3, 3}) --- - 0 ... -- Returns total number of records -- https://github.com/tarantool/tarantool/issues/46 space.index['i1']:count() --- - 6 ... -- Test cases for #123: box.index.count does not check arguments properly space.index['i1']:count(function() end) --- - error: 'builtin/msgpackffi.lua..."]:: can not encode Lua type: ''function''' ... space:drop() --- ... -- -- Tests for lua tuple:transform() -- space = box.schema.space.create('tweedledum') --- ... 
tmp = space:create_index('primary', { type = 'hash', parts = {1, 'string'}, unique = true }) --- ... t = space:insert{'1', '2', '3', '4', '5', '6', '7'} --- ... t:transform(8, 0, '8', '9', '100') --- - ['1', '2', '3', '4', '5', '6', '7', '8', '9', '100'] ... t:transform(1, 1) --- - ['2', '3', '4', '5', '6', '7'] ... t:transform(2, 4) --- - ['1', '6', '7'] ... t:transform(-1, 1) --- - ['1', '2', '3', '4', '5', '6'] ... t:transform(-3, 2) --- - ['1', '2', '3', '4', '7'] ... t:transform(1, 0, 'A') --- - ['A', '1', '2', '3', '4', '5', '6', '7'] ... t:transform(-1, 0, 'A') --- - ['1', '2', '3', '4', '5', '6', 'A', '7'] ... t:transform(1, 1, 'A') --- - ['A', '2', '3', '4', '5', '6', '7'] ... t:transform(-1, 1, 'B') --- - ['1', '2', '3', '4', '5', '6', 'B'] ... t:transform(1, 2, 'C') --- - ['C', '3', '4', '5', '6', '7'] ... t:transform(3, 0, 'hello') --- - ['1', '2', 'hello', '3', '4', '5', '6', '7'] ... t:transform(1, -1, 'C') --- - error: 'tuple.transform(): len is negative' ... t:transform(1, 100) --- - [] ... t:transform(-100, 1) --- - error: 'tuple.transform(): offset is out of bound' ... t:transform(1, 3, 1, 2, 3) --- - [1, 2, 3, '4', '5', '6', '7'] ... t:transform(4, 1, tonumber64(4)) --- - ['1', '2', '3', 4, '5', '6', '7'] ... t:transform(1, 1, {}) --- - [[], '2', '3', '4', '5', '6', '7'] ... space:truncate() --- ... -- -- Tests for OPENTAR-64 - a limitation for the second argument to tuple:transform -- -- 50K is enough for everyone n = 2000 --- ... tab = {}; for i=1,n,1 do table.insert(tab, i) end --- ... t = box.tuple.new(tab) --- ... t:transform(1, n - 1) --- - [2000] ... t = nil --- ... -- -- Tests for lua tuple:find() and tuple:findall() -- -- First space for hash_str tests t = space:insert{'A', '2', '3', '4', '3', '2', '5', '6', '3', '7'} --- ... t:find('2') --- - 2 ... t:find('4') --- - 4 ... t:find('5') --- - 7 ... t:find('A') --- - 1 ... t:find('0') --- - null ... t:findall('A') --- - - 1 ... t:findall('2') --- - - 2 - 6 ... 
t:findall('3') --- - - 3 - 5 - 9 ... t:findall('0') --- - [] ... t:find(2, '2') --- - 6 ... t:find(89, '2') --- - error: 'builtin/box/tuple.lua..."]:: error: invalid key to ''next''' ... t:findall(4, '3') --- - - 5 - 9 ... t = space:insert{'Z', '2', 2, 3, tonumber64(2)} --- ... t:find(2) --- - 3 ... t:findall(tonumber64(2)) --- - - 3 - 5 ... t:find('2') --- - 2 ... space:drop() --- ... -- A test case for Bug #1038784 -- transform returns wrong tuple and put broken reply into socket -- http://bugs.launchpad.net/tarantool/+bug/1038784 -- https://bugs.launchpad.net/tarantool/+bug/1006354 -- lua box.auto_increment() testing space = box.schema.space.create('tweedledum') --- ... tmp = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... dofile('push.lua') --- ... push_collection(space, 0, 1038784, 'hello') --- - [1038784, 'hello'] ... push_collection(space, 0, 1038784, 'hello') --- - [1038784] ... push_collection(space, 0, 1038784, 'hello') --- - [1038784] ... push_collection(space, 1, 1038784, 'hi') --- - [1038784, 'hi'] ... push_collection(space, 2, 1038784, 'hi') --- - [1038784, 'hi', 'hi'] ... push_collection(space, 2, 1038784, 'hi') --- - [1038784, 'hi', 'hi'] ... push_collection(space, 5, 1038784, 'hey') --- - [1038784, 'hi', 'hi', 'hey'] ... push_collection(space, 5, 1038784, 'hey') --- - [1038784, 'hi', 'hi', 'hey', 'hey'] ... push_collection(space, 5, 1038784, 'hey') --- - [1038784, 'hi', 'hi', 'hey', 'hey', 'hey'] ... push_collection(space, 5, 1038784, 'hey') --- - [1038784, 'hi', 'hey', 'hey', 'hey', 'hey'] ... -- # lua box.auto_increment() testing -- # http://bugs.launchpad.net/tarantool/+bug/1006354 -- -- Tests for lua box.auto_increment -- space:truncate() --- ... space:auto_increment{'a'} --- - [1, 'a'] ... space:insert{5} --- - [5] ... space:auto_increment{'b'} --- - [6, 'b'] ... space:auto_increment{'c'} --- - [7, 'c'] ... space:auto_increment{'d'} --- - [8, 'd'] ... space:drop() --- ... 
-- A test case for Bug #1042798 -- Truncate hangs when primary key is not in linear or starts at the first field -- https://bugs.launchpad.net/tarantool/+bug/1042798 -- space = box.schema.space.create('tweedledum') --- ... tmp = space:create_index('primary', { type = 'tree', parts = {3, 'unsigned', 2, 'unsigned'}, unique = true }) --- ... -- Print key fields in pk space.index['primary'].parts --- - - type: unsigned is_nullable: false fieldno: 3 - type: unsigned is_nullable: false fieldno: 2 ... space:insert{1, 2, 3, 4} --- - [1, 2, 3, 4] ... space:insert{10, 20, 30, 40} --- - [10, 20, 30, 40] ... space:insert{20, 30, 40, 50} --- - [20, 30, 40, 50] ... space.index['primary']:select{} --- - - [1, 2, 3, 4] - [10, 20, 30, 40] - [20, 30, 40, 50] ... -- Truncate must not hang space:truncate() --- ... -- Empty result space.index['primary']:select{} --- - [] ... space:drop() --- ... -- -- index:random test -- dofile('index_random_test.lua') --- ... space = box.schema.space.create('tweedledum') --- ... tmp = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... tmp = space:create_index('secondary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) --- ... ------------------------------------------------------------------------------- -- TreeIndex::random() ------------------------------------------------------------------------------- index_random_test(space, 'primary') --- - true ... ------------------------------------------------------------------------------- -- HashIndex::random() ------------------------------------------------------------------------------- index_random_test(space, 'secondary') --- - true ... space:drop() --- ... space = nil --- ... ------------------------------------------------------------------------------- -- space:format() ------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') --- ... 
pk = space:create_index('primary') --- ... space:format() --- - [] ... box.schema.space.format(space.id) --- - [] ... box.space._space:get(space.id)[7] --- - [] ... space:format({{name = 'id', type = 'unsigned'}}) --- ... space:format() --- - [{'name': 'id', 'type': 'unsigned'}] ... box.schema.space.format(space.id) --- - [{'name': 'id', 'type': 'unsigned'}] ... box.space._space:get(space.id)[7] --- - [{'name': 'id', 'type': 'unsigned'}] ... space:format({}) --- ... space:format() --- - [] ... box.schema.space.format(space.id) --- - [] ... box.space._space:get(space.id)[7] --- - [] ... space:drop() --- ... ------------------------------------------------------------------------------- -- Invalid arguments ------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') --- ... pk = space:create_index('primary') --- ... space.len() --- - error: 'builtin/box/schema.lua..."]:: Use space:len(...) instead of space.len(...)' ... space.count({}, {iterator = 'EQ'}) --- - error: 'builtin/box/schema.lua..."]:: Use space:count(...) instead of space.count(...)' ... space.bsize() --- - error: 'builtin/box/schema.lua..."]:: Use space:bsize(...) instead of space.bsize(...)' ... space.get({1}) --- - error: 'builtin/box/schema.lua..."]:: Use space:get(...) instead of space.get(...)' ... space.select({}, {iterator = 'GE'}) --- - error: 'builtin/box/schema.lua..."]:: Use space:select(...) instead of space.select(...)' ... space.insert({1, 2, 3}) --- - error: 'builtin/box/schema.lua..."]:: Use space:insert(...) instead of space.insert(...)' ... space.replace({1, 2, 3}) --- - error: 'builtin/box/schema.lua..."]:: Use space:replace(...) instead of space.replace(...)' ... space.put({1, 2, 3}) --- - error: 'builtin/box/schema.lua..."]:: Use space:replace(...) instead of space.replace(...)' ... space.update({1}, {}) --- - error: 'builtin/box/schema.lua..."]:: Use space:update(...) instead of space.update(...)' ... 
space.upsert({1, 2, 3}, {}) --- - error: 'builtin/box/schema.lua..."]:: Use space:upsert(...) instead of space.upsert(...)' ... space.delete({1}) --- - error: 'builtin/box/schema.lua..."]:: Use space:delete(...) instead of space.delete(...)' ... space.auto_increment({'hello'}) --- - error: 'builtin/box/schema.lua..."]:: Use space:auto_increment(...) instead of space.auto_increment(...)' ... space.pairs({}, {iterator = 'EQ'}) --- - error: 'builtin/box/schema.lua..."]:: Use space:pairs(...) instead of space.pairs(...)' ... space.truncate() --- - error: 'builtin/box/schema.lua..."]:: Use space:truncate(...) instead of space.truncate(...)' ... space.format({}) --- - error: 'builtin/box/schema.lua..."]:: Use space:format(...) instead of space.format(...)' ... space.drop() --- - error: 'builtin/box/schema.lua..."]:: Use space:drop(...) instead of space.drop(...)' ... space.rename() --- - error: 'builtin/box/schema.lua..."]:: Use space:rename(...) instead of space.rename(...)' ... space.create_index('secondary') --- - error: 'builtin/box/schema.lua..."]:: Use space:create_index(...) instead of space.create_index(...)' ... space.run_triggers(false) --- - error: 'builtin/box/schema.lua..."]:: Use space:run_triggers(...) instead of space.run_triggers(...)' ... pk.len() --- - error: 'builtin/box/schema.lua..."]:: Use index:len(...) instead of index.len(...)' ... pk.bsize() --- - error: 'builtin/box/schema.lua..."]:: Use index:bsize(...) instead of index.bsize(...)' ... pk.min() --- - error: 'builtin/box/schema.lua..."]:: Use index:min(...) instead of index.min(...)' ... pk.min({}) --- - error: 'builtin/box/schema.lua..."]:: Use index:min(...) instead of index.min(...)' ... pk.max() --- - error: 'builtin/box/schema.lua..."]:: Use index:max(...) instead of index.max(...)' ... pk.max({}) --- - error: 'builtin/box/schema.lua..."]:: Use index:max(...) instead of index.max(...)' ... pk.random(42) --- - error: 'builtin/box/schema.lua..."]:: Use index:random(...) 
instead of index.random(...)' ... pk.pairs({}, {iterator = 'EQ'}) --- - error: 'builtin/box/schema.lua..."]:: Use index:pairs(...) instead of index.pairs(...)' ... pk.count({}, {iterator = 'EQ'}) --- - error: 'builtin/box/schema.lua..."]:: Use index:count(...) instead of index.count(...)' ... pk.get({1}) --- - error: 'builtin/box/schema.lua..."]:: Use index:get(...) instead of index.get(...)' ... pk.select({}, {iterator = 'GE'}) --- - error: 'builtin/box/schema.lua..."]:: Use index:select(...) instead of index.select(...)' ... pk.update({1}, {}) --- - error: 'builtin/box/schema.lua..."]:: Use index:update(...) instead of index.update(...)' ... pk.delete({1}) --- - error: 'builtin/box/schema.lua..."]:: Use index:delete(...) instead of index.delete(...)' ... pk.drop() --- - error: 'builtin/box/schema.lua..."]:: Use index:drop(...) instead of index.drop(...)' ... pk.rename("newname") --- - error: 'builtin/box/schema.lua..."]:: Use index:rename(...) instead of index.rename(...)' ... pk.alter({}) --- - error: 'builtin/box/schema.lua..."]:: Use index:alter(...) instead of index.alter(...)' ... space:drop() --- ... pk = nil --- ... space = nil --- ... test_run:cmd("clear filter") --- - true ... 
-- vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 tarantool_1.9.1.26.g63eb81e3c/test/box/before_replace.test.lua0000664000000000000000000001461413306560010022373 0ustar rootroottest_run = require('test_run').new() s = box.schema.space.create('test') _ = s:create_index('primary') _ = s:create_index('secondary', {unique = false, parts = {2, 'unsigned'}}) function fail(old, new) error('fail') end function save(old, new) old_tuple = old new_tuple = new end function ret_new(old, new) return new end function ret_old(old, new) return old end function ret_nil(old, new) return nil end function ret_null(old, new) return nil end function ret_none(old, new) return end function ret_invalid(old, new) return 'test' end function ret_update(old, new) return box.tuple.update(new, {{'+', 3, 1}}) end function ret_update_pk(old, new) return box.tuple.update(new, {{'+', 1, 1}}) end -- Exception in trigger. s:before_replace(fail) == fail s:insert{1, 1} s:select() s:before_replace(nil, fail) -- Check 'old' and 'new' trigger arguments. old_tuple = nil new_tuple = nil s:before_replace(save) == save s:insert{1, 1} old_tuple, new_tuple s:replace{1, 2} old_tuple, new_tuple s:update(1, {{'+', 2, 1}}) old_tuple, new_tuple s:upsert({1, 1}, {{'=', 2, 1}}) old_tuple, new_tuple s:upsert({2, 2}, {{'=', 2, 2}}) old_tuple, new_tuple s:select() s:delete(1) old_tuple, new_tuple s:delete(2) old_tuple, new_tuple s:select() s:before_replace(nil, save) -- Returning 'new' from trigger doesn't affect statement. s:before_replace(ret_new) == ret_new s:insert{1, 1} s:update(1, {{'+', 2, 1}}) s:select() s:delete(1) s:select() s:before_replace(nil, ret_new) -- Returning 'old' from trigger skips statement. s:insert{1, 1} s:before_replace(ret_old) == ret_old s:insert{2, 2} s:update(1, {{'+', 2, 1}}) s:delete(1) s:select() s:before_replace(nil, ret_old) s:delete(1) -- Returning nil from trigger turns statement into DELETE. 
s:insert{1, 1} s:before_replace(ret_nil) == ret_nil s:replace{1, 2} s:select() s:before_replace(nil, ret_nil) -- Returning box.NULL from trigger turns statement into DELETE. s:insert{1, 1} s:before_replace(ret_null) == ret_null s:replace{1, 2} s:select() s:before_replace(nil, ret_null) -- Returning nothing doesn't affect the operation. s:insert{1, 1} s:insert{2, 2} s:before_replace(ret_none) == ret_none s:replace{1, 2} s:update(1, {{'+', 2, 1}}) s:delete(2) s:select() s:before_replace(nil, ret_none) s:delete(1) -- Update statement from trigger. s:before_replace(ret_update) == ret_update s:insert{1, 1, 1} s:update(1, {{'+', 2, 1}}) s:select() s:before_replace(nil, ret_update) s:delete(1) -- Invalid return value. s:before_replace(ret_invalid) == ret_invalid s:insert{1, 1} s:select() s:before_replace(nil, ret_invalid) -- Update of the primary key from trigger is forbidden. s:insert{1, 1} s:before_replace(ret_update_pk) == ret_update_pk s:replace{1, 2} s:before_replace(nil, ret_update_pk) s:delete(1) -- Update over secondary index + space:before_replace. s2 = box.schema.space.create('test2') _ = s2:create_index('pk') _ = s2:create_index('sk', {unique = true, parts = {2, 'unsigned'}}) s2:insert{1, 1, 1, 1} s2:before_replace(ret_update) == ret_update s2.index.sk:update(1, {{'+', 4, 1}}) s2:select() s2:drop() -- Stacking triggers. old_tuple = nil new_tuple = nil s:before_replace(save) == save s:before_replace(ret_update) == ret_update s:insert{1, 1, 1} old_tuple, new_tuple s:before_replace(nil, save) s:before_replace(nil, ret_update) s:delete(1) -- Issue DML from trigger. s2 = box.schema.space.create('test2') _ = s2:create_index('pk') cb = function(old, new) s2:insert{i, old, new} end s:before_replace(cb) == cb i = 1 s:insert{1, 1} i = 2 s:replace{1, 2} s:replace{1, 3} -- error: conflict in s2 s:select() s2:select() -- DML done from space:before_replace is undone -- if space:on_replace fails. 
s:truncate() s2:truncate() s:on_replace(fail) == fail s:replace{1, 3} s:select() s2:select() s:on_replace(nil, fail) s:before_replace(nil, cb) s2:drop() -- If space:before_replace turns the request into NOP, -- space:on_replace isn't called. old_tuple = nil new_tuple = nil s:insert{1, 1} s:before_replace(ret_old) == ret_old s:on_replace(save) == save s:replace{1, 2} old_tuple, new_tuple s:delete(1) old_tuple, new_tuple s:insert{2, 2} old_tuple, new_tuple s:select() s:before_replace(nil, ret_old) s:on_replace(nil, save) s:delete(1) -- Changed done in space:before_replace are visible -- in space:on_replace old_tuple = nil new_tuple = nil s:before_replace(ret_update) == ret_update s:on_replace(save) == save s:insert{1, 1, 1} old_tuple, new_tuple s:replace{1, 2, 2} old_tuple, new_tuple s:select() s:before_replace(nil, ret_update) s:on_replace(nil, save) s:delete(1) -- Nesting limit: space.before_replace cb = function(old, new) s:insert{1, 1} end s:before_replace(cb) == cb s:insert{1, 1} -- error s:select() s:before_replace(nil, cb) -- Nesting limit: space.before_replace + space.on_replace cb = function(old, new) s:delete(1) end s:before_replace(cb) == cb s:on_replace(cb) == cb s:insert{1, 1} -- error s:select() s:before_replace(nil, cb) s:on_replace(nil, cb) -- Make sure the server can recover from xlogs after -- using space:before_replace. test_run:cmd('restart server default') s = box.space.test s:select() -- Check that IPROTO_NOP is actually written to xlog. 
fio = require('fio') xlog = require('xlog') type(s:before_replace(function(old, new) return old end)) s:insert{1, 1} path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 1)) fun, param, state = xlog.pairs(path) state, row = fun(param, state) row.HEADER.type row.BODY.space_id == s.id -- gh-3128 before_replace with run_triggers s2 = box.schema.space.create("test2") _ = s2:create_index("prim") before_replace1 = function() s2:insert{1} s:run_triggers(false) end before_replace2 = function() s2:insert{2} end on_replace = function() s2:insert{3} end type(s:on_replace(on_replace)) type(s:before_replace(before_replace1)) type(s:before_replace(before_replace2)) s:insert{1, 1} s2:select{} s:truncate() s2:truncate() s:on_replace(nil, on_replace) s:before_replace(nil, before_replace1) s:before_replace(nil, before_replace2) -- -- gh-3128 -- If at least one before trigger returns old -- insertion will be aborted, but other before triggers -- will be executed before_replace1 = function(old, new) s2:insert{1} return old end before_replace2 = function(old, new) s2:insert{2} end type(s:on_replace(on_replace)) type(s:before_replace(before_replace1)) type(s:before_replace(before_replace2)) s:insert{1, 1} s:select{} s2:select{} s:on_replace(nil, on_replace) s:before_replace(nil, before_replace1) s:before_replace(nil, before_replace2) s2:drop() s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/indices_any_type.result0000664000000000000000000001670013306560010022541 0ustar rootroot-- Tests for HASH index type s3 = box.schema.space.create('my_space4') --- ... i3_1 = s3:create_index('my_space4_idx1', {type='HASH', parts={1, 'scalar', 2, 'integer', 3, 'number'}, unique=true}) --- ... i3_2 = s3:create_index('my_space4_idx2', {type='HASH', parts={4, 'string', 5, 'scalar'}, unique=true}) --- ... i3_3 = s3:create_index('my_space4_idx3', {type='TREE', parts={6, 'boolean'}, unique=false}) --- ... 
s3:insert({100.5, 30, 95, "str1", 5, true}) --- - [100.5, 30, 95, 'str1', 5, true] ... s3:insert({"abc#$23", 1000, -21.542, "namesurname", 99, false}) --- - ['abc#$23', 1000, -21.542, 'namesurname', 99, false] ... s3:insert({true, -459, 4000, "foobar", "36.6", true}) --- - [true, -459, 4000, 'foobar', '36.6', true] ... s3:select{} --- - - [true, -459, 4000, 'foobar', '36.6', true] - ['abc#$23', 1000, -21.542, 'namesurname', 99, false] - [100.5, 30, 95, 'str1', 5, true] ... i3_1:select({100.5}) --- - error: HASH index does not support selects via a partial key (expected 3 parts, got 1). Please Consider changing index type to TREE. ... i3_1:select({true, -459}) --- - error: HASH index does not support selects via a partial key (expected 3 parts, got 2). Please Consider changing index type to TREE. ... i3_1:select({"abc#$23", 1000, -21.542}) --- - - ['abc#$23', 1000, -21.542, 'namesurname', 99, false] ... i3_2:select({"str1", 5}) --- - - [100.5, 30, 95, 'str1', 5, true] ... i3_2:select({"str"}) --- - error: HASH index does not support selects via a partial key (expected 2 parts, got 1). Please Consider changing index type to TREE. ... i3_2:select({"str", 5}) --- - [] ... i3_2:select({"foobar", "36.6"}) --- - - [true, -459, 4000, 'foobar', '36.6', true] ... i3_3:select{true} --- - - [true, -459, 4000, 'foobar', '36.6', true] - [100.5, 30, 95, 'str1', 5, true] ... i3_3:select{false} --- - - ['abc#$23', 1000, -21.542, 'namesurname', 99, false] ... i3_3:select{} --- - - ['abc#$23', 1000, -21.542, 'namesurname', 99, false] - [true, -459, 4000, 'foobar', '36.6', true] - [100.5, 30, 95, 'str1', 5, true] ... s3:drop() --- ... -- #2112 int vs. double compare s5 = box.schema.space.create('my_space5') --- ... _ = s5:create_index('primary', {parts={1, 'scalar'}}) --- ... -- small range 1 s5:insert({5}) --- - [5] ... s5:insert({5.1}) --- - [5.1] ... s5:select() --- - - [5] - [5.1] ... s5:truncate() --- ... -- small range 2 s5:insert({5.1}) --- - [5.1] ... 
s5:insert({5}) --- - [5] ... s5:select() --- - - [5] - [5.1] ... s5:truncate() --- ... -- small range 3 s5:insert({-5}) --- - [-5] ... s5:insert({-5.1}) --- - [-5.1] ... s5:select() --- - - [-5.1] - [-5] ... s5:truncate() --- ... -- small range 4 s5:insert({-5.1}) --- - [-5.1] ... s5:insert({-5}) --- - [-5] ... s5:select() --- - - [-5.1] - [-5] ... s5:truncate() --- ... -- conversion to another type is lossy for both values s5:insert({18446744073709551615ULL}) --- - [18446744073709551615] ... s5:insert({3.6893488147419103e+19}) --- - [3.6893488147419e+19] ... s5:select() --- - - [18446744073709551615] - [3.6893488147419e+19] ... s5:truncate() --- ... -- insert in a different order to excersise another codepath s5:insert({3.6893488147419103e+19}) --- - [3.6893488147419e+19] ... s5:insert({18446744073709551615ULL}) --- - [18446744073709551615] ... s5:select() --- - - [18446744073709551615] - [3.6893488147419e+19] ... s5:truncate() --- ... -- MP_INT vs MP_UINT s5:insert({-9223372036854775808LL}) --- - [-9223372036854775808] ... s5:insert({-3.6893488147419103e+19}) --- - [-3.6893488147419e+19] ... s5:select() --- - - [-3.6893488147419e+19] - [-9223372036854775808] ... s5:truncate() --- ... -- insert in a different order to excersise another codepath s5:insert({-3.6893488147419103e+19}) --- - [-3.6893488147419e+19] ... s5:insert({-9223372036854775808LL}) --- - [-9223372036854775808] ... s5:select() --- - - [-3.6893488147419e+19] - [-9223372036854775808] ... s5:truncate() --- ... -- different signs 1 s5:insert({9223372036854775807LL}) --- - [9223372036854775807] ... s5:insert({-3.6893488147419103e+19}) --- - [-3.6893488147419e+19] ... s5:select() --- - - [-3.6893488147419e+19] - [9223372036854775807] ... s5:truncate() --- ... -- different signs 2 s5:insert({-3.6893488147419103e+19}) --- - [-3.6893488147419e+19] ... s5:insert({9223372036854775807LL}) --- - [9223372036854775807] ... s5:select() --- - - [-3.6893488147419e+19] - [9223372036854775807] ... 
s5:truncate() --- ... -- different signs 3 s5:insert({-9223372036854775808LL}) --- - [-9223372036854775808] ... s5:insert({3.6893488147419103e+19}) --- - [3.6893488147419e+19] ... s5:select() --- - - [-9223372036854775808] - [3.6893488147419e+19] ... s5:truncate() --- ... -- different signs 4 s5:insert({3.6893488147419103e+19}) --- - [3.6893488147419e+19] ... s5:insert({-9223372036854775808LL}) --- - [-9223372036854775808] ... s5:select() --- - - [-9223372036854775808] - [3.6893488147419e+19] ... s5:truncate() --- ... -- different magnitude 1 s5:insert({1.1}) --- - [1.1] ... s5:insert({18446744073709551615ULL}) --- - [18446744073709551615] ... s5:select() --- - - [1.1] - [18446744073709551615] ... s5:truncate() --- ... -- different magnitude 2 s5:insert({18446744073709551615ULL}) --- - [18446744073709551615] ... s5:insert({1.1}) --- - [1.1] ... s5:select() --- - - [1.1] - [18446744073709551615] ... s5:truncate() --- ... -- Close values ffi = require('ffi') --- ... ffi.new('double', 1152921504606846976) == 1152921504606846976ULL --- - true ... ffi.new('double', 1152921504606846977) == 1152921504606846976ULL --- - true ... -- Close values 1 s5:insert({1152921504606846976ULL}) --- - [1152921504606846976] ... s5:insert({ffi.new('double', 1152921504606846976ULL)}) -- fail --- - error: Duplicate key exists in unique index 'primary' in space 'my_space5' ... s5:select() --- - - [1152921504606846976] ... s5:truncate() --- ... -- Close values 2 s5:insert({1152921504606846977ULL}) --- - [1152921504606846977] ... s5:insert({ffi.new('double', 1152921504606846976ULL)}) -- success --- - [1152921504606846976] ... s5:select() --- - - [1152921504606846976] - [1152921504606846977] ... s5:truncate() --- ... -- Close values 3 s5:insert({-1152921504606846976LL}) --- - [-1152921504606846976] ... s5:insert({ffi.new('double', -1152921504606846976LL)}) -- fail --- - error: Duplicate key exists in unique index 'primary' in space 'my_space5' ... s5:select() --- - - [-1152921504606846976] ... 
s5:truncate() --- ... -- Close values 4 s5:insert({-1152921504606846977LL}) --- - [-1152921504606846977] ... s5:insert({ffi.new('double', -1152921504606846976LL)}) -- success --- - [-1152921504606846976] ... s5:select() --- - - [-1152921504606846977] - [-1152921504606846976] ... s5:truncate() --- ... -- Close values 5 ffi.cdef "double exp2(double);" --- ... s5:insert({0xFFFFFFFFFFFFFFFFULL}) --- - [18446744073709551615] ... s5:insert({ffi.new('double', ffi.C.exp2(64))}) -- success --- - [1.844674407371e+19] ... s5:select() --- - - [18446744073709551615] - [1.844674407371e+19] ... s5:truncate() --- ... -- Close values 6 s5:insert({0x8000000000000000LL}) --- - [-9223372036854775808] ... s5:insert({ffi.new('double', -ffi.C.exp2(63))}) -- duplicate --- - error: Duplicate key exists in unique index 'primary' in space 'my_space5' ... s5:select() --- - - [-9223372036854775808] ... s5:truncate() --- ... -- Close values 7 s5:insert({0x7FFFFFFFFFFFFFFFLL}) --- - [9223372036854775807] ... s5:insert({ffi.new('double', ffi.C.exp2(63))}) -- ok --- - [9223372036854775808] ... s5:select() --- - - [9223372036854775807] - [9223372036854775808] ... s5:truncate() --- ... s5:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/temp_spaces.test.lua0000664000000000000000000000155013306565107021750 0ustar rootroot-- temporary spaces _space = box.space._space -- not a temporary FLAGS = 6 s = box.schema.space.create('t', { temporary = true }) s.temporary s:drop() -- not a temporary, too s = box.schema.space.create('t', { temporary = false }) s.temporary s:drop() -- not a temporary, too s = box.schema.space.create('t', { temporary = nil }) s.temporary s:drop() s = box.schema.space.create('t', { temporary = true }) index = s:create_index('primary', { type = 'hash' }) s:insert{1, 2, 3} s:get{1} s:len() -- check that temporary space can be modified in read-only mode (gh-1378) box.cfg{read_only=true} box.cfg.read_only s:insert{2, 3, 4} s:get{2} s:len() box.cfg{read_only=false} box.cfg.read_only env = require('test_run') test_run = env.new() test_run:cmd('restart server default') FLAGS = 6 _space = box.space._space s = box.space.t s:len() s.temporary s:drop() s = nil tarantool_1.9.1.26.g63eb81e3c/test/box/errinj.result0000664000000000000000000004216113306565107020520 0ustar rootrooterrinj = box.error.injection --- ... net_box = require('net.box') --- ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... 
errinj.info() --- - ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT: state: 0 ERRINJ_WAL_WRITE: state: false ERRINJ_VYRUN_DATA_READ: state: false ERRINJ_VY_SCHED_TIMEOUT: state: 0 ERRINJ_WAL_WRITE_PARTIAL: state: -1 ERRINJ_VY_GC: state: false ERRINJ_WAL_DELAY: state: false ERRINJ_XLOG_READ: state: -1 ERRINJ_WAL_WRITE_EOF: state: false ERRINJ_VYRUN_INDEX_GARBAGE: state: false ERRINJ_VY_DELAY_PK_LOOKUP: state: false ERRINJ_VY_TASK_COMPLETE: state: false ERRINJ_PORT_DUMP: state: false ERRINJ_WAL_IO: state: false ERRINJ_TUPLE_ALLOC: state: false ERRINJ_VY_READ_PAGE: state: false ERRINJ_RELAY_REPORT_INTERVAL: state: 0 ERRINJ_VY_READ_PAGE_TIMEOUT: state: 0 ERRINJ_XLOG_META: state: false ERRINJ_WAL_WRITE_DISK: state: false ERRINJ_VY_RUN_WRITE: state: false ERRINJ_VY_LOG_FLUSH_DELAY: state: false ERRINJ_SNAP_COMMIT_DELAY: state: false ERRINJ_RELAY_FINAL_SLEEP: state: false ERRINJ_VY_RUN_DISCARD: state: false ERRINJ_WAL_ROTATE: state: false ERRINJ_LOG_ROTATE: state: false ERRINJ_VY_POINT_ITER_WAIT: state: false ERRINJ_RELAY_EXIT_DELAY: state: 0 ERRINJ_IPROTO_TX_DELAY: state: false ERRINJ_BUILD_SECONDARY: state: -1 ERRINJ_TUPLE_FIELD: state: false ERRINJ_XLOG_GARBAGE: state: false ERRINJ_INDEX_ALLOC: state: false ERRINJ_RELAY_TIMEOUT: state: 0 ERRINJ_TESTING: state: false ERRINJ_VY_RUN_WRITE_TIMEOUT: state: 0 ERRINJ_VY_SQUASH_TIMEOUT: state: 0 ERRINJ_VY_LOG_FLUSH: state: false ERRINJ_VY_INDEX_DUMP: state: -1 ... errinj.set("some-injection", true) --- - 'error: can''t find error injection ''some-injection''' ... errinj.set("some-injection") -- check error --- - 'error: can''t find error injection ''some-injection''' ... space:select{222444} --- - [] ... errinj.set("ERRINJ_TESTING", true) --- - ok ... space:select{222444} --- - error: Error injection 'ERRINJ_TESTING' ... errinj.set("ERRINJ_TESTING", false) --- - ok ... -- Check how well we handle a failed log write errinj.set("ERRINJ_WAL_IO", true) --- - ok ... space:insert{1} --- - error: Failed to write to disk ... space:get{1} --- ... 
errinj.set("ERRINJ_WAL_IO", false) --- - ok ... space:insert{1} --- - [1] ... errinj.set("ERRINJ_WAL_IO", true) --- - ok ... space:update(1, {{'=', 2, 2}}) --- - error: Failed to write to disk ... space:get{1} --- - [1] ... space:get{2} --- ... errinj.set("ERRINJ_WAL_IO", false) --- - ok ... space:truncate() --- ... -- Check a failed log rotation errinj.set("ERRINJ_WAL_ROTATE", true) --- - ok ... space:insert{1} --- - error: Failed to write to disk ... space:get{1} --- ... errinj.set("ERRINJ_WAL_ROTATE", false) --- - ok ... space:insert{1} --- - [1] ... errinj.set("ERRINJ_WAL_ROTATE", true) --- - ok ... space:update(1, {{'=', 2, 2}}) --- - error: Failed to write to disk ... space:get{1} --- - [1] ... space:get{2} --- ... errinj.set("ERRINJ_WAL_ROTATE", false) --- - ok ... space:update(1, {{'=', 2, 2}}) --- - [1, 2] ... space:get{1} --- - [1, 2] ... space:get{2} --- ... errinj.set("ERRINJ_WAL_ROTATE", true) --- - ok ... space:truncate() --- - error: Failed to write to disk ... errinj.set("ERRINJ_WAL_ROTATE", false) --- - ok ... space:truncate() --- ... space:drop() --- ... -- Check how well we handle a failed log write in DDL s_disabled = box.schema.space.create('disabled') --- ... s_withindex = box.schema.space.create('withindex') --- ... index1 = s_withindex:create_index('primary', { type = 'hash' }) --- ... s_withdata = box.schema.space.create('withdata') --- ... index2 = s_withdata:create_index('primary', { type = 'tree' }) --- ... s_withdata:insert{1, 2, 3, 4, 5} --- - [1, 2, 3, 4, 5] ... s_withdata:insert{4, 5, 6, 7, 8} --- - [4, 5, 6, 7, 8] ... index3 = s_withdata:create_index('secondary', { type = 'hash', parts = {2, 'unsigned', 3, 'unsigned' }}) --- ... errinj.set("ERRINJ_WAL_IO", true) --- - ok ... test = box.schema.space.create('test') --- - error: Failed to write to disk ... s_disabled:create_index('primary', { type = 'hash' }) --- - error: Failed to write to disk ... s_disabled.enabled --- - false ... 
s_disabled:insert{0} --- - error: 'No index #0 is defined in space ''disabled''' ... s_withindex:create_index('secondary', { type = 'tree', parts = { 2, 'unsigned'} }) --- - error: Failed to write to disk ... s_withindex.index.secondary --- - null ... s_withdata.index.secondary:drop() --- - error: Failed to write to disk ... s_withdata.index.secondary.unique --- - true ... s_withdata:drop() --- - error: Failed to write to disk ... box.space['withdata'].enabled --- - true ... index4 = s_withdata:create_index('another', { type = 'tree', parts = { 5, 'unsigned' }, unique = false}) --- - error: Failed to write to disk ... s_withdata.index.another --- - null ... errinj.set("ERRINJ_WAL_IO", false) --- - ok ... test = box.schema.space.create('test') --- ... index5 = s_disabled:create_index('primary', { type = 'hash' }) --- ... s_disabled.enabled --- - true ... s_disabled:insert{0} --- - [0] ... index6 = s_withindex:create_index('secondary', { type = 'tree', parts = { 2, 'unsigned'} }) --- ... s_withindex.index.secondary.unique --- - true ... s_withdata.index.secondary:drop() --- ... s_withdata.index.secondary --- - null ... s_withdata:drop() --- ... box.space['withdata'] --- - null ... index7 = s_withdata:create_index('another', { type = 'tree', parts = { 5, 'unsigned' }, unique = false}) --- - error: Space 'withdata' does not exist ... s_withdata.index.another --- - null ... test:drop() --- ... s_disabled:drop() --- ... s_withindex:drop() --- ... -- Check transaction rollback when out of memory env = require('test_run') --- ... test_run = env.new() --- ... s = box.schema.space.create('s') --- ... _ = s:create_index('pk') --- ... errinj.set("ERRINJ_TUPLE_ALLOC", true) --- - ok ... s:auto_increment{} --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... s:select{} --- - [] ... s:auto_increment{} --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... s:select{} --- - [] ... 
s:auto_increment{} --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... s:select{} --- - [] ... test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s:insert{1} box.commit(); --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... box.rollback(); --- ... s:select{}; --- - [] ... box.begin() s:insert{1} s:insert{2} box.commit(); --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... s:select{}; --- - [] ... box.rollback(); --- ... box.begin() pcall(s.insert, s, {1}) s:insert{2} box.commit(); --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... s:select{}; --- - [] ... box.rollback(); --- ... errinj.set("ERRINJ_TUPLE_ALLOC", false); --- - ok ... box.begin() s:insert{1} errinj.set("ERRINJ_TUPLE_ALLOC", true) s:insert{2} box.commit(); --- - error: Failed to allocate 18 bytes in slab allocator for memtx_tuple ... errinj.set("ERRINJ_TUPLE_ALLOC", false); --- - ok ... box.rollback(); --- ... s:select{}; --- - [] ... box.begin() s:insert{1} errinj.set("ERRINJ_TUPLE_ALLOC", true) pcall(s.insert, s, {2}) box.commit(); --- ... s:select{}; --- - - [1] ... box.rollback(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... errinj.set("ERRINJ_TUPLE_ALLOC", false) --- - ok ... s:drop() --- ... s = box.schema.space.create('test') --- ... _ = s:create_index('test', {parts = {1, 'unsigned', 3, 'unsigned', 5, 'unsigned'}}) --- ... s:insert{1, 2, 3, 4, 5, 6} --- - [1, 2, 3, 4, 5, 6] ... t = s:select{}[1] --- ... errinj.set("ERRINJ_TUPLE_FIELD", true) --- - ok ... tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) --- - 1nil3nil5nil ... errinj.set("ERRINJ_TUPLE_FIELD", false) --- - ok ... tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) --- - '123456' ... s:drop() --- ... s = box.schema.space.create('test') --- ... 
_ = s:create_index('test', {parts = {2, 'unsigned', 4, 'unsigned', 6, 'unsigned'}}) --- ... s:insert{1, 2, 3, 4, 5, 6} --- - [1, 2, 3, 4, 5, 6] ... t = s:select{}[1] --- ... errinj.set("ERRINJ_TUPLE_FIELD", true) --- - ok ... tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) --- - 12nil4nil6 ... errinj.set("ERRINJ_TUPLE_FIELD", false) --- - ok ... tostring(t[1]) .. tostring(t[2]) ..tostring(t[3]) .. tostring(t[4]) .. tostring(t[5]) .. tostring(t[6]) --- - '123456' ... -- Cleanup s:drop() --- ... -- -- gh-2046: don't store offsets for sequential multi-parts keys -- s = box.schema.space.create('test') --- ... _ = s:create_index('seq2', { parts = { 1, 'unsigned', 2, 'unsigned' }}) --- ... _ = s:create_index('seq3', { parts = { 1, 'unsigned', 2, 'unsigned', 3, 'unsigned' }}) --- ... _ = s:create_index('seq5', { parts = { 1, 'unsigned', 2, 'unsigned', 3, 'unsigned', 4, 'scalar', 5, 'number' }}) --- ... _ = s:create_index('rnd1', { parts = { 3, 'unsigned' }}) --- ... errinj.set("ERRINJ_TUPLE_FIELD", true) --- - ok ... tuple = s:insert({1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) --- ... tuple --- - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... tuple[1] -- not-null, always accessible --- - 1 ... tuple[2] -- null, doesn't have offset --- - null ... tuple[3] -- not null, has offset --- - 3 ... tuple[4] -- null, doesn't have offset --- - null ... tuple[5] -- null, doesn't have offset --- - null ... s.index.seq2:select({1}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... s.index.seq2:select({1, 2}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... s.index.seq3:select({1}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... s.index.seq3:select({1, 2, 3}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... s.index.seq5:select({1}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... s.index.seq5:select({1, 2, 3, 4, 5}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... s.index.rnd1:select({3}) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ... 
errinj.set("ERRINJ_TUPLE_FIELD", false) --- - ok ... s:drop() --- ... space = box.schema.space.create('test') --- ... _ = space:create_index('pk') --- ... errinj.set("ERRINJ_WAL_WRITE", true) --- - ok ... space:insert{1} --- - error: Failed to write to disk ... errinj.set("ERRINJ_WAL_WRITE", false) --- - ok ... errinj.set("ERRINJ_WAL_WRITE_DISK", true) --- - ok ... _ = space:insert{1, require'digest'.urandom(192 * 1024)} --- - error: Failed to write to disk ... errinj.set("ERRINJ_WAL_WRITE_DISK", false) --- - ok ... _ = space:insert{1} --- ... errinj.set("ERRINJ_WAL_WRITE", true) --- - ok ... box.snapshot() --- - error: Error injection 'xlog write injection' ... errinj.set("ERRINJ_WAL_WRITE", false) --- - ok ... space:drop() --- ... --test space:bsize() in case of memory error utils = dofile('utils.lua') --- ... s = box.schema.space.create('space_bsize') --- ... idx = s:create_index('primary') --- ... for i = 1, 13 do s:insert{ i, string.rep('x', i) } end --- ... s:bsize() --- - 130 ... utils.space_bsize(s) --- - 130 ... errinj.set("ERRINJ_TUPLE_ALLOC", true) --- - ok ... s:replace{1, "test"} --- - error: Failed to allocate 23 bytes in slab allocator for memtx_tuple ... s:bsize() --- - 130 ... utils.space_bsize(s) --- - 130 ... s:update({1}, {{'=', 3, '!'}}) --- - error: Failed to allocate 22 bytes in slab allocator for memtx_tuple ... s:bsize() --- - 130 ... utils.space_bsize(s) --- - 130 ... errinj.set("ERRINJ_TUPLE_ALLOC", false) --- - ok ... s:drop() --- ... space = box.schema.space.create('test') --- ... index1 = space:create_index('primary') --- ... fiber = require'fiber' --- ... ch = fiber.channel(1) --- ... test_run:cmd('setopt delimiter ";"') --- - true ... function test() errinj.set('ERRINJ_WAL_WRITE_DISK', true) pcall(box.space.test.replace, box.space.test, {1, 1}) errinj.set('ERRINJ_WAL_WRITE_DISK', false) ch:put(true) end ; --- ... function run() fiber.create(test) box.snapshot() end ; --- ... test_run:cmd('setopt delimiter ""'); --- - true ... 
-- Port_dump can fail. box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... cn = net_box.connect(box.cfg.listen) --- ... cn:ping() --- - true ... errinj.set('ERRINJ_PORT_DUMP', true) --- - ok ... ok, ret = pcall(cn.space._space.select, cn.space._space) --- ... assert(not ok) --- - true ... assert(string.match(tostring(ret), 'Failed to allocate')) --- - Failed to allocate ... errinj.set('ERRINJ_PORT_DUMP', false) --- - ok ... cn:close() --- ... box.schema.user.revoke('guest', 'read, write, execute', 'universe') --- ... run() --- - error: Can't start a checkpoint while in cascading rollback ... ch:get() --- - true ... box.space.test:select() --- - [] ... test_run:cmd('restart server default') box.space.test:select() --- - [] ... box.space.test:drop() --- ... errinj = box.error.injection --- ... net_box = require('net.box') --- ... fiber = require'fiber' --- ... s = box.schema.space.create('test') --- ... _ = s:create_index('pk') --- ... ch = fiber.channel(2) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function test(tuple) ch:put({pcall(s.replace, s, tuple)}) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... errinj.set("ERRINJ_WAL_WRITE", true) --- - ok ... _ = {fiber.create(test, {1, 2, 3}), fiber.create(test, {3, 4, 5})} --- ... {ch:get(), ch:get()} --- - - - false - Failed to write to disk - - false - Failed to write to disk ... errinj.set("ERRINJ_WAL_WRITE", false) --- - ok ... s:drop() --- ... -- rebuild some secondary indexes if the primary was changed s = box.schema.space.create('test') --- ... i1 = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... --i2 = s:create_index('i2', {parts = {5, 'unsigned'}, unique = false}) --i3 = s:create_index('i3', {parts = {6, 'unsigned'}, unique = false}) i2 = i1 i3 = i1 --- ... _ = s:insert{1, 4, 3, 4, 10, 10} --- ... _ = s:insert{2, 3, 1, 2, 10, 10} --- ... _ = s:insert{3, 2, 2, 1, 10, 10} --- ... _ = s:insert{4, 1, 4, 3, 10, 10} --- ... 
i1:select{} --- - - [1, 4, 3, 4, 10, 10] - [2, 3, 1, 2, 10, 10] - [3, 2, 2, 1, 10, 10] - [4, 1, 4, 3, 10, 10] ... i2:select{} --- - - [1, 4, 3, 4, 10, 10] - [2, 3, 1, 2, 10, 10] - [3, 2, 2, 1, 10, 10] - [4, 1, 4, 3, 10, 10] ... i3:select{} --- - - [1, 4, 3, 4, 10, 10] - [2, 3, 1, 2, 10, 10] - [3, 2, 2, 1, 10, 10] - [4, 1, 4, 3, 10, 10] ... i1:alter({parts={2, 'unsigned'}}) --- ... _ = collectgarbage('collect') --- ... i1:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... i2:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... i3:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... box.error.injection.set('ERRINJ_BUILD_SECONDARY', i2.id) --- - ok ... i1:alter{parts = {3, "unsigned"}} --- - error: Error injection 'buildSecondaryKey' ... _ = collectgarbage('collect') --- ... i1:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... i2:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... i3:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... box.error.injection.set('ERRINJ_BUILD_SECONDARY', i3.id) --- - ok ... i1:alter{parts = {4, "unsigned"}} --- - error: Error injection 'buildSecondaryKey' ... _ = collectgarbage('collect') --- ... i1:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... i2:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... i3:select{} --- - - [4, 1, 4, 3, 10, 10] - [3, 2, 2, 1, 10, 10] - [2, 3, 1, 2, 10, 10] - [1, 4, 3, 4, 10, 10] ... box.error.injection.set('ERRINJ_BUILD_SECONDARY', -1) --- - ok ... s:drop() --- ... 
-- -- Do not rebuild index if the only change is a key part type -- compatible change. -- s = box.schema.space.create('test') --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... s:replace{1, 1} --- - [1, 1] ... box.error.injection.set('ERRINJ_BUILD_SECONDARY', sk.id) --- - ok ... sk:alter({parts = {2, 'number'}}) --- ... box.error.injection.set('ERRINJ_BUILD_SECONDARY', -1) --- - ok ... s:drop() --- ... -- -- gh-3255: iproto can crash and discard responses, if a network -- is saturated, and DML yields too long on commit. -- box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... s = box.schema.space.create('test') --- ... _ = s:create_index('pk') --- ... c = net_box.connect(box.cfg.listen) --- ... ch = fiber.channel(200) --- ... errinj.set("ERRINJ_IPROTO_TX_DELAY", true) --- - ok ... for i = 1, 100 do fiber.create(function() for j = 1, 10 do c.space.test:replace{1} end ch:put(true) end) end --- ... for i = 1, 100 do fiber.create(function() for j = 1, 10 do c.space.test:select() end ch:put(true) end) end --- ... for i = 1, 200 do ch:get() end --- ... errinj.set("ERRINJ_IPROTO_TX_DELAY", false) --- - ok ... s:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute','universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/CMakeLists.txt0000664000000000000000000000027113306560010020507 0ustar rootrootinclude_directories(${MSGPUCK_INCLUDE_DIRS}) build_module(function1 function1.c) build_module(reload1 reload1.c) build_module(reload2 reload2.c) build_module(tuple_bench tuple_bench.c) tarantool_1.9.1.26.g63eb81e3c/test/box/temp_spaces.result0000664000000000000000000000231013306565107021522 0ustar rootroot-- temporary spaces _space = box.space._space --- ... -- not a temporary FLAGS = 6 --- ... s = box.schema.space.create('t', { temporary = true }) --- ... s.temporary --- - true ... s:drop() --- ... -- not a temporary, too s = box.schema.space.create('t', { temporary = false }) --- ... 
s.temporary --- - false ... s:drop() --- ... -- not a temporary, too s = box.schema.space.create('t', { temporary = nil }) --- ... s.temporary --- - false ... s:drop() --- ... s = box.schema.space.create('t', { temporary = true }) --- ... index = s:create_index('primary', { type = 'hash' }) --- ... s:insert{1, 2, 3} --- - [1, 2, 3] ... s:get{1} --- - [1, 2, 3] ... s:len() --- - 1 ... -- check that temporary space can be modified in read-only mode (gh-1378) box.cfg{read_only=true} --- ... box.cfg.read_only --- - true ... s:insert{2, 3, 4} --- - [2, 3, 4] ... s:get{2} --- - [2, 3, 4] ... s:len() --- - 2 ... box.cfg{read_only=false} --- ... box.cfg.read_only --- - false ... env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') FLAGS = 6 --- ... _space = box.space._space --- ... s = box.space.t --- ... s:len() --- - 0 ... s.temporary --- - true ... s:drop() --- ... s = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/leak.result0000664000000000000000000000155513306560010020131 0ustar rootroot-- -- gh-853 - memory leak on start if replace in xlog -- env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("create server tiny with script='box/tiny.lua'") --- - true ... test_run:cmd("start server tiny") --- - true ... test_run:cmd("switch tiny") --- - true ... _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('pk') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... -- or we run out of memory too soon for i=1, 500 do box.space.test:replace{1, string.rep('a', 50000)} collectgarbage('collect') end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... test_run:cmd('restart server tiny') box.space.test:len() --- - 1 ... box.space.test:drop() --- ... test_run:cmd("switch default") --- - true ... test_run:cmd("stop server tiny") --- - true ... test_run:cmd("cleanup server tiny") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_rect.test.lua0000664000000000000000000000232513306560010021570 0ustar rootroots = box.schema.space.create('spatial') _ = s:create_index('primary') _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) s:insert{1,{0,0,10,10}{ s:insert{2,{5,5,10,10}} s:insert{3,{0,0,5,5}} -- select all records s.index.spatial:select({}, {iterator = 'ALL'}) -- select records belonging to rectangle (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'LE'}) -- select records strict belonging to rectangle (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'LT'}) -- select records strict belonging to rectangle (4,4,11,11) s.index.spatial:select({4,4,11,11}, {iterator = 'LT'}) -- select records containing point (5,5) s.index.spatial:select({5,5}, {iterator = 'GE'}) -- select records containing rectangle (1,1,2,2) s.index.spatial:select({1,1,2,2}, {iterator = 'GE'}) -- select records strict containing rectangle (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'GT'}) -- select records overlapping rectangle (9,4,11,6) s.index.spatial:select({9,4,11,6}, {iterator = 'OVERLAPS'}) -- select records with coordinates (0,0,5,5) s.index.spatial:select({0,0,5,5}, {iterator = 'EQ'}) -- select neighbors of point (1,1) s.index.spatial:select({1,1}, {iterator = 'NEIGHBOR'}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/tuple_bench.result0000664000000000000000000000221313306560010021475 0ustar rootrootbuild_path = os.getenv("BUILDDIR") --- ... package.cpath = build_path..'/test/box/?.so;'..build_path..'/test/box/?.dylib;'..package.cpath --- ... net = require('net.box') --- ... c = net:new(os.getenv("LISTEN")) --- ... box.schema.func.create('tuple_bench', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'tuple_bench') --- ... space = box.schema.space.create('tester') --- ... key_parts = {1, 'unsigned', 2, 'string'} --- ... 
_ = space:create_index('primary', {type = 'TREE', parts = key_parts}) --- ... box.schema.user.grant('guest', 'read,write', 'space', 'tester') --- ... box.space.tester:insert({1, "abc", 100}) --- - [1, 'abc', 100] ... box.space.tester:insert({2, "bcd", 200}) --- - [2, 'bcd', 200] ... box.space.tester:insert({3, "ccd", 200}) --- - [3, 'ccd', 200] ... prof = require('gperftools.cpu') --- ... prof.start('tuple.prof') --- - true ... key_types = {} --- ... for i = 1, #key_parts, 2 do table.insert(key_types, key_parts[i + 1]) end --- ... c:call('tuple_bench', key_types) --- - [] ... prof.flush() --- ... prof.stop() --- ... box.schema.func.drop("tuple_bench") --- ... box.space.tester:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/proxy.lua0000664000000000000000000000040313306560010017630 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", rows_per_wal = 50 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/box/configuration.result0000664000000000000000000000372113306560010022061 0ustar rootroot # Bug #876541: # Test floating point values (wal_fsync_delay) with fractional part # (https://bugs.launchpad.net/bugs/876541) box.cfg.wal_fsync_delay --- - 0.01 ... print_config() --- - io_collect_interval: 0 pid_file: box.pid slab_alloc_factor: 2 slab_alloc_minimal: 64 admin_port: logger: cat - >> tarantool.log readahead: 16320 wal_dir: . logger_nonblock: true log_level: 5 snap_dir: . coredump: false background: false too_long_threshold: 0.5 rows_per_wal: 50 wal_mode: fsync_delay snap_io_rate_limit: 0 panic_on_snap_error: true panic_on_wal_error: false local_hot_standby: false slab_alloc_arena: 0.1 bind_ipaddr: INADDR_ANY wal_fsync_delay: 0 primary_port: wal_dir_rescan_delay: 0.1 ... # Test bug #977898 box.space.tweedledum:insert{4, 8, 16} --- - [4, 8, 16] ... # Test insert from init.lua box.space.tweedledum:get(1) --- - [1, 2, 4, 8] ... 
box.space.tweedledum:get(2) --- - [2, 4, 8, 16] ... box.space.tweedledum:get(4) --- - [4, 8, 16] ... # Test bug #1002272 floor(0.5) --- - 0 ... floor(0.9) --- - 0 ... floor(1.1) --- - 1 ... mod.test(10, 15) --- - 25 ... # Bug#99 Salloc initialization is not checked on startup # (https://github.com/tarantool/tarantool/issues/99) Can't start Tarantool ok # Bug#100 Segmentation fault if rows_per_wal = 0 # (https://github.com/tarantool/tarantool/issues/100) Can't start Tarantool ok # # Check that --background doesn't work if there is no logger # This is a test case for # https://bugs.launchpad.net/tarantool/+bug/750658 # "--background neither closes nor redirects stdin/stdout/stderr" Can't start Tarantool ok # A test case for Bug#726778 "Gopt broke wal_dir and snap_dir: they are no # longer relative to work_dir". # https://bugs.launchpad.net/tarantool/+bug/726778 # After addition of gopt(), we started to chdir() to the working # directory after option parsing. # Verify that this is not the case, and snap_dir and xlog_dir # can be relative to work_dir. tarantool_1.9.1.26.g63eb81e3c/test/box/access.result0000664000000000000000000007117713306565107020501 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... session = box.session --- ... -- user id for a Lua session is admin - 1 session.uid() --- - 1 ... -- extra arguments are ignored session.uid(nil) --- - 1 ... -- admin session.user() --- - admin ... session.effective_user() --- - admin ... -- extra argumentes are ignored session.user(nil) --- - admin ... -- password() is a function which returns base64(sha1(sha1(password)) -- a string to store in _user table box.schema.user.password('test') --- - lL3OvhkIPOKh+Vn9Avlkx69M/Ck= ... box.schema.user.password('test1') --- - BsC/W2Ts4vZItfBIpxkDkGugjlw= ... -- admin can create any user box.schema.user.create('test', { password = 'test' }) --- ... 
-- su() let's you change the user of the session -- the user will be unabe to change back unless he/she -- is granted access to 'su' session.su('test') --- ... -- you can't create spaces unless you have a write access on -- system space _space -- in future we may introduce a separate privilege box.schema.space.create('test') --- - error: Write access to space '_schema' is denied for user 'test' ... -- su() goes through because called from admin -- console, and it has no access checks -- for functions session.su('admin') --- ... box.schema.user.grant('test', 'write', 'space', '_space') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function usermax() local i = 1 while true do box.schema.user.create('user'..i) i = i + 1 end end; --- ... usermax(); --- - error: 'A limit on the total number of users has been reached: 32' ... function usermax() local i = 1 while true do box.schema.user.drop('user'..i) i = i + 1 end end; --- ... usermax(); --- - error: User 'user27' is not found ... test_run:cmd("setopt delimiter ''"); --- - true ... box.schema.user.create('rich') --- ... box.schema.user.grant('rich', 'read,write', 'universe') --- ... session.su('rich') --- ... uid = session.uid() --- ... box.schema.func.create('dummy') --- ... session.su('admin') --- ... box.space['_user']:delete{uid} --- - error: 'Failed to drop user or role ''rich'': the user has objects' ... box.schema.func.drop('dummy') --- ... box.space['_user']:delete{uid} --- - error: 'Failed to drop user or role ''rich'': the user has objects' ... box.schema.user.revoke('rich', 'read,write', 'universe') --- ... box.schema.user.revoke('rich', 'public') --- ... box.schema.user.disable("rich") --- ... -- test double disable is a no op box.schema.user.disable("rich") --- ... box.space['_user']:delete{uid} --- - [33, 1, 'rich', 'user', {}] ... box.schema.user.drop('test') --- ... -- gh-944 name is too long name = string.rep('a', box.schema.NAME_MAX - 1) --- ... 
box.schema.user.create(name..'aa') --- - error: 'Failed to create user ''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'': user name is too long' ... box.schema.user.create(name..'a') --- ... box.schema.user.drop(name..'a') --- ... box.schema.user.create(name) --- ... box.schema.user.drop(name) --- ... -- sudo box.schema.user.create('tester') --- ... -- admin -> user session.user() --- - admin ... session.su('tester', function() return session.user(), session.effective_user() end) --- - admin - tester ... session.user() --- - admin ... -- user -> admin session.su('tester') --- ... session.effective_user() --- - tester ... session.su('admin', function() return session.user(), session.effective_user() end) --- - tester - admin ... session.user() --- - tester ... session.effective_user() --- - tester ... -- drop current user session.su('admin', function() return box.schema.user.drop('tester') end) --- - error: 'Failed to drop user or role ''tester'': the user is active in the current session' ... session.user() --- - tester ... session.su('admin') --- ... box.schema.user.drop('tester') --- ... session.user() --- - admin ... -------------------------------------------------------------------------------- -- Check if identifiers obey the common constraints -------------------------------------------------------------------------------- identifier = require("identifier") --- ... test_run:cmd("setopt delimiter ';'") --- - true ... identifier.run_test( function (identifier) box.schema.user.create(identifier) box.schema.user.grant(identifier, 'super') box.session.su(identifier) box.session.su("admin") box.schema.user.revoke(identifier, 'super') end, box.schema.user.drop ); --- - All tests passed ... identifier.run_test( function (identifier) box.schema.role.create(identifier) box.schema.role.grant(identifier, 'execute,read,write', 'universe', nil, {if_not_exists = false}) end, box.schema.role.drop ); --- - All tests passed ... 
test_run:cmd("setopt delimiter ''"); --- - true ... -- valid identifiers box.schema.user.create('Петя_Иванов') --- ... box.schema.user.drop('Петя_Иванов') --- ... -- gh-300: misleading error message if a function does not exist LISTEN = require('uri').parse(box.cfg.listen) --- ... LISTEN ~= nil --- - true ... c = (require 'net.box').connect(LISTEN.host, LISTEN.service) --- ... c:call('nosuchfunction') --- - error: Execute access to function 'nosuchfunction' is denied for user 'guest' ... function nosuchfunction() end --- ... c:call('nosuchfunction') --- - error: Execute access to function 'nosuchfunction' is denied for user 'guest' ... nosuchfunction = nil --- ... c:call('nosuchfunction') --- - error: Execute access to function 'nosuchfunction' is denied for user 'guest' ... c:close() --- ... -- Dropping a space recursively drops all grants - it's possible to -- restore from a snapshot box.schema.user.create('testus') --- ... s = box.schema.space.create('admin_space') --- ... index = s:create_index('primary', {type = 'hash', parts = {1, 'unsigned'}}) --- ... box.schema.user.grant('testus', 'write', 'space', 'admin_space') --- ... s:drop() --- ... box.snapshot() --- - ok ... test_run:cmd('restart server default') box.schema.user.drop('testus') --- ... -- ------------------------------------------------------------ -- a test case for gh-289 -- box.schema.user.drop() with cascade doesn't work -- ------------------------------------------------------------ session = box.session --- ... box.schema.user.create('uniuser') --- ... box.schema.user.grant('uniuser', 'read, write, execute', 'universe') --- ... session.su('uniuser') --- ... us = box.schema.space.create('uniuser_space') --- ... session.su('admin') --- ... box.schema.user.drop('uniuser') --- ... 
-- ------------------------------------------------------------ -- A test case for gh-253 -- A user with universal grant has no access to drop oneself -- ------------------------------------------------------------ -- This behaviour is expected, since an object may be destroyed -- only by its creator at the moment -- ------------------------------------------------------------ box.schema.user.create('grantor') --- ... box.schema.user.grant('grantor', 'read, write, execute', 'universe') --- ... session.su('grantor') --- ... box.schema.user.create('grantee') --- ... box.schema.user.grant('grantee', 'read, write, execute', 'universe') --- - error: Grant access to universe '' is denied for user 'grantor' ... session.su('grantee') --- ... -- fails - can't suicide - ask the creator to kill you box.schema.user.drop('grantee') --- - error: Read access to space '_user' is denied for user 'grantee' ... session.su('grantor') --- ... box.schema.user.drop('grantee') --- ... -- fails, can't kill oneself box.schema.user.drop('grantor') --- - error: 'Failed to drop user or role ''grantor'': the user is active in the current session' ... session.su('admin') --- ... box.schema.user.drop('grantor') --- ... -- ---------------------------------------------------------- -- A test case for gh-299 -- It appears to be too easy to read all fields in _user -- table -- guest can't read _user table, add a test case -- ---------------------------------------------------------- session.su('guest') --- ... box.space._user:select{0} --- - error: Read access to space '_user' is denied for user 'guest' ... box.space._user:select{1} --- - error: Read access to space '_user' is denied for user 'guest' ... session.su('admin') --- ... -- ---------------------------------------------------------- -- A test case for gh-358 Change user does not work from lua -- Correct the update syntax in schema.lua -- ---------------------------------------------------------- box.schema.user.create('user1') --- ... 
box.space._user.index.name:select{'user1'} --- - - [32, 1, 'user1', 'user', {}] ... session.su('user1') --- ... box.schema.user.passwd('new_password') --- ... session.su('admin') --- ... box.space._user.index.name:select{'user1'} --- - - [32, 1, 'user1', 'user', {'chap-sha1': 'CRO/LiziDOIb+xlhrxJNSSBFjl8='}] ... box.schema.user.passwd('user1', 'extra_new_password') --- ... box.space._user.index.name:select{'user1'} --- - - [32, 1, 'user1', 'user', {'chap-sha1': 'nMc3F1oaUtz37IYbgGYYPZawmfE='}] ... box.schema.user.passwd('invalid_user', 'some_password') --- - error: User 'invalid_user' is not found ... box.schema.user.passwd() --- - error: 'Usage: box.schema.user.passwd([user,] password)' ... session.su('user1') --- ... -- permission denied box.schema.user.passwd('admin', 'xxx') --- - error: Read access to space '_user' is denied for user 'user1' ... session.su('admin') --- ... box.schema.user.drop('user1') --- ... box.space._user.index.name:select{'user1'} --- - [] ... -- ---------------------------------------------------------- -- A test case for gh-421 Granting a privilege revokes an -- existing grant -- ---------------------------------------------------------- box.schema.user.create('user') --- ... id = box.space._user.index.name:get{'user'}[1] --- ... box.schema.user.grant('user', 'read,write', 'universe') --- ... box.space._priv:select{id} --- - - [1, 32, 'role', 2, 4] - [1, 32, 'universe', 0, 27] ... box.schema.user.grant('user', 'read', 'universe') --- - error: User 'user' already has read access on universe 'nil' ... box.space._priv:select{id} --- - - [1, 32, 'role', 2, 4] - [1, 32, 'universe', 0, 27] ... box.schema.user.revoke('user', 'write', 'universe') --- ... box.space._priv:select{id} --- - - [1, 32, 'role', 2, 4] - [1, 32, 'universe', 0, 25] ... box.schema.user.revoke('user', 'read', 'universe') --- ... box.space._priv:select{id} --- - - [1, 32, 'role', 2, 4] - [1, 32, 'universe', 0, 24] ... 
box.schema.user.grant('user', 'write', 'universe') --- ... box.space._priv:select{id} --- - - [1, 32, 'role', 2, 4] - [1, 32, 'universe', 0, 26] ... box.schema.user.grant('user', 'read', 'universe') --- ... box.space._priv:select{id} --- - - [1, 32, 'role', 2, 4] - [1, 32, 'universe', 0, 27] ... box.schema.user.drop('user') --- ... box.space._priv:select{id} --- - [] ... -- ----------------------------------------------------------- -- Be a bit more rigorous in what is accepted in space _user -- ----------------------------------------------------------- utils = require('utils') --- ... box.space._user:insert{10, 1, 'name', 'strange-object-type', utils.setmap({})} --- - error: 'Failed to create user ''name'': unknown user type' ... box.space._user:insert{10, 1, 'name', 'role', utils.setmap{'password'}} --- - error: 'Failed to create role ''name'': authentication data can not be set for a role' ... session = nil --- ... -- ----------------------------------------------------------- -- admin can't manage grants on not owned objects -- ----------------------------------------------------------- box.schema.user.create('twostep') --- ... box.schema.user.grant('twostep', 'read,write,execute', 'universe') --- ... box.session.su('twostep') --- ... twostep = box.schema.space.create('twostep') --- ... index2 = twostep:create_index('primary') --- ... box.schema.func.create('test') --- ... box.session.su('admin') --- ... box.schema.user.revoke('twostep', 'execute,read,write', 'universe') --- ... box.schema.user.create('twostep_client') --- ... box.schema.user.grant('twostep_client', 'execute', 'function', 'test') --- ... box.schema.user.drop('twostep') --- ... box.schema.user.drop('twostep_client') --- ... -- the space is dropped when the user is dropped -- -- box.schema.user.exists() box.schema.user.exists('guest') --- - true ... box.schema.user.exists(nil) --- - false ... box.schema.user.exists(0) --- - true ... box.schema.user.exists(1) --- - true ... 
box.schema.user.exists(100500) --- - false ... box.schema.user.exists('admin') --- - true ... box.schema.user.exists('nosuchuser') --- - false ... box.schema.user.exists{} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... -- gh-671: box.schema.func.exists() box.schema.func.exists('nosuchfunc') --- - false ... box.schema.func.exists('guest') --- - false ... box.schema.func.exists(1) --- - true ... box.schema.func.exists(2) --- - false ... box.schema.func.exists('box.schema.user.info') --- - true ... box.schema.func.exists() --- - false ... box.schema.func.exists(nil) --- - false ... -- gh-665: user.exists() should nto be true for roles box.schema.user.exists('public') --- - false ... box.schema.role.exists('public') --- - true ... box.schema.role.exists(nil) --- - false ... -- test if_exists/if_not_exists in grant/revoke box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- - error: User 'guest' already has read,write,execute access on universe 'nil' ... box.schema.user.grant('guest', 'read,write,execute', 'universe', '', { if_not_exists = true }) --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... box.schema.user.revoke('guest', 'usage,session', 'universe') --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- - error: User 'guest' does not have read,write,execute access on universe 'nil' ... box.schema.user.revoke('guest', 'read,write,execute', 'universe', '', { if_exists = true }) --- ... box.schema.user.grant('guest', 'usage,session', 'universe') --- ... box.schema.func.create('dummy', { if_not_exists = true }) --- ... box.schema.func.create('dummy', { if_not_exists = true }) --- ... box.schema.func.drop('dummy') --- ... -- gh-664 roles: accepting bad syntax for create box.schema.user.create('user', 'blah') --- - error: Illegal parameters, options should be a table ... 
box.schema.user.drop('user', 'blah') --- - error: Illegal parameters, options should be a table ... -- gh-664 roles: accepting bad syntax for create box.schema.func.create('func', 'blah') --- - error: Illegal parameters, options should be a table ... box.schema.func.drop('blah', 'blah') --- - error: Illegal parameters, options should be a table ... -- gh-758 attempt to set password for user guest box.schema.user.passwd('guest', 'sesame') --- - error: Setting password for guest user has no effect ... -- gh-1205 box.schema.user.info fails box.schema.user.drop('guest') --- - error: 'Failed to drop user or role ''guest'': the user or the role is a system' ... box.schema.role.drop('guest') --- - error: Role 'guest' is not found ... box.space._user.index.name:delete{'guest'} --- - error: 'Failed to drop user or role ''guest'': the user or the role is a system' ... box.space._user:delete{box.schema.GUEST_ID} --- - error: 'Failed to drop user or role ''guest'': the user or the role is a system' ... #box.schema.user.info('guest') > 0 --- - true ... box.schema.user.drop('admin') --- - error: 'Failed to drop user or role ''admin'': the user or the role is a system' ... box.schema.role.drop('admin') --- - error: Role 'admin' is not found ... box.space._user.index.name:delete{'admin'} --- - error: 'Failed to drop user or role ''admin'': the user or the role is a system' ... box.space._user:delete{box.schema.ADMIN_ID} --- - error: 'Failed to drop user or role ''admin'': the user or the role is a system' ... #box.schema.user.info('admin') > 0 --- - true ... box.schema.user.drop('public') --- - error: User 'public' is not found ... box.schema.role.drop('public') --- - error: 'Failed to drop user or role ''public'': the user or the role is a system' ... box.space._user.index.name:delete{'public'} --- - error: 'Failed to drop user or role ''public'': the user or the role is a system' ... 
box.space._user:delete{box.schema.PUBLIC_ROLE_ID} --- - error: 'Failed to drop user or role ''public'': the user or the role is a system' ... #box.schema.role.info('public') > 0 --- - true ... box.schema.role.drop('super') --- - error: 'Failed to drop user or role ''super'': the user or the role is a system' ... box.schema.user.drop('super') --- - error: User 'super' is not found ... box.space._user.index.name:delete{'super'} --- - error: 'Failed to drop user or role ''super'': the user or the role is a system' ... box.space._user:delete{box.schema.SUPER_ROLE_ID} --- - error: 'Failed to drop user or role ''super'': the user or the role is a system' ... #box.schema.role.info('super') > 0 --- - true ... -- gh-944 name is too long name = string.rep('a', box.schema.NAME_MAX - 1) --- ... box.schema.func.create(name..'aa') --- - error: 'Failed to create function ''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'': function name is too long' ... box.schema.func.create(name..'a') --- ... box.schema.func.drop(name..'a') --- ... box.schema.func.create(name) --- ... box.schema.func.drop(name) --- ... -- A test case for: http://bugs.launchpad.net/bugs/712456 -- Verify that when trying to access a non-existing or -- very large space id, no crash occurs. LISTEN = require('uri').parse(box.cfg.listen) --- ... c = (require 'net.box').connect(LISTEN.host, LISTEN.service) --- ... c:_request("select", nil, 1, box.index.EQ, 0, 0, 0xFFFFFFFF, {}) --- - error: Space '1' does not exist ... c:_request("select", nil, 65537, box.index.EQ, 0, 0, 0xFFFFFFFF, {}) --- - error: Space '65537' does not exist ... c:_request("select", nil, 4294967295, box.index.EQ, 0, 0, 0xFFFFFFFF, {}) --- - error: Space '4294967295' does not exist ... c:close() --- ... session = box.session --- ... box.schema.user.create('test') --- ... box.schema.user.grant('test', 'read,write', 'universe') --- ... session.su('test') --- ... box.internal.collation.create('test', 'ICU', 'ru_RU') --- ... 
session.su('admin') --- ... box.internal.collation.drop('test') -- success --- ... box.internal.collation.create('test', 'ICU', 'ru_RU') --- ... session.su('test') --- ... box.internal.collation.drop('test') -- fail --- - error: Drop access to collation 'test' is denied for user 'test' ... session.su('admin') --- ... box.internal.collation.drop('test') -- success --- ... box.schema.user.drop('test') --- ... -- -- gh-2710 object drop revokes all associated privileges -- _ = box.schema.space.create('test_space') --- ... _ = box.schema.sequence.create('test_sequence') --- ... box.schema.func.create('test_function') --- ... box.schema.user.create('test_user') --- ... box.schema.user.grant('test_user', 'read', 'space', 'test_space') --- ... box.schema.user.grant('test_user', 'write', 'sequence', 'test_sequence') --- ... box.schema.user.grant('test_user', 'execute', 'function', 'test_function') --- ... box.schema.role.create('test_role') --- ... box.schema.role.grant('test_role', 'read', 'space', 'test_space') --- ... box.schema.role.grant('test_role', 'write', 'sequence', 'test_sequence') --- ... box.schema.role.grant('test_role', 'execute', 'function', 'test_function') --- ... box.schema.user.info('test_user') --- - - - execute - function - test_function - - execute - role - public - - write - sequence - test_sequence - - read - space - test_space - - session,usage - universe - ... box.schema.role.info('test_role') --- - - - execute - function - test_function - - write - sequence - test_sequence - - read - space - test_space ... box.space.test_space:drop() --- ... box.sequence.test_sequence:drop() --- ... box.schema.func.drop('test_function') --- ... box.schema.user.info('test_user') --- - - - execute - role - public - - session,usage - universe - ... box.schema.role.info('test_role') --- - [] ... box.schema.user.drop('test_user') --- ... box.schema.role.drop('test_role') --- ... 
-- gh-3023: box.session.su() changes both authenticated and effective -- user, while should only change the effective user -- function uids() return { uid = box.session.uid(), euid = box.session.euid() } end --- ... box.session.su('guest') --- ... uids() --- - uid: 0 euid: 0 ... box.session.su('admin') --- ... box.session.su('guest', uids) --- - uid: 1 euid: 0 ... -- -- gh-2898 System privileges -- s = box.schema.create_space("tweed") --- ... _ = s:create_index('primary', {type = 'hash', parts = {1, 'unsigned'}}) --- ... box.schema.user.create('test', {password="pass"}) --- ... box.schema.user.grant('test', 'read,write', 'universe') --- ... -- other users can't disable box.schema.user.create('test1') --- ... session.su("test1") --- ... box.schema.user.disable("test") --- - error: Read access to space '_user' is denied for user 'test1' ... session.su("admin") --- ... box.schema.user.disable("test") --- ... -- test double disable is a no op box.schema.user.disable("test") --- ... session.su("test") --- - error: Session access to universe '' is denied for user 'test' ... c = (require 'net.box').connect(LISTEN.host, LISTEN.service, {user="test", password="pass"}) --- ... c.state --- - error ... c.error --- - Session access to universe '' is denied for user 'test' ... session.su("test1") --- ... box.schema.user.grant("test", "usage", "universe") --- - error: Read access to space '_user' is denied for user 'test1' ... session.su('admin') --- ... box.schema.user.grant("test", "session", "universe") --- ... session.su("test") --- ... s:select{} --- - error: Usage access to universe '' is denied for user 'test' ... session.su('admin') --- ... box.schema.user.enable("test") --- ... -- check enable not fails on double enabling box.schema.user.enable("test") --- ... session.su("test") --- ... s:select{} --- - [] ... session.su("admin") --- ... box.schema.user.drop('test') --- ... box.schema.user.drop('test1') --- ... s:drop() --- ... 
-- -- gh-3022 role 'super' -- s = box.schema.space.create("admin_space") --- ... box.schema.user.grant('guest', 'super') --- ... box.session.su('guest') --- ... _ = box.schema.space.create('test') --- ... box.space.test:drop() --- ... _ = box.schema.user.create('test') --- ... box.schema.user.drop('test') --- ... _ = box.schema.func.create('test') --- ... box.schema.func.drop('test') --- ... -- gh-3088 bug: super role lacks drop privileges on other users' spaces s:drop() --- ... box.session.su('admin') --- ... box.schema.user.revoke('guest', 'super') --- ... box.session.su('guest') --- ... box.schema.space.create('test') --- - error: Write access to space '_schema' is denied for user 'guest' ... box.schema.user.create('test') --- - error: Read access to space '_user' is denied for user 'guest' ... box.schema.func.create('test') --- - error: Read access to space '_func' is denied for user 'guest' ... box.session.su('admin') --- ... -- -- gh-2911 on_access_denied trigger -- obj_type = nil --- ... obj_name = nil --- ... op_type = nil --- ... euid = nil --- ... auid = nil --- ... function access_denied_trigger(op, type, name) obj_type = type; obj_name = name; op_type = op end --- ... function uid() euid = box.session.euid(); auid = box.session.uid() end --- ... _ = box.session.on_access_denied(access_denied_trigger) --- ... _ = box.session.on_access_denied(uid) --- ... s = box.schema.space.create('admin_space', {engine="vinyl"}) --- ... seq = box.schema.sequence.create('test_sequence') --- ... index = s:create_index('primary', {type = 'tree', parts = {1, 'unsigned'}}) --- ... box.schema.user.create('test_user', {password="pass"}) --- ... box.session.su("test_user") --- ... s:select{} --- - error: Read access to space 'admin_space' is denied for user 'test_user' ... obj_type, obj_name, op_type --- - space - admin_space - Read ... euid, auid --- - 32 - 32 ... seq:set(1) --- - error: Write access to sequence 'test_sequence' is denied for user 'test_user' ... 
obj_type, obj_name, op_type --- - sequence - test_sequence - Write ... euid, auid --- - 32 - 32 ... box.session.su("admin") --- ... c = (require 'net.box').connect(LISTEN.host, LISTEN.service, {user="test_user", password="pass"}) --- ... function func() end --- ... st, e = pcall(c.call, c, func) --- ... obj_type, op_type --- - function - Execute ... euid, auid --- - 32 - 32 ... obj_name:match("function") --- - function ... box.schema.user.revoke("test_user", "usage", "universe") --- ... box.session.su("test_user") --- ... st, e = pcall(s.select, s, {}) --- ... e = e:unpack() --- ... e.type, e.access_type, e.object_type, e.message --- - AccessDeniedError - Usage - universe - Usage access to universe '' is denied for user 'test_user' ... obj_type, obj_name, op_type --- - universe - - Usage ... euid, auid --- - 32 - 32 ... box.session.su("admin") --- ... box.schema.user.revoke("test_user", "session", "universe") --- ... c = (require 'net.box').connect(LISTEN.host, LISTEN.service, {user="test_user", password="pass"}) --- ... obj_type, obj_name, op_type --- - universe - - Session ... euid, auid --- - 0 - 0 ... box.session.on_access_denied(nil, access_denied_trigger) --- ... box.session.on_access_denied(nil, uid) --- ... box.schema.user.drop("test_user") --- ... seq:drop() --- ... s:drop() --- ... -- -- gh-945 create, drop, alter privileges -- box.schema.user.create("tester") --- ... s = box.schema.space.create("test") --- ... u = box.schema.user.create("test") --- ... f = box.schema.func.create("test") --- ... box.schema.user.grant("tester", "read,execute", "universe") --- ... -- failed create box.session.su("tester", box.schema.space.create, "test_space") --- - error: Write access to space '_schema' is denied for user 'tester' ... box.session.su("tester", box.schema.user.create, 'test_user') --- - error: Write access to space '_user' is denied for user 'tester' ... 
box.session.su("tester", box.schema.func.create, 'test_func') --- - error: Write access to space '_func' is denied for user 'tester' ... -- -- FIXME 2.0: we still need to grant 'write' on universe -- explicitly since we still use process_rw to write to system -- tables from ddl -- box.schema.user.grant("tester", "create,write", "universe") --- ... -- successful create s1 = box.session.su("tester", box.schema.space.create, "test_space") --- ... _ = box.session.su("tester", box.schema.user.create, 'test_user') --- ... _ = box.session.su("tester", box.schema.func.create, 'test_func') --- ... -- successful drop of owned objects _ = box.session.su("tester", s1.drop, s1) --- ... _ = box.session.su("tester", box.schema.user.drop, 'test_user') --- ... _ = box.session.su("tester", box.schema.func.drop, 'test_func') --- ... -- failed alter -- box.session.su("tester", s.format, s, {name="id", type="unsigned"}) -- box.schema.user.grant("tester", "alter", "universe") -- successful alter -- box.session.su("tester", s.format, s, {name="id", type="unsigned"}) -- failed drop -- box.session.su("tester", s.drop, s) -- can't use here sudo -- because drop use sudo inside -- and currently sudo can't be performed nested box.session.su("tester") --- ... box.schema.user.drop("test") --- - error: Revoke access to role 'public' is denied for user 'tester' ... box.session.su("admin") --- ... box.session.su("tester", box.schema.func.drop, "test") --- - error: Drop access to function 'test' is denied for user 'tester' ... box.schema.user.grant("tester", "drop", "universe") --- ... -- successful drop box.session.su("tester", s.drop, s) --- ... box.session.su("tester", box.schema.user.drop, "test") --- ... box.session.su("tester", box.schema.func.drop, "test") --- ... box.session.su("admin") --- ... box.schema.user.drop("tester") --- ... 
-- gh-3146 gotcha for granting universe with options box.schema.user.grant("guest", "read", "universe", {if_not_exists = true}) --- - error: Illegal parameters, wrong object name type ... box.schema.user.grant("guest", "read", "universe", "useless name") --- ... box.schema.user.grant("guest", "read", "universe", "useless name", {if_not_exists = true}) --- ... box.schema.user.grant("guest", "read", "universe", 0, {if_not_exists = true}) --- ... box.schema.user.grant("guest", "read", "universe", nil, {if_not_exists = true}) --- ... box.schema.user.grant("guest", "read", "universe", {}, {if_not_exists = true}) --- - error: Illegal parameters, wrong object name type ... box.schema.user.revoke("guest", "read", "universe", {if_exists = true}) --- - error: Illegal parameters, wrong object name type ... box.schema.user.revoke("guest", "read", "universe", "useless name") --- ... box.schema.user.revoke("guest", "read", "universe", "useless name", {if_exists = true}) --- ... box.schema.user.revoke("guest", "read", "universe", 0, {if_exists = true}) --- ... box.schema.user.revoke("guest", "read", "universe", nil, {if_exists = true}) --- ... box.schema.user.revoke("guest", "read", "universe", {}, {if_exists = true}) --- - error: Illegal parameters, wrong object name type ... tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_point.result0000664000000000000000000000253413306560010021545 0ustar rootroots = box.schema.space.create('spatial') --- ... _ = s:create_index('primary') --- ... _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) --- ... s:insert{1,{0,0}} --- - [1, [0, 0]] ... s:insert{2,{0,10}} --- - [2, [0, 10]] ... s:insert{3,{0,50}} --- - [3, [0, 50]] ... s:insert{4,{10,0}} --- - [4, [10, 0]] ... s:insert{5,{50,0}} --- - [5, [50, 0]] ... s:insert{6,{10,10}} --- - [6, [10, 10]] ... s:insert{7,{10,50}} --- - [7, [10, 50]] ... s:insert{8,{50,10}} --- - [8, [50, 10]] ... s:insert{9,{50,50}} --- - [9, [50, 50]] ... 
-- select all records s.index.spatial:select({}, {iterator = 'ALL'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [3, [0, 50]] - [4, [10, 0]] - [5, [50, 0]] - [6, [10, 10]] - [7, [10, 50]] - [8, [50, 10]] - [9, [50, 50]] ... -- select records belonging to rectangle (0,0,10,10) s.index.spatial:select({0,0,10,10}, {iterator = 'LE'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [4, [10, 0]] - [6, [10, 10]] ... -- select records with coordinates (10,10) s.index.spatial:select({10,10}, {iterator = 'EQ'}) --- - - [6, [10, 10]] ... -- select neighbors of point (5,5) s.index.spatial:select({5,5}, {iterator = 'NEIGHBOR'}) --- - - [1, [0, 0]] - [2, [0, 10]] - [4, [10, 0]] - [6, [10, 10]] - [3, [0, 50]] - [5, [50, 0]] - [7, [10, 50]] - [8, [50, 10]] - [9, [50, 50]] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/ddl.result0000664000000000000000000003124013306565107017766 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... fiber = require'fiber' --- ... -- simple test for parallel ddl execution _ = box.schema.space.create('test'):create_index('pk') --- ... ch = fiber.channel(2) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function f1() box.space.test:create_index('sec', {parts = {2, 'num'}}) ch:put(true) end; --- ... function f2() box.space.test:create_index('third', {parts = {3, 'string'}}) ch:put(true) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... _ = {fiber.create(f1), fiber.create(f2)} --- ... ch:get() --- - true ... ch:get() --- - true ... _ = box.space.test:drop() --- ... test_run:cmd('restart server default') env = require('test_run') --- ... test_run = env.new() --- ... fiber = require'fiber' --- ... ch = fiber.channel(2) --- ... --issue #928 space = box.schema.space.create('test_trunc') --- ... _ = space:create_index('pk') --- ... _ = box.space.test_trunc:create_index('i1', {type = 'hash', parts = {2, 'STR'}}) --- ... _ = box.space.test_trunc:create_index('i2', {type = 'hash', parts = {2, 'STR'}}) --- ... 
function test_trunc() space:truncate() ch:put(true) end --- ... _ = {fiber.create(test_trunc), fiber.create(test_trunc)} --- ... _ = {ch:get(), ch:get()} --- ... space:drop() --- ... -- index should not crash after alter space = box.schema.space.create('test_swap') --- ... index = space:create_index('pk') --- ... space:replace({1, 2, 3}) --- - [1, 2, 3] ... index:rename('primary') --- ... index2 = space:create_index('sec') --- ... space:replace({2, 3, 1}) --- - [2, 3, 1] ... space:select() --- - - [1, 2, 3] - [2, 3, 1] ... space:drop() --- ... ch = fiber.channel(3) --- ... _ = box.schema.space.create('test'):create_index('pk') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function add_index() box.space.test:create_index('sec', {parts = {2, 'num'}}) ch:put(true) end; --- ... function insert_tuple(tuple) ch:put({pcall(box.space.test.replace, box.space.test, tuple)}) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... _ = {fiber.create(insert_tuple, {1, 2, 'a'}), fiber.create(add_index), fiber.create(insert_tuple, {2, '3', 'b'})} --- ... {ch:get(), ch:get(), ch:get()} --- - - - false - 'Tuple field 2 type does not match one required by operation: expected unsigned' - - true - [1, 2, 'a'] - true ... box.space.test:select() --- - - [1, 2, 'a'] ... test_run:cmd('restart server default') box.space.test:select() --- - - [1, 2, 'a'] ... box.space.test:drop() --- ... -- gh-2336 crash if format called twice during snapshot fiber = require'fiber' --- ... space = box.schema.space.create('test_format') --- ... _ = space:create_index('pk', { parts = { 1,'str' }}) --- ... space:format({{ name ="key"; type = "string" }, { name ="dataAB"; type = "string" }}) --- ... str = string.rep("t",1024) --- ... for i = 1, 10000 do space:insert{tostring(i), str} end --- ... ch = fiber.channel(3) --- ... _ = fiber.create(function() fiber.yield() box.snapshot() ch:put(true) end) --- ... format = {{name ="key"; type = "string"}, {name ="data"; type = "string"}} --- ... 
for i = 1, 2 do fiber.create(function() fiber.yield() space:format(format) ch:put(true) end) end --- ... {ch:get(), ch:get(), ch:get()} --- - - true - true - true ... space:drop() --- ... -- collation function setmap(table) return setmetatable(table, { __serialize = 'map' }) end --- ... box.internal.collation.create('test') --- - error: Illegal parameters, type (second arg) must be a string ... box.internal.collation.create('test', 'ICU') --- - error: Illegal parameters, locale (third arg) must be a string ... box.internal.collation.create(42, 'ICU', 'ru_RU') --- - error: Illegal parameters, name (first arg) must be a string ... box.internal.collation.create('test', 42, 'ru_RU') --- - error: Illegal parameters, type (second arg) must be a string ... box.internal.collation.create('test', 'ICU', 42) --- - error: Illegal parameters, locale (third arg) must be a string ... box.internal.collation.create('test', 'nothing', 'ru_RU') --- - error: 'Failed to initialize collation: unknown collation type.' ... box.internal.collation.create('test', 'ICU', 'ru_RU', setmap{}) --ok --- ... box.internal.collation.create('test', 'ICU', 'ru_RU') --- - error: Duplicate key exists in unique index 'name' in space '_collation' ... box.internal.collation.drop('test') --- ... box.internal.collation.drop('nothing') -- allowed --- ... box.internal.collation.create('test', 'ICU', 'ru_RU', 42) --- - error: Illegal parameters, options (fourth arg) must be a table or nil ... box.internal.collation.create('test', 'ICU', 'ru_RU', 'options') --- - error: Illegal parameters, options (fourth arg) must be a table or nil ... box.internal.collation.create('test', 'ICU', 'ru_RU', {ping='pong'}) --- - error: 'Wrong collation options (field 5): unexpected option ''ping''' ... box.internal.collation.create('test', 'ICU', 'ru_RU', {french_collation='german'}) --- - error: 'Failed to initialize collation: ICU wrong french_collation option setting, expected ON | OFF.' ... 
box.internal.collation.create('test', 'ICU', 'ru_RU', {french_collation='on'}) --ok --- ... box.internal.collation.drop('test') --ok --- ... box.internal.collation.create('test', 'ICU', 'ru_RU', {strength='supervillian'}) --- - error: 'Failed to initialize collation: ICU wrong strength option setting, expected PRIMARY | SECONDARY | TERTIARY | QUATERNARY | IDENTICAL.' ... box.internal.collation.create('test', 'ICU', 'ru_RU', {strength=42}) --- - error: 'Wrong collation options (field 5): ''strength'' must be enum' ... box.internal.collation.create('test', 'ICU', 'ru_RU', {strength=2}) --ok --- - error: 'Wrong collation options (field 5): ''strength'' must be enum' ... box.internal.collation.drop('test') --ok --- ... box.internal.collation.create('test', 'ICU', 'ru_RU', {strength='primary'}) --ok --- ... box.internal.collation.drop('test') --ok --- ... box.begin() box.internal.collation.create('test2', 'ICU', 'ru_RU') --- - error: Space _collation does not support multi-statement transactions ... box.rollback() --- ... box.internal.collation.create('test', 'ICU', 'ru_RU') --- ... box.internal.collation.exists('test') --- - true ... test_run:cmd('restart server default') function setmap(table) return setmetatable(table, { __serialize = 'map' }) end --- ... box.internal.collation.exists('test') --- - true ... box.internal.collation.drop('test') --- ... box.space._collation:auto_increment{'test'} --- - error: Tuple field count 2 is less than required by space format or defined indexes (expected at least 6) ... box.space._collation:auto_increment{'test', 0, 'ICU'} --- - error: Tuple field count 4 is less than required by space format or defined indexes (expected at least 6) ... box.space._collation:auto_increment{'test', 'ADMIN', 'ICU', 'ru_RU'} --- - error: Tuple field count 5 is less than required by space format or defined indexes (expected at least 6) ... 
box.space._collation:auto_increment{42, 0, 'ICU', 'ru_RU'} --- - error: Tuple field count 5 is less than required by space format or defined indexes (expected at least 6) ... box.space._collation:auto_increment{'test', 0, 42, 'ru_RU'} --- - error: Tuple field count 5 is less than required by space format or defined indexes (expected at least 6) ... box.space._collation:auto_increment{'test', 0, 'ICU', 42} --- - error: Tuple field count 5 is less than required by space format or defined indexes (expected at least 6) ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', setmap{}} --ok --- - [3, 'test', 0, 'ICU', 'ru_RU', {}] ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', setmap{}} --- - error: Duplicate key exists in unique index 'name' in space '_collation' ... box.space._collation.index.name:delete{'test'} -- ok --- - [3, 'test', 0, 'ICU', 'ru_RU', {}] ... box.space._collation.index.name:delete{'nothing'} -- allowed --- ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', 42} --- - error: 'Tuple field 6 type does not match one required by operation: expected map' ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', 'options'} --- - error: 'Tuple field 6 type does not match one required by operation: expected map' ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', {ping='pong'}} --- - error: 'Wrong collation options (field 5): unexpected option ''ping''' ... opts = {normalization_mode='NORMAL'} --- ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --- - error: 'Failed to initialize collation: ICU wrong normalization_mode option setting, expected ON | OFF.' ... opts.normalization_mode = 'OFF' --- ... _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} -- ok --- ... _ = box.space._collation.index.name:delete{'test'} -- ok --- ... opts.numeric_collation = 'PERL' --- ... 
box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --- - error: 'Failed to initialize collation: ICU wrong numeric_collation option setting, expected ON | OFF.' ... opts.numeric_collation = 'ON' --- ... _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok --- ... _ = box.space._collation.index.name:delete{'test'} -- ok --- ... opts.alternate_handling1 = 'ON' --- ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --- - error: 'Wrong collation options (field 5): unexpected option ''alternate_handling1''' ... opts.alternate_handling1 = nil --- ... opts.alternate_handling = 'ON' --- ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --- - error: 'Failed to initialize collation: ICU wrong alternate_handling option setting, expected NON_IGNORABLE | SHIFTED.' ... opts.alternate_handling = 'SHIFTED' --- ... _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok --- ... _ = box.space._collation.index.name:delete{'test'} -- ok --- ... opts.case_first = 'ON' --- ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --- - error: 'Failed to initialize collation: ICU wrong case_first option setting, expected OFF | UPPER_FIRST | LOWER_FIRST.' ... opts.case_first = 'OFF' --- ... _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok --- ... _ = box.space._collation.index.name:delete{'test'} -- ok --- ... opts.case_level = 'UPPER' --- ... box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --- - error: 'Failed to initialize collation: ICU wrong case_level option setting, expected ON | OFF.' ... opts.case_level = 'DEFAULT' --- ... _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok --- - error: 'Failed to initialize collation: ICU wrong case_level option setting, expected ON | OFF.' ... _ = box.space._collation.index.name:delete{'test'} -- ok --- ... 
box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', setmap{}} --- - [3, 'test', 0, 'ICU', 'ru_RU', {}] ... box.space._collation:select{} --- - - [1, 'unicode', 1, 'ICU', '', {}] - [2, 'unicode_ci', 1, 'ICU', '', {'strength': 'primary'}] - [3, 'test', 0, 'ICU', 'ru_RU', {}] ... test_run:cmd('restart server default') box.space._collation:select{} --- - - [1, 'unicode', 1, 'ICU', '', {}] - [2, 'unicode_ci', 1, 'ICU', '', {'strength': 'primary'}] - [3, 'test', 0, 'ICU', 'ru_RU', {}] ... box.space._collation.index.name:delete{'test'} --- - [3, 'test', 0, 'ICU', 'ru_RU', {}] ... -- -- gh-2839: allow to store custom fields in field definition. -- format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {'field2', 'unsigned'} --- ... format[3] = {'field3', 'unsigned', custom_field = 'custom_value'} --- ... s = box.schema.create_space('test', {format = format}) --- ... s:format()[3].custom_field --- - custom_value ... s:drop() --- ... -- -- gh-2783 -- A ddl operation shoud fail before trying to lock a ddl latch -- in a multi-statement transaction. -- If operation tries to lock already an locked latch then the -- current transaction will be silently rolled back under our feet. -- This is confusing. So check for multi-statement transaction -- before locking the latch. -- test_latch = box.schema.space.create('test_latch') --- ... _ = test_latch:create_index('primary', {unique = true, parts = {1, 'unsigned'}}) --- ... fiber = require('fiber') --- ... c = fiber.channel(1) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... _ = fiber.create(function() test_latch:create_index("sec", {unique = true, parts = {2, 'unsigned'}}) c:put(true) end); --- ... box.begin() test_latch:create_index("sec2", {unique = true, parts = {2, 'unsigned'}}) box.commit(); --- - error: DDL does not support multi-statement transactions ... test_run:cmd("setopt delimiter ''"); --- - true ... _ = c:get() --- ... 
test_latch:drop() -- this is where everything stops --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/sequence.result0000664000000000000000000006764313306560010021037 0ustar rootroottest_run = require('test_run').new() --- ... -- Options check on create. box.schema.sequence.create('test', {abc = 'abc'}) --- - error: Illegal parameters, unexpected option 'abc' ... box.schema.sequence.create('test', {step = 'a'}) --- - error: Illegal parameters, options parameter 'step' should be of type number ... box.schema.sequence.create('test', {min = 'b'}) --- - error: Illegal parameters, options parameter 'min' should be of type number ... box.schema.sequence.create('test', {max = 'c'}) --- - error: Illegal parameters, options parameter 'max' should be of type number ... box.schema.sequence.create('test', {start = true}) --- - error: Illegal parameters, options parameter 'start' should be of type number ... box.schema.sequence.create('test', {cycle = 123}) --- - error: Illegal parameters, options parameter 'cycle' should be of type boolean ... box.schema.sequence.create('test', {name = 'test'}) --- - error: Illegal parameters, unexpected option 'name' ... box.schema.sequence.create('test', {step = 0}) --- - error: 'Failed to create sequence ''test'': step option must be non-zero' ... box.schema.sequence.create('test', {min = 10, max = 1}) --- - error: 'Failed to create sequence ''test'': max must be greater than or equal to min' ... box.schema.sequence.create('test', {min = 10, max = 20, start = 1}) --- - error: 'Failed to create sequence ''test'': start must be between min and max' ... -- Options check on alter. _ = box.schema.sequence.create('test') --- ... box.schema.sequence.alter('test', {abc = 'abc'}) --- - error: Illegal parameters, unexpected option 'abc' ... box.schema.sequence.alter('test', {step = 'a'}) --- - error: Illegal parameters, options parameter 'step' should be of type number ... 
box.schema.sequence.alter('test', {min = 'b'}) --- - error: Illegal parameters, options parameter 'min' should be of type number ... box.schema.sequence.alter('test', {max = 'c'}) --- - error: Illegal parameters, options parameter 'max' should be of type number ... box.schema.sequence.alter('test', {start = true}) --- - error: Illegal parameters, options parameter 'start' should be of type number ... box.schema.sequence.alter('test', {cycle = 123}) --- - error: Illegal parameters, options parameter 'cycle' should be of type boolean ... box.schema.sequence.alter('test', {name = 'test'}) --- ... box.schema.sequence.alter('test', {if_not_exists = false}) --- - error: Illegal parameters, unexpected option 'if_not_exists' ... box.schema.sequence.alter('test', {step = 0}) --- - error: 'Can''t modify sequence ''test'': step option must be non-zero' ... box.schema.sequence.alter('test', {min = 10, max = 1}) --- - error: 'Can''t modify sequence ''test'': max must be greater than or equal to min' ... box.schema.sequence.alter('test', {min = 10, max = 20, start = 1}) --- - error: 'Can''t modify sequence ''test'': start must be between min and max' ... box.schema.sequence.drop('test') --- ... -- Duplicate name. sq1 = box.schema.sequence.create('test') --- ... box.schema.sequence.create('test') --- - error: Sequence 'test' already exists ... sq2, msg = box.schema.sequence.create('test', {if_not_exists = true}) --- ... sq1 == sq2, msg --- - true - not created ... _ = box.schema.sequence.create('test2') --- ... box.schema.sequence.alter('test2', {name = 'test'}) --- - error: Duplicate key exists in unique index 'name' in space '_sequence' ... box.schema.sequence.drop('test2') --- ... box.schema.sequence.drop('test') --- ... -- Check that box.sequence gets updated. sq = box.schema.sequence.create('test') --- ... box.sequence.test == sq --- - true ... sq.step --- - 1 ... sq:alter{step = 2} --- ... box.sequence.test == sq --- - true ... sq.step --- - 2 ... sq:drop() --- ... 
box.sequence.test == nil --- - true ... -- Attempt to delete a sequence that has a record in _sequence_data. sq = box.schema.sequence.create('test') --- ... sq:next() --- - 1 ... box.space._sequence:delete(sq.id) --- - error: 'Can''t drop sequence ''test'': the sequence has data' ... box.space._sequence_data:delete(sq.id) --- - [1, 1] ... box.space._sequence:delete(sq.id) --- - [1, 1, 'test', 1, 1, 9223372036854775807, 1, 0, false] ... box.sequence.test == nil --- - true ... -- Default ascending sequence. sq = box.schema.sequence.create('test') --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - 1 - 1 - 9223372036854775807 - 1 - false ... sq:next() -- 1 --- - 1 ... sq:next() -- 2 --- - 2 ... sq:set(100) --- ... sq:next() -- 101 --- - 101 ... sq:next() -- 102 --- - 102 ... sq:reset() --- ... sq:next() -- 1 --- - 1 ... sq:next() -- 2 --- - 2 ... sq:drop() --- ... -- Default descending sequence. sq = box.schema.sequence.create('test', {step = -1}) --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - -1 - -9223372036854775808 - -1 - -1 - false ... sq:next() -- -1 --- - -1 ... sq:next() -- -2 --- - -2 ... sq:set(-100) --- ... sq:next() -- -101 --- - -101 ... sq:next() -- -102 --- - -102 ... sq:reset() --- ... sq:next() -- -1 --- - -1 ... sq:next() -- -2 --- - -2 ... sq:drop() --- ... -- Custom min/max. sq = box.schema.sequence.create('test', {min = 10}) --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - 1 - 10 - 9223372036854775807 - 10 - false ... sq:next() -- 10 --- - 10 ... sq:next() -- 11 --- - 11 ... sq:drop() --- ... sq = box.schema.sequence.create('test', {step = -1, max = 20}) --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - -1 - -9223372036854775808 - 20 - 20 - false ... sq:next() -- 20 --- - 20 ... sq:next() -- 19 --- - 19 ... sq:drop() --- ... -- Custom start value. sq = box.schema.sequence.create('test', {start = 1000}) --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - 1 - 1 - 9223372036854775807 - 1000 - false ... 
sq:next() -- 1000 --- - 1000 ... sq:next() -- 1001 --- - 1001 ... sq:reset() --- ... sq:next() -- 1000 --- - 1000 ... sq:next() -- 1001 --- - 1001 ... sq:drop() --- ... -- Overflow and cycle. sq = box.schema.sequence.create('test', {max = 2}) --- ... sq:next() -- 1 --- - 1 ... sq:next() -- 2 --- - 2 ... sq:next() -- error --- - error: Sequence 'test' has overflowed ... sq:alter{cycle = true} --- ... sq:next() -- 1 --- - 1 ... sq:next() -- 2 --- - 2 ... sq:next() -- 1 --- - 1 ... sq:alter{step = 2} --- ... sq:next() -- 1 --- - 1 ... sq:alter{cycle = false} --- ... sq:next() -- error --- - error: Sequence 'test' has overflowed ... sq:drop() --- ... -- Setting sequence value outside boundaries. sq = box.schema.sequence.create('test') --- ... sq:alter{step = 1, min = 1, max = 10} --- ... sq:set(-100) --- ... sq:next() -- 1 --- - 1 ... sq:set(100) --- ... sq:next() -- error --- - error: Sequence 'test' has overflowed ... sq:reset() --- ... sq:next() -- 1 --- - 1 ... sq:alter{min = 5, start = 5} --- ... sq:next() -- 5 --- - 5 ... sq:reset() --- ... sq:alter{step = -1, min = 1, max = 10, start = 10} --- ... sq:set(100) --- ... sq:next() -- 10 --- - 10 ... sq:set(-100) --- ... sq:next() -- error --- - error: Sequence 'test' has overflowed ... sq:reset() --- ... sq:next() -- 10 --- - 10 ... sq:alter{max = 5, start = 5} --- ... sq:next() -- 5 --- - 5 ... sq:drop() --- ... -- number64 arguments. INT64_MIN = tonumber64('-9223372036854775808') --- ... INT64_MAX = tonumber64('9223372036854775807') --- ... sq = box.schema.sequence.create('test', {step = INT64_MAX, min = INT64_MIN, max = INT64_MAX, start = INT64_MIN}) --- ... sq:next() -- -9223372036854775808 --- - -9223372036854775808 ... sq:next() -- -1 --- - -1 ... sq:next() -- 9223372036854775806 --- - 9223372036854775806 ... sq:next() -- error --- - error: Sequence 'test' has overflowed ... sq:alter{step = INT64_MIN, start = INT64_MAX} --- ... sq:reset() --- ... sq:next() -- 9223372036854775807 --- - 9223372036854775807 ... 
sq:next() -- -1 --- - -1 ... sq:next() -- error --- - error: Sequence 'test' has overflowed ... sq:drop() --- ... -- Using in a transaction. s = box.schema.space.create('test') --- ... _ = s:create_index('pk') --- ... sq1 = box.schema.sequence.create('sq1', {step = 1}) --- ... sq2 = box.schema.sequence.create('sq2', {step = -1}) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} box.rollback(); --- ... box.begin() s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} s:insert{sq1:next(), sq2:next()} box.commit(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s:select() -- [4, -4], [5, -5], [6, -6] --- - - [4, -4] - [5, -5] - [6, -6] ... sq1:drop() --- ... sq2:drop() --- ... s:drop() --- ... -- -- Attaching a sequence to a space. -- -- Index create/modify checks. s = box.schema.space.create('test') --- ... sq = box.schema.sequence.create('test') --- ... sq:set(123) --- ... s:create_index('pk', {parts = {1, 'string'}, sequence = 'test'}) -- error --- - error: 'Can''t create or modify index ''pk'' in space ''test'': sequence cannot be used with a non-integer key' ... s:create_index('pk', {parts = {1, 'scalar'}, sequence = 'test'}) -- error --- - error: 'Can''t create or modify index ''pk'' in space ''test'': sequence cannot be used with a non-integer key' ... s:create_index('pk', {parts = {1, 'number'}, sequence = 'test'}) -- error --- - error: 'Can''t create or modify index ''pk'' in space ''test'': sequence cannot be used with a non-integer key' ... pk = s:create_index('pk', {parts = {1, 'integer'}, sequence = 'test'}) -- ok --- ... pk:drop() --- ... pk = s:create_index('pk', {parts = {1, 'unsigned'}, sequence = 'test'}) -- ok --- ... pk:drop() --- ... pk = s:create_index('pk') -- ok --- ... 
s:create_index('secondary', {parts = {2, 'unsigned'}, sequence = 'test'}) -- error --- - error: 'Can''t create or modify index ''secondary'' in space ''test'': sequence cannot be used with a secondary key' ... s:create_index('secondary', {parts = {2, 'unsigned'}, sequence = true}) -- error --- - error: 'Can''t create or modify index ''secondary'' in space ''test'': sequence cannot be used with a secondary key' ... sk = s:create_index('secondary', {parts = {2, 'unsigned'}}) -- ok --- ... sk:alter{sequence = 'test'} -- error --- - error: 'Can''t create or modify index ''secondary'' in space ''test'': sequence cannot be used with a secondary key' ... sk:alter{sequence = true} -- error --- - error: 'Can''t create or modify index ''secondary'' in space ''test'': sequence cannot be used with a secondary key' ... sk:alter{parts = {2, 'string'}} -- ok --- ... sk:alter{sequence = false} -- ok (ignored) --- ... pk:alter{sequence = 'test'} -- ok --- ... s.index.pk.sequence_id == sq.id --- - true ... sk:alter{sequence = 'test'} -- error --- - error: 'Can''t create or modify index ''secondary'' in space ''test'': sequence cannot be used with a secondary key' ... sk:alter{sequence = true} -- error --- - error: 'Can''t create or modify index ''secondary'' in space ''test'': sequence cannot be used with a secondary key' ... sk:alter{parts = {2, 'unsigned'}} -- ok --- ... sk:alter{sequence = false} -- ok (ignored) --- ... s.index.pk.sequence_id == sq.id --- - true ... sk:drop() --- ... s.index.pk.sequence_id == sq.id --- - true ... pk:drop() --- ... pk = s:create_index('pk', {parts = {1, 'unsigned'}, sequence = 'test'}) -- ok --- ... pk:alter{parts = {1, 'string'}} -- error --- - error: 'Can''t create or modify index ''pk'' in space ''test'': sequence cannot be used with a non-integer key' ... 
box.space._index:update({s.id, pk.id}, {{'=', 6, {{0, 'string'}}}}) -- error --- - error: 'Can''t create or modify index ''pk'' in space ''test'': sequence cannot be used with a non-integer key' ... box.space._index:delete{s.id, pk.id} -- error --- - error: 'Can''t modify space ''test'': can not drop primary key while space sequence exists' ... pk:alter{parts = {1, 'string'}, sequence = false} -- ok --- ... sk = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... sk:alter{sequence = 'test'} -- error --- - error: 'Can''t create or modify index ''sk'' in space ''test'': sequence cannot be used with a secondary key' ... box.space._space_sequence:insert{s.id, sq.id, false} -- error --- - error: 'Can''t create or modify index ''pk'' in space ''test'': sequence cannot be used with a non-integer key' ... sk:drop() --- ... pk:drop() --- ... box.space._space_sequence:insert{s.id, sq.id, false} -- error --- - error: 'No index #0 is defined in space ''test''' ... s:create_index('pk', {sequence = {}}) -- error --- - error: 'Illegal parameters, options parameter ''sequence'' should be one of types: boolean, number, string' ... s:create_index('pk', {sequence = 'abc'}) -- error --- - error: Sequence 'abc' does not exist ... s:create_index('pk', {sequence = 12345}) -- error --- - error: Sequence '12345' does not exist ... pk = s:create_index('pk', {sequence = 'test'}) -- ok --- ... s.index.pk.sequence_id == sq.id --- - true ... pk:drop() --- ... pk = s:create_index('pk', {sequence = sq.id}) -- ok --- ... s.index.pk.sequence_id == sq.id --- - true ... pk:drop() --- ... pk = s:create_index('pk', {sequence = false}) -- ok --- ... s.index.pk.sequence_id == nil --- - true ... pk:alter{sequence = {}} -- error --- - error: 'Illegal parameters, options parameter ''sequence'' should be one of types: boolean, number, string' ... pk:alter{sequence = 'abc'} -- error --- - error: Sequence 'abc' does not exist ... 
pk:alter{sequence = 12345} -- error --- - error: Sequence '12345' does not exist ... pk:alter{sequence = 'test'} -- ok --- ... s.index.pk.sequence_id == sq.id --- - true ... pk:alter{sequence = sq.id} -- ok --- ... s.index.pk.sequence_id == sq.id --- - true ... pk:alter{sequence = false} -- ok --- ... s.index.pk.sequence_id == nil --- - true ... pk:drop() --- ... sq:next() -- 124 --- - 124 ... sq:drop() --- ... s:drop() --- ... -- Using a sequence for auto increment. sq = box.schema.sequence.create('test') --- ... s1 = box.schema.space.create('test1') --- ... _ = s1:create_index('pk', {parts = {1, 'unsigned'}, sequence = 'test'}) --- ... s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk', {parts = {2, 'integer'}, sequence = 'test'}) --- ... s3 = box.schema.space.create('test3') --- ... _ = s3:create_index('pk', {parts = {2, 'unsigned', 1, 'string'}, sequence = 'test'}) --- ... s1:insert(box.tuple.new(nil)) -- 1 --- - [1] ... s2:insert(box.tuple.new('a', nil)) -- 2 --- - ['a', 2] ... s3:insert(box.tuple.new('b', nil)) -- 3 --- - ['b', 3] ... s1:truncate() --- ... s2:truncate() --- ... s3:truncate() --- ... s1:insert{nil, 123, 456} -- 4 --- - [4, 123, 456] ... s2:insert{'c', nil, 123} -- 5 --- - ['c', 5, 123] ... s3:insert{'d', nil, 456} -- 6 --- - ['d', 6, 456] ... sq:next() -- 7 --- - 7 ... sq:reset() --- ... s1:insert{nil, nil, 'aa'} -- 1 --- - [1, null, 'aa'] ... s2:insert{'bb', nil, nil, 'cc'} -- 2 --- - ['bb', 2, null, 'cc'] ... s3:insert{'dd', nil, nil, 'ee'} -- 3 --- - ['dd', 3, null, 'ee'] ... sq:next() -- 4 --- - 4 ... sq:set(100) --- ... s1:insert{nil, 'aaa', 1} -- 101 --- - [101, 'aaa', 1] ... s2:insert{'bbb', nil, 2} -- 102 --- - ['bbb', 102, 2] ... s3:insert{'ccc', nil, 3} -- 103 --- - ['ccc', 103, 3] ... sq:next() -- 104 --- - 104 ... s1:insert{1000, 'xxx'} --- - [1000, 'xxx'] ... sq:next() -- 1001 --- - 1001 ... s2:insert{'yyy', 2000} --- - ['yyy', 2000] ... sq:next() -- 2001 --- - 2001 ... 
s3:insert{'zzz', 3000} --- - ['zzz', 3000] ... sq:next() -- 3001 --- - 3001 ... s1:insert{500, 'xxx'} --- - [500, 'xxx'] ... s3:insert{'zzz', 2500} --- - ['zzz', 2500] ... s2:insert{'yyy', 1500} --- - ['yyy', 1500] ... sq:next() -- 3002 --- - 3002 ... sq:drop() -- error --- - error: 'Can''t drop sequence ''test'': the sequence is in use' ... s1:drop() --- ... sq:drop() -- error --- - error: 'Can''t drop sequence ''test'': the sequence is in use' ... s2:drop() --- ... sq:drop() -- error --- - error: 'Can''t drop sequence ''test'': the sequence is in use' ... s3:drop() --- ... sq:drop() -- ok --- ... -- Automatically generated sequences. s = box.schema.space.create('test') --- ... sq = box.schema.sequence.create('test') --- ... sq:set(123) --- ... pk = s:create_index('pk', {sequence = true}) --- ... sk = s:create_index('sk', {parts = {2, 'string'}}) --- ... sq = box.sequence.test_seq --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - 1 - 1 - 9223372036854775807 - 1 - false ... s.index.pk.sequence_id == sq.id --- - true ... s:insert{nil, 'a'} -- 1 --- - [1, 'a'] ... s:insert{nil, 'b'} -- 2 --- - [2, 'b'] ... s:insert{nil, 'c'} -- 3 --- - [3, 'c'] ... sq:next() -- 4 --- - 4 ... pk:alter{sequence = false} --- ... s.index.pk.sequence_id == nil --- - true ... s:insert{nil, 'x'} -- error --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... box.sequence.test_seq == nil --- - true ... pk:alter{sequence = true} --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - 1 - 1 - 9223372036854775807 - 1 - false ... sq = box.sequence.test_seq --- ... s.index.pk.sequence_id == sq.id --- - true ... s:insert{100, 'abc'} --- - [100, 'abc'] ... s:insert{nil, 'cda'} -- 101 --- - [101, 'cda'] ... sq:next() -- 102 --- - 102 ... pk:alter{sequence = 'test'} --- ... s.index.pk.sequence_id == box.sequence.test.id --- - true ... box.sequence.test_seq == nil --- - true ... pk:alter{sequence = true} --- ... 
s.index.pk.sequence_id == box.sequence.test_seq.id --- - true ... sk:drop() --- ... pk:drop() --- ... box.sequence.test_seq == nil --- - true ... pk = s:create_index('pk', {sequence = true}) --- ... s.index.pk.sequence_id == box.sequence.test_seq.id --- - true ... s:drop() --- ... box.sequence.test_seq == nil --- - true ... sq = box.sequence.test --- ... sq:next() -- 124 --- - 124 ... sq:drop() --- ... -- Check that generated sequence cannot be attached to another space. s1 = box.schema.space.create('test1') --- ... _ = s1:create_index('pk', {sequence = true}) --- ... s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk', {sequence = 'test1_seq'}) -- error --- - error: 'Can''t modify space ''test2'': can not attach generated sequence' ... box.space._space_sequence:insert{s2.id, box.sequence.test1_seq.id, false} -- error --- - error: 'Can''t modify space ''test2'': can not attach generated sequence' ... s1:drop() --- ... s2:drop() --- ... -- Sequences are compatible with Vinyl spaces. s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {sequence = true}) --- ... s:insert{nil, 'a'} -- 1 --- - [1, 'a'] ... s:insert{100, 'b'} -- 100 --- - [100, 'b'] ... box.begin() --- ... s:insert{nil, 'c'} -- 101 --- - [101, 'c'] ... s:insert{nil, 'd'} -- 102 --- - [102, 'd'] ... box.rollback() --- ... box.begin() --- ... s:insert{nil, 'e'} -- 103 --- - [103, 'e'] ... s:insert{nil, 'f'} -- 104 --- - [104, 'f'] ... box.commit() --- ... s:select() -- {1, 'a'}, {100, 'b'}, {103, 'e'}, {104, 'f'} --- - - [1, 'a'] - [100, 'b'] - [103, 'e'] - [104, 'f'] ... s:drop() --- ... -- -- Check that sequences are persistent. -- s1 = box.schema.space.create('test1') --- ... _ = s1:create_index('pk', {sequence = true}) --- ... s1:insert{nil, 'a'} -- 1 --- - [1, 'a'] ... box.snapshot() --- - ok ... s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk', {sequence = true}) --- ... s2:insert{101, 'aaa'} --- - [101, 'aaa'] ... 
sq = box.schema.sequence.create('test', {step = 2, min = 10, max = 20, start = 15, cycle = true}) --- ... sq:next() --- - 15 ... test_run:cmd('restart server default') sq = box.sequence.test --- ... sq.step, sq.min, sq.max, sq.start, sq.cycle --- - 2 - 10 - 20 - 15 - true ... sq:next() --- - 17 ... sq:drop() --- ... s1 = box.space.test1 --- ... s1.index.pk.sequence_id == box.sequence.test1_seq.id --- - true ... s1:insert{nil, 'b'} -- 2 --- - [2, 'b'] ... s1:drop() --- ... s2 = box.space.test2 --- ... s2.index.pk.sequence_id == box.sequence.test2_seq.id --- - true ... s2:insert{nil, 'bbb'} -- 102 --- - [102, 'bbb'] ... s2:drop() --- ... -- -- Test permission checks. -- -- Sanity checks. box.schema.user.create('user') --- ... -- Setup read permissions for box.schema.user.info() to work. box.schema.user.grant('user', 'read', 'space', '_priv') --- ... box.schema.user.grant('user', 'read', 'space', '_user') --- ... box.schema.user.grant('user', 'read', 'space', '_space') --- ... box.schema.user.grant('user', 'read', 'space', '_sequence') --- ... sq = box.schema.sequence.create('seq') --- ... box.schema.user.grant('user', 'write', 'sequence', 'test') -- error: no such sequence --- - error: Sequence 'test' does not exist ... box.schema.user.grant('user', 'write', 'sequence', 'seq') -- ok --- ... box.space._priv.index.object:select{'sequence'} --- - - [1, 32, 'sequence', 1, 2] ... box.space._sequence:delete(sq.id) -- error: sequence has grants --- - error: 'Can''t drop sequence ''seq'': the sequence has grants' ... sq:drop() -- ok --- ... box.space._priv.index.object:select{'sequence'} --- - [] ... -- Access to a standalone sequence is denied unless -- the user has the corresponding privileges. sq = box.schema.sequence.create('seq') --- ... box.session.su('user') --- ... sq:set(100) -- error --- - error: Write access to sequence 'seq' is denied for user 'user' ... sq:next() -- error --- - error: Write access to sequence 'seq' is denied for user 'user' ... 
sq:reset() -- error --- - error: Write access to sequence 'seq' is denied for user 'user' ... box.session.su('admin') --- ... box.schema.user.grant('user', 'write', 'sequence', 'seq') --- ... box.session.su('user') --- ... box.schema.user.info() --- - - - execute - role - public - - write - sequence - seq - - read - space - _space - - read - space - _sequence - - read - space - _user - - read - space - _priv - - session,usage - universe - ... sq:set(100) -- ok --- ... sq:next() -- ok --- - 101 ... sq:reset() -- ok --- ... box.session.su('admin') --- ... box.schema.user.revoke('user', 'write', 'sequence', 'seq') --- ... -- Check that access via role works. box.schema.role.create('seq_role') --- ... box.schema.role.grant('seq_role', 'write', 'sequence', 'seq') --- ... box.schema.user.grant('user', 'execute', 'role', 'seq_role') --- ... box.session.su('user') --- ... sq:set(100) -- ok --- ... sq:next() -- ok --- - 101 ... sq:reset() -- ok --- ... box.session.su('admin') --- ... box.schema.role.drop('seq_role') --- ... -- Universe access grants access to any sequence. box.schema.user.grant('user', 'write', 'universe') --- ... box.session.su('user') --- ... sq:set(100) -- ok --- ... sq:next() -- ok --- - 101 ... sq:reset() -- ok --- ... box.session.su('admin') --- ... -- A sequence is inaccessible after privileges have been revoked. box.schema.user.revoke('user', 'write', 'universe') --- ... box.session.su('user') --- ... sq:set(100) -- error --- - error: Write access to sequence 'seq' is denied for user 'user' ... sq:next() -- error --- - error: Write access to sequence 'seq' is denied for user 'user' ... sq:reset() -- error --- - error: Write access to sequence 'seq' is denied for user 'user' ... box.session.su('admin') --- ... -- A user cannot alter sequences created by other users. box.schema.user.grant('user', 'read,write', 'universe') --- ... box.session.su('user') --- ... 
sq:alter{step = 2} -- error --- - error: Alter access to sequence 'seq' is denied for user 'user' ... sq:drop() -- error --- - error: Drop access to sequence 'seq' is denied for user 'user' ... box.session.su('admin') --- ... sq:drop() --- ... -- A user can alter/use sequences that he owns. box.session.su('user') --- ... sq = box.schema.sequence.create('seq') --- ... sq:alter{step = 2} -- ok --- ... sq:drop() -- ok --- ... sq = box.schema.sequence.create('seq') --- ... box.session.su('admin') --- ... box.schema.user.revoke('user', 'read,write', 'universe') --- ... box.session.su('user') --- ... sq:set(100) -- ok --- ... sq:next() -- ok --- - 101 ... sq:reset() -- ok --- ... box.session.su('admin') --- ... sq:drop() --- ... -- A sequence can be attached to a space only if the user owns both. sq1 = box.schema.sequence.create('seq1') --- ... s1 = box.schema.space.create('space1') --- ... _ = s1:create_index('pk') --- ... box.schema.user.grant('user', 'read,write', 'universe') --- ... box.session.su('user') --- ... sq2 = box.schema.sequence.create('seq2') --- ... s2 = box.schema.space.create('space2') --- ... _ = s2:create_index('pk', {sequence = 'seq1'}) -- error --- - error: Create access to sequence 'seq1' is denied for user 'user' ... s1.index.pk:alter({sequence = 'seq1'}) -- error --- - error: Create access to sequence 'seq1' is denied for user 'user' ... box.space._space_sequence:replace{s1.id, sq1.id, false} -- error --- - error: Create access to sequence 'seq1' is denied for user 'user' ... box.space._space_sequence:replace{s1.id, sq2.id, false} -- error --- - error: Alter access to space 'space1' is denied for user 'user' ... box.space._space_sequence:replace{s2.id, sq1.id, false} -- error --- - error: Create access to sequence 'seq1' is denied for user 'user' ... s2.index.pk:alter({sequence = 'seq2'}) -- ok --- ... box.session.su('admin') --- ... 
-- If the user owns a sequence attached to a space, -- it can use it for auto increment, otherwise it -- needs privileges. box.schema.user.revoke('user', 'read,write', 'universe') --- ... box.session.su('user') --- ... s2:insert{nil, 1} -- ok: {1, 1} --- - [1, 1] ... box.session.su('admin') --- ... s2.index.pk:alter{sequence = 'seq1'} --- ... box.session.su('user') --- ... s2:insert{2, 2} -- error --- - error: Write access to sequence 'seq1' is denied for user 'user' ... s2:insert{nil, 2} -- error --- - error: Write access to sequence 'seq1' is denied for user 'user' ... s2:update(1, {{'+', 2, 1}}) -- ok --- - [1, 2] ... s2:delete(1) -- ok --- - [1, 2] ... box.session.su('admin') --- ... box.schema.user.grant('user', 'write', 'sequence', 'seq1') --- ... box.session.su('user') --- ... s2:insert{2, 2} -- ok --- - [2, 2] ... s2:insert{nil, 3} -- ok: {3, 3} --- - [3, 3] ... box.session.su('admin') --- ... s1:drop() --- ... s2:drop() --- ... sq1:drop() --- ... sq2:drop() --- ... -- If the user has access to a space, it also has access to -- an automatically generated sequence attached to it. s = box.schema.space.create('test') --- ... _ = s:create_index('pk', {sequence = true}) --- ... box.schema.user.grant('user', 'read,write', 'space', 'test') --- ... box.session.su('user') --- ... s:insert{10, 10} -- ok --- - [10, 10] ... s:insert{nil, 11} -- ok: {11, 11} --- - [11, 11] ... box.sequence.test_seq:set(100) -- error --- - error: Write access to sequence 'test_seq' is denied for user 'user' ... box.sequence.test_seq:next() -- error --- - error: Write access to sequence 'test_seq' is denied for user 'user' ... box.sequence.test_seq:reset() -- error --- - error: Write access to sequence 'test_seq' is denied for user 'user' ... box.session.su('admin') --- ... s:drop() --- ... -- When a user is dropped, all his sequences are dropped as well. box.schema.user.grant('user', 'read,write', 'universe') --- ... box.session.su('user') --- ... 
_ = box.schema.sequence.create('test1') --- ... _ = box.schema.sequence.create('test2') --- ... box.session.su('admin') --- ... box.schema.user.drop('user') --- ... box.sequence --- - [] ... -- Apart from the admin, only the owner can grant permissions -- to a sequence. box.schema.user.create('user1') --- ... box.schema.user.create('user2') --- ... box.schema.user.grant('user1', 'read,write', 'universe') --- ... box.schema.user.grant('user2', 'read,write', 'universe') --- ... box.session.su('user1') --- ... sq = box.schema.sequence.create('test') --- ... box.session.su('user2') --- ... box.schema.user.grant('user2', 'write', 'sequence', 'test') -- error --- - error: Grant access to sequence 'test' is denied for user 'user2' ... box.session.su('user1') --- ... box.schema.user.grant('user2', 'write', 'sequence', 'test') -- ok --- ... box.session.su('admin') --- ... box.schema.user.drop('user1') --- ... box.schema.user.drop('user2') --- ... -- gh-2914: check identifier constraints. test_run = require('test_run').new() --- ... identifier = require("identifier") --- ... test_run:cmd("setopt delimiter ';'") --- - true ... identifier.run_test( function (identifier) box.schema.sequence.create(identifier) if box.sequence[identifier]:next() ~= 1 then error("Cannot access sequence by identifier") end end, function (identifier) box.schema.sequence.drop(identifier) end ); --- - All tests passed ... test_run:cmd("setopt delimiter ''"); --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/tree_pk_multipart.test.lua0000664000000000000000000001637513306560010023176 0ustar rootroot-- -- Insert test -- env = require('test_run') test_run = env.new() space = box.schema.space.create('tweedledum') -- Multipart primary key (sender nickname, receiver nickname, message id) i1 = space:create_index('primary', { type = 'tree', parts = {1, 'string', 2, 'string', 3, 'unsigned'}, unique = true }) space:insert{'Vincent', 'Jules', 0, 'Do you know what they call a - a - a Quarter Pounder with cheese in Paris?'} space:insert{'Jules', 'Vincent', 0, 'They don`t call it a Quarter Pounder with cheese?'} space:insert{'Vincent', 'Jules', 1, 'No man, they got the metric system. They wouldn`t know what the f--k a Quarter Pounder is.'} space:insert{'Jules', 'Vincent', 1, 'Then what do they call it?'} space:insert{'Vincent', 'Jules', 2, 'They call it a `Royale` with cheese.'} space:insert{'Jules', 'Vincent', 2, 'A `Royale` with cheese!'} space:insert{'Vincent', 'Jules', 3, 'That`s right.'} space:insert{'Jules', 'Vincent', 3, 'What do they call a Big Mac?'} space:insert{'Vincent', 'Jules', 4, 'A Big Mac`s a Big Mac, but they call it `Le Big Mac.`'} space:insert{'Jules', 'Vincent', 4, '`Le Big Mac!`'} space:insert{'Vincent', 'Jules', 5, 'Ha, ha, ha.'} space:insert{'Jules', 'Vincent', 5, 'What do they call a `Whopper`?'} space:insert{'Vincent', 'Jules', 6, 'I dunno, I didn`t go into Burger King.'} space:insert{'The Wolf!', 'Vincent', 0, 'Jimmie, lead the way. Boys, get to work.'} space:insert{'Vincent', 'The Wolf!', 0, 'A please would be nice.'} space:insert{'The Wolf!', 'Vincent', 1, 'Come again?'} space:insert{'Vincent', 'The Wolf!', 1, 'I said a please would be nice.'} space:insert{'The Wolf!', 'Vincent', 2, 'Get it straight buster - I`m not here to say please, I`m here to tell you what to do and if self-preservation is an instinct you possess you`d better fucking do it and do it quick. 
I`m here to help - if my help`s not appreciated then lotsa luck, gentlemen.'} space:insert{'The Wolf!', 'Vincent', 3, 'I don`t mean any disrespect, I just don`t like people barking orders at me.'} space:insert{'Vincent', 'The Wolf!', 2, 'If I`m curt with you it`s because time is a factor. I think fast, I talk fast and I need you guys to act fast if you wanna get out of this. So, pretty please... with sugar on top. Clean the fucking car.'} -- -- Select test -- -- Select by one entry space.index['primary']:get{'Vincent', 'Jules', 0} space.index['primary']:get{'Jules', 'Vincent', 0} space.index['primary']:get{'Vincent', 'Jules', 1} space.index['primary']:get{'Jules', 'Vincent', 1} space.index['primary']:get{'Vincent', 'Jules', 2} space.index['primary']:get{'Jules', 'Vincent', 2} space.index['primary']:get{'Vincent', 'Jules', 3} space.index['primary']:get{'Jules', 'Vincent', 3} space.index['primary']:get{'Vincent', 'Jules', 4} space.index['primary']:get{'Jules', 'Vincent', 4} space.index['primary']:get{'Vincent', 'Jules', 5} space.index['primary']:get{'Jules', 'Vincent', 5} space.index['primary']:get{'Vincent', 'Jules', 6} space.index['primary']:get{'The Wolf!', 'Vincent', 0} space.index['primary']:get{'Vincent', 'The Wolf!', 0} space.index['primary']:get{'The Wolf!', 'Vincent', 1} space.index['primary']:get{'Vincent', 'The Wolf!', 1} space.index['primary']:get{'The Wolf!', 'Vincent', 2} space.index['primary']:get{'The Wolf!', 'Vincent', 3} space.index['primary']:get{'Vincent', 'The Wolf!', 2} -- Select all messages from Vincent to Jules space.index['primary']:select({'Vincent', 'Jules'}) -- Select all messages from Jules to Vincent space.index['primary']:select({'Jules', 'Vincent'}) -- Select all messages from Vincent to The Wolf space.index['primary']:select({'Vincent', 'The Wolf!'}) -- Select all messages from The Wolf to Vincent space.index['primary']:select({'The Wolf!', 'Vincent'}) -- Select all Vincent messages space.index['primary']:select({'Vincent'}) -- -- 
Delete test -- -- Delete some messages from the The Wolf and Vincent dialog space:delete{'The Wolf!', 'Vincent', 0} space:delete{'The Wolf!', 'Vincent', 3} space:delete{'Vincent', 'The Wolf!', 0} space:update({'Vincent', 'The Wolf!', 1}, {{ '=', 1, 'Updated' }, {'=', 5, 'New'}}) space:update({'Updated', 'The Wolf!', 1}, {{ '=', 1, 'Vincent'}, { '#', 5, 1 }}) -- Checking Vincent's last messages space.index['primary']:select({'Vincent', 'The Wolf!'}) -- Checking The Wolf's last messages space.index['primary']:select({'The Wolf!', 'Vincent'}) -- try to delete nonexistent message space:delete{'Vincent', 'The Wolf!', 3} -- try to delete patrial defined key space:delete{'Vincent', 'The Wolf!'} -- try to delete by invalid key space:delete{'The Wolf!', 'Vincent', 1, 'Come again?'} -- -- Update test -- space:update({'The Wolf!', 'Vincent', 1}, {{'=', 4, ''}}) space:update({'Vincent', 'The Wolf!', 1}, {{'=', 4, ''}}) -- Checking Vincent's last messages space.index['primary']:select({'Vincent', 'The Wolf!'}) -- Checking The Wolf's last messages space.index['primary']:select({'The Wolf!', 'Vincent'}) -- try to update a nonexistent message space:update({'Vincent', 'The Wolf!', 4}, {{'=', 4, ''}}) -- try to update patrial defined key space:update({'Vincent', 'The Wolf!'}, {{'=', 4, ''}}) -- try to update by invalid key space:update({'The Wolf!', 'Vincent', 1, 'Come again?'}, {{'=', 4, ''}}) space:len() space:truncate() space:len() -- A test case for Bug#1051006 Tree iterators return garbage --if an index is modified between calls -- space.index['primary']:drop() i1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) i2 = space:create_index('second', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) space:insert{'a', 'a', 'a'} space:insert{'d', 'd', 'd'} space:insert{'e', 'e', 'e'} space:insert{'b', 'b', 'b'} space:insert{'c', 'c', 'c'} t = {} gen, param, state = space.index['second']:pairs(nil, { iterator = box.index.GE }) 
test_run:cmd("setopt delimiter ';'") for i = 1, 2 do state, v = gen(param, state) table.insert(t, v) end; test_run:cmd("setopt delimiter ''"); t space:truncate() v collectgarbage('collect') v param, v = gen(param, state) v collectgarbage('collect') v t = {} test_run:cmd("setopt delimiter ';'") for i = 1, 3 do param, v = gen(param, state) table.insert(t, v) end; test_run:cmd("setopt delimiter ''"); t space:drop() space = nil -- Bug #1082356 -- Space #19, https://bugs.launchpad.net/tarantool/+bug/1082356 space = box.schema.space.create('tweedledum') -- Multipart primary key (sender nickname, receiver nickname, message id) i1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 3, 'unsigned'}, unique = true }) space:insert{1, 1} space:replace{1, 1} space:drop() -- test deletion of data one by one space = box.schema.space.create('test') i1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) i2 = space:create_index('second', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) test_run:cmd("setopt delimiter ';'") for i = 1, 100 do v = tostring(i) space:insert{v, string.rep(v, 2) , string.rep(v, 3)} end; local pk = space.index[0] while pk:len() > 0 do local state, t for state, t in pk:pairs() do local key = {} for _k2, parts in ipairs(pk.parts) do table.insert(key, t[parts.fieldno]) end space:delete(key) end end; test_run:cmd("setopt delimiter ''"); space:drop() space = nil tarantool_1.9.1.26.g63eb81e3c/test/box/function1.c0000664000000000000000000001171313306560010020024 0ustar rootroot#include #include "module.h" #include #include int function1(box_function_ctx_t *ctx, const char *args, const char *args_end) { say_info("-- function1 - called --"); printf("ok - function1\n"); return 0; } int args(box_function_ctx_t *ctx, const char *args, const char *args_end) { uint32_t arg_count = mp_decode_array(&args); if (arg_count < 1) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "%s", "invalid 
argument count"); } if (mp_typeof(*args) != MP_UINT) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "%s", "first tuple field must be uint"); } uint32_t num = mp_decode_uint(&args); char tuple_buf[512]; char *d = tuple_buf; d = mp_encode_array(d, 2); d = mp_encode_uint(d, num); d = mp_encode_str(d, "hello", strlen("hello")); assert(d <= tuple_buf + sizeof(tuple_buf)); box_tuple_format_t *fmt = box_tuple_format_default(); box_tuple_t *tuple = box_tuple_new(fmt, tuple_buf, d); if (tuple == NULL) return -1; return box_return_tuple(ctx, tuple); } /* * For each UINT key in arguments create or increment counter in * box.space.test space. */ int multi_inc(box_function_ctx_t *ctx, const char *args, const char *args_end) { (void )ITER_ALL; static const char *SPACE_NAME = "test"; static const char *INDEX_NAME = "primary"; uint32_t space_id = box_space_id_by_name(SPACE_NAME, strlen(SPACE_NAME)); uint32_t index_id = box_index_id_by_name(space_id, INDEX_NAME, strlen(INDEX_NAME)); if (space_id == BOX_ID_NIL || index_id == BOX_ID_NIL) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't find index %s in space %s", INDEX_NAME, SPACE_NAME); } say_debug("space_id = %u, index_id = %u", space_id, index_id); uint32_t arg_count = mp_decode_array(&args); assert(!box_txn()); box_txn_begin(); assert(box_txn()); for (uint32_t i = 0; i < arg_count; i++) { /* Decode next argument */ if (mp_typeof(*args) != MP_UINT) return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Expected uint keys"); uint32_t key = mp_decode_uint(&args); (void) key; /* Prepare MsgPack key for search */ char key_buf[16]; char *key_end = key_buf; key_end = mp_encode_array(key_end, 1); key_end = mp_encode_uint(key_end, key); assert(key_end < key_buf + sizeof(key_buf)); /* Get current value from space */ uint64_t counter = 0; box_tuple_t *tuple; if (box_index_get(space_id, index_id, key_buf, key_end, &tuple) != 0) { return -1; /* error */ } else if (tuple != NULL) { const char *field = box_tuple_field(tuple, 
1); if (field == NULL || mp_typeof(*field) != MP_UINT) return box_error_set(__FILE__, __LINE__, ER_PROC_LUA, "Invalid tuple"); counter = mp_decode_uint(&field) + 1; } /* Replace value */ char tuple_buf[16]; char *tuple_end = tuple_buf; tuple_end = mp_encode_array(tuple_end, 2); tuple_end = mp_encode_uint(tuple_end, key); /* key */ tuple_end = mp_encode_uint(tuple_end, counter); /* counter */ assert(tuple_end <= tuple_buf + sizeof(tuple_buf)); if (box_replace(space_id, tuple_buf, tuple_end, NULL) != 0) return -1; } box_txn_commit(); assert(!box_txn()); return 0; } int errors(box_function_ctx_t *ctx, const char *args, const char *args_end) { box_error_set(__FILE__, __LINE__, ER_PROC_C, "%s", "Proc error"); const box_error_t *error = box_error_last(); assert(strcmp(box_error_type(error), "ClientError") == 0); assert(box_error_code(error) == ER_PROC_C); assert(strcmp(box_error_message(error), "Proc error") == 0); (void) error; /* Backwards compatibility */ box_error_raise(ER_PROC_C, "hello %s", "world"); assert(box_error_last() != NULL); error = box_error_last(); assert(box_error_code(error) == ER_PROC_C); assert(strcmp(box_error_message(error), "hello world") == 0); /* Backwards compatibility */ box_error_raise(ER_PROC_C, "hello, lalala"); assert(box_error_last() != NULL); error = box_error_last(); assert(box_error_code(error) == ER_PROC_C); assert(strcmp(box_error_message(error), "hello, lalala") == 0); box_error_clear(); assert(box_error_last() == NULL); return -1; /* raises "Unknown procedure error" */ } int test_yield(box_function_ctx_t *ctx, const char *args, const char *args_end) { static const char *SPACE_NAME = "test_yield"; uint32_t space_id = box_space_id_by_name(SPACE_NAME, strlen(SPACE_NAME)); if (space_id == BOX_ID_NIL) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't find space %s", SPACE_NAME); } assert(!box_txn()); box_txn_begin(); assert(box_txn()); /* Replace value */ char tuple_buf[16]; char *tuple_end = tuple_buf; tuple_end = 
mp_encode_array(tuple_end, 2); tuple_end = mp_encode_uint(tuple_end, 1); tuple_end = mp_encode_uint(tuple_end, 2); /* counter */ assert(tuple_end <= tuple_buf + sizeof(tuple_buf)); if (box_replace(space_id, tuple_buf, tuple_end, NULL) != 0) return -1; box_txn_commit(); assert(!box_txn()); say_info("-- yield - called --"); fiber_sleep(0.001); printf("ok - yield\n"); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/box/ddl.test.lua0000664000000000000000000001763513306565107020223 0ustar rootrootenv = require('test_run') test_run = env.new() fiber = require'fiber' -- simple test for parallel ddl execution _ = box.schema.space.create('test'):create_index('pk') ch = fiber.channel(2) test_run:cmd("setopt delimiter ';'") function f1() box.space.test:create_index('sec', {parts = {2, 'num'}}) ch:put(true) end; function f2() box.space.test:create_index('third', {parts = {3, 'string'}}) ch:put(true) end; test_run:cmd("setopt delimiter ''"); _ = {fiber.create(f1), fiber.create(f2)} ch:get() ch:get() _ = box.space.test:drop() test_run:cmd('restart server default') env = require('test_run') test_run = env.new() fiber = require'fiber' ch = fiber.channel(2) --issue #928 space = box.schema.space.create('test_trunc') _ = space:create_index('pk') _ = box.space.test_trunc:create_index('i1', {type = 'hash', parts = {2, 'STR'}}) _ = box.space.test_trunc:create_index('i2', {type = 'hash', parts = {2, 'STR'}}) function test_trunc() space:truncate() ch:put(true) end _ = {fiber.create(test_trunc), fiber.create(test_trunc)} _ = {ch:get(), ch:get()} space:drop() -- index should not crash after alter space = box.schema.space.create('test_swap') index = space:create_index('pk') space:replace({1, 2, 3}) index:rename('primary') index2 = space:create_index('sec') space:replace({2, 3, 1}) space:select() space:drop() ch = fiber.channel(3) _ = box.schema.space.create('test'):create_index('pk') test_run:cmd("setopt delimiter ';'") function add_index() box.space.test:create_index('sec', {parts = {2, 
'num'}}) ch:put(true) end; function insert_tuple(tuple) ch:put({pcall(box.space.test.replace, box.space.test, tuple)}) end; test_run:cmd("setopt delimiter ''"); _ = {fiber.create(insert_tuple, {1, 2, 'a'}), fiber.create(add_index), fiber.create(insert_tuple, {2, '3', 'b'})} {ch:get(), ch:get(), ch:get()} box.space.test:select() test_run:cmd('restart server default') box.space.test:select() box.space.test:drop() -- gh-2336 crash if format called twice during snapshot fiber = require'fiber' space = box.schema.space.create('test_format') _ = space:create_index('pk', { parts = { 1,'str' }}) space:format({{ name ="key"; type = "string" }, { name ="dataAB"; type = "string" }}) str = string.rep("t",1024) for i = 1, 10000 do space:insert{tostring(i), str} end ch = fiber.channel(3) _ = fiber.create(function() fiber.yield() box.snapshot() ch:put(true) end) format = {{name ="key"; type = "string"}, {name ="data"; type = "string"}} for i = 1, 2 do fiber.create(function() fiber.yield() space:format(format) ch:put(true) end) end {ch:get(), ch:get(), ch:get()} space:drop() -- collation function setmap(table) return setmetatable(table, { __serialize = 'map' }) end box.internal.collation.create('test') box.internal.collation.create('test', 'ICU') box.internal.collation.create(42, 'ICU', 'ru_RU') box.internal.collation.create('test', 42, 'ru_RU') box.internal.collation.create('test', 'ICU', 42) box.internal.collation.create('test', 'nothing', 'ru_RU') box.internal.collation.create('test', 'ICU', 'ru_RU', setmap{}) --ok box.internal.collation.create('test', 'ICU', 'ru_RU') box.internal.collation.drop('test') box.internal.collation.drop('nothing') -- allowed box.internal.collation.create('test', 'ICU', 'ru_RU', 42) box.internal.collation.create('test', 'ICU', 'ru_RU', 'options') box.internal.collation.create('test', 'ICU', 'ru_RU', {ping='pong'}) box.internal.collation.create('test', 'ICU', 'ru_RU', {french_collation='german'}) box.internal.collation.create('test', 'ICU', 'ru_RU', 
{french_collation='on'}) --ok box.internal.collation.drop('test') --ok box.internal.collation.create('test', 'ICU', 'ru_RU', {strength='supervillian'}) box.internal.collation.create('test', 'ICU', 'ru_RU', {strength=42}) box.internal.collation.create('test', 'ICU', 'ru_RU', {strength=2}) --ok box.internal.collation.drop('test') --ok box.internal.collation.create('test', 'ICU', 'ru_RU', {strength='primary'}) --ok box.internal.collation.drop('test') --ok box.begin() box.internal.collation.create('test2', 'ICU', 'ru_RU') box.rollback() box.internal.collation.create('test', 'ICU', 'ru_RU') box.internal.collation.exists('test') test_run:cmd('restart server default') function setmap(table) return setmetatable(table, { __serialize = 'map' }) end box.internal.collation.exists('test') box.internal.collation.drop('test') box.space._collation:auto_increment{'test'} box.space._collation:auto_increment{'test', 0, 'ICU'} box.space._collation:auto_increment{'test', 'ADMIN', 'ICU', 'ru_RU'} box.space._collation:auto_increment{42, 0, 'ICU', 'ru_RU'} box.space._collation:auto_increment{'test', 0, 42, 'ru_RU'} box.space._collation:auto_increment{'test', 0, 'ICU', 42} box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', setmap{}} --ok box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', setmap{}} box.space._collation.index.name:delete{'test'} -- ok box.space._collation.index.name:delete{'nothing'} -- allowed box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', 42} box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', 'options'} box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', {ping='pong'}} opts = {normalization_mode='NORMAL'} box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} opts.normalization_mode = 'OFF' _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} -- ok _ = box.space._collation.index.name:delete{'test'} -- ok opts.numeric_collation = 'PERL' 
box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} opts.numeric_collation = 'ON' _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok _ = box.space._collation.index.name:delete{'test'} -- ok opts.alternate_handling1 = 'ON' box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} opts.alternate_handling1 = nil opts.alternate_handling = 'ON' box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} opts.alternate_handling = 'SHIFTED' _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok _ = box.space._collation.index.name:delete{'test'} -- ok opts.case_first = 'ON' box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} opts.case_first = 'OFF' _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok _ = box.space._collation.index.name:delete{'test'} -- ok opts.case_level = 'UPPER' box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} opts.case_level = 'DEFAULT' _ = box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', opts} --ok _ = box.space._collation.index.name:delete{'test'} -- ok box.space._collation:auto_increment{'test', 0, 'ICU', 'ru_RU', setmap{}} box.space._collation:select{} test_run:cmd('restart server default') box.space._collation:select{} box.space._collation.index.name:delete{'test'} -- -- gh-2839: allow to store custom fields in field definition. -- format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {'field2', 'unsigned'} format[3] = {'field3', 'unsigned', custom_field = 'custom_value'} s = box.schema.create_space('test', {format = format}) s:format()[3].custom_field s:drop() -- -- gh-2783 -- A ddl operation shoud fail before trying to lock a ddl latch -- in a multi-statement transaction. -- If operation tries to lock already an locked latch then the -- current transaction will be silently rolled back under our feet. -- This is confusing. 
So check for multi-statement transaction -- before locking the latch. -- test_latch = box.schema.space.create('test_latch') _ = test_latch:create_index('primary', {unique = true, parts = {1, 'unsigned'}}) fiber = require('fiber') c = fiber.channel(1) test_run:cmd("setopt delimiter ';'") _ = fiber.create(function() test_latch:create_index("sec", {unique = true, parts = {2, 'unsigned'}}) c:put(true) end); box.begin() test_latch:create_index("sec2", {unique = true, parts = {2, 'unsigned'}}) box.commit(); test_run:cmd("setopt delimiter ''"); _ = c:get() test_latch:drop() -- this is where everything stops tarantool_1.9.1.26.g63eb81e3c/test/box/tuple.result0000664000000000000000000003524213306565107020362 0ustar rootroot-- box.tuple test env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") --- - true ... -- Test box.tuple:slice() t=box.tuple.new{'0', '1', '2', '3', '4', '5', '6', '7'} --- ... t:slice(0) --- - '0' - '1' - '2' - '3' - '4' - '5' - '6' - '7' ... t:slice(-1) --- - '7' ... t:slice(1) --- - '1' - '2' - '3' - '4' - '5' - '6' - '7' ... t:slice(-1, -1) --- - error: 'tuple.slice(): start must be less than end' ... t:slice(-1, 1) --- - error: 'tuple.slice(): start must be less than end' ... t:slice(1, -1) --- - '1' - '2' - '3' - '4' - '5' - '6' ... t:slice(1, 3) --- - '1' - '2' ... t:slice(7) --- - '7' ... t:slice(8) --- - error: 'tuple.slice(): start >= field count' ... t:slice(9) --- - error: 'tuple.slice(): start >= field count' ... t:slice(100500) --- - error: 'tuple.slice(): start >= field count' ... t:slice(9, -1) --- - error: 'tuple.slice(): start >= field count' ... t:slice(6, -1) --- - '6' ... t:slice(4, 4) --- - error: 'tuple.slice(): start must be less than end' ... t:slice(6, 4) --- - error: 'tuple.slice(): start must be less than end' ... t:slice(0, 0) --- - error: 'tuple.slice(): end > field count' ... t:slice(9, 10) --- - error: 'tuple.slice(): start >= field count' ... 
t:slice(-7) --- - '1' - '2' - '3' - '4' - '5' - '6' - '7' ... t:slice(-8) --- - '0' - '1' - '2' - '3' - '4' - '5' - '6' - '7' ... t:slice(-9) --- - error: 'tuple.slice(): start >= field count' ... t:slice(-100500) --- - error: 'tuple.slice(): start >= field count' ... t:slice(500, 700) --- - error: 'tuple.slice(): start >= field count' ... -- box.tuple.new test box.tuple.new() --- - [] ... box.tuple.new(1) --- - [1] ... box.tuple.new('string') --- - ['string'] ... box.tuple.new(tonumber64('18446744073709551615')) --- - [18446744073709551615] ... box.tuple.new{tonumber64('18446744073709551615'), 'string', 1} --- - [18446744073709551615, 'string', 1] ... -- A test case for Bug#1131108 'incorrect conversion from boolean lua value to tarantool tuple' function bug1075677() local range = {} table.insert(range, 1>0) return range end --- ... box.tuple.new(bug1075677()) --- - [true] ... bug1075677=nil --- ... -- boolean values in a tuple box.tuple.new(false) --- - [false] ... box.tuple.new({false}) --- - [false] ... -- tuple:bsize() t = box.tuple.new('abc') --- ... t --- - ['abc'] ... t:bsize() --- - 5 ... -- -- Test cases for #106 box.tuple.new fails on multiple items -- box.tuple.new() --- - [] ... box.tuple.new{} --- - [] ... box.tuple.new(1) --- - [1] ... box.tuple.new{1} --- - [1] ... box.tuple.new(1, 2, 3, 4, 5) --- - [1, 2, 3, 4, 5] ... box.tuple.new{1, 2, 3, 4, 5} --- - [1, 2, 3, 4, 5] ... box.tuple.new({'a', 'b'}, {'c', 'd'}, {'e', 'f'}) --- - [['a', 'b'], ['c', 'd'], ['e', 'f']] ... box.tuple.new{{'a', 'b'}, {'c', 'd'}, {'e', 'f'}} --- - [['a', 'b'], ['c', 'd'], ['e', 'f']] ... box.tuple.new({1, 2}, 'x', 'y', 'z', {c = 3, d = 4}, {e = 5, f = 6}) --- - [[1, 2], 'x', 'y', 'z', {'c': 3, 'd': 4}, {'e': 5, 'f': 6}] ... box.tuple.new{{1, 2}, 'x', 'y', 'z', {c = 3, d = 4}, {e = 5, f = 6}} --- - [[1, 2], 'x', 'y', 'z', {'c': 3, 'd': 4}, {'e': 5, 'f': 6}] ... 
box.tuple.new('x', 'y', 'z', {1, 2}, {c = 3, d = 4}, {e = 5, f = 6}) --- - ['x', 'y', 'z', [1, 2], {'c': 3, 'd': 4}, {'e': 5, 'f': 6}] ... box.tuple.new{'x', 'y', 'z', {1, 2}, {c = 3, d = 4}, {e = 5, f = 6}} --- - ['x', 'y', 'z', [1, 2], {'c': 3, 'd': 4}, {'e': 5, 'f': 6}] ... t=box.tuple.new{'a','b','c'} --- ... t:totable() --- - ['a', 'b', 'c'] ... t:unpack() --- - a - b - c ... t:totable(1) --- - ['a', 'b', 'c'] ... t:unpack(1) --- - a - b - c ... t:totable(2) --- - ['b', 'c'] ... t:unpack(2) --- - b - c ... t:totable(1, 3) --- - ['a', 'b', 'c'] ... t:unpack(1, 3) --- - a - b - c ... t:totable(2, 3) --- - ['b', 'c'] ... t:unpack(2, 3) --- - b - c ... t:totable(2, 4) --- - ['b', 'c'] ... t:unpack(2, 4) --- - b - c ... t:totable(nil, 2) --- - ['a', 'b'] ... t:unpack(nil, 2) --- - a - b ... t:totable(2, 1) --- - [] ... t:unpack(2, 1) --- ... t:totable(0) --- - error: 'builtin/box/tuple.lua..."]:: tuple.totable: invalid second argument' ... t:totable(1, 0) --- - error: 'builtin/box/tuple.lua..."]:: tuple.totable: invalid third argument' ... -- -- Check that tuple:totable correctly sets serializer hints -- box.tuple.new{1, 2, 3}:totable() --- - [1, 2, 3] ... getmetatable(box.tuple.new{1, 2, 3}:totable()).__serialize --- - seq ... -- A test case for the key as an tuple space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary') --- ... space:truncate() --- ... t=space:insert{0, 777, '0', '1', '2', '3'} --- ... t --- - [0, 777, '0', '1', '2', '3'] ... space:replace(t) --- - [0, 777, '0', '1', '2', '3'] ... space:replace{777, { 'a', 'b', 'c', {'d', 'e', t}}} --- - [777, ['a', 'b', 'c', ['d', 'e', [0, 777, '0', '1', '2', '3']]]] ... -- A test case for tuple:totable() method t=space:get{777}:totable() --- ... t[2], t[3], t[4], t[5] --- - ['a', 'b', 'c', ['d', 'e', [0, 777, '0', '1', '2', '3']]] - null - null - null ... space:truncate() --- ... 
-- A test case for Bug#1119389 '(lbox_tuple_index) crashes on 'nil' argument' t=space:insert{0, 8989} --- ... t[nil] --- - null ... -------------------------------------------------------------------------------- -- test tuple:next -------------------------------------------------------------------------------- t = box.tuple.new({'a', 'b', 'c'}) --- ... state, val = t:next() --- ... state, val --- - 1 - a ... state, val = t:next(state) --- ... state, val --- - 2 - b ... state, val = t:next(state) --- ... state, val --- - 3 - c ... state, val = t:next(state) --- ... state, val --- - null - null ... t:next(nil) --- - 1 - a ... t:next(0) --- - 1 - a ... t:next(1) --- - 2 - b ... t:next(2) --- - 3 - c ... t:next(3) --- - null ... t:next(4) --- - null ... t:next(-1) --- - null ... t:next("fdsaf") --- - error: 'builtin/box/tuple.lua..."]:: bad argument #2 to ''box_tuple_field'' (cannot convert ''string'' to ''unsigned int'')' ... box.tuple.new({'x', 'y', 'z'}):next() --- - 1 - x ... t=space:insert{1953719668} --- ... t:next(1684234849) --- - null ... t:next(1) --- - null ... t:next(nil) --- - 1 - 1953719668 ... t:next(t:next()) --- - null ... -------------------------------------------------------------------------------- -- test tuple:pairs -------------------------------------------------------------------------------- ta = {} for k, v in t:pairs() do table.insert(ta, v) end --- ... ta --- - - 1953719668 ... t=space:replace{1953719668, 'another field'} --- ... ta = {} for k, v in t:pairs() do table.insert(ta, v) end --- ... ta --- - - 1953719668 - another field ... t=space:replace{1953719668, 'another field', 'one more'} --- ... ta = {} for k, v in t:pairs() do table.insert(ta, v) end --- ... ta --- - - 1953719668 - another field - one more ... t=box.tuple.new({'a', 'b', 'c', 'd'}) --- ... ta = {} for it,field in t:pairs() do table.insert(ta, field); end --- ... ta --- - - a - b - c - d ... t = box.tuple.new({'a', 'b', 'c'}) --- ... gen, init, state = t:pairs() --- ... 
gen, init, state --- - gen: param: ['a', 'b', 'c'] - ['a', 'b', 'c'] - null ... state, val = gen(init, state) --- ... state, val --- - 1 - a ... state, val = gen(init, state) --- ... state, val --- - 2 - b ... state, val = gen(init, state) --- ... state, val --- - 3 - c ... state, val = gen(init, state) --- ... state, val --- - null - null ... r = {} --- ... for _state, val in t:pairs() do table.insert(r, val) end --- ... r --- - - a - b - c ... r = {} --- ... for _state, val in t:pairs() do table.insert(r, val) end --- ... r --- - - a - b - c ... r = {} --- ... for _state, val in t:pairs(1) do table.insert(r, val) end --- ... r --- - - b - c ... r = {} --- ... for _state, val in t:pairs(3) do table.insert(r, val) end --- ... r --- - [] ... r = {} --- ... for _state, val in t:pairs(10) do table.insert(r, val) end --- - error: 'builtin/box/tuple.lua..."]:: error: invalid key to ''next''' ... r --- - [] ... r = {} --- ... for _state, val in t:pairs(nil) do table.insert(r, val) end --- ... r --- - - a - b - c ... t:pairs(nil) --- - gen: param: ['a', 'b', 'c'] - ['a', 'b', 'c'] - null ... t:pairs("fdsaf") --- - state: fdsaf gen: param: ['a', 'b', 'c'] - ['a', 'b', 'c'] - fdsaf ... -------------------------------------------------------------------------------- -- test tuple:find -------------------------------------------------------------------------------- env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... t = box.tuple.new({'a','b','c','a', -1, 0, 1, 2, true, 9223372036854775807ULL, -9223372036854775807LL}); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t:find('a') --- - 1 ... t:find(1, 'a') --- - 4 ... t:find('c') --- - 3 ... t:find('xxxxx') --- - null ... t:find(1, 'xxxxx') --- - null ... t:findall('a') --- - - 1 - 4 ... t:findall(1, 'a') --- - - 4 ... t:findall('xxxxx') --- - [] ... t:findall(1, 'xxxxx') --- - [] ... 
t:find(100, 'a') --- - error: 'builtin/box/tuple.lua..."]:: error: invalid key to ''next''' ... t:findall(100, 'a') --- - error: 'builtin/box/tuple.lua..."]:: error: invalid key to ''next''' ... t:find(100, 'xxxxx') --- - error: 'builtin/box/tuple.lua..."]:: error: invalid key to ''next''' ... t:findall(100, 'xxxxx') --- - error: 'builtin/box/tuple.lua..."]:: error: invalid key to ''next''' ... --- -- Lua type coercion --- t:find(2) --- - 8 ... t:findall(2) --- - - 8 ... t:find(2ULL) --- - 8 ... t:findall(2ULL) --- - - 8 ... t:find(2LL) --- - 8 ... t:findall(2LL) --- - - 8 ... t:find(2) --- - 8 ... t:findall(2) --- - - 8 ... t:find(-1) --- - 5 ... t:findall(-1) --- - - 5 ... t:find(-1LL) --- - 5 ... t:findall(-1LL) --- - - 5 ... t:find(true) --- - 9 ... t:findall(true) --- - - 9 ... t:find(9223372036854775807LL) --- - 10 ... t:findall(9223372036854775807LL) --- - - 10 ... t:find(9223372036854775807ULL) --- - 10 ... t:findall(9223372036854775807ULL) --- - - 10 ... t:find(-9223372036854775807LL) --- - 11 ... t:findall(-9223372036854775807LL) --- - - 11 ... -------------------------------------------------------------------------------- -- test tuple:update -------------------------------------------------------------------------------- -- see box/update.test.lua for more test cases t = box.tuple.new({'a', 'b', 'c', 'd', 'e'}) --- ... t:update() --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:update({ { op, field, arg}+ })' ... t:update(10) --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:update({ { op, field, arg}+ })' ... t:update({}) --- - ['a', 'b', 'c', 'd', 'e'] ... t:update({{ '!', -1, 'f'}}) --- - ['a', 'b', 'c', 'd', 'e', 'f'] ... t:update({{ '#', 4, 1}}) --- - ['a', 'b', 'c', 'e'] ... t --- - ['a', 'b', 'c', 'd', 'e'] ... t = nil --- ... -- gh-2454 Regression in msgpack t = box.tuple.new(require('yaml').decode("[17711728, {1000: 'xxx'}]")) --- ... t:update({{'=', 2, t[2]}}) --- - [17711728, {1000: 'xxx'}] ... 
t --- - [17711728, {1000: 'xxx'}] ... t = nil --- ... -------------------------------------------------------------------------------- -- test msgpack.encode + tuple -------------------------------------------------------------------------------- msgpack = require('msgpack') --- ... encode_load_metatables = msgpack.cfg.encode_load_metatables --- ... -- disable __serialize hook to test internal on_encode hook msgpack.cfg{encode_load_metatables = false} --- ... msgpackffi = require('msgpackffi') --- ... t = box.tuple.new({'a', 'b', 'c'}) --- ... msgpack.decode(msgpackffi.encode(t)) --- - ['a', 'b', 'c'] - 8 ... msgpack.decode(msgpack.encode(t)) --- - ['a', 'b', 'c'] - 8 ... msgpack.decode(msgpackffi.encode({1, {'x', 'y', t, 'z'}, 2, 3})) --- - [1, ['x', 'y', ['a', 'b', 'c'], 'z'], 2, 3] - 19 ... msgpack.decode(msgpack.encode({1, {'x', 'y', t, 'z'}, 2, 3})) --- - [1, ['x', 'y', ['a', 'b', 'c'], 'z'], 2, 3] - 19 ... -- restore configuration msgpack.cfg{encode_load_metatables = encode_load_metatables} --- ... -- gh-738: Serializer hints are unclear t = box.tuple.new({1, 2, {}}) --- ... map = t[3] --- ... getmetatable(map) ~= nil --- - true ... map --- - [] ... map['test'] = 48 --- ... map --- - test: 48 ... getmetatable(map) == nil --- - true ... -- gh-1189: tuple is not checked as first argument t = box.tuple.new({1, 2, {}}) --- ... t.bsize() --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:bsize()' ... t.find(9223372036854775807LL) --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:find([offset, ]val)' ... t.findall(9223372036854775807LL) --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:findall([offset, ]val)' ... t.update() --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:update({ { op, field, arg}+ })' ... t.upsert() --- - error: 'builtin/box/tuple.lua..."]:: Usage: tuple:upsert({ { op, field, arg}+ })' ... t = nil --- ... space:drop() --- ... -- gh-1266: luaL_convertfield crashes on ffi.typeof() ffi = require('ffi') --- ... 
ffi.typeof('struct tuple') --- - ctype ... -- gh-1345: lbox_tuple_new() didn't check result of box_tuple_new() for NULL -- try to allocate 100Mb tuple and checked that server won't crash box.tuple.new(string.rep('x', 100 * 1024 * 1024)) ~= nil --- - true ... collectgarbage('collect') -- collect huge string --- - 0 ... -- testing tostring test_run:cmd("setopt delimiter ';'") --- - true ... null = nil t = box.tuple.new({1, -2, 1.2, -1.2}, 'x', 'y', 'z', null, true, false, {bin = "\x08\x5c\xc2\x80\x12\x2f", big_num = tonumber64('18446744073709551615'), map = {key = "value"}, double=1.0000000001, utf8="Кудыкины горы"}); --- ... tostring(t); --- - '[[1, -2, 1.2, -1.2], ''x'', ''y'', ''z'', null, true, false, {''big_num'': 18446744073709551615, ''double'': 1.0000000001, ''utf8'': ''Кудыкины горы'', ''bin'': !!binary CFzCgBIv, ''map'': {''key'': ''value''}}]' ... t; --- - [[1, -2, 1.2, -1.2], 'x', 'y', 'z', null, true, false, {'big_num': 18446744073709551615, 'double': 1.0000000001, 'utf8': 'Кудыкины горы', 'bin': !!binary CFzCgBIv, 'map': { 'key': 'value'}}] ... test_run:cmd("setopt delimiter ''"); --- - true ... -- -- gh-1014: tuple field names and tuple methods aliases. -- t = box.tuple.new({1, 2, 3}) --- ... box.tuple.next == t.next --- - true ... box.tuple.ipairs == t.ipairs --- - true ... box.tuple.pairs == t.pairs --- - true ... box.tuple.slice == t.slice --- - true ... box.tuple.transform == t.transform --- - true ... box.tuple.find == t.find --- - true ... box.tuple.findall == t.findall --- - true ... box.tuple.unpack == t.unpack --- - true ... box.tuple.totable == t.totable --- - true ... box.tuple.update == t.update --- - true ... box.tuple.upsert == t.upsert --- - true ... box.tuple.bsize == t.bsize --- - true ... test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_point_r2.test.lua0000664000000000000000000000136313306560010022370 0ustar rootroots = box.schema.space.create('spatial') _ = s:create_index('primary') _ = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) s:insert{1,{0.0,0.0}} s:insert{2,{0.0,10.0}} s:insert{3,{0.0,50.0}} s:insert{4,{10.0,0.0}} s:insert{5,{50.0,0.0}} s:insert{6,{10.0,10.0}} s:insert{7,{10.0,50.0}} s:insert{8,{50.0,10.0}} s:insert{9,{50.0,50.0}} -- select all records s.index.spatial:select({}, {iterator = 'ALL'}) -- select records belonging to rectangle (0,0,10,10) s.index.spatial:select({0.0,0.0,10.0,10.0}, {iterator = 'LE'}) -- select records with coordinates (10,10) s.index.spatial:select({10.0,10.0}, {iterator = 'EQ'}) -- select neighbors of point (5,5) s.index.spatial:select({5.0,5.0}, {iterator = 'NEIGHBOR'}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_array.test.lua0000664000000000000000000000437213306560010021755 0ustar rootroots = box.schema.space.create('spatial') _ = s:create_index('primary') spatial = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}}) spatial.type s:insert{1,{0.0,0.0}} s:insert{2,{0.0,10.0}} s:insert{3,{0.0,50.0}} s:insert{4,{10.0,0.0}} s:insert{5,{50.0,0.0}} s:insert{6,{10.0,10.0}} s:insert{7,{10.0,50.0}} s:insert{8,{50.0,10.0}} s:insert{9,{50.0,50.0}} -- select all records s.index.spatial:select({}, {iterator = 'ALL'}) -- select records belonging to rectangle (0,0,10,10) s.index.spatial:select({0.0,0.0,10.0,10.0}, {iterator = 'LE'}) -- select records with coordinates (10,10) s.index.spatial:select({10.0,10.0}, {iterator = 'EQ'}) -- select neighbors of point (5,5) s.index.spatial:select({5.0,5.0}, {iterator = 'NEIGHBOR'}) s:drop() s = box.schema.space.create('spatial') _ = s:create_index('primary') spatial = s:create_index('spatial', { type = 'rtree', unique = false, parts = {2, 'array'}, dimension = 8}) spatial.type s:insert{ 1,{0, 0, 0, 0, 0, 0, 0, 
0}} s:insert{ 2,{10, 0, 0, 0, 0, 0, 0, 0}} s:insert{ 3,{0, 10, 0, 0, 0, 0, 0, 0}} s:insert{ 4,{0, 0, 10, 0, 0, 0, 0, 0}} s:insert{ 5,{0, 0, 0, 10, 0, 0, 0, 0}} s:insert{ 6,{0, 0, 0, 0, 10, 0, 0, 0}} s:insert{ 7,{0, 0, 0, 0, 0, 10, 0, 0}} s:insert{ 8,{0, 0, 0, 0, 0, 0, 10, 0}} s:insert{ 9,{0, 0, 0, 0, 0, 0, 0, 10}} s:insert{10,{50, 0, 0, 0, 0, 0, 0, 0}} s:insert{11,{0, 50, 0, 0, 0, 0, 0, 0}} s:insert{12,{0, 0, 50, 0, 0, 0, 0, 0}} s:insert{13,{0, 0, 0, 50, 0, 0, 0, 0}} s:insert{14,{0, 0, 0, 0, 50, 0, 0, 0}} s:insert{15,{0, 0, 0, 0, 0, 50, 0, 0}} s:insert{16,{0, 0, 0, 0, 0, 0, 50, 0}} s:insert{17,{0, 0, 0, 0, 0, 0, 0, 50}} s:insert{18,{10, 10, 10, 10, 10, 10, 10, 10}} s:insert{19,{10, 50, 10, 50, 10, 50, 10, 50}} s:insert{20,{0, 10, 50, 0, 10, 50, 0, 10}} p0 = {0, 0, 0, 0, 0, 0, 0, 0} p5 = {5, 5, 5, 5, 5, 5, 5, 5} p10 = {10, 10, 10, 10, 10, 10, 10, 10 } rt0_10 = {0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 10, 10, 10, 10, 10 } -- select all records s.index.spatial:select({}, {iterator = 'ALL'}) -- select records belonging to rectangle (0,0,..10,10,..) s.index.spatial:select(rt0_10, {iterator = 'LE'}) -- select records with coordinates (10,10) s.index.spatial:select(p10, {iterator = 'EQ'}) -- select neighbors of point (5,5) s.index.spatial:select(p5, {iterator = 'NEIGHBOR'}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/access_misc.result0000664000000000000000000004035613306565107021507 0ustar rootrootsession = box.session --- ... utils = require('utils') --- ... EMPTY_MAP = utils.setmap({}) --- ... -- -- Check a double create space -- s = box.schema.space.create('test') --- ... s = box.schema.space.create('test') --- - error: Space 'test' already exists ... -- -- Check a double drop space -- s:drop() --- ... s:drop() --- - error: Space 'test' does not exist ... -- -- Check double create user -- box.schema.user.create('testus') --- ... box.schema.user.create('testus') --- - error: User 'testus' already exists ... s = box.schema.space.create('admin_space') --- ... 
index = s:create_index('primary', {type = 'hash', parts = {1, 'unsigned'}}) --- ... s:insert({1}) --- - [1] ... s:insert({2}) --- - [2] ... -- -- Check double grant and read access -- box.schema.user.grant('testus', 'read', 'space', 'admin_space') --- ... box.schema.user.grant('testus', 'read', 'space', 'admin_space') --- - error: User 'testus' already has read access on space 'admin_space' ... session.su('testus') --- ... s:select(1) --- - - [1] ... s:insert({3}) --- - error: Write access to space 'admin_space' is denied for user 'testus' ... s:delete(1) --- - error: Write access to space 'admin_space' is denied for user 'testus' ... s:drop() --- - error: Write access to space '_space_sequence' is denied for user 'testus' ... -- -- Check double revoke -- session.su('admin') --- ... box.schema.user.revoke('testus', 'read', 'space', 'admin_space') --- ... box.schema.user.revoke('testus', 'read', 'space', 'admin_space') --- - error: User 'testus' does not have read access on space 'admin_space' ... session.su('testus') --- ... s:select(1) --- - error: Read access to space 'admin_space' is denied for user 'testus' ... session.su('admin') --- ... -- -- Check write access on space -- box.schema.user.grant('testus', 'write', 'space', 'admin_space') --- ... session.su('testus') --- ... s:select(1) --- - error: Read access to space 'admin_space' is denied for user 'testus' ... s:delete(1) --- - [1] ... s:insert({3}) --- - [3] ... s:drop() --- - error: Write access to space '_space_sequence' is denied for user 'testus' ... session.su('admin') --- ... -- -- Check double drop user -- box.schema.user.drop('testus') --- ... box.schema.user.drop('testus') --- - error: User 'testus' is not found ... -- -- Check 'guest' user -- session.su('guest') --- ... session.uid() --- - 0 ... box.space._user:select(1) --- - error: Read access to space '_user' is denied for user 'guest' ... s:select(1) --- - error: Read access to space 'admin_space' is denied for user 'guest' ... 
s:insert({4}) --- - error: Write access to space 'admin_space' is denied for user 'guest' ... s:delete({3}) --- - error: Write access to space 'admin_space' is denied for user 'guest' ... s:drop() --- - error: Write access to space '_space_sequence' is denied for user 'guest' ... gs = box.schema.space.create('guest_space') --- - error: Write access to space '_schema' is denied for user 'guest' ... box.schema.func.create('guest_func') --- - error: Read access to space '_func' is denied for user 'guest' ... session.su('admin') --- ... s:select() --- - - [2] - [3] ... -- -- Create user with universe read&write grants -- and create this user session -- box.schema.user.create('uniuser') --- ... box.schema.user.grant('uniuser', 'read, write, execute', 'universe') --- ... session.su('uniuser') --- ... uid = session.uid() --- ... -- -- Check universal user -- Check delete currently authenticated user -- box.schema.user.drop('uniuser') --- - error: 'Failed to drop user or role ''uniuser'': the user is active in the current session' ... -- --Check create, call and drop function -- box.schema.func.create('uniuser_func') --- ... function uniuser_func() return 'hello' end --- ... uniuser_func() --- - hello ... box.schema.func.drop('uniuser_func') --- ... -- -- Check create and drop space -- us = box.schema.space.create('uniuser_space') --- ... us:drop() --- ... -- -- Check create and drop user -- box.schema.user.create('uniuser_testus') --- ... box.schema.user.drop('uniuser_testus') --- ... -- -- Check access system and any spaces -- box.space.admin_space:select() --- - - [2] - [3] ... box.space._user:select(1) --- - - [1, 1, 'admin', 'user', {}] ... 
box.space._space:select(280) --- - - [280, 1, '_space', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] ... us = box.schema.space.create('uniuser_space') --- ... box.schema.func.create('uniuser_func') --- ... session.su('admin') --- ... box.schema.user.create('someuser') --- ... box.schema.user.grant('someuser', 'read, write, execute', 'universe') --- ... session.su('someuser') --- ... -- -- Check drop objects of another user -- s:drop() --- - error: Drop access to space 'admin_space' is denied for user 'someuser' ... us:drop() --- - error: Drop access to space 'uniuser_space' is denied for user 'someuser' ... box.schema.func.drop('uniuser_func') --- - error: Drop access to function 'uniuser_func' is denied for user 'someuser' ... box.schema.user.drop('uniuser_testus') --- - error: User 'uniuser_testus' is not found ... session.su('admin') --- ... box.schema.func.drop('uniuser_func') --- ... box.schema.user.drop('someuser') --- ... box.schema.user.drop('uniuser_testus') --- - error: User 'uniuser_testus' is not found ... box.schema.user.drop('uniuser') --- ... _ = box.space._user:delete(uid) --- ... s:drop() --- ... -- -- Check write grant on _user -- box.schema.user.create('testuser') --- ... maxuid = box.space._user.index.primary:max()[1] --- ... box.schema.user.grant('testuser', 'write', 'space', '_user') --- ... session.su('testuser') --- ... testuser_uid = session.uid() --- ... _ = box.space._user:delete(2) --- - error: Drop access to user 'public' is denied for user 'testuser' ... box.space._user:select(1) --- - error: Read access to space '_user' is denied for user 'testuser' ... uid = box.space._user:insert{maxuid+1, session.uid(), 'someone', 'user', EMPTY_MAP}[1] --- ... _ = box.space._user:delete(uid) --- ... 
session.su('admin') --- ... box.space._user:select(1) --- - - [1, 1, 'admin', 'user', {}] ... _ = box.space._user:delete(testuser_uid) --- - error: 'Failed to drop user or role ''testuser'': the user has objects' ... box.schema.user.revoke('testuser', 'write', 'space', '_user') --- ... -- -- Check read grant on _user -- box.schema.user.grant('testuser', 'read', 'space', '_user') --- ... session.su('testuser') --- ... _ = box.space._user:delete(2) --- - error: Write access to space '_user' is denied for user 'testuser' ... box.space._user:select(1) --- - - [1, 1, 'admin', 'user', {}] ... box.space._user:insert{uid, session.uid(), 'someone2', 'user'} --- - error: Write access to space '_user' is denied for user 'testuser' ... session.su('admin') --- ... -- -- Check read grant on _index -- box.schema.user.grant('testuser', 'read', 'space', '_index') --- ... session.su('testuser') --- ... box.space._index:select(272) --- - - [272, 0, 'primary', 'tree', {'unique': true}, [[0, 'string']]] ... box.space._index:insert{512, 1,'owner','tree', 1, 1, 0,'unsigned'} --- - error: Write access to space '_index' is denied for user 'testuser' ... session.su('admin') --- ... box.schema.user.revoke('testuser', 'usage,session', 'universe') --- ... box.schema.user.revoke('testuser', 'read, write, execute', 'universe') --- - error: User 'testuser' does not have read, write, execute access on universe 'nil' ... box.schema.user.grant('testuser', 'usage,session', 'universe') --- ... -- -- Check that itertors check privileges -- s = box.schema.space.create('glade') --- ... box.schema.user.grant('testuser', 'read', 'space', 'glade') --- ... index = s:create_index('primary', {unique = true, parts = {1, 'unsigned', 2, 'string'}}) --- ... s:insert({1, 'A'}) --- - [1, 'A'] ... s:insert({2, 'B'}) --- - [2, 'B'] ... s:insert({3, 'C'}) --- - [3, 'C'] ... s:insert({4, 'D'}) --- - [4, 'D'] ... t = {} --- ... for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end --- ... 
t --- - - [3, 'C'] - [4, 'D'] ... t = {} --- ... session.su('testuser') --- ... s:select() --- - - [1, 'A'] - [2, 'B'] - [3, 'C'] - [4, 'D'] ... for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end --- ... t --- - - [3, 'C'] - [4, 'D'] ... t = {} --- ... session.su('admin') --- ... box.schema.user.revoke('testuser', 'read', 'space', 'glade') --- ... box.schema.user.grant('testuser', 'write', 'space', 'glade') --- ... session.su('testuser') --- ... s:select() --- - error: Read access to space 'glade' is denied for user 'testuser' ... for key, v in s.index.primary:pairs(1, {iterator = 'GE'}) do table.insert (t, v) end --- - error: Read access to space 'glade' is denied for user 'testuser' ... t --- - [] ... t = {} --- ... session.su('admin') --- ... box.schema.user.grant('testuser', 'read, write, execute', 'space', 'glade') --- ... session.su('testuser') --- ... s:select() --- - - [1, 'A'] - [2, 'B'] - [3, 'C'] - [4, 'D'] ... for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end --- ... t --- - - [3, 'C'] - [4, 'D'] ... t = {} --- ... session.su('guest') --- ... s:select() --- - error: Read access to space 'glade' is denied for user 'guest' ... for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end --- - error: Read access to space 'glade' is denied for user 'guest' ... t --- - [] ... t = {} --- ... session.su('guest') --- ... s:select() --- - error: Read access to space 'glade' is denied for user 'guest' ... for key, v in s.index.primary:pairs(3, {iterator = 'GE'}) do table.insert (t, v) end --- - error: Read access to space 'glade' is denied for user 'guest' ... t --- - [] ... -- -- Check that alter and truncate do not affect space access control. -- session.su('admin') --- ... _ = s:create_index('secondary', {unique = false, parts = {2, 'string'}}) --- ... session.su('testuser') --- ... s:select() --- - - [1, 'A'] - [2, 'B'] - [3, 'C'] - [4, 'D'] ... 
session.su('admin') --- ... s:truncate() --- ... s:insert({1234, 'ABCD'}) --- - [1234, 'ABCD'] ... session.su('testuser') --- ... s:select() --- - - [1234, 'ABCD'] ... session.su('admin') --- ... box.schema.user.drop('testuser') --- ... s:drop() --- ... -- -- gh-3089 usage access is not applied to owner -- box.schema.user.grant("guest","read, write, execute, create", "universe") --- ... box.session.su("guest") --- ... s = box.schema.space.create("test") --- ... _ = s:create_index("prim") --- ... test_func = function() end --- ... box.schema.func.create('test_func') --- ... sq = box.schema.sequence.create("test") --- ... box.session.su("admin") --- ... box.schema.user.revoke("guest", "usage", "universe") --- ... box.session.su("guest") --- ... s:select{} --- - error: Usage access to universe '' is denied for user 'guest' ... s:drop() --- - error: Usage access to universe '' is denied for user 'guest' ... sq:set(100) --- - error: Usage access to universe '' is denied for user 'guest' ... sq:drop() --- - error: Usage access to universe '' is denied for user 'guest' ... c = require("net.box").connect(os.getenv("LISTEN")) --- ... c:call("test_func") --- - error: Usage access to universe '' is denied for user 'guest' ... box.session.su("admin") --- ... box.schema.user.revoke("guest","read, write, execute, create", "universe") --- ... box.session.su("guest") --- ... s:select{} --- - error: Usage access to universe '' is denied for user 'guest' ... s:drop() --- - error: Usage access to universe '' is denied for user 'guest' ... sq:set(100) --- - error: Usage access to universe '' is denied for user 'guest' ... sq:drop() --- - error: Usage access to universe '' is denied for user 'guest' ... c = require("net.box").connect(os.getenv("LISTEN")) --- ... c:call("test_func") --- - error: Usage access to universe '' is denied for user 'guest' ... box.session.su("admin") --- ... box.schema.user.grant("guest","usage", "universe") --- ... box.schema.func.drop("test_func") --- ... 
s:drop() --- ... sq:drop() --- ... box.space._user:select() --- - - [0, 1, 'guest', 'user', {'chap-sha1': 'vhvewKp0tNyweZQ+cFKAlsyphfg='}] - [1, 1, 'admin', 'user', {}] - [2, 1, 'public', 'role', {}] - [3, 1, 'replication', 'role', {}] - [31, 1, 'super', 'role', {}] ... box.space._space:select() --- - - [272, 1, '_schema', 'memtx', 0, {}, [{'type': 'string', 'name': 'key'}]] - [276, 1, '_collation', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, { 'name': 'name', 'type': 'string'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'type', 'type': 'string'}, {'name': 'locale', 'type': 'string'}, { 'name': 'opts', 'type': 'map'}]] - [280, 1, '_space', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] - [281, 1, '_vspace', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] - [284, 1, '_sequence', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'step', 'type': 'integer'}, {'name': 'min', 'type': 'integer'}, {'name': 'max', 'type': 'integer'}, {'name': 'start', 'type': 'integer'}, {'name': 'cache', 'type': 'integer'}, {'name': 'cycle', 'type': 'boolean'}]] - [285, 1, '_sequence_data', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'value', 'type': 'integer'}]] - [288, 1, '_index', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'iid', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'opts', 'type': 'map'}, {'name': 'parts', 'type': 
'array'}]] - [289, 1, '_vindex', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'iid', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'opts', 'type': 'map'}, {'name': 'parts', 'type': 'array'}]] - [296, 1, '_func', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'setuid', 'type': 'unsigned'}]] - [297, 1, '_vfunc', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'setuid', 'type': 'unsigned'}]] - [304, 1, '_user', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'auth', 'type': 'map'}]] - [305, 1, '_vuser', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'auth', 'type': 'map'}]] - [312, 1, '_priv', 'memtx', 0, {}, [{'name': 'grantor', 'type': 'unsigned'}, { 'name': 'grantee', 'type': 'unsigned'}, {'name': 'object_type', 'type': 'string'}, {'name': 'object_id', 'type': 'unsigned'}, {'name': 'privilege', 'type': 'unsigned'}]] - [313, 1, '_vpriv', 'sysview', 0, {}, [{'name': 'grantor', 'type': 'unsigned'}, {'name': 'grantee', 'type': 'unsigned'}, {'name': 'object_type', 'type': 'string'}, {'name': 'object_id', 'type': 'unsigned'}, {'name': 'privilege', 'type': 'unsigned'}]] - [320, 1, '_cluster', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'uuid', 'type': 'string'}]] - [330, 1, '_truncate', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'count', 'type': 'unsigned'}]] - [340, 1, '_space_sequence', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'sequence_id', 'type': 'unsigned'}, {'name': 'is_generated', 'type': 'boolean'}]] ... 
box.space._func:select() --- - - [1, 1, 'box.schema.user.info', 1, 'LUA'] ... session = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/misc.result0000664000000000000000000005045513306565107020167 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter 'table: .*' to 'table:
'") --- - true ... -- gh-266: box.info() crash on uncofigured box package.loaded['box.space'] == nil --- - true ... package.loaded['box.index'] == nil --- - true ... package.loaded['box.tuple'] == nil --- - true ... package.loaded['box.error'] == nil --- - true ... package.loaded['box.info'] == nil --- - true ... package.loaded['box.stat'] == nil --- - true ... package.loaded['box.session'] == nil --- - true ... space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'hash' }) --- ... -- Test Lua from admin console. Whenever producing output, -- make sure it's a valid YAML. ' lua says: hello' --- - ' lua says: hello' ... -- # What's in the box? t = {} for n in pairs(box) do table.insert(t, tostring(n)) end table.sort(t) --- ... t --- - - NULL - atomic - backup - begin - cfg - commit - ctl - error - index - info - internal - once - rollback - rollback_to_savepoint - runtime - savepoint - schema - sequence - session - slab - snapshot - space - stat - tuple ... t = nil --- ... ---------------- -- # box.error ---------------- test_run:cmd("restart server default") env = require('test_run') --- ... test_run = env.new() --- ... box.error.last() --- - null ... box.error({code = 123, reason = 'test'}) --- - error: test ... box.error(box.error.ILLEGAL_PARAMS, "bla bla") --- - error: Illegal parameters, bla bla ... box.error() --- - error: Illegal parameters, bla bla ... box.error.raise() --- - error: Illegal parameters, bla bla ... e = box.error.last() --- ... e --- - Illegal parameters, bla bla ... e:unpack() --- - type: ClientError code: 1 message: Illegal parameters, bla bla trace: - file: '[C]' line: 4294967295 ... e.type --- - ClientError ... e.code --- - 1 ... e.message --- - Illegal parameters, bla bla ... tostring(e) --- - Illegal parameters, bla bla ... e = nil --- ... box.error.clear() --- ... box.error.last() --- - null ... box.error.raise() --- ... space = box.space.tweedledum --- ... 
-- -- gh-2080: box.error() crashes with wrong parameters box.error(box.error.UNSUPPORTED, "x", "x%s") --- - error: x does not support x%s ... box.error(box.error.UNSUPPORTED, "x") --- - error: 'bad argument #3 to ''?'' (no value)' ... box.error(box.error.UNSUPPORTED) --- - error: 'box.error(): bad arguments' ... ---------------- -- # box.stat ---------------- t = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for k, v in pairs(box.stat()) do table.insert(t, k) end; --- ... for k, v in pairs(box.stat().DELETE) do table.insert(t, k) end; --- ... for k, v in pairs(box.stat.DELETE) do table.insert(t, k) end; --- ... t; --- - - DELETE - SELECT - INSERT - EVAL - CALL - REPLACE - UPSERT - AUTH - ERROR - UPDATE - total - rps - total - rps ... ---------------- -- # box.space ---------------- type(box); --- - table ... type(box.space); --- - table ... t = {}; --- ... for i, v in pairs(space.index[0].parts[1]) do table.insert(t, tostring(i)..' : '..tostring(v)) end; --- ... t; --- - - 'type : unsigned' - 'is_nullable : false' - 'fieldno : 1' ... ---------------- -- # box.slab ---------------- string.match(tostring(box.slab.info()), '^table:') ~= nil; --- - true ... box.slab.info().arena_used >= 0; --- - true ... box.slab.info().arena_size > 0; --- - true ... string.match(tostring(box.slab.stats()), '^table:') ~= nil; --- - true ... t = {}; --- ... for k, v in pairs(box.slab.info()) do table.insert(t, k) end; --- ... t; --- - - items_size - items_used_ratio - quota_size - quota_used_ratio - arena_used_ratio - items_used - quota_used - arena_size - arena_used ... box.runtime.info().used > 0; --- - true ... box.runtime.info().maxalloc > 0; --- - true ... -- -- gh-502: box.slab.info() excessively sparse array -- type(require('yaml').encode(box.slab.info())); --- - string ... ---------------- -- # box.error ---------------- t = {} for k,v in pairs(box.error) do table.insert(t, 'box.error.'..tostring(k)..' : '..tostring(v)) end; --- ... 
t; --- - - 'box.error.UNKNOWN_REPLICA : 62' - 'box.error.WRONG_INDEX_RECORD : 106' - 'box.error.NO_SUCH_TRIGGER : 34' - 'box.error.SEQUENCE_EXISTS : 146' - 'box.error.CHECKPOINT_IN_PROGRESS : 120' - 'box.error.FIELD_TYPE : 23' - 'box.error.WRONG_SPACE_FORMAT : 141' - 'box.error.UNKNOWN_UPDATE_OP : 28' - 'box.error.WRONG_COLLATION_OPTIONS : 151' - 'box.error.CURSOR_NO_TRANSACTION : 80' - 'box.error.TUPLE_REF_OVERFLOW : 86' - 'box.error.ALTER_SEQUENCE : 143' - 'box.error.INVALID_XLOG_NAME : 75' - 'box.error.SAVEPOINT_EMPTY_TX : 60' - 'box.error.NO_SUCH_FUNCTION : 51' - 'box.error.ROLE_LOOP : 87' - 'box.error.TUPLE_NOT_FOUND : 4' - 'box.error.LOADING : 116' - 'box.error.BACKUP_IN_PROGRESS : 129' - 'box.error.DROP_USER : 44' - 'box.error.MODIFY_INDEX : 14' - 'box.error.PASSWORD_MISMATCH : 47' - 'box.error.UNSUPPORTED_ROLE_PRIV : 98' - 'box.error.ACCESS_DENIED : 42' - 'box.error.CANT_CREATE_COLLATION : 150' - 'box.error.USER_EXISTS : 46' - 'box.error.WAL_IO : 40' - 'box.error.PROC_RET : 21' - 'box.error.PRIV_GRANTED : 89' - 'box.error.CREATE_SPACE : 9' - 'box.error.GRANT : 88' - 'box.error.INVALID_INDEX_FILE : 131' - 'box.error.UNKNOWN_SCHEMA_OBJECT : 49' - 'box.error.WRONG_DD_VERSION : 140' - 'box.error.CREATE_ROLE : 84' - 'box.error.VINYL_MAX_TUPLE_SIZE : 139' - 'box.error.LOAD_FUNCTION : 99' - 'box.error.INVALID_XLOG : 74' - 'box.error.PRIV_NOT_GRANTED : 91' - 'box.error.TRANSACTION_CONFLICT : 97' - 'box.error.GUEST_USER_PASSWORD : 96' - 'box.error.PROC_C : 102' - 'box.error.INVALID_RUN_FILE : 132' - 'box.error.NONMASTER : 6' - 'box.error.MEMTX_MAX_TUPLE_SIZE : 110' - 'box.error.DROP_FUNCTION : 71' - 'box.error.CFG : 59' - 'box.error.NO_SUCH_FIELD : 37' - 'box.error.CONNECTION_TO_SELF : 117' - 'box.error.FUNCTION_MAX : 54' - 'box.error.ILLEGAL_PARAMS : 1' - 'box.error.PARTIAL_KEY : 136' - 'box.error.SAVEPOINT_NO_TRANSACTION : 114' - 'box.error.LOAD_MODULE : 138' - 'box.error.FUNCTION_LANGUAGE : 100' - 'box.error.ROLE_GRANTED : 90' - 'box.error.CHECKPOINT_ROLLBACK : 
134' - 'box.error.NO_SUCH_USER : 45' - 'box.error.CANT_UPDATE_PRIMARY_KEY : 94' - 'box.error.EXACT_MATCH : 19' - 'box.error.ROLE_EXISTS : 83' - 'box.error.REPLICASET_UUID_IS_RO : 65' - 'box.error.INDEX_TYPE : 13' - 'box.error.NO_SUCH_PROC : 33' - 'box.error.MEMORY_ISSUE : 2' - 'box.error.KEY_PART_TYPE : 18' - 'box.error.CREATE_FUNCTION : 50' - 'box.error.ALREADY_RUNNING : 126' - 'box.error.NO_SUCH_INDEX : 35' - 'box.error.UNKNOWN_RTREE_INDEX_DISTANCE_TYPE : 103' - 'box.error.TUPLE_FOUND : 3' - 'box.error.VIEW_IS_RO : 113' - 'box.error.LOCAL_INSTANCE_ID_IS_READ_ONLY : 128' - 'box.error.FUNCTION_EXISTS : 52' - 'box.error.UPDATE_ARG_TYPE : 26' - 'box.error.CROSS_ENGINE_TRANSACTION : 81' - 'box.error.FORMAT_MISMATCH_INDEX_PART : 27' - 'box.error.FUNCTION_TX_ACTIVE : 30' - 'box.error.NO_SUCH_ENGINE : 57' - 'box.error.COMMIT_IN_SUB_STMT : 122' - 'box.error.injection : table:
- 'box.error.NULLABLE_MISMATCH : 153' - 'box.error.LAST_DROP : 15' - 'box.error.NO_SUCH_ROLE : 82' - 'box.error.DECOMPRESSION : 124' - 'box.error.CREATE_SEQUENCE : 142' - 'box.error.CREATE_USER : 43' - 'box.error.SPACE_FIELD_IS_DUPLICATE : 149' - 'box.error.INSTANCE_UUID_MISMATCH : 66' - 'box.error.SEQUENCE_OVERFLOW : 147' - 'box.error.SYSTEM : 115' - 'box.error.KEY_PART_IS_TOO_LONG : 118' - 'box.error.TUPLE_FORMAT_LIMIT : 16' - 'box.error.BEFORE_REPLACE_RET : 53' - 'box.error.NO_SUCH_SAVEPOINT : 61' - 'box.error.TRUNCATE_SYSTEM_SPACE : 137' - 'box.error.VY_QUOTA_TIMEOUT : 135' - 'box.error.WRONG_INDEX_OPTIONS : 108' - 'box.error.INVALID_VYLOG_FILE : 133' - 'box.error.INDEX_FIELD_COUNT_LIMIT : 127' - 'box.error.READ_VIEW_ABORTED : 130' - 'box.error.USER_MAX : 56' - 'box.error.PROTOCOL : 104' - 'box.error.TUPLE_NOT_ARRAY : 22' - 'box.error.KEY_PART_COUNT : 31' - 'box.error.ALTER_SPACE : 12' - 'box.error.ACTIVE_TRANSACTION : 79' - 'box.error.EXACT_FIELD_COUNT : 38' - 'box.error.DROP_SEQUENCE : 144' - 'box.error.INVALID_MSGPACK : 20' - 'box.error.MORE_THAN_ONE_TUPLE : 41' - 'box.error.RTREE_RECT : 101' - 'box.error.SUB_STMT_MAX : 121' - 'box.error.UNKNOWN_REQUEST_TYPE : 48' - 'box.error.SPACE_EXISTS : 10' - 'box.error.PROC_LUA : 32' - 'box.error.ROLE_NOT_GRANTED : 92' - 'box.error.NO_SUCH_SPACE : 36' - 'box.error.WRONG_INDEX_PARTS : 107' - 'box.error.DROP_SPACE : 11' - 'box.error.MIN_FIELD_COUNT : 39' - 'box.error.REPLICASET_UUID_MISMATCH : 63' - 'box.error.UPDATE_FIELD : 29' - 'box.error.COMPRESSION : 119' - 'box.error.INVALID_ORDER : 68' - 'box.error.INDEX_EXISTS : 85' - 'box.error.SPLICE : 25' - 'box.error.UNKNOWN : 0' - 'box.error.DROP_PRIMARY_KEY : 17' - 'box.error.NULLABLE_PRIMARY : 152' - 'box.error.NO_SUCH_SEQUENCE : 145' - 'box.error.RELOAD_CFG : 58' - 'box.error.INVALID_UUID : 64' - 'box.error.INJECTION : 8' - 'box.error.TIMEOUT : 78' - 'box.error.IDENTIFIER : 70' - 'box.error.ITERATOR_TYPE : 72' - 'box.error.REPLICA_MAX : 73' - 
'box.error.MISSING_REQUEST_FIELD : 69' - 'box.error.MISSING_SNAPSHOT : 93' - 'box.error.WRONG_SPACE_OPTIONS : 111' - 'box.error.READONLY : 7' - 'box.error.UNSUPPORTED : 5' - 'box.error.UPDATE_INTEGER_OVERFLOW : 95' - 'box.error.NO_CONNECTION : 77' - 'box.error.INVALID_XLOG_ORDER : 76' - 'box.error.UPSERT_UNIQUE_SECONDARY_KEY : 105' - 'box.error.ROLLBACK_IN_SUB_STMT : 123' - 'box.error.WRONG_SCHEMA_VERSION : 109' - 'box.error.UNSUPPORTED_INDEX_FEATURE : 112' - 'box.error.INDEX_PART_TYPE_MISMATCH : 24' - 'box.error.INVALID_XLOG_TYPE : 125' ... test_run:cmd("setopt delimiter ''"); --- - true ... -- A test case for Bug#901674 -- No way to inspect exceptions from Box in Lua -- function myinsert(tuple) box.space.tweedledum:insert(tuple) end --- ... pcall(myinsert, {99, 1, 1953719668}) --- - true ... pcall(myinsert, {1, 'hello'}) --- - true ... pcall(myinsert, {1, 'hello'}) --- - false - Duplicate key exists in unique index 'primary' in space 'tweedledum' ... box.space.tweedledum:truncate() --- ... myinsert = nil --- ... -- A test case for gh-37: print of 64-bit number ffi = require('ffi') --- ... 1, 1 --- - 1 - 1 ... tonumber64(1), 1 --- - 1 - 1 ... -- Testing 64bit tonumber64() --- - error: 'bad argument #1 to ''?'' (value expected)' ... tonumber64('invalid number') --- - null ... tonumber64(123) --- - 123 ... tonumber64('123') --- - 123 ... type(tonumber64('4294967296')) == 'number' --- - true ... tonumber64('9223372036854775807') == tonumber64('9223372036854775807') --- - true ... tonumber64('9223372036854775807') - tonumber64('9223372036854775800') --- - 7 ... tonumber64('18446744073709551615') == tonumber64('18446744073709551615') --- - true ... tonumber64('18446744073709551615') + 1 --- - 0 ... tonumber64(-1) --- - -1 ... tonumber64('184467440737095516155') --- - null ... string.byte(require('msgpack').encode(tonumber64(123))) --- - 123 ... -- A test case for Bug#1061747 'tonumber64 is not transitive' tonumber64(tonumber64(2)) --- - 2 ... 
tostring(tonumber64(tonumber64(3))) --- - '3' ... -- A test case for Bug#1131108 'tonumber64 from negative int inconsistency' tonumber64(-1) --- - -1 ... tonumber64(-1LL) --- - -1 ... tonumber64(-1ULL) --- - 18446744073709551615 ... -1 --- - -1 ... -1LL --- - -1 ... -1ULL --- - 18446744073709551615 ... tonumber64(-1.0) --- - -1 ... 6LL - 7LL --- - -1 ... tostring(tonumber64('1234567890123')) == '1234567890123' --- - true ... tostring(tonumber64('12345678901234')) == '12345678901234' --- - true ... tostring(tonumber64('123456789012345')) == '123456789012345ULL' --- - true ... tostring(tonumber64('1234567890123456')) == '1234567890123456ULL' --- - true ... tonumber64('0x12') == 18 --- - true ... tonumber64('0x12', 16) == 18 --- - true ... tonumber64('0x12', 17) == nil --- - true ... tonumber64('0b01') == 1 --- - true ... tonumber64('0b01', 2) == 1 --- - true ... tonumber64('0b01', 3) == nil --- - true ... tonumber64(' 0b1 ') == 1 --- - true ... tonumber64(' 0b1 ', 'badbase') --- - error: 'bad argument #2 to ''?'' (number expected, got string)' ... tonumber64(' 0b1 ', 123) -- big base --- - error: 'bad argument #2 to ''?'' (base out of range)' ... tonumber64('12345', 123) -- big base --- - error: 'bad argument #2 to ''?'' (base out of range)' ... tonumber64('0xfffff') == 1048575 --- - true ... tonumber64('0b111111111111111111') == 262143 --- - true ... tonumber64('20', 36) --- - 72 ... tonumber64("", 10) --- - null ... tonumber64("", 32) --- - null ... tonumber64("-1") --- - -1 ... tonumber64("-0x16") --- - -22 ... tonumber64("-0b11") --- - -3 ... tonumber64(" -0x16 ") --- - -22 ... tonumber64(" -0b11 ") --- - -3 ... -- numbers/cdata with base = 10 - return as is tonumber64(100) --- - 100 ... tonumber64(100, 10) --- - 100 ... tonumber64(100LL) --- - 100 ... tonumber64(100ULL, 10) --- - 100 ... tonumber64(-100LL) --- - -100 ... tonumber64(-100LL, 10) --- - -100 ... tonumber64(ffi.new('char', 10)) --- - 10 ... tonumber64(ffi.new('short', 10)) --- - 10 ... 
tonumber64(ffi.new('int', 10)) --- - 10 ... tonumber64(ffi.new('int8_t', 10)) --- - 10 ... tonumber64(ffi.new('int16_t', 10)) --- - 10 ... tonumber64(ffi.new('int32_t', 10)) --- - 10 ... tonumber64(ffi.new('int64_t', 10)) --- - 10 ... tonumber64(ffi.new('unsigned char', 10)) --- - 10 ... tonumber64(ffi.new('unsigned short', 10)) --- - 10 ... tonumber64(ffi.new('unsigned int', 10)) --- - 10 ... tonumber64(ffi.new('unsigned int', 10)) --- - 10 ... tonumber64(ffi.new('uint8_t', 10)) --- - 10 ... tonumber64(ffi.new('uint16_t', 10)) --- - 10 ... tonumber64(ffi.new('uint32_t', 10)) --- - 10 ... tonumber64(ffi.new('uint64_t', 10)) --- - 10 ... tonumber64(ffi.new('float', 10)) --- - 10 ... tonumber64(ffi.new('double', 10)) --- - 10 ... -- number/cdata with custom `base` - is not supported tonumber64(100, 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(100LL, 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(-100LL, 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(100ULL, 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('char', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('short', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('int', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('int8_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('int16_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('int32_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('int64_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('unsigned char', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... 
tonumber64(ffi.new('unsigned short', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('unsigned int', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('unsigned int', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('uint8_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('uint16_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('uint32_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('uint64_t', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('float', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... tonumber64(ffi.new('double', 10), 2) --- - error: 'bad argument #1 to ''?'' (string expected)' ... -- invalid types - return nil ffi.cdef("struct __tonumber64_test {};") --- ... tonumber64(ffi.new('struct __tonumber64_test')) --- - null ... tonumber64(nil) --- - null ... tonumber64(function() end) --- - null ... tonumber64({}) --- - null ... collectgarbage('collect') --- - 0 ... -- dostring() dostring('abc') --- - error: '[string "abc"]:1: ''='' expected near ''''' ... dostring('abc=2') --- ... dostring('return abc') --- - 2 ... dostring('return ...', 1, 2, 3) --- - 1 - 2 - 3 ... -- A test case for Bug#1043804 lua error() -> server crash error() --- - error: null ... -- A test case for bitwise operations bit.lshift(1, 32) --- - 1 ... bit.band(1, 3) --- - 1 ... bit.bor(1, 2) --- - 3 ... space:truncate() --- ... dofile('fifo.lua') --- ... fifomax --- - 5 ... fifo_push(space, 1, 1) --- - [1, 4, 5, 1, 0, 0, 0, 0] ... fifo_push(space, 1, 2) --- - [1, 5, 6, 1, 2, 0, 0, 0] ... fifo_push(space, 1, 3) --- - [1, 6, 7, 1, 2, 3, 0, 0] ... fifo_push(space, 1, 4) --- - [1, 7, 8, 1, 2, 3, 4, 0] ... fifo_push(space, 1, 5) --- - [1, 8, 4, 1, 2, 3, 4, 5] ... 
fifo_push(space, 1, 6) --- - [1, 4, 5, 6, 2, 3, 4, 5] ... fifo_push(space, 1, 7) --- - [1, 5, 6, 6, 7, 3, 4, 5] ... fifo_push(space, 1, 8) --- - [1, 6, 7, 6, 7, 8, 4, 5] ... fifo_top(space, 1) --- - 8 ... space:delete{1} --- - [1, 6, 7, 6, 7, 8, 4, 5] ... fifo_top(space, 1) --- - 0 ... space:delete{1} --- - [1, 4, 4, 0, 0, 0, 0, 0] ... space:drop() --- ... test_run:cmd("clear filter") --- - true ... -- test test_run:grep_log() require('log').info('Incorrect password supplied') --- ... test_run:grep_log("default", "password") --- - password ... -- some collation test s = box.schema.space.create('test') --- ... not not s:create_index('test1', {parts = {{1, 'string', collation = 'Unicode'}}}) --- - true ... not not s:create_index('test2', {parts = {{2, 'string', collation = 'UNICODE'}}}) --- - true ... not not s:create_index('test3', {parts = {{3, 'string', collation = 'UnIcOdE'}}}) -- I'd prefer to panic on that --- - true ... s:create_index('test4', {parts = {{4, 'string'}}}).parts --- - - type: string is_nullable: false fieldno: 4 ... s:create_index('test5', {parts = {{5, 'string', collation = 'Unicode'}}}).parts --- - - type: string is_nullable: false collation: unicode fieldno: 5 ... s:drop() --- ... s = box.schema.space.create('test') --- ... not not s:create_index('test1', {parts = {{1, 'scalar', collation = 'unicode_ci'}}}) --- - true ... s:replace{1} s:replace{1.1} s:replace{false} --- ... s:replace{'Блин'} s:replace{'Ёж'} s:replace{'ешь'} s:replace{'Же'} s:replace{'Уже'} --- ... s:replace{'drop'} s:replace{'table'} s:replace{'users'} --- ... s:select{} --- - - [false] - [1] - [1.1] - ['drop'] - ['table'] - ['users'] - ['Блин'] - ['Ёж'] - ['ешь'] - ['Же'] - ['Уже'] ... s:select{'еж'} --- - - ['Ёж'] ... s:drop() --- ... s = box.schema.space.create('test') --- ... not not s:create_index('test1', {parts = {{1, 'number', collation = 'unicode_ci'}}}) --- - error: 'Wrong index options (field 1): collation is reasonable only for string and scalar parts' ... 
not not s:create_index('test2', {parts = {{2, 'unsigned', collation = 'unicode_ci'}}}) --- - error: 'Wrong index options (field 1): collation is reasonable only for string and scalar parts' ... not not s:create_index('test3', {parts = {{3, 'integer', collation = 'unicode_ci'}}}) --- - error: 'Wrong index options (field 1): collation is reasonable only for string and scalar parts' ... not not s:create_index('test4', {parts = {{4, 'boolean', collation = 'unicode_ci'}}}) --- - error: 'Wrong index options (field 1): collation is reasonable only for string and scalar parts' ... s:drop() --- ... -- -- gh-2068 no error for invalid user during space creation -- s = box.schema.space.create('test', {user="no_such_user"}) --- - error: User 'no_such_user' is not found ... -- Too long WAL write warning (gh-2743). s = box.schema.space.create('test') --- ... _ = s:create_index('pk') --- ... too_long_threshold = box.cfg.too_long_threshold --- ... box.cfg{too_long_threshold = 0} -- log everything --- ... expected_rows = 3 --- ... expected_lsn = box.info.lsn + 1 --- ... box.begin() for i = 1, expected_rows do s:insert{i} end box.commit() --- ... msg = test_run:grep_log('default', 'too long WAL write.*') --- ... rows, lsn = string.match(msg, '(%d+) rows at LSN (%d+)') --- ... rows = tonumber(rows) --- ... lsn = tonumber(lsn) --- ... rows == expected_rows --- - true ... lsn == expected_lsn --- - true ... box.cfg{too_long_threshold = too_long_threshold} --- ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/leak.test.lua0000664000000000000000000000127213306560010020346 0ustar rootroot-- -- gh-853 - memory leak on start if replace in xlog -- env = require('test_run') test_run = env.new() test_run:cmd("create server tiny with script='box/tiny.lua'") test_run:cmd("start server tiny") test_run:cmd("switch tiny") _ = box.schema.space.create('test') _ = box.space.test:create_index('pk') test_run:cmd("setopt delimiter ';'") for i=1, 500 do box.space.test:replace{1, string.rep('a', 50000)} -- or we run out of memory too soon collectgarbage('collect') end; test_run:cmd("setopt delimiter ''"); test_run:cmd('restart server tiny') box.space.test:len() box.space.test:drop() test_run:cmd("switch default") test_run:cmd("stop server tiny") test_run:cmd("cleanup server tiny") tarantool_1.9.1.26.g63eb81e3c/test/box/stat_net.test.lua0000664000000000000000000000131213306565107021262 0ustar rootroot-- clear statistics env = require('test_run') test_run = env.new() test_run:cmd('restart server default') box.stat.net.SENT -- zero box.stat.net.RECEIVED -- zero space = box.schema.space.create('tweedledum') box.schema.user.grant('guest','read,write,execute','universe') index = space:create_index('primary', { type = 'hash' }) remote = require 'net.box' LISTEN = require('uri').parse(box.cfg.listen) cn = remote.connect(LISTEN.host, LISTEN.service) cn.space.tweedledum:select() --small request box.stat.net.SENT.total > 0 box.stat.net.RECEIVED.total > 0 -- box.stat.net.EVENTS.total > 0 -- box.stat.net.LOCKS.total > 0 space:drop() cn:close() box.schema.user.revoke('guest','read,write,execute','universe') tarantool_1.9.1.26.g63eb81e3c/test/box/backup.result0000664000000000000000000000702013306565107020467 0ustar rootrootfio = require 'fio' --- ... log = require 'log' --- ... test_run = require('test_run').new() --- ... test_run:cleanup_cluster() --- ... -- Make sure that garbage collection is disabled -- while backup is in progress. 
default_checkpoint_count = box.cfg.checkpoint_count --- ... box.cfg{checkpoint_count = 1} --- ... ENGINES = {'memtx', 'vinyl'} --- ... -- Directories where files can be stored, -- from longest to shortest. CFG_DIRS = {box.cfg.wal_dir, box.cfg.memtx_dir, box.cfg.vinyl_dir} --- ... table.sort(CFG_DIRS, function(a, b) return #a > #b end) --- ... -- Create and populate tables. Make a snapshot to backup. _ = test_run:cmd("setopt delimiter ';'") --- ... for _, engine in ipairs(ENGINES) do s = box.schema.space.create(engine, {engine=engine}) _ = s:create_index('pk') for i=1,3 do s:insert{i, engine..i} end end box.snapshot() _ = test_run:cmd("setopt delimiter ''"); --- ... -- Add more data, but don't make a snapshot. -- These data won't make it to the backup. _ = test_run:cmd("setopt delimiter ';'") --- ... for _, engine in ipairs(ENGINES) do s = box.space[engine] for i=1,3 do s:insert{i*10} end end _ = test_run:cmd("setopt delimiter ''"); --- ... -- Start backup. files = box.backup.start() --- ... box.backup.start() -- error: backup is already in progress --- - error: Backup is already in progress ... -- Make sure new snapshots are not included into an ongoing backups. _ = test_run:cmd("setopt delimiter ';'") --- ... -- Even though checkpoint_count is set to 1, this must not trigger -- garbage collection, because the checkpoint is pinned by backup. for _, engine in ipairs(ENGINES) do s = box.space[engine] for i=1,3 do s:insert{i*100} end end box.snapshot() _ = test_run:cmd("setopt delimiter ''"); --- ... -- Prepare backup directory backup_dir = fio.pathjoin(fio.cwd(), 'backup') --- ... _ = os.execute(string.format('rm -rf %s', backup_dir)) --- ... log.info(string.format('save backup to %s', backup_dir)) --- ... -- Copy files to the backup directory _ = test_run:cmd("setopt delimiter ';'") --- ... 
for _, path in ipairs(files) do suffix = string.gsub(path, '.*%.', '') if suffix == 'xlog' then dir = box.cfg.wal_dir elseif suffix == 'snap' then dir = box.cfg.memtx_dir elseif suffix == 'vylog' or suffix == 'run' or suffix == 'index' then dir = box.cfg.vinyl_dir end assert(dir ~= nil) rel_path = string.sub(path, string.len(dir) + 2) dest_dir = fio.pathjoin(backup_dir, fio.dirname(rel_path)) log.info(string.format('copy %s', rel_path)) os.execute(string.format('mkdir -p %s && cp %s %s', dest_dir, path, dest_dir)) end _ = test_run:cmd("setopt delimiter ''"); --- ... box.backup.stop() --- ... -- Check that we can restore from the backup. _ = test_run:cmd(string.format("create server copy with script='box/backup_test.lua', workdir='%s'", backup_dir)) --- ... _ = test_run:cmd("start server copy") --- ... _ = test_run:cmd('switch copy') --- ... box.space['memtx']:select() --- - - [1, 'memtx1'] - [2, 'memtx2'] - [3, 'memtx3'] ... box.space['vinyl']:select() --- - - [1, 'vinyl1'] - [2, 'vinyl2'] - [3, 'vinyl3'] ... _ = test_run:cmd('switch default') --- ... _ = test_run:cmd("stop server copy") --- ... _ = test_run:cmd("cleanup server copy") --- ... -- Check that backup still works. _ = box.backup.start() --- ... box.backup.stop() --- ... -- Cleanup. _ = os.execute(string.format('rm -rf %s', backup_dir)) --- ... for _, engine in ipairs(ENGINES) do box.space[engine]:drop() end --- ... box.cfg{checkpoint_count = default_checkpoint_count} --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/tuple_bench.c0000664000000000000000000000431213306560010020403 0ustar rootroot#include "module.h" #include #include double proctime(void) { struct timeval tv; gettimeofday(&tv, NULL); return (double) tv.tv_sec + 1e-6 * tv.tv_usec; } int tuple_bench(box_function_ctx_t *ctx, const char *args, const char *args_end) { static const char *SPACE_NAME = "tester"; static const char *INDEX_NAME = "primary"; uint32_t space_id = box_space_id_by_name(SPACE_NAME, strlen(SPACE_NAME)); uint32_t index_id = box_index_id_by_name(space_id, INDEX_NAME, strlen(INDEX_NAME)); if (space_id == BOX_ID_NIL || index_id == BOX_ID_NIL) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "Can't find index %s in space %s", INDEX_NAME, SPACE_NAME); } say_debug("space_id = %u, index_id = %u", space_id, index_id); char tuple_buf[4][64]; char *tuple_end[4] = {tuple_buf[0], tuple_buf[1], tuple_buf[2], tuple_buf[3]}; const uint64_t test_numbers[4] = {2, 2, 1, 3}; const char test_strings[4][4] = {"bce", "abb", "abb", "ccd"}; /* get key types from args, and build test tuples with according types*/ uint32_t arg_count = mp_decode_array(&args); if (arg_count < 1) { return box_error_set(__FILE__, __LINE__, ER_PROC_C, "%s", "invalid argument count"); } uint32_t n = mp_decode_array(&args); uint32_t knum = 0, kstr = 0; for (uint32_t k = 0; k < 4; k++) { const char *field = args; tuple_end[k] = mp_encode_array(tuple_end[k], n); for (uint32_t i = 0; i < n; i++, field += 3) { if (mp_decode_strl(&field) != 3) { say_error("Arguments must be \"STR\" or \"NUM\""); return -1; } if (memcmp(field, "NUM", 3) == 0) { tuple_end[k] = mp_encode_uint(tuple_end[k], test_numbers[knum]); knum = (knum + 1) % 4; } else if (memcmp(field, "STR", 3) == 0) { tuple_end[k] = mp_encode_str(tuple_end[k], test_strings[kstr], strlen(test_strings[kstr])); kstr = (kstr + 1) % 4; } else { say_error("Arguments must be \"STR\" or \"NUM\""); return -1; } } } double t = proctime(); box_tuple_t *tuple; for 
(int i = 0; i < 80000000; i++) { int k = (i + (i >> 2) + (i >> 5) + 13) & 3; box_index_min(space_id, index_id, tuple_buf[k], tuple_end[k], &tuple); } t = proctime() - t; say_info("%lf\n", t); return 0; } tarantool_1.9.1.26.g63eb81e3c/test/box/rtree_misc.result0000664000000000000000000004052013306560010021344 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... s = box.schema.space.create('spatial') --- ... -- rtree index as primary key must be forbidden (unique) i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {1, 'array'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': RTREE index can not be unique' ... -- any non-unique index as primary key must be forbidden i = s:create_index('spatial', { type = 'hash', unique = false, parts = {1, 'unsigned'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': primary key must be unique' ... i = s:create_index('spatial', { type = 'tree', unique = false, parts = {1, 'unsigned'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': primary key must be unique' ... i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {1, 'array'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': primary key must be unique' ... -- tree and hash indexes over array field is not possible i = s:create_index('primary', { type = 'tree', parts = {1, 'array'}}) --- - error: 'Can''t create or modify index ''primary'' in space ''spatial'': field type ''array'' is not supported' ... i = s:create_index('primary', { type = 'hash', parts = {1, 'array'}}) --- - error: 'Can''t create or modify index ''primary'' in space ''spatial'': field type ''array'' is not supported' ... -- normal indexes i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... i = s:create_index('secondary', { type = 'hash', parts = {2, 'unsigned'}}) --- ... 
-- adding a tuple with array instead of num will fail i = s:insert{{1, 2, 3}, 4} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... i = s:insert{1, {2, 3, 4}} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... -- rtree index must be one-part i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {1, 'array', 2, 'array'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': RTREE index key can not be multipart' ... -- unique rtree index is not possible i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {3, 'array'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': RTREE index can not be unique' ... -- num rtree index is not possible i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'unsigned'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': RTREE index field type must be ARRAY' ... -- str rtree index is not possible i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'string'}}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': RTREE index field type must be ARRAY' ... -- normal rtree index i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}}) --- ... -- inserting wrong values (should fail) s:insert{1, 2, 3} --- - error: 'Tuple field 3 type does not match one required by operation: expected array' ... s:insert{1, 2, "3"} --- - error: 'Tuple field 3 type does not match one required by operation: expected array' ... s:insert{1, 2, nil, 3} --- - error: 'Tuple field 3 type does not match one required by operation: expected array' ... s:insert{1, 2, {}} --- - error: 'RTree: Field must be an array with 2 (point) or 4 (rectangle/box) numeric coordinates' ... 
s:insert{1, 2, {"3", "4", "5", "6"}} --- - error: 'Tuple field 1 type does not match one required by operation: expected number' ... s:insert{1, 2, {nil, 4, 5, 6}} --- - error: 'Tuple field 1 type does not match one required by operation: expected number' ... s:insert{1, 2, {3, {4}, 5, 6}} --- - error: 'Tuple field 2 type does not match one required by operation: expected number' ... s:insert{1, 2, {3, 4, {}, 6}} --- - error: 'Tuple field 3 type does not match one required by operation: expected number' ... s:insert{1, 2, {3, 4, 5, "6"}} --- - error: 'Tuple field 4 type does not match one required by operation: expected number' ... s:insert{1, 2, {3}} --- - error: 'RTree: Field must be an array with 2 (point) or 4 (rectangle/box) numeric coordinates' ... s:insert{1, 2, {3, 4, 5}} --- - error: 'RTree: Field must be an array with 2 (point) or 4 (rectangle/box) numeric coordinates' ... -- inserting good value s:insert{1, 2, {3, 4, 5, 6}} --- - [1, 2, [3, 4, 5, 6]] ... -- invalid alters s.index.spatial:alter({unique = true}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': RTREE index can not be unique' ... s.index.spatial:alter({type = 'tree'}) --- - error: 'Can''t create or modify index ''spatial'' in space ''spatial'': field type ''array'' is not supported' ... box.space[box.schema.SPACE_ID]:update({s.id}, {{"=", 4, 'vinyl'}}) --- - error: 'Can''t modify space ''spatial'': can not change space engine' ... -- chech that truncate works s.index.spatial:select({0, 0, 10, 10}, {iterator = 'le'}) --- - - [1, 2, [3, 4, 5, 6]] ... s:truncate() --- ... s.index.spatial:select({0, 0, 10, 10}, {iterator = 'le'}) --- - [] ... -- inserting lots of equvalent records for i = 1,500 do s:insert{i, i, {3, 4, 5, 6}} end --- ... -- and some records for chaos for i = 1,10 do for j = 1,10 do s:insert{500+i+j*20, 500+i*20+j, {i, j, i, j}} end end --- ... s.index.spatial:count() --- - 600 ... #s.index.spatial:select({3, 4, 5, 6}) --- - 500 ... 
for i = 1,500,2 do s:delete{i} end --- ... s.index.spatial:count() --- - 350 ... #s.index.spatial:select({3, 4, 5, 6}) --- - 250 ... s.index.spatial:min() --- - error: Index 'spatial' (RTREE) of space 'spatial' (memtx) does not support min() ... s.index.spatial:max() --- - error: Index 'spatial' (RTREE) of space 'spatial' (memtx) does not support max() ... -- seems that drop can't fail s.index.spatial:drop() --- ... s.index.spatial:select({}) --- - error: '[string "return s.index.spatial:select({}) "]:1: attempt to index field ''spatial'' (a nil value)' ... s:drop() --- ... s = box.schema.space.create('vinyl', {engine = 'vinyl'}) --- ... -- rtree indexes are not enabled in vinyl i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {3, 'array'}}) --- - error: Unsupported index type supplied for index 'spatial' in space 'vinyl' ... i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... -- ... even secondary i = s:create_index('spatial', { type = 'rtree', unique = true, parts = {3, 'array'}}) --- - error: Unsupported index type supplied for index 'spatial' in space 'vinyl' ... s:drop() --- ... -- rtree in temp space must work fine s = box.schema.space.create('spatial', {temporary = true}) --- ... i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}}) --- ... s:insert{1, 2, {3, 4, 5, 6}} --- - [1, 2, [3, 4, 5, 6]] ... s.index.spatial:select({0, 0, 10, 10}, {iterator = 'le'}) --- - - [1, 2, [3, 4, 5, 6]] ... s:drop() --- ... -- snapshot test s = box.schema.space.create('spatial') --- ... i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}}) --- ... for i = 1,10 do s:insert{i, i, {i, i, i + 1, i + 1}} end --- ... box.snapshot() --- - ok ... 
i:select({0, 0}, {iterator = 'neighbor'}) --- - - [1, 1, [1, 1, 2, 2]] - [2, 2, [2, 2, 3, 3]] - [3, 3, [3, 3, 4, 4]] - [4, 4, [4, 4, 5, 5]] - [5, 5, [5, 5, 6, 6]] - [6, 6, [6, 6, 7, 7]] - [7, 7, [7, 7, 8, 8]] - [8, 8, [8, 8, 9, 9]] - [9, 9, [9, 9, 10, 10]] - [10, 10, [10, 10, 11, 11]] ... test_run:cmd("restart server default") s = box.space.spatial --- ... i = s.index.spatial --- ... i:select({0, 0}, {iterator = 'neighbor'}) --- - - [1, 1, [1, 1, 2, 2]] - [2, 2, [2, 2, 3, 3]] - [3, 3, [3, 3, 4, 4]] - [4, 4, [4, 4, 5, 5]] - [5, 5, [5, 5, 6, 6]] - [6, 6, [6, 6, 7, 7]] - [7, 7, [7, 7, 8, 8]] - [8, 8, [8, 8, 9, 9]] - [9, 9, [9, 9, 10, 10]] - [10, 10, [10, 10, 11, 11]] ... s:drop() --- ... s = box.schema.space.create('spatial') --- ... i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... i = s:create_index('spatial', { type = 'rtree', unique = false, parts = {3, 'array'}, dimension = 4}) --- ... for i = 1,10 do s:insert{i, i, {i, i, i, i, i + 1, i + 1, i + 1, i + 1}} end --- ... box.snapshot() --- - ok ... i:select({0, 0, 0, 0}, {iterator = 'neighbor'}) --- - - [1, 1, [1, 1, 1, 1, 2, 2, 2, 2]] - [2, 2, [2, 2, 2, 2, 3, 3, 3, 3]] - [3, 3, [3, 3, 3, 3, 4, 4, 4, 4]] - [4, 4, [4, 4, 4, 4, 5, 5, 5, 5]] - [5, 5, [5, 5, 5, 5, 6, 6, 6, 6]] - [6, 6, [6, 6, 6, 6, 7, 7, 7, 7]] - [7, 7, [7, 7, 7, 7, 8, 8, 8, 8]] - [8, 8, [8, 8, 8, 8, 9, 9, 9, 9]] - [9, 9, [9, 9, 9, 9, 10, 10, 10, 10]] - [10, 10, [10, 10, 10, 10, 11, 11, 11, 11]] ... test_run:cmd("restart server default") s = box.space.spatial --- ... i = s.index.spatial --- ... i:select({0, 0, 0, 0}, {iterator = 'neighbor'}) --- - - [1, 1, [1, 1, 1, 1, 2, 2, 2, 2]] - [2, 2, [2, 2, 2, 2, 3, 3, 3, 3]] - [3, 3, [3, 3, 3, 3, 4, 4, 4, 4]] - [4, 4, [4, 4, 4, 4, 5, 5, 5, 5]] - [5, 5, [5, 5, 5, 5, 6, 6, 6, 6]] - [6, 6, [6, 6, 6, 6, 7, 7, 7, 7]] - [7, 7, [7, 7, 7, 7, 8, 8, 8, 8]] - [8, 8, [8, 8, 8, 8, 9, 9, 9, 9]] - [9, 9, [9, 9, 9, 9, 10, 10, 10, 10]] - [10, 10, [10, 10, 10, 10, 11, 11, 11, 11]] ... 
s:drop() --- ... -- distance type iopts = { type = 'rtree', unique = false, parts = {2, 'array'} } --- ... iopts['distance'] = 'euclid' --- ... s = box.schema.space.create('spatial') --- ... i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... i = s:create_index('spatial', iopts) --- ... s:insert{1, {0, 5}} --- - [1, [0, 5]] ... s:insert{2, {5, 0}} --- - [2, [5, 0]] ... s:insert{3, {5, 5}} --- - [3, [5, 5]] ... s:insert{4, {8, 0}} --- - [4, [8, 0]] ... s:insert{5, {0, 8}} --- - [5, [0, 8]] ... s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) --- - - [1, [0, 5]] - [2, [5, 0]] - [3, [5, 5]] - [4, [8, 0]] - [5, [0, 8]] ... s:drop() --- ... iopts = { type = 'rtree', unique = false, parts = {2, 'array'} } --- ... iopts['distance'] = 'manhattan' --- ... s = box.schema.space.create('spatial') --- ... i = s:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... i = s:create_index('spatial', iopts) --- ... s:insert{1, {0, 5}} --- - [1, [0, 5]] ... s:insert{2, {5, 0}} --- - [2, [5, 0]] ... s:insert{3, {5, 5}} --- - [3, [5, 5]] ... s:insert{4, {8, 0}} --- - [4, [8, 0]] ... s:insert{5, {0, 8}} --- - [5, [0, 8]] ... s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) --- - - [1, [0, 5]] - [2, [5, 0]] - [4, [8, 0]] - [5, [0, 8]] - [3, [5, 5]] ... test_run:cmd("restart server default") s = box.space.spatial --- ... i = s.index.spatial --- ... s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) --- - - [1, [0, 5]] - [2, [5, 0]] - [4, [8, 0]] - [5, [0, 8]] - [3, [5, 5]] ... box.snapshot() --- - ok ... test_run:cmd("restart server default") utils = require('utils') --- ... s = box.space.spatial --- ... i = s.index.spatial --- ... s.index.spatial:select({{0, 0}}, {iterator = 'neighbor'}) --- - - [1, [0, 5]] - [2, [5, 0]] - [4, [8, 0]] - [5, [0, 8]] - [3, [5, 5]] ... s:drop() --- ... -- RTREE QA https://github.com/tarantool/tarantool/issues/976 s = box.schema.space.create('s') --- ... i = s:create_index('p') --- ... 
-- dimension too big i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}, dimension = 21}) --- - error: 'Index ''s'' (RTREE) of space ''s'' (memtx) does not support dimension (21): must belong to range [1, 20]' ... -- dimension too low i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}, dimension = 0}) --- - error: 'Index ''s'' (RTREE) of space ''s'' (memtx) does not support dimension (0): must belong to range [1, 20]' ... -- cant be unique i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}, unique = true}) --- - error: 'Can''t create or modify index ''s'' in space ''s'': RTREE index can not be unique' ... -- wrong parts i = s:create_index('s', {type = 'rtree', parts = {2, 'unsigned'}}) --- - error: 'Can''t create or modify index ''s'' in space ''s'': RTREE index field type must be ARRAY' ... i = s:create_index('s', {type = 'rtree', parts = {2, 'array', 3, 'array'}}) --- - error: 'Can''t create or modify index ''s'' in space ''s'': RTREE index key can not be multipart' ... -- defaults test i = s:create_index('s', { type = 'rtree' }) --- ... i.dimension --- - 2 ... i.parts --- - - type: array is_nullable: false fieldno: 2 ... i:drop() --- ... -- hide first (id) field of tuple function f(t) local r = {} for i, v in ipairs(t) do r[i] = v end r[1] = 0 return setmetatable (r, {__serialize = 'seq'}) end --- ... -- new index through inserting to _index space f(box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{2, 'array'}}}) --- - [0, 2, 's', 'rtree', {'unique': false}, [[2, 'array']]] ... s.index.s:drop() --- ... -- with wrong args box.space._index:insert{s.id, 2, 's', 'rtree', nil, {{2, 'array'}}} --- - error: 'Tuple field 5 type does not match one required by operation: expected map' ... box.space._index:insert{s.id, 2, 's', 'rtree', utils.setmap({}), {{2, 'array'}}} --- - error: 'Can''t create or modify index ''s'' in space ''s'': RTREE index can not be unique' ... 
box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, dimension = 22}, {{2, 'array'}}} --- - error: 'Index ''s'' (RTREE) of space ''s'' (memtx) does not support dimension (22): must belong to range [1, 20]' ... box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, dimension = 'dimension'}, {{2, 'array'}}} --- - error: 'Wrong index options (field 4): ''dimension'' must be integer' ... box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{2, 'unsigned'}}} --- - error: 'Can''t create or modify index ''s'' in space ''s'': RTREE index field type must be ARRAY' ... box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{2, 'time'}}} --- - error: 'Wrong index parts: unknown field type; expected field1 id (number), field1 type (string), ...' ... box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{'no','time'}}} --- - error: 'Wrong index parts: field id must be an integer; expected field1 id (number), field1 type (string), ...' ... box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, distance = 'lobachevsky'}, {{2, 'array'}}} --- - error: 'Wrong index options (field 4): distance must be either ''euclid'' or ''manhattan''' ... box.space._index:insert{s.id, 2, 's', 'rtee', {unique = false}, {{2, 'array'}}} --- - error: Unsupported index type supplied for index 's' in space 's' ... box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{}}} --- - error: 'Wrong index parts: expected a non-empty array; expected field1 id (number), field1 type (string), ...' ... -- unknown args checked f(box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false, holy = 'cow'}, {{2, 'array'}}}) --- - error: 'Wrong index options (field 4): unexpected option ''holy''' ... -- unknown part args are no more ignored (#2649) f(box.space._index:insert{s.id, 2, 's', 'rtree', {unique = false}, {{field=2, type='array', part = 'opts'}}}) --- - error: 'Wrong index options (field 1): unexpected option ''part''' ... 
-- alter i = s:create_index('s', {type = 'rtree', parts = {2, 'array'}}) --- ... i:alter{type = 'tree' } --- - error: 'Can''t create or modify index ''s'' in space ''s'': field type ''array'' is not supported' ... i:alter{dimension = 3 } --- ... s:insert{1, {1, 1} } --- - error: 'RTree: Field must be an array with 3 (point) or 6 (rectangle/box) numeric coordinates' ... s:insert{1, {1, 1, 1} } --- - [1, [1, 1, 1]] ... i:alter{dimension = 4 } --- - error: 'RTree: Field must be an array with 4 (point) or 8 (rectangle/box) numeric coordinates' ... s:select{} --- - - [1, [1, 1, 1]] ... s:insert{2, {2, 0, 0} } --- - [2, [2, 0, 0]] ... i:alter{distance = 'euclid' } --- ... i:select({0, 0, 0}, {iterator = 'neighbor'}) --- - - [1, [1, 1, 1]] - [2, [2, 0, 0]] ... i:alter{distance = 'manhattan' } --- ... i:select({0, 0, 0}, {iterator = 'neighbor'}) --- - - [2, [2, 0, 0]] - [1, [1, 1, 1]] ... -- gh-1467: invalid iterator type i:select({1, 2, 3, 4, 5, 6}, {iterator = 'BITS_ALL_SET' } ) --- - error: Index 's' (RTREE) of space 's' (memtx) does not support requested iterator type ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/select.result0000664000000000000000000003021213306560010020464 0ustar rootrootmsgpack = require('msgpack') --- ... env = require('test_run') --- ... test_run = env.new() --- ... s = box.schema.space.create('select', { temporary = true }) --- ... index1 = s:create_index('primary', { type = 'tree' }) --- ... index2 = s:create_index('second', { type = 'tree', unique = true, parts = {2, 'unsigned', 1, 'unsigned'}}) --- ... for i = 1, 20 do s:insert({ i, 1, 2, 3 }) end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... local function test_op(op, idx, ...) local t1 = idx[op .. '_ffi'](idx, ...) local t2 = idx[op .. '_luac'](idx, ...) if msgpack.encode(t1) ~= msgpack.encode(t2) then return 'different result from '..op..'_ffi and '..op..'_luac', t1, t2 end return t1 end test = setmetatable({}, { __index = function(_, op) return function(...) 
return test_op(op, ...) end end }); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -------------------------------------------------------------------------------- -- get tests -------------------------------------------------------------------------------- s.index[0].get == s.index[0].get_ffi or s.index[0].get == s.index[0].get_luac --- - true ... test.get(s.index[0]) --- - error: Invalid key part count in an exact match (expected 1, got 0) ... test.get(s.index[0], {}) --- - error: Invalid key part count in an exact match (expected 1, got 0) ... test.get(s.index[0], nil) --- - error: Invalid key part count in an exact match (expected 1, got 0) ... test.get(s.index[0], 1) --- - [1, 1, 2, 3] ... test.get(s.index[0], {1}) --- - [1, 1, 2, 3] ... test.get(s.index[0], {1, 2}) --- - error: Invalid key part count in an exact match (expected 1, got 2) ... test.get(s.index[0], 0) --- - null ... test.get(s.index[0], {0}) --- - null ... test.get(s.index[0], "0") --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... test.get(s.index[0], {"0"}) --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... test.get(s.index[1], 1) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... test.get(s.index[1], {1}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... test.get(s.index[1], {1, 2}) --- - [2, 1, 2, 3] ... -------------------------------------------------------------------------------- -- select tests -------------------------------------------------------------------------------- s.index[0].select == s.index[0].select_ffi or s.index[0].select == s.index[0].select_luac --- - true ... 
test.select(s.index[0]) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], {}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], nil) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], {}, {iterator = 'ALL'}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], nil, {iterator = box.index.ALL }) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... 
test.select(s.index[0], {}, {iterator = box.index.ALL, limit = 10}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] ... test.select(s.index[0], nil, {iterator = box.index.ALL, limit = 0}) --- - [] ... test.select(s.index[0], {}, {iterator = 'ALL', limit = 1, offset = 15}) --- - - [16, 1, 2, 3] ... test.select(s.index[0], nil, {iterator = 'ALL', limit = 20, offset = 15}) --- - - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], nil, {iterator = box.index.EQ}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], {}, {iterator = 'EQ'}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], nil, {iterator = 'REQ'}) --- - - [20, 1, 2, 3] - [19, 1, 2, 3] - [18, 1, 2, 3] - [17, 1, 2, 3] - [16, 1, 2, 3] - [15, 1, 2, 3] - [14, 1, 2, 3] - [13, 1, 2, 3] - [12, 1, 2, 3] - [11, 1, 2, 3] - [10, 1, 2, 3] - [9, 1, 2, 3] - [8, 1, 2, 3] - [7, 1, 2, 3] - [6, 1, 2, 3] - [5, 1, 2, 3] - [4, 1, 2, 3] - [3, 1, 2, 3] - [2, 1, 2, 3] - [1, 1, 2, 3] ... 
test.select(s.index[0], {}, {iterator = box.index.REQ}) --- - - [20, 1, 2, 3] - [19, 1, 2, 3] - [18, 1, 2, 3] - [17, 1, 2, 3] - [16, 1, 2, 3] - [15, 1, 2, 3] - [14, 1, 2, 3] - [13, 1, 2, 3] - [12, 1, 2, 3] - [11, 1, 2, 3] - [10, 1, 2, 3] - [9, 1, 2, 3] - [8, 1, 2, 3] - [7, 1, 2, 3] - [6, 1, 2, 3] - [5, 1, 2, 3] - [4, 1, 2, 3] - [3, 1, 2, 3] - [2, 1, 2, 3] - [1, 1, 2, 3] ... test.select(s.index[0], nil, {iterator = 'EQ', limit = 2, offset = 1}) --- - - [2, 1, 2, 3] - [3, 1, 2, 3] ... test.select(s.index[0], {}, {iterator = box.index.REQ, limit = 2, offset = 1}) --- - - [19, 1, 2, 3] - [18, 1, 2, 3] ... test.select(s.index[0], 1) --- - - [1, 1, 2, 3] ... test.select(s.index[0], {1}) --- - - [1, 1, 2, 3] ... test.select(s.index[0], {1, 2}) --- - error: Invalid key part count (expected [0..1], got 2) ... test.select(s.index[0], 0) --- - [] ... test.select(s.index[0], {0}) --- - [] ... test.select(s.index[0], "0") --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... test.select(s.index[0], {"0"}) --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... test.select(s.index[1], 1) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[1], {1}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[1], {1}, {limit = 2}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] ... 
test.select(s.index[1], 1, {iterator = 'EQ'}) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[1], {1}, {iterator = box.index.EQ, offset = 16, limit = 2}) --- - - [17, 1, 2, 3] - [18, 1, 2, 3] ... test.select(s.index[1], {1}, {iterator = box.index.REQ, offset = 16, limit = 2 }) --- - - [4, 1, 2, 3] - [3, 1, 2, 3] ... test.select(s.index[1], {1, 2}, {iterator = 'EQ'}) --- - - [2, 1, 2, 3] ... test.select(s.index[1], {1, 2}, {iterator = box.index.REQ}) --- - - [2, 1, 2, 3] ... test.select(s.index[1], {1, 2}) --- - - [2, 1, 2, 3] ... test.select(s.index[0], nil, { iterator = 'ALL', offset = 0, limit = 4294967295 }) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], {}, { iterator = 'ALL', offset = 0, limit = 4294967295 }) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], 1) --- - - [1, 1, 2, 3] ... test.select(s.index[0], 1, { iterator = box.index.EQ }) --- - - [1, 1, 2, 3] ... test.select(s.index[0], 1, { iterator = 'EQ' }) --- - - [1, 1, 2, 3] ... 
test.select(s.index[0], 1, { iterator = 'GE' }) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] - [3, 1, 2, 3] - [4, 1, 2, 3] - [5, 1, 2, 3] - [6, 1, 2, 3] - [7, 1, 2, 3] - [8, 1, 2, 3] - [9, 1, 2, 3] - [10, 1, 2, 3] - [11, 1, 2, 3] - [12, 1, 2, 3] - [13, 1, 2, 3] - [14, 1, 2, 3] - [15, 1, 2, 3] - [16, 1, 2, 3] - [17, 1, 2, 3] - [18, 1, 2, 3] - [19, 1, 2, 3] - [20, 1, 2, 3] ... test.select(s.index[0], 1, { iterator = 'GE', limit = 2 }) --- - - [1, 1, 2, 3] - [2, 1, 2, 3] ... test.select(s.index[0], 1, { iterator = 'LE', limit = 2 }) --- - - [1, 1, 2, 3] ... test.select(s.index[0], 1, { iterator = 'GE', offset = 10, limit = 2 }) --- - - [11, 1, 2, 3] - [12, 1, 2, 3] ... s:select(2) --- - - [2, 1, 2, 3] ... -------------------------------------------------------------------------------- -- min/max tests -------------------------------------------------------------------------------- test.min(s.index[1]) --- - [1, 1, 2, 3] ... test.max(s.index[1]) --- - [20, 1, 2, 3] ... -------------------------------------------------------------------------------- -- count tests -------------------------------------------------------------------------------- test.count(s.index[1]) --- - 20 ... test.count(s.index[0], nil) --- - 20 ... test.count(s.index[0], {}) --- - 20 ... test.count(s.index[0], 10, { iterator = 'GT'}) --- - 10 ... -------------------------------------------------------------------------------- -- random tests -------------------------------------------------------------------------------- test.random(s.index[0], 48) --- - [9, 1, 2, 3] ... s:drop() --- ... collectgarbage('collect') --- - 0 ... s = box.schema.space.create('select', { temporary = true }) --- ... index = s:create_index('primary', { type = 'tree' }) --- ... a = s:insert{0} --- ... lots_of_links = {} --- ... ref_count = 0 --- ... while (true) do table.insert(lots_of_links, s:get{0}) ref_count = ref_count + 1 end --- - error: Tuple reference counter overflow ... ref_count --- - 65531 ... lots_of_links = {} --- ... 
s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/call.test.lua0000664000000000000000000001674713306560010020362 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') conn = require('net.box').connect(box.cfg.listen) conn:ping() -- -- gh-291: IPROTO: call returns wrong tuple -- function return_none() return end conn:call("return_none") conn:eval("return return_none()") conn:call_16("return_none") function return_nil() return nil end conn:call("return_nil") conn:eval("return return_nil()") conn:call_16("return_nil") function return_nils() return nil, nil, nil end conn:call("return_nils") conn:eval("return return_nils()") conn:call_16("return_nils") function return_bool() return true end conn:call("return_bool") conn:eval("return return_bool()") conn:call_16("return_bool") function return_bools() return true, false, true end conn:call("return_bools") conn:eval("return return_bools()") conn:call_16("return_bools") function return_number() return 1 end conn:call("return_number") conn:eval("return return_number()") conn:call_16("return_number") function return_numbers() return 1, 2, 3 end conn:call("return_numbers") conn:eval("return return_numbers()") conn:call_16("return_numbers") function return_string() return "string" end conn:call("return_string") conn:eval("return return_string()") conn:call_16("return_string") function return_strings() return "a", "b", "c" end conn:call("return_strings") conn:eval("return return_strings()") conn:call_16("return_strings") function return_emptytuple() return box.tuple.new() end conn:call("return_emptytuple") conn:eval("return return_emptytuple()") conn:call_16("return_emptytuple") function return_tuple() return box.tuple.new(1, 2, 3) end conn:call("return_tuple") conn:eval("return return_tuple()") conn:call_16("return_tuple") function return_tuples() return box.tuple.new(1, 2, 3), box.tuple.new(3, 4, 5) end conn:call("return_tuples") conn:eval("return return_tuples()") conn:call_16("return_tuples") 
function return_map() return { k1 = 'v1', k2 = 'v2'} end conn:call("return_map") conn:eval("return return_map()") conn:call_16("return_map") function return_emptyarray() return {} end conn:call("return_emptyarray") conn:eval("return return_emptyarray()") conn:call_16("return_emptyarray") function return_array1() return {1} end conn:call("return_array1") conn:eval("return return_array1()") conn:call_16("return_array1") function return_array2() return {1, 2} end conn:call("return_array2") conn:eval("return return_array2()") conn:call_16("return_array2") function return_complexarray1() return {1, 2, {k1 = 'v1', k2 = 'v2' }} end conn:call("return_complexarray1") conn:eval("return return_complexarray1()") conn:call_16("return_complexarray1") function return_complexarray2() return {{k1 = 'v1', k2 = 'v2' }, 2, 3} end conn:call("return_complexarray2") conn:eval("return return_complexarray2()") conn:call_16("return_complexarray2") function return_complexarray3() return {1, {k1 = 'v1', k2 = 'v2' }, 3} end conn:call("return_complexarray3") conn:eval("return return_complexarray3()") conn:call_16("return_complexarray3") function return_complexarray4() return {{k1 = 'v1', k2 = 'v2' }} end conn:call("return_complexarray4") conn:eval("return return_complexarray4()") conn:call_16("return_complexarray4") function return_tableofarrays1() return {{1}} end conn:call("return_tableofarrays1") conn:eval("return return_tableofarrays1()") conn:call_16("return_tableofarrays1") function return_tableofarrays2() return {{1, 2, 3}} end conn:call("return_tableofarrays2") conn:eval("return return_tableofarrays2()") conn:call_16("return_tableofarrays2") function return_tableofarrays3() return {{1}, {2}, {3}} end conn:call("return_tableofarrays3") conn:eval("return return_tableofarrays3()") conn:call_16("return_tableofarrays3") function return_tableoftuples1() return {box.tuple.new(1)} end conn:call("return_tableoftuples1") conn:eval("return return_tableoftuples1()") 
conn:call_16("return_tableoftuples1") function return_tableoftuples2() return {box.tuple.new(1), box.tuple.new(2)} end conn:call("return_tableoftuples2") conn:eval("return return_tableoftuples2()") conn:call_16("return_tableoftuples2") function return_indecipherable1() return {{1}, 2, 3} end conn:call("return_indecipherable1") conn:eval("return return_indecipherable1()") conn:call_16("return_indecipherable1") function return_indecipherable2() return {box.tuple.new(1), 2, 3} end conn:call("return_indecipherable2") conn:eval("return return_indecipherable2()") conn:call_16("return_indecipherable2") function return_indecipherable3() return {1, {2}, 3} end conn:call("return_indecipherable3") conn:eval("return return_indecipherable3()") conn:call_16("return_indecipherable3") function return_indecipherable4() return {1, box.tuple.new(2), 3} end conn:call("return_indecipherable4") conn:eval("return return_indecipherable4()") conn:call_16("return_indecipherable4") function toarray(x) return setmetatable(x, { __serialize = 'array' }) end function tomap(x) return setmetatable(x, { __serialize = 'map' }) end function return_serialize1() return toarray({ [1] = 1, [20] = 1}) end conn:call("return_serialize1") conn:eval("return return_serialize1()") conn:call_16("return_serialize1") function return_serialize2() return tomap({ 'a', 'b', 'c'}) end conn:call("return_serialize2") conn:eval("return return_serialize2()") conn:call_16("return_serialize2") function return_serialize3() return {'x', toarray({ [1] = 1, [20] = 1})} end conn:call("return_serialize3") conn:eval("return return_serialize3()") conn:call_16("return_serialize3") function return_serialize4() return {'x', tomap({ 'a', 'b', 'c'})} end conn:call_16("return_serialize4") function return_serialize5() return {toarray({ [1] = 1, [20] = 1}), 'x'} end conn:call("return_serialize5") conn:eval("return return_serialize5()") conn:call_16("return_serialize5") function return_serialize6() return { tomap({ 'a', 'b', 'c'}), 'x'} end 
conn:call("return_serialize6") conn:eval("return return_serialize6()") conn:call_16("return_serialize6") function return_serialize7() return {toarray({ [1] = 1, [20] = 1})} end conn:call("return_serialize7") conn:eval("return return_serialize7()") conn:call_16("return_serialize7") function return_serialize8() return { tomap({ 'a', 'b', 'c'})} end conn:call("return_serialize8") conn:eval("return return_serialize8()") conn:call_16("return_serialize8") -- -- gh-1167 -- sparse_safe = require('msgpack').cfg.encode_sparse_safe sparse_safe function return_sparse1() local res = {} res[1] = 1 res[20] = 1 return res end conn:call("return_sparse1") conn:eval("return return_sparse1()") conn:call_16("return_sparse1") function return_sparse2() return { [1] = 1, [20] = 1} end conn:call("return_sparse2") conn:eval("return return_sparse2()") conn:call_16("return_sparse2") function return_sparse3() local res = {} res[5] = 5 res[20] = 1 return res end conn:call("return_sparse3") conn:eval("return return_sparse3()") conn:call_16("return_sparse3") function return_sparse4() return { [5] = 1, [20] = 1} end conn:call("return_sparse4") conn:eval("return return_sparse4()") conn:call_16("return_sparse4") require('msgpack').cfg { encode_sparse_safe = 50 } conn:call("return_sparse1") conn:eval("return return_sparse1()") conn:call_16("return_sparse1") conn:call("return_sparse2") conn:eval("return return_sparse2()") conn:call_16("return_sparse2") conn:call("return_sparse3") conn:eval("return return_sparse3()") conn:call_16("return_sparse3") conn:call("return_sparse4") conn:eval("return return_sparse4()") conn:call_16("return_sparse4") conn:close() require('msgpack').cfg { encode_sparse_safe = sparse_safe } box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/before_replace.result0000664000000000000000000002304213306560010022145 0ustar rootroottest_run = require('test_run').new() --- ... s = box.schema.space.create('test') --- ... 
_ = s:create_index('primary') --- ... _ = s:create_index('secondary', {unique = false, parts = {2, 'unsigned'}}) --- ... function fail(old, new) error('fail') end --- ... function save(old, new) old_tuple = old new_tuple = new end --- ... function ret_new(old, new) return new end --- ... function ret_old(old, new) return old end --- ... function ret_nil(old, new) return nil end --- ... function ret_null(old, new) return nil end --- ... function ret_none(old, new) return end --- ... function ret_invalid(old, new) return 'test' end --- ... function ret_update(old, new) return box.tuple.update(new, {{'+', 3, 1}}) end --- ... function ret_update_pk(old, new) return box.tuple.update(new, {{'+', 1, 1}}) end --- ... -- Exception in trigger. s:before_replace(fail) == fail --- - true ... s:insert{1, 1} --- - error: '[string "function fail(old, new) error(''fail'') end "]:1: fail' ... s:select() --- - [] ... s:before_replace(nil, fail) --- ... -- Check 'old' and 'new' trigger arguments. old_tuple = nil --- ... new_tuple = nil --- ... s:before_replace(save) == save --- - true ... s:insert{1, 1} --- - [1, 1] ... old_tuple, new_tuple --- - null - [1, 1] ... s:replace{1, 2} --- - [1, 2] ... old_tuple, new_tuple --- - [1, 1] - [1, 2] ... s:update(1, {{'+', 2, 1}}) --- - [1, 3] ... old_tuple, new_tuple --- - [1, 2] - [1, 3] ... s:upsert({1, 1}, {{'=', 2, 1}}) --- ... old_tuple, new_tuple --- - [1, 3] - [1, 1] ... s:upsert({2, 2}, {{'=', 2, 2}}) --- ... old_tuple, new_tuple --- - null - [2, 2] ... s:select() --- - - [1, 1] - [2, 2] ... s:delete(1) --- - [1, 1] ... old_tuple, new_tuple --- - [1, 1] - null ... s:delete(2) --- - [2, 2] ... old_tuple, new_tuple --- - [2, 2] - null ... s:select() --- - [] ... s:before_replace(nil, save) --- ... -- Returning 'new' from trigger doesn't affect statement. s:before_replace(ret_new) == ret_new --- - true ... s:insert{1, 1} --- - [1, 1] ... s:update(1, {{'+', 2, 1}}) --- - [1, 2] ... s:select() --- - - [1, 2] ... s:delete(1) --- - [1, 2] ... 
s:select() --- - [] ... s:before_replace(nil, ret_new) --- ... -- Returning 'old' from trigger skips statement. s:insert{1, 1} --- - [1, 1] ... s:before_replace(ret_old) == ret_old --- - true ... s:insert{2, 2} --- ... s:update(1, {{'+', 2, 1}}) --- ... s:delete(1) --- ... s:select() --- - - [1, 1] ... s:before_replace(nil, ret_old) --- ... s:delete(1) --- - [1, 1] ... -- Returning nil from trigger turns statement into DELETE. s:insert{1, 1} --- - [1, 1] ... s:before_replace(ret_nil) == ret_nil --- - true ... s:replace{1, 2} --- - [1, 1] ... s:select() --- - [] ... s:before_replace(nil, ret_nil) --- ... -- Returning box.NULL from trigger turns statement into DELETE. s:insert{1, 1} --- - [1, 1] ... s:before_replace(ret_null) == ret_null --- - true ... s:replace{1, 2} --- - [1, 1] ... s:select() --- - [] ... s:before_replace(nil, ret_null) --- ... -- Returning nothing doesn't affect the operation. s:insert{1, 1} --- - [1, 1] ... s:insert{2, 2} --- - [2, 2] ... s:before_replace(ret_none) == ret_none --- - true ... s:replace{1, 2} --- - [1, 2] ... s:update(1, {{'+', 2, 1}}) --- - [1, 3] ... s:delete(2) --- - [2, 2] ... s:select() --- - - [1, 3] ... s:before_replace(nil, ret_none) --- ... s:delete(1) --- - [1, 3] ... -- Update statement from trigger. s:before_replace(ret_update) == ret_update --- - true ... s:insert{1, 1, 1} --- - [1, 1, 2] ... s:update(1, {{'+', 2, 1}}) --- - [1, 2, 3] ... s:select() --- - - [1, 2, 3] ... s:before_replace(nil, ret_update) --- ... s:delete(1) --- - [1, 2, 3] ... -- Invalid return value. s:before_replace(ret_invalid) == ret_invalid --- - true ... s:insert{1, 1} --- - error: 'Invalid return value of space:before_replace trigger: expected tuple or nil, got string' ... s:select() --- - [] ... s:before_replace(nil, ret_invalid) --- ... -- Update of the primary key from trigger is forbidden. s:insert{1, 1} --- - [1, 1] ... s:before_replace(ret_update_pk) == ret_update_pk --- - true ... 
s:replace{1, 2} --- - error: Attempt to modify a tuple field which is part of index 'primary' in space 'test' ... s:before_replace(nil, ret_update_pk) --- ... s:delete(1) --- - [1, 1] ... -- Update over secondary index + space:before_replace. s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk') --- ... _ = s2:create_index('sk', {unique = true, parts = {2, 'unsigned'}}) --- ... s2:insert{1, 1, 1, 1} --- - [1, 1, 1, 1] ... s2:before_replace(ret_update) == ret_update --- - true ... s2.index.sk:update(1, {{'+', 4, 1}}) --- - [1, 1, 2, 2] ... s2:select() --- - - [1, 1, 2, 2] ... s2:drop() --- ... -- Stacking triggers. old_tuple = nil --- ... new_tuple = nil --- ... s:before_replace(save) == save --- - true ... s:before_replace(ret_update) == ret_update --- - true ... s:insert{1, 1, 1} --- - [1, 1, 2] ... old_tuple, new_tuple --- - null - [1, 1, 2] ... s:before_replace(nil, save) --- ... s:before_replace(nil, ret_update) --- ... s:delete(1) --- - [1, 1, 2] ... -- Issue DML from trigger. s2 = box.schema.space.create('test2') --- ... _ = s2:create_index('pk') --- ... cb = function(old, new) s2:insert{i, old, new} end --- ... s:before_replace(cb) == cb --- - true ... i = 1 --- ... s:insert{1, 1} --- - [1, 1] ... i = 2 --- ... s:replace{1, 2} --- - [1, 2] ... s:replace{1, 3} -- error: conflict in s2 --- - error: Duplicate key exists in unique index 'pk' in space 'test2' ... s:select() --- - - [1, 2] ... s2:select() --- - - [1, null, [1, 1]] - [2, [1, 1], [1, 2]] ... -- DML done from space:before_replace is undone -- if space:on_replace fails. s:truncate() --- ... s2:truncate() --- ... s:on_replace(fail) == fail --- - true ... s:replace{1, 3} --- - error: '[string "function fail(old, new) error(''fail'') end "]:1: fail' ... s:select() --- - [] ... s2:select() --- - [] ... s:on_replace(nil, fail) --- ... s:before_replace(nil, cb) --- ... s2:drop() --- ... -- If space:before_replace turns the request into NOP, -- space:on_replace isn't called. 
old_tuple = nil --- ... new_tuple = nil --- ... s:insert{1, 1} --- - [1, 1] ... s:before_replace(ret_old) == ret_old --- - true ... s:on_replace(save) == save --- - true ... s:replace{1, 2} --- ... old_tuple, new_tuple --- - null - null ... s:delete(1) --- ... old_tuple, new_tuple --- - null - null ... s:insert{2, 2} --- ... old_tuple, new_tuple --- - null - null ... s:select() --- - - [1, 1] ... s:before_replace(nil, ret_old) --- ... s:on_replace(nil, save) --- ... s:delete(1) --- - [1, 1] ... -- Changed done in space:before_replace are visible -- in space:on_replace old_tuple = nil --- ... new_tuple = nil --- ... s:before_replace(ret_update) == ret_update --- - true ... s:on_replace(save) == save --- - true ... s:insert{1, 1, 1} --- - [1, 1, 2] ... old_tuple, new_tuple --- - null - [1, 1, 2] ... s:replace{1, 2, 2} --- - [1, 2, 3] ... old_tuple, new_tuple --- - [1, 1, 2] - [1, 2, 3] ... s:select() --- - - [1, 2, 3] ... s:before_replace(nil, ret_update) --- ... s:on_replace(nil, save) --- ... s:delete(1) --- - [1, 2, 3] ... -- Nesting limit: space.before_replace cb = function(old, new) s:insert{1, 1} end --- ... s:before_replace(cb) == cb --- - true ... s:insert{1, 1} -- error --- - error: 'Can not execute a nested statement: nesting limit reached' ... s:select() --- - [] ... s:before_replace(nil, cb) --- ... -- Nesting limit: space.before_replace + space.on_replace cb = function(old, new) s:delete(1) end --- ... s:before_replace(cb) == cb --- - true ... s:on_replace(cb) == cb --- - true ... s:insert{1, 1} -- error --- - error: 'Can not execute a nested statement: nesting limit reached' ... s:select() --- - [] ... s:before_replace(nil, cb) --- ... s:on_replace(nil, cb) --- ... -- Make sure the server can recover from xlogs after -- using space:before_replace. test_run:cmd('restart server default') s = box.space.test --- ... s:select() --- - [] ... -- Check that IPROTO_NOP is actually written to xlog. fio = require('fio') --- ... xlog = require('xlog') --- ... 
type(s:before_replace(function(old, new) return old end)) --- - function ... s:insert{1, 1} --- ... path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 1)) --- ... fun, param, state = xlog.pairs(path) --- ... state, row = fun(param, state) --- ... row.HEADER.type --- - NOP ... row.BODY.space_id == s.id --- - true ... -- gh-3128 before_replace with run_triggers s2 = box.schema.space.create("test2") --- ... _ = s2:create_index("prim") --- ... before_replace1 = function() s2:insert{1} s:run_triggers(false) end --- ... before_replace2 = function() s2:insert{2} end --- ... on_replace = function() s2:insert{3} end --- ... type(s:on_replace(on_replace)) --- - function ... type(s:before_replace(before_replace1)) --- - function ... type(s:before_replace(before_replace2)) --- - function ... s:insert{1, 1} --- ... s2:select{} --- - - [1] - [2] ... s:truncate() --- ... s2:truncate() --- ... s:on_replace(nil, on_replace) --- ... s:before_replace(nil, before_replace1) --- ... s:before_replace(nil, before_replace2) --- ... -- -- gh-3128 -- If at least one before trigger returns old -- insertion will be aborted, but other before triggers -- will be executed before_replace1 = function(old, new) s2:insert{1} return old end --- ... before_replace2 = function(old, new) s2:insert{2} end --- ... type(s:on_replace(on_replace)) --- - function ... type(s:before_replace(before_replace1)) --- - function ... type(s:before_replace(before_replace2)) --- - function ... s:insert{1, 1} --- ... s:select{} --- - [] ... s2:select{} --- - - [1] - [2] ... s:on_replace(nil, on_replace) --- ... s:before_replace(nil, before_replace1) --- ... s:before_replace(nil, before_replace2) --- ... s2:drop() --- ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/protocol.test.lua0000664000000000000000000000153113306560010021271 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') -------------------------------------------------------------------------------- -- Test case for #273: IPROTO_ITERATOR ignored in network protocol -------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') index = space:create_index('primary', { type = 'tree'}) for i=1,5 do space:insert{i} end LISTEN = require('uri').parse(box.cfg.listen) LISTEN ~= nil conn = (require 'net.box').connect(LISTEN.host, LISTEN.service) conn.space[space.id]:select(3, { iterator = 'GE' }) conn.space[space.id]:select(3, { iterator = 'LE' }) conn.space[space.id]:select(3, { iterator = 'GT' }) conn.space[space.id]:select(3, { iterator = 'LT' }) conn:close() space:drop() box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/role.test.lua0000664000000000000000000002622113306560010020374 0ustar rootrootbox.schema.role.create('iddqd') box.schema.role.create('iddqd') box.schema.role.drop('iddqd') box.schema.role.drop('iddqd') box.schema.role.create('iddqd') -- impossible to su to a role box.session.su('iddqd') -- test granting privilege to a role box.schema.role.grant('iddqd', 'execute', 'universe') box.schema.role.info('iddqd') box.schema.role.revoke('iddqd', 'execute', 'universe') box.schema.role.info('iddqd') -- test granting a role to a user box.schema.user.create('tester') box.schema.user.info('tester') box.schema.user.grant('tester', 'execute', 'role', 'iddqd') box.schema.user.info('tester') -- test granting user to a user box.schema.user.grant('tester', 'execute', 'role', 'tester') -- test granting a non-execute grant on a role - error box.schema.user.grant('tester', 'write', 'role', 'iddqd') box.schema.user.grant('tester', 'read', 'role', 'iddqd') -- test granting role to a role 
box.schema.role.grant('iddqd', 'execute', 'role', 'iddqd') box.schema.role.grant('iddqd', 'iddqd') box.schema.role.revoke('iddqd', 'iddqd') box.schema.user.grant('tester', 'iddqd') box.schema.user.revoke('tester', 'iddqd') box.schema.role.drop('iddqd') box.schema.user.revoke('tester', 'no-such-role') box.schema.user.grant('tester', 'no-such-role') box.schema.user.drop('tester') -- check for loops in role grants box.schema.role.create('a') box.schema.role.create('b') box.schema.role.create('c') box.schema.role.create('d') box.schema.role.grant('b', 'a') box.schema.role.grant('c', 'a') box.schema.role.grant('d', 'b') box.schema.role.grant('d', 'c') --check user restrictions box.schema.user.grant('a', 'd') box.schema.user.revoke('a', 'd') box.schema.user.drop('a') box.schema.role.grant('a', 'd') box.schema.role.drop('d') box.schema.role.drop('b') box.schema.role.drop('c') box.schema.role.drop('a') -- check that when dropping a role, it's first revoked -- from whoever it is granted box.schema.role.create('a') box.schema.role.create('b') box.schema.role.grant('b', 'a') box.schema.role.drop('a') box.schema.role.info('b') box.schema.role.drop('b') -- check a grant received via a role box.schema.user.create('test') box.schema.user.create('grantee') box.schema.role.create('liaison') --check role restrictions box.schema.role.grant('test', 'liaison') box.schema.role.revoke('test', 'liaison') box.schema.role.drop('test') box.schema.user.grant('grantee', 'liaison') box.schema.user.grant('test', 'read,write,create', 'universe') box.session.su('test') s = box.schema.space.create('test') _ = s:create_index('i1') box.schema.role.grant('liaison', 'read,write', 'space', 'test') box.session.su('grantee') box.space.test:insert{1} box.space.test:select{1} box.session.su('test') box.schema.role.revoke('liaison', 'read,write', 'space', 'test') box.session.su('grantee') box.space.test:insert{1} box.space.test:select{1} box.session.su('admin') box.schema.user.drop('test') 
box.schema.user.drop('grantee') box.schema.role.drop('liaison') -- -- Test how privileges are propagated through a complex role graph. -- Here's the graph: -- -- role1 ->- role2 -->- role4 -->- role6 ->- user1 -- \ / \ -- \->- role5 ->-/ \->- role9 ->- role10 ->- user -- / \ / -- role3 ->-/ \->- role7 ->-/ -- -- Privilege checks verify that grants/revokes are propagated correctly -- from the role1 to role10. -- box.schema.user.create("user") box.schema.role.create("role1") box.schema.role.create("role2") box.schema.role.create("role3") box.schema.role.create("role4") box.schema.role.create("role5") box.schema.role.create("role6") box.schema.role.create("role7") box.schema.user.create("user1") box.schema.role.create("role9") box.schema.role.create("role10") box.schema.role.grant("role2", "role1") box.schema.role.grant("role4", "role2") box.schema.role.grant("role5", "role2") box.schema.role.grant("role5", "role3") box.schema.role.grant("role6", "role4") box.schema.role.grant("role6", "role5") box.schema.role.grant("role7", "role5") box.schema.user.grant("user1", "role6") box.schema.role.grant("role9", "role6") box.schema.role.grant("role9", "role7") box.schema.role.grant("role10", "role9") box.schema.user.grant("user", "role10") -- try to create a cycle box.schema.role.grant("role2", "role10") -- -- test grant propagation -- box.schema.role.grant("role1", "read", "universe") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.session.su("admin") box.schema.role.revoke("role1", "read", "universe") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.session.su("admin") -- -- space-level privileges -- box.schema.role.grant("role1", "read", "space", "_index") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.space._index:get{288, 0}[3] box.session.su("admin") box.schema.role.revoke("role1", "read", "space", "_index") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.space._index:get{288, 
0}[3] box.session.su("admin") -- -- grant to a non-leaf branch -- box.schema.role.grant("role5", "read", "space", "_index") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.space._index:get{288, 0}[3] box.session.su("admin") box.schema.role.revoke("role5", "read", "space", "_index") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.space._index:get{288, 0}[3] box.session.su("admin") -- -- grant via two branches -- box.schema.role.grant("role3", "read", "space", "_index") box.schema.role.grant("role4", "read", "space", "_index") box.schema.role.grant("role9", "read", "space", "_index") box.session.su("user") box.space._index:get{288, 0}[3] box.session.su("user1") box.space._index:get{288, 0}[3] box.session.su("admin") box.schema.role.revoke("role3", "read", "space", "_index") box.session.su("user") box.space._index:get{288, 0}[3] box.session.su("user1") box.space._index:get{288, 0}[3] box.session.su("admin") box.schema.role.revoke("role4", "read", "space", "_index") box.session.su("user") box.space._index:get{288, 0}[3] box.session.su("user1") box.space._index:get{288, 0}[3] box.session.su("admin") box.schema.role.revoke("role9", "read", "space", "_index") box.session.su("user") box.space._index:get{288, 0}[3] box.session.su("user1") box.space._index:get{288, 0}[3] box.session.su("admin") -- -- check diamond-shaped grant graph -- box.schema.role.grant("role5", "read", "space", "_space") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.session.su("user1") box.space._space.index.name:get{"_space"}[3] box.session.su("admin") box.schema.role.revoke("role5", "read", "space", "_space") box.session.su("user") box.space._space.index.name:get{"_space"}[3] box.session.su("user1") box.space._space.index.name:get{"_space"}[3] box.session.su("admin") box.schema.user.drop("user") box.schema.user.drop("user1") box.schema.role.drop("role1") box.schema.role.drop("role2") box.schema.role.drop("role3") 
box.schema.role.drop("role4") box.schema.role.drop("role5") box.schema.role.drop("role6") box.schema.role.drop("role7") box.schema.role.drop("role9") box.schema.role.drop("role10") -- -- only the creator of the role can grant it (or a superuser) -- There is no grant option. -- the same applies for privileges -- box.schema.user.create('user') box.schema.user.create('grantee') box.schema.user.grant('user', 'read,write,execute,create', 'universe') box.session.su('user') box.schema.role.create('role') box.session.su('admin') box.schema.user.grant('grantee', 'role') box.schema.user.revoke('grantee', 'role') box.schema.user.create('john') box.session.su('john') -- error box.schema.user.grant('grantee', 'role') -- box.session.su('admin') _ = box.schema.space.create('test') box.schema.user.grant('john', 'read,write,execute', 'universe') box.session.su('john') box.schema.user.grant('grantee', 'role') box.schema.user.grant('grantee', 'read', 'space', 'test') -- -- granting 'public' is however an exception - everyone -- can grant 'public' role, it's implicitly granted with -- a grant option. -- box.schema.user.grant('grantee', 'public') -- -- revoking role 'public' is another deal - only the -- superuser can do that, and even that would be useless, -- since one can still re-grant it back to oneself. -- box.schema.user.revoke('grantee', 'public') box.session.su('admin') box.schema.user.drop('john') box.schema.user.drop('user') box.schema.user.drop('grantee') box.schema.role.drop('role') box.space.test:drop() -- -- grant a privilege through a role, but -- the user has another privilege either granted -- natively (one case) or via another role. -- Check that privileges actually OR, but -- not replace each other. 
-- _ = box.schema.space.create('test') _ = box.space.test:create_index('primary') box.schema.user.create('john') box.schema.user.grant('john', 'read', 'space', 'test') box.session.su('john') box.space.test:select{} box.space.test:insert{1} box.session.su('admin') box.schema.role.grant('public', 'write', 'space', 'test') box.session.su('john') box.space.test:select{} box.space.test:insert{2} box.session.su('admin') box.schema.role.revoke('public', 'write', 'space', 'test') box.session.su('john') box.space.test:select{} box.space.test:insert{1} box.session.su('admin') box.space.test:drop() box.schema.user.drop('john') -- test ER_GRANT box.space._priv:replace{1, 0, 'universe', 0, 0} -- role.exists() -- -- true if the role is present box.schema.role.exists('public') -- for if there is no such role box.schema.role.exists('nosuchrole') -- false for users box.schema.role.exists('guest') -- false for users box.schema.role.exists('admin') -- role id is ok box.schema.role.exists(3) -- user id box.schema.role.exists(0) box.schema.role.create('public', { if_not_exists = true}) box.schema.user.create('admin', { if_not_exists = true}) box.schema.user.create('guest', { if_not_exists = true}) box.schema.user.create('test', { if_not_exists = true}) box.schema.user.create('test', { if_not_exists = true}) box.schema.role.drop('test', { if_not_exists = true}) box.schema.role.drop('test', { if_exists = true}) box.schema.role.create('test', { if_not_exists = true}) box.schema.role.create('test', { if_not_exists = true}) box.schema.user.drop('test', { if_not_exists = true}) -- gh-664 roles: accepting bad syntax for create box.schema.role.create('role', 'role') box.schema.role.drop('role', 'role') box.schema.user.drop('test', { if_exists = true}) -- gh-663: inconsistent roles grant/revoke box.schema.role.create('X1') box.schema.role.create('X2') box.schema.role.info('X1') box.schema.role.grant('X1','read','role','X2') box.schema.role.info('X1') 
box.schema.role.revoke('X1','read','role','X2') box.schema.role.info('X1') box.schema.role.drop('X1') box.schema.role.drop('X2') -- gh-867 inconsistent role/user info box.schema.role.create('test_role') box.schema.role.info('test_role') box.schema.user.info('test_role') box.schema.role.info('test_not_exist') box.schema.user.create('test_user') box.schema.user.info('test_user') box.schema.role.info('test_user') box.schema.user.info('test_not_exist') box.schema.role.drop('test_role') box.schema.user.drop('test_user') --gh-1266 if_exists for user drop box.schema.user.create('test_1266') box.schema.user.drop('test_1266') box.schema.user.drop('test_1266') box.schema.user.drop('test_1266', { if_exists = true}) tarantool_1.9.1.26.g63eb81e3c/test/box/admin.test.lua0000664000000000000000000000744113306560010020526 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd('restart server default') space = box.schema.space.create('tweedledum') index = space:create_index('primary') help() cfg_filter(box.cfg) space:insert{1, 'tuple'} box.snapshot() space:delete{1} test_run:cmd("setopt delimiter ';'") function check_type(arg, typeof) return type(arg) == typeof end; function test_box_info() local tmp = box.info() local num = {'pid', 'uptime'} local str = {'version', 'status' } local failed = {} if check_type(tmp.replication, 'table') == false then table.insert(failed, 'box.info().replication') else tmp.replication = nil end for k, v in ipairs(num) do if check_type(tmp[v], 'number') == false then table.insert(failed, 'box.info().'..v) else tmp[v] = nil end end for k, v in ipairs(str) do if check_type(tmp[v], 'string') == false then table.insert(failed, 'box.info().'..v) else tmp[v] = nil end end if #tmp > 0 or #failed > 0 then return 'box.info() is not ok.', 'failed: ', failed, tmp else return 'box.info() is ok.' 
end end; function test_slab(tbl) local num = {'item_size', 'item_count', 'slab_size', 'slab_count', 'mem_used', 'mem_free'} local failed = {} for k, v in ipairs(num) do if check_type(tbl[v], 'number') == false then table.insert(failed, 'box.slab.info()..'..v) else tbl[v] = nil end end if #tbl > 0 or #failed > 0 then return false, failed else return true, {} end end; function test_box_slab_info() local tmp = box.slab.info() local tmp_slabs = box.slab.stats() local cdata = {'arena_size', 'arena_used'} local failed = {} if type(tmp_slabs) == 'table' then for name, tbl in ipairs(tmp_slabs) do local bl, fld = test_slab(tbl) if bl == true then tmp[name] = nil else for k, v in ipairs(fld) do table.insert(failed, v) end end end else table.insert(failed, 'box.slab.info().slabs is not ok') end if #tmp_slabs == 0 then tmp_slabs = nil end for k, v in ipairs(cdata) do if check_type(tmp[v], 'number') == false then table.insert(failed, 'box.slab.info().'..v) else tmp[v] = nil end end if #tmp > 0 or #failed > 0 then return "box.slab.info() is not ok", tmp, failed else return "box.slab.info() is ok" end end; function test_fiber(tbl) local num = {'fid', 'csw'} for k, v in ipairs(num) do if check_type(tmp[v], 'number') == false then table.insert(failed, "require('fiber').info().."..v) else tmp[v] = nil end end if type(tbl.backtrace) == 'table' and #tbl.backtrace > 0 then tbl.backtrace = nil else table.insert(failed, 'backtrace') end if #tbl > 0 or #failed > 0 then return false, failed else return true, {} end end; function test_box_fiber_info() local tmp = require('fiber').info() local failed = {} for name, tbl in ipairs(tmp) do local bl, fld = test_fiber(tbl) if bl == true then tmp[name] = nil else for k, v in ipairs(fld) do table.insert(failed, v) end end end if #tmp > 0 or #failed > 0 then return "require('fiber').info is not ok. 
failed: ", tmp, failed else return "require('fiber').info() is ok" end end; test_box_info(); test_box_slab_info(); test_box_fiber_info(); space:drop(); test_run:cmd("setopt delimiter ''"); tarantool_1.9.1.26.g63eb81e3c/test/box/lua/0000775000000000000000000000000013306565107016544 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/box/lua/test_init.lua0000664000000000000000000000135513306560010021241 0ustar rootroot#!/usr/bin/env tarantool box.load_cfg() -- testing start-up script floor = require("math").floor -- -- Access to box.cfg from start-up script -- box_cfg = box.cfg() function print_config() return box_cfg end -- -- Test for bug #977898 -- Insert from detached fiber -- local function do_insert() box.fiber.detach() box.space[0]:insert{1, 2, 4, 8} end space = box.schema.create_space('tweedledum', { id = 0 }) space:create_index('primary', { type = 'hash' }) fiber = box.fiber.create(do_insert) box.fiber.resume(fiber) -- -- Test insert from start-up script -- space:insert{2, 4, 8, 16} -- -- A test case for https://github.com/tarantool/tarantool/issues/53 -- assert (require ~= nil) box.fiber.sleep(0.0) assert (require ~= nil) tarantool_1.9.1.26.g63eb81e3c/test/box/lua/identifier.lua0000664000000000000000000000344313306565107021375 0ustar rootrootmax_len_string = string.rep('a', box.schema.NAME_MAX) valid_testcases = { --[[ Symbols from various unicode groups ,, --]] "1", "_", "sd", "я", "Ё", ".", "@", "#" , "⁋", "☢", "☒", "↹", "〄", "㐤", "곉", "꒮", "ʘ", '', "𐎆", "⤘", "𐑿", "𝀷","勺", "◉", "༺", "Ԙ","Ⅷ","⅘", "℃", "∉", "∰","⨌","␡", "⑆", "⑳", "╈", "☎", "✇", "⟌", "⣇", "⧭", "⭓", max_len_string } invalid_testcases = { --[[ Invalid and non printable unicode sequences --]] --[[ 1-3 ASCII control, C0 --]] "\x01", "\x09", "\x1f", --[[ 4-4 ISO/IEC 2022 --]] "\x7f", --[[ 5-7 C1 --]] "\xc2\x80", "\xc2\x90", "\xc2\x9f", --[[ 8- zl line separator --]] "\xE2\x80\xA8", --[[ 9-16 other invalid --]] "\x20\x0b", "\xE2\x80", "\xFE\xFF", "\xC2", "\xED\xB0\x80", "\xE2\x80\xA9", "", 
max_len_string..'1' } function run_test(create_func, cleanup_func) local json = require("json") print("loosadlalsd") local bad_tests = {} for i, identifier in ipairs(valid_testcases) do local ok, res = pcall(create_func,identifier) if ok == false then table.insert(bad_tests, string.format("valid_testcases %s: %s", i, tostring(res))) else cleanup_func(identifier) end end for i, identifier in ipairs(invalid_testcases) do local ok, res = pcall(create_func,identifier) if ok then table.insert(bad_tests, "invalid_testcases: "..i) end end local res if (#bad_tests == 0) then res = string.format("All tests passed") else res = "Errors:\n"..table.concat(bad_tests, "\n") end return res end return { run_test = run_test; }; tarantool_1.9.1.26.g63eb81e3c/test/box/lua/cfg_test2.lua0000664000000000000000000000036513306560010021117 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 214748364, } require('console').listen(os.getenv('ADMIN')) box.schema.user.grant('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/lua/require_init.lua0000664000000000000000000000017713306560010021737 0ustar rootroot#!/usr/bin/env tarantool box.load_cfg() mod = require("require_mod") package_path = package.path package_cpath = package.cpath tarantool_1.9.1.26.g63eb81e3c/test/box/lua/push.lua0000664000000000000000000000064313306560010020215 0ustar rootroot function push_collection(space, size, cid, ...) local append = { ... 
} local tuple = space:get{cid} if tuple == nil then return space:insert{cid, unpack(append)} end if #append == 0 then return tuple end tuple = tuple:transform( #tuple + 1, 0, unpack( append ) ) if #tuple - 1 > tonumber(size) then tuple = tuple:transform( 2, #tuple - 1 - tonumber(size) ) end return space:replace{tuple:unpack()} end tarantool_1.9.1.26.g63eb81e3c/test/box/lua/bitset.lua0000664000000000000000000000241713306560010020531 0ustar rootrootlocal utils = require('utils') local SPACE_NO = 0 local INDEX_NO = 1 function create_space() local space = box.schema.create_space('tweedledum') space:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) space:create_index('bitset', { type = 'bitset', parts = {2, 'unsigned'}, unique = false }) end function fill(...) local space = box.space['tweedledum'] local nums = utils.table_generate(utils.arithmetic(...)) utils.table_shuffle(nums) for _k, v in ipairs(nums) do space:insert{v, v} end end function delete(...) local space = box.space['tweedledum'] local nums = utils.table_generate(utils.arithmetic(...)) utils.table_shuffle(nums) for _k, v in ipairs(nums) do space:delete{v} end end function clear() box.space['tweedledum']:truncate() end function drop_space() box.space['tweedledum']:drop() end function dump(...) return iterate('tweedledum', 'bitset', 1, 2, ...) 
end function test_insert_delete(n) local t = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127} utils.table_shuffle(t) clear() fill(1, n) for _, v in ipairs(t) do delete(v, n / v) end return dump(box.index.BITS_ALL) end tarantool_1.9.1.26.g63eb81e3c/test/box/lua/index_random_test.lua0000664000000000000000000000166313306560010022747 0ustar rootrootfunction index_random_test(space, index_no) local COUNT = 128 -- enough to resize both sptree and mhash -- clear the space space:truncate() -- randomize math.randomseed(os.time()) -- insert values into the index for k=1,COUNT,1 do space:insert{k} end -- delete some values from the index for i=1,COUNT/2,1 do local k = math.random(COUNT) local tuple = space:delete{k} if tuple ~= nil then COUNT = COUNT - 1 end end local rnd_start = math.random(4294967296) -- try to get all values from the index using index.random local tuples = {} local found = 0 while found < COUNT do local rnd = math.random(4294967296) if rnd == rnd_start then error('too many iterations') return nil end local tuple = space.index[index_no]:random(rnd) if tuple == nil then error('nil returned') return nil end local k = tuple[1] if tuples[k] == nil then found = found + 1 end tuples[k] = 1 end return true end tarantool_1.9.1.26.g63eb81e3c/test/box/lua/cfg_test4.lua0000664000000000000000000000042513306560010021116 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), slab_alloc_factor = 3.14, vinyl_memory = 1024 * 1024, } require('console').listen(os.getenv('ADMIN')) box.schema.user.grant('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/lua/cfg_test3.lua0000664000000000000000000000042313306560010021113 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 214748364, vinyl_write_threads = 10, } require('console').listen(os.getenv('ADMIN')) 
box.schema.user.grant('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/lua/fifo.lua0000664000000000000000000000162513306560010020162 0ustar rootroot-- {name, top, bottom, fifo...} fifomax = 5 function find_or_create_fifo(space, name) local fifo = space:get{name} if fifo == nil then fifo = {} for i = 1, fifomax do table.insert(fifo, 0) end fifo = space:insert{name, 4, 4, unpack(fifo)} end return fifo end function fifo_push(space, name, val) local fifo = find_or_create_fifo(space, name) local top = fifo[2] local bottom = fifo[3] if top == fifomax+3 then -- % size top = 4 elseif top ~= bottom then -- was not empty top = top + 1 end if bottom == fifomax + 3 then -- % size bottom = 4 elseif bottom == top then bottom = bottom + 1 end return space:update({name}, {{'=', 2, top}, {'=', 3, bottom }, {'=', top, val}}) end function fifo_top(space, name) local fifo = find_or_create_fifo(space, name) local top = fifo[2] return fifo[top] end tarantool_1.9.1.26.g63eb81e3c/test/box/lua/utils.lua0000664000000000000000000001227213306560010020377 0ustar rootrootfunction space_field_types(space_no) local types = {}; for _, index in pairs(box.space[space_no].index) do for _,key_def in pairs(index.parts) do types[key_def.fieldno] = key_def.type; end end return types; end function iterate(space_no, index_no, f1, f2, iterator, ...) 
local sorted = (box.space[space_no].index[index_no].type == "TREE"); local pkeys = {}; local tkeys = {}; local values = {}; local types = space_field_types(space_no); local function get_field(tuple, field_no) local f = tuple[field_no] if (types[field_no] == 'unsigned') then return string.format('%8d', f); else return f end end local state, v for state, v in box.space[space_no].index[index_no]:pairs({...}, { iterator = iterator }) do local pk = get_field(v, 1); local tk = '$'; for f = f1 + 1, f2, 1 do tk = (tk..(get_field(v, f))..'$'); end; table.insert(values, tk); if pkeys[pk] ~= nil then error('Duplicate tuple (primary key): '..pk); end if box.space[space_no].index[index_no].unique and tkeys[tk] ~= nil then error('Duplicate tuple (test key): '..tk); end; tkeys[pk] = true; tkeys[tk] = true; end; if not sorted then table.sort(values); end; return values end function arithmetic(d, count) if not d then d = 1 end local a = 0; local i = 0; return function() if count and (i >= count) then return nil; end i = i + 1; a = a + d; return a; end end function table_shuffle(t) local n = #t while n >= 2 do local k = math.random(n) t[k], t[n] = t[n], t[k] n = n - 1 end end function table_generate(iter) local t = {}; for k in iter do table.insert(t, k); end return t; end -- sort all rows as strings(not for tables); function sort(tuples) local function compare_tables(t1, t2) return (tostring(t1) < tostring(t2)) end table.sort(tuples, compare_tables) return tuples end; -- return string tuple function tuple_to_string(tuple, yaml) ans = '[' for i = 0, #tuple - 1 do if #i == 4 then ans = ans..i elseif #i == 8 then ans = ans..i else ans = ans..'\''..tostring(i)..'\'' end if not #i == #tuple -1 then ans = ans..', ' end end ans = ans..']' if yaml then ans = ' - '..ans end return ans end; function check_space(space, N) local errors = {} -- -- Insert -- local keys = {} math.randomseed(0) for i=1,N do local key = math.random(2147483647) keys[i] = key space:insert({key, 0}) end -- -- Select 
-- table_shuffle(keys) for i=1,N do local key = keys[i] local tuple = space:get({key}) if tuple == nil or tuple[1] ~= key then table.insert(errors, {'missing key after insert', key}) end end -- -- Delete some keys -- table_shuffle(keys) for i=1,N,3 do local key = keys[i] space:delete({key}) end -- -- Upsert -- for k=1,2 do -- Insert/update valuaes table_shuffle(keys) for i=1,N do local key = keys[i] space:upsert({key, 1}, {{'+', 2, 1}}) end -- Check values table_shuffle(keys) for i=1,N do local key = keys[i] local tuple = space:get({key}) if tuple == nil or tuple[1] ~= key then table.insert(errors, {'missing key after upsert', key}) end if tuple[2] ~= k then table.insert(errors, {'invalid value after upsert', key, 'found', tuple[2], 'expected', k}) end end end -- -- Delete -- table_shuffle(keys) for i=1,N do local key = keys[i] space:delete({key}) end for i=1,N do local key = keys[i] if space:get({key}) ~= nil then table.insert(errors, {'found deleted key', key}) end end local count = #space:select() -- :len() doesn't work on vinyl if count ~= 0 then table.insert(errors, {'invalid count after delete', count}) end return errors end function space_bsize(s) local bsize = 0 for _, t in s:pairs() do bsize = bsize + t:bsize() end return bsize end function create_iterator(obj, key, opts) local iter, key, state = obj:pairs(key, opts) local res = {iter = iter, key = key, state = state} res.next = function() local st, tp = iter.gen(key, state) return tp end res.iterate_over = function() local tp = nil local ret = {} local i = 0 tp = res.next() while tp do ret[i] = tp i = i + 1 tp = res.next() end return ret end return res end function setmap(tab) return setmetatable(tab, { __serialize = 'map' }) end return { space_field_types = space_field_types; iterate = iterate; arithmetic = arithmetic; table_generate = table_generate; table_shuffle = table_shuffle; sort = sort; tuple_to_string = tuple_to_string; check_space = check_space; space_bsize = space_bsize; create_iterator = 
create_iterator; setmap = setmap; }; tarantool_1.9.1.26.g63eb81e3c/test/box/lua/require_mod.lua0000664000000000000000000000011313306560010021541 0ustar rootrootexports = {} function exports.test(a, b) return a+b end return exports tarantool_1.9.1.26.g63eb81e3c/test/box/lua/cfg_bad_vinyl_dir.lua0000664000000000000000000000030213306560010022652 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), vinyl_dir = 'path/to/nowhere' } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/box/lua/cfg_test1.lua0000664000000000000000000000032713306560010021114 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), } require('console').listen(os.getenv('ADMIN')) box.schema.user.grant('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/box/hash_multipart.result0000664000000000000000000000546113306560010022241 0ustar rootrootutils = dofile('utils.lua') --- ... hash = box.schema.space.create('tweedledum') --- ... tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned', 2, 'string', 3, 'unsigned'}, unique = true }) --- ... tmp = hash:create_index('unique', { type = 'hash', parts = {3, 'unsigned', 5, 'unsigned'}, unique = true }) --- ... -- insert rows hash:insert{0, 'foo', 0, '', 1} --- - [0, 'foo', 0, '', 1] ... hash:insert{0, 'foo', 1, '', 1} --- - [0, 'foo', 1, '', 1] ... hash:insert{1, 'foo', 0, '', 2} --- - [1, 'foo', 0, '', 2] ... hash:insert{1, 'foo', 1, '', 2} --- - [1, 'foo', 1, '', 2] ... hash:insert{0, 'bar', 0, '', 3} --- - [0, 'bar', 0, '', 3] ... hash:insert{0, 'bar', 1, '', 3} --- - [0, 'bar', 1, '', 3] ... hash:insert{1, 'bar', 0, '', 4} --- - [1, 'bar', 0, '', 4] ... hash:insert{1, 'bar', 1, '', 4} --- - [1, 'bar', 1, '', 4] ... -- try to insert a row with a duplicate key hash:insert{1, 'bar', 1, '', 5} --- - error: Duplicate key exists in unique index 'primary' in space 'tweedledum' ... 
-- output all rows env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function select_all() local result = {} local tuple, v for tuple, v in hash:pairs() do table.insert(result, v) end return result end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... utils.sort(select_all()) --- - - [0, 'bar', 0, '', 3] - [0, 'bar', 1, '', 3] - [0, 'foo', 0, '', 1] - [0, 'foo', 1, '', 1] - [1, 'bar', 0, '', 4] - [1, 'bar', 1, '', 4] - [1, 'foo', 0, '', 2] - [1, 'foo', 1, '', 2] ... select_all = nil --- ... -- primary index select hash.index['primary']:get{1, 'foo', 0} --- - [1, 'foo', 0, '', 2] ... hash.index['primary']:get{1, 'bar', 0} --- - [1, 'bar', 0, '', 4] ... -- primary index select with missing part hash.index['primary']:get{1, 'foo'} --- - error: Invalid key part count in an exact match (expected 3, got 2) ... -- primary index select with extra part hash.index['primary']:get{1, 'foo', 0, 0} --- - error: Invalid key part count in an exact match (expected 3, got 4) ... -- primary index select with wrong type hash.index['primary']:get{1, 'foo', 'baz'} --- - error: 'Supplied key type of part 2 does not match index part type: expected unsigned' ... -- secondary index select hash.index['unique']:get{1, 4} --- - [1, 'bar', 1, '', 4] ... -- secondary index select with no such key hash.index['unique']:get{1, 5} --- ... -- secondary index select with missing part hash.index['unique']:get{1} --- - error: Invalid key part count in an exact match (expected 2, got 1) ... -- secondary index select with wrong type hash.index['unique']:select{1, 'baz'} --- - error: 'Supplied key type of part 1 does not match index part type: expected unsigned' ... -- cleanup hash:truncate() --- ... hash:len() --- - 0 ... hash:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/upsert_errinj.result0000664000000000000000000000072313306560010022104 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... 
s = box.schema.create_space('tweedledum') --- ... index = s:create_index('pk') --- ... errinj = box.error.injection --- ... errinj.set("ERRINJ_TUPLE_ALLOC", true) --- - ok ... s:upsert({111, '111', 222, '222'}, {{'!', 5, '!'}}) --- - error: Failed to allocate 28 bytes in slab allocator for memtx_tuple ... errinj.set("ERRINJ_TUPLE_ALLOC", false) --- - ok ... s:select{111} --- - [] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/hash.test.lua0000664000000000000000000002514613306560010020363 0ustar rootroot--============================================================================= -- 32-bit hash tests --============================================================================= ------------------------------------------------------------------------------- -- 32-bit hash insert fields tests ------------------------------------------------------------------------------- hash = box.schema.space.create('tweedledum') tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) bsize = tmp:bsize() -- Insert valid fields hash:insert{0, 'value1 v1.0', 'value2 v1.0'} hash:insert{1, 'value1 v1.0', 'value2 v1.0'} hash:insert{2, 'value1 v1.0', 'value2 v1.0'} hash:insert{3, 'value1 v1.0', 'value2 v1.0'} tmp:bsize() > bsize -- Insert invalid fields hash:insert{'invalid key', 'value1 v1.0', 'value2 v1.0'} ------------------------------------------------------------------------------- -- 32-bit hash replace fields tests ------------------------------------------------------------------------------- -- Replace valid fields hash:replace{3, 'value1 v1.31', 'value2 1.12'} hash:replace{1, 'value1 v1.32', 'value2 1.72'} hash:replace{2, 'value1 v1.43', 'value2 1.92'} -- Replace invalid fields hash:replace{'invalid key', 'value1 v1.0', 'value2 v1.0'} ------------------------------------------------------------------------------- -- 32-bit hash select fields test ------------------------------------------------------------------------------- 
-- select by valid keys hash.index['primary']:get{0} hash.index['primary']:get{1} hash.index['primary']:get{2} hash.index['primary']:get{3} hash.index['primary']:get{4} hash.index['primary']:get{5} -- select by invalid keys hash.index['primary']:get{'invalid key'} hash.index['primary']:get{1, 2} ------------------------------------------------------------------------------- -- 32-bit hash delete fields test ------------------------------------------------------------------------------- -- delete by valid keys hash:delete{0} hash:delete{1} hash:delete{2} hash:delete{3} hash:delete{4} hash:delete{5} -- delete by invalid keys hash:delete{'invalid key'} hash:delete{1, 2} hash:truncate() --============================================================================= -- 64-bit hash tests --============================================================================= ------------------------------------------------------------------------------- -- 64-bit hash inset fields tests ------------------------------------------------------------------------------- -- Insert valid fields hash:insert{0ULL, 'value1 v1.0', 'value2 v1.0'} hash:insert{1ULL, 'value1 v1.0', 'value2 v1.0'} hash:insert{2ULL, 'value1 v1.0', 'value2 v1.0'} hash:insert{3ULL, 'value1 v1.0', 'value2 v1.0'} -- Insert invalid fields hash:insert{100, 'value1 v1.0', 'value2 v1.0'} hash:insert{101, 'value1 v1.0', 'value2 v1.0'} hash:insert{102, 'value1 v1.0', 'value2 v1.0'} hash:insert{103, 'value1 v1.0', 'value2 v1.0'} hash:insert{'invalid key', 'value1 v1.0', 'value2 v1.0'} ------------------------------------------------------------------------------- -- 64-bit hash replace fields tests ------------------------------------------------------------------------------- -- Replace valid fields hash:replace{3ULL, 'value1 v1.31', 'value2 1.12'} hash:replace{1ULL, 'value1 v1.32', 'value2 1.72'} hash:replace{2ULL, 'value1 v1.43', 'value2 1.92'} -- Replace invalid fields hash:replace{3, 'value1 v1.31', 'value2 1.12'} 
hash:replace{1, 'value1 v1.32', 'value2 1.72'} hash:replace{2, 'value1 v1.43', 'value2 1.92'} hash:replace{'invalid key', 'value1 v1.0', 'value2 v1.0'} ------------------------------------------------------------------------------- -- 64-bit hash select fields test ------------------------------------------------------------------------------- -- select by valid keys hash.index['primary']:get{0ULL} hash.index['primary']:get{1ULL} hash.index['primary']:get{2ULL} hash.index['primary']:get{3ULL} hash.index['primary']:get{4ULL} hash.index['primary']:get{5ULL} -- select by valid NUM keys hash.index['primary']:get{0} hash.index['primary']:get{1} hash.index['primary']:get{2} hash.index['primary']:get{3} hash.index['primary']:get{4} hash.index['primary']:get{5} -- select by invalid keys hash.index['primary']:get{'invalid key'} hash.index['primary']:get{'00000001', '00000002'} ------------------------------------------------------------------------------- -- 64-bit hash delete fields test ------------------------------------------------------------------------------- -- delete by valid keys hash:delete{0ULL} hash:delete{1ULL} hash:delete{2ULL} hash:delete{3ULL} hash:delete{4ULL} hash:delete{5ULL} hash:insert{0ULL, 'value1 v1.0', 'value2 v1.0'} hash:insert{1ULL, 'value1 v1.0', 'value2 v1.0'} hash:insert{2ULL, 'value1 v1.0', 'value2 v1.0'} hash:insert{3ULL, 'value1 v1.0', 'value2 v1.0'} -- delete by valid NUM keys hash:delete{0} hash:delete{1} hash:delete{2} hash:delete{3} hash:delete{4} hash:delete{5} -- delete by invalid keys hash:delete{'invalid key'} hash:delete{'00000001', '00000002'} hash:truncate() --============================================================================= -- String hash tests --============================================================================= ------------------------------------------------------------------------------- -- String hash inset fields tests ------------------------------------------------------------------------------- 
hash.index['primary']:drop() tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'string'}, unique = true }) -- Insert valid fields hash:insert{'key 0', 'value1 v1.0', 'value2 v1.0'} hash:insert{'key 1', 'value1 v1.0', 'value2 v1.0'} hash:insert{'key 2', 'value1 v1.0', 'value2 v1.0'} hash:insert{'key 3', 'value1 v1.0', 'value2 v1.0'} ------------------------------------------------------------------------------- -- String hash replace fields tests ------------------------------------------------------------------------------- -- Replace valid fields hash:replace{'key 3', 'value1 v1.31', 'value2 1.12'} hash:replace{'key 1', 'value1 v1.32', 'value2 1.72'} hash:replace{'key 2', 'value1 v1.43', 'value2 1.92'} ------------------------------------------------------------------------------- -- String hash select fields test ------------------------------------------------------------------------------- -- select by valid keys hash.index['primary']:get{'key 0'} hash.index['primary']:get{'key 1'} hash.index['primary']:get{'key 2'} hash.index['primary']:get{'key 3'} hash.index['primary']:get{'key 4'} hash.index['primary']:get{'key 5'} -- select by invalid keys hash.index['primary']:get{'key 1', 'key 2'} ------------------------------------------------------------------------------- -- String hash delete fields test ------------------------------------------------------------------------------- -- delete by valid keys hash:delete{'key 0'} hash:delete{'key 1'} hash:delete{'key 2'} hash:delete{'key 3'} hash:delete{'key 4'} hash:delete{'key 5'} -- delete by invalid keys hash:delete{'key 1', 'key 2'} hash:truncate() ------------------------------------------------------------------------------- -- Collation test ------------------------------------------------------------------------------- hash.index['primary']:drop() tmp = hash:create_index('primary', { type = 'hash', parts = {{1, 'string', collation = 'unicode_ci'}}, unique = true}) tmp = 
hash:create_index('secondary', { type = 'hash', parts = {{2, 'scalar', collation = 'unicode_ci'}}, unique = true}) hash:insert{'Ёж', 'Hedgehog'} hash:insert{'Ёлка', 'Spruce'} hash:insert{'Jogurt', 'Йогурт'} hash:insert{'Один', 1} hash.index.primary:get('ёж') hash.index.primary:get('елка') hash.index.secondary:get('spruce') hash.index.secondary:get('йогурт') hash.index.secondary:get(1) hash.index.secondary:get('иогурт') hash.index.secondary:get(2) ------------------------ -- hash::replace tests ------------------------ hash.index['secondary']:drop() hash.index['primary']:drop() tmp = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) tmp = hash:create_index('field1', { type = 'hash', parts = {2, 'unsigned'}, unique = true }) tmp = hash:create_index('field2', { type = 'hash', parts = {3, 'unsigned'}, unique = true }) tmp = hash:create_index('field3', { type = 'hash', parts = {4, 'unsigned'}, unique = true }) hash:insert{0, 0, 0, 0} hash:insert{1, 1, 1, 1} hash:insert{2, 2, 2, 2} -- OK hash:replace{1, 1, 1, 1} hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} hash.index['primary']:get{1} hash.index['field1']:get{1} hash.index['field2']:get{1} hash.index['field3']:get{1} -- OK hash:insert{10, 10, 10, 10} hash:delete{10} hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} -- TupleFound (primary key) hash:insert{1, 10, 10, 10} hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} hash.index['primary']:get{1} -- TupleNotFound (primary key) hash:replace{10, 10, 10, 10} hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} -- TupleFound (key --1) hash:insert{10, 0, 10, 10} hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} 
hash.index['field3']:get{10} hash.index['field1']:get{0} -- TupleFound (key --1) -- hash:replace_if_exists(2, 0, 10, 10) hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} hash.index['field1']:get{0} -- TupleFound (key --3) hash:insert{10, 10, 10, 0} hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} hash.index['field3']:get{0} -- TupleFound (key --3) -- hash:replace_if_exists(2, 10, 10, 0) hash.index['primary']:get{10} hash.index['field1']:get{10} hash.index['field2']:get{10} hash.index['field3']:get{10} hash.index['field3']:get{0} hash:drop() hash = box.schema.space.create('tweedledum') hi = hash:create_index('primary', { type = 'hash', parts = {1, 'unsigned'}, unique = true }) hash:insert{0} hash:insert{16} for _, tuple in hi:pairs(nil, {iterator = box.index.ALL}) do hash:delete{tuple[1]} end hash:drop() -- -- gh-616 "1-based indexing and 0-based error message -- _ = box.schema.create_space('test') _ = box.space.test:create_index('i',{parts={1,'string'}}) box.space.test:insert{1} box.space.test:drop() -- gh-1467: invalid iterator type space = box.schema.space.create('test') index = space:create_index('primary', { type = 'hash' }) space:select({1}, {iterator = 'BITS_ALL_SET' } ) space:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/func_reload.result0000664000000000000000000001013113306560010021464 0ustar rootrootfio = require('fio') --- ... net = require('net.box') --- ... fiber = require('fiber') --- ... ext = (jit.os == "OSX" and "dylib" or "so") --- ... build_path = os.getenv("BUILDDIR") --- ... reload1_path = build_path..'/test/box/reload1.'..ext --- ... reload2_path = build_path..'/test/box/reload2.'..ext --- ... reload_path = "reload."..ext --- ... _ = fio.unlink(reload_path) --- ... c = net.connect(os.getenv("LISTEN")) --- ... box.schema.func.create('reload.foo', {language = "C"}) --- ... 
box.schema.user.grant('guest', 'execute', 'function', 'reload.foo') --- ... _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('primary', {parts = {1, "integer"}}) --- ... box.schema.user.grant('guest', 'read,write', 'space', 'test') --- ... _ = fio.unlink(reload_path) --- ... fio.symlink(reload1_path, reload_path) --- - true ... --check not fail on non-load func box.schema.func.reload("reload.foo") --- ... -- test of usual case reload. No hanging calls box.space.test:insert{0} --- - [0] ... c:call("reload.foo", {1}) --- - [] ... box.space.test:delete{0} --- - [0] ... _ = fio.unlink(reload_path) --- ... fio.symlink(reload2_path, reload_path) --- - true ... box.schema.func.reload("reload.foo") --- ... c:call("reload.foo") --- - [] ... box.space.test:select{} --- - - [-1] - [0] - [1] ... box.space.test:truncate() --- ... -- test case with hanging calls _ = fio.unlink(reload_path) --- ... fio.symlink(reload1_path, reload_path) --- - true ... box.schema.func.reload("reload.foo") --- ... fibers = 10 --- ... for i = 1, fibers do fiber.create(function() c:call("reload.foo", {i}) end) end --- ... while box.space.test:count() < fibers do fiber.sleep(0.001) end --- ... -- double reload doesn't fail waiting functions box.schema.func.reload("reload.foo") --- ... _ = fio.unlink(reload_path) --- ... fio.symlink(reload2_path, reload_path) --- - true ... box.schema.func.reload("reload.foo") --- ... c:call("reload.foo") --- - [] ... while box.space.test:count() < 2 * fibers + 1 do fiber.sleep(0.001) end --- ... box.space.test:select{} --- - - [-10] - [-9] - [-8] - [-7] - [-6] - [-5] - [-4] - [-3] - [-2] - [-1] - [0] - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] ... box.schema.func.drop("reload.foo") --- ... box.space.test:drop() --- ... _ = fio.unlink(reload_path) --- ... fio.symlink(reload1_path, reload_path) --- - true ... box.schema.func.create('reload.test_reload', {language = "C"}) --- ... 
box.schema.user.grant('guest', 'execute', 'function', 'reload.test_reload') --- ... s = box.schema.space.create('test_reload') --- ... _ = s:create_index('pk') --- ... box.schema.user.grant('guest', 'read,write', 'space', 'test_reload') --- ... ch = fiber.channel(2) --- ... -- call first time to load function c:call("reload.test_reload") --- - [[1]] ... s:delete({1}) --- - [1, 2] ... _ = fio.unlink(reload_path) --- ... fio.symlink(reload2_path, reload_path) --- - true ... _ = fiber.create(function() ch:put(c:call("reload.test_reload")) end) --- ... while s:get({1}) == nil do fiber.yield(0.0001) end --- ... box.schema.func.reload("reload.test_reload") --- ... _ = fiber.create(function() ch:put(c:call("reload.test_reload")) end) --- ... ch:get() --- - [[1]] ... ch:get() --- - [[2]] ... s:drop() --- ... box.schema.func.create('reload.test_reload_fail', {language = "C"}) --- ... box.schema.user.grant('guest', 'execute', 'function', 'reload.test_reload_fail') --- ... c:call("reload.test_reload_fail") --- - [[2]] ... _ = fio.unlink(reload_path) --- ... fio.symlink(reload1_path, reload_path) --- - true ... s, e = pcall(box.schema.func.reload, "reload.test_reload") --- ... s, string.find(tostring(e), 'test_reload_fail') ~= nil --- - false - true ... c:call("reload.test_reload") --- - [[2]] ... c:call("reload.test_reload_fail") --- - [[2]] ... box.schema.func.drop("reload.test_reload") --- ... box.schema.func.drop("reload.test_reload_fail") --- ... _ = fio.unlink(reload_path) --- ... box.schema.func.reload() --- - error: 'bad argument #1 to ''?'' (string expected, got no value)' ... box.schema.func.reload("non-existing") --- - error: Function 'non-existing' does not exist ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/tree_pk.test.lua0000664000000000000000000001601313306560010021062 0ustar rootrootutils = dofile('utils.lua') s0 = box.schema.space.create('tweedledum') i0 = s0:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) bsize = i0:bsize() -- integer keys s0:insert{1, 'tuple'} box.snapshot() s0:insert{2, 'tuple 2'} box.snapshot() i0:bsize() > bsize s0:insert{3, 'tuple 3'} s0.index['primary']:get{1} s0.index['primary']:get{2} s0.index['primary']:get{3} -- Cleanup s0:delete{1} s0:delete{2} s0:delete{3} -- Test incorrect keys - supplied key field type does not match index type -- https://bugs.launchpad.net/tarantool/+bug/1072624 s0:insert{'xxxxxxx'} s0:insert{''} s0:insert{'12'} s1 = box.schema.space.create('tweedledee') i1 = s1:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) s2 = box.schema.space.create('alice') i2 = s2:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) -- string keys s1:insert{'identifier', 'tuple'} box.snapshot() s1:insert{'second', 'tuple 2'} box.snapshot() s1.index['primary']:select('second', { limit = 100, iterator = 'GE' }) s1.index['primary']:select('identifier', { limit = 100, iterator = 'GE' }) s1:insert{'third', 'tuple 3'} s1.index['primary']:get{'identifier'} s1.index['primary']:get{'second'} s1.index['primary']:get{'third'} -- Cleanup s1:delete{'identifier'} s1:delete{'second'} s1:delete{'third'} env = require('test_run') test_run = env.new() test_run:cmd("setopt delimiter ';'") function crossjoin(space0, space1, limit) local result = {} for state, v0 in space0:pairs() do for state, v1 in space1:pairs() do if limit <= 0 then return result end local newtuple = v0:totable() for _, v in v1:pairs() do table.insert(newtuple, v) end table.insert(result, box.tuple.new(newtuple)) limit = limit - 1 end end return result end; test_run:cmd("setopt delimiter ''"); s2:insert{'1', 'tuple'} s1:insert{'1', 'tuple'} s1:insert{'2', 
'tuple'} crossjoin(s1, s1, 0) crossjoin(s1, s1, 5) crossjoin(s1, s1, 10000) crossjoin(s1, s2, 10000) s1:truncate() s2:truncate() -- Bug #922520 - select missing keys s0:insert{200, 'select me!'} s0.index['primary']:get{200} s0.index['primary']:get{199} s0.index['primary']:get{201} s1:drop() s1 = nil s2:drop() s2 = nil -- -- tree::replace tests -- s0:truncate() i1 = s0:create_index('i1', { type = 'tree', parts = {2, 'unsigned'}, unique = true }) i2 = s0:create_index('i2', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) i3 = s0:create_index('i3', { type = 'tree', parts = {4, 'unsigned'}, unique = true }) s0:insert{0, 0, 0, 0} s0:insert{1, 1, 1, 1} s0:insert{2, 2, 2, 2} -- OK s0:replace{1, 1, 1, 1} s0:replace{1, 10, 10, 10} s0:replace{1, 1, 1, 1} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} s0.index['primary']:get{1} s0.index['i1']:select{1} s0.index['i2']:select{1} s0.index['i3']:select{1} -- OK s0:insert{10, 10, 10, 10} s0:delete{10} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} -- TupleFound (primary key) s0:insert{1, 10, 10, 10} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} s0.index['primary']:get{1} -- TupleNotFound (primary key) s0:replace{10, 10, 10, 10} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} -- TupleFound (key #1) s0:insert{10, 0, 10, 10} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} s0.index['i1']:select{0} -- TupleFound (key #1) s0:replace{2, 0, 10, 10} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} s0.index['i1']:select{0} -- TupleFound (key #3) s0:insert{10, 10, 10, 0} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} 
s0.index['i3']:select{0} -- TupleFound (key #3) s0:replace{2, 10, 10, 0} s0.index['primary']:get{10} s0.index['i1']:select{10} s0.index['i2']:select{10} s0.index['i3']:select{10} s0.index['i3']:select{0} -- Non-Uniq test (key #2) s0:insert{4, 4, 0, 4} s0:insert{5, 5, 0, 5} s0:insert{6, 6, 0, 6} s0:replace{5, 5, 0, 5} utils.sort(s0.index['i2']:select(0)) s0:delete{5} utils.sort(s0.index['i2']:select(0)) s0:drop() s0 = nil -- Stable non-unique indexes -- https://github.com/tarantool/tarantool/issues/2476 s = box.schema.space.create('test') i1 = s:create_index('i1', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) i2 = s:create_index('i2', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) i3 = s:create_index('i3', { type = 'tree', parts = {{3, 'unsigned', is_nullable = true}}, unique = true }) _ = s:replace{5, 1, box.NULL, 1} _ = s:replace{4, 1, box.NULL, 3} _ = s:replace{6, 1, box.NULL, 2} _ = s:replace{3, 1, box.NULL, 0} _ = s:replace{7, 1, box.NULL, 100} _ = s:replace{15, 2, 100, 11} _ = s:replace{14, 2, 500, 41} _ = s:replace{16, 2, 200, 31} _ = s:replace{13, 2, 300, 13} _ = s:replace{17, 2, 400, 10} i2:select{1} i2:select{2} i2:select{1, 5} i3:select{box.NULL} i1:alter{parts = {4, 'unsigned'}} i2:select{1} i2:select{2} i2:select{1, 1} i3:select{box.NULL} s:truncate() i1:alter{parts = {1, 'str'}} _ = s:replace{"5", 1, box.NULL} _ = s:replace{"4", 1, box.NULL} _ = s:replace{"6", 1, box.NULL} _ = s:replace{"3", 1, box.NULL} _ = s:replace{"7", 1, box.NULL} _ = s:replace{"15", 2, 100} _ = s:replace{"14", 2, 500} _ = s:replace{"16", 2, 200} _ = s:replace{"13", 2, 300} _ = s:replace{"17", 2, 400} i2:select{1} i2:select{2} i3:select{box.NULL} s:drop() --https://github.com/tarantool/tarantool/issues/2649 -- create standart index and alter it to collation index box.internal.collation.create('test', 'ICU', 'ru-RU') box.internal.collation.create('test-ci', 'ICU', 'ru-RU', {strength = 'secondary'}) s = box.schema.space.create('test') i1 = 
s:create_index('i1', { type = 'tree', parts = {{1, 'str'}}, unique = true }) _ = s:replace{"ааа"} _ = s:replace{"еее"} _ = s:replace{"ёёё"} _ = s:replace{"жжж"} _ = s:replace{"яяя"} _ = s:replace{"ААА"} _ = s:replace{"ЯЯЯ"} -- bad output s:select{} i1:alter({parts = {{1, 'str', collation='test'}}}) -- good output s:select{} i1:alter({parts = {{1, 'str', collation='test-ci'}}}) _ = s:delete{"ААА"} _ = s:delete{"ЯЯЯ"} i1:alter({parts = {{1, 'str', collation='test-ci'}}}) -- good output s:select{} s:insert{"ААА"} s:replace{"ЯЯЯ"} -- good output s:select{} s:drop() -- create collation index and alter it to standart index s = box.schema.space.create('test') i1 = s:create_index('i1', { type = 'tree', parts = {{1, 'str', collation='test'}}, unique = true }) _ = s:replace{"ааа"} _ = s:replace{"еее"} _ = s:replace{"ёёё"} _ = s:replace{"жжж"} _ = s:replace{"яяя"} _ = s:replace{"ААА"} _ = s:replace{"ЯЯЯ"} -- good output s:select{} i1:alter({parts = {{1, 'str'}}}) -- bad output s:select{} s:drop() box.internal.collation.drop('test') box.internal.collation.drop('test-ci') tarantool_1.9.1.26.g63eb81e3c/test/box/access_escalation.result0000664000000000000000000000466213306560010022662 0ustar rootrootfiber = require('fiber') --- ... net = require('net.box') --- ... log = require('log') --- ... json = require('json') --- ... os = require('os') --- ... -- gh-617: guest access denied because of setuid -- function invocation. -- Test for privilege escalation -- ----------------------------- -- * create a setuid function which changes effective id -- to superuser -- * invoke it via the binary protocol -- * while the function is running, invoke a non-setuid function -- which reads a system space. -- -- The invoked function should get "Access denied" error, -- there should be no privilege escalation. -- define functions function setuid() fiber.sleep(1000000) end --- ... function escalation() return box.space._space:get{box.schema.SPACE_ID} ~= nil end --- ... 
-- set up grants box.schema.func.create('setuid', {setuid=true}) --- ... box.schema.func.create('escalation') --- ... box.schema.user.grant('guest', 'execute', 'function', 'setuid') --- ... box.schema.user.grant('guest', 'execute', 'function', 'escalation') --- ... connection = net:connect(os.getenv("LISTEN")) --- ... background = fiber.create(function() connection:call("setuid") end) --- ... connection:call("escalation") --- - error: Read access to space '_space' is denied for user 'guest' ... fiber.cancel(background) --- ... -- -- tear down the functions; the grants are dropped recursively -- box.schema.func.drop('setuid') --- ... box.schema.func.drop('escalation') --- ... connection:close() --- ... -- Test for privilege de-escalation -- -------------------------------- -- -- * create a setuid function which runs under a deprived user -- * invoke the function, let it sleep -- * invoke a function which should have privileges -- -- create a deprived user box.schema.user.create('underprivileged') --- ... box.schema.user.grant('underprivileged', 'read,write', 'space', '_func') --- ... box.session.su('underprivileged') --- ... box.schema.func.create('setuid', {setuid=true}) --- ... box.session.su('admin') --- ... -- -- create a deprived function -- box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... connection = net:connect(os.getenv("LISTEN")) --- ... background = fiber.create(function() connection:call("setuid") end) --- ... connection:call("escalation") --- - true ... fiber.cancel(background) --- ... -- tear down box.schema.user.drop('underprivileged') --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... connection:close() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/alter_limits.result0000664000000000000000000006023113306565107021715 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("push filter ".."'\\.lua.*:[0-9]+: ' to '.lua...\"]:: '") --- - true ... 
-- ---------------------------------------------------------------- -- LIMITS -- ---------------------------------------------------------------- box.schema.SYSTEM_ID_MIN --- - 256 ... box.schema.FIELD_MAX --- - 2147483647 ... box.schema.INDEX_FIELD_MAX --- - 32767 ... box.schema.NAME_MAX --- - 65000 ... box.schema.INDEX_ID --- - 288 ... box.schema.SPACE_ID --- - 280 ... box.schema.INDEX_MAX --- - 128 ... box.schema.SPACE_MAX --- - 2147483647 ... box.schema.SYSTEM_ID_MAX --- - 511 ... box.schema.SCHEMA_ID --- - 272 ... box.schema.FORMAT_ID_MAX --- - 65534 ... -- ---------------------------------------------------------------- -- CREATE SPACE -- ---------------------------------------------------------------- s = box.schema.space.create('tweedledum') --- ... -- space already exists box.schema.space.create('tweedledum') --- - error: Space 'tweedledum' already exists ... -- create if not exists s = box.schema.space.create('tweedledum', { if_not_exists = true }) --- ... s:drop() --- ... -- no such space s:drop() --- - error: Space 'tweedledum' does not exist ... -- no such engine box.schema.space.create('tweedleedee', { engine = 'unknown' }) --- - error: Space engine 'unknown' does not exist ... -- explicit space id s = box.schema.space.create('tweedledum', { id = 3000 }) --- ... s.id --- - 3000 ... -- duplicate id box.schema.space.create('tweedledee', { id = 3000 }) --- - error: Duplicate key exists in unique index 'primary' in space '_space' ... -- stupid space id box.schema.space.create('tweedledee', { id = 'tweedledee' }) --- - error: Illegal parameters, options parameter 'id' should be of type number ... s:drop() --- ... -- too long space name box.schema.space.create(string.rep('t', box.schema.NAME_MAX + 1)) --- - error: 'Failed to create space ''tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'': space name is too long' ... 
-- too long space engine name box.schema.space.create('tweedleedee', { engine = string.rep('too-long', 100) }) --- - error: 'Failed to create space ''tweedleedee'': space engine name is too long' ... -- space name limit box.schema.space.create(string.rep('t', box.schema.NAME_MAX)..'_') --- - error: 'Failed to create space ''tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'': space name is too long' ... s = box.schema.space.create(string.rep('t', box.schema.NAME_MAX - 1)..'_') --- ... s.name:len() --- - 65000 ... s:drop() --- ... s = box.schema.space.create(string.rep('t', box.schema.NAME_MAX - 2)..'_') --- ... s.name:len() --- - 64999 ... s:drop() --- ... -- space with no indexes - test update, delete, select, truncate s = box.schema.space.create('tweedledum') --- ... s:insert{0} --- - error: 'No index #0 is defined in space ''tweedledum''' ... s:select{} --- - error: 'No index #0 is defined in space ''tweedledum''' ... s:delete{0} --- - error: 'No index #0 is defined in space ''tweedledum''' ... s:update(0, {{"=", 1, 0}}) --- - error: 'No index #0 is defined in space ''tweedledum''' ... s:insert{0} --- - error: 'No index #0 is defined in space ''tweedledum''' ... s.index[0] --- - null ... s:truncate() --- ... s.enabled --- - false ... -- enabled/disabled transition index = s:create_index('primary', { type = 'hash' }) --- ... s.enabled --- - true ... -- rename space - same name s:rename('tweedledum') --- ... s.name --- - tweedledum ... -- rename space - different name s:rename('tweedledee') --- ... s.name --- - tweedledee ... -- the reference from box.space[] to the space by old name should be gone box.space['tweedledum'] --- - null ... -- rename space - bad name s:rename(string.rep('t', box.schema.NAME_MAX * 2)) --- - error: 'Can''t modify space ''tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'': space name is too long' ... s.name --- - tweedledee ... -- access to a renamed space s:insert{0} --- - [0] ... s:delete{0} --- - [0] ... 
-- cleanup s:drop() --- ... -- check DDL on invalid space object s:create_index('primary') --- - error: Space 'tweedledee' does not exist ... s:rename('xxx') --- - error: Space 'tweedledee' does not exist ... s:drop() --- - error: Space 'tweedledee' does not exist ... -- create a space with reserved id (ok, but warns in the log) s = box.schema.space.create('test', { id = 256 }) --- ... s.id --- - 256 ... s:drop() --- ... s = box.schema.space.create('test', { field_count = 2 }) --- ... s.field_count --- - 2 ... index = s:create_index('primary') --- ... -- field_count actually works s:insert{1} --- - error: Tuple field count 1 does not match space field count 2 ... s:insert{1, 2} --- - [1, 2] ... s:insert{1, 2, 3} --- - error: Tuple field count 3 does not match space field count 2 ... s:select{} --- - - [1, 2] ... FIELD_COUNT = 4 --- ... -- increase field_count -- error box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 3}}) --- - error: 'Can''t modify space ''test'': can not change field count on a non-empty space' ... s:select{} --- - - [1, 2] ... -- decrease field_count - error box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 1}}) --- - error: 'Can''t modify space ''test'': can not change field count on a non-empty space' ... -- remove field_count - ok _ = box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 0}}) --- ... s:select{} --- - - [1, 2] ... -- increase field_count - error box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 3}}) --- - error: 'Can''t modify space ''test'': can not change field count on a non-empty space' ... s:truncate() --- ... s:select{} --- - [] ... -- set field_count of an empty space _ = box.space['_space']:update(s.id, {{"=", FIELD_COUNT + 1, 3}}) --- ... s:select{} --- - [] ... -- field_count actually works s:insert{3, 4} --- - error: Tuple field count 2 does not match space field count 3 ... s:insert{3, 4, 5} --- - [3, 4, 5] ... 
s:insert{3, 4, 5, 6} --- - error: Tuple field count 4 does not match space field count 3 ... s:insert{7, 8, 9} --- - [7, 8, 9] ... s:select{} --- - - [3, 4, 5] - [7, 8, 9] ... -- check transition of space from enabled to disabled on -- deletion of the primary key s.enabled --- - true ... s.index[0]:drop() --- ... s.enabled --- - false ... s.index[0] --- - null ... -- "disabled" on -- deletion of primary key s:drop() --- ... -- ---------------------------------------------------------------- -- CREATE INDEX -- ---------------------------------------------------------------- -- s = box.schema.space.create('test') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for k=1, box.schema.INDEX_MAX, 1 do index = s:create_index('i'..k, { type = 'hash' }) end; --- ... -- cleanup for k=2, box.schema.INDEX_MAX, 1 do s.index['i'..k]:drop() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- test limits enforced in key_def_check: -- unknown index type index = s:create_index('test', { type = 'nosuchtype' }) --- - error: Unsupported index type supplied for index 'test' in space 'test' ... -- hash index is not unique index = s:create_index('test', { type = 'hash', unique = false }) --- - error: 'Can''t create or modify index ''test'' in space ''test'': HASH index must be unique' ... -- bitset index is unique index = s:create_index('test', { type = 'bitset', unique = true }) --- - error: 'Can''t create or modify index ''test'' in space ''test'': BITSET can not be unique' ... -- bitset index is multipart index = s:create_index('test', { type = 'bitset', parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': BITSET index key can not be multipart' ... -- part count must be positive index = s:create_index('test', { type = 'hash', parts = {}}) --- - error: Illegal parameters, options.parts must have at least one part ... 
-- unknown field type index = s:create_index('test', { type = 'hash', parts = { 2, 'nosuchtype' }}) --- - error: 'Wrong index parts: unknown field type; expected field1 id (number), field1 type (string), ...' ... index = s:create_index('test', { type = 'hash', parts = { 2, 'any' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field type ''any'' is not supported' ... index = s:create_index('test', { type = 'hash', parts = { 2, 'array' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field type ''array'' is not supported' ... index = s:create_index('test', { type = 'hash', parts = { 2, 'map' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field type ''map'' is not supported' ... index = s:create_index('test', { type = 'rtree', parts = { 2, 'nosuchtype' }}) --- - error: 'Wrong index parts: unknown field type; expected field1 id (number), field1 type (string), ...' ... index = s:create_index('test', { type = 'rtree', parts = { 2, 'any' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': RTREE index field type must be ARRAY' ... index = s:create_index('test', { type = 'rtree', parts = { 2, 'map' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': RTREE index field type must be ARRAY' ... -- bad field no index = s:create_index('test', { type = 'hash', parts = { 'qq', 'nosuchtype' }}) --- - error: 'Illegal parameters, options.parts[1]: field was not found by name ''qq''' ... -- big field no index = s:create_index('test', { type = 'hash', parts = { box.schema.FIELD_MAX, 'unsigned' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field no is too big' ... index = s:create_index('test', { type = 'hash', parts = { box.schema.FIELD_MAX - 1, 'unsigned' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field no is too big' ... 
index = s:create_index('test', { type = 'hash', parts = { box.schema.FIELD_MAX + 90, 'unsigned' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field no is too big' ... index = s:create_index('test', { type = 'hash', parts = { box.schema.INDEX_FIELD_MAX + 1, 'unsigned' }}) --- ... index = s:create_index('t1', { type = 'hash', parts = { box.schema.INDEX_FIELD_MAX, 'unsigned' }}) --- ... index = s:create_index('t2', { type = 'hash', parts = { box.schema.INDEX_FIELD_MAX - 1, 'unsigned' }}) --- ... -- cleanup s:drop() --- ... s = box.schema.space.create('test') --- ... -- same part can't be indexed twice index = s:create_index('t1', { type = 'hash', parts = { 1, 'unsigned', 1, 'string' }}) --- - error: 'Can''t create or modify index ''t1'' in space ''test'': same key part is indexed twice' ... -- a lot of key parts parts = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for k=1, box.schema.INDEX_PART_MAX + 1, 1 do table.insert(parts, k) table.insert(parts, 'unsigned') end; --- ... #parts; --- - 512 ... index = s:create_index('t1', { type = 'hash', parts = parts}); --- - error: 'Can''t create or modify index ''t1'' in space ''test'': too many key parts' ... parts = {}; --- ... for k=1, box.schema.INDEX_PART_MAX, 1 do table.insert(parts, k + 1) table.insert(parts, 'unsigned') end; --- ... #parts; --- - 510 ... index = s:create_index('t1', { type = 'hash', parts = parts}); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- this is actually incorrect since parts is a lua table -- and length of a lua table which has index 0 set is not correct #s.index[0].parts --- - 255 ... -- cleanup s:drop() --- ... -- check costraints in tuple_format_new() s = box.schema.space.create('test') --- ... index = s:create_index('t1', { type = 'hash' }) --- ... 
-- field type contradicts field type of another index index = s:create_index('t2', { type = 'hash', parts = { 1, 'string' }}) --- - error: Field 1 has type 'unsigned' in one index, but type 'string' in another ... -- ok index = s:create_index('t2', { type = 'hash', parts = { 2, 'string' }}) --- ... -- don't allow drop of the primary key in presence of other keys s.index[0]:drop() --- - error: Can't drop primary key in space 'test' while secondary keys exist ... -- cleanup s:drop() --- ... -- index name, name manipulation s = box.schema.space.create('test') --- ... index = s:create_index('primary', { type = 'hash' }) --- ... -- space cache is updated correctly s.index[0].name --- - primary ... s.index[0].id --- - 0 ... s.index[0].type --- - HASH ... s.index['primary'].name --- - primary ... s.index['primary'].id --- - 0 ... s.index['primary'].type --- - HASH ... s.index.primary.name --- - primary ... s.index.primary.id --- - 0 ... -- other properties are preserved s.index.primary.type --- - HASH ... s.index.primary.unique --- - true ... s.index.primary:rename('new') --- ... s.index[0].name --- - new ... s.index.primary --- - null ... s.index.new.name --- - new ... -- too long name s.index[0]:rename(string.rep('t', box.schema.NAME_MAX)..'_') --- - error: 'Can''t create or modify index ''tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'' in space ''test'': index name is too long' ... s.index[0].name --- - new ... s.index[0]:rename(string.rep('t', box.schema.NAME_MAX - 1)..'_') --- ... s.index[0].name:len() --- - 65000 ... s.index[0]:rename(string.rep('t', box.schema.NAME_MAX - 2)..'_') --- ... s.index[0].name:len() --- - 64999 ... s.index[0]:rename('primary') --- ... s.index.primary.name --- - primary ... -- cleanup s:drop() --- ... -- modify index s = box.schema.space.create('test') --- ... index = s:create_index('primary', { type = 'hash' }) --- ... 
-- correct error on misuse of alter s.index.primary.alter({unique=false}) --- - error: 'builtin/box/schema.lua..."]:: Use index:alter(...) instead of index.alter(...)' ... s.index.primary:alter({unique=false}) --- - error: 'Can''t create or modify index ''primary'' in space ''test'': primary key must be unique' ... -- unique -> non-unique, index type s.index.primary:alter({type='tree', unique=false, name='pk'}) --- - error: 'Can''t create or modify index ''pk'' in space ''test'': primary key must be unique' ... s.index.primary.name --- - primary ... s.index.primary.id --- - 0 ... s.index.pk.type --- - error: '[string "return s.index.pk.type "]:1: attempt to index field ''pk'' (a nil value)' ... s.index.pk.unique --- - error: '[string "return s.index.pk.unique "]:1: attempt to index field ''pk'' (a nil value)' ... s.index.pk:rename('primary') --- - error: '[string "return s.index.pk:rename(''primary'') "]:1: attempt to index field ''pk'' (a nil value)' ... index = s:create_index('second', { type = 'tree', parts = { 2, 'string' } }) --- ... s.index.second.id --- - 1 ... index = s:create_index('third', { type = 'hash', parts = { 3, 'unsigned' } }) --- ... s.index.third:rename('second') --- - error: Duplicate key exists in unique index 'name' in space '_index' ... s.index.third.id --- - 2 ... s.index.second:drop() --- ... s.index.third:alter({name = 'second'}) --- ... s.index.third --- - null ... s.index.second.name --- - second ... s.index.second.id --- - 2 ... s:drop() --- ... -- ---------------------------------------------------------------- -- BUILD INDEX: changes of a non-empty index -- ---------------------------------------------------------------- s = box.schema.space.create('full') --- ... index = s:create_index('primary', { type = 'tree', parts = { 1, 'string' }}) --- ... s:insert{'No such movie', 999} --- - ['No such movie', 999] ... s:insert{'Barbara', 2012} --- - ['Barbara', 2012] ... s:insert{'Cloud Atlas', 2012} --- - ['Cloud Atlas', 2012] ... 
s:insert{'Almanya - Willkommen in Deutschland', 2011} --- - ['Almanya - Willkommen in Deutschland', 2011] ... s:insert{'Halt auf freier Strecke', 2011} --- - ['Halt auf freier Strecke', 2011] ... s:insert{'Homevideo', 2011} --- - ['Homevideo', 2011] ... s:insert{'Die Fremde', 2010} --- - ['Die Fremde', 2010] ... -- create index with data index = s:create_index('year', { type = 'tree', unique=false, parts = { 2, 'unsigned'} }) --- ... s.index.primary:select{} --- - - ['Almanya - Willkommen in Deutschland', 2011] - ['Barbara', 2012] - ['Cloud Atlas', 2012] - ['Die Fremde', 2010] - ['Halt auf freier Strecke', 2011] - ['Homevideo', 2011] - ['No such movie', 999] ... -- a duplicate in the created index index = s:create_index('nodups', { type = 'tree', unique=true, parts = { 2, 'unsigned'} }) --- - error: Duplicate key exists in unique index 'nodups' in space 'full' ... -- change of non-unique index to unique: same effect s.index.year:alter({unique=true}) --- - error: Duplicate key exists in unique index 'year' in space 'full' ... s.index.primary:select{} --- - - ['Almanya - Willkommen in Deutschland', 2011] - ['Barbara', 2012] - ['Cloud Atlas', 2012] - ['Die Fremde', 2010] - ['Halt auf freier Strecke', 2011] - ['Homevideo', 2011] - ['No such movie', 999] ... -- ambiguous field type index = s:create_index('string', { type = 'tree', unique = false, parts = { 2, 'string'}}) --- - error: Field 2 has type 'unsigned' in one index, but type 'string' in another ... -- create index on a non-existing field index = s:create_index('nosuchfield', { type = 'tree', unique = true, parts = { 3, 'string'}}) --- - error: Tuple field count 2 is less than required by space format or defined indexes (expected at least 3) ... s.index.year:drop() --- ... s:insert{'Der Baader Meinhof Komplex', '2009 '} --- - ['Der Baader Meinhof Komplex', '2009 '] ... 
-- create an index on a field with a wrong type index = s:create_index('year', { type = 'tree', unique = false, parts = { 2, 'unsigned'}}) --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... -- a field is missing s:replace{'Der Baader Meinhof Komplex'} --- - ['Der Baader Meinhof Komplex'] ... index = s:create_index('year', { type = 'tree', unique = false, parts = { 2, 'unsigned'}}) --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... s:drop() --- ... -- unique -> non-unique transition s = box.schema.space.create('test') --- ... -- primary key must be unique index = s:create_index('primary', { unique = false }) --- - error: 'Can''t create or modify index ''primary'' in space ''test'': primary key must be unique' ... -- create primary key index = s:create_index('primary', { type = 'hash' }) --- ... s:insert{1, 1} --- - [1, 1] ... index = s:create_index('secondary', { type = 'tree', unique = false, parts = {2, 'unsigned'}}) --- ... s:insert{2, 1} --- - [2, 1] ... s.index.secondary:alter{ unique = true } --- - error: Duplicate key exists in unique index 'secondary' in space 'test' ... s:delete{2} --- - [2, 1] ... s.index.secondary:alter{ unique = true } --- ... s:insert{2, 1} --- - error: Duplicate key exists in unique index 'secondary' in space 'test' ... s:insert{2, 2} --- - [2, 2] ... s.index.secondary:alter{ unique = false} --- ... s:insert{3, 2} --- - [3, 2] ... -- changing index id is not allowed s.index.secondary:alter{ id = 10} --- - error: Attempt to modify a tuple field which is part of index 'primary' in space '_index' ... s:drop() --- ... -- ---------------------------------------------------------------- -- SPACE CACHE: what happens to a space cache when an object is gone -- ---------------------------------------------------------------- s = box.schema.space.create('test') --- ... s1 = s --- ... index = s:create_index('primary') --- ... 
s1.index.primary.id --- - 0 ... primary = s1.index.primary --- ... s.index.primary:drop() --- ... primary.id --- - 0 ... primary:select{} --- - error: 'No index #0 is defined in space ''test''' ... s:drop() --- ... -- @todo: add a test case for dangling iterator (currently no checks -- for a dangling iterator in the code -- ---------------------------------------------------------------- -- ---------------------------------------------------------------- -- RECOVERY: check that all indexes are correctly built -- during recovery regardless of when they are created -- ---------------------------------------------------------------- -- primary, secondary keys in a snapshot s_empty = box.schema.space.create('s_empty') --- ... indexe1 = s_empty:create_index('primary') --- ... indexe2 = s_empty:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) --- ... s_full = box.schema.space.create('s_full') --- ... indexf1 = s_full:create_index('primary') --- ... indexf2 = s_full:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) --- ... s_full:insert{1, 1, 'a'} --- - [1, 1, 'a'] ... s_full:insert{2, 2, 'b'} --- - [2, 2, 'b'] ... s_full:insert{3, 3, 'c'} --- - [3, 3, 'c'] ... s_full:insert{4, 4, 'd'} --- - [4, 4, 'd'] ... s_full:insert{5, 5, 'e'} --- - [5, 5, 'e'] ... s_nil = box.schema.space.create('s_nil') --- ... s_drop = box.schema.space.create('s_drop') --- ... box.snapshot() --- - ok ... s_drop:drop() --- ... indexn1 = s_nil:create_index('primary', { type = 'hash'}) --- ... s_nil:insert{1,2,3,4,5,6} --- - [1, 2, 3, 4, 5, 6] ... s_nil:insert{7, 8, 9, 10, 11,12} --- - [7, 8, 9, 10, 11, 12] ... indexn2 = s_nil:create_index('secondary', { type = 'tree', unique=false, parts = {2, 'unsigned', 3, 'unsigned', 4, 'unsigned'}}) --- ... s_nil:insert{13, 14, 15, 16, 17} --- - [13, 14, 15, 16, 17] ... r_empty = box.schema.space.create('r_empty') --- ... indexe1 = r_empty:create_index('primary') --- ... 
indexe2 = r_empty:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) --- ... r_full = box.schema.space.create('r_full') --- ... indexf1 = r_full:create_index('primary', { type = 'tree', unique = true, parts = {1, 'unsigned'}}) --- ... indexf2 = r_full:create_index('secondary', { type = 'hash', unique = true, parts = {2, 'unsigned'}}) --- ... r_full:insert{1, 1, 'a'} --- - [1, 1, 'a'] ... r_full:insert{2, 2, 'b'} --- - [2, 2, 'b'] ... r_full:insert{3, 3, 'c'} --- - [3, 3, 'c'] ... r_full:insert{4, 4, 'd'} --- - [4, 4, 'd'] ... r_full:insert{5, 5, 'e'} --- - [5, 5, 'e'] ... indexf1 = s_full:create_index('multikey', { type = 'tree', unique = true, parts = { 2, 'unsigned', 3, 'string'}}) --- ... s_full:insert{6, 6, 'f'} --- - [6, 6, 'f'] ... s_full:insert{7, 7, 'g'} --- - [7, 7, 'g'] ... s_full:insert{8, 8, 'h'} --- - [8, 8, 'h'] ... r_disabled = box.schema.space.create('r_disabled') --- ... test_run:cmd("restart server default") s_empty = box.space['s_empty'] --- ... s_full = box.space['s_full'] --- ... s_nil = box.space['s_nil'] --- ... s_drop = box.space['s_drop'] --- ... r_empty = box.space['r_empty'] --- ... r_full = box.space['r_full'] --- ... r_disabled = box.space['r_disabled'] --- ... s_drop --- - null ... s_empty.index.primary.type --- - TREE ... s_full.index.primary.type --- - TREE ... r_empty.index.primary.type --- - TREE ... r_full.index.primary.type --- - TREE ... s_nil.index.primary.type --- - HASH ... s_empty.index.primary.name --- - primary ... s_full.index.primary.name --- - primary ... r_empty.index.primary.name --- - primary ... r_full.index.primary.name --- - primary ... s_nil.index.primary.name --- - primary ... s_empty.enabled --- - true ... s_full.enabled --- - true ... r_empty.enabled --- - true ... r_full.enabled --- - true ... s_nil.enabled --- - true ... r_disabled.enabled --- - false ... s_empty.index.secondary.name --- - secondary ... s_full.index.secondary.name --- - secondary ... 
r_empty.index.secondary.name --- - secondary ... r_full.index.secondary.name --- - secondary ... s_nil.index.secondary.name --- - secondary ... s_empty.index.primary:count(1) --- - 0 ... s_full.index.primary:count(1) --- - 1 ... r_empty.index.primary:count(1) --- - 0 ... r_full.index.primary:count(1) --- - 1 ... s_nil.index.primary:count(1) --- - 1 ... s_empty.index.secondary:count(1) --- - 0 ... s_full.index.secondary:count(1) --- - 1 ... r_empty.index.secondary:count(1) --- - 0 ... r_full.index.secondary:count(1) --- - 1 ... s_nil.index.secondary:count(1) --- - 0 ... -- gh-503 if_not_exits option in create index i1 = s_empty:create_index("test") --- ... i1:select{} --- - [] ... i2 = s_empty:create_index("test") --- - error: Index 'test' already exists ... i3 = s_empty:create_index("test", { if_not_exists = true } ) --- ... i3:select{} --- - [] ... -- cleanup s_empty:drop() --- ... s_full:drop() --- ... r_empty:drop() --- ... r_full:drop() --- ... s_nil:drop() --- ... r_disabled:drop() --- ... -- -- @todo usability -- --------- -- - space name in all error messages! -- error: Duplicate key exists in unique index 1 (ugly) -- -- @todo features -------- -- - ffi function to enable/disable space -- test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/box/transaction.test.lua0000664000000000000000000001275313306565107022001 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("setopt delimiter ';'") -- empty transaction - ok box.begin() box.commit(); -- double begin box.begin() box.begin(); -- no active transaction since exception rolled it back box.commit(); -- double commit - implicit start of transaction box.begin() box.commit() box.commit(); -- commit if not started - implicit start of transaction box.commit(); -- rollback if not started - ok box.rollback() -- double rollback - ok box.begin() box.rollback() box.rollback(); -- rollback of an empty trans - ends transaction box.begin() box.rollback(); -- no current transaction - implicit begin box.commit(); fiber = require('fiber'); function sloppy() box.begin() end; f = fiber.create(sloppy); -- when the sloppy fiber ends, its session has an active transction -- ensure it's rolled back automatically while f:status() ~= 'dead' do fiber.sleep(0) end; -- transactions and system spaces box.begin() box.schema.space.create('test'); box.rollback(); box.begin() box.schema.func.create('test'); box.rollback(); box.begin() box.schema.user.create('test'); box.rollback(); box.begin() box.schema.user.grant('guest', 'read', 'space', '_priv'); box.rollback(); box.begin() box.space._schema:insert{'test'}; box.rollback(); box.begin() box.space._cluster:insert{123456789, 'abc'}; box.rollback(); s = box.schema.space.create('test'); box.begin() index = s:create_index('primary'); box.rollback(); index = s:create_index('primary'); t = nil function multi() box.begin() s:auto_increment{'first row'} s:auto_increment{'second row'} t = s:select{} box.commit() end; multi(); t; s:select{}; s:truncate(); function multi() box.begin() s:auto_increment{'first row'} s:auto_increment{'second row'} t = s:select{} box.rollback() end; multi(); t; s:select{}; function multi() box.begin() s:insert{1, 'first row'} pcall(s.insert, s, {1, 
'duplicate'}) t = s:select{} box.commit() end; multi(); t; s:select{}; s:truncate(); -- -- Test that fiber yield causes a transaction rollback -- but only if the transaction has changed any data -- -- Test admin console box.begin(); -- should be ok - active transaction, and we don't -- know, maybe it will use vinyl engine, which -- may support yield() in the future, so we don't roll -- back a transction with no statements. box.commit(); box.begin() s:insert{1, 'Must be rolled back'}; -- nothing - the transaction was rolled back while s:get{1} ~= nil do fiber.sleep(0) end -- nothing to commit because of yield box.commit(); -- Test background fiber -- function sloppy() box.begin() s:insert{1, 'From background fiber'} end; f = fiber.create(sloppy); while f:status() == 'running' do fiber.sleep(0) end; -- When the sloppy fiber ends, its session has an active transction -- It's rolled back automatically s:select{}; t = nil; function sloppy() box.begin() s:insert{1, 'From background fiber'} fiber.sleep(0) pcall(box.commit) t = s:select{} end; f = fiber.create(sloppy); while f:status() ~= 'dead' do fiber.sleep(0) end; t; s:select{}; s:drop(); test_run:cmd("setopt delimiter ''"); test = box.schema.space.create('test') tindex = test:create_index('primary') box.begin() test:insert{1} box.rollback() test:select{1} box.begin() test:insert{1} box.commit() test:select{1} -- -- Test statement-level rollback -- box.space.test:truncate() function insert(a) box.space.test:insert(a) end test_run:cmd("setopt delimiter ';'") function dup_key() box.begin() box.space.test:insert{1} local status, _ = pcall(insert, {1}) if not status then if box.error.last().code ~= box.error.TUPLE_FOUND then box.error.raise() end box.space.test:insert{2} end box.commit() end; test_run:cmd("setopt delimiter ''"); dup_key() box.space.test:select{} -- -- transaction which uses a non-existing space (used to crash in -- rollbackStatement) -- test = box.space.test box.space.test:drop() status, message = 
pcall(function() box.begin() test:put{1} test:put{2} box.commit() end) status message:match('does not exist') if not status then box.rollback() end test = nil test_run:cmd("setopt delimiter ';'") function tx_limit(n) box.begin() for i=0,n do box.space.test:insert{i} end box.commit() end; test_run:cmd("setopt delimiter ''"); _ = box.schema.space.create('test'); _ = box.space.test:create_index('primary'); tx_limit(10000) box.space.test:len() box.space.test:drop() -- -- gh-1638: box.rollback on a JIT-ed code path crashes LuaJIT -- (ffi call + yield don't mix well, rollback started to yield recently) -- Note: don't remove gh_1638(), it's necessary to trigger JIT-compilation. -- function gh_1638() box.begin(); box.rollback() end for i = 1, 1000 do fiber.create(function() gh_1638() end) end -- --gh-818 add atomic() -- space = box.schema.space.create('atomic') index = space:create_index('primary') test_run:cmd("setopt delimiter ';'") function args(...) return 'args', ... end; box.atomic(args, 1, 2, 3, 4, 5); function tx() space:auto_increment{'first row'} space:auto_increment{'second row'} return space:select{} end; box.atomic(tx); function tx_error(space) space:auto_increment{'third'} space:auto_increment{'fourth'} error("some error") end; box.atomic(tx_error, space); function nested(space) box.begin() end; box.atomic(nested, space); function rollback(space) space:auto_increment{'fifth'} box.rollback() end; box.atomic(rollback, space); test_run:cmd("setopt delimiter ''"); space:select{} space:drop() tarantool_1.9.1.26.g63eb81e3c/test/box/net_msg_max.test.lua0000664000000000000000000000254013306565107021746 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') net_box = require('net.box') box.schema.user.grant('guest', 'read,write,execute', 'universe') conn = net_box.connect(box.cfg.listen) conn2 = net_box.connect(box.cfg.listen) active = 0 finished = 0 continue = false limit = 768 run_max = (limit - 100) / 2 old_readahead = box.cfg.readahead 
box.cfg{readahead = 9000} long_str = string.rep('a', 1000) test_run:cmd("setopt delimiter ';'") function do_long_f(...) active = active + 1 while not continue do fiber.sleep(0.01) end active = active - 1 finished = finished + 1 end; function do_long(c) c:call('do_long_f', {long_str}) end; function run_workers(c) finished = 0 continue = false for i = 1, run_max do fiber.create(do_long, c) end end; -- Wait until 'active' stops growing - it means, that the input -- is blocked. function wait_active(value) while value ~= active do fiber.sleep(0.01) end end; function wait_finished(needed) continue = true while finished ~= needed do fiber.sleep(0.01) end end; test_run:cmd("setopt delimiter ''"); -- -- Test that message count limit is reachable. -- run_workers(conn) run_workers(conn2) wait_active(run_max * 2) active == run_max * 2 or active wait_finished(active) conn2:close() conn:close() box.schema.user.revoke('guest', 'read,write,execute', 'universe') box.cfg{readahead = old_readahead} tarantool_1.9.1.26.g63eb81e3c/test/box/backup.test.lua0000664000000000000000000000615513306565107020720 0ustar rootrootfio = require 'fio' log = require 'log' test_run = require('test_run').new() test_run:cleanup_cluster() -- Make sure that garbage collection is disabled -- while backup is in progress. default_checkpoint_count = box.cfg.checkpoint_count box.cfg{checkpoint_count = 1} ENGINES = {'memtx', 'vinyl'} -- Directories where files can be stored, -- from longest to shortest. CFG_DIRS = {box.cfg.wal_dir, box.cfg.memtx_dir, box.cfg.vinyl_dir} table.sort(CFG_DIRS, function(a, b) return #a > #b end) -- Create and populate tables. Make a snapshot to backup. _ = test_run:cmd("setopt delimiter ';'") for _, engine in ipairs(ENGINES) do s = box.schema.space.create(engine, {engine=engine}) _ = s:create_index('pk') for i=1,3 do s:insert{i, engine..i} end end box.snapshot() _ = test_run:cmd("setopt delimiter ''"); -- Add more data, but don't make a snapshot. 
-- These data won't make it to the backup. _ = test_run:cmd("setopt delimiter ';'") for _, engine in ipairs(ENGINES) do s = box.space[engine] for i=1,3 do s:insert{i*10} end end _ = test_run:cmd("setopt delimiter ''"); -- Start backup. files = box.backup.start() box.backup.start() -- error: backup is already in progress -- Make sure new snapshots are not included into an ongoing backups. _ = test_run:cmd("setopt delimiter ';'") for _, engine in ipairs(ENGINES) do s = box.space[engine] for i=1,3 do s:insert{i*100} end end -- Even though checkpoint_count is set to 1, this must not trigger -- garbage collection, because the checkpoint is pinned by backup. box.snapshot() _ = test_run:cmd("setopt delimiter ''"); -- Prepare backup directory backup_dir = fio.pathjoin(fio.cwd(), 'backup') _ = os.execute(string.format('rm -rf %s', backup_dir)) log.info(string.format('save backup to %s', backup_dir)) -- Copy files to the backup directory _ = test_run:cmd("setopt delimiter ';'") for _, path in ipairs(files) do suffix = string.gsub(path, '.*%.', '') if suffix == 'xlog' then dir = box.cfg.wal_dir elseif suffix == 'snap' then dir = box.cfg.memtx_dir elseif suffix == 'vylog' or suffix == 'run' or suffix == 'index' then dir = box.cfg.vinyl_dir end assert(dir ~= nil) rel_path = string.sub(path, string.len(dir) + 2) dest_dir = fio.pathjoin(backup_dir, fio.dirname(rel_path)) log.info(string.format('copy %s', rel_path)) os.execute(string.format('mkdir -p %s && cp %s %s', dest_dir, path, dest_dir)) end _ = test_run:cmd("setopt delimiter ''"); box.backup.stop() -- Check that we can restore from the backup. _ = test_run:cmd(string.format("create server copy with script='box/backup_test.lua', workdir='%s'", backup_dir)) _ = test_run:cmd("start server copy") _ = test_run:cmd('switch copy') box.space['memtx']:select() box.space['vinyl']:select() _ = test_run:cmd('switch default') _ = test_run:cmd("stop server copy") _ = test_run:cmd("cleanup server copy") -- Check that backup still works. 
_ = box.backup.start() box.backup.stop() -- Cleanup. _ = os.execute(string.format('rm -rf %s', backup_dir)) for _, engine in ipairs(ENGINES) do box.space[engine]:drop() end box.cfg{checkpoint_count = default_checkpoint_count} tarantool_1.9.1.26.g63eb81e3c/test/box/space_bsize.result0000664000000000000000000000211713306560010021477 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... utils = dofile('utils.lua') --- ... s = box.schema.space.create('space_bsize') --- ... idx = s:create_index('primary') --- ... for i = 1, 13 do s:insert{ i, string.rep('x', i) } end --- ... s:bsize() --- - 130 ... utils.space_bsize(s) --- - 130 ... for i = 1, 13, 2 do s:delete{ i } end --- ... s:bsize() --- - 60 ... utils.space_bsize(s) --- - 60 ... for i = 2, 13, 2 do s:update( { i }, {{ ":", 2, i, 0, string.rep('y', i) }} ) end --- ... s:bsize() --- - 102 ... utils.space_bsize(s) --- - 102 ... box.snapshot() --- - ok ... test_run:cmd("restart server default") utils = dofile('utils.lua') --- ... s = box.space['space_bsize'] --- ... s:bsize() --- - 102 ... utils.space_bsize(s) --- - 102 ... for i = 1, 13, 2 do s:insert{ i, string.rep('y', i) } end --- ... s:bsize() --- - 172 ... utils.space_bsize(s) --- - 172 ... s:truncate() --- ... s:bsize() --- - 0 ... utils.space_bsize(s) --- - 0 ... for i = 1, 13 do s:insert{ i, string.rep('x', i) } end --- ... s:bsize() --- - 130 ... utils.space_bsize(s) --- - 130 ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/protocol.result0000664000000000000000000000203413306560010021047 0ustar rootrootbox.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... -------------------------------------------------------------------------------- -- Test case for #273: IPROTO_ITERATOR ignored in network protocol -------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum') --- ... index = space:create_index('primary', { type = 'tree'}) --- ... 
for i=1,5 do space:insert{i} end --- ... LISTEN = require('uri').parse(box.cfg.listen) --- ... LISTEN ~= nil --- - true ... conn = (require 'net.box').connect(LISTEN.host, LISTEN.service) --- ... conn.space[space.id]:select(3, { iterator = 'GE' }) --- - - [3] - [4] - [5] ... conn.space[space.id]:select(3, { iterator = 'LE' }) --- - - [3] - [2] - [1] ... conn.space[space.id]:select(3, { iterator = 'GT' }) --- - - [4] - [5] ... conn.space[space.id]:select(3, { iterator = 'LT' }) --- - - [2] - [1] ... conn:close() --- ... space:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/box/errinj_index.result0000664000000000000000000001702013306560010021667 0ustar rootrooterrinj = box.error.injection --- ... -- Check a failed realloc in tree index. s = box.schema.space.create('tweedledum') --- ... index = s:create_index('primary', {type = 'tree'} ) --- ... for i = 1,10 do s:insert{i, i, 'test' .. i} end --- ... res = {} --- ... for i = 1,10 do table.insert(res, s:get{i}) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for _, t in s.index[0]:pairs() do table.insert(res, t) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... errinj.set("ERRINJ_INDEX_ALLOC", true) --- - ok ... res = {} --- ... for i = 1,10 do table.insert(res, s:get{i}) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for _, t in s.index[0]:pairs() do table.insert(res, t) end --- ... 
res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... for i = 501,2500 do s:insert{i, i} end --- - error: Failed to allocate 16384 bytes in mempool for new slab ... s:delete{1} --- - error: Failed to allocate 16384 bytes in mempool for new slab ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for i = 501,510 do table.insert(res, (s:get{i})) end --- ... res --- - [] ... --count must be exactly 10 function check_iter_and_size() local count = 0 for _, t in s.index[0]:pairs() do count = count + 1 end return count == 10 and "ok" or "fail" end --- ... check_iter_and_size() --- - ok ... for i = 2501,3500 do s:insert{i, i} end --- - error: Failed to allocate 16384 bytes in mempool for new slab ... s:delete{2} --- - error: Failed to allocate 16384 bytes in mempool for new slab ... check_iter_and_size() --- - ok ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... errinj.set("ERRINJ_INDEX_ALLOC", false) --- - ok ... for i = 4501,5500 do s:insert{i, i} end --- ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... s:delete{8} --- - [8, 8, 'test8'] ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... 
res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for i = 5001,5010 do table.insert(res, (s:get{i})) end --- ... res --- - - [5001, 5001] - [5002, 5002] - [5003, 5003] - [5004, 5004] - [5005, 5005] - [5006, 5006] - [5007, 5007] - [5008, 5008] - [5009, 5009] - [5010, 5010] ... s:drop() --- ... ----------------------------------- -- Check a failed realloc in hash index. s = box.schema.space.create('tweedledum') --- ... index = s:create_index('primary', {type = 'hash'} ) --- ... for i = 1,10 do s:insert{i, i, 'test' .. i} end --- ... res = {} --- ... for i = 1,10 do table.insert(res, s:get{i}) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for _, t in s.index[0]:pairs() do table.insert(res, t) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... errinj.set("ERRINJ_INDEX_ALLOC", true) --- - ok ... res = {} --- ... for i = 1,10 do table.insert(res, s:get{i}) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for _, t in s.index[0]:pairs() do table.insert(res, t) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... for i = 501,2500 do s:insert{i, i} end --- - error: Failed to allocate 16384 bytes in mempool for new slab ... 
s:delete{1} --- - error: Failed to allocate 16384 bytes in mempool for new slab ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... for i = 501,510 do table.insert(res, (s:get{i})) end --- ... res --- - [] ... res = {} --- ... for i = 2001,2010 do table.insert(res, (s:get{i})) end --- ... res --- - [] ... check_iter_and_size() --- - ok ... for i = 2501,3500 do s:insert{i, i} end --- - error: Failed to allocate 16384 bytes in mempool for new slab ... s:delete{2} --- - error: Failed to allocate 16384 bytes in mempool for new slab ... check_iter_and_size() --- - ok ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... for i = 3501,4500 do s:insert{i, i} end --- - error: Failed to allocate 16384 bytes in mempool for new slab ... s:delete{3} --- - error: Failed to allocate 16384 bytes in mempool for new slab ... check_iter_and_size() --- - ok ... errinj.set("ERRINJ_INDEX_ALLOC", false) --- - ok ... for i = 4501,5500 do s:insert{i, i} end --- ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [8, 8, 'test8'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... s:delete{8} --- - [8, 8, 'test8'] ... res = {} --- ... for i = 1,10 do table.insert(res, (s:get{i})) end --- ... res --- - - [1, 1, 'test1'] - [2, 2, 'test2'] - [3, 3, 'test3'] - [4, 4, 'test4'] - [5, 5, 'test5'] - [6, 6, 'test6'] - [7, 7, 'test7'] - [9, 9, 'test9'] - [10, 10, 'test10'] ... res = {} --- ... 
for i = 5001,5010 do table.insert(res, (s:get{i})) end --- ... res --- - - [5001, 5001] - [5002, 5002] - [5003, 5003] - [5004, 5004] - [5005, 5005] - [5006, 5006] - [5007, 5007] - [5008, 5008] - [5009, 5009] - [5010, 5010] ... s:drop() --- ... errinj = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/0000775000000000000000000000000013306565107016144 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/header.test.lua0000664000000000000000000000247613306560010021052 0ustar rootroottest_run = require('test_run').new() test_run:cmd('restart server default with cleanup=1') fio = require('fio') test_run:cmd("setopt delimiter ';'") function dump_header(path) local f = io.open(path) local header = {} while true do local line = f:read() if line == "" then break end table.insert(header, line) end f:close() return header end; test_run:cmd("setopt delimiter ''"); test_run:cmd("push filter '"..box.info.uuid.."' to ''") test_run:cmd("push filter '".._TARANTOOL.."' to ''") checkpoint_lsn = box.info.lsn -- SNAP files snap_name = string.format("%020d.snap", checkpoint_lsn) dump_header(fio.pathjoin(box.cfg.memtx_dir, snap_name)) -- XLOG files box.space._schema:insert({"layout_test"}) xlog_name = string.format("%020d.xlog", checkpoint_lsn) dump_header(fio.pathjoin(box.cfg.wal_dir, xlog_name)) box.space._schema:delete({"layout_test"}) box.snapshot() checkpoint_lsn = box.info.lsn -- SNAP files snap_name = string.format("%020d.snap", checkpoint_lsn) dump_header(fio.pathjoin(box.cfg.memtx_dir, snap_name)) -- XLOG files box.space._schema:insert({"layout_test"}) xlog_name = string.format("%020d.xlog", checkpoint_lsn) dump_header(fio.pathjoin(box.cfg.wal_dir, xlog_name)) box.space._schema:delete({"layout_test"}) test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/xlog/suite.ini0000664000000000000000000000045513306560010017766 0ustar rootroot[default] core = tarantool description = tarantool write ahead log tests script = xlog.lua disabled = snap_io_rate.test.lua 
valgrind_disabled = release_disabled = errinj.test.lua panic_on_lsn_gap.test.lua config = suite.cfg use_unix_sockets = True long_run = snap_io_rate.test.lua is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/xlog/errinj.test.lua0000664000000000000000000000243413306560010021105 0ustar rootroot-- -- we actually need to know what xlogs the server creates, -- so start from a clean state -- -- -- Check how well we handle a failed log write -- in panic_on_wal_error=false mode -- env = require('test_run') test_run = env.new() test_run:cmd('restart server default with cleanup=1') box.error.injection.set("ERRINJ_WAL_WRITE", true) box.space._schema:insert{"key"} test_run:cmd('restart server default') box.space._schema:insert{"key"} test_run:cmd('restart server default') box.space._schema:get{"key"} box.space._schema:delete{"key"} -- list all the logs name = string.match(arg[0], "([^,]+)%.lua") require('fio').glob(name .. "/*.xlog") test_run:cmd('restart server default with cleanup=1') -- gh-881 iproto request with wal IO error errinj = box.error.injection box.schema.user.grant('guest', 'read,write,execute', 'universe') test = box.schema.create_space('test') _ = test:create_index('primary') for i=1, box.cfg.rows_per_wal do test:insert{i, 'test'} end c = require('net.box').connect(box.cfg.listen) -- try to write xlog without permission to write to disk errinj.set('ERRINJ_WAL_WRITE', true) c.space.test:insert({box.cfg.rows_per_wal + 1,1,2,3}) errinj.set('ERRINJ_WAL_WRITE', false) -- Cleanup test:drop() errinj = nil box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/xlog/force_recovery.test.lua0000664000000000000000000000232713306560010022631 0ustar rootroot#!/usr/bin/env tarantool env = require('test_run') test_run = env.new() box.cfg{} test_run:cmd('create server test with script = "xlog/force_recovery.lua"') test_run:cmd("start server test") test_run:cmd("switch test") box.space._schema:replace({'test'}) 
test_run:cmd("restart server test") box.space._schema:replace({'lost'}) test_run:cmd("restart server test") box.space._schema:replace({'tost'}) -- corrupted (empty) in the middle (old behavior: goto error on recovery) fio = require('fio') path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 2)) fio.truncate(path) test_run:cmd("restart server test") box.space._schema:replace({'last'}) -- corrupted (empty), last fio = require('fio') path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 1)) fio.truncate(path) test_run:cmd("restart server test") box.space._schema:replace({'test'}) test_run:cmd("restart server test") box.space._schema:replace({'tost'}) -- corrupted header, last fio = require('fio') path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 1)) f = fio.open(path, {'O_WRONLY'}) f:write('DEAD') f:close() test_run:cmd("restart server test") box.space._schema:replace({'post'}) tarantool_1.9.1.26.g63eb81e3c/test/xlog/big_tx.result0000664000000000000000000000047213306560010020647 0ustar rootrootenv = require('test_run').new() --- ... digest = require('digest') --- ... _ = box.schema.space.create('big_tx'):create_index('pk') --- ... t = box.space.big_tx:insert({1, digest.urandom(512 * 1024)}) --- ... env:cmd('restart server default') #box.space.big_tx:select() --- - 1 ... box.space.big_tx:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/header.result0000664000000000000000000000373513306560010020630 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd('restart server default with cleanup=1') fio = require('fio') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function dump_header(path) local f = io.open(path) local header = {} while true do local line = f:read() if line == "" then break end table.insert(header, line) end f:close() return header end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... 
test_run:cmd("push filter '"..box.info.uuid.."' to ''") --- - true ... test_run:cmd("push filter '".._TARANTOOL.."' to ''") --- - true ... checkpoint_lsn = box.info.lsn --- ... -- SNAP files snap_name = string.format("%020d.snap", checkpoint_lsn) --- ... dump_header(fio.pathjoin(box.cfg.memtx_dir, snap_name)) --- - - SNAP - '0.13' - 'Version: ' - 'Instance: ' - 'VClock: {}' ... -- XLOG files box.space._schema:insert({"layout_test"}) --- - ['layout_test'] ... xlog_name = string.format("%020d.xlog", checkpoint_lsn) --- ... dump_header(fio.pathjoin(box.cfg.wal_dir, xlog_name)) --- - - XLOG - '0.13' - 'Version: ' - 'Instance: ' - 'VClock: {}' ... box.space._schema:delete({"layout_test"}) --- - ['layout_test'] ... box.snapshot() --- - ok ... checkpoint_lsn = box.info.lsn --- ... -- SNAP files snap_name = string.format("%020d.snap", checkpoint_lsn) --- ... dump_header(fio.pathjoin(box.cfg.memtx_dir, snap_name)) --- - - SNAP - '0.13' - 'Version: ' - 'Instance: ' - 'VClock: {1: 2}' ... -- XLOG files box.space._schema:insert({"layout_test"}) --- - ['layout_test'] ... xlog_name = string.format("%020d.xlog", checkpoint_lsn) --- ... dump_header(fio.pathjoin(box.cfg.wal_dir, xlog_name)) --- - - XLOG - '0.13' - 'Version: ' - 'Instance: ' - 'VClock: {1: 2}' ... box.space._schema:delete({"layout_test"}) --- - ['layout_test'] ... test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/xlog.lua0000664000000000000000000000050113306560010017600 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", force_recovery = true, rows_per_wal = 10, snap_io_rate_limit = 16 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/xlog/legacy.test.lua0000664000000000000000000000332613306565107021075 0ustar rootroottest_run = require('test_run').new() version = test_run:get_cfg('version') -- Use 1.7.5 snapshot to check that space formats are not checked. -- It allows to use >= 1.6.5 format versions. test_run:cmd('create server legacy with script="xlog/upgrade.lua", workdir="xlog/upgrade/1.7.5"') test_run:cmd("start server legacy") test_run:switch('legacy') box.space._schema:get({'version'}) _space = box.space._space -- -- Check _space 1.7.5 format. -- _space:replace{600, 1, 'test', 'memtx', 0} box.space.test:drop() -- -- Check _index 1.6.5 format. -- s = box.schema.space.create('s') pk = s:create_index('pk') sk = box.space._index:insert{s.id, 2, 'sk', 'rtree', 0, 1, 2, 'array'} s.index.sk.parts s.index.sk:drop() box.space._index:insert{s.id, 2, 's', 'rtree', 0, 1, 2, 'thing'} box.space._index:insert{s.id, 2, 's', 'rtree', 0, 1, 2, 'array', 'wtf'} box.space._index:insert{s.id, 2, 's', 'rtree', 0, 0} s:drop() -- -- Check 1.6.5 space flags. 
-- s = box.schema.space.create('t', { temporary = true }) index = s:create_index('primary', { type = 'hash' }) s:insert{1, 2, 3} _ = _space:update(s.id, {{'=', 6, 'temporary'}}) s.temporary _ = _space:update(s.id, {{'=', 6, ''}}) s.temporary s:truncate() _ = _space:update(s.id, {{'=', 6, 'no-temporary'}}) s.temporary _ = _space:update(s.id, {{'=', 6, ',:asfda:temporary'}}) s.temporary _ = _space:update(s.id, {{'=', 6, 'a,b,c,d,e'}}) s.temporary _ = _space:update(s.id, {{'=', 6, 'temporary'}}) s.temporary s:get{1} s:insert{1, 2, 3} _ = _space:update(s.id, {{'=', 6, 'temporary'}}) s.temporary _ = _space:update(s.id, {{'=', 6, 'no-temporary'}}) s.temporary s:delete{1} _ = _space:update(s.id, {{'=', 6, 'no-temporary'}}) s:drop() test_run:switch('default') test_run:cmd('stop server legacy') tarantool_1.9.1.26.g63eb81e3c/test/xlog/replica.lua0000664000000000000000000000046313306560010020255 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), memtx_memory = 107374182, -- pid_file = "tarantool.pid", -- logger = "tarantool.log", }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/xlog/snap_io_rate.test.lua0000664000000000000000000000051513306560010022255 0ustar rootrootdigest = require'digest' fiber = require'fiber' _ = box.schema.space.create('snap'):create_index('pk') -- write > 64 mb snapshot for i = 0, 127 do box.space.snap:replace({i, digest.urandom(512 * 1024)}) end t1 = fiber.time() box.snapshot() t2 = fiber.time() t2 - t1 > 64 / box.cfg.snap_io_rate_limit * 0.95 box.space.snap:drop() tarantool_1.9.1.26.g63eb81e3c/test/xlog/force_recovery.result0000664000000000000000000000302013306560010022377 0ustar rootroot#!/usr/bin/env tarantool --- ... env = require('test_run') --- ... test_run = env.new() --- ... box.cfg{} --- ... test_run:cmd('create server test with script = "xlog/force_recovery.lua"') --- - true ... test_run:cmd("start server test") --- - true ... 
test_run:cmd("switch test") --- - true ... box.space._schema:replace({'test'}) --- - ['test'] ... test_run:cmd("restart server test") box.space._schema:replace({'lost'}) --- - ['lost'] ... test_run:cmd("restart server test") box.space._schema:replace({'tost'}) --- - ['tost'] ... -- corrupted (empty) in the middle (old behavior: goto error on recovery) fio = require('fio') --- ... path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 2)) --- ... fio.truncate(path) --- - true ... test_run:cmd("restart server test") box.space._schema:replace({'last'}) --- - ['last'] ... -- corrupted (empty), last fio = require('fio') --- ... path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 1)) --- ... fio.truncate(path) --- - true ... test_run:cmd("restart server test") box.space._schema:replace({'test'}) --- - ['test'] ... test_run:cmd("restart server test") box.space._schema:replace({'tost'}) --- - ['tost'] ... -- corrupted header, last fio = require('fio') --- ... path = fio.pathjoin(box.cfg.wal_dir, string.format('%020d.xlog', box.info.lsn - 1)) --- ... f = fio.open(path, {'O_WRONLY'}) --- ... f:write('DEAD') --- - true ... f:close() --- - true ... test_run:cmd("restart server test") box.space._schema:replace({'post'}) --- - ['post'] ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade.result0000664000000000000000000002376213306565107021045 0ustar rootroottest_run = require('test_run').new() --- ... version = test_run:get_cfg('version') --- ... work_dir = "xlog/upgrade/"..version --- ... test_run:cmd('create server upgrade with script="xlog/upgrade.lua", workdir="'..work_dir..'"') --- - true ... test_run:cmd("start server upgrade") --- - true ... test_run:switch('upgrade') --- - true ... test_run:cmd(string.format("push filter '%s' to ''", box.info.cluster.uuid)) --- - true ... -- -- Upgrade -- box.schema.upgrade() --- ... 
-- -- Migrated data -- box.space._schema:select() --- - - ['cluster', ''] - ['max_id', 513] - ['version', 1, 7, 7] ... box.space._space:select() --- - - [272, 1, '_schema', 'memtx', 0, {}, [{'type': 'string', 'name': 'key'}]] - [276, 1, '_collation', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, { 'name': 'name', 'type': 'string'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'type', 'type': 'string'}, {'name': 'locale', 'type': 'string'}, { 'name': 'opts', 'type': 'map'}]] - [280, 1, '_space', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] - [281, 1, '_vspace', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'engine', 'type': 'string'}, {'name': 'field_count', 'type': 'unsigned'}, {'name': 'flags', 'type': 'map'}, {'name': 'format', 'type': 'array'}]] - [284, 1, '_sequence', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'step', 'type': 'integer'}, {'name': 'min', 'type': 'integer'}, {'name': 'max', 'type': 'integer'}, {'name': 'start', 'type': 'integer'}, {'name': 'cache', 'type': 'integer'}, {'name': 'cycle', 'type': 'boolean'}]] - [285, 1, '_sequence_data', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'value', 'type': 'integer'}]] - [288, 1, '_index', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'iid', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'opts', 'type': 'map'}, {'name': 'parts', 'type': 'array'}]] - [289, 1, '_vindex', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'iid', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, 
{'name': 'type', 'type': 'string'}, {'name': 'opts', 'type': 'map'}, {'name': 'parts', 'type': 'array'}]] - [296, 1, '_func', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'setuid', 'type': 'unsigned'}]] - [297, 1, '_vfunc', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'setuid', 'type': 'unsigned'}]] - [304, 1, '_user', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'auth', 'type': 'map'}]] - [305, 1, '_vuser', 'sysview', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'owner', 'type': 'unsigned'}, {'name': 'name', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'auth', 'type': 'map'}]] - [312, 1, '_priv', 'memtx', 0, {}, [{'name': 'grantor', 'type': 'unsigned'}, { 'name': 'grantee', 'type': 'unsigned'}, {'name': 'object_type', 'type': 'string'}, {'name': 'object_id', 'type': 'unsigned'}, {'name': 'privilege', 'type': 'unsigned'}]] - [313, 1, '_vpriv', 'sysview', 0, {}, [{'name': 'grantor', 'type': 'unsigned'}, {'name': 'grantee', 'type': 'unsigned'}, {'name': 'object_type', 'type': 'string'}, {'name': 'object_id', 'type': 'unsigned'}, {'name': 'privilege', 'type': 'unsigned'}]] - [320, 1, '_cluster', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'uuid', 'type': 'string'}]] - [330, 1, '_truncate', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'count', 'type': 'unsigned'}]] - [340, 1, '_space_sequence', 'memtx', 0, {}, [{'name': 'id', 'type': 'unsigned'}, {'name': 'sequence_id', 'type': 'unsigned'}, {'name': 'is_generated', 'type': 'boolean'}]] - [512, 1, 'distro', 'memtx', 0, {}, [{'name': 'os', 'type': 'str'}, {'name': 'dist', 'type': 'str'}, {'name': 'version', 'type': 'num'}, {'name': 'time', 'type': 
'num'}]] - [513, 1, 'temporary', 'memtx', 0, {'temporary': true}, []] ... box.space._index:select() --- - - [272, 0, 'primary', 'tree', {'unique': true}, [[0, 'string']]] - [276, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [276, 1, 'name', 'tree', {'unique': true}, [[1, 'string']]] - [280, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [280, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [280, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [281, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [281, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [281, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [284, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [284, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [284, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [285, 0, 'primary', 'hash', {'unique': true}, [[0, 'unsigned']]] - [288, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]] - [288, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]] - [289, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]] - [289, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]] - [296, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [296, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [296, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [297, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [297, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [297, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [304, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [304, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - [304, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [305, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [305, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]] - 
[305, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]] - [312, 0, 'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]] - [312, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]] - [312, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]] - [313, 0, 'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]] - [313, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]] - [313, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]] - [320, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [320, 1, 'uuid', 'tree', {'unique': true}, [[1, 'string']]] - [330, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [340, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]] - [340, 1, 'sequence', 'tree', {'unique': false}, [[1, 'unsigned']]] - [512, 0, 'primary', 'hash', {'unique': true}, [[0, 'string'], [1, 'string'], [ 2, 'unsigned']]] - [512, 1, 'codename', 'hash', {'unique': true}, [[1, 'string']]] - [512, 2, 'time', 'tree', {'unique': false}, [[3, 'unsigned']]] ... box.space._user:select() --- - - [0, 1, 'guest', 'user', {'chap-sha1': 'vhvewKp0tNyweZQ+cFKAlsyphfg='}] - [1, 1, 'admin', 'user', {}] - [2, 1, 'public', 'role', {}] - [3, 1, 'replication', 'role', {}] - [4, 1, 'someuser', 'user', {'chap-sha1': '2qvbQIHM4zMWhAmm2xGeGNjqoHM='}] - [5, 1, 'somerole', 'role', {}] - [31, 1, 'super', 'role', {}] ... box.space._func:select() --- - - [1, 1, 'box.schema.user.info', 1, 'LUA'] - [2, 4, 'somefunc', 1, 'LUA'] - [3, 1, 'someotherfunc', 0, 'LUA'] ... box.space._collation:select() --- - - [1, 'unicode', 1, 'ICU', '', {}] - [2, 'unicode_ci', 1, 'ICU', '', {'strength': 'primary'}] ... 
box.space._priv:select() --- - - [1, 0, 'role', 2, 4] - [1, 0, 'universe', 0, 24] - [1, 1, 'universe', 0, 4294967295] - [1, 2, 'function', 1, 4] - [1, 2, 'function', 2, 4] - [1, 2, 'space', 276, 2] - [1, 2, 'space', 281, 1] - [1, 2, 'space', 289, 1] - [1, 2, 'space', 297, 1] - [1, 2, 'space', 305, 1] - [1, 2, 'space', 313, 1] - [1, 2, 'space', 330, 2] - [1, 3, 'space', 320, 2] - [1, 3, 'universe', 0, 1] - [1, 4, 'function', 3, 4] - [1, 4, 'role', 2, 4] - [1, 4, 'role', 5, 4] - [1, 4, 'space', 513, 3] - [1, 4, 'universe', 0, 24] - [1, 5, 'space', 512, 3] - [1, 31, 'universe', 0, 4294967295] ... box.space._vspace ~= nil --- - true ... box.space._vindex ~= nil --- - true ... box.space._vuser ~= nil --- - true ... box.space._vpriv ~= nil --- - true ... -- a test space r = box.space.distro:select() --- ... _ = table.sort(r, function(left, right) return tostring(left) < tostring(right) end) --- ... r --- - - ['debian', 'etch', 40, 1176019200] - ['debian', 'jessie', 80, 1430038800] - ['debian', 'lenny', 50, 1234602000] - ['debian', 'sarge', 31, 1118044800] - ['debian', 'squeeze', 60, 1296896400] - ['debian', 'wheezy', 70, 1367654400] - ['debian', 'woody', 30, 1027065600] - ['ubuntu', 'precise', 1510, 1335427200] - ['ubuntu', 'trusty', 1404, 1397721600] - ['ubuntu', 'vivid', 1504, 1429779600] - ['ubuntu', 'wily', 1510, 1445504400] ... test_run:cmd("clear filter") --- - true ... test_run:switch('default') --- - true ... test_run:cmd('stop server upgrade') --- - true ... test_run = nil --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/misc.test.lua0000664000000000000000000000162613306560010020551 0ustar rootroottest_run = require('test_run').new() test_run:cmd('restart server default with cleanup=1') fio = require('fio') xlog = require('xlog') netbox = require('net.box') box.schema.user.grant('guest', 'read,write,execute', 'universe') -- -- Check that xlogs doesn't contain IPROTO_SYNC -- conn = netbox.connect(box.cfg.listen) -- insert some row using the binary protocol conn.space._schema:insert({'test'}) -- rotate xlog box.snapshot() -- dump xlog xlog_path = fio.pathjoin(box.cfg.wal_dir, string.format("%020d.xlog", 0)) result = {} fun, param, state = xlog.pairs(xlog_path) type(fun.totable) -- skip grants until our insert into _schema repeat state, row = fun(param, state) until row.BODY.space_id == box.space._schema.id row.HEADER.type row.HEADER.sync row.BODY box.space._schema:delete('test') -- -- Clean up -- box.schema.user.revoke('guest', 'read,write,execute', 'universe') netbox = nil xlog = nil fio = nil tarantool_1.9.1.26.g63eb81e3c/test/xlog/panic_on_lsn_gap.test.lua0000664000000000000000000000626713306560010023115 0ustar rootroot-- -- we actually need to know what xlogs the server creates, -- so start from a clean state -- -- -- Check how the server is able to find the next -- xlog if there are failed writes (lsn gaps). -- env = require('test_run') test_run = env.new() test_run:cmd("create server panic with script='xlog/panic.lua'") test_run:cmd("start server panic") test_run:cmd("switch panic") box.info.vclock s = box.space._schema -- we need to have at least one record in the -- xlog otherwise the server believes that there -- is an lsn gap during recovery. -- s:replace{"key", 'test 1'} box.info.vclock box.error.injection.set("ERRINJ_WAL_WRITE", true) t = {} -- -- Try to insert rows, so that it's time to -- switch WALs. No switch will happen though, -- since no writes were made. 
-- test_run:cmd("setopt delimiter ';'") for i=1,box.cfg.rows_per_wal do status, msg = pcall(s.replace, s, {"key"}) table.insert(t, msg) end; test_run:cmd("setopt delimiter ''"); t -- -- Before restart: oops, our LSN is 11, -- even though we didn't insert anything. -- name = string.match(arg[0], "([^,]+)%.lua") box.info.vclock require('fio').glob(name .. "/*.xlog") test_run:cmd("restart server panic") -- -- after restart: our LSN is the LSN of the -- last *written* row, all the failed -- rows are gone from lsn counter. -- box.info.vclock box.space._schema:select{'key'} box.error.injection.set("ERRINJ_WAL_WRITE", true) t = {} s = box.space._schema -- -- now do the same -- test_run:cmd("setopt delimiter ';'") for i=1,box.cfg.rows_per_wal do status, msg = pcall(s.replace, s, {"key"}) table.insert(t, msg) end; test_run:cmd("setopt delimiter ''"); t box.info.vclock box.error.injection.set("ERRINJ_WAL_WRITE", false) -- -- Write a good row after a series of failed -- rows. There is a gap in LSN, correct, -- but it's *inside* a single WAL, so doesn't -- affect WAL search in recover_remaining_wals() -- s:replace{'key', 'test 2'} -- -- notice that vclock before and after -- server stop is the same -- because it's -- recorded in the last row -- box.info.vclock test_run:cmd("restart server panic") box.info.vclock box.space._schema:select{'key'} -- list all the logs name = string.match(arg[0], "([^,]+)%.lua") require('fio').glob(name .. "/*.xlog") -- now insert 10 rows - so that the next -- row will need to switch the WAL test_run:cmd("setopt delimiter ';'") for i=1,box.cfg.rows_per_wal do box.space._schema:replace{"key", 'test 3'} end; test_run:cmd("setopt delimiter ''"); -- the next insert should switch xlog, but aha - it fails -- a new xlog file is created but has 0 rows require('fio').glob(name .. "/*.xlog") box.error.injection.set("ERRINJ_WAL_WRITE", true) box.space._schema:replace{"key", 'test 3'} box.info.vclock require('fio').glob(name .. 
"/*.xlog") -- and the next one (just to be sure box.space._schema:replace{"key", 'test 3'} box.info.vclock require('fio').glob(name .. "/*.xlog") box.error.injection.set("ERRINJ_WAL_WRITE", false) -- then a success box.space._schema:replace{"key", 'test 4'} box.info.vclock require('fio').glob(name .. "/*.xlog") -- restart is ok test_run:cmd("restart server panic") box.space._schema:select{'key'} test_run:cmd('switch default') test_run:cmd("stop server panic") test_run:cmd("cleanup server panic") tarantool_1.9.1.26.g63eb81e3c/test/xlog/panic_on_wal_error.result0000664000000000000000000000617213306560010023240 0ustar rootroot-- preparatory stuff env = require('test_run') --- ... test_run = env.new() --- ... fio = require('fio') --- ... glob = fio.pathjoin(box.cfg.wal_dir, '*.xlog') --- ... for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end --- ... glob = fio.pathjoin(box.cfg.vinyl_dir, '*.vylog') --- ... for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end --- ... glob = fio.pathjoin(box.cfg.memtx_dir, '*.snap') --- ... for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end --- ... test_run:cmd("restart server default") box.schema.user.grant('guest', 'replication') --- ... _ = box.schema.space.create('test') --- ... _ = box.space.test:create_index('pk') --- ... -- -- reopen xlog -- test_run:cmd("restart server default") box.space.test ~= nil --- - true ... -- insert some stuff -- box.space.test:auto_increment{'before snapshot'} --- - [1, 'before snapshot'] ... -- -- this snapshot will go to the replica -- box.snapshot() --- - ok ... -- -- create a replica, let it catch up somewhat -- test_run:cmd("create server replica with rpl_master=default, script='xlog/replica.lua'") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... box.space.test:select{} --- - - [1, 'before snapshot'] ... 
-- -- stop replica, restart the master, insert more stuff -- which will make it into an xlog only -- test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... box.space.test:auto_increment{'after snapshot'} --- - [2, 'after snapshot'] ... box.space.test:auto_increment{'after snapshot - one more row'} --- - [3, 'after snapshot - one more row'] ... -- -- save snapshot and remove xlogs -- box.snapshot() --- - ok ... fio = require('fio') --- ... glob = fio.pathjoin(box.cfg.wal_dir, '*.xlog') --- ... files = fio.glob(glob) --- ... for _, file in pairs(files) do fio.unlink(file) end --- ... test_run:cmd("restart server default") -- -- make sure the server has some xlogs, otherwise the -- replica doesn't discover the gap in the logs -- box.space.test:auto_increment{'after snapshot and restart'} --- - [4, 'after snapshot and restart'] ... box.space.test:auto_increment{'after snapshot and restart - one more row'} --- - [5, 'after snapshot and restart - one more row'] ... -- -- check that panic is true -- box.cfg{force_recovery=false} --- ... box.cfg.force_recovery --- - false ... -- -- try to start the replica, ha-ha -- (replication should fail, some rows are missing) -- test_run:cmd("start server replica") --- - true ... test_run:cmd("switch replica") --- - true ... fiber = require('fiber') --- ... while box.info.replication[1].upstream.status ~= "stopped" do fiber.sleep(0.001) end --- ... box.info.replication[1].upstream.status --- - stopped ... box.info.replication[1].upstream.message --- - 'Missing .xlog file between LSN 6 {1: 6} and 8 {1: 8}' ... box.space.test:select{} --- - - [1, 'before snapshot'] ... -- -- test_run:cmd("switch default") --- - true ... test_run:cmd("stop server replica") --- - true ... test_run:cmd("cleanup server replica") --- - true ... -- -- cleanup box.space.test:drop() --- ... box.schema.user.revoke('guest', 'replication') --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader.result0000664000000000000000000007271213306560010020643 0ustar rootroot-- test for xlog_reader module -- consists of 3 parts: -- 1) ok snap/xlog reader -- 2) broken files reader (crc sum is invalid, bad header [version/type]) -- 3) before box.cfg and after box.cfg fio = require('fio') --- ... fun = require('fun') --- ... json = require('json') --- ... xlog = require('xlog').pairs --- ... trun = require('test_run').new() --- ... pattern_prefix = fio.pathjoin(os.getenv("SOURCEDIR"), "test/xlog/reader") --- ... pattern_prefix_re = pattern_prefix:gsub("/", "\\/") --- ... trun:cmd(("push filter '%s' to '%s'"):format(pattern_prefix_re, "")) --- - true ... pattern_ok_v12 = fio.pathjoin(pattern_prefix, "v12/") --- ... pattern_ok_v13 = fio.pathjoin(pattern_prefix, "v13/") --- ... trun:cmd("setopt delimiter ';'") --- - true ... function collect_results(file) local val = {} for k, v in xlog(file) do table.insert(val, setmetatable(v, { __serialize = "map"})) end return val end; --- ... 
fun.iter({ fio.pathjoin(pattern_ok_v12, '00000000000000000000.ok.snap'), fio.pathjoin(pattern_ok_v12, '00000000000000000000.ok.xlog'), }):map(collect_results):totable(); --- - - - {'HEADER': {'lsn': 1, 'type': 'INSERT'}, 'BODY': {'space_id': 272, 'tuple': [ 'cluster', '1366b1b5-2329-46f4-9fce-a1dc4275d469']}} - {'HEADER': {'lsn': 2, 'type': 'INSERT'}, 'BODY': {'space_id': 272, 'tuple': [ 'max_id', 511]}} - {'HEADER': {'lsn': 3, 'type': 'INSERT'}, 'BODY': {'space_id': 272, 'tuple': [ 'version', 1, 6, 8]}} - {'HEADER': {'lsn': 4, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 272, 1, '_schema', 'memtx', 0, {}, [{'type': 'str', 'name': 'key'}]]}} - {'HEADER': {'lsn': 5, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 280, 1, '_space', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'engine', 'type': 'str'}, {'name': 'field_count', 'type': 'num'}, {'name': 'flags', 'type': 'str'}, {'name': 'format', 'type': '*'}]]}} - {'HEADER': {'lsn': 6, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 281, 1, '_vspace', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'engine', 'type': 'str'}, {'name': 'field_count', 'type': 'num'}, {'name': 'flags', 'type': 'str'}, {'name': 'format', 'type': '*'}]]}} - {'HEADER': {'lsn': 7, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 288, 1, '_index', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'iid', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'opts', 'type': 'array'}, {'name': 'parts', 'type': 'array'}]]}} - {'HEADER': {'lsn': 8, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 289, 1, '_vindex', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'iid', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'opts', 'type': 'array'}, {'name': 'parts', 
'type': 'array'}]]}} - {'HEADER': {'lsn': 9, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 296, 1, '_func', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'setuid', 'type': 'num'}]]}} - {'HEADER': {'lsn': 10, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 297, 1, '_vfunc', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'setuid', 'type': 'num'}]]}} - {'HEADER': {'lsn': 11, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 304, 1, '_user', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'auth', 'type': '*'}]]}} - {'HEADER': {'lsn': 12, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 305, 1, '_vuser', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'auth', 'type': '*'}]]}} - {'HEADER': {'lsn': 13, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 312, 1, '_priv', 'memtx', 0, {}, [{'name': 'grantor', 'type': 'num'}, { 'name': 'grantee', 'type': 'num'}, {'name': 'object_type', 'type': 'str'}, {'name': 'object_id', 'type': 'num'}, {'name': 'privilege', 'type': 'num'}]]}} - {'HEADER': {'lsn': 14, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 313, 1, '_vpriv', 'sysview', 0, {}, [{'name': 'grantor', 'type': 'num'}, {'name': 'grantee', 'type': 'num'}, {'name': 'object_type', 'type': 'str'}, {'name': 'object_id', 'type': 'num'}, {'name': 'privilege', 'type': 'num'}]]}} - {'HEADER': {'lsn': 15, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 320, 1, '_cluster', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'uuid', 'type': 'str'}]]}} - {'HEADER': {'lsn': 16, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 272, 0, 'primary', 'tree', 
{'unique': true}, [[0, 'str']]]}} - {'HEADER': {'lsn': 17, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 280, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 18, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 280, 1, 'owner', 'tree', {'unique': false}, [[1, 'num']]]}} - {'HEADER': {'lsn': 19, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 280, 2, 'name', 'tree', {'unique': true}, [[2, 'str']]]}} - {'HEADER': {'lsn': 20, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 281, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 21, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 281, 1, 'owner', 'tree', {'unique': false}, [[1, 'num']]]}} - {'HEADER': {'lsn': 22, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 281, 2, 'name', 'tree', {'unique': true}, [[2, 'str']]]}} - {'HEADER': {'lsn': 23, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 288, 0, 'primary', 'tree', {'unique': true}, [[0, 'num'], [1, 'num']]]}} - {'HEADER': {'lsn': 24, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 288, 2, 'name', 'tree', {'unique': true}, [[0, 'num'], [2, 'str']]]}} - {'HEADER': {'lsn': 25, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 289, 0, 'primary', 'tree', {'unique': true}, [[0, 'num'], [1, 'num']]]}} - {'HEADER': {'lsn': 26, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 289, 2, 'name', 'tree', {'unique': true}, [[0, 'num'], [2, 'str']]]}} - {'HEADER': {'lsn': 27, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 296, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 28, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 296, 1, 'owner', 'tree', {'unique': false}, [[1, 'num']]]}} - {'HEADER': {'lsn': 29, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 296, 2, 'name', 'tree', {'unique': true}, [[2, 'str']]]}} - {'HEADER': {'lsn': 30, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 297, 0, 
'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 31, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 297, 1, 'owner', 'tree', {'unique': false}, [[1, 'num']]]}} - {'HEADER': {'lsn': 32, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 297, 2, 'name', 'tree', {'unique': true}, [[2, 'str']]]}} - {'HEADER': {'lsn': 33, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 304, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 34, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 304, 1, 'owner', 'tree', {'unique': false}, [[1, 'num']]]}} - {'HEADER': {'lsn': 35, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 304, 2, 'name', 'tree', {'unique': true}, [[2, 'str']]]}} - {'HEADER': {'lsn': 36, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 305, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 37, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 305, 1, 'owner', 'tree', {'unique': false}, [[1, 'num']]]}} - {'HEADER': {'lsn': 38, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 305, 2, 'name', 'tree', {'unique': true}, [[2, 'str']]]}} - {'HEADER': {'lsn': 39, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 312, 0, 'primary', 'tree', {'unique': true}, [[1, 'num'], [2, 'str'], [ 3, 'num']]]}} - {'HEADER': {'lsn': 40, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 312, 1, 'owner', 'tree', {'unique': false}, [[0, 'num']]]}} - {'HEADER': {'lsn': 41, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 312, 2, 'object', 'tree', {'unique': false}, [[2, 'str'], [3, 'num']]]}} - {'HEADER': {'lsn': 42, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 313, 0, 'primary', 'tree', {'unique': true}, [[1, 'num'], [2, 'str'], [ 3, 'num']]]}} - {'HEADER': {'lsn': 43, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 313, 1, 'owner', 'tree', {'unique': false}, [[0, 'num']]]}} - {'HEADER': {'lsn': 44, 'type': 'INSERT'}, 'BODY': 
{'space_id': 288, 'tuple': [ 313, 2, 'object', 'tree', {'unique': false}, [[2, 'str'], [3, 'num']]]}} - {'HEADER': {'lsn': 45, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 320, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 46, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 320, 1, 'uuid', 'tree', {'unique': true}, [[1, 'str']]]}} - {'HEADER': {'lsn': 47, 'type': 'INSERT'}, 'BODY': {'space_id': 296, 'tuple': [ 1, 1, 'box.schema.user.info', 1, 'LUA']}} - {'HEADER': {'lsn': 48, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 0, 1, 'guest', 'user']}} - {'HEADER': {'lsn': 49, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 1, 1, 'admin', 'user']}} - {'HEADER': {'lsn': 50, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 2, 1, 'public', 'role']}} - {'HEADER': {'lsn': 51, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 3, 1, 'replication', 'role']}} - {'HEADER': {'lsn': 52, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 0, 'role', 2, 4]}} - {'HEADER': {'lsn': 53, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 1, 'universe', 0, 7]}} - {'HEADER': {'lsn': 54, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'function', 1, 4]}} - {'HEADER': {'lsn': 55, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 281, 1]}} - {'HEADER': {'lsn': 56, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 289, 1]}} - {'HEADER': {'lsn': 57, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 297, 1]}} - {'HEADER': {'lsn': 58, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 305, 1]}} - {'HEADER': {'lsn': 59, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 313, 1]}} - {'HEADER': {'lsn': 60, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 3, 'space', 320, 2]}} - {'HEADER': {'lsn': 61, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 3, 'universe', 0, 1]}} - 
{'HEADER': {'lsn': 62, 'type': 'INSERT'}, 'BODY': {'space_id': 320, 'tuple': [ 1, '65f1a623-9e99-47b3-92e5-0c9a63ff566c']}} - - {'HEADER': {'lsn': 1, 'replica_id': 1, 'type': 'UPDATE', 'timestamp': 1476117049.224}, 'BODY': {'space_id': 272, 'index_base': 1, 'key': ['max_id'], 'tuple': [['+', 2, 1]]}} - {'HEADER': {'lsn': 2, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1476117049.2246}, 'BODY': {'space_id': 280, 'tuple': [512, 1, 'test', 'memtx', 0, {}, []]}} - {'HEADER': {'lsn': 3, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1476117049.2247}, 'BODY': {'space_id': 288, 'tuple': [512, 0, 'primary', 'tree', {'unique': true}, [[0, 'num']]]}} - {'HEADER': {'lsn': 4, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1476117049.2247}, 'BODY': {'space_id': 512, 'tuple': [1, 2, 3, 4]}} - {'HEADER': {'lsn': 5, 'replica_id': 1, 'type': 'REPLACE', 'timestamp': 1476117049.2248}, 'BODY': {'space_id': 512, 'tuple': [2, 2, 3, 4]}} - {'HEADER': {'lsn': 6, 'replica_id': 1, 'type': 'DELETE', 'timestamp': 1476117049.2248}, 'BODY': {'space_id': 512, 'key': [1]}} - {'HEADER': {'lsn': 7, 'replica_id': 1, 'type': 'UPDATE', 'timestamp': 1476117049.2248}, 'BODY': {'space_id': 512, 'index_base': 1, 'key': [2], 'tuple': [['=', 3, 4]]}} - {'HEADER': {'lsn': 8, 'replica_id': 1, 'type': 'UPSERT', 'timestamp': 1476117049.2248}, 'BODY': {'space_id': 512, 'operations': [['=', 3, 4]], 'index_base': 1, 'tuple': [ 3, 4, 5, 6]}} - {'HEADER': {'lsn': 9, 'replica_id': 1, 'type': 'UPSERT', 'timestamp': 1476117049.2249}, 'BODY': {'space_id': 512, 'operations': [['=', 3, 4]], 'index_base': 1, 'tuple': [ 3, 4, 5, 6]}} ... collectgarbage('collect'); --- - 0 ... 
fun.iter({ fio.pathjoin(pattern_ok_v13, '00000000000000000000.ok.snap'), fio.pathjoin(pattern_ok_v13, '00000000000000000000.ok.xlog'), }):map(collect_results):totable(); --- - - - {'HEADER': {'lsn': 1, 'type': 'INSERT'}, 'BODY': {'space_id': 272, 'tuple': [ 'cluster', '95166c09-cc9b-4b5b-96ef-9bcc3190abbb']}} - {'HEADER': {'lsn': 2, 'type': 'INSERT'}, 'BODY': {'space_id': 272, 'tuple': [ 'max_id', 511]}} - {'HEADER': {'lsn': 3, 'type': 'INSERT'}, 'BODY': {'space_id': 272, 'tuple': [ 'version', 1, 7, 2]}} - {'HEADER': {'lsn': 4, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 272, 1, '_schema', 'memtx', 0, {}, [{'type': 'str', 'name': 'key'}]]}} - {'HEADER': {'lsn': 5, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 280, 1, '_space', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'engine', 'type': 'str'}, {'name': 'field_count', 'type': 'num'}, {'name': 'flags', 'type': 'str'}, {'name': 'format', 'type': '*'}]]}} - {'HEADER': {'lsn': 6, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 281, 1, '_vspace', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'engine', 'type': 'str'}, {'name': 'field_count', 'type': 'num'}, {'name': 'flags', 'type': 'str'}, {'name': 'format', 'type': '*'}]]}} - {'HEADER': {'lsn': 7, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 288, 1, '_index', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'iid', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'opts', 'type': 'array'}, {'name': 'parts', 'type': 'array'}]]}} - {'HEADER': {'lsn': 8, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 289, 1, '_vindex', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'iid', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'opts', 'type': 'array'}, {'name': 'parts', 
'type': 'array'}]]}} - {'HEADER': {'lsn': 9, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 296, 1, '_func', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'setuid', 'type': 'num'}]]}} - {'HEADER': {'lsn': 10, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 297, 1, '_vfunc', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'setuid', 'type': 'num'}]]}} - {'HEADER': {'lsn': 11, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 304, 1, '_user', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'auth', 'type': '*'}]]}} - {'HEADER': {'lsn': 12, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 305, 1, '_vuser', 'sysview', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'owner', 'type': 'num'}, {'name': 'name', 'type': 'str'}, {'name': 'type', 'type': 'str'}, {'name': 'auth', 'type': '*'}]]}} - {'HEADER': {'lsn': 13, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 312, 1, '_priv', 'memtx', 0, {}, [{'name': 'grantor', 'type': 'num'}, { 'name': 'grantee', 'type': 'num'}, {'name': 'object_type', 'type': 'str'}, {'name': 'object_id', 'type': 'num'}, {'name': 'privilege', 'type': 'num'}]]}} - {'HEADER': {'lsn': 14, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 313, 1, '_vpriv', 'sysview', 0, {}, [{'name': 'grantor', 'type': 'num'}, {'name': 'grantee', 'type': 'num'}, {'name': 'object_type', 'type': 'str'}, {'name': 'object_id', 'type': 'num'}, {'name': 'privilege', 'type': 'num'}]]}} - {'HEADER': {'lsn': 15, 'type': 'INSERT'}, 'BODY': {'space_id': 280, 'tuple': [ 320, 1, '_cluster', 'memtx', 0, {}, [{'name': 'id', 'type': 'num'}, {'name': 'uuid', 'type': 'str'}]]}} - {'HEADER': {'lsn': 16, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 272, 0, 'primary', 'tree', 
{'unique': true}, [[0, 'string']]]}} - {'HEADER': {'lsn': 17, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 280, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 18, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 280, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]]}} - {'HEADER': {'lsn': 19, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 280, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]]}} - {'HEADER': {'lsn': 20, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 281, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 21, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 281, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]]}} - {'HEADER': {'lsn': 22, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 281, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]]}} - {'HEADER': {'lsn': 23, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 288, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]]}} - {'HEADER': {'lsn': 24, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 288, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]]}} - {'HEADER': {'lsn': 25, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 289, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned'], [1, 'unsigned']]]}} - {'HEADER': {'lsn': 26, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 289, 2, 'name', 'tree', {'unique': true}, [[0, 'unsigned'], [2, 'string']]]}} - {'HEADER': {'lsn': 27, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 296, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 28, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 296, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]]}} - {'HEADER': {'lsn': 29, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 296, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]]}} - 
{'HEADER': {'lsn': 30, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 297, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 31, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 297, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]]}} - {'HEADER': {'lsn': 32, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 297, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]]}} - {'HEADER': {'lsn': 33, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 304, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 34, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 304, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]]}} - {'HEADER': {'lsn': 35, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 304, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]]}} - {'HEADER': {'lsn': 36, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 305, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 37, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 305, 1, 'owner', 'tree', {'unique': false}, [[1, 'unsigned']]]}} - {'HEADER': {'lsn': 38, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 305, 2, 'name', 'tree', {'unique': true}, [[2, 'string']]]}} - {'HEADER': {'lsn': 39, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 312, 0, 'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]]}} - {'HEADER': {'lsn': 40, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 312, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 41, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 312, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]]}} - {'HEADER': {'lsn': 42, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 313, 0, 'primary', 'tree', {'unique': true}, [[1, 'unsigned'], [2, 'string'], [3, 'unsigned']]]}} - {'HEADER': {'lsn': 43, 
'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 313, 1, 'owner', 'tree', {'unique': false}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 44, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 313, 2, 'object', 'tree', {'unique': false}, [[2, 'string'], [3, 'unsigned']]]}} - {'HEADER': {'lsn': 45, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 320, 0, 'primary', 'tree', {'unique': true}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 46, 'type': 'INSERT'}, 'BODY': {'space_id': 288, 'tuple': [ 320, 1, 'uuid', 'tree', {'unique': true}, [[1, 'string']]]}} - {'HEADER': {'lsn': 47, 'type': 'INSERT'}, 'BODY': {'space_id': 296, 'tuple': [ 1, 1, 'box.schema.user.info', 1, 'LUA']}} - {'HEADER': {'lsn': 48, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 0, 1, 'guest', 'user', {'chap-sha1': 'vhvewKp0tNyweZQ+cFKAlsyphfg='}]}} - {'HEADER': {'lsn': 49, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 1, 1, 'admin', 'user']}} - {'HEADER': {'lsn': 50, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 2, 1, 'public', 'role']}} - {'HEADER': {'lsn': 51, 'type': 'INSERT'}, 'BODY': {'space_id': 304, 'tuple': [ 3, 1, 'replication', 'role']}} - {'HEADER': {'lsn': 52, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 0, 'role', 2, 4]}} - {'HEADER': {'lsn': 53, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 1, 'universe', 0, 7]}} - {'HEADER': {'lsn': 54, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'function', 1, 4]}} - {'HEADER': {'lsn': 55, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 281, 1]}} - {'HEADER': {'lsn': 56, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 289, 1]}} - {'HEADER': {'lsn': 57, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 297, 1]}} - {'HEADER': {'lsn': 58, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 2, 'space', 305, 1]}} - {'HEADER': {'lsn': 59, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 
2, 'space', 313, 1]}} - {'HEADER': {'lsn': 60, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 3, 'space', 320, 2]}} - {'HEADER': {'lsn': 61, 'type': 'INSERT'}, 'BODY': {'space_id': 312, 'tuple': [ 1, 3, 'universe', 0, 1]}} - {'HEADER': {'lsn': 62, 'type': 'INSERT'}, 'BODY': {'space_id': 320, 'tuple': [ 1, '3083fa40-34fa-48da-8438-cf5d47f43f0a']}} - - {'HEADER': {'lsn': 1, 'replica_id': 1, 'type': 'UPDATE', 'timestamp': 1475796386.2266}, 'BODY': {'space_id': 272, 'index_base': 1, 'key': ['max_id'], 'tuple': [['+', 2, 1]]}} - {'HEADER': {'lsn': 2, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1475796386.2291}, 'BODY': {'space_id': 280, 'tuple': [512, 1, 'test', 'memtx', 0, {}, []]}} - {'HEADER': {'lsn': 3, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1475796409.4258}, 'BODY': {'space_id': 288, 'tuple': [512, 0, 'primary', 'tree', {'unique': true, 'lsn': 2}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 4, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1475796454.2693}, 'BODY': {'space_id': 512, 'tuple': [1, 2, 3, 4]}} - {'HEADER': {'lsn': 5, 'replica_id': 1, 'type': 'REPLACE', 'timestamp': 1475796459.9428}, 'BODY': {'space_id': 512, 'tuple': [2, 2, 3, 4]}} - {'HEADER': {'lsn': 6, 'replica_id': 1, 'type': 'DELETE', 'timestamp': 1475796470.6977}, 'BODY': {'space_id': 512, 'key': [1]}} - {'HEADER': {'lsn': 7, 'replica_id': 1, 'type': 'UPDATE', 'timestamp': 1475796500.8061}, 'BODY': {'space_id': 512, 'index_base': 1, 'key': [2], 'tuple': [['=', 3, 4]]}} - {'HEADER': {'lsn': 8, 'replica_id': 1, 'type': 'UPSERT', 'timestamp': 1475796514.5016}, 'BODY': {'space_id': 512, 'operations': [['=', 3, 4]], 'index_base': 1, 'tuple': [ 3, 4, 5, 6]}} - {'HEADER': {'lsn': 9, 'replica_id': 1, 'type': 'UPSERT', 'timestamp': 1475796515.7168}, 'BODY': {'space_id': 512, 'operations': [['=', 3, 4]], 'index_base': 1, 'tuple': [ 3, 4, 5, 6]}} ... collectgarbage('collect'); --- - 0 ... 
check_error = function(name, err) local path = fio.pathjoin(pattern_prefix, name) local stat, oerr = pcall(collect_results, path) if stat == true or not string.find(tostring(oerr), err) then return false, oerr end return true end; --- ... trun:cmd("setopt delimiter ''"); --- - true ... check_error("version.bad.xlog", "file format version") --- - true ... check_error("format.bad.xlog", "not support 'SNOP' file type") --- - true ... collect_results(fio.pathjoin(pattern_prefix, "crc.bad.xlog")) --- - - {'HEADER': {'lsn': 1, 'replica_id': 1, 'type': 'UPDATE', 'timestamp': 1475796386.2266}, 'BODY': {'space_id': 272, 'index_base': 1, 'key': ['max_id'], 'tuple': [['+', 2, 1]]}} - {'HEADER': {'lsn': 2, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1475796386.2291}, 'BODY': {'space_id': 280, 'tuple': [512, 1, 'test', 'memtx', 0, {}, []]}} - {'HEADER': {'lsn': 3, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1475796409.4258}, 'BODY': {'space_id': 288, 'tuple': [512, 0, 'primary', 'tree', {'unique': true, 'lsn': 2}, [[0, 'unsigned']]]}} - {'HEADER': {'lsn': 4, 'replica_id': 1, 'type': 'INSERT', 'timestamp': 1475796454.2693}, 'BODY': {'space_id': 512, 'tuple': [1, 2, 3, 4]}} - {'HEADER': {'lsn': 6, 'replica_id': 1, 'type': 'DELETE', 'timestamp': 1475796470.6977}, 'BODY': {'space_id': 512, 'key': [1]}} - {'HEADER': {'lsn': 7, 'replica_id': 1, 'type': 'UPDATE', 'timestamp': 1475796500.8061}, 'BODY': {'space_id': 512, 'index_base': 1, 'key': [2], 'tuple': [['=', 3, 4]]}} - {'HEADER': {'lsn': 8, 'replica_id': 1, 'type': 'UPSERT', 'timestamp': 1475796514.5016}, 'BODY': {'space_id': 512, 'operations': [['=', 3, 4]], 'index_base': 1, 'tuple': [ 3, 4, 5, 6]}} - {'HEADER': {'lsn': 9, 'replica_id': 1, 'type': 'UPSERT', 'timestamp': 1475796515.7168}, 'BODY': {'space_id': 512, 'operations': [['=', 3, 4]], 'index_base': 1, 'tuple': [ 3, 4, 5, 6]}} ... 
collect_results(fio.pathjoin(pattern_prefix, "eof.bad.xlog")) --- - - {'HEADER': {'type': 'INSERT'}, 'BODY': {'tuple': [0, {0: 8, 7: [[0, 'unsigned']], 6: 513}]}} - {'HEADER': {'type': 'INSERT'}, 'BODY': {'tuple': [10, {0: 8, 9: 12}]}} - {'HEADER': {'type': 'INSERT'}, 'BODY': {'tuple': [5, {0: 8, 2: 1, 9: 12}]}} - {'HEADER': {'type': 'INSERT'}, 'BODY': {'tuple': [2, {0: 8}]}} - {'HEADER': {'type': 'INSERT'}, 'BODY': {'tuple': [8, {2: 1, 8: 2}]}} - {'HEADER': {'type': 'INSERT'}, 'BODY': {'tuple': [11, {}]}} - {'HEADER': {'timestamp': 1495526288.1972, 'type': 'INSERT'}, 'BODY': {'tuple': [ 4, {0: 8, 2: 3}]}} - {'HEADER': {'timestamp': 1495526288.2103, 'type': 'INSERT'}, 'BODY': {'tuple': [ 5, {0: 8, 2: 3, 9: 24}]}} - {'HEADER': {'timestamp': 1495526288.2103, 'type': 'INSERT'}, 'BODY': {'tuple': [ 8, {2: 3, 8: 4}]}} - {'HEADER': {'timestamp': 1495526288.2103, 'type': 'INSERT'}, 'BODY': {'tuple': [ 10, {0: 8, 9: 24}]}} ... trun:cmd('clear filter') --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/panic.lua0000664000000000000000000000044413306560010017727 0ustar rootroot#!/usr/bin/env tarantool os = require('os') box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", force_recovery = false, rows_per_wal = 10 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/xlog/gh1433.test.lua0000664000000000000000000000033413306560010020522 0ustar rootrootfio = require('fio') box.space._schema:insert({'gh1433'}) box.space._schema:delete({'gh1433'}) glob = fio.pathjoin(box.cfg.memtx_dir, '*.snap') for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end box.snapshot() tarantool_1.9.1.26.g63eb81e3c/test/xlog/errinj.result0000664000000000000000000000316313306560010020664 0ustar rootroot-- -- we actually need to know what xlogs the server creates, -- so start from a clean state -- -- -- Check how well we handle a failed log write -- in panic_on_wal_error=false mode -- env = require('test_run') --- ... 
test_run = env.new() --- ... test_run:cmd('restart server default with cleanup=1') box.error.injection.set("ERRINJ_WAL_WRITE", true) --- - ok ... box.space._schema:insert{"key"} --- - error: Failed to write to disk ... test_run:cmd('restart server default') box.space._schema:insert{"key"} --- - ['key'] ... test_run:cmd('restart server default') box.space._schema:get{"key"} --- - ['key'] ... box.space._schema:delete{"key"} --- - ['key'] ... -- list all the logs name = string.match(arg[0], "([^,]+)%.lua") --- ... require('fio').glob(name .. "/*.xlog") --- - - xlog/00000000000000000000.xlog - xlog/00000000000000000001.xlog ... test_run:cmd('restart server default with cleanup=1') -- gh-881 iproto request with wal IO error errinj = box.error.injection --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... test = box.schema.create_space('test') --- ... _ = test:create_index('primary') --- ... for i=1, box.cfg.rows_per_wal do test:insert{i, 'test'} end --- ... c = require('net.box').connect(box.cfg.listen) --- ... -- try to write xlog without permission to write to disk errinj.set('ERRINJ_WAL_WRITE', true) --- - ok ... c.space.test:insert({box.cfg.rows_per_wal + 1,1,2,3}) --- - error: Failed to write to disk ... errinj.set('ERRINJ_WAL_WRITE', false) --- - ok ... -- Cleanup test:drop() --- ... errinj = nil --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/0000775000000000000000000000000013306560010017372 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/crc.bad.xlog0000664000000000000000000000110413306560010021555 0ustar rootrootXLOG 0.13 Server: f09eae7a-e876-45f1-b9f5-6995d13de2b0 VClock: {} պ (utPPPPPPPAhg max_id!+պ *ΕhPPPPPPPAh&!testmemtxպ C\PPPPPPPAn[@- !primarytreeuniqueãlsnunsignedպ ;ܸePPPPPPPAy;т!պ =$QPPPPPPPAzW.!պ PPPPPPPA}r պ "/)PPPPPPPA3 !=պ %Ζ谧PPPPPPP A(=!պ %ά;PPPPPPP  A(=!tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/eof.bad.xlog0000664000000000000000000000052013306560010021560 0ustar rootrootVYLOG 0.13 Version: 1.7.4-29-g4a02c32 Instance: 5d92da79-9c14-476d-86ef-b795d2496f83 VClock: {1: 12} պ S·#!unsigned!  ! !!! պ ζ)AH ~!պ D- ڧAH u! AH u!AH u!  tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/format.bad.xlog0000664000000000000000000000110513306560010022277 0ustar rootrootSNOP 0.13 Server: f09eae7a-e876-45f1-b9f5-6995d13de2b0 VClock: {} պ (utPPPPPPPAhg max_id!+պ *ΕhPPPPPPPAh&!testmemtxպ C\PPPPPPPAn[@- !primarytreeuniqueãlsnunsignedպ ;ܸePPPPPPPAy;т!պ ?$QPPPPPPPAzW.!պ PPPPPPPA}r պ "/)PPPPPPPA3 !=պ %Ζ谧PPPPPPP A(=!պ %ά;PPPPPPP  A(=! tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/v12/0000775000000000000000000000000013306560010020002 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/v12/00000000000000000000.ok.snap0000664000000000000000000001147513306560010023264 0ustar rootrootSNAP 0.12 Server: 65f1a623-9e99-47b3-92e5-0c9a63ff566c VClock: {1: 0} պ <3-H8!cluster$1366b1b5-2329-46f4-9fce-a1dc4275d469պ է!max_idպ κ!versionպ 6ދ!_schemamemtxtypestrnamekeyպ ̷]p!_spacememtxnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̺5Q!_vspacesysviewnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̜yb! 
_indexmemtxnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ ̟i!!_vindexsysviewnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ ruQ !(_funcmemtxnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ uΞzX !)_vfuncsysviewnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ ̂' !0_usermemtxnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̅;A !1_vusersysviewnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̜  !8_privmemtxnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ ̟Qk!9_vprivsysviewnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ Jhŝ!@_clustermemtxnameidtypenumnameuuidtypestrպ /k !primarytreeuniqueÑstrպ /w_ !primarytreeuniqueÑnumպ -ܟڧ !ownertreeunique‘numպ ,Tqԧ !nametreeuniqueÑstrպ /δ?姦 !primarytreeuniqueÑnumպ -8< !ownertreeunique‘numպ ,޵ t !nametreeuniqueÑstrպ 5d\ ! primarytreeuniqueÒnumnumպ 2μ1R ! nametreeuniqueÒnumstrպ 5Ύ2& !!primarytreeuniqueÒnumnumպ 2Bty_ !!nametreeuniqueÒnumstrպ / 8 !(primarytreeuniqueÑnumպ -F#[M !(ownertreeunique‘numպ ,AF̧ !(nametreeuniqueÑstrպ /Ρ !)primarytreeuniqueÑnumպ -~.ȉ !)ownertreeunique‘numպ ,1d&  !)nametreeuniqueÑstrպ /5`! !0primarytreeuniqueÑnumպ -(Ǡg" !0ownertreeunique‘numպ ,# !0nametreeuniqueÑstrպ /L}ڧ$ !1primarytreeuniqueÑnumպ -I% !1ownertreeunique‘numպ ,.S& !1nametreeuniqueÑstrպ ;Ε(' !8primarytreeuniqueÓnumstrnumպ -鐧( !8ownertreeunique‘numպ 4׾Χ) !8objecttreeunique’strnumպ ;s* !9primarytreeuniqueÓnumstrnumպ -$zT+ !9ownertreeunique‘numպ 4Έf, !9objecttreeunique’strnumպ /D- !@primarytreeuniqueÑnumպ ,S. 
!@uuidtreeuniqueÑstrպ *#ͧ/(!box.schema.user.infoLUAպ G§00!guestuserպ Κ$i10!adminuserպ δK20!publicroleպ !+E͞30!replicationroleպ Aꧦ48!roleպ Wѧ58!universeպ 68!functionպ 378!spaceպ Δ@u88!space!պ U my98!space)պ l:8!space1պ A;8!space9պ 5<8!space@պ =8!universeպ 5a>@!$65f1a623-9e99-47b3-92e5-0c9a63ff566ctarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/v12/00000000000000000000.ok.xlog0000664000000000000000000000107613306560010023270 0ustar rootrootXLOG 0.12 Server: 0e82a45f-d1ff-4f19-bf2c-b815ad567dab VClock: {1: 0} պ ( 6ANU max_id!+պ *XAN_ !testmemtxպ 92AN`Ȃ !primarytreeuniqueÑnumպ <&ANb$!պ άᡶANb!պ ΚXANc պ "ΈANߧANcg !=պ % ANcDŽ(=!պ %"  ANd((=!tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/version.bad.xlog0000664000000000000000000000110513306560010022474 0ustar rootrootXLOG 0.07 Server: f09eae7a-e876-45f1-b9f5-6995d13de2b0 VClock: {} պ (utPPPPPPPAhg max_id!+պ *ΕhPPPPPPPAh&!testmemtxպ C\PPPPPPPAn[@- !primarytreeuniqueãlsnunsignedպ ;ܸePPPPPPPAy;т!պ ?$QPPPPPPPAzW.!պ PPPPPPPA}r պ "/)PPPPPPPA3 !=պ %Ζ谧PPPPPPP A(=!պ %ά;PPPPPPP  A(=! 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/v13/0000775000000000000000000000000013306560010020003 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/v13/00000000000000000000.ok.snap0000664000000000000000000000215013306560010023253 0ustar rootrootSNAP 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {} պ N~I7PPPPP(/P- oF q t1h%(aY&Ie18Hm( fu<px|@3C15Jg ?O*{|k)+iofpܖU5ɪdT[qUfq&&WI6CpswϹ#ӿ{uhia fcV(77Z˟$(XJ'芰5⢅}}/~at5%}7/ےk[T;Q+CûU;̀J@S[Y- OZ" ({OvzMJg`z6+І8nn gJg?B]$nNBm8KCH*u8:Jq&t:6\ ׈P$p,p*l(l&T:;i#0RQƁI0$?7μ%DA2.H<<:.Sq``vNeyl_TوK'Jgǐ!>>82ZI̻q-_Rt~<ákٯ&6OAT:{ǒ*Wu!pE'R:pqQF+B LrŐ lDjFf@B!N`a3 CCRUlXU+URU*x1ToV̇hꉕQ9j $%U9{qƛH2k}| ziZNAF| ?|QrICyD_ׯkL9MJzOZdōh' RFJim Y"KսD񝕴E\uTl#`Eb61E@8 5A Z0熄!ҲR$Ͳq ^M)g&dzzt#~W9 $dKɯޡ - oT+w>tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader/v13/00000000000000000000.ok.xlog0000664000000000000000000000110413306560010023261 0ustar rootrootXLOG 0.13 Server: f09eae7a-e876-45f1-b9f5-6995d13de2b0 VClock: {} պ (utPPPPPPPAhg max_id!+պ *ΕhPPPPPPPAh&!testmemtxպ C\PPPPPPPAn[@- !primarytreeuniqueãlsnunsignedպ ;ܸePPPPPPPAy;т!պ ?$QPPPPPPPAzW.!պ PPPPPPPA}r պ "/)PPPPPPPA3 !=պ %Ζ谧PPPPPPP A(=!պ %ά;PPPPPPP  A(=!tarantool_1.9.1.26.g63eb81e3c/test/xlog/panic_on_wal_error.test.lua0000664000000000000000000000462413306560010023461 0ustar rootroot-- preparatory stuff env = require('test_run') test_run = env.new() fio = require('fio') glob = fio.pathjoin(box.cfg.wal_dir, '*.xlog') for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end glob = fio.pathjoin(box.cfg.vinyl_dir, '*.vylog') for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end glob = fio.pathjoin(box.cfg.memtx_dir, '*.snap') for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end test_run:cmd("restart server default") box.schema.user.grant('guest', 'replication') _ = box.schema.space.create('test') _ = box.space.test:create_index('pk') -- -- reopen xlog -- test_run:cmd("restart server default") box.space.test ~= nil -- insert some stuff -- 
box.space.test:auto_increment{'before snapshot'} -- -- this snapshot will go to the replica -- box.snapshot() -- -- create a replica, let it catch up somewhat -- test_run:cmd("create server replica with rpl_master=default, script='xlog/replica.lua'") test_run:cmd("start server replica") test_run:cmd("switch replica") box.space.test:select{} -- -- stop replica, restart the master, insert more stuff -- which will make it into an xlog only -- test_run:cmd("switch default") test_run:cmd("stop server replica") box.space.test:auto_increment{'after snapshot'} box.space.test:auto_increment{'after snapshot - one more row'} -- -- save snapshot and remove xlogs -- box.snapshot() fio = require('fio') glob = fio.pathjoin(box.cfg.wal_dir, '*.xlog') files = fio.glob(glob) for _, file in pairs(files) do fio.unlink(file) end test_run:cmd("restart server default") -- -- make sure the server has some xlogs, otherwise the -- replica doesn't discover the gap in the logs -- box.space.test:auto_increment{'after snapshot and restart'} box.space.test:auto_increment{'after snapshot and restart - one more row'} -- -- check that panic is true -- box.cfg{force_recovery=false} box.cfg.force_recovery -- -- try to start the replica, ha-ha -- (replication should fail, some rows are missing) -- test_run:cmd("start server replica") test_run:cmd("switch replica") fiber = require('fiber') while box.info.replication[1].upstream.status ~= "stopped" do fiber.sleep(0.001) end box.info.replication[1].upstream.status box.info.replication[1].upstream.message box.space.test:select{} -- -- test_run:cmd("switch default") test_run:cmd("stop server replica") test_run:cmd("cleanup server replica") -- -- cleanup box.space.test:drop() box.schema.user.revoke('guest', 'replication') tarantool_1.9.1.26.g63eb81e3c/test/xlog/checkpoint_daemon.result0000664000000000000000000001011613306560010023041 0ustar rootrootfio = require 'fio' --- ... errno = require 'errno' --- ... fiber = require 'fiber' --- ... 
env = require('test_run') --- ... test_run = env.new() --- ... test_run:cleanup_cluster() --- ... box.cfg{checkpoint_interval = 0} --- ... PERIOD = 0.03 --- ... if jit.os ~= 'Linux' then PERIOD = 1.5 end --- ... space = box.schema.space.create('checkpoint_daemon') --- ... index = space:create_index('pk', { type = 'tree', parts = { 1, 'unsigned' }}) --- ... box.cfg{checkpoint_interval = PERIOD, checkpoint_count = 2 } --- ... no = 1 --- ... -- first xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end --- ... -- second xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end --- ... -- wait for last snapshot fiber.sleep(1.5 * PERIOD) --- ... -- third xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end --- ... -- fourth xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end --- ... -- wait for last snapshot test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 100 do fiber.sleep(PERIOD) snaps = fio.glob(fio.pathjoin(box.cfg.memtx_dir, '*.snap')) xlogs = fio.glob(fio.pathjoin(box.cfg.wal_dir, '*.xlog')) if #snaps == 2 then break end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... #snaps == 2 or snaps --- - true ... #xlogs > 0 --- - true ... fio.basename(snaps[1], '.snap') >= fio.basename(xlogs[1], '.xlog') --- - true ... -- gh-2780 check that scheduled snapshots are performed fiber.sleep(3 * PERIOD) --- ... -- check that it's not first snapshot test_run:grep_log("default", "saving snapshot", 400) == nil --- - true ... test_run:grep_log("default", "making snapshot", 400) ~= nil --- - true ... -- restore default options box.cfg{checkpoint_interval = 3600 * 4, checkpoint_count = 4 } --- ... space:drop() --- ... daemon = box.internal.checkpoint_daemon --- ... -- stop daemon box.cfg{ checkpoint_interval = 0 } --- ... -- wait daemon to stop while daemon.fiber ~= nil do fiber.sleep(0) end --- ... daemon.fiber == nil --- - true ... 
-- start daemon box.cfg{ checkpoint_interval = 10 } --- ... daemon.fiber ~= nil --- - true ... -- reload configuration box.cfg{ checkpoint_interval = 15, checkpoint_count = 20 } --- ... daemon.checkpoint_interval == 15 --- - true ... daemon.checkpoint_count = 20 --- ... -- Check that checkpoint_count can't be < 1. box.cfg{ checkpoint_count = 1 } --- ... box.cfg{ checkpoint_count = 0 } --- - error: 'Incorrect value for option ''checkpoint_count'': the value must not be less than one' ... box.cfg.checkpoint_count --- - 1 ... -- Start PERIOD = 3600 --- ... box.cfg{ checkpoint_count = 2, checkpoint_interval = PERIOD} --- ... snapshot_time, time = daemon.next_snapshot_time, fiber.time() --- ... snapshot_time + 1 >= time + PERIOD or {snapshot_time, time, PERIOD} --- - true ... snapshot_time - 1 <= time + 2 * PERIOD or {snapshot_time, time, PERIOD} --- - true ... daemon_fiber = daemon.fiber --- ... daemon_control = daemon.control --- ... -- Reload #1 PERIOD = 100 --- ... box.cfg{ checkpoint_count = 2, checkpoint_interval = PERIOD} --- ... snapshot_time, time = daemon.next_snapshot_time, fiber.time() --- ... snapshot_time + 1 >= time + PERIOD or {snapshot_time, time, PERIOD} --- - true ... snapshot_time - 1 <= time + 2 * PERIOD or {snapshot_time, time, PERIOD} --- - true ... daemon.fiber == daemon_fiber --- - true ... daemon.control == daemon_control --- - true ... -- Reload #2 PERIOD = 1000 --- ... box.cfg{ checkpoint_count = 2, checkpoint_interval = PERIOD} --- ... snapshot_time, time = daemon.next_snapshot_time, fiber.time() --- ... snapshot_time + 1 >= time + PERIOD or {snapshot_time, time, PERIOD} --- - true ... snapshot_time - 1 <= time + 2 * PERIOD or {snapshot_time, time, PERIOD} --- - true ... daemon.fiber == daemon_fiber --- - true ... daemon.control == daemon_control --- - true ... daemon_control = nil --- ... daemin_fiber = nil --- ... -- Shutdown box.cfg{ checkpoint_count = 2, checkpoint_interval = 0} --- ... daemon.next_snapshot_time --- - null ... 
daemon.fiber == nil --- - true ... daemon.control == nil --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/snap_io_rate.result0000664000000000000000000000063513306560010022037 0ustar rootrootdigest = require'digest' --- ... fiber = require'fiber' --- ... _ = box.schema.space.create('snap'):create_index('pk') --- ... -- write > 64 mb snapshot for i = 0, 127 do box.space.snap:replace({i, digest.urandom(512 * 1024)}) end --- ... t1 = fiber.time() --- ... box.snapshot() --- - ok ... t2 = fiber.time() --- ... t2 - t1 > 64 / box.cfg.snap_io_rate_limit * 0.95 --- - true ... box.space.snap:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/gh1433.result0000664000000000000000000000045313306560010020303 0ustar rootrootfio = require('fio') --- ... box.space._schema:insert({'gh1433'}) --- - ['gh1433'] ... box.space._schema:delete({'gh1433'}) --- - ['gh1433'] ... glob = fio.pathjoin(box.cfg.memtx_dir, '*.snap') --- ... for _, file in pairs(fio.glob(glob)) do fio.unlink(file) end --- ... box.snapshot() --- - ok ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/panic_on_lsn_gap.result0000664000000000000000000001224713306560010022667 0ustar rootroot-- -- we actually need to know what xlogs the server creates, -- so start from a clean state -- -- -- Check how the server is able to find the next -- xlog if there are failed writes (lsn gaps). -- env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("create server panic with script='xlog/panic.lua'") --- - true ... test_run:cmd("start server panic") --- - true ... test_run:cmd("switch panic") --- - true ... box.info.vclock --- - {} ... s = box.space._schema --- ... -- we need to have at least one record in the -- xlog otherwise the server believes that there -- is an lsn gap during recovery. -- s:replace{"key", 'test 1'} --- - ['key', 'test 1'] ... box.info.vclock --- - {1: 1} ... box.error.injection.set("ERRINJ_WAL_WRITE", true) --- - ok ... t = {} --- ... 
-- -- Try to insert rows, so that it's time to -- switch WALs. No switch will happen though, -- since no writes were made. -- test_run:cmd("setopt delimiter ';'") --- - true ... for i=1,box.cfg.rows_per_wal do status, msg = pcall(s.replace, s, {"key"}) table.insert(t, msg) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t --- - - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk ... -- -- Before restart: oops, our LSN is 11, -- even though we didn't insert anything. -- name = string.match(arg[0], "([^,]+)%.lua") --- ... box.info.vclock --- - {1: 1} ... require('fio').glob(name .. "/*.xlog") --- - - panic/00000000000000000000.xlog ... test_run:cmd("restart server panic") -- -- after restart: our LSN is the LSN of the -- last *written* row, all the failed -- rows are gone from lsn counter. -- box.info.vclock --- - {1: 1} ... box.space._schema:select{'key'} --- - - ['key', 'test 1'] ... box.error.injection.set("ERRINJ_WAL_WRITE", true) --- - ok ... t = {} --- ... s = box.space._schema --- ... -- -- now do the same -- test_run:cmd("setopt delimiter ';'") --- - true ... for i=1,box.cfg.rows_per_wal do status, msg = pcall(s.replace, s, {"key"}) table.insert(t, msg) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... t --- - - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk - Failed to write to disk ... box.info.vclock --- - {1: 1} ... box.error.injection.set("ERRINJ_WAL_WRITE", false) --- - ok ... -- -- Write a good row after a series of failed -- rows. 
There is a gap in LSN, correct, -- but it's *inside* a single WAL, so doesn't -- affect WAL search in recover_remaining_wals() -- s:replace{'key', 'test 2'} --- - ['key', 'test 2'] ... -- -- notice that vclock before and after -- server stop is the same -- because it's -- recorded in the last row -- box.info.vclock --- - {1: 12} ... test_run:cmd("restart server panic") box.info.vclock --- - {1: 12} ... box.space._schema:select{'key'} --- - - ['key', 'test 2'] ... -- list all the logs name = string.match(arg[0], "([^,]+)%.lua") --- ... require('fio').glob(name .. "/*.xlog") --- - - panic/00000000000000000000.xlog - panic/00000000000000000001.xlog ... -- now insert 10 rows - so that the next -- row will need to switch the WAL test_run:cmd("setopt delimiter ';'") --- - true ... for i=1,box.cfg.rows_per_wal do box.space._schema:replace{"key", 'test 3'} end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- the next insert should switch xlog, but aha - it fails -- a new xlog file is created but has 0 rows require('fio').glob(name .. "/*.xlog") --- - - panic/00000000000000000000.xlog - panic/00000000000000000001.xlog - panic/00000000000000000012.xlog ... box.error.injection.set("ERRINJ_WAL_WRITE", true) --- - ok ... box.space._schema:replace{"key", 'test 3'} --- - error: Failed to write to disk ... box.info.vclock --- - {1: 22} ... require('fio').glob(name .. "/*.xlog") --- - - panic/00000000000000000000.xlog - panic/00000000000000000001.xlog - panic/00000000000000000012.xlog - panic/00000000000000000022.xlog ... -- and the next one (just to be sure box.space._schema:replace{"key", 'test 3'} --- - error: Failed to write to disk ... box.info.vclock --- - {1: 22} ... require('fio').glob(name .. "/*.xlog") --- - - panic/00000000000000000000.xlog - panic/00000000000000000001.xlog - panic/00000000000000000012.xlog - panic/00000000000000000022.xlog ... box.error.injection.set("ERRINJ_WAL_WRITE", false) --- - ok ... 
-- then a success box.space._schema:replace{"key", 'test 4'} --- - ['key', 'test 4'] ... box.info.vclock --- - {1: 25} ... require('fio').glob(name .. "/*.xlog") --- - - panic/00000000000000000000.xlog - panic/00000000000000000001.xlog - panic/00000000000000000012.xlog - panic/00000000000000000022.xlog ... -- restart is ok test_run:cmd("restart server panic") box.space._schema:select{'key'} --- - - ['key', 'test 4'] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server panic") --- - true ... test_run:cmd("cleanup server panic") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/checkpoint_daemon.test.lua0000664000000000000000000000647713306560010023301 0ustar rootrootfio = require 'fio' errno = require 'errno' fiber = require 'fiber' env = require('test_run') test_run = env.new() test_run:cleanup_cluster() box.cfg{checkpoint_interval = 0} PERIOD = 0.03 if jit.os ~= 'Linux' then PERIOD = 1.5 end space = box.schema.space.create('checkpoint_daemon') index = space:create_index('pk', { type = 'tree', parts = { 1, 'unsigned' }}) box.cfg{checkpoint_interval = PERIOD, checkpoint_count = 2 } no = 1 -- first xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end -- second xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end -- wait for last snapshot fiber.sleep(1.5 * PERIOD) -- third xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end -- fourth xlog for i = 1, box.cfg.rows_per_wal + 10 do space:insert { no } no = no + 1 end -- wait for last snapshot test_run:cmd("setopt delimiter ';'") for i = 1, 100 do fiber.sleep(PERIOD) snaps = fio.glob(fio.pathjoin(box.cfg.memtx_dir, '*.snap')) xlogs = fio.glob(fio.pathjoin(box.cfg.wal_dir, '*.xlog')) if #snaps == 2 then break end end; test_run:cmd("setopt delimiter ''"); #snaps == 2 or snaps #xlogs > 0 fio.basename(snaps[1], '.snap') >= fio.basename(xlogs[1], '.xlog') -- gh-2780 check that scheduled snapshots are performed 
fiber.sleep(3 * PERIOD) -- check that it's not first snapshot test_run:grep_log("default", "saving snapshot", 400) == nil test_run:grep_log("default", "making snapshot", 400) ~= nil -- restore default options box.cfg{checkpoint_interval = 3600 * 4, checkpoint_count = 4 } space:drop() daemon = box.internal.checkpoint_daemon -- stop daemon box.cfg{ checkpoint_interval = 0 } -- wait daemon to stop while daemon.fiber ~= nil do fiber.sleep(0) end daemon.fiber == nil -- start daemon box.cfg{ checkpoint_interval = 10 } daemon.fiber ~= nil -- reload configuration box.cfg{ checkpoint_interval = 15, checkpoint_count = 20 } daemon.checkpoint_interval == 15 daemon.checkpoint_count = 20 -- Check that checkpoint_count can't be < 1. box.cfg{ checkpoint_count = 1 } box.cfg{ checkpoint_count = 0 } box.cfg.checkpoint_count -- Start PERIOD = 3600 box.cfg{ checkpoint_count = 2, checkpoint_interval = PERIOD} snapshot_time, time = daemon.next_snapshot_time, fiber.time() snapshot_time + 1 >= time + PERIOD or {snapshot_time, time, PERIOD} snapshot_time - 1 <= time + 2 * PERIOD or {snapshot_time, time, PERIOD} daemon_fiber = daemon.fiber daemon_control = daemon.control -- Reload #1 PERIOD = 100 box.cfg{ checkpoint_count = 2, checkpoint_interval = PERIOD} snapshot_time, time = daemon.next_snapshot_time, fiber.time() snapshot_time + 1 >= time + PERIOD or {snapshot_time, time, PERIOD} snapshot_time - 1 <= time + 2 * PERIOD or {snapshot_time, time, PERIOD} daemon.fiber == daemon_fiber daemon.control == daemon_control -- Reload #2 PERIOD = 1000 box.cfg{ checkpoint_count = 2, checkpoint_interval = PERIOD} snapshot_time, time = daemon.next_snapshot_time, fiber.time() snapshot_time + 1 >= time + PERIOD or {snapshot_time, time, PERIOD} snapshot_time - 1 <= time + 2 * PERIOD or {snapshot_time, time, PERIOD} daemon.fiber == daemon_fiber daemon.control == daemon_control daemon_control = nil daemin_fiber = nil -- Shutdown box.cfg{ checkpoint_count = 2, checkpoint_interval = 0} daemon.next_snapshot_time 
daemon.fiber == nil daemon.control == nil tarantool_1.9.1.26.g63eb81e3c/test/xlog/misc.result0000664000000000000000000000222113306560010020320 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd('restart server default with cleanup=1') fio = require('fio') --- ... xlog = require('xlog') --- ... netbox = require('net.box') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... -- -- Check that xlogs doesn't contain IPROTO_SYNC -- conn = netbox.connect(box.cfg.listen) --- ... -- insert some row using the binary protocol conn.space._schema:insert({'test'}) --- - ['test'] ... -- rotate xlog box.snapshot() --- - ok ... -- dump xlog xlog_path = fio.pathjoin(box.cfg.wal_dir, string.format("%020d.xlog", 0)) --- ... result = {} --- ... fun, param, state = xlog.pairs(xlog_path) --- ... type(fun.totable) --- - function ... -- skip grants until our insert into _schema repeat state, row = fun(param, state) until row.BODY.space_id == box.space._schema.id --- ... row.HEADER.type --- - INSERT ... row.HEADER.sync --- - null ... row.BODY --- - space_id: 272 tuple: ['test'] ... box.space._schema:delete('test') --- - ['test'] ... -- -- Clean up -- box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... netbox = nil --- ... xlog = nil --- ... fio = nil --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/force_recovery.lua0000664000000000000000000000022113306560010021642 0ustar rootroot#!/usr/bin/env tarantool box.cfg { listen = os.getenv("LISTEN"), force_recovery = true } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/0000775000000000000000000000000013306565107017573 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.9/0000775000000000000000000000000013306565107020246 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.9/version0000664000000000000000000000002113306565107021647 0ustar rootroot1.6.9-0-gd36ba27 tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.9/00000000000000000010.xlog0000664000000000000000000000061013306565107023116 0ustar rootrootXLOG 0.12 Server: 65f1a623-9e99-47b3-92e5-0c9a63ff566c VClock: {1: 10} պ +ίG A-!debiansargeBպ +Lr) A4!debianwoody=7պ ,Fw AZ!ubuntuwilyV(պ -Ζ A!ubuntuvividU8պ .UߠAq!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.9/00000000000000000005.xlog0000664000000000000000000000060413306565107023125 0ustar rootrootXLOG 0.12 Server: 65f1a623-9e99-47b3-92e5-0c9a63ff566c VClock: {1: 5} պ ,YA~!debianjessiePU<պ ,VЧAׂ!debianwheezyFQպ -C(GA땂!debiansqueeze@!$65f1a623-9e99-47b3-92e5-0c9a63ff566ctarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.9/00000000000000000015.xlog0000664000000000000000000000100113306565107023116 0ustar rootrootXLOG 0.12 Server: 65f1a623-9e99-47b3-92e5-0c9a63ff566c VClock: {1: 15} պ /hA!ubuntupreciseOպ uXLA  !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (OqA ń max_id!+պ :HOA i!temporarymemtxtemporaryÐպ PΔV`A 0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/0000775000000000000000000000000013306565107020243 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/version0000664000000000000000000000002313306565107021646 0ustar rootroot1.6.6-125-g935cb31 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/00000000000000000010.xlog0000664000000000000000000000061013306565107023113 0ustar rootrootXLOG 0.12 Server: f87d5b71-d191-466d-a5c9-8d334a4c0316 VClock: {1: 10} պ +Μ/ Aծ-Ws!debiansargeBպ +q Aծ-Ws8!debianwoody=7պ ,κ {k Aծ-Ws!ubuntuwilyV(պ -ΕlAծ-Ws!ubuntuvividU8պ . Aծ-WsJ!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/00000000000000000005.xlog0000664000000000000000000000060413306565107023122 0ustar rootrootXLOG 0.12 Server: f87d5b71-d191-466d-a5c9-8d334a4c0316 VClock: {1: 5} պ ,$Aծ-Ws!debianjessiePU<պ ,MVAծ-Ws!debianwheezyFQպ -OAծ-Ws!debiansqueeze<Aծ-Ws !primaryhashuniqueÓstrstrnumպ :ίigۧAծ-Wsf !codenamehashuniqueÑstrպ 6r6Aծ-Wsт !timetreeunique‘numtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/00000000000000000025.xlog0000664000000000000000000000054313306565107023126 0ustar rootrootXLOG 0.12 Server: f87d5b71-d191-466d-a5c9-8d334a4c0316 VClock: {1: 25} պ !ήC7Aծ-WsR8!roleպ $mDAծ-WsǍ8!spaceպ %;Aծ-Wsx8!functionպ )cAծ-Wsw(!someotherfuncպ %;AAծ-Ws^8!functiontarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/00000000000000000000.snap0000664000000000000000000001147013306565107023110 0ustar rootrootSNAP 0.12 Server: f87d5b71-d191-466d-a5c9-8d334a4c0316 VClock: {1: 0} պ <p]!cluster$dd0be018-aff6-42e0-b051-7bea11046855պ է!max_idպ ̠d!versionպ 6Σ}@!_schemamemtxtypestrnamekeyպ ̷ξ!_spacememtxnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̺V!_vspacesysviewnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̜ΫD! 
_indexmemtxnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ ̟f!!_vindexsysviewnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ rX !(_funcmemtxnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ u#ϧ !)_vfuncsysviewnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ ̂ι !0_usermemtxnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̅Υ[ !1_vusersysviewnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̜OC¦ !8_privmemtxnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ ̟μ!<!9_vprivsysviewnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ JίW{!@_clustermemtxnameidtypenumnameuuidtypestrպ /k !primarytreeuniqueÑstrպ /w_ !primarytreeuniqueÑnumպ -ܟڧ !ownertreeunique‘numպ ,Tqԧ !nametreeuniqueÑstrպ /δ?姦 !primarytreeuniqueÑnumպ -8< !ownertreeunique‘numպ ,޵ t !nametreeuniqueÑstrպ 5d\ ! primarytreeuniqueÒnumnumպ 2μ1R ! nametreeuniqueÒnumstrպ 5Ύ2& !!primarytreeuniqueÒnumnumպ 2Bty_ !!nametreeuniqueÒnumstrպ / 8 !(primarytreeuniqueÑnumպ -F#[M !(ownertreeunique‘numպ ,AF̧ !(nametreeuniqueÑstrպ /Ρ !)primarytreeuniqueÑnumպ -~.ȉ !)ownertreeunique‘numպ ,1d&  !)nametreeuniqueÑstrպ /5`! !0primarytreeuniqueÑnumպ -(Ǡg" !0ownertreeunique‘numպ ,# !0nametreeuniqueÑstrպ /L}ڧ$ !1primarytreeuniqueÑnumպ -I% !1ownertreeunique‘numպ ,.S& !1nametreeuniqueÑstrպ ;Ε(' !8primarytreeuniqueÓnumstrnumպ -鐧( !8ownertreeunique‘numպ 4׾Χ) !8objecttreeunique’strnumպ ;s* !9primarytreeuniqueÓnumstrnumպ -$zT+ !9ownertreeunique‘numպ 4Έf, !9objecttreeunique’strnumպ /D- !@primarytreeuniqueÑnumպ ,S. 
!@uuidtreeuniqueÑstrպ &Mz/(!box.schema.user.infoպ G§00!guestuserպ Κ$i10!adminuserպ δK20!publicroleպ !+E͞30!replicationroleպ Aꧦ48!roleպ Wѧ58!universeպ 68!functionպ 378!spaceպ Δ@u88!space!պ U my98!space)պ l:8!space1պ A;8!space9պ 5<8!space@պ =8!universeպ 5f>@!$f87d5b71-d191-466d-a5c9-8d334a4c0316tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.6/00000000000000000015.xlog0000664000000000000000000000077713306565107023136 0ustar rootrootXLOG 0.12 Server: f87d5b71-d191-466d-a5c9-8d334a4c0316 VClock: {1: 15} պ /ίAծ-Ws!ubuntupreciseOպ uDAծ-Ws !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (;xAAծ-Ws max_id!+պ 8ΈocAծ-Wsa!temporarymemtxtemporaryպ P̝ϧAծ-Ws'0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.5/0000775000000000000000000000000013306565107020242 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.5/version0000664000000000000000000000002113306565107021643 0ustar rootroot1.6.5-0-g800652e tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.5/00000000000000000010.xlog0000664000000000000000000000061013306565107023112 0ustar rootrootXLOG 0.12 Server: 969d0c4b-309b-4868-aff5-cf707f6b3f6f VClock: {1: 10} պ +°PPPPPPP Aծ-Uݝ!debiansargeBպ +(~@PPPPPPP Aծ-Uݠ!debianwoody=7պ ,N[PPPPPPP Aծ-Uݠ܂!ubuntuwilyV(պ -eΧPPPPPPPAծ-Uݡ!ubuntuvividU8պ .η#TNPPPPPPPAծ-Uݢ!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.5/00000000000000000005.xlog0000664000000000000000000000060413306565107023121 0ustar rootrootXLOG 0.12 Server: 969d0c4b-309b-4868-aff5-cf707f6b3f6f VClock: {1: 5} պ ,ʧPPPPPPPAծ-Uݗق!debianjessiePU<պ ,l{WPPPPPPPAծ-Uݛ!debianwheezyFQպ -U PPPPPPPAծ-Uݜe!debiansqueeze@!$969d0c4b-309b-4868-aff5-cf707f6b3f6ftarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.5/00000000000000000015.xlog0000664000000000000000000000077313306565107023131 0ustar rootrootXLOG 0.12 Server: 969d0c4b-309b-4868-aff5-cf707f6b3f6f VClock: {1: 15} պ /' PPPPPPPAծ-Uݢ!ubuntupreciseOպ s΅DwPPPPPPPAծ-Uݤ 
!=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ &úPPPPPPPAծ-Uݥ max_id!+պ 8]זPPPPPPPAծ-Uݧ0!temporarymemtxtemporaryպ P KPPPPPPPAծ-Uݨ0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/0000775000000000000000000000000013306565107020240 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/version0000664000000000000000000000002113306565107021641 0ustar rootroot1.7.2-0-g8e92715 tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000010.xlog0000664000000000000000000000061013306565107023110 0ustar rootrootXLOG 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {1: 10} պ +]}PPPPPPP A4!debiansargeBպ +!PPPPPPP A4z!debianwoody=7պ ,δ,PPPPPPPP A4@!ubuntuwilyV(պ -gxZPPPPPPPA4n!ubuntuvividU8պ .礭.PPPPPPPA4!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000005.xlog0000664000000000000000000000060413306565107023117 0ustar rootrootXLOG 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {1: 5} պ ,ΨFPPPPPPPA4!debianjessiePU<պ ,μePPPPPPPA4ʂ!debianwheezyFQպ -΀*=PPPPPPPA4Y!debiansqueeze7PPPPPPPA4e(!somefuncLUAպ #΀ЧPPPPPPPA4Ղ8 universeպ (3p_PPPPPPPA4͂0!someroleroletarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000000.xlog0000664000000000000000000000072113306565107023112 0ustar rootrootXLOG 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {} պ (޽GPPPPPPPA4̈́ max_id!+պ ,?PPPPPPPA4!distromemtxպ UvyPPPPPPPA4 !primaryhashuniqueãlsnstringstringunsignedպ BΧIPPPPPPPA4J !codenamehashuniqueãlsnstringպ @QݹPPPPPPPA4; !timetreeunique£lsnunsignedtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000025.xlog0000664000000000000000000000054713306565107023127 0ustar rootrootXLOG 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {1: 25} պ !.PPPPPPPA4:8!roleպ $k*JPPPPPPPA48!spaceպ %Ν"PPPPPPPA4+8!functionպ -D5PPPPPPPA4(!someotherfuncLUAպ 
%]_#PPPPPPPA4ڂ8!functiontarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000000.snap0000664000000000000000000000215013306565107023100 0ustar rootrootSNAP 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {} պ N~I7PPPPP(/P- oF q t1h%(aY&Ie18Hm( fu<px|@3C15Jg ?O*{|k)+iofpܖU5ɪdT[qUfq&&WI6CpswϹ#ӿ{uhia fcV(77Z˟$(XJ'芰5⢅}}/~at5%}7/ےk[T;Q+CûU;̀J@S[Y- OZ" ({OvzMJg`z6+І8nn gJg?B]$nNBm8KCH*u8:Jq&t:6\ ׈P$p,p*l(l&T:;i#0RQƁI0$?7μ%DA2.H<<:.Sq``vNeyl_TوK'Jgǐ!>>82ZI̻q-_Rt~<ákٯ&6OAT:{ǒ*Wu!pE'R:pqQF+B LrŐ lDjFf@B!N`a3 CCRUlXU+URU*x1ToV̇hꉕQ9j $%U9{qƛH2k}| ziZNAF| ?|QrICyD_ׯkL9MJzOZdōh' RFJim Y"KսD񝕴E\uTl#`Eb61E@8 5A Z0熄!ҲR$Ͳq ^M)g&dzzt#~W9 $dKɯޡ - oT+w>tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000015.xlog0000664000000000000000000000100113306565107023110 0ustar rootrootXLOG 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {1: 15} պ />PPPPPPPA4ӌ!ubuntupreciseOպ u PPPPPPPA4պ !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (D PPPPPPPPA4S max_id!+պ :γ0PPPPPPPA4!temporarymemtxtemporaryÐպ Pΰ֋3PPPPPPPA4v0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.2/00000000000000000031.xlog0000664000000000000000000000041413306565107023115 0ustar rootrootXLOG 0.13 Server: 3083fa40-34fa-48da-8438-cf5d47f43f0a VClock: {1: 31} պ -J}PPPPPPP A4="S  !=uniqueպ -pǧPPPPPPP!A4  !=uniqueպ - PPPPPPP"A4  !=uniquetarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/fill.lua0000664000000000000000000000476513306560010021224 0ustar rootroot--- --- A script to generate some dataset used by migration.test.lua --- box.cfg{ rows_per_wal = 5 } box.schema.space.create("distro") box.space.distro:create_index('primary', { type = 'hash', unique = true, parts = {1, 'str', 2, 'str', 3, 'num'}}) box.space.distro:create_index('codename', { type = 'hash', unique = true, parts = {2, 'str'}}) box.space.distro:create_index('time', { type = 'tree', unique = false, parts = {4, 'num'}}) local function d(year, month, 
day) return os.time { year = year, month = month, day = day } end box.space.distro:insert({'debian', 'jessie', 80, d(2015, 4, 26)}) box.space.distro:insert({'debian', 'wheezy', 70, d(2013, 5, 04)}) box.space.distro:insert({'debian', 'squeeze', 60, d(2011, 2, 05)}) box.space.distro:insert({'debian', 'lenny', 50, d(2009, 2, 14)}) box.space.distro:insert({'debian', 'etch', 40, d(2007, 4, 8)}) box.space.distro:insert({'debian', 'sarge', 31, d(2005, 6, 6)}) box.space.distro:insert({'debian', 'woody', 30, d(2002, 7, 19)}) box.space.distro:insert({'ubuntu', 'wily', 1510, d(2015, 10, 22)}) box.space.distro:insert({'ubuntu', 'vivid', 1504, d(2015, 4, 23)}) box.space.distro:insert({'ubuntu', 'trusty', 1404, d(2014, 4, 17)}) box.space.distro:insert({'ubuntu', 'precise', 1510, d(2012, 4, 26)}) -- 1.6.5+ if box.space.distro.format ~= nil then local format={} format[1] = {name='os', type='str'} format[2] = {name='dist', type='str'} format[3] = {name='version', type='num'} format[4] = {name='time', type='num'} box.space.distro:format(format) end box.schema.space.create('temporary', { temporary = true }) box.schema.user.create('someuser', { password = 'somepassword' }) box.schema.user.grant('someuser', 'read,write', 'universe') box.session.su('someuser') box.schema.func.create('somefunc', { setuid = true }) box.session.su('admin') box.schema.user.revoke('someuser', 'read,write', 'universe') box.schema.role.create('somerole') box.schema.user.grant('someuser', 'execute', 'role', 'somerole') if _TARANTOOL == nil or _TARANTOOL < "1.6.6" then box.schema.user.grant('somerole', 'read,write', 'space', 'distro') box.schema.user.grant('public', 'execute', 'function', 'somefunc') else box.schema.role.grant('somerole', 'read,write', 'space', 'distro') box.schema.role.grant('public', 'execute', 'function', 'somefunc') end box.schema.func.create('someotherfunc') box.schema.user.grant('someuser', 'execute', 'function', 'someotherfunc') box.schema.user.grant('someuser', 'read,write', 'space', 
'temporary') os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.5/0000775000000000000000000000000013306565107020243 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.5/00000000000000000010.xlog0000664000000000000000000000065013306565107023117 0ustar rootrootXLOG 0.13 Version: 1.7.4-461-gc3ee56ad5 Instance: 5f66259f-f353-411c-b808-75e5a9751c78 VClock: {1: 10} պ +j Af; !debiansargeBպ +ΥXI Af;!debianwoody=7պ ,.o Af;@!ubuntuwilyV(պ -&CYӧAf;!ubuntuvividU8պ .OLfAf;!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.5/00000000000000000005.xlog0000664000000000000000000000064413306565107023126 0ustar rootrootXLOG 0.13 Version: 1.7.4-461-gc3ee56ad5 Instance: 5f66259f-f353-411c-b808-75e5a9751c78 VClock: {1: 5} պ ,ζDZ&Af;!debianjessiePU<պ ,ΝaAf;!debianwheezyFQպ -!*Af; !debiansqueezePAO g%[.Q/H+]3Ҟ.X0jVIM-,HV E!Qu^6ycz̿f`k+@^yܸtJ{}CJҰ&2oѥ#=܂8$%W؝Xmux""(! xL>J2ׁ`hխC7[SƶAӠeB<<l]:N[iWq$`LLɘTTop|fgPSy+ƛdE-(_+XB"pHmyFB#`r  &6}%QMFp"g+׹}3iTP#Cgw(&15JN:`OR)Hxͨhf$C( ( b BZ0] ~9wwOu#(zs=:7(J uJCiCT[]^3:ȝ$/TVVk_ple >{t9G}w1 4ͫe}(ݠrWj794 x**sNm=ۿe[Wj~i @|Ɂ czAcu} Du">SghK e^)~ka=.sQV/@9Gq בf/tjX4~QvelҒ^% 2p4h{M4t*tlsNPКd 4| VQkGv@O`tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.5/00000000000000000015.xlog0000664000000000000000000000104113306565107023117 0ustar rootrootXLOG 0.13 Version: 1.7.4-461-gc3ee56ad5 Instance: 5f66259f-f353-411c-b808-75e5a9751c78 VClock: {1: 15} պ /fAf;/!ubuntupreciseOպ uXlKAf; !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (9`Af;\ max_id!+պ :8Af;!!temporarymemtxtemporaryÐպ P>ΧAf;#`0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.7/0000775000000000000000000000000013306565107020244 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.7/version0000664000000000000000000000002313306565107021647 0ustar rootroot1.6.7-469-g1d364fb 
tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.7/00000000000000000010.xlog0000664000000000000000000000061013306565107023114 0ustar rootrootXLOG 0.12 Server: 0b42ea89-e2b0-4ce4-ba2f-cbdaf66c61ff VClock: {1: 10} պ +/+§PPPPPPP Aծ-Yy@!debiansargeBպ +xPPPPPPP Aծ-YyDS!debianwoody=7պ ,Å4PPPPPPP Aծ-YyE!ubuntuwilyV(պ -4X PPPPPPPAծ-YyE!ubuntuvividU8պ .jPPPPPPPAծ-YyF0!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.7/00000000000000000005.xlog0000664000000000000000000000060413306565107023123 0ustar rootrootXLOG 0.12 Server: 0b42ea89-e2b0-4ce4-ba2f-cbdaf66c61ff VClock: {1: 5} պ ,qPPPPPPPAծ-Yy4 !debianjessiePU<պ ,ΣRPPPPPPPAծ-Yy:1!debianwheezyFQպ -!PPPPPPPAծ-Yy>_!debiansqueezePPPPPPPAծ-Yyg7(!someotherfuncպ %μQPPPPPPPAծ-YyhR8!functiontarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.7/00000000000000000000.snap0000664000000000000000000001147013306565107023111 0ustar rootrootSNAP 0.12 Server: 0b42ea89-e2b0-4ce4-ba2f-cbdaf66c61ff VClock: {1: 0} պ < t8PPPPPPP!cluster$58798139-8f6a-4945-a824-4d3fec855708պ էPPPPPPP!max_idպ ̠dPPPPPPP!versionպ 6Σ}@PPPPPPP!_schemamemtxtypestrnamekeyպ ̷ξPPPPPP!_spacememtxnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̺VPPPPPP!_vspacesysviewnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̜ΫDPPPPPP! 
_indexmemtxnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ ̟fPPPPPP!!_vindexsysviewnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ rXPPPPPP !(_funcmemtxnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ u#ϧPPPPPP !)_vfuncsysviewnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ ̂ιPPPPPP !0_usermemtxnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̅Υ[PPPPPP !1_vusersysviewnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̜OC¦PPPPPP !8_privmemtxnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ ̟μ!@!$0b42ea89-e2b0-4ce4-ba2f-cbdaf66c61fftarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.7/00000000000000000015.xlog0000664000000000000000000000077713306565107023137 0ustar rootrootXLOG 0.12 Server: 0b42ea89-e2b0-4ce4-ba2f-cbdaf66c61ff VClock: {1: 15} պ /; PPPPPPPAծ-YyF!ubuntupreciseOպ u΃& PPPPPPPAծ-YyIy !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (2WPPPPPPPAծ-YyJ؄ max_id!+պ 8Ը:PPPPPPPAծ-YyL!temporarymemtxtemporaryպ PMPPPPPPPAծ-YyN0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.6/0000775000000000000000000000000013306565107020244 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.6/00000000000000000010.xlog0000664000000000000000000000065013306565107023120 0ustar rootrootXLOG 0.13 Version: 1.7.5-290-g4243700ae Instance: 465a31f0-63ff-41ee-a3b4-1f2091eafc6d VClock: {1: 10} պ +7 Aր w.%!debiansargeBպ +N  Aր w.pw!debianwoody=7պ ,ΪJ Aր w.r!ubuntuwilyV(պ -FAր w.t!ubuntuvividU8պ .Aր w.v-!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.6/00000000000000000005.xlog0000664000000000000000000000064413306565107023127 0ustar rootrootXLOG 0.13 Version: 1.7.5-290-g4243700ae Instance: 465a31f0-63ff-41ee-a3b4-1f2091eafc6d VClock: {1: 5} պ ,XAր w-!debianjessiePU<պ ,9̧Aր 
w.!debianwheezyFQպ -,뚧Aր w.#!debiansqueezekM]KME¸3s"Z􎜩W->"q ܩοdʸNĵԌbVPliU `dfPG)n8 0 ad0mnz H J(jxX4~ KgGÏ=eȷ8X- jI:܏!lA^qy2J/;⬀L>ƎV#:^$DQvY˧q1QzC';4+^jB:h};ń4 %4䁚JƲ[Z9 ~ Mjp }T]|t~-mz,et7[?},C]Y&A 2A| 8P %°'09sC 5gF_.&ǰpxH%QR(;ѣg:Q8\.dh ߓ;/c #%f~0 ;Eo[dyI2t<ɲ i; Dd/13`ꝾΊ=@"atRO>tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.7.6/00000000000000000015.xlog0000664000000000000000000000104113306565107023120 0ustar rootrootXLOG 0.13 Version: 1.7.5-290-g4243700ae Instance: 465a31f0-63ff-41ee-a3b4-1f2091eafc6d VClock: {1: 15} պ /^HAր w.w!ubuntupreciseOպ u'4Aր w.ɨ !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (b?Aր w.4 max_id!+պ :50Aր w.R!temporarymemtxtemporaryÐպ P΀ Aր w.^0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/0000775000000000000000000000000013306565107020245 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/version0000664000000000000000000000002313306565107021650 0ustar rootroot1.6.8-525-ga571ac0 tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000010.xlog0000664000000000000000000000061013306565107023115 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 10} պ +*F^PPPPPPP AյQ>t#!debiansargeBպ +8dPPPPPPP AյQ>|!debianwoody=7պ ,|,PPPPPPP AյQ>~!ubuntuwilyV(պ -ν1PPPPPPPAյQ>_!ubuntuvividU8պ .SUPPPPPPPAյQ>!ubuntutrusty|SOtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000005.xlog0000664000000000000000000000060413306565107023124 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 5} պ ,XPPPPPPPAյQ>VD!debianjessiePU<պ ,ΠUfPPPPPPPAյQ>a!debianwheezyFQպ -pPPPPPPPAյQ>l!debiansqueezeo!debianlenny2Iպ *ҥiPPPPPPP AյQ>q!debianetch(Ftarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000030.xlog0000664000000000000000000000020313306565107023115 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 30} պ 
$DGPPPPPPPAյQ>8!spacetarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000020.xlog0000664000000000000000000000054413306565107023124 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 20} պ !tPPPPPPPAյQ>8!roleպ %ΠPPPPPPPAյQ>؂8!universeպ (ѶւPPPPPPPAյQ>(!somefuncLUAպ #`fPPPPPPPAյQ>܂8 universeպ (r4W"PPPPPPPAյQ>΄0!someroleroletarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000000.xlog0000664000000000000000000000066313306565107023124 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 0} պ (Ί+JPPPPPPPAյQ= max_id!+պ ,,ՅPPPPPPPAյQ>=!distromemtxպ EUPPPPPPPAյQ>B8 !primaryhashuniqueÓstrstrnumպ :%PPPPPPPAյQ>K !codenamehashuniqueÑstrպ 6ο'PPPPPPPAյQ>Q( !timetreeunique‘numtarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000025.xlog0000664000000000000000000000054713306565107023134 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 25} պ !PPPPPPPAյQ>8!roleպ $Ί]PPPPPPPAյQ>ܣ8!spaceպ %΄1PPPPPPPAյQ>8!functionպ -wxPPPPPPPAյQ>鞂(!someotherfuncLUAպ %+)PPPPPPPAյQ>)8!functiontarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000000.snap0000664000000000000000000001147013306565107023112 0ustar rootrootSNAP 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 0} պ <wPPPPPPP!cluster$6820678b-206c-4a3e-95bf-76dc896e6182պ էPPPPPPP!max_idպ ̠dPPPPPPP!versionպ 6ދPPPPPPP!_schemamemtxtypestrnamekeyպ ̷]pPPPPPP!_spacememtxnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̺5QPPPPPP!_vspacesysviewnameidtypenumnameownertypenumnamenametypestrnameenginetypestrnamefield_counttypenumnameflagstypestrnameformattype*պ ̜ybPPPPPP! 
_indexmemtxnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ ̟iPPPPPP!!_vindexsysviewnameidtypenumnameiidtypenumnamenametypestrnametypetypestrnameoptstypearraynamepartstypearrayպ ruQPPPPPP !(_funcmemtxnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ uΞzXPPPPPP !)_vfuncsysviewnameidtypenumnameownertypenumnamenametypestrnamesetuidtypenumպ ̂'PPPPPP !0_usermemtxnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̅;APPPPPP !1_vusersysviewnameidtypenumnameownertypenumnamenametypestrnametypetypestrnameauthtype*պ ̜ PPPPPP !8_privmemtxnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ ̟QkPPPPPP!9_vprivsysviewnamegrantortypenumnamegranteetypenumnameobject_typetypestrnameobject_idtypenumnameprivilegetypenumպ JhŝPPPPPP!@_clustermemtxnameidtypenumnameuuidtypestrպ /kPPPPPP !primarytreeuniqueÑstrպ /w_PPPPPP !primarytreeuniqueÑnumպ -ܟڧPPPPPP !ownertreeunique‘numպ ,TqԧPPPPPP !nametreeuniqueÑstrպ /δ?姦PPPPPP !primarytreeuniqueÑnumպ -8@!$893d1e1b-a369-48e8-bb0e-206994e80d63tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade/1.6.8/00000000000000000015.xlog0000664000000000000000000000100113306565107023115 0ustar rootrootXLOG 0.12 Server: 893d1e1b-a369-48e8-bb0e-206994e80d63 VClock: {1: 15} պ /η!sܧPPPPPPPAյQ>!ubuntupreciseOպ uDCPPPPPPPAյQ>Մ !=nameostypestrnamedisttypestrnameversiontypenumnametimetypenumպ (+ ԧPPPPPPPAյQ>  max_id!+պ :PPPPPPPAյQ>!temporarymemtxtemporaryÐպ PPPPPPPPAյQ>0!someuseruserchap-sha12qvbQIHM4zMWhAmm2xGeGNjqoHM=tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade.lua0000664000000000000000000000025013306560010020257 0ustar rootroot#!/usr/bin/env tarantool box.cfg { listen = os.getenv("LISTEN"), memtx_memory = 107374182 } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/xlog/reader.test.lua0000664000000000000000000000343413306560010021057 0ustar rootroot-- test for xlog_reader module -- consists of 3 parts: -- 1) ok 
snap/xlog reader -- 2) broken files reader (crc sum is invalid, bad header [version/type]) -- 3) before box.cfg and after box.cfg fio = require('fio') fun = require('fun') json = require('json') xlog = require('xlog').pairs trun = require('test_run').new() pattern_prefix = fio.pathjoin(os.getenv("SOURCEDIR"), "test/xlog/reader") pattern_prefix_re = pattern_prefix:gsub("/", "\\/") trun:cmd(("push filter '%s' to '%s'"):format(pattern_prefix_re, "")) pattern_ok_v12 = fio.pathjoin(pattern_prefix, "v12/") pattern_ok_v13 = fio.pathjoin(pattern_prefix, "v13/") trun:cmd("setopt delimiter ';'") function collect_results(file) local val = {} for k, v in xlog(file) do table.insert(val, setmetatable(v, { __serialize = "map"})) end return val end; fun.iter({ fio.pathjoin(pattern_ok_v12, '00000000000000000000.ok.snap'), fio.pathjoin(pattern_ok_v12, '00000000000000000000.ok.xlog'), }):map(collect_results):totable(); collectgarbage('collect'); fun.iter({ fio.pathjoin(pattern_ok_v13, '00000000000000000000.ok.snap'), fio.pathjoin(pattern_ok_v13, '00000000000000000000.ok.xlog'), }):map(collect_results):totable(); collectgarbage('collect'); check_error = function(name, err) local path = fio.pathjoin(pattern_prefix, name) local stat, oerr = pcall(collect_results, path) if stat == true or not string.find(tostring(oerr), err) then return false, oerr end return true end; trun:cmd("setopt delimiter ''"); check_error("version.bad.xlog", "file format version") check_error("format.bad.xlog", "not support 'SNOP' file type") collect_results(fio.pathjoin(pattern_prefix, "crc.bad.xlog")) collect_results(fio.pathjoin(pattern_prefix, "eof.bad.xlog")) trun:cmd('clear filter') tarantool_1.9.1.26.g63eb81e3c/test/xlog/legacy.result0000664000000000000000000000572413306565107020660 0ustar rootroottest_run = require('test_run').new() --- ... version = test_run:get_cfg('version') --- ... -- Use 1.7.5 snapshot to check that space formats are not checked. -- It allows to use >= 1.6.5 format versions. 
test_run:cmd('create server legacy with script="xlog/upgrade.lua", workdir="xlog/upgrade/1.7.5"') --- - true ... test_run:cmd("start server legacy") --- - true ... test_run:switch('legacy') --- - true ... box.space._schema:get({'version'}) --- - ['version', 1, 7, 5] ... _space = box.space._space --- ... -- -- Check _space 1.7.5 format. -- _space:replace{600, 1, 'test', 'memtx', 0} --- - [600, 1, 'test', 'memtx', 0] ... box.space.test:drop() --- ... -- -- Check _index 1.6.5 format. -- s = box.schema.space.create('s') --- ... pk = s:create_index('pk') --- ... sk = box.space._index:insert{s.id, 2, 'sk', 'rtree', 0, 1, 2, 'array'} --- ... s.index.sk.parts --- - - type: array is_nullable: false fieldno: 3 ... s.index.sk:drop() --- ... box.space._index:insert{s.id, 2, 's', 'rtree', 0, 1, 2, 'thing'} --- - error: 'Wrong index parts: unknown field type; expected field1 id (number), field1 type (string), ...' ... box.space._index:insert{s.id, 2, 's', 'rtree', 0, 1, 2, 'array', 'wtf'} --- - error: 'Wrong record in _index space: got {number, number, string, string, number, number, number, string, string}, expected {space id (number), index id (number), name (string), type (string), is_unique (number), part count (number) part0 field no (number), part0 field type (string), ...}' ... box.space._index:insert{s.id, 2, 's', 'rtree', 0, 0} --- - error: 'Can''t create or modify index ''s'' in space ''s'': part count must be positive' ... s:drop() --- ... -- -- Check 1.6.5 space flags. -- s = box.schema.space.create('t', { temporary = true }) --- ... index = s:create_index('primary', { type = 'hash' }) --- ... s:insert{1, 2, 3} --- - [1, 2, 3] ... _ = _space:update(s.id, {{'=', 6, 'temporary'}}) --- ... s.temporary --- - true ... _ = _space:update(s.id, {{'=', 6, ''}}) --- - error: 'Can''t modify space ''t'': can not switch temporary flag on a non-empty space' ... s.temporary --- - true ... s:truncate() --- ... _ = _space:update(s.id, {{'=', 6, 'no-temporary'}}) --- ... 
s.temporary --- - false ... _ = _space:update(s.id, {{'=', 6, ',:asfda:temporary'}}) --- ... s.temporary --- - false ... _ = _space:update(s.id, {{'=', 6, 'a,b,c,d,e'}}) --- ... s.temporary --- - false ... _ = _space:update(s.id, {{'=', 6, 'temporary'}}) --- ... s.temporary --- - true ... s:get{1} --- ... s:insert{1, 2, 3} --- - [1, 2, 3] ... _ = _space:update(s.id, {{'=', 6, 'temporary'}}) --- ... s.temporary --- - true ... _ = _space:update(s.id, {{'=', 6, 'no-temporary'}}) --- - error: 'Can''t modify space ''t'': can not switch temporary flag on a non-empty space' ... s.temporary --- - true ... s:delete{1} --- - [1, 2, 3] ... _ = _space:update(s.id, {{'=', 6, 'no-temporary'}}) --- ... s:drop() --- ... test_run:switch('default') --- - true ... test_run:cmd('stop server legacy') --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/xlog/suite.cfg0000664000000000000000000000053313306565107017757 0ustar rootroot{ "upgrade.test.lua": { "1.6.5": {"version": "1.6.5"}, "1.6.6": {"version": "1.6.6"}, "1.6.7": {"version": "1.6.7"}, "1.6.8": {"version": "1.6.8"}, "1.6.9": {"version": "1.6.9"}, "1.7.2": {"version": "1.7.2"}, "1.7.5": {"version": "1.7.5"}, "1.7.6": {"version": "1.7.6"} } } tarantool_1.9.1.26.g63eb81e3c/test/xlog/upgrade.test.lua0000664000000000000000000000167413306560010021250 0ustar rootroottest_run = require('test_run').new() version = test_run:get_cfg('version') work_dir = "xlog/upgrade/"..version test_run:cmd('create server upgrade with script="xlog/upgrade.lua", workdir="'..work_dir..'"') test_run:cmd("start server upgrade") test_run:switch('upgrade') test_run:cmd(string.format("push filter '%s' to ''", box.info.cluster.uuid)) -- -- Upgrade -- box.schema.upgrade() -- -- Migrated data -- box.space._schema:select() box.space._space:select() box.space._index:select() box.space._user:select() box.space._func:select() box.space._collation:select() box.space._priv:select() box.space._vspace ~= nil box.space._vindex ~= nil box.space._vuser ~= nil box.space._vpriv 
~= nil -- a test space r = box.space.distro:select() _ = table.sort(r, function(left, right) return tostring(left) < tostring(right) end) r test_run:cmd("clear filter") test_run:switch('default') test_run:cmd('stop server upgrade') test_run = nil tarantool_1.9.1.26.g63eb81e3c/test/xlog/big_tx.test.lua0000664000000000000000000000041213306560010021062 0ustar rootrootenv = require('test_run').new() digest = require('digest') _ = box.schema.space.create('big_tx'):create_index('pk') t = box.space.big_tx:insert({1, digest.urandom(512 * 1024)}) env:cmd('restart server default') #box.space.big_tx:select() box.space.big_tx:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/0000775000000000000000000000000013306565107016334 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/stress.test.lua0000664000000000000000000000031313306560010021321 0ustar rootroot#!/usr/bin/env tarantool test_run = require('test_run').new() require('stress').stress(10) test_run:cmd('restart server default') require('stress').stress(10) test_run:cmd('restart server default') tarantool_1.9.1.26.g63eb81e3c/test/vinyl/tx_conflict.test.lua0000664000000000000000000002634213306560010022324 0ustar rootroot-- The test runs loop of given number of rounds. -- Every round does the following: -- The test starts several concurrent transactions in vinyl. -- The transactions make some read/write operations over several keys in -- a random order and commit at a random moment. -- After that all transactions are sorted in order of commit -- With the sublist of read-write transactions committed w/o conflict: -- Test tries to make these transactions in memtex, one tx after another, -- without interleaving and compares select results with vinyl to make sure -- if the transaction could be serialized in order of commit or not -- With the sublist of read-write transactions committed with conflict: -- Test tries to make the same operations end ensures that the read results -- are not possible in memtx. 
-- With the sublist of read only transactions: -- Test tries to insert these transactions between other transactions and checks -- that it possible to get same results. test_run = require('test_run').new() txn_proxy = require('txn_proxy') --settings num_tx = 10 --number of concurrent transactions num_key = 5 --number of keys that transactions use num_tests = 50 --number of test rounds to run txs = {} order_of_commit = {} num_committed = 0 stmts = {} errors = {} initial_data = {} initial_repro = "" ops = {'begin', 'commit', 'select', 'replace', 'upsert', 'delete'} -- ignore case of unnecessary conflict: -- s:delete{1} -- t1:begin() t1:select{1} t1:replace{2} s:replace{1} s:delete{1} t1:commit() ignore_unnecessary_conflict1 = true --avoid first upsert in transaction --fails if num_tests = 1000 ignore_unnecessary_conflict2 = true -- New point iterator introduced additional possible conflicts that -- happens during run page read yield. -- This flag disables 'could be serializable' checks. latest_broken = true test_run:cmd("setopt delimiter ';'") s1 = box.schema.create_space('test1', { engine = 'vinyl' }) i1 = s1:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) s2 = box.schema.create_space('test2', { engine = 'memtx' }) i2 = s2:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) if ignore_unnecessary_conflict1 then q1 = box.schema.create_space('testq1', { engine = 'vinyl' }) iq1 = q1:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) q2 = box.schema.create_space('testq2', { engine = 'memtx' }) iq2 = q2:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) end; for i=1,num_tx do txs[i] = {con = txn_proxy.new()} end; function my_equal(a, b) local typea = box.tuple.is(a) and 'table' or type(a) local typeb = box.tuple.is(b) and 'table' or type(b) if typea ~= typeb then return false elseif typea ~= 'table' then return a == b end for k,v in pairs(a) do if not my_equal(b[k], v) then return false end end for k,v in pairs(b) do if not 
my_equal(a[k], v) then return false end end return true end; unique_value = 0 function get_unique_value() unique_value = unique_value + 1 return unique_value end; function prepare() order_of_commit = {} num_committed = 0 stmts = {} for i=1,num_tx do txs[i].started = false txs[i].ended = false if math.random(3) == 1 then txs[i].read_only = true else txs[i].read_only = false end txs[i].read_only_checked = false txs[i].conflicted = false txs[i].possible = nil txs[i].num_writes = 0 end s1:truncate() s2:truncate() if ignore_unnecessary_conflict1 then q1:truncate() q2:truncate() end for i=1,num_key do local r = math.random(5) local v = get_unique_value() if (r >= 2) then s1:replace{i, v} s2:replace{i, v } if ignore_unnecessary_conflict1 then q1:replace{i, v} q2:replace{i, v } end end if (r == 2) then s1:delete{i} s2:delete{i} end end initial_data = s1:select{} initial_repro = "" initial_repro = initial_repro .. "s = box.schema.space.create('test', {engine = 'vinyl', if_not_exists = true})\n" initial_repro = initial_repro .. "i1 = s:create_index('test', {parts = {1, 'uint'}, if_not_exists = true})\n" initial_repro = initial_repro .. "txn_proxy = require('txn_proxy')\n" for _,tuple in pairs(initial_data) do initial_repro = initial_repro .. "s:replace{" .. tuple[1] .. ", " .. tuple[2] .. "} " end end; function apply(t, k, op) local tx = txs[t] local v = nil local q = nil local k = k local repro = nil if op == 'begin' then if tx.started then table.insert(errors, "assert #1") end tx.started = true tx.con:begin() k = nil repro = "c" .. t .. " = txn_proxy.new() c" .. t .. ":begin()" repro = "p(\"c" .. t .. ":begin()\") " .. 
repro elseif op == 'commit' then if tx.ended or not tx.started then table.insert(errors, "assert #2") end tx.ended = true table.insert(order_of_commit, t) num_committed = num_committed + 1 local res = tx.con:commit() if res ~= "" and res[1]['error'] then tx.conflicted = true else tx.select_all = s1:select{} if tx.num_writes == 0 then tx.read_only = true end end k = nil repro = "c" .. t .. ":commit()" repro = "p(\"" .. repro .. "\", " .. repro .. ", s:select{})" elseif op == 'select' then v = tx.con('s1:select{'..k..'}') if ignore_unnecessary_conflict1 then q = tx.con('q1:select{'..k..'}') end repro = "c" .. t .. "('s:select{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'replace' then v = get_unique_value() tx.con('s1:replace{'..k..','..v..'}') if ignore_unnecessary_conflict1 then tx.con('q1:replace{'..k..','..v..'}') end tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:replace{" .. k .. ", " .. v .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'upsert' then v = math.random(100) tx.con('s1:upsert({'..k..','..v..'}, {{"+", 2,'..v..'}})') if ignore_unnecessary_conflict1 then tx.con('q1:upsert({'..k..','..v..'}, {{"+", 2,'..v..'}})') end tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:upsert({" .. k .. ", " .. v .. "}, {{\\'+\\', 2, " .. v .. "}})')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'delete' then tx.con('s1:delete{'..k..'}') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:delete{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. 
")" end table.insert(stmts, {t=t, k=k, op=op, v=v, q=q, repro=repro}) end; function act() while true do local t = math.random(num_tx) local k = math.random(num_key) local tx = txs[t] if not tx.ended then local op_no = 0 if (tx.read_only) then op_no = math.random(3) else op_no = math.random(6) end local op = ops[op_no] if ignore_unnecessary_conflict2 then local were_ops = false for i,st in ipairs(stmts) do if st.t == t and st.k == k and st.op ~= 'commit' then were_ops = true end end if op == 'upsert' and not were_ops then op = 'replace' end end if op ~= 'commit' or tx.started then if not tx.started then apply(t, k, 'begin') end if op ~= 'begin' then apply(t, k, op) end end return end end end; function is_rdonly_tx_possible(t) for _,s in pairs(stmts) do if s.t == t and s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end end end return true end; function try_to_apply_tx(t) for _,s in pairs(stmts) do if s.t == t then if s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end if ignore_unnecessary_conflict1 then cmp_with = {q2:select{s.k}} if not my_equal(s.q, cmp_with) then return false end end elseif s.op == 'replace' then s2:replace{s.k, s.v} if ignore_unnecessary_conflict1 then q2:replace{s.k, s.v } end elseif s.op == 'upsert' then s2:upsert({s.k, s.v}, {{'+', 2, s.v}}) if ignore_unnecessary_conflict1 then q2:upsert({s.k, s.v}, {{'+', 2, s.v}}) end elseif s.op == 'delete' then s2:delete{s.k} end end end return true end; function check_rdonly_possibility() for i=1,num_tx do if txs[i].read_only and not txs[i].possible then if is_rdonly_tx_possible(i) then txs[i].possible = true end end end end; function check() local had_errors = (errors[1] ~= nil) for i=1,num_tx do if txs[i].read_only then if txs[i].conflicted then table.insert(errors, "read-only conflicted " .. 
i) end txs[i].possible = false end end check_rdonly_possibility() for _,t in ipairs(order_of_commit) do if not txs[t].read_only then if txs[t].conflicted then box.begin() if try_to_apply_tx(t) and not latest_broken then table.insert(errors, "could be serializable " .. t) end box.rollback() else if not try_to_apply_tx(t) then table.insert(errors, "not serializable " .. t) end if not my_equal(txs[t].select_all, s2:select{}) then table.insert(errors, "results are different " .. t) end check_rdonly_possibility() end end end for i=1,num_tx do if txs[i].read_only and not txs[i].possible then table.insert(errors, "not valid read view " .. i) end end if errors[1] and not had_errors then print("p(\"" .. errors[1] .. "\")") print(initial_repro) print("p(\"" .. initial_repro .. "\")") print('----------------------') for _,stmt in ipairs(stmts) do print(stmt.repro) end io.flush() end end; for i = 1, num_tests do prepare() while num_committed ~= num_tx do act() end check() end; test_run:cmd("setopt delimiter ''"); errors s1:drop() s2:drop() if ignore_unnecessary_conflict1 then q1:drop() q2:drop() end tarantool_1.9.1.26.g63eb81e3c/test/vinyl/snapshot.test.lua0000664000000000000000000000324213306560010021641 0ustar rootroottest_run = require('test_run').new() fiber = require 'fiber' fio = require 'fio' xlog = require 'xlog' s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') -- Start a few fibers populating the space in the background. n_workers = 3 c = fiber.channel(n_workers) test_run:cmd("setopt delimiter ';'") for i=1,n_workers do fiber.create(function() for j=i,1000,n_workers do s:insert{j} end c:put(true) end) end test_run:cmd("setopt delimiter ''"); -- Let the background fibers run. fiber.sleep(0.001) -- Concurrent checkpoint. box.snapshot() -- Join background fibers. for i=1,n_workers do c:get() end -- Get list of files from the last checkpoint. 
files = box.backup.start() -- Extract the last checkpoint LSN and find -- max LSN stored in run files. snap_lsn = -1 run_lsn = -1 test_run:cmd("setopt delimiter ';'") for _, path in ipairs(files) do suffix = string.gsub(path, '.*%.', '') if suffix == 'snap' then snap_lsn = tonumber(fio.basename(path, '.snap')) end if suffix == 'run' then for lsn, _ in xlog.pairs(path) do if run_lsn < lsn then run_lsn = lsn end end end end test_run:cmd("setopt delimiter ''"); snap_lsn >= 0 run_lsn >= 0 box.backup.stop() -- Check that run files only contain statements -- inserted before checkpoint. snap_lsn == run_lsn or {snap_lsn, run_lsn} s:drop() -- -- gh-2614 about broken vy_run_iterator_start_from. -- s = box.schema.space.create('test', {engine = 'vinyl'}) p = s:create_index('pk') s:replace{100} s:replace{101} s:replace{102} s:replace{103} box.snapshot() s:select({99}, {iterator = box.index.LE, limit = 10}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/iterator.test.lua0000664000000000000000000004347113306560010021643 0ustar rootroot-- -- Verify that the iterator uses the state of the space before the iterator -- was created. -- env = require('test_run') test_run = env.new() create_iterator = require('utils').create_iterator iterator_next = function(iter) return iter.next() end iterate_over = function(iter) return iter.iterate_over() end -- -- Following tests verify that combinations -- of various commands are worked correctly. -- Combinations mentioned above are explicitly described in -- write_iterator.test.lua. 
-- space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary') -- -- DELETE followed by UPSERT -- -- 1) create iterator at first iter_obj = create_iterator(space) space:insert({1}) space:insert({2}) space:insert({3}) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- 2) create iterator after initializing space:insert({1}) space:insert({2}) space:insert({3}) iter_obj = create_iterator(space) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- 3) create iterator within test case space:insert({1}) space:insert({2}) space:insert({3}) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) iter_obj = create_iterator(space) space:select{} space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- -- UPSERT followed by DELETE -- -- 1) create iterator at first iter_obj = create_iterator(space) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) iter_obj = create_iterator(space) space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:delete{1} space:delete{2} iter_obj = create_iterator(space) space:delete{3} space:select{} iterate_over(iter_obj) -- -- UPSERT followed by UPSERT -- -- 1) create iterator at first iter_obj = 
create_iterator(space) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) iter_obj = create_iterator(space) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:upsert({1}, {{'!', 2, 1}}) iter_obj = create_iterator(space) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} iterate_over(iter_obj) space:truncate() -- -- UPSERT followed by REPLACE -- -- 1) create iterator at first iter_obj = create_iterator(space) space:upsert({1}, {{'!', 2, 1}}) space:replace({1, 10}) space:upsert({2}, {{'!', 2, 2}}) space:replace({2, 20}) space:upsert({3}, {{'!', 2, 3}}) space:replace({3, 30}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) iter_obj = create_iterator(space) space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 1}}) space:replace({1, 10}) space:upsert({2}, {{'!', 2, 2}}) space:replace({2, 20}) space:upsert({3}, {{'!', 2, 3}}) iter_obj = create_iterator(space) space:replace({3, 30}) space:select{} iterate_over(iter_obj) space:truncate() -- -- REPLACE followed by UPSERT -- -- 1) create iterator at first iter_obj = create_iterator(space) space:replace({1, 10}) space:replace({2, 20}) 
space:replace({3, 30}) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) iter_obj = create_iterator(space) space:upsert({1}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) space:upsert({1}, {{'!', 2, 1}}) iter_obj = create_iterator(space) space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:select{} iterate_over(iter_obj) space:truncate() -- -- REPLACE followed by DELETE -- -- 1) create iterator at first iter_obj = create_iterator(space) space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- 2) create iterator after initializing space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) iter_obj = create_iterator(space) space:delete{1} space:delete{2} space:delete{3} space:select{} iterate_over(iter_obj) -- 3) create iterator within test case space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) space:delete{1} space:delete{2} iter_obj = create_iterator(space) space:delete{3} space:select{} iterate_over(iter_obj) -- -- DELETE followed by REPLACE -- -- 1) create iterator at first space:insert({1, 10}) space:insert({2, 20}) space:insert({3, 30}) iter_obj = create_iterator(space) space:delete({1}) space:delete({2}) space:delete({3}) space:replace({1}) space:replace({2}) space:replace({3}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:insert({1, 10}) space:insert({2, 20}) space:insert({3, 30}) space:delete({1}) space:delete({2}) space:delete({3}) iter_obj = 
create_iterator(space) space:replace({1}) space:replace({2}) space:replace({3}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:insert({1, 10}) space:insert({2, 20}) space:insert({3, 30}) space:delete({1}) space:delete({2}) space:delete({3}) space:replace({1}) space:replace({2}) iter_obj = create_iterator(space) space:replace({3}) space:select{} iterate_over(iter_obj) space:truncate() -- -- REPLACE followed by REPLACE -- -- 1) create iterator at first iter_obj = create_iterator(space) space:replace({1}) space:replace({2}) space:replace({3}) space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:replace({1}) space:replace({2}) space:replace({3}) iter_obj = create_iterator(space) space:replace({1, 10}) space:replace({2, 20}) space:replace({3, 30}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:replace({1}) space:replace({2}) space:replace({3}) space:replace({1, 10}) space:replace({2, 20}) iter_obj = create_iterator(space) space:replace({3, 30}) space:select{} iterate_over(iter_obj) space:truncate() -- -- single UPSERT (for completeness) -- -- 1) create iterator at first iter_obj = create_iterator(space) space:upsert({1}, {{'!', 2, 10}}) space:upsert({2}, {{'!', 2, 20}}) space:upsert({3}, {{'!', 2, 30}}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 10}}) iter_obj = create_iterator(space) space:upsert({2}, {{'!', 2, 20}}) space:upsert({3}, {{'!', 2, 30}}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 10}}) space:upsert({2}, {{'!', 2, 20}}) space:upsert({3}, {{'!', 2, 30}}) iter_obj = create_iterator(space) space:select{} iterate_over(iter_obj) space:truncate() -- -- single REPLACE (for completeness) 
-- -- 1) create iterator at first iter_obj = create_iterator(space) space:replace({1}) space:replace({2}) space:replace({3}) space:select{} iterate_over(iter_obj) space:truncate() -- 2) create iterator after initializing space:replace({1}) space:replace({2}) iter_obj = create_iterator(space) space:replace({3}) space:select{} iterate_over(iter_obj) space:truncate() -- 3) create iterator within test case space:replace({1}) space:replace({2}) space:replace({3}) iter_obj = create_iterator(space) space:select{} iterate_over(iter_obj) space:truncate() space:drop() -- -- gh-1797 -- Test another iterator types and move the iterator -- during the space is modified, try to pass keys in pairs() -- invocations. -- -- Test iterator type EQ space1 = box.schema.space.create('test1', { engine = 'vinyl' }) pk = space1:create_index('primary') space1:replace({1}) space1:replace({2}) space1:replace({3}) space1:upsert({3}, {{'!', 2, 3}}) space1:upsert({5}, {{'!', 2, 5}}) iter_obj_sp1 = create_iterator(space1, 3, {iterator = box.index.EQ}) space1:replace({6}) iterator_next(iter_obj_sp1) space1:replace({8}) space1:select{} iterate_over(iter_obj_sp1) space1:drop() -- Test iterator type GT space2 = box.schema.space.create('test2', { engine = 'vinyl' }) pk = space2:create_index('primary') space2:replace({1}) space2:replace({2}) space2:replace({3}) space2:replace({4}) space2:replace({5}) iter_obj_sp2 = create_iterator(space2, 3, {iterator = box.index.GT}) -- Test iterator type GE space3 = box.schema.space.create('test3', { engine = 'vinyl' }) pk = space3:create_index('primary') space3:replace({1}) space3:replace({2}) space3:replace({3}) space3:replace({4}) space3:replace({5}) iter_obj_sp3 = create_iterator(space3, 3, {iterator = box.index.GE}) -- Test iterator type LT and LE simultaneously space4 = box.schema.space.create('test4', { engine = 'vinyl' }) pk = space4:create_index('primary') space4:replace({1}) space4:replace({2}) space4:replace({3}) space4:upsert({3}, {{'!', 2, 3}}) 
space4:upsert({5}, {{'!', 2, 5}}) iter_obj_sp4 = create_iterator(space4, 3, {iterator = box.index.LE}) iter_obj_sp4_2 = create_iterator(space4, 3, {iterator = box.index.LT}) space4:replace({6}) -- Snapshot for all spaces box.snapshot() -- Continue GT space2:replace({6}) iterator_next(iter_obj_sp2) space2:replace({8}) space2:select{} -- Continue GE space3:replace({6}) iterator_next(iter_obj_sp3) space3:replace({8}) space3:select{} -- Continue LT and LE iterator_next(iter_obj_sp4) space4:replace({8}) iterator_next(iter_obj_sp4_2) space4:select{} -- Snapshot for all spaces box.snapshot() -- Continue GT iterate_over(iter_obj_sp2) space2:truncate() space2:drop() -- Continue GE iterate_over(iter_obj_sp3) space3:truncate() space3:drop() -- Continue LT and LE iterate_over(iter_obj_sp4) iterate_over(iter_obj_sp4_2) space4:truncate() space4:drop() -- -- Test same with multiple indexes. -- space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary') idx2 = space:create_index('idx2', { parts = {2, 'unsigned'} }) idx3 = space:create_index('idx3', { parts = {3, 'integer'}, unique = false }) -- Test iterator type EQ space:select{} iter_obj = create_iterator(space, 1, {iterator = 'EQ'}) space:replace({1, 2, 3}) space:delete({1}) space:replace({1, 1, 1}) space:upsert({1, 1, 1}, {{'+', 2, 1}, {'+', 3, 2}}) space:select{} iterate_over(iter_obj) iter_obj2 = create_iterator(idx2, 2, {iterator = 'EQ'}) space:delete({1}) iterate_over(iter_obj2) space:truncate() -- Test iterators inside the transaction, but before create several iterators with -- various space states. 
space:replace({1, 1, 1}) space:replace({2, 4, 1}) space:replace({3, 8, 1}) iter_obj = create_iterator(space, 2, {iterator = 'GT'}) -- must return only 3 space:replace({4, 16, -1}) space:replace({5, 32, -10}) iter_obj2 = create_iterator(idx3, 0, {iterator = 'LE'}) -- must return -1 and -10 space:replace({6, 64, -10}) iter_obj3 = create_iterator(idx3, 0, {iterator = 'GE'}) -- must return {1} * 3 box.begin() space:replace({7, 128, 20}) iter_obj4 = create_iterator(space) -- must fail after rollback box.rollback() space:select{} iterate_over(iter_obj) iterate_over(iter_obj2) iterate_over(iter_obj3) iterate_over(iter_obj4) space:truncate() -- Iterate within transaction space:replace({1, 1, 1}) box.begin() space:replace({2, 2, 1}) iter_obj = create_iterator(pk, 1, {iterator = 'GE'}) space:replace({3, 3, 10}) iter_obj2 = create_iterator(idx3, 20, {iterator = 'LT'}) space:replace({4, 4, 15}) space:replace({5, 5, 25}) -- Must print all, include tuples added after the iterator creation -- because of the opened transaction presense. 
iterate_over(iter_obj) iterator_next(iter_obj2) space:replace({12, 12, 12}) iterator_next(iter_obj2) space:replace({9, 9, 9}) iterate_over(iter_obj2) box.commit() space:truncate() -- Create the iterator before the transaction, but iterate inside space:replace({1, 1, 1}) space:replace({2, 2, 2}) iter_obj = create_iterator(pk) iter_obj2 = create_iterator(idx2, 2, {iterator = 'GE'}) space:replace({3, 3, 3}) box.begin() space:replace({4, 4, 4}) iterate_over(iter_obj) iterate_over(iter_obj2) box.commit() space:truncate() -- Create the iterator inside the transaction, but iterate outside space:replace({1, 1, 1}) box.begin() space:replace({2, 2, 2}) iter_obj = create_iterator(pk) space:replace({3, 3, 3}) box.commit() iterate_over(iter_obj) space:truncate() -- Create the iterator inside the transaction before any other actions -- and iterate inside space:replace({1, 1, 1}) box.begin() iter_obj = create_iterator(pk) space:replace({2, 2, 2}) iterate_over(iter_obj) box.commit() space:drop() space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary', { parts = { 1, 'uint', 2, 'uint' } }) box.begin() space:replace({1, 1}) space:replace({2, 2}) space:select({1, 1}, {iterator = 'GT'}) space:select({1, 1}, {iterator = 'GE'}) space:select({}, {iterator = 'GE'}) space:select({}) space:select({}, {iterator = 'LE'}) space:select({1}, {iterator = 'GT'}) space:select({1}, {iterator = 'GE'}) space:select({1}, {iterator = 'LT'}) space:select({1}, {iterator = 'LE'}) space:select({2}, {iterator = 'GT'}) space:select({2}, {iterator = 'GE'}) space:select({2}, {iterator = 'LT'}) space:select({2}, {iterator = 'LE'}) box.commit() space:drop() --make runs with more than one record with every key s = box.schema.space.create('test', { engine = 'vinyl' }) pk = s:create_index('primary', { parts = { 1, 'uint' } }) for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end itr = create_iterator(s, {}, {}) iterator_next(itr) for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end 
iterator_next(itr) box.snapshot() -- create last-level run iterator_next(itr) for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end iterator_next(itr) box.snapshot() -- create not-last-level run iterator_next(itr) for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end iterator_next(itr) s:select{1} s:drop() -- gh-2394 -- -- Check GE/LE iterators in a transaction involving several spaces. -- test_run:cmd("setopt delimiter ';'") s = {} for i=1,3 do s[i] = box.schema.space.create('test'..i, { engine = 'vinyl' }) _ = s[i]:create_index('primary') s[i]:insert{20, 'B'..i} s[i]:insert{40, 'D'..i} end test_run:cmd("setopt delimiter ''"); box.begin() for i=1,3 do s[i]:insert{10, 'A'..i} s[i]:insert{30, 'C'..i} s[i]:insert{50, 'E'..i} end s[1]:select({}, {iterator = 'GE'}) s[1]:select({}, {iterator = 'LE'}) s[2]:select({}, {iterator = 'GE'}) s[2]:select({}, {iterator = 'LE'}) s[3]:select({}, {iterator = 'GE'}) s[3]:select({}, {iterator = 'LE'}) box.rollback() for i=1,3 do s[i]:drop() end sm = box.schema.create_space('sm', { engine = 'memtx'}) im1 = sm:create_index('i1', { type = 'tree', parts = {1,'unsigned'}, unique = true }) im2 = sm:create_index('i2', { type = 'tree', parts = {2,'unsigned'}, unique = true }) sv = box.schema.create_space('sv', { engine = 'vinyl'}) iv1 = sv:create_index('i1', { type = 'tree', parts = {1,'unsigned'}, unique = true }) iv2 = sv:create_index('i2', { type = 'tree', parts = {2,'unsigned'}, unique = true }) test_run:cmd("setopt delimiter ';'") function f() for i = 1,100 do local arr = {} for j = 1,100 do table.insert(arr, {math.random(1000), math.random(1000)}) end box.begin() for _,t in pairs(arr) do pcall(sm.replace, sm, t) end box.commit() box.begin() for _,t in pairs(arr) do pcall(sv.replace, sv, t) end box.commit() end end function compare(a, b) if #a ~= #b then return "different sizes" end local c = #a for i = 1,c do if a[i][1] ~= b[i][1] or a[i][2] ~= b[i][2] then return "different data" end end return "equal" end test_run:cmd("setopt delimiter ''"); 
f() compare(sm:select{}, sv:select{}) compare(im1:select{}, iv1:select{}) compare(im2:select{}, iv2:select{}) sv:drop() sm:drop() s = box.schema.space.create('test', { engine = 'vinyl' }) pk = s:create_index('primary', { parts = { 1, 'uint' } }) s:replace{0, 0} s:replace{1, 10} box.begin() s:select{0 } txn_proxy = require('txn_proxy') c = txn_proxy.new() c("s:replace{0, 1}") s:upsert({1, 1}, {{'+', 2, 5}}) s:select{0} s:select{1} box.commit() s:drop() s = box.schema.space.create('test', { engine = 'vinyl' }) i = s:create_index('primary', { parts = { 1, 'uint' } }) s:replace{1} s:replace{2} s:replace{3} s:select{} box.begin() gen,param,state = i:pairs({0}, {iterator = 'GE'}) state, value = gen(param, state) value s:delete{2} state, value = gen(param, state) value box.commit() s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/suite.ini0000664000000000000000000000075113306560010020155 0ustar rootroot[default] core = tarantool description = vinyl integration tests script = vinyl.lua release_disabled = errinj.test.lua errinj_gc.test.lua errinj_vylog.test.lua partial_dump.test.lua quota_timeout.test.lua recovery_quota.test.lua config = suite.cfg lua_libs = suite.lua stress.lua large.lua txn_proxy.lua ../box/lua/utils.lua use_unix_sockets = True long_run = stress.test.lua large.test.lua write_iterator_rand.test.lua dump_stress.test.lua select_consistency.test.lua is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/vinyl/errinj.test.lua0000664000000000000000000003747613306565107021327 0ustar rootroot-- -- gh-1681: vinyl: crash in vy_rollback on ER_WAL_WRITE -- test_run = require('test_run').new() fio = require('fio') fiber = require('fiber') errinj = box.error.injection errinj.set("ERRINJ_VY_SCHED_TIMEOUT", 0.040) s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') function f() box.begin() s:insert{1, 'hi'} s:insert{2, 'bye'} box.commit() end errinj.set("ERRINJ_WAL_WRITE", true) f() s:select{} errinj.set("ERRINJ_WAL_WRITE", false) f() 
s:select{} s:drop() -- -- Lost data in case of dump error -- -- test_run:cmd("setopt delimiter ';'") if box.cfg.vinyl_page_size > 1024 or box.cfg.vinyl_range_size > 65536 then error("This test relies on splits and dumps") end; s = box.schema.space.create('test', {engine='vinyl'}); _ = s:create_index('pk'); value = string.rep('a', 1024) last_id = 1 -- fill up a range function range() local range_size = box.cfg.vinyl_range_size local page_size = box.cfg.vinyl_page_size local s = box.space.test local num_rows = 0 for i=1,range_size/page_size do for j=1, page_size/#value do s:replace({last_id, value}) last_id = last_id + 1 num_rows = num_rows + 1 end end return num_rows end; num_rows = 0; num_rows = num_rows + range(); box.snapshot(); errinj.set("ERRINJ_VY_RUN_WRITE", true); num_rows = num_rows + range(); -- fails due to error injection box.snapshot(); errinj.set("ERRINJ_VY_RUN_WRITE", false); -- fails due to scheduler timeout box.snapshot(); fiber.sleep(0.06); num_rows = num_rows + range(); box.snapshot(); num_rows = num_rows + range(); box.snapshot(); num_rows; for i=1,num_rows do if s:get{i} == nil then error("Row "..i.."not found") end end; #s:select{} == num_rows; s:drop(); test_run:cmd("setopt delimiter ''"); -- Disable the cache so that we can check that disk errors -- are handled properly. vinyl_cache = box.cfg.vinyl_cache box.cfg{vinyl_cache = 0} s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') for i = 1, 10 do s:insert({i, 'test str' .. 
tostring(i)}) end box.snapshot() s:select() errinj.set("ERRINJ_VY_READ_PAGE", true) s:select() errinj.set("ERRINJ_VY_READ_PAGE", false) s:select() errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) function test_cancel_read () k = s:select() return #k end f1 = fiber.create(test_cancel_read) fiber.cancel(f1) -- task should be done fiber.sleep(0.1) errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); s:select() -- error after timeout for canceled fiber errinj.set("ERRINJ_VY_READ_PAGE", true) errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) f1 = fiber.create(test_cancel_read) fiber.cancel(f1) fiber.sleep(0.1) errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); errinj.set("ERRINJ_VY_READ_PAGE", false); s:select() -- index is dropped while a read task is in progress errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) f1 = fiber.create(test_cancel_read) fiber.cancel(f1) s:drop() fiber.sleep(0.1) errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); box.cfg{vinyl_cache = vinyl_cache} -- gh-2871: check that long reads are logged s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') for i = 1, 10 do s:insert{i, i * 2} end box.snapshot() too_long_threshold = box.cfg.too_long_threshold box.cfg{too_long_threshold = 0.01} errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) s:get(10) ~= nil #s:select(5, {iterator = 'LE'}) == 5 errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); test_run:cmd("push filter 'lsn=[0-9]+' to 'lsn='") test_run:grep_log('default', 'get.* took too long') test_run:grep_log('default', 'select.* took too long') test_run:cmd("clear filter") box.cfg{too_long_threshold = too_long_threshold} s:drop() s = box.schema.space.create('test', {engine='vinyl'}); _ = s:create_index('pk'); _ = s:replace({1, string.rep('a', 128000)}) errinj.set("ERRINJ_WAL_WRITE_DISK", true) box.snapshot() errinj.set("ERRINJ_WAL_WRITE_DISK", false) fiber.sleep(0.06) _ = s:replace({2, string.rep('b', 128000)}) box.snapshot(); #s:select({1}) s:drop() errinj.set("ERRINJ_VY_SCHED_TIMEOUT", 0) -- -- Check 
that upsert squash fiber does not crash if index or -- in-memory tree is gone. -- errinj.set("ERRINJ_VY_SQUASH_TIMEOUT", 0.050) s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') s:insert{0, 0} box.snapshot() for i=1,256 do s:upsert({0, 0}, {{'+', 2, 1}}) end box.snapshot() -- in-memory tree is gone fiber.sleep(0.05) s:select() s:replace{0, 0} box.snapshot() for i=1,256 do s:upsert({0, 0}, {{'+', 2, 1}}) end s:drop() -- index is gone fiber.sleep(0.05) errinj.set("ERRINJ_VY_SQUASH_TIMEOUT", 0) --https://github.com/tarantool/tarantool/issues/1842 --test error injection s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') s:replace{0, 0} s:replace{1, 0} s:replace{2, 0} errinj.set("ERRINJ_WAL_WRITE", true) s:replace{3, 0} s:replace{4, 0} s:replace{5, 0} s:replace{6, 0} errinj.set("ERRINJ_WAL_WRITE", false) s:replace{7, 0} s:replace{8, 0} s:select{} s:drop() create_iterator = require('utils').create_iterator --iterator test test_run:cmd("setopt delimiter ';'") fiber_status = 0 function fiber_func() box.begin() s:replace{5, 5} fiber_status = 1 local res = {pcall(box.commit) } fiber_status = 2 return unpack(res) end; test_run:cmd("setopt delimiter ''"); s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') fiber = require('fiber') _ = s:replace{0, 0} _ = s:replace{10, 0} _ = s:replace{20, 0} test_run:cmd("setopt delimiter ';'"); faced_trash = false for i = 1,100 do errinj.set("ERRINJ_WAL_WRITE", true) local f = fiber.create(fiber_func) local itr = create_iterator(s, {0}, {iterator='GE'}) local first = itr.next() local second = itr.next() if (second[1] ~= 5 and second[1] ~= 10) then faced_trash = true end while fiber_status <= 1 do fiber.sleep(0.001) end local _,next = pcall(itr.next) _,next = pcall(itr.next) _,next = pcall(itr.next) errinj.set("ERRINJ_WAL_WRITE", false) s:delete{5} end; test_run:cmd("setopt delimiter ''"); faced_trash s:drop() -- TX in prepared but not committed state s = 
box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk') fiber = require('fiber') txn_proxy = require('txn_proxy') s:replace{1, "original"} s:replace{2, "original"} s:replace{3, "original"} c0 = txn_proxy.new() c0:begin() c1 = txn_proxy.new() c1:begin() c2 = txn_proxy.new() c2:begin() c3 = txn_proxy.new() c3:begin() -- -- Prepared transactions -- -- Pause WAL writer to cause all further calls to box.commit() to move -- transactions into prepared, but not committed yet state. errinj.set("ERRINJ_WAL_DELAY", true) lsn = box.info.lsn c0('s:replace{1, "c0"}') c0('s:replace{2, "c0"}') c0('s:replace{3, "c0"}') _ = fiber.create(c0.commit, c0) box.info.lsn == lsn c1('s:replace{1, "c1"}') c1('s:replace{2, "c1"}') _ = fiber.create(c1.commit, c1) box.info.lsn == lsn c3('s:select{1}') -- c1 is visible c2('s:replace{1, "c2"}') c2('s:replace{3, "c2"}') _ = fiber.create(c2.commit, c2) box.info.lsn == lsn c3('s:select{1}') -- c1 is visible, c2 is not c3('s:select{2}') -- c1 is visible c3('s:select{3}') -- c2 is not visible -- Resume WAL writer and wait until all transactions will been committed errinj.set("ERRINJ_WAL_DELAY", false) REQ_COUNT = 7 while box.info.lsn - lsn < REQ_COUNT do fiber.sleep(0.01) end box.info.lsn == lsn + REQ_COUNT c3('s:select{1}') -- c1 is visible, c2 is not c3('s:select{2}') -- c1 is visible c3('s:select{3}') -- c2 is not visible c3:commit() s:drop() -- -- Test mem restoration on a prepared and not commited statement -- after moving iterator into read view. 
-- space = box.schema.space.create('test', {engine = 'vinyl'}) pk = space:create_index('pk') space:replace{1} space:replace{2} space:replace{3} last_read = nil errinj.set("ERRINJ_WAL_DELAY", true) test_run:cmd("setopt delimiter ';'") function fill_space() box.begin() space:replace{1} space:replace{2} space:replace{3} -- block until wal_delay = false box.commit() -- send iterator to read view space:replace{1, 1} -- flush mem and update index version to trigger iterator restore box.snapshot() end; function iterate_in_read_view() local i = create_iterator(space) last_read = i.next() fiber.sleep(100000) last_read = i.next() end; test_run:cmd("setopt delimiter ''"); f1 = fiber.create(fill_space) -- Prepared transaction is blocked due to wal_delay. -- Start iterator with vlsn = INT64_MAX f2 = fiber.create(iterate_in_read_view) last_read -- Finish prepared transaction and send to read view the iterator. errinj.set("ERRINJ_WAL_DELAY", false) while f1:status() ~= 'dead' do fiber.sleep(0.01) end f2:wakeup() while f2:status() ~= 'dead' do fiber.sleep(0.01) end last_read space:drop() -- -- Space drop in the middle of dump. -- test_run:cmd("create server test with script='vinyl/low_quota.lua'") test_run:cmd("start server test with args='1048576'") test_run:cmd('switch test') fiber = require 'fiber' box.cfg{vinyl_timeout = 0.001} s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) _ = s:create_index('i2', {parts = {2, 'unsigned'}}) _ = s:insert{1, 1} -- Delay dump so that we can manage to drop the space -- while it is still being dumped. box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0.1) -- Before failing on quota timeout, the following fiber -- will trigger dump due to memory shortage. _ = fiber.create(function() s:insert{2, 2, string.rep('x', box.cfg.vinyl_memory)} end) -- Let the fiber run. fiber.sleep(0) -- Drop the space while the dump task is still running. s:drop() -- Wait for the dump task to complete. 
box.snapshot() box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0) -- -- Check that all dump/compact tasks that are in progress at -- the time when the server stops are aborted immediately. -- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) _ = s:create_index('i2', {parts = {2, 'unsigned'}}) box.error.injection.set('ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT', 0.01) for i = 1, 1000 do s:replace{i, i} end _ = fiber.create(function() box.snapshot() end) fiber.sleep(0.01) test_run:cmd('switch default') t1 = fiber.time() test_run:cmd("stop server test") t2 = fiber.time() t2 - t1 < 1 test_run:cmd("cleanup server test") -- -- If we logged an index creation in the metadata log before WAL write, -- WAL failure would result in leaving the index record in vylog forever. -- Since we use LSN to identify indexes in vylog, retrying index creation -- would then lead to a duplicate index id in vylog and hence inability -- to make a snapshot or recover. 
-- s = box.schema.space.create('test', {engine = 'vinyl'}) errinj.set('ERRINJ_WAL_IO', true) _ = s:create_index('pk') errinj.set('ERRINJ_WAL_IO', false) _ = s:create_index('pk') box.snapshot() s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) c = 10 errinj.set("ERRINJ_WAL_WRITE_DISK", true) for i = 1,10 do fiber.create(function() pcall(s.replace, s, {i}) c = c - 1 end) end while c ~= 0 do fiber.sleep(0.001) end s:select{} errinj.set("ERRINJ_WAL_WRITE_DISK", false) s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) for i = 0, 9 do s:replace({i, i + 1}) end box.snapshot() errinj.set("ERRINJ_XLOG_GARBAGE", true) s:select() errinj.set("ERRINJ_XLOG_GARBAGE", false) errinj.set("ERRINJ_VYRUN_DATA_READ", true) s:select() errinj.set("ERRINJ_VYRUN_DATA_READ", false) s:select() s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) for i = 0, 9 do s:replace({i, i + 1}) end errinj.set("ERRINJ_XLOG_GARBAGE", true) box.snapshot() for i = 10, 19 do s:replace({i, i + 1}) end errinj.set("ERRINJ_XLOG_GARBAGE", false) box.snapshot() s:select() s:drop() -- Point select from secondary index during snapshot. -- Once upon time that leaded to crash. 
s = box.schema.space.create('test', {engine = 'vinyl'}) i1 = s:create_index('pk', {parts = {1, 'uint'}, bloom_fpr = 0.5}) i2 = s:create_index('sk', {parts = {2, 'uint'}, bloom_fpr = 0.5}) for i = 1,10 do s:replace{i, i, 0} end test_run:cmd("setopt delimiter ';'") function worker() for i = 11,20,2 do s:upsert({i, i}, {{'=', 3, 1}}) errinj.set("ERRINJ_VY_POINT_ITER_WAIT", true) i1:select{i} s:upsert({i + 1 ,i + 1}, {{'=', 3, 1}}) errinj.set("ERRINJ_VY_POINT_ITER_WAIT", true) i2:select{i + 1} end end test_run:cmd("setopt delimiter ''"); f = fiber.create(worker) while f:status() ~= 'dead' do box.snapshot() fiber.sleep(0.01) end errinj.set("ERRINJ_VY_POINT_ITER_WAIT", false) s:drop() -- vinyl: vy_cache_add: Assertion `0' failed -- https://github.com/tarantool/tarantool/issues/2685 s = box.schema.create_space('test', {engine = 'vinyl'}) pk = s:create_index('pk') s:replace{2, 0} box.snapshot() s:replace{1, 0} box.snapshot() s:replace{0, 0} s:select{0} errinj.set("ERRINJ_WAL_DELAY", true) wait_replace = true _ = fiber.create(function() s:replace{1, 1} wait_replace = false end) gen,param,state = s:pairs({1}, {iterator = 'GE'}) state, value = gen(param, state) value errinj.set("ERRINJ_WAL_DELAY", false) while wait_replace do fiber.sleep(0.01) end state, value = gen(param, state) value s:drop() -- -- gh-2442: secondary index cursor must skip key update, made -- after the secondary index scan, but before a primary index -- lookup. It is ok, and the test checks this. 
-- s = box.schema.create_space('test', {engine = 'vinyl'}) pk = s:create_index('pk') sk = s:create_index('sk', {parts = {{2, 'unsigned'}}}) s:replace{1, 1} s:replace{3, 3} box.snapshot() ret = nil function do_read() ret = sk:select({2}, {iterator = 'GE'}) end errinj.set("ERRINJ_VY_DELAY_PK_LOOKUP", true) f = fiber.create(do_read) f:status() ret s:replace{2, 2} errinj.set("ERRINJ_VY_DELAY_PK_LOOKUP", false) while ret == nil do fiber.sleep(0.01) end ret s:drop() -- -- gh-3412 - assertion failure at exit in case: -- * there is a fiber waiting for quota -- * there is a pending vylog write -- test_run:cmd("create server low_quota with script='vinyl/low_quota.lua'") test_run:cmd("start server low_quota with args='1048576'") test_run:cmd('switch low_quota') _ = box.schema.space.create('test', {engine = 'vinyl'}) _ = box.space.test:create_index('pk') box.error.injection.set('ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT', 0.01) fiber = require('fiber') pad = string.rep('x', 100 * 1024) _ = fiber.create(function() for i = 1, 11 do box.space.test:replace{i, pad} end end) repeat fiber.sleep(0.001) q = box.info.vinyl().quota until q.limit - q.used < pad:len() test_run:cmd("restart server low_quota with args='1048576'") box.error.injection.set('ERRINJ_VY_LOG_FLUSH_DELAY', true) fiber = require('fiber') pad = string.rep('x', 100 * 1024) _ = fiber.create(function() for i = 1, 11 do box.space.test:replace{i, pad} end end) repeat fiber.sleep(0.001) q = box.info.vinyl().quota until q.limit - q.used < pad:len() test_run:cmd('switch default') test_run:cmd("stop server low_quota") test_run:cmd("cleanup server low_quota") -- -- gh-3437: if compaction races with checkpointing, it may remove -- files needed for backup. -- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {run_count_per_level = 1}) -- Create a run file. _ = s:replace{1} box.snapshot() -- Create another run file. This will trigger compaction -- as run_count_per_level is set to 1. 
Due to the error -- injection compaction will finish before snapshot. _ = s:replace{2} errinj.set('ERRINJ_SNAP_COMMIT_DELAY', true) c = fiber.channel(1) _ = fiber.create(function() box.snapshot() c:put(true) end) while s.index.pk:info().disk.compact.count == 0 do fiber.sleep(0.001) end errinj.set('ERRINJ_SNAP_COMMIT_DELAY', false) c:get() -- Check that all files corresponding to the last checkpoint -- are present. files = box.backup.start() missing = {} for _, f in pairs(files) do if not fio.path.exists(f) then table.insert(missing, f) end end missing box.backup.stop() s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upsert.result0000664000000000000000000002261513306565107021124 0ustar rootroottest_run = require('test_run').new() --- ... -- gh-1671 upsert is broken in a transaction -- upsert after upsert space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1, 1, 2}) --- - [1, 1, 2] ... space:insert({2}) --- - [2] ... space:insert({3, 4, 'abc'}) --- - [3, 4, 'abc'] ... box.begin() --- ... space:upsert({1}, {{'#', 3, 1}}) --- ... space:upsert({1}, {{'!', 2, 20}}) --- ... space:upsert({1}, {{'+', 3, 20}}) --- ... box.commit() --- ... space:select{} --- - - [1, 20, 21] - [2] - [3, 4, 'abc'] ... box.begin() --- ... space:upsert({2}, {{'!', 2, 10}}) --- ... space:upsert({3, 4, 5}, {{'+', 2, 1}}) --- ... space:upsert({2, 2, 2, 2}, {{'+', 2, 10.5}}) --- ... space:upsert({3}, {{'-', 2, 2}}) --- ... box.commit() --- ... space:select{} --- - - [1, 20, 21] - [2, 20.5] - [3, 3, 'abc'] ... space:drop() --- ... -- upsert after replace space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert{1} --- - [1] ... space:insert{2} --- - [2] ... box.begin() --- ... space:replace({3, 4}) --- - [3, 4] ... space:upsert({3, 3, 3, 3}, {{'+', 2, 1}}) --- ... box.commit() --- ... space:select{} --- - - [1] - [2] - [3, 5] ... box.begin() --- ... 
space:replace({2, 2}) --- - [2, 2] ... space:upsert({2}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 3}}) --- ... box.commit() --- ... space:select{} --- - - [1] - [2, 3, 1, 2] - [3, 5] ... box.begin() --- ... space:replace({4}) --- - [4] ... space:upsert({4}, {{'!', 2, 1}}) --- ... space:replace({5}) --- - [5] ... space:upsert({4}, {{'!', 2, 3}}) --- ... space:upsert({5}, {{'!', 2, 1}, {'+', 2, 1}}) --- ... box.commit() --- ... space:select{} --- - - [1] - [2, 3, 1, 2] - [3, 5] - [4, 3, 1] - [5, 2] ... space:drop() --- ... -- upsert after delete space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert{1} --- - [1] ... space:insert{2} --- - [2] ... space:insert{3} --- - [3] ... space:insert{4} --- - [4] ... box.begin() --- ... space:delete({1}) --- ... space:upsert({1, 2}, {{'!', 2, 100}}) --- ... box.commit() --- ... space:select{} --- - - [1, 2] - [2] - [3] - [4] ... box.begin() --- ... space:delete({2}) --- ... space:upsert({1}, {{'+', 2, 1}}) --- ... space:upsert({2, 200}, {{'!', 2, 1000}}) --- ... space:upsert({2}, {{'!', 2, 1005}}) --- ... box.commit() --- ... space:select{} --- - - [1, 3] - [2, 1005, 200] - [3] - [4] ... space:drop() --- ... -- replace after upsert space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert{1} --- - [1] ... space:insert{2} --- - [2] ... space:insert{3} --- - [3] ... space:insert{4} --- - [4] ... box.begin() --- ... space:upsert({1, 2}, {{'!', 2, 100}}) --- ... space:replace({1, 2, 3}) --- - [1, 2, 3] ... box.commit() --- ... space:select{} --- - - [1, 2, 3] - [2] - [3] - [4] ... box.begin() --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... box.commit() --- ... space:select{} --- - - [1, 2, 3] - [2, 20] - [3, 30] - [4] ... space:drop() --- ... 
-- delete after upsert box.cfg{} --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert{1} --- - [1] ... space:insert{2} --- - [2] ... space:insert{3} --- - [3] ... space:insert{4} --- - [4] ... box.begin() --- ... space:upsert({1, 2}, {{'!', 2, 100}}) --- ... space:delete({1}) --- ... box.commit() --- ... space:select{} --- - - [2] - [3] - [4] ... box.begin() --- ... space:upsert({5}, {{'!', 2, 100}}) --- ... space:delete({5}) --- ... box.commit() --- ... space:select{} --- - - [2] - [3] - [4] ... box.begin() --- ... space:upsert({5}, {{'!', 2, 100}}) --- ... space:delete({4}) --- ... space:upsert({4}, {{'!', 2, 100}}) --- ... space:delete({5}) --- ... space:upsert({4}, {{'!', 2, 105}}) --- ... box.commit() --- ... space:select{} --- - - [2] - [3] - [4, 105] ... space:drop() --- ... -- -- gh-1829: vinyl: merge hot UPSERTS in the background -- gh-1828: Automatically convert UPSERT into REPLACE -- gh-1826: vinyl: memory explosion on UPSERT -- clock = require 'clock' --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... _ = space:create_index('primary', { type = 'tree', range_size = 250 * 1024 * 1024 } ) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... -- add a lot of UPSERT statements to the space function gen() for i=1,2000 do space:upsert({0, 0}, {{'+', 2, 1}}) end end; --- ... -- check that 'get' takes reasonable time function check() local start = clock.monotonic() for i=1,1000 do space:get(0) end return clock.monotonic() - start < 1 end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- No runs gen() --- ... check() -- exploded before #1826 --- - true ... -- Mem has DELETE box.snapshot() --- - ok ... space:delete({0}) --- ... gen() --- ... check() -- exploded before #1826 --- - true ... -- Mem has REPLACE box.snapshot() --- - ok ... space:replace({0, 0}) --- - [0, 0] ... gen() --- ... check() -- exploded before #1826 --- - true ... 
-- Mem has only UPSERTS box.snapshot() --- - ok ... gen() --- ... check() -- exploded before #1829 --- - true ... space:drop() --- ... -- test upsert statistic against some upsert scenarous test_run:cmd("setopt delimiter ';'") --- - true ... function upsert_stat_diff(stat2, stat1) return { squashed = stat2.upsert.squashed - stat1.upsert.squashed, applied = stat2.upsert.applied - stat1.upsert.applied } end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... stat1 = index:info() --- ... -- separate upserts w/o on disk data space:upsert({1, 1, 1}, {{'+', 2, 10}}) --- ... space:upsert({1, 1, 1}, {{'-', 2, 20}}) --- ... space:upsert({1, 1, 1}, {{'=', 2, 20}}) --- ... stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 3 applied: 3 ... stat1 = stat2 --- ... stat1.rows --- - 3 ... -- in-tx upserts box.begin() --- ... space:upsert({2, 1, 1}, {{'+', 2, 10}}) --- ... space:upsert({2, 1, 1}, {{'-', 2, 20}}) --- ... space:upsert({2, 1, 1}, {{'=', 2, 20}}) --- ... box.commit() --- ... stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 3 applied: 3 ... stat1 = stat2 --- ... stat1.rows --- - 4 ... box.snapshot() --- - ok ... index:info().rows --- - 2 ... -- upsert with on disk data space:upsert({1, 1, 1}, {{'+', 2, 10}}) --- ... space:upsert({1, 1, 1}, {{'-', 2, 20}}) --- ... stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 0 applied: 0 ... stat1 = stat2 --- ... stat1.rows --- - 4 ... -- count of applied apserts space:get({1}) --- - [1, 10, 1] ... stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 0 applied: 2 ... stat1 = stat2 --- ... space:get({2}) --- - [2, 20, 1] ... stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 0 applied: 0 ... stat1 = stat2 --- ... space:select({}) --- - - [1, 10, 1] - [2, 20, 1] ... 
stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 0 applied: 0 ... stat1 = stat2 --- ... -- start upsert optimizer for i = 0, 999 do space:upsert({3, 0, 0}, {{'+', 2, 1}}) end --- ... stat2 = index:info() --- ... upsert_stat_diff(stat2, stat1) --- - squashed: 7 applied: 903 ... stat1 = stat2 --- ... space:get{3} --- - [3, 999, 0] ... stat1.rows --- - 1004 ... space:drop() --- ... -- fix behaviour after https://github.com/tarantool/tarantool/issues/2104 s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... i = s:create_index('test', { run_count_per_level = 20 }) --- ... s:replace({1, 1}) --- - [1, 1] ... box.snapshot() --- - ok ... s:upsert({1, 1}, {{'+', 1, 1}}) --- ... s:upsert({1, 1}, {{'+', 2, 1}}) --- ... s:select() --both upserts are ignored due to primary key change --- - - [1, 1] ... -- -- gh-2520 use cache as a hint when applying upserts. -- old_stat = s.index.test:info() --- ... -- insert the first upsert s:upsert({100}, {{'=', 2, 200}}) --- ... -- force a dump, the inserted upsert is now on disk box.snapshot() --- - ok ... -- populate the cache s:get{100} --- - [100] ... -- a lookup in a run was done to populate the cache new_stat = s.index.test:info() --- ... upsert_stat_diff(new_stat, old_stat) --- - squashed: 0 applied: 1 ... new_stat.disk.iterator.lookup - old_stat.disk.iterator.lookup --- - 1 ... old_stat = new_stat --- ... -- Add another upsert: the cached REPLACE will be used and the upsert will -- be applied immediately s:upsert({100}, {{'=', 2, 300}}) --- ... -- force a new dump box.snapshot() --- - ok ... -- lookup the key s:get{100} --- - [100, 300] ... -- -- since we converted upsert to replace on insert, we had to -- go no further than the latest dump to locate the latest -- value of the key -- new_stat = s.index.test:info() --- ... upsert_stat_diff(new_stat, old_stat) --- - squashed: 0 applied: 0 ... new_stat.disk.iterator.lookup - old_stat.disk.iterator.lookup --- - 1 ... 
-- -- gh-3003: crash in read iterator if upsert exactly matches -- the search key. -- s:truncate() --- ... s:insert{100, 100} --- - [100, 100] ... box.snapshot() --- - ok ... s:upsert({100}, {{'+', 2, 100}}) --- ... s:select({100}, 'GE') --- - - [100, 200] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/tx_gap_lock.result0000664000000000000000000004710213306565107022072 0ustar rootroottest_run = require('test_run').new() --- ... txn_proxy = require('txn_proxy') --- ... c = txn_proxy.new() --- ... c1 = txn_proxy.new() --- ... c2 = txn_proxy.new() --- ... c3 = txn_proxy.new() --- ... c4 = txn_proxy.new() --- ... c5 = txn_proxy.new() --- ... c6 = txn_proxy.new() --- ... ---------------------------------------------------------------- -- SELECT ALL ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... _ = s:insert{1} --- ... _ = s:insert{3} --- ... c:begin() --- - ... c("s:select()") -- {1}, {3} --- - - [[1], [3]] ... _ = s:insert{2} -- send c to read view --- ... c("s:select()") -- {1}, {3} --- - - [[1], [3]] ... c:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- _ = s:insert{1} --- ... _ = s:insert{2} --- ... c:begin() --- - ... c("s:select()") -- {1}, {2} --- - - [[1], [2]] ... _ = s:insert{3} -- send c to read view --- ... c("s:select()") -- {1}, {2} --- - - [[1], [2]] ... c:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- _ = s:insert{2} --- ... _ = s:insert{3} --- ... c:begin() --- - ... c("s:select()") -- {2}, {3} --- - - [[2], [3]] ... _ = s:insert{1} -- send c to read view --- ... c("s:select()") -- {2}, {3} --- - - [[2], [3]] ... c:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- _ = s:insert{123} --- ... c1:begin() --- - ... c2:begin() --- - ... 
c1("s:select({}, {iterator = 'GT'})") -- {123} --- - - [[123]] ... c2("s:select({}, {iterator = 'LT'})") -- {123} --- - - [[123]] ... _ = s:replace{123, 456} -- send c1 and c2 to read view --- ... c1("s:select({}, {iterator = 'GT'})") -- {123} --- - - [[123]] ... c2("s:select({}, {iterator = 'LT'})") -- {123} --- - - [[123]] ... c1:commit() --- - ... c2:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- -- SELECT GT/GE ---------------------------------------------------------------- _ = s:insert{10} --- ... _ = s:insert{20} --- ... _ = s:insert{30} --- ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c4:begin() --- - ... c5:begin() --- - ... c6:begin() --- - ... c1("s:select({10}, {iterator = 'GE'})") -- {10}, {20}, {30} --- - - [[10], [20], [30]] ... c2("s:select({10}, {iterator = 'GT'})") -- {20}, {30} --- - - [[20], [30]] ... c3("s:select({15}, {iterator = 'GE'})") -- {20}, {30} --- - - [[20], [30]] ... c4("s:select({15}, {iterator = 'GT'})") -- {20}, {30} --- - - [[20], [30]] ... c5("s:select({25}, {iterator = 'GE'})") -- {30} --- - - [[30]] ... c6("s:select({30}, {iterator = 'GE'})") -- {30} --- - - [[30]] ... _ = s:replace{10, 1} -- send c1 to read view --- ... c1("s:get(10)") -- {10} --- - - [10] ... c2("s:get(10)") -- {10, 1} --- - - [10, 1] ... c3("s:get(10)") -- {10, 1} --- - - [10, 1] ... c4("s:get(10)") -- {10, 1} --- - - [10, 1] ... c5("s:get(10)") -- {10, 1} --- - - [10, 1] ... c6("s:get(10)") -- {10, 1} --- - - [10, 1] ... _ = s:replace{15, 2} -- send c2 and c3 to read view --- ... c2("s:get(15)") -- none --- - ... c3("s:get(15)") -- none --- - ... c4("s:get(15)") -- {15, 2} --- - - [15, 2] ... c5("s:get(15)") -- {15, 2} --- - - [15, 2] ... c6("s:get(15)") -- {15, 2} --- - - [15, 2] ... _ = s:replace{35, 3} -- send c4, c5, and c6 to read view --- ... c4("s:get(35)") -- none --- - ... c5("s:get(35)") -- none --- - ... c6("s:get(35)") -- none --- - ... c1:commit() --- - ... 
c2:commit() --- - ... c3:commit() --- - ... c4:commit() --- - ... c5:commit() --- - ... c6:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- -- SELECT LT/LE ---------------------------------------------------------------- _ = s:insert{10} --- ... _ = s:insert{20} --- ... _ = s:insert{30} --- ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c4:begin() --- - ... c5:begin() --- - ... c6:begin() --- - ... c1("s:select({30}, {iterator = 'LE'})") -- {30}, {20}, {10} --- - - [[30], [20], [10]] ... c2("s:select({30}, {iterator = 'LT'})") -- {20}, {10} --- - - [[20], [10]] ... c3("s:select({25}, {iterator = 'LE'})") -- {20}, {10} --- - - [[20], [10]] ... c4("s:select({25}, {iterator = 'LT'})") -- {20}, {10} --- - - [[20], [10]] ... c5("s:select({15}, {iterator = 'LE'})") -- {10} --- - - [[10]] ... c6("s:select({10}, {iterator = 'LE'})") -- {10} --- - - [[10]] ... _ = s:replace{30, 1} -- send c1 to read view --- ... c1("s:get(30)") -- {30} --- - - [30] ... c2("s:get(30)") -- {30, 1} --- - - [30, 1] ... c3("s:get(30)") -- {30, 1} --- - - [30, 1] ... c4("s:get(30)") -- {30, 1} --- - - [30, 1] ... c5("s:get(30)") -- {30, 1} --- - - [30, 1] ... c6("s:get(30)") -- {30, 1} --- - - [30, 1] ... _ = s:replace{25, 2} -- send c2 and c3 to read view --- ... c2("s:get(25)") -- none --- - ... c3("s:get(25)") -- none --- - ... c4("s:get(25)") -- {25, 2} --- - - [25, 2] ... c5("s:get(25)") -- {25, 2} --- - - [25, 2] ... c6("s:get(25)") -- {25, 2} --- - - [25, 2] ... _ = s:replace{5, 3} -- send c4, c5, and c6 to read view --- ... c4("s:get(5)") -- none --- - ... c5("s:get(5)") -- none --- - ... c6("s:get(5)") -- none --- - ... c1:commit() --- - ... c2:commit() --- - ... c3:commit() --- - ... c4:commit() --- - ... c5:commit() --- - ... c6:commit() --- - ... s:truncate() --- ... 
---------------------------------------------------------------- -- SELECT LIMIT ---------------------------------------------------------------- for i = 1, 9 do s:insert{i * 10} end --- ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c4:begin() --- - ... c1("s:select({20}, {iterator = 'GE', limit = 3})") -- {20}, {30}, {40} --- - - [[20], [30], [40]] ... c2("s:select({80}, {iterator = 'LE', limit = 3})") -- {80}, {70}, {60} --- - - [[80], [70], [60]] ... c3("s:select({10}, {iterator = 'GE', limit = 3})") -- {10}, {20}, {30} --- - - [[10], [20], [30]] ... c4("s:select({90}, {iterator = 'LE', limit = 3})") -- {90}, {80}, {70} --- - - [[90], [80], [70]] ... _ = s:replace{50, 1} --- ... c1("s:get(50)") -- {50, 1} --- - - [50, 1] ... c2("s:get(50)") -- {50, 1} --- - - [50, 1] ... c3("s:get(50)") -- {50, 1} --- - - [50, 1] ... c4("s:get(50)") -- {50, 1} --- - - [50, 1] ... _ = s:replace{40, 2} -- send c1 to read view --- ... c1("s:get(40)") -- {40} --- - - [40] ... c2("s:get(40)") -- {40, 2} --- - - [40, 2] ... c3("s:get(40)") -- {40, 2} --- - - [40, 2] ... c4("s:get(40)") -- {40, 2} --- - - [40, 2] ... _ = s:replace{60, 3} -- send c2 to read view --- ... c2("s:get(60)") -- {60} --- - - [60] ... c3("s:get(60)") -- {60, 3} --- - - [60, 3] ... c4("s:get(60)") -- {60, 3} --- - - [60, 3] ... _ = s:replace{25, 4} -- send c3 to read view --- ... c3("s:get(25)") -- none --- - ... c4("s:get(25)") -- {25, 4} --- - - [25, 4] ... _ = s:replace{75, 5} -- send c4 to read view --- ... c4("s:get(75)") -- none --- - ... c1:commit() --- - ... c2:commit() --- - ... c3:commit() --- - ... c4:commit() --- - ... s:drop() --- ... ---------------------------------------------------------------- -- SELECT EQ/REQ ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {parts = {1, 'unsigned', 2, 'unsigned'}}) --- ... _ = s:insert{1, 1} --- ... _ = s:insert{2, 1} --- ... 
_ = s:insert{2, 2} --- ... _ = s:insert{2, 3} --- ... _ = s:insert{3, 3} --- ... c1:begin() --- - ... c2:begin() --- - ... c1("s:select({2}, {iterator = 'EQ'})") -- {2, 1}, {2, 2}, {2, 3} --- - - [[2, 1], [2, 2], [2, 3]] ... c2("s:select({2}, {iterator = 'REQ'})") -- {2, 3}, {2, 2}, {2, 1} --- - - [[2, 3], [2, 2], [2, 1]] ... _ = s:replace{1, 10} --- ... c1("s:select({1})") -- {1, 1}, {1, 10} --- - - [[1, 1], [1, 10]] ... c2("s:select({1})") -- {1, 1}, {1, 10} --- - - [[1, 1], [1, 10]] ... _ = s:replace{3, 30} --- ... c1("s:get({3, 30})") -- {3, 30} --- - - [3, 30] ... c2("s:get({3, 30})") -- {3, 30} --- - - [3, 30] ... _ = s:replace{2, 20} -- send c1 and c2 to read view --- ... c1("s:select({2}, {iterator = 'EQ'})") -- {2, 1}, {2, 2}, {2, 3} --- - - [[2, 1], [2, 2], [2, 3]] ... c2("s:select({2}, {iterator = 'REQ'})") -- {2, 3}, {2, 2}, {2, 1} --- - - [[2, 3], [2, 2], [2, 1]] ... c1:commit() --- - ... c2:commit() --- - ... s:drop() --- ... ---------------------------------------------------------------- -- Interval merging ---------------------------------------------------------------- function gap_lock_count() return box.info.vinyl().tx.gap_locks end --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... _ = s:insert{10} --- ... _ = s:insert{20} --- ... _ = s:insert{30} --- ... _ = s:insert{40} --- ... gap_lock_count() -- 0 --- - 0 ... c:begin() --- - ... c("s:select({10}, {iterator = 'GE', limit = 4})") -- locks [10, 40] --- - - [[10], [20], [30], [40]] ... gap_lock_count() -- 1 --- - 1 ... c("s:select({15}, {iterator = 'GE', limit = 2})") -- locks [15, 30] --- - - [[20], [30]] ... gap_lock_count() -- 1 --- - 1 ... c("s:select({35}, {iterator = 'LE', limit = 2})") -- locks [20, 35] --- - - [[30], [20]] ... gap_lock_count() -- 1 --- - 1 ... c("s:select({5}, {iterator = 'GT', limit = 2})") -- locks (5, 20] --- - - [[10], [20]] ... gap_lock_count() -- 1 --- - 1 ... 
c("s:select({45}, {iterator = 'LT', limit = 2})") -- locks [30, 45) --- - - [[40], [30]] ... gap_lock_count() -- 1 --- - 1 ... _ = s:insert{5} --- ... _ = s:insert{45} --- ... c("s:get(5)") -- {5} --- - - [5] ... c("s:get(45)") -- {45} --- - - [45] ... _ = s:insert{25} -- send c to read view --- ... c("s:get(25)") -- none --- - ... c:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- _ = s:insert{10} --- ... _ = s:insert{20} --- ... _ = s:insert{30} --- ... _ = s:insert{40} --- ... gap_lock_count() -- 0 --- - 0 ... c:begin() --- - ... c("s:select({1}, {iterator = 'GT', limit = 1})") -- locks (1, 10] --- - - [[10]] ... c("s:select({50}, {iterator = 'LT', limit = 1})") -- locks [40, 50) --- - - [[40]] ... c("s:select({20}, {iterator = 'GE', limit = 2})") -- locks [20, 30] --- - - [[20], [30]] ... gap_lock_count() -- 3 --- - 3 ... c("s:select({5}, {iterator = 'GT', limit = 4})") -- locks (5, 40] --- - - [[10], [20], [30], [40]] ... gap_lock_count() -- 1 --- - 1 ... _ = s:insert{1} --- ... _ = s:insert{50} --- ... c("s:get(1)") -- {1} --- - - [1] ... c("s:get(50)") -- {50} --- - - [50] ... _ = s:insert{5} -- send c to read view --- ... c("s:get(5)") -- none --- - ... c:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- _ = s:insert{100} --- ... gap_lock_count() -- 0 --- - 0 ... c:begin() --- - ... c("s:select({100}, {iterator = 'GT'})") -- locks (100, +inf) --- - - [] ... c("s:select({100}, {iterator = 'LT'})") -- locks (-inf, 100) --- - - [] ... gap_lock_count() -- 2 --- - 2 ... c("s:get(100)") -- locks [100] --- - - [100] ... gap_lock_count() -- 1 --- - 1 ... _ = s:insert{1000} -- send c to read view --- ... c("s:get(1000)") -- none --- - ... c:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- _ = s:insert{1, 0} --- ... _ = s:insert{2, 0} --- ... _ = s:insert{3, 0} --- ... _ = s:insert{4, 0} --- ... 
gap_lock_count() -- 0 --- - 0 ... c:begin() --- - ... c("s:select({1}, {iterator = 'GE', limit = 2})") -- locks [1, 2] --- - - [[1, 0], [2, 0]] ... c("s:select({2}, {iterator = 'GT', limit = 2})") -- locks (2, 4] --- - - [[3, 0], [4, 0]] ... gap_lock_count() -- 1 --- - 1 ... c:commit() --- - ... s:drop() --- ... ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {parts = {1, 'unsigned', 2, 'unsigned'}}) --- ... gap_lock_count() -- 0 --- - 0 ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c4:begin() --- - ... c1("s:select({100}, {iterator = 'GE'})") -- c1: locks [{100}, +inf) --- - - [] ... c1("s:select({100, 100}, {iterator = 'GE'})") -- c1: locks [{100, 100}, +inf) --- - - [] ... c2("s:select({100}, {iterator = 'GE'})") -- c2: locks [{100}, +inf) --- - - [] ... c2("s:select({100, 100}, {iterator = 'GT'})") -- c2: locks ({100, 100}, +inf) --- - - [] ... c3("s:select({100}, {iterator = 'GT'})") -- c3: locks ({100}, +inf) --- - - [] ... c3("s:select({100, 100}, {iterator = 'GE'})") -- c3: locks [{100, 100}, +inf) --- - - [] ... c4("s:select({100}, {iterator = 'GT'})") -- c4: locks ({100}, +inf) --- - - [] ... c4("s:select({100, 100}, {iterator = 'GT'})") -- c4: locks ({100, 100}, +inf) --- - - [] ... gap_lock_count() -- 4 --- - 4 ... _ = s:insert{100, 50} -- send c1 and c2 to read view --- ... c1("s:get({100, 50})") -- none --- - ... c2("s:get({100, 50})") -- none --- - ... c3("s:get({100, 50})") -- {100, 50} --- - - [100, 50] ... c4("s:get({100, 50})") -- {100, 50} --- - - [100, 50] ... gap_lock_count() -- 6; new intervals: c3:[{100, 50}], c4:[{100, 50}] --- - 6 ... _ = s:insert{100, 100} -- send c3 to read view --- ... c3("s:get({100, 100})") -- none --- - ... c4("s:get({100, 100})") -- {100, 100} --- - - [100, 100] ... gap_lock_count() -- 6; c4:[{100, 100}] is merged with c4:({100, 100}, +inf) --- - 6 ... 
_ = s:insert{100, 101} -- send c4 to read view --- ... c4("s:get({100, 101})") -- none --- - ... gap_lock_count() -- 6 --- - 6 ... c1:commit() --- - ... c2:commit() --- - ... c3:commit() --- - ... c4:commit() --- - ... s:truncate() --- ... ---------------------------------------------------------------- gap_lock_count() -- 0 --- - 0 ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c4:begin() --- - ... c1("s:select({100}, {iterator = 'LE'})") -- c1: locks (-inf, {100}] --- - - [] ... c1("s:select({100, 100}, {iterator = 'LE'})") -- c1: locks (-inf, {100, 100}] --- - - [] ... c2("s:select({100}, {iterator = 'LE'})") -- c2: locks (-inf, {100}] --- - - [] ... c2("s:select({100, 100}, {iterator = 'LT'})") -- c2: locks (-inf, {100, 100}) --- - - [] ... c3("s:select({100}, {iterator = 'LT'})") -- c3: locks (-inf, {100}) --- - - [] ... c3("s:select({100, 100}, {iterator = 'LE'})") -- c3: locks (-inf, {100, 100}] --- - - [] ... c4("s:select({100}, {iterator = 'LT'})") -- c4: locks (-inf, {100}) --- - - [] ... c4("s:select({100, 100}, {iterator = 'LT'})") -- c4: locks (-inf, {100, 100}) --- - - [] ... gap_lock_count() -- 4 --- - 4 ... _ = s:insert{100, 150} -- send c1 and c2 to read view --- ... c1("s:get({100, 150})") -- none --- - ... c2("s:get({100, 150})") -- none --- - ... c3("s:get({100, 150})") -- {100, 150} --- - - [100, 150] ... c4("s:get({100, 150})") -- {100, 150} --- - - [100, 150] ... gap_lock_count() -- 6; new intervals: c3:[{100, 150}], c4:[{100, 150}] --- - 6 ... _ = s:insert{100, 100} -- send c3 to read view --- ... c3("s:get({100, 100})") -- none --- - ... c4("s:get({100, 100})") -- {100, 100} --- - - [100, 100] ... gap_lock_count() -- 6; c4:[{100, 100}] is merged with c4:[-inf, {100, 100}) --- - 6 ... _ = s:insert{100, 99} -- send c4 to read view --- ... c4("s:get({100, 99})") -- none --- - ... gap_lock_count() -- 6 --- - 6 ... c1:commit() --- - ... c2:commit() --- - ... c3:commit() --- - ... c4:commit() --- - ... s:drop() --- ... 
---------------------------------------------------------------- -- gh-2534: Iterator over a secondary index doesn't double track -- results in the primary index. ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {parts = {1, 'unsigned'}}) --- ... _ = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... for i = 1, 100 do s:insert{i, i} end --- ... box.begin() --- ... gap_lock_count() -- 0 --- - 0 ... _ = s.index.sk:select({}, {limit = 50}) --- ... gap_lock_count() -- 1 --- - 1 ... for i = 1, 100 do s.index.sk:get(i) end --- ... gap_lock_count() -- 51 --- - 51 ... _ = s.index.sk:select() --- ... gap_lock_count() -- 1 --- - 1 ... box.commit() --- ... gap_lock_count() -- 0 --- - 0 ... s:drop() --- ... gap_lock_count = nil --- ... ---------------------------------------------------------------- -- Randomized stress test -- -- The idea behind the test is simple: execute several random -- selects from a bunch of transactions, then insert a random -- value to the space and check that only those transactions -- that would actually read the new value were sent to read -- view. ---------------------------------------------------------------- test_run:cmd("setopt delimiter ';'") --- - true ... seed = os.time(); --- ... math.randomseed(seed); --- ... INDEX_COUNT = 3; --- ... TUPLE_COUNT = 100; --- ... TX_COUNT = 20; --- ... SELECTS_PER_TX = 5; --- ... PAYLOAD_FIELD = INDEX_COUNT * 2 + 1; --- ... MAX_VAL = {[1] = 15, [2] = 10, [3] = 5}; --- ... assert(#MAX_VAL == INDEX_COUNT); --- - true ... s = box.schema.space.create('test', {engine = 'vinyl'}); --- ... for i = 1, INDEX_COUNT do s:create_index('i' .. i, {unique = (i == 1), parts = {i * 2 - 1, 'unsigned', i * 2, 'unsigned'}}) end; --- ... 
function gen_tuple(payload) local t = {} for i = 1, INDEX_COUNT do t[i * 2 - 1] = math.random(MAX_VAL[i]) t[i * 2] = math.random(MAX_VAL[i]) end table.insert(t, payload) return t end; --- ... function cmp_tuple(t1, t2) for i = 1, PAYLOAD_FIELD do if t1[i] ~= t2[i] then return t1[i] > t2[i] and 1 or -1 end end return 0 end; --- ... function gen_select() local index = math.random(INDEX_COUNT) local key = {} if math.random(100) > 10 then key[1] = math.random(MAX_VAL[index]) if math.random(100) > 50 then key[2] = math.random(MAX_VAL[index]) end end local iterator_types = {'EQ', 'REQ', 'LE', 'LT', 'GE', 'GT'} local dir = iterator_types[math.random(#iterator_types)] local limit = math.random(TUPLE_COUNT / 4) return string.format( "s.index['i%d']:select(%s, {iterator = '%s', limit = %d})", index, '{' .. table.concat(key, ', ') .. '}', dir, limit) end; --- ... for i = 1, TUPLE_COUNT do s:replace(gen_tuple()) end; --- ... tx_list = {}; --- ... for i = 1, TX_COUNT do local tx = {} tx.conn = txn_proxy.new() tx.conn:begin() tx.selects = {} for j = 1, SELECTS_PER_TX do local cmd = gen_select() local result = tx.conn(cmd)[1] setmetatable(result, nil) tx.selects[j] = {cmd = cmd, result = result} end tx_list[i] = tx end; --- ... conflict = s:replace(gen_tuple('new')); --- ... for i = 1, TX_COUNT do local tx = tx_list[i] tx.should_abort = false for j = 1, SELECTS_PER_TX do local sel = tx.selects[j] local result = loadstring('return ' .. sel.cmd)() if #result == #sel.result then for k, v in ipairs(result) do if cmp_tuple(v, sel.result[k]) ~= 0 then tx.should_abort = true break end end else tx.should_abort = true end end end; --- ... invalid = {}; --- ... for i = 1, TX_COUNT do local tx = tx_list[i] local v = tx.conn(string.format("s:get({%d, %d})", conflict[1], conflict[2]))[1] local was_aborted = false if v == nil or v[PAYLOAD_FIELD] == nil then was_aborted = true end if tx.should_abort ~= was_aborted then table.insert(invalid, tx) end tx.conn:commit() tx.conn = nil end; --- ... 
#invalid == 0 or {seed = seed, conflict = conflict, invalid = invalid}; --- - true ... s:drop(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... ---------------------------------------------------------------- c = nil --- ... c1 = nil --- ... c2 = nil --- ... c3 = nil --- ... c4 = nil --- ... c5 = nil --- ... c6 = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/replica_quota.test.lua0000664000000000000000000000350113306560010022630 0ustar rootroottest_run = require('test_run').new() box.schema.user.grant('guest', 'read,write,execute', 'universe') box.schema.user.grant('guest', 'replication') s = box.schema.space.create('test', { engine = 'vinyl' }) _ = s:create_index('pk', {run_count_per_level = 1}) -- Send > 2 MB to replica. pad = string.rep('x', 1100) for i = 1,1000 do s:insert{i, pad} end box.snapshot() for i = 1001,2000 do s:insert{i, pad} end -- Replica has memory limit set to 1 MB so replication would hang -- if the scheduler didn't work on the destination. -- -- Also check that quota timeout isn't taken into account while -- the replica is joining (see gh-2873). To do that, we set -- vinyl_timeout to 1 ms on the replica, which isn't enough for -- a dump to complete and hence would result in bootstrap failure -- were the timeout not ignored. -- _ = test_run:cmd("create server replica with rpl_master=default, script='vinyl/replica_quota.lua'") _ = test_run:cmd("start server replica") _ = test_run:wait_lsn('replica', 'default') -- Check vinyl_timeout is ignored on 'subscribe' (gh-3087). _ = test_run:cmd("stop server replica") for i = 2001,3000 do s:insert{i, pad} end _ = test_run:cmd("start server replica") _ = test_run:wait_lsn('replica', 'default') -- During join we remove compacted run files immediately (gh-3162). -- Check that we don't delete files that are still in use. 
_ = test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") box.snapshot() for i = 3001,4000 do s:insert{i, pad} end _ = test_run:cmd("start server replica") -- join _ = test_run:cmd("stop server replica") _ = test_run:cmd("start server replica") -- recovery _ = test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") s:drop() box.schema.user.revoke('guest', 'replication') box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/vinyl/info.lua0000664000000000000000000000021713306560010017756 0ustar rootroot#!/usr/bin/env tarantool box.cfg{ vinyl_cache = 15 * 1024, -- 15K to test cache eviction } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/partial_dump.result0000664000000000000000000000407213306560010022244 0ustar rootroot-- -- 1. Create a space which has more indexes that can be scheduled -- for dump simultaneously (> vinyl_write_threads). -- -- 2. Insert tuples and then update values of secondary keys. -- -- 3. Inject a dump error for a random index. Try to make a snapshot. -- -- 4. Restart and check the space. -- test_run = require('test_run').new() --- ... INDEX_COUNT = box.cfg.vinyl_write_threads * 3 --- ... assert(INDEX_COUNT < 100) --- - true ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... for i = 1, INDEX_COUNT do s:create_index('i' .. i, {parts = {i, 'unsigned'}}) end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function make_tuple(key, val) local tuple = {} tuple[1] = key for i = 2, INDEX_COUNT do tuple[i] = val * (i - 1) end return tuple end test_run:cmd("setopt delimiter ''"); --- ... for i = 1, 5 do s:insert(make_tuple(i, i)) end --- ... for i = 1, 5 do s:replace(make_tuple(i, i * 100)) end --- ... math.randomseed(os.time()) --- ... box.error.injection.set('ERRINJ_VY_INDEX_DUMP', math.random(INDEX_COUNT) - 1) --- - ok ... box.snapshot() --- - error: Error injection 'vinyl index dump' ... 
box.error.injection.set('ERRINJ_VY_INDEX_DUMP', -1) --- - ok ... test_run:cmd('restart server default') INDEX_COUNT = box.cfg.vinyl_write_threads * 3 --- ... assert(INDEX_COUNT < 100) --- - true ... s = box.space.test --- ... s:select() --- - - [1, 100, 200, 300, 400, 500, 600, 700, 800] - [2, 200, 400, 600, 800, 1000, 1200, 1400, 1600] - [3, 300, 600, 900, 1200, 1500, 1800, 2100, 2400] - [4, 400, 800, 1200, 1600, 2000, 2400, 2800, 3200] - [5, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000] ... bad_index = -1 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, INDEX_COUNT - 1 do if s:count() ~= s.index[i]:count() then bad_index = i end for _, v in s.index[i]:pairs() do if v ~= s:get(v[1]) then bad_index = i end end end test_run:cmd("setopt delimiter ''"); --- ... bad_index < 0 or {bad_index, s.index[bad_index]:select()} --- - true ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/compact.result0000664000000000000000000000266313306565107021231 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... space = box.schema.space.create("vinyl", { engine = 'vinyl' }) --- ... _= space:create_index('primary', { parts = { 1, 'unsigned' }, run_count_per_level = 2 }) --- ... function vyinfo() return box.space.vinyl.index.primary:info() end --- ... vyinfo().run_count == 0 --- - true ... -- create the frist run space:insert({1}) --- - [1] ... space:replace({1, 2}) --- - [1, 2] ... space:upsert({1},{{'=', 4, 5}}) -- bad upsert --- ... require('log').info(string.rep(" ", 1024)) --- ... space:select() --- - - [1, 2] ... space:select() --- - - [1, 2] ... -- gh-1571: bad upsert should not log on reads test_run:grep_log('default', 'UPSERT operation failed', 400) == nil --- - true ... box.snapshot() --- - ok ... vyinfo().run_count == 1 --- - true ... -- create the second run space:replace({2,2}) --- - [2, 2] ... space:upsert({2},{{'=',4,5}}) -- bad upsert --- ... box.snapshot() -- create the second run --- - ok ... 
vyinfo().run_count == 2 --- - true ... -- create a few more runs to trigger compaction space:insert({3, 3}) --- - [3, 3] ... box.snapshot() --- - ok ... -- wait for compaction while vyinfo().run_count >= 2 do fiber.sleep(0.1) end --- ... vyinfo().run_count == 1 --- - true ... -- gh-1571: bad upsert should log on compaction test_run:grep_log('default', 'UPSERT operation failed') ~= nil --- - true ... space:drop() --- ... fiber = nil --- ... test_run = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/tx_gap_lock.test.lua0000664000000000000000000003574613306565107022326 0ustar rootroottest_run = require('test_run').new() txn_proxy = require('txn_proxy') c = txn_proxy.new() c1 = txn_proxy.new() c2 = txn_proxy.new() c3 = txn_proxy.new() c4 = txn_proxy.new() c5 = txn_proxy.new() c6 = txn_proxy.new() ---------------------------------------------------------------- -- SELECT ALL ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') _ = s:insert{1} _ = s:insert{3} c:begin() c("s:select()") -- {1}, {3} _ = s:insert{2} -- send c to read view c("s:select()") -- {1}, {3} c:commit() s:truncate() ---------------------------------------------------------------- _ = s:insert{1} _ = s:insert{2} c:begin() c("s:select()") -- {1}, {2} _ = s:insert{3} -- send c to read view c("s:select()") -- {1}, {2} c:commit() s:truncate() ---------------------------------------------------------------- _ = s:insert{2} _ = s:insert{3} c:begin() c("s:select()") -- {2}, {3} _ = s:insert{1} -- send c to read view c("s:select()") -- {2}, {3} c:commit() s:truncate() ---------------------------------------------------------------- _ = s:insert{123} c1:begin() c2:begin() c1("s:select({}, {iterator = 'GT'})") -- {123} c2("s:select({}, {iterator = 'LT'})") -- {123} _ = s:replace{123, 456} -- send c1 and c2 to read view c1("s:select({}, {iterator = 'GT'})") -- {123} c2("s:select({}, {iterator = 'LT'})") -- {123} 
c1:commit() c2:commit() s:truncate() ---------------------------------------------------------------- -- SELECT GT/GE ---------------------------------------------------------------- _ = s:insert{10} _ = s:insert{20} _ = s:insert{30} c1:begin() c2:begin() c3:begin() c4:begin() c5:begin() c6:begin() c1("s:select({10}, {iterator = 'GE'})") -- {10}, {20}, {30} c2("s:select({10}, {iterator = 'GT'})") -- {20}, {30} c3("s:select({15}, {iterator = 'GE'})") -- {20}, {30} c4("s:select({15}, {iterator = 'GT'})") -- {20}, {30} c5("s:select({25}, {iterator = 'GE'})") -- {30} c6("s:select({30}, {iterator = 'GE'})") -- {30} _ = s:replace{10, 1} -- send c1 to read view c1("s:get(10)") -- {10} c2("s:get(10)") -- {10, 1} c3("s:get(10)") -- {10, 1} c4("s:get(10)") -- {10, 1} c5("s:get(10)") -- {10, 1} c6("s:get(10)") -- {10, 1} _ = s:replace{15, 2} -- send c2 and c3 to read view c2("s:get(15)") -- none c3("s:get(15)") -- none c4("s:get(15)") -- {15, 2} c5("s:get(15)") -- {15, 2} c6("s:get(15)") -- {15, 2} _ = s:replace{35, 3} -- send c4, c5, and c6 to read view c4("s:get(35)") -- none c5("s:get(35)") -- none c6("s:get(35)") -- none c1:commit() c2:commit() c3:commit() c4:commit() c5:commit() c6:commit() s:truncate() ---------------------------------------------------------------- -- SELECT LT/LE ---------------------------------------------------------------- _ = s:insert{10} _ = s:insert{20} _ = s:insert{30} c1:begin() c2:begin() c3:begin() c4:begin() c5:begin() c6:begin() c1("s:select({30}, {iterator = 'LE'})") -- {30}, {20}, {10} c2("s:select({30}, {iterator = 'LT'})") -- {20}, {10} c3("s:select({25}, {iterator = 'LE'})") -- {20}, {10} c4("s:select({25}, {iterator = 'LT'})") -- {20}, {10} c5("s:select({15}, {iterator = 'LE'})") -- {10} c6("s:select({10}, {iterator = 'LE'})") -- {10} _ = s:replace{30, 1} -- send c1 to read view c1("s:get(30)") -- {30} c2("s:get(30)") -- {30, 1} c3("s:get(30)") -- {30, 1} c4("s:get(30)") -- {30, 1} c5("s:get(30)") -- {30, 1} c6("s:get(30)") -- {30, 
1} _ = s:replace{25, 2} -- send c2 and c3 to read view c2("s:get(25)") -- none c3("s:get(25)") -- none c4("s:get(25)") -- {25, 2} c5("s:get(25)") -- {25, 2} c6("s:get(25)") -- {25, 2} _ = s:replace{5, 3} -- send c4, c5, and c6 to read view c4("s:get(5)") -- none c5("s:get(5)") -- none c6("s:get(5)") -- none c1:commit() c2:commit() c3:commit() c4:commit() c5:commit() c6:commit() s:truncate() ---------------------------------------------------------------- -- SELECT LIMIT ---------------------------------------------------------------- for i = 1, 9 do s:insert{i * 10} end c1:begin() c2:begin() c3:begin() c4:begin() c1("s:select({20}, {iterator = 'GE', limit = 3})") -- {20}, {30}, {40} c2("s:select({80}, {iterator = 'LE', limit = 3})") -- {80}, {70}, {60} c3("s:select({10}, {iterator = 'GE', limit = 3})") -- {10}, {20}, {30} c4("s:select({90}, {iterator = 'LE', limit = 3})") -- {90}, {80}, {70} _ = s:replace{50, 1} c1("s:get(50)") -- {50, 1} c2("s:get(50)") -- {50, 1} c3("s:get(50)") -- {50, 1} c4("s:get(50)") -- {50, 1} _ = s:replace{40, 2} -- send c1 to read view c1("s:get(40)") -- {40} c2("s:get(40)") -- {40, 2} c3("s:get(40)") -- {40, 2} c4("s:get(40)") -- {40, 2} _ = s:replace{60, 3} -- send c2 to read view c2("s:get(60)") -- {60} c3("s:get(60)") -- {60, 3} c4("s:get(60)") -- {60, 3} _ = s:replace{25, 4} -- send c3 to read view c3("s:get(25)") -- none c4("s:get(25)") -- {25, 4} _ = s:replace{75, 5} -- send c4 to read view c4("s:get(75)") -- none c1:commit() c2:commit() c3:commit() c4:commit() s:drop() ---------------------------------------------------------------- -- SELECT EQ/REQ ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {parts = {1, 'unsigned', 2, 'unsigned'}}) _ = s:insert{1, 1} _ = s:insert{2, 1} _ = s:insert{2, 2} _ = s:insert{2, 3} _ = s:insert{3, 3} c1:begin() c2:begin() c1("s:select({2}, {iterator = 'EQ'})") -- {2, 1}, {2, 2}, {2, 3} c2("s:select({2}, 
{iterator = 'REQ'})") -- {2, 3}, {2, 2}, {2, 1} _ = s:replace{1, 10} c1("s:select({1})") -- {1, 1}, {1, 10} c2("s:select({1})") -- {1, 1}, {1, 10} _ = s:replace{3, 30} c1("s:get({3, 30})") -- {3, 30} c2("s:get({3, 30})") -- {3, 30} _ = s:replace{2, 20} -- send c1 and c2 to read view c1("s:select({2}, {iterator = 'EQ'})") -- {2, 1}, {2, 2}, {2, 3} c2("s:select({2}, {iterator = 'REQ'})") -- {2, 3}, {2, 2}, {2, 1} c1:commit() c2:commit() s:drop() ---------------------------------------------------------------- -- Interval merging ---------------------------------------------------------------- function gap_lock_count() return box.info.vinyl().tx.gap_locks end s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') _ = s:insert{10} _ = s:insert{20} _ = s:insert{30} _ = s:insert{40} gap_lock_count() -- 0 c:begin() c("s:select({10}, {iterator = 'GE', limit = 4})") -- locks [10, 40] gap_lock_count() -- 1 c("s:select({15}, {iterator = 'GE', limit = 2})") -- locks [15, 30] gap_lock_count() -- 1 c("s:select({35}, {iterator = 'LE', limit = 2})") -- locks [20, 35] gap_lock_count() -- 1 c("s:select({5}, {iterator = 'GT', limit = 2})") -- locks (5, 20] gap_lock_count() -- 1 c("s:select({45}, {iterator = 'LT', limit = 2})") -- locks [30, 45) gap_lock_count() -- 1 _ = s:insert{5} _ = s:insert{45} c("s:get(5)") -- {5} c("s:get(45)") -- {45} _ = s:insert{25} -- send c to read view c("s:get(25)") -- none c:commit() s:truncate() ---------------------------------------------------------------- _ = s:insert{10} _ = s:insert{20} _ = s:insert{30} _ = s:insert{40} gap_lock_count() -- 0 c:begin() c("s:select({1}, {iterator = 'GT', limit = 1})") -- locks (1, 10] c("s:select({50}, {iterator = 'LT', limit = 1})") -- locks [40, 50) c("s:select({20}, {iterator = 'GE', limit = 2})") -- locks [20, 30] gap_lock_count() -- 3 c("s:select({5}, {iterator = 'GT', limit = 4})") -- locks (5, 40] gap_lock_count() -- 1 _ = s:insert{1} _ = s:insert{50} c("s:get(1)") -- {1} 
c("s:get(50)") -- {50} _ = s:insert{5} -- send c to read view c("s:get(5)") -- none c:commit() s:truncate() ---------------------------------------------------------------- _ = s:insert{100} gap_lock_count() -- 0 c:begin() c("s:select({100}, {iterator = 'GT'})") -- locks (100, +inf) c("s:select({100}, {iterator = 'LT'})") -- locks (-inf, 100) gap_lock_count() -- 2 c("s:get(100)") -- locks [100] gap_lock_count() -- 1 _ = s:insert{1000} -- send c to read view c("s:get(1000)") -- none c:commit() s:truncate() ---------------------------------------------------------------- _ = s:insert{1, 0} _ = s:insert{2, 0} _ = s:insert{3, 0} _ = s:insert{4, 0} gap_lock_count() -- 0 c:begin() c("s:select({1}, {iterator = 'GE', limit = 2})") -- locks [1, 2] c("s:select({2}, {iterator = 'GT', limit = 2})") -- locks (2, 4] gap_lock_count() -- 1 c:commit() s:drop() ---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {parts = {1, 'unsigned', 2, 'unsigned'}}) gap_lock_count() -- 0 c1:begin() c2:begin() c3:begin() c4:begin() c1("s:select({100}, {iterator = 'GE'})") -- c1: locks [{100}, +inf) c1("s:select({100, 100}, {iterator = 'GE'})") -- c1: locks [{100, 100}, +inf) c2("s:select({100}, {iterator = 'GE'})") -- c2: locks [{100}, +inf) c2("s:select({100, 100}, {iterator = 'GT'})") -- c2: locks ({100, 100}, +inf) c3("s:select({100}, {iterator = 'GT'})") -- c3: locks ({100}, +inf) c3("s:select({100, 100}, {iterator = 'GE'})") -- c3: locks [{100, 100}, +inf) c4("s:select({100}, {iterator = 'GT'})") -- c4: locks ({100}, +inf) c4("s:select({100, 100}, {iterator = 'GT'})") -- c4: locks ({100, 100}, +inf) gap_lock_count() -- 4 _ = s:insert{100, 50} -- send c1 and c2 to read view c1("s:get({100, 50})") -- none c2("s:get({100, 50})") -- none c3("s:get({100, 50})") -- {100, 50} c4("s:get({100, 50})") -- {100, 50} gap_lock_count() -- 6; new intervals: c3:[{100, 50}], c4:[{100, 50}] _ = s:insert{100, 100} -- 
send c3 to read view c3("s:get({100, 100})") -- none c4("s:get({100, 100})") -- {100, 100} gap_lock_count() -- 6; c4:[{100, 100}] is merged with c4:({100, 100}, +inf) _ = s:insert{100, 101} -- send c4 to read view c4("s:get({100, 101})") -- none gap_lock_count() -- 6 c1:commit() c2:commit() c3:commit() c4:commit() s:truncate() ---------------------------------------------------------------- gap_lock_count() -- 0 c1:begin() c2:begin() c3:begin() c4:begin() c1("s:select({100}, {iterator = 'LE'})") -- c1: locks (-inf, {100}] c1("s:select({100, 100}, {iterator = 'LE'})") -- c1: locks (-inf, {100, 100}] c2("s:select({100}, {iterator = 'LE'})") -- c2: locks (-inf, {100}] c2("s:select({100, 100}, {iterator = 'LT'})") -- c2: locks (-inf, {100, 100}) c3("s:select({100}, {iterator = 'LT'})") -- c3: locks (-inf, {100}) c3("s:select({100, 100}, {iterator = 'LE'})") -- c3: locks (-inf, {100, 100}] c4("s:select({100}, {iterator = 'LT'})") -- c4: locks (-inf, {100}) c4("s:select({100, 100}, {iterator = 'LT'})") -- c4: locks (-inf, {100, 100}) gap_lock_count() -- 4 _ = s:insert{100, 150} -- send c1 and c2 to read view c1("s:get({100, 150})") -- none c2("s:get({100, 150})") -- none c3("s:get({100, 150})") -- {100, 150} c4("s:get({100, 150})") -- {100, 150} gap_lock_count() -- 6; new intervals: c3:[{100, 150}], c4:[{100, 150}] _ = s:insert{100, 100} -- send c3 to read view c3("s:get({100, 100})") -- none c4("s:get({100, 100})") -- {100, 100} gap_lock_count() -- 6; c4:[{100, 100}] is merged with c4:[-inf, {100, 100}) _ = s:insert{100, 99} -- send c4 to read view c4("s:get({100, 99})") -- none gap_lock_count() -- 6 c1:commit() c2:commit() c3:commit() c4:commit() s:drop() ---------------------------------------------------------------- -- gh-2534: Iterator over a secondary index doesn't double track -- results in the primary index. 
---------------------------------------------------------------- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {parts = {1, 'unsigned'}}) _ = s:create_index('sk', {parts = {2, 'unsigned'}}) for i = 1, 100 do s:insert{i, i} end box.begin() gap_lock_count() -- 0 _ = s.index.sk:select({}, {limit = 50}) gap_lock_count() -- 1 for i = 1, 100 do s.index.sk:get(i) end gap_lock_count() -- 51 _ = s.index.sk:select() gap_lock_count() -- 1 box.commit() gap_lock_count() -- 0 s:drop() gap_lock_count = nil ---------------------------------------------------------------- -- Randomized stress test -- -- The idea behind the test is simple: execute several random -- selects from a bunch of transactions, then insert a random -- value to the space and check that only those transactions -- that would actually read the new value were sent to read -- view. ---------------------------------------------------------------- test_run:cmd("setopt delimiter ';'") seed = os.time(); math.randomseed(seed); INDEX_COUNT = 3; TUPLE_COUNT = 100; TX_COUNT = 20; SELECTS_PER_TX = 5; PAYLOAD_FIELD = INDEX_COUNT * 2 + 1; MAX_VAL = {[1] = 15, [2] = 10, [3] = 5}; assert(#MAX_VAL == INDEX_COUNT); s = box.schema.space.create('test', {engine = 'vinyl'}); for i = 1, INDEX_COUNT do s:create_index('i' .. 
i, {unique = (i == 1), parts = {i * 2 - 1, 'unsigned', i * 2, 'unsigned'}}) end; function gen_tuple(payload) local t = {} for i = 1, INDEX_COUNT do t[i * 2 - 1] = math.random(MAX_VAL[i]) t[i * 2] = math.random(MAX_VAL[i]) end table.insert(t, payload) return t end; function cmp_tuple(t1, t2) for i = 1, PAYLOAD_FIELD do if t1[i] ~= t2[i] then return t1[i] > t2[i] and 1 or -1 end end return 0 end; function gen_select() local index = math.random(INDEX_COUNT) local key = {} if math.random(100) > 10 then key[1] = math.random(MAX_VAL[index]) if math.random(100) > 50 then key[2] = math.random(MAX_VAL[index]) end end local iterator_types = {'EQ', 'REQ', 'LE', 'LT', 'GE', 'GT'} local dir = iterator_types[math.random(#iterator_types)] local limit = math.random(TUPLE_COUNT / 4) return string.format( "s.index['i%d']:select(%s, {iterator = '%s', limit = %d})", index, '{' .. table.concat(key, ', ') .. '}', dir, limit) end; for i = 1, TUPLE_COUNT do s:replace(gen_tuple()) end; tx_list = {}; for i = 1, TX_COUNT do local tx = {} tx.conn = txn_proxy.new() tx.conn:begin() tx.selects = {} for j = 1, SELECTS_PER_TX do local cmd = gen_select() local result = tx.conn(cmd)[1] setmetatable(result, nil) tx.selects[j] = {cmd = cmd, result = result} end tx_list[i] = tx end; conflict = s:replace(gen_tuple('new')); for i = 1, TX_COUNT do local tx = tx_list[i] tx.should_abort = false for j = 1, SELECTS_PER_TX do local sel = tx.selects[j] local result = loadstring('return ' .. 
sel.cmd)() if #result == #sel.result then for k, v in ipairs(result) do if cmp_tuple(v, sel.result[k]) ~= 0 then tx.should_abort = true break end end else tx.should_abort = true end end end; invalid = {}; for i = 1, TX_COUNT do local tx = tx_list[i] local v = tx.conn(string.format("s:get({%d, %d})", conflict[1], conflict[2]))[1] local was_aborted = false if v == nil or v[PAYLOAD_FIELD] == nil then was_aborted = true end if tx.should_abort ~= was_aborted then table.insert(invalid, tx) end tx.conn:commit() tx.conn = nil end; #invalid == 0 or {seed = seed, conflict = conflict, invalid = invalid}; s:drop(); test_run:cmd("setopt delimiter ''"); ---------------------------------------------------------------- c = nil c1 = nil c2 = nil c3 = nil c4 = nil c5 = nil c6 = nil tarantool_1.9.1.26.g63eb81e3c/test/vinyl/select_consistency.test.lua0000664000000000000000000000437613306560010023713 0ustar rootroottest_run = require('test_run').new() fiber = require 'fiber' math.randomseed(os.time()) s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {parts = {1, 'unsigned'}, page_size = 64, range_size = 256}) _ = s:create_index('i1', {unique = true, parts = {2, 'unsigned', 3, 'unsigned'}, page_size = 64, range_size = 256}) _ = s:create_index('i2', {unique = true, parts = {2, 'unsigned', 4, 'unsigned'}, page_size = 64, range_size = 256}) -- -- If called from a transaction, i1:select({k}) and i2:select({k}) -- must yield the same result. Let's check that under a stress load. 
-- MAX_KEY = 100 MAX_VAL = 10 PADDING = string.rep('x', 100) test_run:cmd("setopt delimiter ';'") function gen_insert() pcall(s.insert, s, {math.random(MAX_KEY), math.random(MAX_VAL), math.random(MAX_VAL), math.random(MAX_VAL), PADDING}) end; function gen_delete() pcall(s.delete, s, math.random(MAX_KEY)) end; function gen_update() pcall(s.update, s, math.random(MAX_KEY), {{'+', 5, 1}}) end; function dml_loop() while not stop do gen_insert() gen_update() gen_delete() fiber.sleep(0) end ch:put(true) end; function snap_loop() while not stop do box.snapshot() fiber.sleep(0.1) end ch:put(true) end; stop = false; ch = fiber.channel(3); _ = fiber.create(dml_loop); _ = fiber.create(dml_loop); _ = fiber.create(snap_loop); failed = {}; for i = 1, 10000 do local val = math.random(MAX_VAL) box.begin() local res1 = s.index.i1:select({val}) local res2 = s.index.i2:select({val}) box.commit() local equal = true if #res1 == #res2 then for _, t1 in ipairs(res1) do local found = false for _, t2 in ipairs(res2) do if t1[1] == t2[1] then found = true break end end if not found then equal = false break end end else equal = false end if not equal then table.insert(failed, {res1, res2}) end fiber.sleep(0) end; stop = true; for i = 1, ch:size() do ch:get() end; test_run:cmd("setopt delimiter ''"); #failed == 0 or failed s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/write_iterator_rand.result0000664000000000000000000001030013306560010023621 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function clean_space(sp, cnt) for i = 1, cnt do sp:delete({i}) end box.snapshot() return sp:count() == 0 end; --- ... function check_tuples_len(sp, cnt, len) for i = 1, cnt do if not (#sp:get({i}) == len) then return false end end return true end; --- ... 
function fill_space(sp, cnt) local err = 'delete after upsert error' for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end for i = 1, cnt do sp:delete({i}) end box.snapshot() if not (sp:count() == 0) then return err end err = 'upsert after delete error' for i = 1, cnt do sp:insert({i}) end for i = 1, cnt do sp:delete({i}) end for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not (sp:count() == cnt) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'upsert before upsert error' for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'replace before upsert error' for i = 1, cnt do sp:replace({i}) end for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'upsert before replace error' for i = 1, cnt do sp:upsert({i, i}, {{'!', 2, i}}) end for i = 1, cnt do sp:replace({i}) end box.snapshot() if not check_tuples_len(sp, cnt, 1) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'delete before replace error' for i = 1, cnt do sp:insert({i}) end box.snapshot() for i = 1, cnt do sp:delete({i}) end for i = 1, cnt do sp:replace({i, i}) end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'replace before delete error' for i = 1, cnt do sp:replace({i}) end for i = 1, cnt do sp:delete({i}) end box.snapshot() if not (sp:count() == 0) then return err end err = 'replace before replace error' for i = 1, cnt do sp:replace({i}) end for i = 1, cnt do sp:replace({i, i}) 
end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'single upserts error' for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not check_tuples_len(sp, cnt, 1) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'single replaces error' for i = 1, cnt do sp:replace({i}) end box.snapshot() if not check_tuples_len(sp, cnt, 1) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end return 'ok' end; --- ... function fill_space_with_sizes(page_size, range_size, cnt) local space = box.schema.space.create('test', { engine = 'vinyl' }) local pk = space:create_index('primary', { page_size = page_size, range_size = range_size }) local ret = fill_space(space, cnt) space:drop() return ret end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- Tests on write iterator with random combinations of page_size and range_size page_size = math.random(128, 256) --- ... range_size = page_size * math.random(10, 20) --- ... fill_space_with_sizes(page_size, range_size, 300) --- - ok ... page_size = math.random(256, 512) --- ... range_size = page_size * math.random(10, 20) --- ... fill_space_with_sizes(page_size, range_size, 500) --- - ok ... page_size = math.random(512, 1024) --- ... range_size = page_size * math.random(10, 20) --- ... fill_space_with_sizes(page_size, range_size, 700) --- - ok ... page_size = math.random(1024, 2048) --- ... range_size = page_size * math.random(10, 20) --- ... fill_space_with_sizes(page_size, range_size, 900) --- - ok ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/on_replace.result0000664000000000000000000002626013306560010021675 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... fail = false --- ... old_tuple = nil --- ... new_tuple = nil --- ... 
function on_replace(old_tuple_, new_tuple_) if fail then old_tuple = nil new_tuple = nil error('fail') else old_tuple = old_tuple_ new_tuple = new_tuple_ end end --- ... -- on insert one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... tmp = space:on_replace(on_replace) --- ... space:insert({6, 'f'}) --- - [6, 'f'] ... old_tuple, new_tuple --- - null - [6, 'f'] ... index:select{} --- - - [6, 'f'] ... fail = true --- ... space:insert({7, 'g'}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [6, 'f'] ... space:drop() --- ... fail = false --- ... -- on insert in multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) --- ... tmp = space:on_replace(on_replace) --- ... space:insert({1, 2}) --- - [1, 2] ... old_tuple, new_tuple --- - null - [1, 2] ... index:select{} --- - - [1, 2] ... index2:select{} --- - - [1, 2] ... fail = true --- ... space:insert({2, 3}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [1, 2] ... index2:select{} --- - - [1, 2] ... space:drop() --- ... fail = false --- ... -- on replace in one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:replace({1}) --- - [1] ... tmp = space:on_replace(on_replace) --- ... space:replace({2}) --- - [2] ... old_tuple, new_tuple --- - null - [2] ... space:replace({2}) --- - [2] ... old_tuple, new_tuple --- - [2] - [2] ... space:replace({1, 43}) --- - [1, 43] ... old_tuple, new_tuple --- - [1] - [1, 43] ... fail = true --- ... 
space:replace({2, 100}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... space:select{} --- - - [1, 43] - [2] ... fail = false --- ... space:drop() --- ... -- ensure trigger error causes rollback of only one statement fail = true --- ... space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'string'} }) --- ... box.begin() --- ... space:insert({1, 'a'}) --- - [1, 'a'] ... space:insert({2, 'a'}) --- - error: Duplicate key exists in unique index 'secondary' in space 'test_space' ... space:insert({1, 'b'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test_space' ... space:insert({2, 'b'}) --- - [2, 'b'] ... tmp = space:on_replace(on_replace) --- ... space:insert({3, 'c'}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... box.commit() --- ... index:select{} --- - - [1, 'a'] - [2, 'b'] ... index2:select{} --- - - [1, 'a'] - [2, 'b'] ... fail = false --- ... space:drop() --- ... -- on replace in multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) --- ... tmp = space:on_replace(on_replace) --- ... space:replace({1, 'a'}) --- - [1, 'a'] ... space:replace({2, true}) --- - [2, true] ... space:replace({3, 36.6}) --- - [3, 36.6] ... tmp = space:on_replace(on_replace) --- ... space:replace({4, 4}) --- - [4, 4] ... old_tuple, new_tuple --- - null - [4, 4] ... space:replace({5, 5}) --- - [5, 5] ... old_tuple, new_tuple --- - null - [5, 5] ... space:replace({4, 5}) --- - error: Duplicate key exists in unique index 'secondary' in space 'test_space' ... old_tuple, new_tuple --- - null - [5, 5] ... 
space:replace({5, 6, 60}) --- - [5, 6, 60] ... old_tuple, new_tuple --- - [5, 5] - [5, 6, 60] ... fail = true --- ... space:replace({10, 10}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [1, 'a'] - [2, true] - [3, 36.6] - [4, 4] - [5, 6, 60] ... index2:select{} --- - - [2, true] - [4, 4] - [5, 6, 60] - [3, 36.6] - [1, 'a'] ... fail = false --- ... space:drop() --- ... -- on delete from one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1, 2}) --- - [1, 2] ... space:insert({2, 3, 4}) --- - [2, 3, 4] ... space:insert({3, 4, 5}) --- - [3, 4, 5] ... space:insert({4}) --- - [4] ... tmp = space:on_replace(on_replace) --- ... index:delete({3}) --- ... old_tuple, new_tuple --- - [3, 4, 5] - null ... index:delete({4}) --- ... old_tuple, new_tuple --- - [4] - null ... fail = true --- ... index:delete({1}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [1, 2] - [2, 3, 4] ... fail = false --- ... space:drop() --- ... -- on delete from multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) --- ... space:insert({1, 'a'}) --- - [1, 'a'] ... space:insert({2, 2, 'b'}) --- - [2, 2, 'b'] ... space:insert({3, 30.3}) --- - [3, 30.3] ... space:insert({4, false}) --- - [4, false] ... tmp = space:on_replace(on_replace) --- ... index:delete({1}) --- ... old_tuple, new_tuple --- - [1, 'a'] - null ... index2:delete({30.3}) --- ... old_tuple, new_tuple --- - [3, 30.3] - null ... fail = true --- ... index2:delete({false}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... 
old_tuple, new_tuple --- - null - null ... index:select{} --- - - [2, 2, 'b'] - [4, false] ... index2:select{} --- - - [4, false] - [2, 2, 'b'] ... fail = false --- ... space:drop() --- ... -- on update one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1, 2}) --- - [1, 2] ... space:insert({2, 3, 4}) --- - [2, 3, 4] ... space:insert({3, 4, 5}) --- - [3, 4, 5] ... space:insert({4}) --- - [4] ... tmp = space:on_replace(on_replace) --- ... index:update({1}, {{'#', 2, 1}}) --- - [1] ... old_tuple, new_tuple --- - [1, 2] - [1] ... index:update({2}, {{'#', 1, 1}}) -- must fail --- - error: Attempt to modify a tuple field which is part of index 'primary' in space 'test_space' ... old_tuple, new_tuple --- - [1, 2] - [1] ... index:update({3}, {{'=', 4, '300'}}) --- - [3, 4, 5, '300'] ... old_tuple, new_tuple --- - [3, 4, 5] - [3, 4, 5, '300'] ... index:update({20}, {{'+', 2, 5}}) --- ... old_tuple, new_tuple --- - [3, 4, 5] - [3, 4, 5, '300'] ... fail = true --- ... index:update({1}, {{'=', 2, 'one'}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [1] - [2, 3, 4] - [3, 4, 5, '300'] - [4] ... fail = false --- ... space:drop() --- ... -- on update multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) --- ... space:insert({1, 'a'}) --- - [1, 'a'] ... space:insert({2, 2, 'b'}) --- - [2, 2, 'b'] ... space:insert({3, 30.3}) --- - [3, 30.3] ... space:insert({4, false}) --- - [4, false] ... tmp = space:on_replace(on_replace) --- ... index:update({1}, {{'=', 2, 'z'}}) --- - [1, 'z'] ... old_tuple, new_tuple --- - [1, 'a'] - [1, 'z'] ... 
index:update({2}, {{'+', 1, 1}}) --- - error: Attempt to modify a tuple field which is part of index 'primary' in space 'test_space' ... old_tuple, new_tuple --- - [1, 'a'] - [1, 'z'] ... index2:update({30.3}, {{'+', 2, 10}}) --- - [3, 40.3] ... old_tuple, new_tuple --- - [3, 30.3] - [3, 40.3] ... index2:update({false}, {{'=', 3, 'equal false'}}) --- - [4, false, 'equal false'] ... old_tuple, new_tuple --- - [4, false] - [4, false, 'equal false'] ... fail = true --- ... index:update({1}, {{'=', 2, 'a'}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index2:update({2}, {{'-', 2, 10}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [1, 'z'] - [2, 2, 'b'] - [3, 40.3] - [4, false, 'equal false'] ... index2:select{} --- - - [4, false, 'equal false'] - [2, 2, 'b'] - [3, 40.3] - [1, 'z'] ... fail = false --- ... space:drop() --- ... -- on upsert one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1, 1}) --- - [1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3}) --- - [3] ... tmp = space:on_replace(on_replace) --- ... space:upsert({1}, {{'+', 2, 10}}) --- ... old_tuple, new_tuple --- - [1, 1] - [1, 11] ... space:upsert({4, 4, 4, 4}, {{'!', 2, 400}}) --- ... old_tuple, new_tuple --- - null - [4, 4, 4, 4] ... fail = true --- ... space:upsert({2}, {{'!', 2, 2}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... space:upsert({5, 5, 5}, {{'!', 2, 5}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... index:select{} --- - - [1, 11] - [2, 2, 2] - [3] - [4, 4, 4, 4] ... fail = false --- ... space:drop() --- ... 
-- on upsert multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) --- ... index = space:create_index('primary', { parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { parts = {2, 'unsigned', 3, 'unsigned'} }) --- ... index3 = space:create_index('third', { parts = {3, 'unsigned'}, unique = false }) --- ... space:insert({1, 1, 1}) --- - [1, 1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3, 3, 3}) --- - [3, 3, 3] ... tmp = space:on_replace(on_replace) --- ... space:upsert({1, 1, 1}, {{'+', 3, 1}}) --- ... old_tuple, new_tuple --- - [1, 1, 1] - [1, 1, 2] ... space:upsert({1, 1, 1}, {{'+', 2, 1}}) -- must fail --- ... old_tuple, new_tuple --- - [1, 1, 2] - [1, 2, 2] ... space:upsert({4, 4, 4}, {{'!', 4, 400}}) --- ... old_tuple, new_tuple --- - null - [4, 4, 4] ... index:select{} --- - - [1, 1, 2] - [2, 2, 2] - [3, 3, 3] - [4, 4, 4] ... index2:select{} --- - - [1, 1, 2] - [2, 2, 2] - [3, 3, 3] - [4, 4, 4] ... index3:select{} --- - - [1, 1, 2] - [2, 2, 2] - [3, 3, 3] - [4, 4, 4] ... fail = true --- ... space:upsert({2, 2, 2}, {{'!', 4, 200}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... space:upsert({5, 5, 5}, {{'!', 4, 500}}) --- - error: '[string "function on_replace(old_tuple_, new_tuple_) i..."]:1: fail' ... old_tuple, new_tuple --- - null - null ... fail = false --- ... space:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/gh.test.lua0000664000000000000000000002264613306560010020411 0ustar rootrootfiber = require('fiber') env = require('test_run') test_run = env.new() -- gh-283: hang after three creates and drops s = box.schema.space.create('space0', {engine='vinyl'}) i = s:create_index('space0', {type = 'tree', parts = {1, 'string'}}) s:insert{'a', 'b', 'c'} s:drop() s = box.schema.space.create('space0', {engine='vinyl'}) i = s:create_index('space0', {type = 'tree', parts = {1, 'string'}}) s:insert{'a', 'b', 'c'} t = s.index[0]:select({}, {iterator = box.index.ALL}) t s:drop() s = box.schema.space.create('space0', {engine='vinyl'}) i = s:create_index('space0', {type = 'tree', parts = {1, 'string'}}) s:insert{'a', 'b', 'c'} t = s.index[0]:select({}, {iterator = box.index.ALL}) t s:drop() -- gh-280: crash if insert without index s = box.schema.space.create('test', {engine='vinyl'}) s:insert{'a'} s:drop() -- gh-436: No error when creating temporary vinyl space s = box.schema.space.create('tester',{engine='vinyl', temporary=true}) -- gh-432: ignored limit s = box.schema.space.create('tester',{engine='vinyl'}) i = s:create_index('vinyl_index', {}) for v=1, 100 do s:insert({v}) end t = s:select({''},{iterator='GT', limit =1}) t t = s:select({},{iterator='GT', limit =1}) t s:drop() s = box.schema.space.create('tester', {engine='vinyl'}) i = s:create_index('vinyl_index', {type = 'tree', parts = {1, 'string'}}) for v=1, 100 do s:insert({tostring(v)}) end t = s:select({''},{iterator='GT', limit =1}) t t = s:select({},{iterator='GT', limit =1}) t s:drop() -- gh-681: support or produce error on space::alter s = box.schema.space.create('M', {engine='vinyl'}) i = s:create_index('primary',{}) s:insert{5, 5} s.index.primary:alter({parts={2,'unsigned'}}) s:drop() -- gh-1008: assertion if insert of wrong type s = box.schema.space.create('t', {engine='vinyl'}) i = s:create_index('primary',{parts={1, 'string'}}) box.space.t:insert{1,'A'} s:drop() -- gh-1009: 
search for empty string fails s = box.schema.space.create('t', {engine='vinyl'}) i = s:create_index('primary',{parts={1, 'string'}}) s:insert{''} #i:select{''} i:get{''} s:drop() -- gh-1407: upsert generate garbage data email_space_id = 'email' email_space = box.schema.space.create(email_space_id, { engine = 'vinyl', if_not_exists = true }) i = email_space:create_index('primary', { parts = {1, 'string'} }) time = 1234 email = "test@domain.com" email_hash_index = "asdfasdfs" box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', 
-1, email_hash_index}, {'!', -1, time}}) box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) box.space.email:select{email} box.space.email:drop() --gh-1540: vinyl: invalid results from LE/LT iterators s = box.schema.space.create('test', { engine = 'vinyl' }) i = box.space.test:create_index('primary', { parts = { 1, 'unsigned', 2, 'unsigned' } }) for i =1,2 do for j=1,9 do box.space.test:replace({i, j}) end end box.space.test:select({1, 999999}, {iterator = 'LE'}) box.space.test:drop() s1 = box.schema.create_space('s1',{engine='vinyl'}) i1 = s1:create_index('primary',{parts={1,'unsigned',2,'unsigned'}}) s2 = box.schema.create_space('s2',{engine='memtx'}) i2 = s2:create_index('primary',{parts={1,'unsigned',2,'unsigned'}}) for i = 1,3 do for j = 1,5 do s1:insert{i, j} s2:insert{i, j} end end itrs = {'GE', 'GT', 'LE', 'LT'} good = true test_run:cmd("setopt delimiter ';'") function my_equal(a, b) if type(a) ~= type(b) then return false elseif type(a) ~= 'table' and not box.tuple.is(a) then return a == b end for k,v in pairs(a) do if not my_equal(b[k], v) then return false end end for k,v in pairs(b) do if not my_equal(a[k], v) then return false end end return true end; for i = 0,4 do for j = 0,6 do for k = 1,4 do opts = {iterator=itrs[k]} if not my_equal(s1:select({i, j}, opts), s2:select({i, j}, opts)) then good = false end end end end; test_run:cmd("setopt delimiter ''"); good s1:drop() s2:drop() -- -- gh-1608: tuple disappears after invalid upsert -- s = box.schema.create_space('test', {engine = 'vinyl'}) _ = s:create_index('test', {type = 'tree', parts = {1, 'unsigned', 2, 'string'}}) s:put({1, 'test', 3, 4}) s:select() s:upsert({1, 'test', 'failed'}, {{'=', 3, 33}, {'=', 4, nil}}) s:select() s:drop() -- -- gh-1684: vinyl: infinite cycle on box.snapshot() -- -- Create and drop several indices space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary') index2 = 
space:create_index('secondary', { parts = {2, 'string'} }) index3 = space:create_index('third', { parts = {3, 'string'}, unique = false }) index2:drop() index2 = space:create_index('secondary', { parts = {4, 'string'} }) index3:drop() index2:drop() index2 = space:create_index('secondary', { parts = {2, 'string'} }) index3 = space:create_index('third', { parts = {3, 'string'}, unique = false }) index4 = space:create_index('fourth', { parts = {2, 'string', 3, 'string'} }) space:drop() space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) box.snapshot() space:drop() -- -- gh-1658: auto_increment -- space = box.schema.space.create('tweedledum', { engine = 'vinyl' }) _ = space:create_index('primary') space:auto_increment{'a'} space:auto_increment{'b'} space:auto_increment{'c'} space:select{} space:truncate() space:auto_increment{'a'} space:auto_increment{'b'} space:auto_increment{'c'} space:select{} space:delete{2} space:auto_increment{'d'} space:select{} space:drop() -- -- Truncate basic test -- -- truncate s = box.schema.space.create('name_of_space', {engine='vinyl'}) i = s:create_index('name_of_index', {type = 'tree', parts = {1, 'string'}}) s:insert{'a', 'b', 'c'} s:select{'a'} s:truncate() s:select{} s:insert{'b', 'c', 'd'} s:select{} s:truncate() s:select{} s:drop() -- -- gh-1725: vinyl: merge iterator can't merge more than two runs -- s0 = box.schema.space.create('tweedledum', {engine = 'vinyl'}) i0 = s0:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) -- integer keys s0:replace{1, 'tuple'} box.snapshot() s0:replace{2, 'tuple 2'} box.snapshot() s0:insert{3, 'tuple 3'} s0.index['primary']:get{1} s0.index['primary']:get{2} s0.index['primary']:get{3} s0:drop() -- -- gh-2081: snapshot hang -- s = box.schema.space.create('tweedledum', {engine='vinyl'}) i = s:create_index('primary') _ = s:insert{1} _ = fiber.create(function() fiber.sleep(0.001) s:insert{2} end) 
box.snapshot() s:drop() s = box.schema.space.create("test", {engine='vinyl'}) i1 = box.space.test:create_index('i1', {parts = {1, 'unsigned'}}) i2 = box.space.test:create_index('i2', {unique = false, parts = {2, 'unsigned'}}) count = 10000 test_run:cmd("setopt delimiter ';'") box.begin() for i = 1, count do s:replace({math.random(count), math.random(count)}) if i % 100 == 0 then box.commit() box.begin() end end box.commit() test_run:cmd("setopt delimiter ''"); s.index.i1:count() == s.index.i2:count() s:drop() -- https://github.com/tarantool/tarantool/issues/2588 max_tuple_size = box.cfg.vinyl_max_tuple_size box.cfg { vinyl_max_tuple_size = 40 * 1024 * 1024 } s = box.schema.space.create('vinyl', { engine = 'vinyl' }) i = box.space.vinyl:create_index('primary') _ = s:replace({1, string.rep('x', 35 * 1024 * 1024)}) s:drop() box.cfg { vinyl_max_tuple_size = max_tuple_size } -- https://github.com/tarantool/tarantool/issues/2614 count = 10000 s = box.schema.space.create("test", {engine='vinyl'}) _ = s:create_index('pk') cont = true finished = 0 test_run:cmd("setopt delimiter ';'") _ = fiber.create(function() while cont do s:select(math.random(count), {iterator = box.index.LE, limit = 10}) fiber.sleep(0.01) end finished = finished + 1 end); _ = fiber.create(function() while cont do box.snapshot() fiber.sleep(0.01) end finished = finished + 1 end); for i = 1, count do s:replace{math.random(count)} end; test_run:cmd("setopt delimiter ''"); cont = false while finished ~= 2 do fiber.sleep(0.01) end s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/tx_serial.result0000664000000000000000000002137513306560010021562 0ustar rootroot-- The test runs loop of given number of rounds. -- Every round does the following: -- The test starts several concurrent transactions in vinyl. -- The transactions make some read/write operations over several keys in -- a random order and commit at a random moment. 
-- After that all transactions are sorted in order of commit -- With the sublist of read-write transactions committed w/o conflict: -- Test tries to make these transactions in memtex, one tx after another, -- without interleaving and compares select results with vinyl to make sure -- if the transaction could be serialized in order of commit or not -- With the sublist of read-write transactions committed with conflict: -- Test does nothing -- With the sublist of read only transactions: -- Test tries to insert these transactions between other transactions and checks -- that it possible to get same results. test_run = require('test_run').new() --- ... txn_proxy = require('txn_proxy') --- ... --settings num_tx = 10 --number of concurrent transactions --- ... num_key = 5 --number of keys that transactions use --- ... num_tests = 60 --number of test rounds to run --- ... txs = {} --- ... order_of_commit = {} --- ... num_committed = 0 --- ... stmts = {} --- ... errors = {} --- ... initial_data = {} --- ... initial_repro = "" --- ... ops = {'begin', 'commit', 'select', 'replace', 'upsert', 'delete'} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... s1 = box.schema.create_space('test1', { engine = 'vinyl' }) i1 = s1:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) s2 = box.schema.create_space('test2', { engine = 'memtx' }) i2 = s2:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) for i=1,num_tx do txs[i] = {con = txn_proxy.new()} end; --- ... function my_equal(a, b) local typea = box.tuple.is(a) and 'table' or type(a) local typeb = box.tuple.is(b) and 'table' or type(b) if typea ~= typeb then return false elseif typea ~= 'table' then return a == b end for k,v in pairs(a) do if not my_equal(b[k], v) then return false end end for k,v in pairs(b) do if not my_equal(a[k], v) then return false end end return true end; --- ... unique_value = 0 function get_unique_value() unique_value = unique_value + 1 return unique_value end; --- ... 
function prepare() order_of_commit = {} num_committed = 0 stmts = {} for i=1,num_tx do txs[i].started = false txs[i].ended = false if math.random(3) == 1 then txs[i].read_only = true else txs[i].read_only = false end txs[i].read_only_checked = false txs[i].conflicted = false txs[i].possible = nil txs[i].num_writes = 0 end s1:truncate() s2:truncate() for i=1,num_key do local r = math.random(5) local v = get_unique_value() if (r >= 2) then s1:replace{i, v} s2:replace{i, v } end if (r == 2) then s1:delete{i} s2:delete{i} end end initial_data = s1:select{} initial_repro = "" initial_repro = initial_repro .. "s = box.schema.space.create('test', {engine = 'vinyl', if_not_exists = true})\n" initial_repro = initial_repro .. "i1 = s:create_index('test', {parts = {1, 'uint'}, if_not_exists = true})\n" initial_repro = initial_repro .. "txn_proxy = require('txn_proxy')\n" for _,tuple in pairs(initial_data) do initial_repro = initial_repro .. "s:replace{" .. tuple[1] .. ", " .. tuple[2] .. "} " end end; --- ... function apply(t, k, op) local tx = txs[t] local v = nil local k = k local repro = nil if op == 'begin' then if tx.started then table.insert(errors, "assert #1") end tx.started = true tx.con:begin() k = nil repro = "c" .. t .. " = txn_proxy.new() c" .. t .. ":begin()" repro = "p(\"c" .. t .. ":begin()\") " .. repro elseif op == 'commit' then if tx.ended or not tx.started then table.insert(errors, "assert #2") end tx.ended = true table.insert(order_of_commit, t) num_committed = num_committed + 1 local res = tx.con:commit() if res ~= "" and res[1]['error'] then tx.conflicted = true else tx.select_all = s1:select{} if tx.num_writes == 0 then tx.read_only = true end end k = nil repro = "c" .. t .. ":commit()" repro = "p(\"" .. repro .. "\", " .. repro .. ", s:select{})" elseif op == 'select' then v = tx.con('s1:select{'..k..'}') repro = "c" .. t .. "('s:select{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. 
")" elseif op == 'replace' then v = get_unique_value() tx.con('s1:replace{'..k..','..v..'}') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:replace{" .. k .. ", " .. v .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'upsert' then v = math.random(100) tx.con('s1:upsert({'..k..','..v..'}, {{"+", 2,'..v..'}})') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:upsert({" .. k .. ", " .. v .. "}, {{\\'+\\', 2, " .. v .. "}})')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'delete' then tx.con('s1:delete{'..k..'}') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:delete{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" end table.insert(stmts, {t=t, k=k, op=op, v=v, repro=repro}) end; --- ... function generate_random_operation() local t = math.random(num_tx) local k = math.random(num_key) local tx = txs[t] if tx.ended then return end local op_no = 0 if (tx.read_only) then op_no = math.random(3) else op_no = math.random(6) end local op = ops[op_no] if op ~= 'commit' or tx.started then if not tx.started then apply(t, k, 'begin') end if op ~= 'begin' then apply(t, k, op) end end end; --- ... function is_rdonly_tx_possible(t) for _,s in pairs(stmts) do if s.t == t and s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end end end return true end; --- ... function try_to_apply_tx(t) for _,s in pairs(stmts) do if s.t == t then if s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end elseif s.op == 'replace' then s2:replace{s.k, s.v} elseif s.op == 'upsert' then s2:upsert({s.k, s.v}, {{'+', 2, s.v}}) elseif s.op == 'delete' then s2:delete{s.k} end end end return true end; --- ... function check_rdonly_possibility() for i=1,num_tx do if txs[i].read_only and not txs[i].possible then if is_rdonly_tx_possible(i) then txs[i].possible = true end end end end; --- ... 
function check() local had_errors = (errors[1] ~= nil) for i=1,num_tx do if txs[i].read_only then if txs[i].conflicted then table.insert(errors, "read-only conflicted " .. i) end txs[i].possible = false end end check_rdonly_possibility() for _,t in ipairs(order_of_commit) do if not txs[t].read_only then if not txs[t].conflicted then if not try_to_apply_tx(t) then table.insert(errors, "not serializable " .. t) end if not my_equal(txs[t].select_all, s2:select{}) then table.insert(errors, "results are different " .. t) end check_rdonly_possibility() end end end for i=1,num_tx do if txs[i].read_only and not txs[i].possible then table.insert(errors, "not valid read view " .. i) end end if errors[1] and not had_errors then print("p(\"" .. errors[1] .. "\")") print(initial_repro) print("p(\"" .. initial_repro .. "\")") print('----------------------') for _,stmt in ipairs(stmts) do print(stmt.repro) end io.flush() end end; --- ... for i = 1, num_tests do prepare() while num_committed ~= num_tx do generate_random_operation() end check() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... errors --- - [] ... s1:drop() --- ... s2:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/layout.result0000664000000000000000000002106113306565107021111 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd('restart server default with cleanup=1') fiber = require 'fiber' --- ... fio = require 'fio' --- ... xlog = require 'xlog' --- ... fun = require 'fun' --- ... space = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = space:create_index('pk', {parts = {{1, 'string', collation = 'unicode'}}, run_count_per_level=3}) --- ... _ = space:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}, run_count_per_level=3}) --- ... -- Empty run space:insert{'ЁЁЁ', 777} --- - ['ЁЁЁ', 777] ... space:delete{'ЁЁЁ'} --- ... box.snapshot() --- - ok ... space:replace{'ЭЭЭ', box.NULL} --- - ['ЭЭЭ', null] ... 
space:replace{'эээ', box.NULL} --- - ['эээ', null] ... space:replace{'ёёё', box.NULL} --- - ['ёёё', null] ... box.snapshot() --- - ok ... space:replace{'ёёё', 123} --- - ['ёёё', 123] ... space:replace{'ЮЮЮ', 456} --- - ['ЮЮЮ', 456] ... space:replace{'ююю', 789} --- - ['ююю', 789] ... box.snapshot() --- - ok ... space:drop() --- ... -- Get the list of files from the last checkpoint. -- convert names to relative -- work_dir = fio.cwd() files = box.backup.start() --- ... -- use abspath to work correclty with symlinks -- for i, name in pairs(files) do files[i] = fio.abspath(files[i]):sub(#work_dir + 2) end table.sort(files) --- ... -- files result = {} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i, path in pairs(files) do local suffix = string.gsub(path, '.*%.', '') if suffix ~= 'snap' and suffix ~= 'xlog' then local rows = {} local i = 1 for lsn, row in xlog.pairs(path) do rows[i] = row i = i + 1 end table.insert(result, { fio.basename(path), rows }) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... box.backup.stop() -- resume the garbage collection process --- ... test_run:cmd("push filter 'timestamp: .*' to 'timestamp: '") --- - true ... test_run:cmd("push filter 'offset: .*' to 'offset: '") --- - true ... test_run:cmd("push filter 'bloom_filter: .*' to 'bloom_filter: '") --- - true ... 
result --- - - - 00000000000000000009.vylog - - HEADER: type: INSERT BODY: tuple: [0, {0: 3, 7: [{'field': 0, 'collation': 1, 'type': 'string'}], 6: 512}] - HEADER: type: INSERT BODY: tuple: [10, {0: 3, 9: 9}] - HEADER: type: INSERT BODY: tuple: [5, {0: 3, 2: 6, 9: 9}] - HEADER: type: INSERT BODY: tuple: [4, {0: 3, 2: 3}] - HEADER: type: INSERT BODY: tuple: [6, {2: 3}] - HEADER: type: INSERT BODY: tuple: [2, {0: 3}] - HEADER: type: INSERT BODY: tuple: [8, {2: 6, 8: 7}] - HEADER: type: INSERT BODY: tuple: [0, {0: 4, 5: 1, 6: 512, 7: [{'field': 1, 'is_nullable': true, 'type': 'unsigned'}]}] - HEADER: type: INSERT BODY: tuple: [10, {0: 4, 9: 9}] - HEADER: type: INSERT BODY: tuple: [5, {0: 4, 2: 4, 9: 9}] - HEADER: type: INSERT BODY: tuple: [4, {0: 4, 2: 2}] - HEADER: type: INSERT BODY: tuple: [6, {2: 2}] - HEADER: type: INSERT BODY: tuple: [2, {0: 4, 1: 1}] - HEADER: type: INSERT BODY: tuple: [8, {1: 1, 2: 4, 8: 5}] - HEADER: type: INSERT BODY: tuple: [11, {}] - HEADER: timestamp: type: INSERT BODY: tuple: [7, {2: 3}] - HEADER: timestamp: type: INSERT BODY: tuple: [7, {2: 2}] - HEADER: timestamp: type: INSERT BODY: tuple: [4, {0: 4, 2: 8}] - HEADER: timestamp: type: INSERT BODY: tuple: [5, {0: 4, 2: 8, 9: 12}] - HEADER: timestamp: type: INSERT BODY: tuple: [8, {1: 1, 2: 8, 8: 9}] - HEADER: timestamp: type: INSERT BODY: tuple: [10, {0: 4, 9: 12}] - HEADER: timestamp: type: INSERT BODY: tuple: [4, {0: 3, 2: 10}] - HEADER: timestamp: type: INSERT BODY: tuple: [5, {0: 3, 2: 10, 9: 12}] - HEADER: timestamp: type: INSERT BODY: tuple: [8, {2: 10, 8: 11}] - HEADER: timestamp: type: INSERT BODY: tuple: [10, {0: 3, 9: 12}] - - 00000000000000000006.index - - HEADER: type: RUNINFO BODY: min_lsn: 7 max_key: ['ЭЭЭ'] page_count: 1 bloom_filter: max_lsn: 9 min_key: ['ёёё'] - HEADER: type: PAGEINFO BODY: row_index_offset: offset: size: 86 unpacked_size: 67 row_count: 3 min_key: ['ёёё'] - - 00000000000000000006.run - - HEADER: lsn: 9 type: INSERT BODY: tuple: ['ёёё', null] - HEADER: 
lsn: 8 type: INSERT BODY: tuple: ['эээ', null] - HEADER: lsn: 7 type: INSERT BODY: tuple: ['ЭЭЭ', null] - HEADER: type: ROWINDEX BODY: row_index: "\0\0\0\0\0\0\0\x10\0\0\0 " - - 00000000000000000010.index - - HEADER: type: RUNINFO BODY: min_lsn: 10 max_key: ['ЮЮЮ'] page_count: 1 bloom_filter: max_lsn: 12 min_key: ['ёёё'] - HEADER: type: PAGEINFO BODY: row_index_offset: offset: size: 90 unpacked_size: 71 row_count: 3 min_key: ['ёёё'] - - 00000000000000000010.run - - HEADER: lsn: 10 type: REPLACE BODY: tuple: ['ёёё', 123] - HEADER: lsn: 12 type: INSERT BODY: tuple: ['ююю', 789] - HEADER: lsn: 11 type: INSERT BODY: tuple: ['ЮЮЮ', 456] - HEADER: type: ROWINDEX BODY: row_index: "\0\0\0\0\0\0\0\x10\0\0\0\"" - - 00000000000000000004.index - - HEADER: type: RUNINFO BODY: min_lsn: 7 max_key: [null, 'ЭЭЭ'] page_count: 1 bloom_filter: max_lsn: 9 min_key: [null, 'ёёё'] - HEADER: type: PAGEINFO BODY: row_index_offset: offset: size: 86 unpacked_size: 67 row_count: 3 min_key: [null, 'ёёё'] - - 00000000000000000004.run - - HEADER: lsn: 9 type: INSERT BODY: tuple: [null, 'ёёё'] - HEADER: lsn: 8 type: INSERT BODY: tuple: [null, 'эээ'] - HEADER: lsn: 7 type: INSERT BODY: tuple: [null, 'ЭЭЭ'] - HEADER: type: ROWINDEX BODY: row_index: "\0\0\0\0\0\0\0\x10\0\0\0 " - - 00000000000000000008.index - - HEADER: type: RUNINFO BODY: min_lsn: 10 max_key: [789, 'ююю'] page_count: 1 bloom_filter: max_lsn: 12 min_key: [null, 'ёёё'] - HEADER: type: PAGEINFO BODY: row_index_offset: offset: size: 110 unpacked_size: 91 row_count: 4 min_key: [null, 'ёёё'] - - 00000000000000000008.run - - HEADER: lsn: 10 type: DELETE BODY: key: [null, 'ёёё'] - HEADER: lsn: 10 type: REPLACE BODY: tuple: [123, 'ёёё'] - HEADER: lsn: 11 type: INSERT BODY: tuple: [456, 'ЮЮЮ'] - HEADER: lsn: 12 type: INSERT BODY: tuple: [789, 'ююю'] - HEADER: type: ROWINDEX BODY: row_index: "\0\0\0\0\0\0\0\x10\0\0\0 \0\0\02" ... test_run:cmd("clear filter") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/errinj_vylog.result0000664000000000000000000000676513306565107022323 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... -- -- Check that an error to commit a new run to vylog does not -- break vinyl permanently. -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... _ = s:insert{1, 'x'} --- ... SCHED_TIMEOUT = 0.05 --- ... box.error.injection.set('ERRINJ_VY_SCHED_TIMEOUT', SCHED_TIMEOUT) --- - ok ... box.error.injection.set('ERRINJ_VY_LOG_FLUSH', true); --- - ok ... box.snapshot() --- - error: Error injection 'vinyl log flush' ... box.error.injection.set('ERRINJ_VY_LOG_FLUSH', false); --- - ok ... fiber.sleep(2 * SCHED_TIMEOUT) --- ... box.error.injection.set('ERRINJ_VY_SCHED_TIMEOUT', 0) --- - ok ... _ = s:insert{2, 'y'} --- ... box.snapshot() --- - ok ... _ = s:insert{3, 'z'} --- ... test_run:cmd('restart server default') s = box.space.test --- ... s:select() --- - - [1, 'x'] - [2, 'y'] - [3, 'z'] ... s:drop() --- ... -- -- Check that an index drop/truncate/create record we failed to -- write to vylog is flushed along with the next record. -- s1 = box.schema.space.create('test1', {engine = 'vinyl'}) --- ... _ = s1:create_index('pk') --- ... _ = s1:insert{1, 'a'} --- ... s2 = box.schema.space.create('test2', {engine = 'vinyl'}) --- ... _ = s2:create_index('pk') --- ... _ = s2:insert{2, 'b'} --- ... box.snapshot() --- - ok ... _ = s1:insert{3, 'c'} --- ... _ = s2:insert{4, 'd'} --- ... box.error.injection.set('ERRINJ_VY_LOG_FLUSH', true); --- - ok ... s1:drop() --- ... s2:truncate() --- ... _ = s2:insert{5, 'e'} --- ... s3 = box.schema.space.create('test3', {engine = 'vinyl'}) --- ... _ = s3:create_index('pk') --- ... _ = s3:insert{6, 'f'} --- ... box.error.injection.set('ERRINJ_VY_LOG_FLUSH', false); --- - ok ... box.snapshot() --- - ok ... _ = s2:insert{7, 'g'} --- ... _ = s3:insert{8, 'h'} --- ... 
test_run:cmd('restart server default') s1 = box.space.test1 --- ... s1 == nil --- - true ... s2 = box.space.test2 --- ... s2:select() --- - - [5, 'e'] - [7, 'g'] ... s2:drop() --- ... s3 = box.space.test3 --- ... s3:select() --- - - [6, 'f'] - [8, 'h'] ... s3:drop() --- ... -- -- Check that if a buffered index drop/truncate/create record -- does not make it to the vylog before restart, it will be -- replayed on recovery. -- s1 = box.schema.space.create('test1', {engine = 'vinyl'}) --- ... _ = s1:create_index('pk') --- ... _ = s1:insert{111, 'aaa'} --- ... s2 = box.schema.space.create('test2', {engine = 'vinyl'}) --- ... _ = s2:create_index('pk') --- ... _ = s2:insert{222, 'bbb'} --- ... box.snapshot() --- - ok ... _ = s1:insert{333, 'ccc'} --- ... _ = s2:insert{444, 'ddd'} --- ... box.error.injection.set('ERRINJ_VY_LOG_FLUSH', true); --- - ok ... s1:drop() --- ... s2:truncate() --- ... _ = s2:insert{555, 'eee'} --- ... s3 = box.schema.space.create('test3', {engine = 'vinyl'}) --- ... _ = s3:create_index('pk') --- ... _ = s3:insert{666, 'fff'} --- ... -- gh-2532: replaying create/drop from xlog crashes tarantool test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 10 do s = box.schema.space.create('test', {engine = 'vinyl'}) s:create_index('primary') s:create_index('secondary', {unique = false, parts = {2, 'string'}}) s:insert{i, 'test' .. i} s:truncate() s:drop() end test_run:cmd("setopt delimiter ''"); --- ... test_run:cmd('restart server default') s1 = box.space.test1 --- ... s1 == nil --- - true ... s2 = box.space.test2 --- ... s2:select() --- - - [555, 'eee'] ... s2:drop() --- ... s3 = box.space.test3 --- ... s3:select() --- - - [666, 'fff'] ... s3:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/savepoint.test.lua0000664000000000000000000000740513306560010022017 0ustar rootrootenv = require('test_run') test_run = env.new() engine = 'vinyl' -- test duplicate conflict in the primary index space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') space:insert({1}) space:insert({2}) space:insert({3}) space:select{} space:insert({1}) space:select{} box.begin() space:insert({5}) space:insert({6}) space:insert({7}) space:insert({7}) space:insert({8}) box.commit() index:select{} index:get({1}) index:get({2}) index:get({3}) index:get({4}) index:get({5}) index:get({6}) index:get({7}) index:get({8}) space:drop() -- test duplicate conflict in the secondary index space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { parts = {1, 'uint'} }) index2 = space:create_index('secondary', { parts = {2, 'int', 3, 'str'} }) space:insert({1}) space:insert({1, 1, 'a'}) space:insert({2, 2, 'a'}) space:insert({3, 2, 'b'}) space:insert({2, 3, 'c'}) index:select{} index2:select{} -- fail all box.begin() space:insert({1, 10, '10'}) space:insert({2, 10, '10'}) space:insert({3, 10, '10'}) box.commit() index:select{} index2:select{} -- fail at the begining box.begin() space:insert({1, 1, '1'}) space:insert({4, 4, 'd'}) space:insert({5, 5, 'd'}) box.commit() index:select{} index2:select{} -- fail at the end box.begin() space:insert({6, 6, 'd'}) space:insert({7, 6, 'e'}) space:insert({1, 1, '1'}) box.commit() index:select{} index2:select{} -- fail pk box.begin() space:insert({1, 100, '100'}) box.commit() index:select{} index2:select{} -- fail secondary box.begin() space:insert({8, 6, 'd'}) box.commit() index:select{} index2:select{} space:drop() -- test other operations (update, delete, upsert) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') space:insert({1}) space:insert({2}) space:insert({3}) space:select{} 
box.begin() space:insert({5}) index:update({1}, {{'+', 1, 3}}) box.commit() index:select{} box.begin() space:delete({5}) space:update({1}, {{'=', 2, 43}}) space:insert({10}) space:upsert({3}, {{}, {'='}}) -- incorrect ops space:insert({15}) box.commit() index:select{} box.begin() space:delete({15}) space:delete({10}) space:insert({11}) space:upsert({12}, {}) space:insert({'abc'}) space:update({1}, {{'#', 2, 1}}) box.commit() space:select{} space:drop() -- test same on several indexes space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { parts = {1, 'unsigned', 2, 'string'} }) index2 = space:create_index('secondary', { parts = {2, 'string', 3, 'scalar'}, unique = false }) index3 = space:create_index('third', { parts = {4, 'integer', 2, 'string'} }) space:insert({1, 'a', 'sclr1', 20}) space:insert({1, 'b', 'sclr1', 20}) space:insert({1, 'c', 'sclr1', -30}) space:insert({2, 'a', true, 15}) index:select{} index2:select{} index3:select{} box.begin() space:insert({1, 'a', 'sclr1', 20}) space:update({2, 'a'}, {{'=', 3, 3.14}}) box.commit() index:select{} index2:select{} index3:select{} box.begin() space:delete({1, 'a'}) space:insert({100, '100', '100', 100}) space:update({2, 'a'}, {{}}) box.commit() index:select{} index2:select{} index3:select{} space:drop() -- test rollback space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { parts = {2, 'unsigned'}, unique = false }) index3 = space:create_index('third', { parts = {2, 'unsigned', 3, 'scalar'} }) space:insert({1, 1, 'a'}) space:insert({2, 1, 'b'}) space:insert({3, 2, 'a'}) index:select{} index2:select{} index3:select{} box.begin() space:insert({4, 2, 'b'}) space:upsert({2}, {{'=', 4, 1000}}) index3:delete({3, 'a'}) space:insert({4, 100, 100}) box.rollback() index:select{} index2:select{} index3:select{} space:drop() 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/stress.lua0000664000000000000000000000636613306560010020361 0ustar rootrootlocal fiber = require('fiber') local function pcall_wrap(status, ...) if status ~= true then return false, tostring(...) end return status, ... end; local pcall_e = function(fun, ...) return pcall_wrap(pcall(fun, ...)) end; box.once("vinyl_stress", function() local s1 = box.schema.space.create('s1', { engine = 'vinyl', if_not_exists = true }) s1:create_index('pk', {if_not_exists = true}) local s2 = box.schema.space.create('s2', { engine = 'vinyl', if_not_exists = true }) s2:create_index('pk', {if_not_exists = true}) local s3 = box.schema.space.create('s3', { engine = 'vinyl', if_not_exists = true }) s3:create_index('pk', {if_not_exists = true}) local s4 = box.schema.space.create('s4', { engine = 'vinyl', if_not_exists = true }) s4:create_index('pk', {if_not_exists = true}) local s5 = box.schema.space.create('s5', { engine = 'vinyl'}) s5:create_index('pk') end) local spaces = {box.space.s1, box.space.s2, box.space.s3, box.space.s4, box.space.s5} local max_data_size = box.cfg.vinyl_page_size * 1.5 local function t1(ch, time_limit) local t1 = fiber.time() while fiber.time() - t1 < time_limit do local k = math.random(10000) local t = math.random(80) local data = string.char(math.random(string.byte('Z') - string.byte('A')) + string.byte('A') - 1) data = data:rep(math.random(max_data_size)) local space = spaces[math.fmod(t, #spaces) + 1] if t < 32 then space:replace({k, data}) elseif t < 40 then space:upsert({k, data}, {{'=', 2, data}}) elseif t < 56 then pcall_e(space.insert, space, {k, data}) elseif t < 64 then space:delete({k}) else pcall_e(space.update, space, {k}, {{'=', 2, data}}) end end ch:put(1) end; local function t2(ch, time_limit) local t1 = fiber.time() while fiber.time() - t1 < time_limit do local k = math.random(10000) local t = math.random(16) local space = spaces[math.fmod(t, #spaces) + 1] if t < 12 then local l = space:get({k}) else 
space:delete({k}) end end ch:put(2) end; local function t3(ch, time_limit) local t1 = fiber.time() local i = 0 while fiber.time() - t1 < time_limit do i = i + 1 local k = math.random(10000) local t = math.random(20) local l = math.random(2048) local space = spaces[math.fmod(t, #spaces) + 1] if t <= 6 then space:select(k, { iterator = 'GE', limit = l }) elseif t <= 12 then space:select(k, { iterator = 'LE', limit = l }) else space:delete({k}) end if i % 10 == 0 then collectgarbage('collect') end end ch:put(3) end; local function stress(time_limit) time_limit = time_limit or 300 local ch = fiber.channel(16) math.randomseed(os.time()); for i = 1, 6 do fiber.create(t1, ch, time_limit) end; for i = 1, 6 do fiber.create(t2, ch, time_limit) end; for i = 1, 4 do fiber.create(t3, ch, time_limit) end; for i = 1, 16 do ch:get() end; end return { stress = stress; } tarantool_1.9.1.26.g63eb81e3c/test/vinyl/update_optimize.test.lua0000664000000000000000000001701713306565107023225 0ustar rootroottest_run = require('test_run').new() -- Restart the server to finish all snaphsots from prior tests. 
test_run:cmd('restart server default') fiber = require('fiber') -- optimize one index space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', { run_count_per_level = 20 }) index2 = space:create_index('secondary', { parts = {5, 'unsigned'}, run_count_per_level = 20 }) function dumped_stmt_count() return index:info().disk.dump.out.rows + index2:info().disk.dump.out.rows end box.snapshot() test_run:cmd("setopt delimiter ';'") function wait_for_dump(index, old_count) while index:info().run_count == old_count do fiber.sleep(0) end return index:info().run_count end; test_run:cmd("setopt delimiter ''"); index_run_count = index:info().run_count index2_run_count = index2:info().run_count old_stmt_count = dumped_stmt_count() space:insert({1, 2, 3, 4, 5}) space:insert({2, 3, 4, 5, 6}) space:insert({3, 4, 5, 6, 7}) space:insert({4, 5, 6, 7, 8}) box.snapshot() -- Wait for dump both indexes. index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 8 old_stmt_count = new_stmt_count -- not optimized updates space:update({1}, {{'=', 5, 10}}) -- change secondary index field -- Need a snapshot after each operation to avoid purging some -- statements in vy_write_iterator during dump. box.snapshot() index_run_count = wait_for_dump(index, index_run_count) space:update({1}, {{'!', 4, 20}}) -- move range containing index field box.snapshot() index_run_count = wait_for_dump(index, index_run_count) space:update({1}, {{'#', 3, 1}}) -- same box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 9 old_stmt_count = new_stmt_count space:select{} index2:select{} -- optimized updates space:update({2}, {{'=', 6, 10}}) -- change not indexed field box.snapshot() index_run_count = wait_for_dump(index, index_run_count) -- Move range that doesn't contain indexed fields. 
space:update({2}, {{'!', 7, 20}}) box.snapshot() index_run_count = wait_for_dump(index, index_run_count) space:update({2}, {{'#', 6, 1}}) -- same box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 3 old_stmt_count = new_stmt_count space:select{} index2:select{} space:drop() -- optimize two indexes space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', { parts = {2, 'unsigned'}, run_count_per_level = 20 } ) index2 = space:create_index('secondary', { parts = {4, 'unsigned', 3, 'unsigned'}, run_count_per_level = 20 }) index3 = space:create_index('third', { parts = {5, 'unsigned'}, run_count_per_level = 20 }) function dumped_stmt_count() return index:info().disk.dump.out.rows + index2:info().disk.dump.out.rows + index3:info().disk.dump.out.rows end box.snapshot() index_run_count = index:info().run_count index2_run_count = index2:info().run_count index3_run_count = index3:info().run_count old_stmt_count = dumped_stmt_count() space:insert({1, 2, 3, 4, 5}) space:insert({2, 3, 4, 5, 6}) space:insert({3, 4, 5, 6, 7}) space:insert({4, 5, 6, 7, 8}) box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 12 old_stmt_count = new_stmt_count -- not optimizes updates index:update({2}, {{'+', 1, 10}, {'+', 3, 10}, {'+', 4, 10}, {'+', 5, 10}}) -- change all fields box.snapshot() index_run_count = wait_for_dump(index, index_run_count) index:update({2}, {{'!', 3, 20}}) -- move range containing all indexes box.snapshot() index_run_count = wait_for_dump(index, index_run_count) index:update({2}, {{'=', 7, 100}, {'+', 5, 10}, {'#', 3, 1}}) -- change two cols but then move range with all indexed fields box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 15 old_stmt_count = 
new_stmt_count space:select{} index2:select{} index3:select{} -- optimize one 'secondary' index update index:update({3}, {{'+', 1, 10}, {'-', 5, 2}, {'!', 6, 100}}) -- change only index 'third' box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 3 old_stmt_count = new_stmt_count -- optimize one 'third' index update index:update({3}, {{'=', 1, 20}, {'+', 3, 5}, {'=', 4, 30}, {'!', 6, 110}}) -- change only index 'secondary' box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 3 old_stmt_count = new_stmt_count -- optimize both indexes index:update({3}, {{'+', 1, 10}, {'#', 6, 1}}) -- don't change any indexed fields box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 1 old_stmt_count = new_stmt_count space:select{} index2:select{} index3:select{} -- -- gh-1716: optimize UPDATE with fieldno > 64. -- -- Create a big tuple. long_tuple = {} for i = 1, 70 do long_tuple[i] = i end _ = space:replace(long_tuple) box.snapshot() -- Make update of not indexed field with pos > 64. index_run_count = wait_for_dump(index, index_run_count) old_stmt_count = dumped_stmt_count() _ = index:update({2}, {{'=', 65, 1000}}) box.snapshot() -- Check the only primary index to be changed. index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 1 old_stmt_count = new_stmt_count space:get{2}[65] -- -- Try to optimize update with negative field numbers. -- index:update({2}, {{'#', -65, 65}}) box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() new_stmt_count - old_stmt_count == 1 old_stmt_count = new_stmt_count index:select{} index2:select{} index3:select{} -- Optimize index2 with negative update op. 
space:replace{10, 20, 30, 40, 50} box.snapshot() index_run_count = wait_for_dump(index, index_run_count) old_stmt_count = dumped_stmt_count() index:update({20}, {{'=', -1, 500}}) box.snapshot() index_run_count = wait_for_dump(index, index_run_count) new_stmt_count = dumped_stmt_count() -- 3 = REPLACE in index1 and DELETE + REPLACE in index3. new_stmt_count - old_stmt_count == 3 old_stmt_count = new_stmt_count index:select{} index2:select{} index3:select{} -- Check if optimizes update do not skip the entire key during -- dump. space:replace{10, 100, 1000, 10000, 100000, 1000000} index:update({100}, {{'=', 6, 1}}) box.begin() space:replace{20, 200, 2000, 20000, 200000, 2000000} index:update({200}, {{'=', 6, 2}}) box.commit() box.snapshot() index_run_count = wait_for_dump(index, index_run_count) old_stmt_count = dumped_stmt_count() index:select{} index2:select{} index3:select{} -- -- gh-2980: key uniqueness is not checked if indexed fields -- are not updated. -- space:truncate() space:replace{1, 1, 1, 1, 1} LOOKUPS_BASE = {0, 0, 0} test_run:cmd("setopt delimiter ';'") function lookups() local ret = {} for i = 1, #LOOKUPS_BASE do local info = space.index[i - 1]:info() table.insert(ret, info.lookup - LOOKUPS_BASE[i]) end return ret end; test_run:cmd("setopt delimiter ''"); LOOKUPS_BASE = lookups() -- update of a field that is not indexed space:update(1, {{'+', 1, 1}}) lookups() -- update of a field indexed by space.index[1] space:update(1, {{'+', 3, 1}}) lookups() -- update of a field indexed by space.index[2] space:update(1, {{'+', 5, 1}}) lookups() space:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/update_optimize.result0000664000000000000000000002744113306565107023006 0ustar rootroottest_run = require('test_run').new() --- ... -- Restart the server to finish all snaphsots from prior tests. test_run:cmd('restart server default') fiber = require('fiber') --- ... -- optimize one index space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... 
index = space:create_index('primary', { run_count_per_level = 20 }) --- ... index2 = space:create_index('secondary', { parts = {5, 'unsigned'}, run_count_per_level = 20 }) --- ... function dumped_stmt_count() return index:info().disk.dump.out.rows + index2:info().disk.dump.out.rows end --- ... box.snapshot() --- - ok ... test_run:cmd("setopt delimiter ';'") --- - true ... function wait_for_dump(index, old_count) while index:info().run_count == old_count do fiber.sleep(0) end return index:info().run_count end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... index_run_count = index:info().run_count --- ... index2_run_count = index2:info().run_count --- ... old_stmt_count = dumped_stmt_count() --- ... space:insert({1, 2, 3, 4, 5}) --- - [1, 2, 3, 4, 5] ... space:insert({2, 3, 4, 5, 6}) --- - [2, 3, 4, 5, 6] ... space:insert({3, 4, 5, 6, 7}) --- - [3, 4, 5, 6, 7] ... space:insert({4, 5, 6, 7, 8}) --- - [4, 5, 6, 7, 8] ... box.snapshot() --- - ok ... -- Wait for dump both indexes. index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 8 --- - true ... old_stmt_count = new_stmt_count --- ... -- not optimized updates space:update({1}, {{'=', 5, 10}}) -- change secondary index field --- - [1, 2, 3, 4, 10] ... -- Need a snapshot after each operation to avoid purging some -- statements in vy_write_iterator during dump. box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... space:update({1}, {{'!', 4, 20}}) -- move range containing index field --- - [1, 2, 3, 20, 4, 10] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... space:update({1}, {{'#', 3, 1}}) -- same --- - [1, 2, 20, 4, 10] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 9 --- - true ... 
old_stmt_count = new_stmt_count --- ... space:select{} --- - - [1, 2, 20, 4, 10] - [2, 3, 4, 5, 6] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] ... index2:select{} --- - - [2, 3, 4, 5, 6] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [1, 2, 20, 4, 10] ... -- optimized updates space:update({2}, {{'=', 6, 10}}) -- change not indexed field --- - [2, 3, 4, 5, 6, 10] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... -- Move range that doesn't contain indexed fields. space:update({2}, {{'!', 7, 20}}) --- - [2, 3, 4, 5, 6, 10, 20] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... space:update({2}, {{'#', 6, 1}}) -- same --- - [2, 3, 4, 5, 6, 20] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 3 --- - true ... old_stmt_count = new_stmt_count --- ... space:select{} --- - - [1, 2, 20, 4, 10] - [2, 3, 4, 5, 6, 20] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] ... index2:select{} --- - - [2, 3, 4, 5, 6, 20] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [1, 2, 20, 4, 10] ... space:drop() --- ... -- optimize two indexes space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary', { parts = {2, 'unsigned'}, run_count_per_level = 20 } ) --- ... index2 = space:create_index('secondary', { parts = {4, 'unsigned', 3, 'unsigned'}, run_count_per_level = 20 }) --- ... index3 = space:create_index('third', { parts = {5, 'unsigned'}, run_count_per_level = 20 }) --- ... function dumped_stmt_count() return index:info().disk.dump.out.rows + index2:info().disk.dump.out.rows + index3:info().disk.dump.out.rows end --- ... box.snapshot() --- - ok ... index_run_count = index:info().run_count --- ... index2_run_count = index2:info().run_count --- ... index3_run_count = index3:info().run_count --- ... old_stmt_count = dumped_stmt_count() --- ... 
space:insert({1, 2, 3, 4, 5}) --- - [1, 2, 3, 4, 5] ... space:insert({2, 3, 4, 5, 6}) --- - [2, 3, 4, 5, 6] ... space:insert({3, 4, 5, 6, 7}) --- - [3, 4, 5, 6, 7] ... space:insert({4, 5, 6, 7, 8}) --- - [4, 5, 6, 7, 8] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 12 --- - true ... old_stmt_count = new_stmt_count --- ... -- not optimizes updates index:update({2}, {{'+', 1, 10}, {'+', 3, 10}, {'+', 4, 10}, {'+', 5, 10}}) -- change all fields --- - [11, 2, 13, 14, 15] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... index:update({2}, {{'!', 3, 20}}) -- move range containing all indexes --- - [11, 2, 20, 13, 14, 15] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... index:update({2}, {{'=', 7, 100}, {'+', 5, 10}, {'#', 3, 1}}) -- change two cols but then move range with all indexed fields --- - [11, 2, 13, 24, 15, 100] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 15 --- - true ... old_stmt_count = new_stmt_count --- ... space:select{} --- - - [11, 2, 13, 24, 15, 100] - [2, 3, 4, 5, 6] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] ... index2:select{} --- - - [2, 3, 4, 5, 6] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [11, 2, 13, 24, 15, 100] ... index3:select{} --- - - [2, 3, 4, 5, 6] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [11, 2, 13, 24, 15, 100] ... -- optimize one 'secondary' index update index:update({3}, {{'+', 1, 10}, {'-', 5, 2}, {'!', 6, 100}}) -- change only index 'third' --- - [12, 3, 4, 5, 4, 100] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 3 --- - true ... old_stmt_count = new_stmt_count --- ... 
-- optimize one 'third' index update index:update({3}, {{'=', 1, 20}, {'+', 3, 5}, {'=', 4, 30}, {'!', 6, 110}}) -- change only index 'secondary' --- - [20, 3, 9, 30, 4, 110, 100] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 3 --- - true ... old_stmt_count = new_stmt_count --- ... -- optimize both indexes index:update({3}, {{'+', 1, 10}, {'#', 6, 1}}) -- don't change any indexed fields --- - [30, 3, 9, 30, 4, 100] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 1 --- - true ... old_stmt_count = new_stmt_count --- ... space:select{} --- - - [11, 2, 13, 24, 15, 100] - [30, 3, 9, 30, 4, 100] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] ... index2:select{} --- - - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [11, 2, 13, 24, 15, 100] - [30, 3, 9, 30, 4, 100] ... index3:select{} --- - - [30, 3, 9, 30, 4, 100] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [11, 2, 13, 24, 15, 100] ... -- -- gh-1716: optimize UPDATE with fieldno > 64. -- -- Create a big tuple. long_tuple = {} --- ... for i = 1, 70 do long_tuple[i] = i end --- ... _ = space:replace(long_tuple) --- ... box.snapshot() --- - ok ... -- Make update of not indexed field with pos > 64. index_run_count = wait_for_dump(index, index_run_count) --- ... old_stmt_count = dumped_stmt_count() --- ... _ = index:update({2}, {{'=', 65, 1000}}) --- ... box.snapshot() --- - ok ... -- Check the only primary index to be changed. index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 1 --- - true ... old_stmt_count = new_stmt_count --- ... space:get{2}[65] --- - 1000 ... -- -- Try to optimize update with negative field numbers. -- index:update({2}, {{'#', -65, 65}}) --- - [1, 2, 3, 4, 5] ... 
box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... new_stmt_count - old_stmt_count == 1 --- - true ... old_stmt_count = new_stmt_count --- ... index:select{} --- - - [1, 2, 3, 4, 5] - [30, 3, 9, 30, 4, 100] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] ... index2:select{} --- - - [1, 2, 3, 4, 5] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [30, 3, 9, 30, 4, 100] ... index3:select{} --- - - [30, 3, 9, 30, 4, 100] - [1, 2, 3, 4, 5] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] ... -- Optimize index2 with negative update op. space:replace{10, 20, 30, 40, 50} --- - [10, 20, 30, 40, 50] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... old_stmt_count = dumped_stmt_count() --- ... index:update({20}, {{'=', -1, 500}}) --- - [10, 20, 30, 40, 500] ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... new_stmt_count = dumped_stmt_count() --- ... -- 3 = REPLACE in index1 and DELETE + REPLACE in index3. new_stmt_count - old_stmt_count == 3 --- - true ... old_stmt_count = new_stmt_count --- ... index:select{} --- - - [1, 2, 3, 4, 5] - [30, 3, 9, 30, 4, 100] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [10, 20, 30, 40, 500] ... index2:select{} --- - - [1, 2, 3, 4, 5] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [30, 3, 9, 30, 4, 100] - [10, 20, 30, 40, 500] ... index3:select{} --- - - [30, 3, 9, 30, 4, 100] - [1, 2, 3, 4, 5] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [10, 20, 30, 40, 500] ... -- Check if optimizes update do not skip the entire key during -- dump. space:replace{10, 100, 1000, 10000, 100000, 1000000} --- - [10, 100, 1000, 10000, 100000, 1000000] ... index:update({100}, {{'=', 6, 1}}) --- - [10, 100, 1000, 10000, 100000, 1] ... box.begin() --- ... space:replace{20, 200, 2000, 20000, 200000, 2000000} --- - [20, 200, 2000, 20000, 200000, 2000000] ... index:update({200}, {{'=', 6, 2}}) --- - [20, 200, 2000, 20000, 200000, 2] ... 
box.commit() --- ... box.snapshot() --- - ok ... index_run_count = wait_for_dump(index, index_run_count) --- ... old_stmt_count = dumped_stmt_count() --- ... index:select{} --- - - [1, 2, 3, 4, 5] - [30, 3, 9, 30, 4, 100] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [10, 20, 30, 40, 500] - [10, 100, 1000, 10000, 100000, 1] - [20, 200, 2000, 20000, 200000, 2] ... index2:select{} --- - - [1, 2, 3, 4, 5] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [30, 3, 9, 30, 4, 100] - [10, 20, 30, 40, 500] - [10, 100, 1000, 10000, 100000, 1] - [20, 200, 2000, 20000, 200000, 2] ... index3:select{} --- - - [30, 3, 9, 30, 4, 100] - [1, 2, 3, 4, 5] - [3, 4, 5, 6, 7] - [4, 5, 6, 7, 8] - [10, 20, 30, 40, 500] - [10, 100, 1000, 10000, 100000, 1] - [20, 200, 2000, 20000, 200000, 2] ... -- -- gh-2980: key uniqueness is not checked if indexed fields -- are not updated. -- space:truncate() --- ... space:replace{1, 1, 1, 1, 1} --- - [1, 1, 1, 1, 1] ... LOOKUPS_BASE = {0, 0, 0} --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function lookups() local ret = {} for i = 1, #LOOKUPS_BASE do local info = space.index[i - 1]:info() table.insert(ret, info.lookup - LOOKUPS_BASE[i]) end return ret end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... LOOKUPS_BASE = lookups() --- ... -- update of a field that is not indexed space:update(1, {{'+', 1, 1}}) --- - [2, 1, 1, 1, 1] ... lookups() --- - - 1 - 0 - 0 ... -- update of a field indexed by space.index[1] space:update(1, {{'+', 3, 1}}) --- - [2, 1, 2, 1, 1] ... lookups() --- - - 2 - 1 - 0 ... -- update of a field indexed by space.index[2] space:update(1, {{'+', 5, 1}}) --- - [2, 1, 2, 1, 2] ... lookups() --- - - 3 - 1 - 1 ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/tx_serial.test.lua0000664000000000000000000002102313306560010021771 0ustar rootroot-- The test runs loop of given number of rounds. -- Every round does the following: -- The test starts several concurrent transactions in vinyl. 
-- The transactions make some read/write operations over several keys in -- a random order and commit at a random moment. -- After that all transactions are sorted in order of commit -- With the sublist of read-write transactions committed w/o conflict: -- Test tries to make these transactions in memtex, one tx after another, -- without interleaving and compares select results with vinyl to make sure -- if the transaction could be serialized in order of commit or not -- With the sublist of read-write transactions committed with conflict: -- Test does nothing -- With the sublist of read only transactions: -- Test tries to insert these transactions between other transactions and checks -- that it possible to get same results. test_run = require('test_run').new() txn_proxy = require('txn_proxy') --settings num_tx = 10 --number of concurrent transactions num_key = 5 --number of keys that transactions use num_tests = 60 --number of test rounds to run txs = {} order_of_commit = {} num_committed = 0 stmts = {} errors = {} initial_data = {} initial_repro = "" ops = {'begin', 'commit', 'select', 'replace', 'upsert', 'delete'} test_run:cmd("setopt delimiter ';'") s1 = box.schema.create_space('test1', { engine = 'vinyl' }) i1 = s1:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) s2 = box.schema.create_space('test2', { engine = 'memtx' }) i2 = s2:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) for i=1,num_tx do txs[i] = {con = txn_proxy.new()} end; function my_equal(a, b) local typea = box.tuple.is(a) and 'table' or type(a) local typeb = box.tuple.is(b) and 'table' or type(b) if typea ~= typeb then return false elseif typea ~= 'table' then return a == b end for k,v in pairs(a) do if not my_equal(b[k], v) then return false end end for k,v in pairs(b) do if not my_equal(a[k], v) then return false end end return true end; unique_value = 0 function get_unique_value() unique_value = unique_value + 1 return unique_value end; function prepare() 
order_of_commit = {} num_committed = 0 stmts = {} for i=1,num_tx do txs[i].started = false txs[i].ended = false if math.random(3) == 1 then txs[i].read_only = true else txs[i].read_only = false end txs[i].read_only_checked = false txs[i].conflicted = false txs[i].possible = nil txs[i].num_writes = 0 end s1:truncate() s2:truncate() for i=1,num_key do local r = math.random(5) local v = get_unique_value() if (r >= 2) then s1:replace{i, v} s2:replace{i, v } end if (r == 2) then s1:delete{i} s2:delete{i} end end initial_data = s1:select{} initial_repro = "" initial_repro = initial_repro .. "s = box.schema.space.create('test', {engine = 'vinyl', if_not_exists = true})\n" initial_repro = initial_repro .. "i1 = s:create_index('test', {parts = {1, 'uint'}, if_not_exists = true})\n" initial_repro = initial_repro .. "txn_proxy = require('txn_proxy')\n" for _,tuple in pairs(initial_data) do initial_repro = initial_repro .. "s:replace{" .. tuple[1] .. ", " .. tuple[2] .. "} " end end; function apply(t, k, op) local tx = txs[t] local v = nil local k = k local repro = nil if op == 'begin' then if tx.started then table.insert(errors, "assert #1") end tx.started = true tx.con:begin() k = nil repro = "c" .. t .. " = txn_proxy.new() c" .. t .. ":begin()" repro = "p(\"c" .. t .. ":begin()\") " .. repro elseif op == 'commit' then if tx.ended or not tx.started then table.insert(errors, "assert #2") end tx.ended = true table.insert(order_of_commit, t) num_committed = num_committed + 1 local res = tx.con:commit() if res ~= "" and res[1]['error'] then tx.conflicted = true else tx.select_all = s1:select{} if tx.num_writes == 0 then tx.read_only = true end end k = nil repro = "c" .. t .. ":commit()" repro = "p(\"" .. repro .. "\", " .. repro .. ", s:select{})" elseif op == 'select' then v = tx.con('s1:select{'..k..'}') repro = "c" .. t .. "('s:select{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. 
")" elseif op == 'replace' then v = get_unique_value() tx.con('s1:replace{'..k..','..v..'}') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:replace{" .. k .. ", " .. v .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'upsert' then v = math.random(100) tx.con('s1:upsert({'..k..','..v..'}, {{"+", 2,'..v..'}})') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:upsert({" .. k .. ", " .. v .. "}, {{\\'+\\', 2, " .. v .. "}})')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'delete' then tx.con('s1:delete{'..k..'}') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:delete{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" end table.insert(stmts, {t=t, k=k, op=op, v=v, repro=repro}) end; function generate_random_operation() local t = math.random(num_tx) local k = math.random(num_key) local tx = txs[t] if tx.ended then return end local op_no = 0 if (tx.read_only) then op_no = math.random(3) else op_no = math.random(6) end local op = ops[op_no] if op ~= 'commit' or tx.started then if not tx.started then apply(t, k, 'begin') end if op ~= 'begin' then apply(t, k, op) end end end; function is_rdonly_tx_possible(t) for _,s in pairs(stmts) do if s.t == t and s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end end end return true end; function try_to_apply_tx(t) for _,s in pairs(stmts) do if s.t == t then if s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end elseif s.op == 'replace' then s2:replace{s.k, s.v} elseif s.op == 'upsert' then s2:upsert({s.k, s.v}, {{'+', 2, s.v}}) elseif s.op == 'delete' then s2:delete{s.k} end end end return true end; function check_rdonly_possibility() for i=1,num_tx do if txs[i].read_only and not txs[i].possible then if is_rdonly_tx_possible(i) then txs[i].possible = true end end end end; function check() local had_errors = (errors[1] ~= nil) for 
i=1,num_tx do if txs[i].read_only then if txs[i].conflicted then table.insert(errors, "read-only conflicted " .. i) end txs[i].possible = false end end check_rdonly_possibility() for _,t in ipairs(order_of_commit) do if not txs[t].read_only then if not txs[t].conflicted then if not try_to_apply_tx(t) then table.insert(errors, "not serializable " .. t) end if not my_equal(txs[t].select_all, s2:select{}) then table.insert(errors, "results are different " .. t) end check_rdonly_possibility() end end end for i=1,num_tx do if txs[i].read_only and not txs[i].possible then table.insert(errors, "not valid read view " .. i) end end if errors[1] and not had_errors then print("p(\"" .. errors[1] .. "\")") print(initial_repro) print("p(\"" .. initial_repro .. "\")") print('----------------------') for _,stmt in ipairs(stmts) do print(stmt.repro) end io.flush() end end; for i = 1, num_tests do prepare() while num_committed ~= num_tx do generate_random_operation() end check() end; test_run:cmd("setopt delimiter ''"); errors s1:drop() s2:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/iterator.result0000664000000000000000000006471713306560010021430 0ustar rootroot-- -- Verify that the iterator uses the state of the space before the iterator -- was created. -- env = require('test_run') --- ... test_run = env.new() --- ... create_iterator = require('utils').create_iterator --- ... iterator_next = function(iter) return iter.next() end --- ... iterate_over = function(iter) return iter.iterate_over() end --- ... -- -- Following tests verify that combinations -- of various commands are worked correctly. -- Combinations mentioned above are explicitly described in -- write_iterator.test.lua. -- space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary') --- ... -- -- DELETE followed by UPSERT -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:insert({1}) --- - [1] ... space:insert({2}) --- - [2] ... 
space:insert({3}) --- - [3] ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- 2) create iterator after initializing space:insert({1}) --- - [1] ... space:insert({2}) --- - [2] ... space:insert({3}) --- - [3] ... iter_obj = create_iterator(space) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- 3) create iterator within test case space:insert({1}) --- - [1] ... space:insert({2}) --- - [2] ... space:insert({3}) --- - [3] ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... iter_obj = create_iterator(space) --- ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- -- UPSERT followed by DELETE -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... iter_obj = create_iterator(space) --- ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... 
iterate_over(iter_obj) --- - [] ... -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:delete{1} --- ... space:delete{2} --- ... iter_obj = create_iterator(space) --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- -- UPSERT followed by UPSERT -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... iterate_over(iter_obj) --- - 0: [1, 1] 1: [2, 2] 2: [3, 3] ... space:truncate() --- ... -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... iter_obj = create_iterator(space) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... iterate_over(iter_obj) --- - 0: [1, 1] 1: [2, 2] 2: [3, 3] ... space:truncate() --- ... -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... iter_obj = create_iterator(space) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... iterate_over(iter_obj) --- - 0: [1, 1] 1: [2, 2] 2: [3, 3] ... space:truncate() --- ... -- -- UPSERT followed by REPLACE -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:replace({1, 10}) --- - [1, 10] ... 
space:upsert({2}, {{'!', 2, 2}}) --- ... space:replace({2, 20}) --- - [2, 20] ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:replace({3, 30}) --- - [3, 30] ... space:select{} --- - - [1, 10] - [2, 20] - [3, 30] ... iterate_over(iter_obj) --- - 0: [1, 10] 1: [2, 20] 2: [3, 30] ... space:truncate() --- ... -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... iter_obj = create_iterator(space) --- ... space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:select{} --- - - [1, 10] - [2, 20] - [3, 30] ... iterate_over(iter_obj) --- - 0: [1, 10] 1: [2, 20] 2: [3, 30] ... space:truncate() --- ... -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 1}}) --- ... space:replace({1, 10}) --- - [1, 10] ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:replace({2, 20}) --- - [2, 20] ... space:upsert({3}, {{'!', 2, 3}}) --- ... iter_obj = create_iterator(space) --- ... space:replace({3, 30}) --- - [3, 30] ... space:select{} --- - - [1, 10] - [2, 20] - [3, 30] ... iterate_over(iter_obj) --- - 0: [1, 10] 1: [2, 20] 2: [3, 30] ... space:truncate() --- ... -- -- REPLACE followed by UPSERT -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1, 10] - [2, 2, 20] - [3, 3, 30] ... iterate_over(iter_obj) --- - 0: [1, 1, 10] 1: [2, 2, 20] 2: [3, 3, 30] ... space:truncate() --- ... -- 2) create iterator after initializing space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... iter_obj = create_iterator(space) --- ... 
space:upsert({1}, {{'!', 2, 1}}) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1, 10] - [2, 2, 20] - [3, 3, 30] ... iterate_over(iter_obj) --- - 0: [1, 1, 10] 1: [2, 2, 20] 2: [3, 3, 30] ... space:truncate() --- ... -- 3) create iterator within test case space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:upsert({1}, {{'!', 2, 1}}) --- ... iter_obj = create_iterator(space) --- ... space:upsert({2}, {{'!', 2, 2}}) --- ... space:upsert({3}, {{'!', 2, 3}}) --- ... space:select{} --- - - [1, 1, 10] - [2, 2, 20] - [3, 3, 30] ... iterate_over(iter_obj) --- - 0: [1, 1, 10] 1: [2, 2, 20] 2: [3, 3, 30] ... space:truncate() --- ... -- -- REPLACE followed by DELETE -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- 2) create iterator after initializing space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... iter_obj = create_iterator(space) --- ... space:delete{1} --- ... space:delete{2} --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- 3) create iterator within test case space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:delete{1} --- ... space:delete{2} --- ... iter_obj = create_iterator(space) --- ... space:delete{3} --- ... space:select{} --- - [] ... iterate_over(iter_obj) --- - [] ... -- -- DELETE followed by REPLACE -- -- 1) create iterator at first space:insert({1, 10}) --- - [1, 10] ... space:insert({2, 20}) --- - [2, 20] ... 
space:insert({3, 30}) --- - [3, 30] ... iter_obj = create_iterator(space) --- ... space:delete({1}) --- ... space:delete({2}) --- ... space:delete({3}) --- ... space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... space:replace({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- 2) create iterator after initializing space:insert({1, 10}) --- - [1, 10] ... space:insert({2, 20}) --- - [2, 20] ... space:insert({3, 30}) --- - [3, 30] ... space:delete({1}) --- ... space:delete({2}) --- ... space:delete({3}) --- ... iter_obj = create_iterator(space) --- ... space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... space:replace({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- 3) create iterator within test case space:insert({1, 10}) --- - [1, 10] ... space:insert({2, 20}) --- - [2, 20] ... space:insert({3, 30}) --- - [3, 30] ... space:delete({1}) --- ... space:delete({2}) --- ... space:delete({3}) --- ... space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... iter_obj = create_iterator(space) --- ... space:replace({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- -- REPLACE followed by REPLACE -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... space:replace({3}) --- - [3] ... space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:select{} --- - - [1, 10] - [2, 20] - [3, 30] ... iterate_over(iter_obj) --- - 0: [1, 10] 1: [2, 20] 2: [3, 30] ... space:truncate() --- ... -- 2) create iterator after initializing space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... 
space:replace({3}) --- - [3] ... iter_obj = create_iterator(space) --- ... space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... space:replace({3, 30}) --- - [3, 30] ... space:select{} --- - - [1, 10] - [2, 20] - [3, 30] ... iterate_over(iter_obj) --- - 0: [1, 10] 1: [2, 20] 2: [3, 30] ... space:truncate() --- ... -- 3) create iterator within test case space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... space:replace({3}) --- - [3] ... space:replace({1, 10}) --- - [1, 10] ... space:replace({2, 20}) --- - [2, 20] ... iter_obj = create_iterator(space) --- ... space:replace({3, 30}) --- - [3, 30] ... space:select{} --- - - [1, 10] - [2, 20] - [3, 30] ... iterate_over(iter_obj) --- - 0: [1, 10] 1: [2, 20] 2: [3, 30] ... space:truncate() --- ... -- -- single UPSERT (for completeness) -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:upsert({1}, {{'!', 2, 10}}) --- ... space:upsert({2}, {{'!', 2, 20}}) --- ... space:upsert({3}, {{'!', 2, 30}}) --- ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- 2) create iterator after initializing space:upsert({1}, {{'!', 2, 10}}) --- ... iter_obj = create_iterator(space) --- ... space:upsert({2}, {{'!', 2, 20}}) --- ... space:upsert({3}, {{'!', 2, 30}}) --- ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- 3) create iterator within test case space:upsert({1}, {{'!', 2, 10}}) --- ... space:upsert({2}, {{'!', 2, 20}}) --- ... space:upsert({3}, {{'!', 2, 30}}) --- ... iter_obj = create_iterator(space) --- ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- -- single REPLACE (for completeness) -- -- 1) create iterator at first iter_obj = create_iterator(space) --- ... space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... 
space:replace({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- 2) create iterator after initializing space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... iter_obj = create_iterator(space) --- ... space:replace({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... -- 3) create iterator within test case space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... space:replace({3}) --- - [3] ... iter_obj = create_iterator(space) --- ... space:select{} --- - - [1] - [2] - [3] ... iterate_over(iter_obj) --- - 0: [1] 1: [2] 2: [3] ... space:truncate() --- ... space:drop() --- ... -- -- gh-1797 -- Test another iterator types and move the iterator -- during the space is modified, try to pass keys in pairs() -- invocations. -- -- Test iterator type EQ space1 = box.schema.space.create('test1', { engine = 'vinyl' }) --- ... pk = space1:create_index('primary') --- ... space1:replace({1}) --- - [1] ... space1:replace({2}) --- - [2] ... space1:replace({3}) --- - [3] ... space1:upsert({3}, {{'!', 2, 3}}) --- ... space1:upsert({5}, {{'!', 2, 5}}) --- ... iter_obj_sp1 = create_iterator(space1, 3, {iterator = box.index.EQ}) --- ... space1:replace({6}) --- - [6] ... iterator_next(iter_obj_sp1) --- - [3, 3] ... space1:replace({8}) --- - [8] ... space1:select{} --- - - [1] - [2] - [3, 3] - [5] - [6] - [8] ... iterate_over(iter_obj_sp1) --- - [] ... space1:drop() --- ... -- Test iterator type GT space2 = box.schema.space.create('test2', { engine = 'vinyl' }) --- ... pk = space2:create_index('primary') --- ... space2:replace({1}) --- - [1] ... space2:replace({2}) --- - [2] ... space2:replace({3}) --- - [3] ... space2:replace({4}) --- - [4] ... space2:replace({5}) --- - [5] ... iter_obj_sp2 = create_iterator(space2, 3, {iterator = box.index.GT}) --- ... 
-- Test iterator type GE space3 = box.schema.space.create('test3', { engine = 'vinyl' }) --- ... pk = space3:create_index('primary') --- ... space3:replace({1}) --- - [1] ... space3:replace({2}) --- - [2] ... space3:replace({3}) --- - [3] ... space3:replace({4}) --- - [4] ... space3:replace({5}) --- - [5] ... iter_obj_sp3 = create_iterator(space3, 3, {iterator = box.index.GE}) --- ... -- Test iterator type LT and LE simultaneously space4 = box.schema.space.create('test4', { engine = 'vinyl' }) --- ... pk = space4:create_index('primary') --- ... space4:replace({1}) --- - [1] ... space4:replace({2}) --- - [2] ... space4:replace({3}) --- - [3] ... space4:upsert({3}, {{'!', 2, 3}}) --- ... space4:upsert({5}, {{'!', 2, 5}}) --- ... iter_obj_sp4 = create_iterator(space4, 3, {iterator = box.index.LE}) --- ... iter_obj_sp4_2 = create_iterator(space4, 3, {iterator = box.index.LT}) --- ... space4:replace({6}) --- - [6] ... -- Snapshot for all spaces box.snapshot() --- - ok ... -- Continue GT space2:replace({6}) --- - [6] ... iterator_next(iter_obj_sp2) --- - [4] ... space2:replace({8}) --- - [8] ... space2:select{} --- - - [1] - [2] - [3] - [4] - [5] - [6] - [8] ... -- Continue GE space3:replace({6}) --- - [6] ... iterator_next(iter_obj_sp3) --- - [3] ... space3:replace({8}) --- - [8] ... space3:select{} --- - - [1] - [2] - [3] - [4] - [5] - [6] - [8] ... -- Continue LT and LE iterator_next(iter_obj_sp4) --- - [3, 3] ... space4:replace({8}) --- - [8] ... iterator_next(iter_obj_sp4_2) --- - [2] ... space4:select{} --- - - [1] - [2] - [3, 3] - [5] - [6] - [8] ... -- Snapshot for all spaces box.snapshot() --- - ok ... -- Continue GT iterate_over(iter_obj_sp2) --- - 0: [5] 1: [6] 2: [8] ... space2:truncate() --- ... space2:drop() --- ... -- Continue GE iterate_over(iter_obj_sp3) --- - 0: [4] 1: [5] 2: [6] 3: [8] ... space3:truncate() --- ... space3:drop() --- ... -- Continue LT and LE iterate_over(iter_obj_sp4) --- - 0: [2] 1: [1] ... 
iterate_over(iter_obj_sp4_2) --- - 0: [1] ... space4:truncate() --- ... space4:drop() --- ... -- -- Test same with multiple indexes. -- space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary') --- ... idx2 = space:create_index('idx2', { parts = {2, 'unsigned'} }) --- ... idx3 = space:create_index('idx3', { parts = {3, 'integer'}, unique = false }) --- ... -- Test iterator type EQ space:select{} --- - [] ... iter_obj = create_iterator(space, 1, {iterator = 'EQ'}) --- ... space:replace({1, 2, 3}) --- - [1, 2, 3] ... space:delete({1}) --- ... space:replace({1, 1, 1}) --- - [1, 1, 1] ... space:upsert({1, 1, 1}, {{'+', 2, 1}, {'+', 3, 2}}) --- ... space:select{} --- - - [1, 2, 3] ... iterate_over(iter_obj) --- - 0: [1, 2, 3] ... iter_obj2 = create_iterator(idx2, 2, {iterator = 'EQ'}) --- ... space:delete({1}) --- ... iterate_over(iter_obj2) --- - [] ... space:truncate() --- ... -- Test iterators inside the transaction, but before create several iterators with -- various space states. space:replace({1, 1, 1}) --- - [1, 1, 1] ... space:replace({2, 4, 1}) --- - [2, 4, 1] ... space:replace({3, 8, 1}) --- - [3, 8, 1] ... iter_obj = create_iterator(space, 2, {iterator = 'GT'}) -- must return only 3 --- ... space:replace({4, 16, -1}) --- - [4, 16, -1] ... space:replace({5, 32, -10}) --- - [5, 32, -10] ... iter_obj2 = create_iterator(idx3, 0, {iterator = 'LE'}) -- must return -1 and -10 --- ... space:replace({6, 64, -10}) --- - [6, 64, -10] ... iter_obj3 = create_iterator(idx3, 0, {iterator = 'GE'}) -- must return {1} * 3 --- ... box.begin() --- ... space:replace({7, 128, 20}) --- - [7, 128, 20] ... iter_obj4 = create_iterator(space) -- must fail after rollback --- ... box.rollback() --- ... space:select{} --- - - [1, 1, 1] - [2, 4, 1] - [3, 8, 1] - [4, 16, -1] - [5, 32, -10] - [6, 64, -10] ... iterate_over(iter_obj) --- - 0: [3, 8, 1] 1: [4, 16, -1] 2: [5, 32, -10] 3: [6, 64, -10] ... 
iterate_over(iter_obj2) --- - 0: [4, 16, -1] 1: [6, 64, -10] 2: [5, 32, -10] ... iterate_over(iter_obj3) --- - 0: [1, 1, 1] 1: [2, 4, 1] 2: [3, 8, 1] ... iterate_over(iter_obj4) --- - error: The transaction the cursor belongs to has ended ... space:truncate() --- ... -- Iterate within transaction space:replace({1, 1, 1}) --- - [1, 1, 1] ... box.begin() --- ... space:replace({2, 2, 1}) --- - [2, 2, 1] ... iter_obj = create_iterator(pk, 1, {iterator = 'GE'}) --- ... space:replace({3, 3, 10}) --- - [3, 3, 10] ... iter_obj2 = create_iterator(idx3, 20, {iterator = 'LT'}) --- ... space:replace({4, 4, 15}) --- - [4, 4, 15] ... space:replace({5, 5, 25}) --- - [5, 5, 25] ... -- Must print all, include tuples added after the iterator creation -- because of the opened transaction presense. iterate_over(iter_obj) --- - 0: [1, 1, 1] 1: [2, 2, 1] 2: [3, 3, 10] 3: [4, 4, 15] 4: [5, 5, 25] ... iterator_next(iter_obj2) --- - [4, 4, 15] ... space:replace({12, 12, 12}) --- - [12, 12, 12] ... iterator_next(iter_obj2) --- - [12, 12, 12] ... space:replace({9, 9, 9}) --- - [9, 9, 9] ... iterate_over(iter_obj2) --- - 0: [3, 3, 10] 1: [9, 9, 9] 2: [2, 2, 1] 3: [1, 1, 1] ... box.commit() --- ... space:truncate() --- ... -- Create the iterator before the transaction, but iterate inside space:replace({1, 1, 1}) --- - [1, 1, 1] ... space:replace({2, 2, 2}) --- - [2, 2, 2] ... iter_obj = create_iterator(pk) --- ... iter_obj2 = create_iterator(idx2, 2, {iterator = 'GE'}) --- ... space:replace({3, 3, 3}) --- - [3, 3, 3] ... box.begin() --- ... space:replace({4, 4, 4}) --- - [4, 4, 4] ... iterate_over(iter_obj) --- - 0: [1, 1, 1] 1: [2, 2, 2] 2: [3, 3, 3] ... iterate_over(iter_obj2) --- - 0: [2, 2, 2] 1: [3, 3, 3] ... box.commit() --- ... space:truncate() --- ... -- Create the iterator inside the transaction, but iterate outside space:replace({1, 1, 1}) --- - [1, 1, 1] ... box.begin() --- ... space:replace({2, 2, 2}) --- - [2, 2, 2] ... iter_obj = create_iterator(pk) --- ... 
space:replace({3, 3, 3}) --- - [3, 3, 3] ... box.commit() --- ... iterate_over(iter_obj) --- - error: The transaction the cursor belongs to has ended ... space:truncate() --- ... -- Create the iterator inside the transaction before any other actions -- and iterate inside space:replace({1, 1, 1}) --- - [1, 1, 1] ... box.begin() --- ... iter_obj = create_iterator(pk) --- ... space:replace({2, 2, 2}) --- - [2, 2, 2] ... iterate_over(iter_obj) --- - 0: [1, 1, 1] 1: [2, 2, 2] ... box.commit() --- ... space:drop() --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary', { parts = { 1, 'uint', 2, 'uint' } }) --- ... box.begin() --- ... space:replace({1, 1}) --- - [1, 1] ... space:replace({2, 2}) --- - [2, 2] ... space:select({1, 1}, {iterator = 'GT'}) --- - - [2, 2] ... space:select({1, 1}, {iterator = 'GE'}) --- - - [1, 1] - [2, 2] ... space:select({}, {iterator = 'GE'}) --- - - [1, 1] - [2, 2] ... space:select({}) --- - - [1, 1] - [2, 2] ... space:select({}, {iterator = 'LE'}) --- - - [2, 2] - [1, 1] ... space:select({1}, {iterator = 'GT'}) --- - - [2, 2] ... space:select({1}, {iterator = 'GE'}) --- - - [1, 1] - [2, 2] ... space:select({1}, {iterator = 'LT'}) --- - [] ... space:select({1}, {iterator = 'LE'}) --- - - [1, 1] ... space:select({2}, {iterator = 'GT'}) --- - [] ... space:select({2}, {iterator = 'GE'}) --- - - [2, 2] ... space:select({2}, {iterator = 'LT'}) --- - - [1, 1] ... space:select({2}, {iterator = 'LE'}) --- - - [2, 2] - [1, 1] ... box.commit() --- ... space:drop() --- ... --make runs with more than one record with every key s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = s:create_index('primary', { parts = { 1, 'uint' } }) --- ... for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end --- ... itr = create_iterator(s, {}, {}) --- ... iterator_next(itr) --- - [1, 1] ... for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end --- ... iterator_next(itr) --- - [2, 1] ... 
box.snapshot() -- create last-level run --- - ok ... iterator_next(itr) --- - [3, 1] ... for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end --- ... iterator_next(itr) --- - [4, 1] ... box.snapshot() -- create not-last-level run --- - ok ... iterator_next(itr) --- - [5, 1] ... for i=1,10 do s:upsert({i, 1}, {{'+', 2, 1}}) end --- ... iterator_next(itr) --- - [6, 1] ... s:select{1} --- - - [1, 4] ... s:drop() --- ... -- gh-2394 -- -- Check GE/LE iterators in a transaction involving several spaces. -- test_run:cmd("setopt delimiter ';'") --- - true ... s = {} for i=1,3 do s[i] = box.schema.space.create('test'..i, { engine = 'vinyl' }) _ = s[i]:create_index('primary') s[i]:insert{20, 'B'..i} s[i]:insert{40, 'D'..i} end test_run:cmd("setopt delimiter ''"); --- ... box.begin() --- ... for i=1,3 do s[i]:insert{10, 'A'..i} s[i]:insert{30, 'C'..i} s[i]:insert{50, 'E'..i} end --- ... s[1]:select({}, {iterator = 'GE'}) --- - - [10, 'A1'] - [20, 'B1'] - [30, 'C1'] - [40, 'D1'] - [50, 'E1'] ... s[1]:select({}, {iterator = 'LE'}) --- - - [50, 'E1'] - [40, 'D1'] - [30, 'C1'] - [20, 'B1'] - [10, 'A1'] ... s[2]:select({}, {iterator = 'GE'}) --- - - [10, 'A2'] - [20, 'B2'] - [30, 'C2'] - [40, 'D2'] - [50, 'E2'] ... s[2]:select({}, {iterator = 'LE'}) --- - - [50, 'E2'] - [40, 'D2'] - [30, 'C2'] - [20, 'B2'] - [10, 'A2'] ... s[3]:select({}, {iterator = 'GE'}) --- - - [10, 'A3'] - [20, 'B3'] - [30, 'C3'] - [40, 'D3'] - [50, 'E3'] ... s[3]:select({}, {iterator = 'LE'}) --- - - [50, 'E3'] - [40, 'D3'] - [30, 'C3'] - [20, 'B3'] - [10, 'A3'] ... box.rollback() --- ... for i=1,3 do s[i]:drop() end --- ... sm = box.schema.create_space('sm', { engine = 'memtx'}) --- ... im1 = sm:create_index('i1', { type = 'tree', parts = {1,'unsigned'}, unique = true }) --- ... im2 = sm:create_index('i2', { type = 'tree', parts = {2,'unsigned'}, unique = true }) --- ... sv = box.schema.create_space('sv', { engine = 'vinyl'}) --- ... 
iv1 = sv:create_index('i1', { type = 'tree', parts = {1,'unsigned'}, unique = true }) --- ... iv2 = sv:create_index('i2', { type = 'tree', parts = {2,'unsigned'}, unique = true }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function f() for i = 1,100 do local arr = {} for j = 1,100 do table.insert(arr, {math.random(1000), math.random(1000)}) end box.begin() for _,t in pairs(arr) do pcall(sm.replace, sm, t) end box.commit() box.begin() for _,t in pairs(arr) do pcall(sv.replace, sv, t) end box.commit() end end function compare(a, b) if #a ~= #b then return "different sizes" end local c = #a for i = 1,c do if a[i][1] ~= b[i][1] or a[i][2] ~= b[i][2] then return "different data" end end return "equal" end test_run:cmd("setopt delimiter ''"); --- ... f() --- ... compare(sm:select{}, sv:select{}) --- - equal ... compare(im1:select{}, iv1:select{}) --- - equal ... compare(im2:select{}, iv2:select{}) --- - equal ... sv:drop() --- ... sm:drop() --- ... s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = s:create_index('primary', { parts = { 1, 'uint' } }) --- ... s:replace{0, 0} --- - [0, 0] ... s:replace{1, 10} --- - [1, 10] ... box.begin() --- ... s:select{0 } --- - - [0, 0] ... txn_proxy = require('txn_proxy') --- ... c = txn_proxy.new() --- ... c("s:replace{0, 1}") --- - - [0, 1] ... s:upsert({1, 1}, {{'+', 2, 5}}) --- ... s:select{0} --- - - [0, 0] ... s:select{1} --- - - [1, 15] ... box.commit() --- - error: Transaction has been aborted by conflict ... s:drop() --- ... s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... i = s:create_index('primary', { parts = { 1, 'uint' } }) --- ... s:replace{1} s:replace{2} s:replace{3} --- ... s:select{} --- - - [1] - [2] - [3] ... box.begin() --- ... gen,param,state = i:pairs({0}, {iterator = 'GE'}) --- ... state, value = gen(param, state) --- ... value --- - [1] ... s:delete{2} --- ... state, value = gen(param, state) --- ... value --- - [3] ... box.commit() --- ... 
s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade.result0000664000000000000000000000210013306560010021200 0ustar rootroottest_run = require('test_run').new() --- ... version = test_run:get_cfg('version') --- ... work_dir = 'vinyl/upgrade/' .. version --- ... test_run:cmd('create server upgrade with script="vinyl/upgrade.lua", workdir="' .. work_dir .. '"') --- - true ... test_run:cmd('start server upgrade') --- - true ... test_run:switch('upgrade') --- - true ... box.space.test.index.i1:select() --- - - [1, 'a'] - [2, 'b'] - [3, 'c'] ... box.space.test.index.i2:select() --- - - [1, 'a'] - [2, 'b'] - [3, 'c'] ... box.space.test_truncate.index.i1:select() --- - - [123, 'abc'] ... box.space.test_truncate.index.i2:select() --- - - [123, 'abc'] ... box.space.test_split:select() --- - - [1, 5] - [2, 6] - [3, 7] - [4, 8] - [5, 9] - [6, 10] - [7, 11] - [8, 12] ... box.space.test_split:select() --- - - [1, 5] - [2, 6] - [3, 7] - [4, 8] - [5, 9] - [6, 10] - [7, 11] - [8, 12] ... box.space.test_drop == nil --- - true ... test_run:switch('default') --- - true ... test_run:cmd('stop server upgrade') --- - true ... test_run:cmd('cleanup server upgrade') --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/large.lua0000664000000000000000000000224713306560010020122 0ustar rootrootfiber = require('fiber') digest = require('digest') local PAGE_SIZE = 1024 local RANGE_SIZE = 64 * PAGE_SIZE local TUPLE_SIZE = 128 * PAGE_SIZE local function prepare() local s1 = box.schema.space.create('large_s1', { engine = 'vinyl', if_not_exists = true }) s1:create_index('pk', { if_not_exists = true; range_size = RANGE_SIZE; page_size = PAGE_SIZE; }) end local function large_test(iter_limit) iter_limit = iter_limit or 500 local data = digest.urandom(TUPLE_SIZE) for i=0,iter_limit do local space = box.space.large_s1 space:replace({i, data}) if i % 100 == 0 then collectgarbage('collect') end end end local function check_test() local i = 0 for _, tuple in box.space.large_s1:pairs() do if TUPLE_SIZE ~= tuple[2]:len() then error('Large tuple has incorect length') end if i % 10 == 0 then collectgarbage('collect') end i = i + 1 end end local function teardown() box.space.large_s1:drop() end return { prepare = prepare; large = large_test, check = check_test; teardown = teardown; } tarantool_1.9.1.26.g63eb81e3c/test/vinyl/misc.test.lua0000664000000000000000000000474113306565107020756 0ustar rootrootfiber = require('fiber') -- -- gh-2784: do not validate space formatted but not indexed fields -- in surrogate statements. -- -- At first, test simple surrogate delete generated from a key. format = {{name = 'a', type = 'unsigned'}, {name = 'b', type = 'unsigned'}} s = box.schema.space.create('test', {engine = 'vinyl', format = format}) _ = s:create_index('pk') s:insert{1, 1} -- Type of a second field in a surrogate tuple must be NULL but -- with UNSIGNED type, specified in a tuple_format. It is -- possible, because only indexed fields are used in surrogate -- tuples. s:delete(1) s:drop() -- Test select after snapshot. This select gets surrogate -- tuples from a disk. Here NULL also can be stored in formatted, -- but not indexed field. 
format = {} format[1] = {name = 'a', type = 'unsigned'} format[2] = {name = 'b', type = 'unsigned'} format[3] = {name = 'c', type = 'unsigned'} s = box.schema.space.create('test', {engine = 'vinyl', format = format}) _ = s:create_index('pk') _ = s:create_index('sk', {parts = {2, 'unsigned'}}) s:insert{1, 1, 1} box.snapshot() s:delete(1) box.snapshot() s:select() s:drop() -- -- gh-2983: ensure the transaction associated with a fiber -- is automatically rolled back if the fiber stops. -- s = box.schema.create_space('test', { engine = 'vinyl' }) _ = s:create_index('pk') tx1 = box.info.vinyl().tx ch = fiber.channel(1) _ = fiber.create(function() box.begin() s:insert{1} ch:put(true) end) ch:get() tx2 = box.info.vinyl().tx tx2.commit - tx1.commit -- 0 tx2.rollback - tx1.rollback -- 1 s:drop() -- -- gh-3158: check of duplicates is skipped if the index -- is contained by another unique index which is checked. -- s = box.schema.create_space('test', {engine = 'vinyl'}) i1 = s:create_index('i1', {unique = true, parts = {1, 'unsigned', 2, 'unsigned'}}) i2 = s:create_index('i2', {unique = true, parts = {2, 'unsigned', 1, 'unsigned'}}) i3 = s:create_index('i3', {unique = true, parts = {3, 'unsigned', 4, 'unsigned', 5, 'unsigned'}}) i4 = s:create_index('i4', {unique = true, parts = {5, 'unsigned', 4, 'unsigned'}}) i5 = s:create_index('i5', {unique = true, parts = {4, 'unsigned', 5, 'unsigned', 1, 'unsigned'}}) i6 = s:create_index('i6', {unique = true, parts = {4, 'unsigned', 6, 'unsigned', 5, 'unsigned'}}) i7 = s:create_index('i7', {unique = true, parts = {6, 'unsigned'}}) s:insert{1, 1, 1, 1, 1, 1} i1:info().lookup -- 1 i2:info().lookup -- 0 i3:info().lookup -- 0 i4:info().lookup -- 1 i5:info().lookup -- 0 i6:info().lookup -- 0 i7:info().lookup -- 1 s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/snapshot.result0000664000000000000000000000375013306560010021424 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require 'fiber' --- ... 
fio = require 'fio' --- ... xlog = require 'xlog' --- ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... -- Start a few fibers populating the space in the background. n_workers = 3 --- ... c = fiber.channel(n_workers) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i=1,n_workers do fiber.create(function() for j=i,1000,n_workers do s:insert{j} end c:put(true) end) end test_run:cmd("setopt delimiter ''"); --- ... -- Let the background fibers run. fiber.sleep(0.001) --- ... -- Concurrent checkpoint. box.snapshot() --- - ok ... -- Join background fibers. for i=1,n_workers do c:get() end --- ... -- Get list of files from the last checkpoint. files = box.backup.start() --- ... -- Extract the last checkpoint LSN and find -- max LSN stored in run files. snap_lsn = -1 --- ... run_lsn = -1 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for _, path in ipairs(files) do suffix = string.gsub(path, '.*%.', '') if suffix == 'snap' then snap_lsn = tonumber(fio.basename(path, '.snap')) end if suffix == 'run' then for lsn, _ in xlog.pairs(path) do if run_lsn < lsn then run_lsn = lsn end end end end test_run:cmd("setopt delimiter ''"); --- ... snap_lsn >= 0 --- - true ... run_lsn >= 0 --- - true ... box.backup.stop() --- ... -- Check that run files only contain statements -- inserted before checkpoint. snap_lsn == run_lsn or {snap_lsn, run_lsn} --- - true ... s:drop() --- ... -- -- gh-2614 about broken vy_run_iterator_start_from. -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... p = s:create_index('pk') --- ... s:replace{100} --- - [100] ... s:replace{101} --- - [101] ... s:replace{102} --- - [102] ... s:replace{103} --- - [103] ... box.snapshot() --- - ok ... s:select({99}, {iterator = box.index.LE, limit = 10}) --- - [] ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/split_coalesce.test.lua0000664000000000000000000000574413306565107023020 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') -- Temporary table to restore variables after restart. var = box.schema.space.create('var') _ = var:create_index('primary', {parts = {1, 'string'}}) s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('primary', {unique=true, parts={1, 'unsigned'}, page_size=256, range_size=2048, run_count_per_level=1, run_size_ratio=1000}) function vyinfo() return box.space.test.index.primary:info() end range_count = 4 tuple_size = math.ceil(s.index.primary.options.page_size / 4) pad_size = tuple_size - 30 assert(pad_size >= 16) keys_per_range = math.floor(s.index.primary.options.range_size / tuple_size) key_count = range_count * keys_per_range -- Rewrite the space until enough ranges are created. test_run:cmd("setopt delimiter ';'") iter = 0 function gen_tuple(k) local pad = {} for i = 1,pad_size do pad[i] = string.char(math.random(65, 90)) end return {k, k + iter, table.concat(pad)} end while vyinfo().range_count < range_count do iter = iter + 1 for k = key_count,1,-1 do s:replace(gen_tuple(k)) end box.snapshot() fiber.sleep(0.01) end; test_run:cmd("setopt delimiter ''"); vyinfo().range_count -- Remember the number of iterations and the number of keys -- so that we can check data validity after restart. _ = var:insert{'iter', iter} _ = var:insert{'key_count', key_count} _ = var:insert{'keys_per_range', keys_per_range} -- Check that the space can be recovered after splitting ranges. test_run:cmd('restart server default') fiber = require 'fiber' s = box.space.test var = box.space.var iter = var:get('iter')[2] key_count = var:get('key_count')[2] keys_per_range = var:get('keys_per_range')[2] function vyinfo() return box.space.test.index.primary:info() end -- Check the space content. 
s:count() == key_count for k = 1,key_count do v = s:get(k) assert(v[2] == k + iter) end -- Delete 90% keys, remove padding for the rest. test_run:cmd("setopt delimiter ';'") for k = 1,key_count do if k % 10 ~= 0 then s:delete(k) else s:update(k, {{'#', 3, 1}}) end end test_run:cmd("setopt delimiter ''"); box.snapshot() -- Trigger compaction until ranges are coalesced. test_run:cmd("setopt delimiter ';'") while vyinfo().range_count > 1 do for i = 1,key_count,keys_per_range do s:delete{i} end box.snapshot() fiber.sleep(0.01) end test_run:cmd("setopt delimiter ''"); vyinfo().range_count -- Check that the space can be recovered after coalescing ranges. test_run:cmd('restart server default') s = box.space.test var = box.space.var iter = var:get('iter')[2] key_count = var:get('key_count')[2] -- Check the space content. test_run:cmd("setopt delimiter ';'") key_count_left = 0 for k = 1,key_count do v = s:get(k) if k % 10 == 0 then assert(v[2] == k + iter) key_count_left = key_count_left + 1 else assert(v == nil) end end test_run:cmd("setopt delimiter ''"); s:count() == key_count_left s:drop() var:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/constraint.result0000664000000000000000000000716413306560010021754 0ustar rootroot-- key type validations (str, num) space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... space:insert{1} --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... space:replace{1} --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... space:delete{1} --- - error: 'Supplied key type of part 0 does not match index part type: expected string' ... space:update({1}, {{'=', 1, 101}}) --- - error: 'Supplied key type of part 0 does not match index part type: expected string' ... 
space:upsert({1}, {{'+', 1, 10}}) --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... space:get{1} --- - error: 'Supplied key type of part 0 does not match index part type: expected string' ... index:pairs(1, {iterator = 'GE'}) --- - error: 'Supplied key type of part 0 does not match index part type: expected string' ... space:drop() --- ... -- key type validations (num, str) space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space:insert{'A'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:replace{'A'} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:delete{'A'} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... space:update({'A'}, {{'=', 1, 101}}) --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... space:upsert({'A'}, {{'+', 1, 10}}) --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:get{'A'} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... index:pairs('A', {iterator = 'GE'}) --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... space:drop() --- ... -- ensure all key-parts are passed space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1,'unsigned',2,'unsigned'} }) --- ... space:insert{1} --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... space:replace{1} --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... 
space:delete{1} --- - error: Invalid key part count in an exact match (expected 2, got 1) ... space:update(1, {{'=', 1, 101}}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... space:upsert({1}, {{'+', 1, 10}}) --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... space:get{1} --- - error: Invalid key part count in an exact match (expected 2, got 1) ... index:select({1}, {iterator = box.index.GT}) --- - [] ... space:drop() --- ... ------------------------------------------------------------------------------- -- Key part length without limit ------------------------------------------------------------------------------- space = box.schema.space.create('single_part', { engine = 'vinyl' }) --- ... _ = space:create_index('primary', { type = 'tree', parts = {1, 'string'}}) --- ... t1 = space:insert({string.rep('x', 1020)}) --- ... t1 = space:insert({string.rep('x', 10210)}) --- ... t3 = space:insert({string.rep('x', 102200)}) --- ... space:drop() --- ... space = nil --- ... pk = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/gh.result0000664000000000000000000003055013306565107020175 0ustar rootrootfiber = require('fiber') --- ... env = require('test_run') --- ... test_run = env.new() --- ... -- gh-283: hang after three creates and drops s = box.schema.space.create('space0', {engine='vinyl'}) --- ... i = s:create_index('space0', {type = 'tree', parts = {1, 'string'}}) --- ... s:insert{'a', 'b', 'c'} --- - ['a', 'b', 'c'] ... s:drop() --- ... s = box.schema.space.create('space0', {engine='vinyl'}) --- ... i = s:create_index('space0', {type = 'tree', parts = {1, 'string'}}) --- ... s:insert{'a', 'b', 'c'} --- - ['a', 'b', 'c'] ... t = s.index[0]:select({}, {iterator = box.index.ALL}) --- ... t --- - - ['a', 'b', 'c'] ... s:drop() --- ... s = box.schema.space.create('space0', {engine='vinyl'}) --- ... i = s:create_index('space0', {type = 'tree', parts = {1, 'string'}}) --- ... 
s:insert{'a', 'b', 'c'} --- - ['a', 'b', 'c'] ... t = s.index[0]:select({}, {iterator = box.index.ALL}) --- ... t --- - - ['a', 'b', 'c'] ... s:drop() --- ... -- gh-280: crash if insert without index s = box.schema.space.create('test', {engine='vinyl'}) --- ... s:insert{'a'} --- - error: 'No index #0 is defined in space ''test''' ... s:drop() --- ... -- gh-436: No error when creating temporary vinyl space s = box.schema.space.create('tester',{engine='vinyl', temporary=true}) --- - error: 'Can''t modify space ''tester'': engine does not support temporary flag' ... -- gh-432: ignored limit s = box.schema.space.create('tester',{engine='vinyl'}) --- ... i = s:create_index('vinyl_index', {}) --- ... for v=1, 100 do s:insert({v}) end --- ... t = s:select({''},{iterator='GT', limit =1}) --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... t --- - - ['a', 'b', 'c'] ... t = s:select({},{iterator='GT', limit =1}) --- ... t --- - - [1] ... s:drop() --- ... s = box.schema.space.create('tester', {engine='vinyl'}) --- ... i = s:create_index('vinyl_index', {type = 'tree', parts = {1, 'string'}}) --- ... for v=1, 100 do s:insert({tostring(v)}) end --- ... t = s:select({''},{iterator='GT', limit =1}) --- ... t --- - - ['1'] ... t = s:select({},{iterator='GT', limit =1}) --- ... t --- - - ['1'] ... s:drop() --- ... -- gh-681: support or produce error on space::alter s = box.schema.space.create('M', {engine='vinyl'}) --- ... i = s:create_index('primary',{}) --- ... s:insert{5, 5} --- - [5, 5] ... s.index.primary:alter({parts={2,'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... s:drop() --- ... -- gh-1008: assertion if insert of wrong type s = box.schema.space.create('t', {engine='vinyl'}) --- ... i = s:create_index('primary',{parts={1, 'string'}}) --- ... box.space.t:insert{1,'A'} --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... s:drop() --- ... 
-- gh-1009: search for empty string fails s = box.schema.space.create('t', {engine='vinyl'}) --- ... i = s:create_index('primary',{parts={1, 'string'}}) --- ... s:insert{''} --- - [''] ... #i:select{''} --- - 1 ... i:get{''} --- - [''] ... s:drop() --- ... -- gh-1407: upsert generate garbage data email_space_id = 'email' --- ... email_space = box.schema.space.create(email_space_id, { engine = 'vinyl', if_not_exists = true }) --- ... i = email_space:create_index('primary', { parts = {1, 'string'} }) --- ... time = 1234 --- ... email = "test@domain.com" --- ... email_hash_index = "asdfasdfs" --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... 
box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:upsert({email, email_hash_index, time}, {{'!', -1, email_hash_index}, {'!', -1, time}}) --- ... box.space.email:select{email} --- - - ['test@domain.com', 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234, 'asdfasdfs', 1234] ... box.space.email:drop() --- ... --gh-1540: vinyl: invalid results from LE/LT iterators s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... i = box.space.test:create_index('primary', { parts = { 1, 'unsigned', 2, 'unsigned' } }) --- ... for i =1,2 do for j=1,9 do box.space.test:replace({i, j}) end end --- ... box.space.test:select({1, 999999}, {iterator = 'LE'}) --- - - [1, 9] - [1, 8] - [1, 7] - [1, 6] - [1, 5] - [1, 4] - [1, 3] - [1, 2] - [1, 1] ... box.space.test:drop() --- ... s1 = box.schema.create_space('s1',{engine='vinyl'}) --- ... i1 = s1:create_index('primary',{parts={1,'unsigned',2,'unsigned'}}) --- ... s2 = box.schema.create_space('s2',{engine='memtx'}) --- ... i2 = s2:create_index('primary',{parts={1,'unsigned',2,'unsigned'}}) --- ... for i = 1,3 do for j = 1,5 do s1:insert{i, j} s2:insert{i, j} end end --- ... itrs = {'GE', 'GT', 'LE', 'LT'} --- ... good = true --- ... test_run:cmd("setopt delimiter ';'") --- - true ... 
function my_equal(a, b) if type(a) ~= type(b) then return false elseif type(a) ~= 'table' and not box.tuple.is(a) then return a == b end for k,v in pairs(a) do if not my_equal(b[k], v) then return false end end for k,v in pairs(b) do if not my_equal(a[k], v) then return false end end return true end; --- ... for i = 0,4 do for j = 0,6 do for k = 1,4 do opts = {iterator=itrs[k]} if not my_equal(s1:select({i, j}, opts), s2:select({i, j}, opts)) then good = false end end end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... good --- - true ... s1:drop() --- ... s2:drop() --- ... -- -- gh-1608: tuple disappears after invalid upsert -- s = box.schema.create_space('test', {engine = 'vinyl'}) --- ... _ = s:create_index('test', {type = 'tree', parts = {1, 'unsigned', 2, 'string'}}) --- ... s:put({1, 'test', 3, 4}) --- - [1, 'test', 3, 4] ... s:select() --- - - [1, 'test', 3, 4] ... s:upsert({1, 'test', 'failed'}, {{'=', 3, 33}, {'=', 4, nil}}) --- - error: Unknown UPDATE operation ... s:select() --- - - [1, 'test', 3, 4] ... s:drop() --- ... -- -- gh-1684: vinyl: infinite cycle on box.snapshot() -- -- Create and drop several indices space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'string'} }) --- ... index3 = space:create_index('third', { parts = {3, 'string'}, unique = false }) --- ... index2:drop() --- ... index2 = space:create_index('secondary', { parts = {4, 'string'} }) --- ... index3:drop() --- ... index2:drop() --- ... index2 = space:create_index('secondary', { parts = {2, 'string'} }) --- ... index3 = space:create_index('third', { parts = {3, 'string'}, unique = false }) --- ... index4 = space:create_index('fourth', { parts = {2, 'string', 3, 'string'} }) --- ... space:drop() --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... 
pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... box.snapshot() --- - ok ... space:drop() --- ... -- -- gh-1658: auto_increment -- space = box.schema.space.create('tweedledum', { engine = 'vinyl' }) --- ... _ = space:create_index('primary') --- ... space:auto_increment{'a'} --- - [1, 'a'] ... space:auto_increment{'b'} --- - [2, 'b'] ... space:auto_increment{'c'} --- - [3, 'c'] ... space:select{} --- - - [1, 'a'] - [2, 'b'] - [3, 'c'] ... space:truncate() --- ... space:auto_increment{'a'} --- - [1, 'a'] ... space:auto_increment{'b'} --- - [2, 'b'] ... space:auto_increment{'c'} --- - [3, 'c'] ... space:select{} --- - - [1, 'a'] - [2, 'b'] - [3, 'c'] ... space:delete{2} --- ... space:auto_increment{'d'} --- - [4, 'd'] ... space:select{} --- - - [1, 'a'] - [3, 'c'] - [4, 'd'] ... space:drop() --- ... -- -- Truncate basic test -- -- truncate s = box.schema.space.create('name_of_space', {engine='vinyl'}) --- ... i = s:create_index('name_of_index', {type = 'tree', parts = {1, 'string'}}) --- ... s:insert{'a', 'b', 'c'} --- - ['a', 'b', 'c'] ... s:select{'a'} --- - - ['a', 'b', 'c'] ... s:truncate() --- ... s:select{} --- - [] ... s:insert{'b', 'c', 'd'} --- - ['b', 'c', 'd'] ... s:select{} --- - - ['b', 'c', 'd'] ... s:truncate() --- ... s:select{} --- - [] ... s:drop() --- ... -- -- gh-1725: vinyl: merge iterator can't merge more than two runs -- s0 = box.schema.space.create('tweedledum', {engine = 'vinyl'}) --- ... i0 = s0:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... -- integer keys s0:replace{1, 'tuple'} --- - [1, 'tuple'] ... box.snapshot() --- - ok ... s0:replace{2, 'tuple 2'} --- - [2, 'tuple 2'] ... box.snapshot() --- - ok ... s0:insert{3, 'tuple 3'} --- - [3, 'tuple 3'] ... s0.index['primary']:get{1} --- - [1, 'tuple'] ... s0.index['primary']:get{2} --- - [2, 'tuple 2'] ... s0.index['primary']:get{3} --- - [3, 'tuple 3'] ... s0:drop() --- ... 
-- -- gh-2081: snapshot hang -- s = box.schema.space.create('tweedledum', {engine='vinyl'}) --- ... i = s:create_index('primary') --- ... _ = s:insert{1} --- ... _ = fiber.create(function() fiber.sleep(0.001) s:insert{2} end) --- ... box.snapshot() --- - ok ... s:drop() --- ... s = box.schema.space.create("test", {engine='vinyl'}) --- ... i1 = box.space.test:create_index('i1', {parts = {1, 'unsigned'}}) --- ... i2 = box.space.test:create_index('i2', {unique = false, parts = {2, 'unsigned'}}) --- ... count = 10000 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() for i = 1, count do s:replace({math.random(count), math.random(count)}) if i % 100 == 0 then box.commit() box.begin() end end box.commit() test_run:cmd("setopt delimiter ''"); --- ... s.index.i1:count() == s.index.i2:count() --- - true ... s:drop() --- ... -- https://github.com/tarantool/tarantool/issues/2588 max_tuple_size = box.cfg.vinyl_max_tuple_size --- ... box.cfg { vinyl_max_tuple_size = 40 * 1024 * 1024 } --- ... s = box.schema.space.create('vinyl', { engine = 'vinyl' }) --- ... i = box.space.vinyl:create_index('primary') --- ... _ = s:replace({1, string.rep('x', 35 * 1024 * 1024)}) --- ... s:drop() --- ... box.cfg { vinyl_max_tuple_size = max_tuple_size } --- ... -- https://github.com/tarantool/tarantool/issues/2614 count = 10000 --- ... s = box.schema.space.create("test", {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... cont = true --- ... finished = 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... _ = fiber.create(function() while cont do s:select(math.random(count), {iterator = box.index.LE, limit = 10}) fiber.sleep(0.01) end finished = finished + 1 end); --- ... _ = fiber.create(function() while cont do box.snapshot() fiber.sleep(0.01) end finished = finished + 1 end); --- ... for i = 1, count do s:replace{math.random(count)} end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... cont = false --- ... 
while finished ~= 2 do fiber.sleep(0.01) end --- ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/write_iterator.test.lua0000664000000000000000000002507513306565107023071 0ustar rootrootenv = require('test_run') test_run = env.new() fiber = require('fiber') -- -- Tests on data integrity after dump of memory runs or range -- compaction. -- -- The aim is to test vy_write_iterator. There are several combinations -- of various commands that can occur: -- 1) delete followed by upsert : write iterator should convert -- upsert to replace (insert) -- 2) upsert followed by delete: the upsert is filtered out, -- delete can be filtered out or not depending on whether it's -- compaction (filtered out) or dump (preserved) -- 3) upsert followed by upsert: two upserts are folded together -- into one -- 4) upsert followed by replace: upsert is replaced -- 5) replace followed by upsert: two commands are folded -- into a single replace with upsert ops applied -- 6) replace followed by delete: -- both are eliminated in case of compaction; -- replace is filtered out if it's dump -- 7) delete followed by replace: delete is filtered out -- 8) replace followed by replace: the first replace is filtered -- out -- 9) single upsert (for completeness) -- 10) single replace (for completeness) space = box.schema.space.create('test', { engine = 'vinyl' }) -- -- pk = space:create_index('primary', { page_size = 12 * 1024, range_size = 12 * 1024 }) -- Insert many big tuples and then call snapshot to -- force dumping and compacting. 
big_val = string.rep('1', 2000) _ = space:insert{1} _ = space:insert{2, big_val} _ = space:insert{3, big_val} _ = space:insert{5, big_val} _ = space:insert{6, big_val} _ = space:insert{7, big_val} _ = space:insert{8, big_val} _ = space:insert{9, big_val} _ = space:insert{10, big_val} _ = space:insert{11, big_val} space:count() box.snapshot() -- -- Create a couple of tiny runs on disk, to increate the "number of runs" -- heuristic of hte planner and trigger compaction -- space:insert{12} box.snapshot() space:insert{13} box.snapshot() #space:select{} space:drop() -- -- Create a vinyl index with small page_size parameter, so that -- big tuples will not fit in a single page. -- space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary', { page_size = 256, range_size = 3 * 1024 }) space:insert({1}) box.snapshot() big_val = string.rep('1', 2000) _ = space:insert{2, big_val} _ = space:insert{3, big_val} _ = space:insert{5, big_val} _ = space:insert{6, big_val} _ = space:insert{7, big_val} _ = space:insert{8, big_val} _ = space:insert{9, big_val} _ = space:insert{10, big_val} _ = space:insert{11, big_val} -- Increate the number of runs, trigger compaction space:count() box.snapshot() space:insert{12} box.snapshot() space:insert{13} box.snapshot() #space:select{} space:drop() -- Test dumping and compacting a space with more than one index. space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary', { page_size = 512, range_size = 1024 * 12 }) index2 = space:create_index('secondary', { parts = {2, 'string'}, page_size = 512, range_size = 1024 * 12 }) for i = 1, 100 do space:insert{i, ''..i} if i % 2 == 0 then box.snapshot() end end space:delete{1} space:delete{10} space:delete{100} box.snapshot() index2:delete{'9'} index2:delete{'99'} box.snapshot() space:select{2} -- Test that not dumped changes are visible. 
space:upsert({2, '2'}, {{'=', 3, 22}}) space:select{2} space:upsert({2, '2'}, {{'!', 3, 222}}) space:select{2} space:upsert({2, '2'}, {{'!', 3, 2222}}) space:select{2} box.snapshot() space:select{2} space:update({2}, {{'!', 3, 22222}}) box.snapshot() space:select{2} space:drop() space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary', { page_size = 128, range_size = 1024 }) -- Test that snaphot() inside a transaction doesn't lose data -- and that upserts are successfully merged. box.begin() space:upsert({2}, {{'=', 2, 22}}) space:upsert({2}, {{'!', 2, 222}}) space:upsert({2}, {{'!', 2, 2222}}) space:select{} box.snapshot() box.commit() space:select{} space:insert({3}) box.snapshot() space:select{} -- -- Verify that deletion of tuples with key 2 and 3 is -- successfully dumped and compacted. -- box.begin() space:delete{2} space:delete{3} box.commit() space:upsert({10}, {{'!', 2, 10}}) box.snapshot() space:select{} -- Test that deletion is successfully dumped and compacted. space:delete{10} space:upsert({10}, {{'!', 2, 10}}) space:upsert({10}, {{'!', 2, 10}}) box.snapshot() space:select{} space:delete{10} space:upsert({10}, {{'!', 2, 10}}) space:delete({10}) box.snapshot() space:select{} -- Test that if replace is met then previous upsert is ignored. space:upsert({10}, {{'!', 2, 10}}) space:replace({10, 100}) box.snapshot() space:select{} space:delete{10} -- Test that dumping and compacting didn't lose single upsert. space:upsert({100}, {{'!', 2, 100}}) box.snapshot() space:select{} space:delete{100} -- Verify that if upsert goes after replace then they will be merged. 
space:replace({200}) space:upsert({200}, {{'!', 2, 200}}) box.snapshot() space:select{} space:delete{200} -- Insert more tuples than can fit in range_size big_val = string.rep('1', 400) _ = space:replace({1, big_val}) _ = space:replace({2, big_val}) _ = space:replace({3, big_val}) _ = space:replace({4, big_val}) _ = space:replace({5, big_val}) _ = space:replace({6, big_val}) _ = space:replace({7, big_val}) space:count() box.snapshot() space:count() space:delete({1}) space:delete({2}) space:delete({3}) space:delete({4}) space:delete({5}) space:delete({6}) space:delete({7}) space:select{} box.snapshot() space:select{} -- Test that update successfully merged with replace and other updates space:insert({1}) space:update({1}, {{'=', 2, 111}}) space:update({1}, {{'!', 2, 11}}) space:update({1}, {{'+', 3, 1}, {'!', 4, 444}}) space:select{} box.snapshot() space:select{} space:delete{1} box.snapshot() space:select{} -- Test upsert after deletion space:insert({1}) box.snapshot() space:select{} space:delete({1}) space:upsert({1}, {{'!', 2, 111}}) space:select{} box.snapshot() space:select{} space:delete({1}) -- Test upsert before deletion space:insert({1}) box.snapshot() space:select{} space:upsert({1}, {{'!', 2, 111}}) space:delete({1}) box.snapshot() space:select{} -- Test deletion before replace space:insert({1}) box.snapshot() space:select{} space:delete({1}) space:replace({1, 1}) box.snapshot() space:select{} space:delete({1}) -- Test replace before deletion space:replace({5, 5}) space:delete({5}) box.snapshot() space:select{} -- Test many replaces space:replace{6} space:replace{6, 6, 6} space:replace{6, 6, 6, 6} space:replace{6, 6, 6, 6, 6} space:replace{6, 6, 6, 6, 6, 6} space:replace{6, 6, 6, 6, 6, 6, 6} box.snapshot() space:select{} space:delete({6}) space:drop() -- gh-1725 merge iterator can't merge more than two runs space = box.schema.space.create('tweedledum', {engine = 'vinyl'}) pk = space:create_index('primary') -- integer keys space:replace{1, 'tuple'} 
box.snapshot() space:replace{2, 'tuple 2'} box.snapshot() space:replace{3, 'tuple 3'} pk:get{1} or {'none'} pk:get{2} pk:get{3} space:drop() -- gh-2875 INSERT+DELETE pairs are annihilated on compaction s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('primary', {run_count_per_level = 1}) sk = s:create_index('secondary', {run_count_per_level = 1, parts = {2, 'unsigned'}}) PAD1 = 100 PAD2 = 10 -- Create a big run to prevent major compaction. for i = 1001, 1000 + PAD1 do s:replace{i, i} end box.snapshot() -- Generate some INSERT statements and dump them to disk. _ = s:insert{1, 1} -- insert _ = s:replace{2, 2} -- replace, no old tuple _ = s:upsert({3, 3}, {{'!', 1, 1}}) -- upsert, no old tuple box.begin() s:insert{4, 4} s:delete(4) box.commit() box.begin() s:insert{5, 5} s:replace{5, 5, 5} box.commit() box.begin() s:insert{6, 6} s:update(6, {{'!', 2, 6}}) box.commit() _ = s:insert{7, 7} _ = s:insert{8, 8} box.snapshot() -- Delete the inserted tuples and trigger compaction. s:delete{1} s:delete{2} s:delete{3} s:delete{4} s:delete{5} s:delete{6} -- Check that a REPLACE in a secondary index generated by -- an update operation is converted into an INSERT on dump -- and hence gets annihilated by the next DELETE. _ = s:update(7, {{'=', 2, 77}}) s:delete{7} _ = s:upsert({8, 8}, {{'=', 2, 88}}) s:delete{8} -- Some padding to trigger minor compaction. for i = 1001, 1000 + PAD2 do s:replace{i, i} end box.snapshot() -- Wait for compaction. while pk:info().disk.compact.count == 0 do fiber.sleep(0.001) end while sk:info().disk.compact.count == 0 do fiber.sleep(0.001) end pk:info().disk.compact.count -- 1 sk:info().disk.compact.count -- 1 -- All INSERT+DELETE pairs should have been annihilated, -- only padding is left. 
pk:info().disk.compact.out.rows - PAD2 -- 0 sk:info().disk.compact.out.rows - PAD2 -- 0 pk:select(1000, {iterator = 'LE'}) -- empty sk:select(1000, {iterator = 'LE'}) -- empty s:drop() -- Check that an INSERT+DELETE pair is annihilated on compaction -- only if the first statement among all sources was an INSERT. s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('primary', {run_count_per_level = 1}) sk = s:create_index('secondary', {run_count_per_level = 1, parts = {2, 'unsigned'}}) PAD1 = 100 PAD2 = 10 -- Create a big run to prevent major compaction. for i = 1001, 1000 + PAD1 do s:insert{i, i} end _ = s:insert{1, 1} _ = s:insert{2, 2} _ = s:insert{3, 3} _ = s:insert{4, 4} _ = s:insert{5, 5} _ = s:insert{6, 6} _ = s:insert{7, 7} _ = s:insert{8, 8} box.snapshot() -- Generate DELETE+INSERT statements and write them to disk. s:delete{1} s:insert{1, 100} box.begin() s:delete{2} s:insert{2, 200} box.commit() s:replace{3, 30} s:delete{3} s:insert{3, 300} box.begin() s:replace{4, 40} s:delete{4} s:insert{4, 400} box.commit() s:delete{5} s:upsert({5, 500}, {{'=', 2, 500}}) box.begin() s:delete{6} s:upsert({6, 600}, {{'=', 2, 600}}) box.commit() s:replace{7, 70} s:delete{7} s:upsert({7, 700}, {{'=', 2, 700}}) box.begin() s:replace{8, 80} s:delete{8} s:upsert({8, 800}, {{'=', 2, 800}}) box.commit() box.snapshot() -- Generate DELETE statements and trigger compaction. s:delete{1} s:delete{2} s:delete{3} s:delete{4} s:delete{5} s:delete{6} s:delete{7} s:delete{8} -- Some padding to trigger minor compaction. for i = 1001, 1000 + PAD2 do s:replace{i, i} end box.snapshot() -- Wait for compaction. while pk:info().disk.compact.count == 0 do fiber.sleep(0.001) end while sk:info().disk.compact.count == 0 do fiber.sleep(0.001) end pk:info().disk.compact.count -- 1 sk:info().disk.compact.count -- 1 -- If INSERT+DELETE statements stored in the two compacted runs -- were annihilated we would see tuples stored in the first run. 
pk:select(1000, {iterator = 'LE'}) -- empty sk:select(1000, {iterator = 'LE'}) -- empty s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/split_coalesce.result0000664000000000000000000000656313306565107022577 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... -- Temporary table to restore variables after restart. var = box.schema.space.create('var') --- ... _ = var:create_index('primary', {parts = {1, 'string'}}) --- ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('primary', {unique=true, parts={1, 'unsigned'}, page_size=256, range_size=2048, run_count_per_level=1, run_size_ratio=1000}) --- ... function vyinfo() return box.space.test.index.primary:info() end --- ... range_count = 4 --- ... tuple_size = math.ceil(s.index.primary.options.page_size / 4) --- ... pad_size = tuple_size - 30 --- ... assert(pad_size >= 16) --- - true ... keys_per_range = math.floor(s.index.primary.options.range_size / tuple_size) --- ... key_count = range_count * keys_per_range --- ... -- Rewrite the space until enough ranges are created. test_run:cmd("setopt delimiter ';'") --- - true ... iter = 0 function gen_tuple(k) local pad = {} for i = 1,pad_size do pad[i] = string.char(math.random(65, 90)) end return {k, k + iter, table.concat(pad)} end while vyinfo().range_count < range_count do iter = iter + 1 for k = key_count,1,-1 do s:replace(gen_tuple(k)) end box.snapshot() fiber.sleep(0.01) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... vyinfo().range_count --- - 4 ... -- Remember the number of iterations and the number of keys -- so that we can check data validity after restart. _ = var:insert{'iter', iter} --- ... _ = var:insert{'key_count', key_count} --- ... _ = var:insert{'keys_per_range', keys_per_range} --- ... -- Check that the space can be recovered after splitting ranges. test_run:cmd('restart server default') fiber = require 'fiber' --- ... s = box.space.test --- ... 
var = box.space.var --- ... iter = var:get('iter')[2] --- ... key_count = var:get('key_count')[2] --- ... keys_per_range = var:get('keys_per_range')[2] --- ... function vyinfo() return box.space.test.index.primary:info() end --- ... -- Check the space content. s:count() == key_count --- - true ... for k = 1,key_count do v = s:get(k) assert(v[2] == k + iter) end --- ... -- Delete 90% keys, remove padding for the rest. test_run:cmd("setopt delimiter ';'") --- - true ... for k = 1,key_count do if k % 10 ~= 0 then s:delete(k) else s:update(k, {{'#', 3, 1}}) end end test_run:cmd("setopt delimiter ''"); --- ... box.snapshot() --- - ok ... -- Trigger compaction until ranges are coalesced. test_run:cmd("setopt delimiter ';'") --- - true ... while vyinfo().range_count > 1 do for i = 1,key_count,keys_per_range do s:delete{i} end box.snapshot() fiber.sleep(0.01) end test_run:cmd("setopt delimiter ''"); --- ... vyinfo().range_count --- - 1 ... -- Check that the space can be recovered after coalescing ranges. test_run:cmd('restart server default') s = box.space.test --- ... var = box.space.var --- ... iter = var:get('iter')[2] --- ... key_count = var:get('key_count')[2] --- ... -- Check the space content. test_run:cmd("setopt delimiter ';'") --- - true ... key_count_left = 0 for k = 1,key_count do v = s:get(k) if k % 10 == 0 then assert(v[2] == k + iter) key_count_left = key_count_left + 1 else assert(v == nil) end end test_run:cmd("setopt delimiter ''"); --- ... s:count() == key_count_left --- - true ... s:drop() --- ... var:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/cache.test.lua0000664000000000000000000001625513306565107021071 0ustar rootroot#!/usr/bin/env tarantool test_run = require('test_run').new() test_run:cmd("setopt delimiter ';'") stat = nil function stat_changed() local old_stat = stat local new_stat = box.space.test.index.pk:info() stat = new_stat return (old_stat == nil or old_stat.memory.iterator.lookup ~= new_stat.memory.iterator.lookup or old_stat.memory.iterator.get.rows ~= new_stat.memory.iterator.get.rows or old_stat.disk.iterator.lookup ~= new_stat.disk.iterator.lookup or old_stat.disk.iterator.get.rows ~= new_stat.disk.iterator.get.rows) end; test_run:cmd("setopt delimiter ''"); s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk') str = string.rep('!', 100) for i = 1,1000 do s:insert{i, str} end box.begin() t = s:select{} box.commit() #t t = s:replace{100, str} for i = 1,10 do box.begin() t = s:select{} box.commit() end t = s:replace{200, str} s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) str = '' s:replace{0, 0, 0} s:replace{1, 1, 1, str} s:replace{1, 2, 1, str} s:replace{1, 3, 1, str} s:replace{1, 4, 1, str} s:replace{2, 1, 2, str} s:replace{2, 2, 2, str} s:replace{2, 3, 2, str} s:replace{2, 4, 2, str} s:replace{3, 3, 4} box.snapshot() _ = stat_changed() -- init box.begin() s:get{1, 2} box.commit() stat_changed() -- cache miss, true s:get{1, 2} stat_changed() -- cache hit, false box.begin() s:select{1} box.commit() stat_changed() -- cache miss, true s:select{1} stat_changed() -- cache hit, false box.begin() s:select{} box.commit() stat_changed() -- cache miss, true s:select{} stat_changed() -- cache hit, false s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) str = '' s:replace{0, 0, 0} s:replace{1, 1, 1, str} s:replace{1, 2, 1, str} s:replace{1, 3, 1, str} s:replace{1, 4, 1, str} 
s:replace{2, 1, 2, str} s:replace{2, 2, 2, str} s:replace{2, 3, 2, str} s:replace{2, 4, 2, str} s:replace{3, 3, 4} box.snapshot() _ = stat_changed() -- init box.begin() s:select{} box.commit() stat_changed() -- cache miss, true s:get{1, 2} stat_changed() -- cache hit, false s:select{1} stat_changed() -- cache hit, false s:select{} stat_changed() -- cache hit, false s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) str = '' s:replace{0, 0, 0} s:replace{1, 2, 1, str} s:replace{1, 3, 1, str} s:replace{1, 4, 1, str} s:replace{2, 1, 2, str} s:replace{2, 2, 2, str} s:replace{2, 3, 2, str} s:replace{2, 4, 2, str} s:replace{3, 3, 4} box.begin() s:select{1} box.commit() s:replace{1, 1, 1, str} s:select{1} s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) s:replace{1, 1, 1} s:replace{2, 2, 2} s:replace{3, 3, 3} s:replace{4, 4, 4} s:replace{5, 5, 5} box.begin() pk:min() pk:max() box.commit() s:replace{0, 0, 0} s:replace{6, 6, 6} pk:min() pk:max() s:drop() -- Same test w/o begin/end s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk') str = string.rep('!', 100) for i = 1,1000 do s:insert{i, str} end box.snapshot() t = s:select{} #t t = s:replace{100, str} for i = 1,10 do t = s:select{} end t = s:replace{200, str} s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) str = '' s:replace{0, 0, 0} s:replace{1, 1, 1, str} s:replace{1, 2, 1, str} s:replace{1, 3, 1, str} s:replace{1, 4, 1, str} s:replace{2, 1, 2, str} s:replace{2, 2, 2, str} s:replace{2, 3, 2, str} s:replace{2, 4, 2, str} s:replace{3, 3, 4} box.snapshot() _ = stat_changed() -- init s:get{1, 2} stat_changed() -- cache miss, true s:get{1, 2} stat_changed() -- cache hit, false s:select{1} stat_changed() -- cache miss, true s:select{1} stat_changed() -- cache hit, false 
s:select{} stat_changed() -- cache miss, true s:select{} stat_changed() -- cache hit, false s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) str = '' s:replace{0, 0, 0} s:replace{1, 1, 1, str} s:replace{1, 2, 1, str} s:replace{1, 3, 1, str} s:replace{1, 4, 1, str} s:replace{2, 1, 2, str} s:replace{2, 2, 2, str} s:replace{2, 3, 2, str} s:replace{2, 4, 2, str} s:replace{3, 3, 4} box.snapshot() _ = stat_changed() -- init s:select{} stat_changed() -- cache miss, true s:get{1, 2} stat_changed() -- cache hit, false s:select{1} stat_changed() -- cache hit, false s:select{} stat_changed() -- cache hit, false s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) str = '' s:replace{0, 0, 0} s:replace{1, 2, 1, str} s:replace{1, 3, 1, str} s:replace{1, 4, 1, str} s:replace{2, 1, 2, str} s:replace{2, 2, 2, str} s:replace{2, 3, 2, str} s:replace{2, 4, 2, str} s:replace{3, 3, 4} s:select{1} s:replace{1, 1, 1, str} s:select{1} s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) s:replace{1, 1, 1} s:replace{2, 2, 2} s:replace{3, 3, 3} s:replace{4, 4, 4} s:replace{5, 5, 5} pk:min() pk:max() s:replace{0, 0, 0} s:replace{6, 6, 6} pk:min() pk:max() s:drop() -- https://github.com/tarantool/tarantool/issues/2189 local_space = box.schema.space.create('test', {engine='vinyl'}) pk = local_space:create_index('pk') local_space:replace({1, 1}) local_space:replace({2, 2}) local_space:select{} box.begin() local_space:replace({1}) local_space:select{} box.commit() local_space:select{} local_space:drop() -- -- gh-2661: vy_cache_next_key after version change returns the -- same statement as before. 
-- s = box.schema.create_space('test', {engine = 'vinyl'}) pk = s:create_index('pk') sk = s:create_index('sec', {parts = {2, 'string'}, unique = false}) s:insert{1, 'key1'} sk:select('key1') s:insert{3, 'key2'} sk:select('key2') s:insert{5, 'key1'} sk:select('key1') s:drop() -- -- gh-2789: vy_cache_iterator must stop iteration, if a sought -- statement does not exist and is between chained statements. -- s = box.schema.create_space('test', {engine = 'vinyl'}) pk = s:create_index('pk') s:replace{1} s:replace{2} s:replace{4} s:replace{5} box.snapshot() -- Cache is not updated in autocommit mode. box.begin() s:select{} box.commit() info = pk:info().cache info.lookup info.get.rows pk:info().disk.iterator.lookup s:get{3} info = pk:info().cache info.lookup info.get.rows pk:info().disk.iterator.lookup s:drop() -- -- Cache resize -- vinyl_cache = box.cfg.vinyl_cache box.cfg{vinyl_cache = 1000 * 1000} s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') for i = 1, 100 do s:replace{i, string.rep('x', 1000)} end for i = 1, 100 do s:get{i} end box.info.vinyl().cache.used box.cfg{vinyl_cache = 50 * 1000} box.info.vinyl().cache.used box.cfg{vinyl_cache = 0} box.info.vinyl().cache.used -- Make sure cache is not populated if box.cfg.vinyl_cache is set to 0 st1 = s.index.pk:info().cache #s:select() for i = 1, 100 do s:get{i} end st2 = s.index.pk:info().cache st2.put.rows - st1.put.rows box.info.vinyl().cache.used s:drop() box.cfg{vinyl_cache = vinyl_cache} tarantool_1.9.1.26.g63eb81e3c/test/vinyl/gc.test.lua0000664000000000000000000000533613306565107020415 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') fio = require('fio') test_run:cleanup_cluster() -- Make each snapshot trigger garbage collection. default_checkpoint_count = box.cfg.checkpoint_count box.cfg{checkpoint_count = 1} -- Temporary space for bumping lsn. 
temp = box.schema.space.create('temp') _ = temp:create_index('pk') s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk', {run_count_per_level=1}) path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) function ls_data() return fio.glob(fio.pathjoin(path, '*')) end function ls_vylog() return fio.glob(fio.pathjoin(box.cfg.vinyl_dir, '*.vylog')) end function gc_info() return box.internal.gc.info() end function gc() temp:auto_increment{} box.snapshot() end -- Check that run files are deleted by gc. s:insert{1} box.snapshot() -- dump s:insert{2} box.snapshot() -- dump + compaction while s.index.pk:info().run_count > 1 do fiber.sleep(0.01) end -- wait for compaction gc() files = ls_data() #files == 2 or {files, gc_info()} -- Check that gc keeps the current and previous log files. files = ls_vylog() #files == 2 or {files, gc_info()} -- Check that files left from dropped indexes are deleted by gc. s:drop() gc() files = ls_data() #files == 0 or {files, gc_info()} -- -- Check that vylog files are removed if vinyl is not used. -- files = ls_vylog() #files == 2 or {files, gc_info()} -- All records should have been purged from the log by now -- so we should only keep the previous log file. gc() files = ls_vylog() #files == 1 or {files, gc_info()} -- The previous log file should be removed by the next gc. gc() files = ls_vylog() #files == 0 or {files, gc_info()} temp:drop() box.cfg{checkpoint_count = default_checkpoint_count} -- -- Check that compacted run files that are not referenced -- by any checkpoint are deleted immediately (gh-3407). 
-- test_run:cmd("create server test with script='vinyl/low_quota.lua'") test_run:cmd("start server test with args='1048576'") test_run:cmd('switch test') box.cfg{checkpoint_count = 2} fio = require('fio') fiber = require('fiber') s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {run_count_per_level = 3}) function count_runs() return #fio.glob(fio.pathjoin(box.cfg.vinyl_dir, s.id, s.index.pk.id, '*.run')) end _ = s:replace{1} box.snapshot() _ = s:replace{2} box.snapshot() count_runs() -- 2 for i = 1, 20 do s:replace{i, string.rep('x', 100 * 1024)} end while s.index.pk:info().disk.compact.count < 1 do fiber.sleep(0.001) end s.index.pk:info().disk.compact.count -- 1 count_runs() -- 3 (compacted runs created after checkpoint are deleted) test_run:cmd('switch default') test_run:cmd("stop server test") test_run:cmd("cleanup server test") tarantool_1.9.1.26.g63eb81e3c/test/vinyl/large.result0000664000000000000000000000042713306560010020655 0ustar rootroot#!/usr/bin/env tarantool --- ... test_run = require('test_run').new() --- ... large = require('large') --- ... large.prepare() --- ... large.large(500) --- ... test_run:cmd('restart server default') large = require('large') --- ... large.check() --- ... large.teardown() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/on_replace.test.lua0000664000000000000000000001416013306560010022112 0ustar rootrootenv = require('test_run') test_run = env.new() fail = false old_tuple = nil new_tuple = nil function on_replace(old_tuple_, new_tuple_) if fail then old_tuple = nil new_tuple = nil error('fail') else old_tuple = old_tuple_ new_tuple = new_tuple_ end end -- on insert one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') tmp = space:on_replace(on_replace) space:insert({6, 'f'}) old_tuple, new_tuple index:select{} fail = true space:insert({7, 'g'}) old_tuple, new_tuple index:select{} space:drop() fail = false -- on insert in multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) tmp = space:on_replace(on_replace) space:insert({1, 2}) old_tuple, new_tuple index:select{} index2:select{} fail = true space:insert({2, 3}) old_tuple, new_tuple index:select{} index2:select{} space:drop() fail = false -- on replace in one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') space:replace({1}) tmp = space:on_replace(on_replace) space:replace({2}) old_tuple, new_tuple space:replace({2}) old_tuple, new_tuple space:replace({1, 43}) old_tuple, new_tuple fail = true space:replace({2, 100}) old_tuple, new_tuple space:select{} fail = false space:drop() -- ensure trigger error causes rollback of only one statement fail = true space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') index2 = space:create_index('secondary', { parts = {2, 'string'} }) box.begin() space:insert({1, 'a'}) space:insert({2, 'a'}) space:insert({1, 'b'}) space:insert({2, 'b'}) tmp = space:on_replace(on_replace) space:insert({3, 'c'}) old_tuple, new_tuple box.commit() index:select{} 
index2:select{} fail = false space:drop() -- on replace in multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) tmp = space:on_replace(on_replace) space:replace({1, 'a'}) space:replace({2, true}) space:replace({3, 36.6}) tmp = space:on_replace(on_replace) space:replace({4, 4}) old_tuple, new_tuple space:replace({5, 5}) old_tuple, new_tuple space:replace({4, 5}) old_tuple, new_tuple space:replace({5, 6, 60}) old_tuple, new_tuple fail = true space:replace({10, 10}) old_tuple, new_tuple index:select{} index2:select{} fail = false space:drop() -- on delete from one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1, 2}) space:insert({2, 3, 4}) space:insert({3, 4, 5}) space:insert({4}) tmp = space:on_replace(on_replace) index:delete({3}) old_tuple, new_tuple index:delete({4}) old_tuple, new_tuple fail = true index:delete({1}) old_tuple, new_tuple index:select{} fail = false space:drop() -- on delete from multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) space:insert({1, 'a'}) space:insert({2, 2, 'b'}) space:insert({3, 30.3}) space:insert({4, false}) tmp = space:on_replace(on_replace) index:delete({1}) old_tuple, new_tuple index2:delete({30.3}) old_tuple, new_tuple fail = true index2:delete({false}) old_tuple, new_tuple index:select{} index2:select{} fail = false space:drop() -- on update one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1, 2}) space:insert({2, 3, 4}) space:insert({3, 4, 5}) space:insert({4}) tmp = space:on_replace(on_replace) index:update({1}, {{'#', 2, 1}}) old_tuple, new_tuple index:update({2}, {{'#', 1, 1}}) -- must fail 
old_tuple, new_tuple index:update({3}, {{'=', 4, '300'}}) old_tuple, new_tuple index:update({20}, {{'+', 2, 5}}) old_tuple, new_tuple fail = true index:update({1}, {{'=', 2, 'one'}}) old_tuple, new_tuple index:select{} fail = false space:drop() -- on update multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') index2 = space:create_index('secondary', { parts = {2, 'scalar'} }) space:insert({1, 'a'}) space:insert({2, 2, 'b'}) space:insert({3, 30.3}) space:insert({4, false}) tmp = space:on_replace(on_replace) index:update({1}, {{'=', 2, 'z'}}) old_tuple, new_tuple index:update({2}, {{'+', 1, 1}}) old_tuple, new_tuple index2:update({30.3}, {{'+', 2, 10}}) old_tuple, new_tuple index2:update({false}, {{'=', 3, 'equal false'}}) old_tuple, new_tuple fail = true index:update({1}, {{'=', 2, 'a'}}) old_tuple, new_tuple index2:update({2}, {{'-', 2, 10}}) old_tuple, new_tuple index:select{} index2:select{} fail = false space:drop() -- on upsert one index space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1, 1}) space:insert({2, 2, 2}) space:insert({3}) tmp = space:on_replace(on_replace) space:upsert({1}, {{'+', 2, 10}}) old_tuple, new_tuple space:upsert({4, 4, 4, 4}, {{'!', 2, 400}}) old_tuple, new_tuple fail = true space:upsert({2}, {{'!', 2, 2}}) old_tuple, new_tuple space:upsert({5, 5, 5}, {{'!', 2, 5}}) old_tuple, new_tuple index:select{} fail = false space:drop() -- on upsert multiple indexes space = box.schema.space.create('test_space', { engine = 'vinyl' }) index = space:create_index('primary', { parts = {1, 'unsigned', 2, 'unsigned'} }) index2 = space:create_index('secondary', { parts = {2, 'unsigned', 3, 'unsigned'} }) index3 = space:create_index('third', { parts = {3, 'unsigned'}, unique = false }) space:insert({1, 1, 1}) space:insert({2, 2, 2}) space:insert({3, 3, 3}) tmp = space:on_replace(on_replace) space:upsert({1, 1, 1}, 
{{'+', 3, 1}}) old_tuple, new_tuple space:upsert({1, 1, 1}, {{'+', 2, 1}}) -- must fail old_tuple, new_tuple space:upsert({4, 4, 4}, {{'!', 4, 400}}) old_tuple, new_tuple index:select{} index2:select{} index3:select{} fail = true space:upsert({2, 2, 2}, {{'!', 4, 200}}) old_tuple, new_tuple space:upsert({5, 5, 5}, {{'!', 4, 500}}) old_tuple, new_tuple fail = false space:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/compact.test.lua0000664000000000000000000000216513306565107021447 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') space = box.schema.space.create("vinyl", { engine = 'vinyl' }) _= space:create_index('primary', { parts = { 1, 'unsigned' }, run_count_per_level = 2 }) function vyinfo() return box.space.vinyl.index.primary:info() end vyinfo().run_count == 0 -- create the frist run space:insert({1}) space:replace({1, 2}) space:upsert({1},{{'=', 4, 5}}) -- bad upsert require('log').info(string.rep(" ", 1024)) space:select() space:select() -- gh-1571: bad upsert should not log on reads test_run:grep_log('default', 'UPSERT operation failed', 400) == nil box.snapshot() vyinfo().run_count == 1 -- create the second run space:replace({2,2}) space:upsert({2},{{'=',4,5}}) -- bad upsert box.snapshot() -- create the second run vyinfo().run_count == 2 -- create a few more runs to trigger compaction space:insert({3, 3}) box.snapshot() -- wait for compaction while vyinfo().run_count >= 2 do fiber.sleep(0.1) end vyinfo().run_count == 1 -- gh-1571: bad upsert should log on compaction test_run:grep_log('default', 'UPSERT operation failed') ~= nil space:drop() fiber = nil test_run = nil tarantool_1.9.1.26.g63eb81e3c/test/vinyl/errinj_gc.result0000664000000000000000000000546213306565107021545 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... fio = require('fio') --- ... errinj = box.error.injection --- ... test_run:cleanup_cluster() --- ... -- Make each snapshot trigger garbage collection. 
box.cfg{checkpoint_count = 1} --- ... -- Temporary space for bumping lsn. temp = box.schema.space.create('temp') --- ... _ = temp:create_index('pk') --- ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk', {run_count_per_level=1}) --- ... path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) --- ... function file_count() return #fio.glob(fio.pathjoin(path, '*')) end --- ... function gc() temp:auto_increment{} box.snapshot() end --- ... -- -- Check that gc retries to delete files left -- from compacted runs. -- errinj.set('ERRINJ_VY_GC', true) --- - ok ... s:insert{12345, 'abcdef'} box.snapshot() -- dump --- ... s:insert{67890, 'ghijkl'} box.snapshot() -- dump + compaction --- ... while s.index.pk:info().run_count > 1 do fiber.sleep(0.01) end -- wait for compaction --- ... file_count() --- - 6 ... gc() --- ... file_count() --- - 6 ... errinj.set('ERRINJ_VY_GC', false) --- - ok ... gc() --- ... file_count() --- - 2 ... -- -- Check that gc retries to delete files left -- from dropped indexes. -- errinj.set('ERRINJ_VY_GC', true) --- - ok ... s:drop() --- ... gc() --- ... file_count() --- - 2 ... errinj.set('ERRINJ_VY_GC', false) --- - ok ... gc() --- ... file_count() --- - 0 ... -- -- Check that files left from incomplete runs are deleted -- upon recovery completion. -- s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk', {run_count_per_level=1}) --- ... path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) --- ... s:insert{100, '12345'} box.snapshot() -- dump --- ... file_count() --- - 2 ... errinj.set('ERRINJ_VY_RUN_DISCARD', true) --- - ok ... errinj.set('ERRINJ_VY_TASK_COMPLETE', true) --- - ok ... s:insert{200, '67890'} box.snapshot() -- run file created, but dump fails --- - error: Error injection 'vinyl task completion' ... file_count() --- - 4 ... test_run:cmd('restart server default') test_run = require('test_run').new() --- ... 
fio = require('fio') --- ... default_checkpoint_count = box.cfg.checkpoint_count --- ... box.cfg{checkpoint_count = 1} --- ... s = box.space.test --- ... temp = box.space.temp --- ... path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) --- ... function file_count() return #fio.glob(fio.pathjoin(path, '*')) end --- ... function gc() temp:auto_increment{} box.snapshot() end --- ... file_count() --- - 2 ... s:select() --- - - [100, '12345'] - [200, '67890'] ... -- -- Cleanup. -- s:drop() --- ... gc() --- ... file_count() --- - 0 ... temp:drop() --- ... box.cfg{checkpoint_count = default_checkpoint_count} --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/quota.result0000664000000000000000000000251713306565107020732 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... -- -- Restart the server because need to reset quota used memory -- after previous tests. -- test_run:cmd('restart server default') -- -- gh-1863 add BPS tree extents to memory quota -- box.info.vinyl().quota.used --- - 0 ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('pk') --- ... sec = space:create_index('sec', { parts = {2, 'unsigned'} }) --- ... space:insert({1, 1}) --- - [1, 1] ... box.info.vinyl().quota.used --- - 98343 ... space:insert({1, 1}) --- - error: Duplicate key exists in unique index 'pk' in space 'test' ... box.info.vinyl().quota.used --- - 98343 ... space:update({1}, {{'!', 1, 100}}) -- try to modify the primary key --- - error: Attempt to modify a tuple field which is part of index 'pk' in space 'test' ... box.info.vinyl().quota.used --- - 98343 ... space:insert({2, 2}) --- - [2, 2] ... space:insert({3, 3}) --- - [3, 3] ... space:insert({4, 4}) --- - [4, 4] ... box.info.vinyl().quota.used --- - 98460 ... box.snapshot() --- - ok ... box.info.vinyl().quota.used --- - 0 ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] ... box.info.vinyl().quota.used --- - 0 ... 
_ = space:replace{1, 1, string.rep('a', 1024 * 1024 * 5)} --- ... box.info.vinyl().quota.used --- - 5341228 ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/mvcc.test.lua0000664000000000000000000010025413306565107020747 0ustar rootroottest_run = require('test_run').new() -- need to restart in order to reset box.info.vinyl() stats test_run:cmd("restart server default") txn_proxy = require('txn_proxy') _ = box.schema.space.create('test', {engine = 'vinyl'}) _ = box.space.test:create_index('pk') c1 = txn_proxy.new() c2 = txn_proxy.new() c3 = txn_proxy.new() c4 = txn_proxy.new() c5 = txn_proxy.new() c6 = txn_proxy.new() c7 = txn_proxy.new() t = box.space.test -- -- empty transaction commit -- c1:begin() c1:commit() -- -- empty transaction rollback -- c1:begin() c1:rollback() -- -- single-statement transaction commit -- c1:begin() c1("t:replace{1}") c1:commit() c1("t:get{1}") -- cleanup c1("t:delete{1}") -- -- single-statement transaction rollback -- c1:begin() c1("t:replace{1}") c1:rollback() c1("t:get{1}") -- -- basic effects: if a transaction is rolled back, it has no effect -- c1:begin() c1("t:insert{1}") c1("t:get{1}") c1:rollback() c1("t:get{1}") c2("t:get{1}") -- -- multi-statement transaction -- test_run:cmd("setopt delimiter ';'") c1:begin(); for i = 1,100 do c1(string.format("t:insert{%d}", i)) assert(c1(string.format("t:get{%d}", i))[1][1] == i) end; c1:commit(); for i = 1,100 do c1(string.format("t:delete{%d}", i)) end; for i = 1,100 do assert(#c1(string.format("t:get{%d}", i)) == 0) end; test_run:cmd("setopt delimiter ''"); -- -- multi-statement transaction rollback -- test_run:cmd("setopt delimiter ';'") c1:begin(); for i = 1,100 do c1(string.format("t:insert{%d}", i)) assert(c1(string.format("t:get{%d}", i))[1][1] == i) end; c1:rollback(); for i = 1,100 do assert(#c1(string.format("t:get{%d}", i)) == 0) end; test_run:cmd("setopt delimiter ''"); -- transaction_set_set_get_commit(void) c1:begin() c1("t:replace{1, 1}") c1("t:replace{1, 2}") 
c1("t:get{1}") c1:commit() c1("t:get{1}") c1("t:delete{1}") -- transaction_set_set_commit_get(void) c1:begin() c1("t:replace{1}") c1("t:replace{1, 2}") c1:commit() c2:begin() c2("t:get{1}") c2:rollback() c1("t:delete{1}") -- transaction_set_set_rollback_get(void) c1:begin() c1("t:replace{1}") c1("t:replace{1, 2}") c1:rollback() c2:begin() c2("t:get{1}") c2:rollback() -- transaction_set_delete_get_commit(void) c1:begin() c1("t:insert{1}") c1("t:delete{1}") c1("t:get{1}") c1:commit() -- transaction_set_delete_get_commit_get(void) c1:begin() c1("t:insert{1}") c1("t:delete{1}") c1("t:get{1}") c1:commit() c1("t:get{1}") -- -- transaction_set_delete_set_commit_get(void) -- c1:begin() c1("t:insert{1, 1}") c1("t:delete{1}") c1("t:insert{1, 2}") c1("t:get{1}") c1:commit() c2("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -- transaction_set_delete_commit_get_set(void) -- c1:begin() c1("t:insert{1}") c1("t:delete{1}") c1:commit() c1("t:get{1}") c1("t:insert{1}") c1("t:get{1}") c1("t:delete{1}") c1("t:get{1}") -- -- transaction_p_set_commit(void) -- c1:begin() c2:begin() c1("t:replace{1, 10}") c1:commit() c2("t:replace{2, 15}"); c2:commit() c1("t:get{1}") c1("t:get{2}") c1("t:delete{1}") c1("t:delete{2}") -- -- no dirty reads: if a transaction is not committed, its effects are not -- visible -- c1:begin() c1("t:insert{1}") c1("t:get{1}") -- -- not visible in c2 -- c2("t:get{1}") c1:commit() -- -- become visible in c2 after c1 commits (c2 runs in autocommit) -- c2("t:get{1}") -- -- repeatable read: if c1 has read X, and there was -- another transaction, which modified X after c1 started, -- and c1 reads X again, it gets the same result -- c1:begin() c1("t:get{1}") -- -- not visible in c1 -- c2("t:replace{1, 'c2'}") c1("t:get{1}") c2:commit() -- -- still not visible, even though c2 has committed -- c1("t:get{1}") -- commits ok since is a read only transaction c1:commit() -- -- now visible -- c1("t:get{1}") c1("t:delete{1}") -- ******************************* -- tx manager 
tests from sophia * -- ******************************* -- -------------------------------------------------------------------------- -- transaction_p_set_get_commit(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c1("t:replace{1, 10}") c1("t:get{1}") -- {1, 10} -- c1:commit() -- -- c2("t:replace{2, 15}") -- c2("t:get{2}") -- {2, 15} -- c2:commit() -- -- cleanup -- c1("t:delete{1}") c1("t:delete{2}") -- -- -------------------------------------------------------------------------- -- transaction_p_set_commit_get0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c1:commit() -- c2("t:replace{2, 15}") c2:commit() -- c1:begin() c1("t:get{1}") -- {1, 10} -- c1("t:get{2}") -- {2, 15} c1:rollback() -- -- cleanup -- c1("t:delete{1}") c1("t:delete{2}") -- -------------------------------------------------------------------------- -- transaction_p_set_commit_get1(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") c2("t:get{200}") -- c2("t:replace{1, 10}") c2:commit() -- -- try writing an unrelated key -- c1("t:replace{2, 15}") c1:commit() -- c2:begin() c2("t:get{1}") -- {1, 10} c2:rollback() -- -- cleanup -- c1("t:delete{1}") c1("t:delete{2}") -- -- -- now try the same key -- -- c1:begin() c2:begin() c1("t:get{100}") c2("t:get{200}") -- c2("t:replace{1, 10}") c2:commit() -- c1("t:replace{1, 15}") c1:commit() -- c2:begin() c2("t:get{1}") -- {1, 15} c2:rollback() -- -- cleanup -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_p_set_commit_get2(void) -- 
-------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") c2("t:get{200}") -- -- c1("t:replace{2, 15}") c1:commit() -- -- c2("t:replace{1, 10}") c2:commit() -- commits successfully -- c1:begin() c1("t:get{1}") -- {1, 10} -- c1("t:get{2}") -- {2, 15} c1:rollback() -- -- cleanup -- c1("t:delete{1}") c1("t:delete{2}") -- -------------------------------------------------------------------------- -- transaction_p_set_rollback_get0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") c2("t:get{200}") -- -- c1("t:replace{1, 10}") c1:rollback() -- c2("t:replace{2, 15}") c2:rollback() -- c3:begin() c3("t:get{1}") -- finds nothing c3("t:get{2}") -- finds nothing c3:rollback() -- -------------------------------------------------------------------------- -- transaction_p_set_rollback_get1(void) -- -------------------------------------------------------------------------- -- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 10}") c2:rollback() -- c1("t:replace{2, 15}") c1:rollback() -- c3:begin() c3("t:get{1}") -- finds nothing c3("t:get{2}") -- finds nothing c3:rollback() -- -- -------------------------------------------------------------------------- -- transaction_p_set_rollback_get2(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- -- c2("t:replace{1, 10}") c2:rollback() -- c1("t:replace{1, 15}") c1:rollback() -- c3("t:get{1}") -- finds nothing -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- -- c2("t:replace{1, 10}") c2:rollback() -- c1("t:replace{1, 15}") c1:commit() -- c3("t:get{1}") -- {1, 15} -- -- 
cleanup -- c3("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_c_set_commit0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c1("t:replace{1, 10}") c1:commit() -- c2("t:replace{1, 15}") c2:commit() -- c2("t:get{1}") -- {1,15} -- cleanup -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_c_set_commit1(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 10}") c2:commit() -- c1("t:replace{1, 15}") c1:commit() -- c3("t:get{1}") -- {1, 15} -- -- cleanup -- c3("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_c_set_commit2(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 15}") -- c2("t:replace{1, 10}") -- c2:commit() c1:commit() -- c3("t:get{1}") -- {1, 15} -- -- cleanup -- c1("t:delete{1}") -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 15}") -- c2("t:replace{1, 10}") -- -- sic: commit order c1:commit() c2:commit() -- write after write is ok, the last writer to commit wins -- c3("t:get{1}") -- {1, 10} -- -- cleanup -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_a0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() 
c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 10}") -- c2:rollback() -- c1("t:replace{1, 15}") -- c1:commit() -- c3("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -- -- statement order is irrelevant, rollback order is important c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") c2("t:replace{1, 15}") -- c2:rollback() c1:commit() -- c3("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_a1(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 10}") c1("t:replace{1, 15}") -- c2:rollback() c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") -- -- statements in different order now -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") c2("t:replace{1, 15}") -- c2:rollback() c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_b0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 10}") c2:commit() -- success -- c1("t:replace{1, 15}") c1:rollback() -- success -- c3("t:get{1}") -- cleanup -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_b1(void) -- -------------------------------------------------------------------------- 
-- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 15}") c1("t:replace{1, 10}") -- c2:commit() c1:rollback() -- c3("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -- now commit the second transaction -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 15}") c1("t:replace{1, 10}") -- c2:commit() c1:commit() -- ok, the last committer wins -- c3("t:get{1}") -- {1, 10} -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_ab0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 15}") c2:rollback() -- c1("t:replace{1, 10}") c1:rollback() -- c3("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -- now commit the second transaction -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 15}") c2:rollback() -- c1("t:replace{1, 10}") c1:commit() -- c3("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_ab1(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 10}") c1("t:replace{1, 15}") -- c2:rollback() c1:rollback() -- c3("t:get{1}") -- -- cleanup -- c2("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_a0(void) -- 
-------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c2("t:replace{1, 15}") -- c1("t:replace{1, 10}") -- c1:commit() -- success c2:commit() -- success, the last writer wins -- c2("t:get{1}") -- {1, 15} -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_a1(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c1:commit() -- success, the last writer wins -- -- cleanup -- c2("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_b0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_b1(void) -- -------------------------------------------------------------------------- -- c2:begin() c1:begin() c2("t:get{100}") -- start transaction in the engine c1("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_a0(void) -- 
-------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_a1(void) -- -------------------------------------------------------------------------- -- c2:begin() c1:begin() c2("t:get{100}") -- start transaction in the engine c1("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c1:rollback() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_b0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c2:rollback() -- not in transaction c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_b1(void) -- -------------------------------------------------------------------------- -- c2:begin() c1:begin() c2("t:get{100}") -- start transaction in the engine c1("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c2:commit() -- success c2:rollback() -- not in transaction c1:commit() -- success -- -- cleanup -- c1("t:delete{1}") c1("t:delete{2}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_n0(void) -- 
-------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c3:begin() c3("t:get{300}") -- start transaction in the engine -- -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c3("t:replace{1, 20}") -- c2:commit() -- success c3:commit() -- success c1:commit() -- success, the last committer wins c2:commit() -- not in transaction c3:commit() -- not in transaction -- c3:get{1} -- {1, 20} -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_n1(void) -- -------------------------------------------------------------------------- -- c1:begin() c3:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c3("t:get{200}") -- start transaction in the engine c2("t:get{300}") -- start transaction in the engine -- -- c1("t:replace{1, 10}") -- c2("t:replace{1, 20}") -- c3("t:replace{1, 30}") -- c1:commit() -- success c2:commit() -- success c3:commit() -- success -- c3("t:get{1}") -- {1, 30} -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c3:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c3("t:get{300}") -- start transaction in the engine -- -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c3("t:replace{1, 20}") -- c2:commit() -- success c3:commit() -- rollback c1:rollback() -- success -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n1(void) -- -------------------------------------------------------------------------- -- c1:begin() 
c2:begin() c3:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c3("t:get{300}") -- start transaction in the engine -- -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c3("t:replace{1, 20}") -- c2:commit() -- success c3:commit() -- rollback c2:rollback() -- success, not in transaction in tarantool c3:commit() -- success, not in transaction in tarantool c1:commit() -- rollback -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n2(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c3:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c3("t:get{300}") -- start transaction in the engine -- -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c3("t:replace{1, 20}") -- c3:rollback() c2:commit() c1:commit() -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n3(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c3:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c3("t:get{300}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c3("t:replace{1, 20}") -- c2:commit() c3:rollback() c1:commit() -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n4(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c3:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c3("t:get{300}") -- start transaction in the engine 
-- -- c1("t:replace{1, 10}") -- c2("t:replace{1, 15}") -- c3("t:replace{1, 20}") -- c2:commit() c3:rollback() c1:rollback() -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_get0(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") c1:commit() -- c2("t:get{1}") -- find newest {1, 10} -- c2("t:replace{1, 15}") c2:commit() -- rollback -- c3:begin() c3("t:get{1}") -- {1, 10} c3:commit() -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_get1(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine -- c1("t:replace{1, 10}") -- c1:rollback() -- c2("t:get{1}") -- finds nothing -- c2("t:replace{1, 15}") c2:commit() -- c3:begin() c3("t:get{1}") -- {1, 15} c3:commit() -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_get2(void) -- -------------------------------------------------------------------------- -- c7:begin() c7("t:get{100}") -- start transaction in the engine c1:begin() -- c1("t:replace{1, 1}") -- c2:begin() -- c2("t:replace{1, 2}") -- c4:begin() c4("t:replace{1, 4}") -- c5:begin() c5("t:replace{1, 5}") -- c6:begin() c6("t:get{100}") -- start transaction in the engine -- c1("t:get{1}") -- {1, 1} -- c2("t:get{1}") -- {1, 2} -- c4("t:get{1}") -- {1, 4} -- c5("t:get{1}") -- {1, 5} -- c6("t:get{1}") -- nothing -- c7("t:get{1}") -- nothing -- c3:begin() -- c3("t:get{1}") -- nothing c3:rollback() -- c1:rollback() c2:rollback() c3:rollback() c4:rollback() c5:rollback() 
c6:rollback() c7:rollback() -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_c_set_get3(void) -- -------------------------------------------------------------------------- -- c7:begin() c1:begin() c7("t:get{100}") -- start transaction in the engine c1("t:get{1}") -- start transaction in the engine -- c3:begin() c3("t:replace{1, 3}") c3:commit() -- c2:begin() c3:begin() c2("t:get{500}") -- start transaction in the engine c3("t:get{600}") -- start transaction in the engine c2("t:get{1}") -- {1, 3} -- c3("t:replace{1, 6}") c3:commit() -- c2 goes to read view now -- c4:begin() c3:begin() -- c3("t:replace{1, 9}") c3:commit() -- c5:begin() c3:begin() c5("t:get{800}") -- start transaction in the engine c3("t:get{900}") -- start transaction in the engine -- c3("t:replace{1, 12}") c3:commit() -- c6:begin() c6("t:get{1000}") -- start transaction in the engine -- c2("t:get{1}") -- {1, 3} -- c4("t:get{1}") -- {1, 12} -- c5("t:get{1}") -- {1, 12} -- c6("t:get{1}") -- {1, 12} -- c3:begin() c3("t:get{1}") -- {1, 12} c3:rollback() -- c1("t:get{1}") -- nothing -- c7("t:get{1}") -- {1, 12} -- c2:rollback() -- c4("t:get{1}") -- {1, 12} -- c5("t:get{1}") -- {1, 12} -- c6("t:get{1}") -- {1, 12} -- c3:begin() c3("t:get{1}") -- {1, 12} c3:rollback() -- c1("t:get{1}") -- nothing -- c7("t:get{1}") -- {1, 12} -- c4:rollback() -- c5("t:get{1}") -- {1, 12} -- c6("t:get{1}") -- {1, 12} -- c3:begin() c3("t:get{1}") -- {1, 12} c3:rollback() -- c1("t:get{1}") -- nothing -- c7("t:get{1}") -- {1, 12} -- c5:rollback() -- c6("t:get{1}") -- {1, 12} -- c3:begin() c3("t:get{1}") -- {1, 12} c3:rollback() -- c1("t:get{1}") -- nothing -- c7("t:get{1}") -- {1, 12} -- c6:rollback() -- c3:begin() c3("t:get{1}") -- {1, 12} c3:rollback() -- c1("t:get{1}") -- nothing -- c7("t:get{1}") -- {1, 12} -- c1:rollback() c7:rollback() -- c3:begin() c3("t:get{1}") -- {1, 12} c3:rollback() -- -- cleanup -- c1("t:delete{1}") -- -- 
-------------------------------------------------------------------------- -- transaction_c_set_conflict_derive(void) -- -------------------------------------------------------------------------- -- c1:begin() c2:begin() c1("t:get{100}") -- start transaction in the engine c2("t:get{200}") -- start transaction in the engine c1("t:replace{1, 10}") c2("t:replace{1, 15}") -- c1:commit() -- c2("t:replace{1, 20}") -- should not reset conflict flag -- c2:commit() -- rollback -- c3("t:get{1}") -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- transaction_sc_set_wait(void) -- -------------------------------------------------------------------------- -- c1:begin() -- c1("t:replace{1, 10}") -- -- sic: runs in autocommit mode -- c2("t:replace{1, 15}") -- c1:commit() -- c2("t:get{1}") -- {1, 10} -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- transaction_sc_get(void) -- -------------------------------------------------------------------------- -- c1("t:replace{1, 7}") -- c2:begin() -- c2("t:replace{1, 8}") -- c1("t:get{1}") -- {1, 7} -- c2:commit() -- c1("t:get{1}") -- {1, 8} -- c3("t:get{1}") -- {1, 8} -- -- cleanup -- c1("t:delete{1}") -- -------------------------------------------------------------------------- -- two conflicting inserts -- -------------------------------------------------------------------------- c1:begin() c2:begin() -- c1("t:insert{1, 10}") -- c2("t:insert{1, 15}") -- c1:commit() -- success c2:commit() -- rollback, c2 reads {1} before writing it -- c3("t:get{1}") -- {1, 10} -- -- -- cleanup -- c1("t:delete{1}") -- c1:begin() c2:begin() -- c1("t:insert{1, 10}") -- c2("t:insert{1, 15}") -- c2:commit() -- success c1:commit() -- rollback, c1 reads {1} before writing it -- c3("t:get{1}") -- {1, 15} -- -- -- cleanup -- c1("t:delete{1}") -- -- -------------------------------------------------------------------------- -- Transaction 
spuriously abort based on CSN clock -- -------------------------------------------------------------------------- t:insert{1, 10} t:insert{2, 20} c7:begin() c7("t:insert{8, 800}") c3:begin() c3("t:get{1}") c3:commit() c1:begin() c2:begin() -- c1("t:replace{4, 40}") -- c2("t:get{1}") -- c3:begin() c3("t:insert{3, 30}") c3:commit() -- c2("t:replace{5, 50}") c1("t:get{1}") c1:commit() c2:commit() c7:rollback() -- -- cleanup -- t:delete{1} t:delete{2} t:delete{3} t:delete{4} t:delete{5} -- -------------------------------------------------------------------------- -- Conflict manager works for iterators -- -------------------------------------------------------------------------- t:insert{1, 10} t:insert{2, 20} c1:begin() c2:begin() c1("t:select{}") c2("t:select{}") c1("t:replace{1, 'new'}") c2("t:replace{2, 'new'}") c1:commit() c2:commit() -- rollback -- -- -- gh-1606 visibility of changes in transaction in range queries -- c1:begin() c1("t:select{}") c1("t:replace{3, 30}") c1("t:select{}") c1("t:select({3}, {iterator='ge'})") c1("t:select({3}, {iterator='lt'})") c1("t:select({3}, {iterator='gt'})") c1("t:select({3}, {iterator='eq'})") c1("t:replace{3, 'new'}") c1("t:select({3}, {iterator='ge'})") c1("t:select({3}, {iterator='lt'})") c1("t:select({3}, {iterator='gt'})") c1("t:select({3}, {iterator='eq'})") c1("t:delete{3}") c1("t:select({3}, {iterator='ge'})") c1("t:select({3}, {iterator='lt'})") c1("t:select({3}, {iterator='gt'})") c1("t:select({3}, {iterator='eq'})") c1("t:replace{3}") c1("t:delete{2}") c1("t:select({3}, {iterator='lt'})") c1("t:select({3}, {iterator='le'})") c1("t:replace{2}") c1("t:delete{1}") c1("t:select({3}, {iterator='lt'})") c1("t:select({3}, {iterator='le'})") c1("t:delete{3}") c1("t:select({3}, {iterator='lt'})") c1("t:select({3}, {iterator='le'})") c1:rollback() c1("t:select{}") -- -- -- Check that a cursor is closed automatically when a transaction -- is committed or rolled back -- c1:begin() c1("t:select{1}") c1("for k, v in 
box.space.test:pairs() do box.commit() end") c1:rollback() c1:begin() c1("t:select{1}") c1("for k, v in box.space.test:pairs() do box.rollback() end") c1:rollback() t:truncate() -- -- Check that min/max/count transactions stay within a read view -- t:replace{1} c1:begin() c1("t.index.pk:max()") -- {1} c1("t.index.pk:min()") -- {1} c1("t.index.pk:count()") -- 1 c2:begin() c2("t:replace{2}") -- conflicts with c1 so c1 starts using a read view c2:commit() c1("t.index.pk:max()") -- {1} c1("t.index.pk:min()") -- {1} c1("t.index.pk:count()") -- 1 c1:commit() -- -- Convert the reader to a read view: in this test we have -- an explicit conflict between c1 and c2, so c1 begins -- using a read view -- c1:begin() c1("t.index.pk:max()") -- {2} c1("t.index.pk:min()") -- {1} c1("t.index.pk:count()") -- 2 c2:begin() c2("t:replace{1, 'new'}") -- conflits with c1 so c1 starts using a read view c2("t:replace{3}") c2:commit() c1("t.index.pk:max()") -- {2} c1("t.index.pk:min()") -- {1} c1("t.index.pk:count()") -- 2 c1:commit() t:truncate() -- -- Check that select() does not add the key following -- the last returned key to the conflict manager. 
-- t:replace{1} t:replace{2} c1:begin() c1("t:select({}, {limit = 0})") -- none c2:begin() c2("t:replace{1, 'new'}") c2:commit() c1("t:select({}, {limit = 1})") -- {1, 'new'} c2:begin() c2("t:replace{2, 'new'}") c2:commit() c1("t:select()") -- {1, 'new'}, {2, 'new'} c1:commit() t:truncate() -- -- gh-2716 uniqueness check for secondary indexes -- _ = t:create_index('sk', {parts = {2, 'unsigned'}, unique = true}) c1:begin() c2:begin() c1("t:insert{1, 2}") c2("t:insert{2, 2}") c1:commit() c2:commit() -- rollback t:select() -- {1, 2} t:truncate() t.index.sk:drop() -- ************************************************************************* -- 1.7 cleanup marker: end of tests cleanup -- ************************************************************************* -- box.space.test:drop() c1 = nil c2 = nil c3 = nil c4 = nil c5 = nil c6 = nil c7 = nil collectgarbage() -- check of read views proper allocation/deallocation s = box.schema.space.create('test', {engine = 'vinyl'}) i = box.space.test:create_index('pk') s:replace{1, 2, 3} s:replace{4, 5, 6} s:replace{7, 8, 9} box.info.vinyl().tx.read_views -- 0 (no read views needed) box.info.vinyl().tx.transactions -- 0 c1 = txn_proxy.new() c2 = txn_proxy.new() c3 = txn_proxy.new() c4 = txn_proxy.new() c1:begin() c2:begin() c3:begin() c4:begin() box.info.vinyl().tx.read_views -- 0 (no read views needed) box.info.vinyl().tx.transactions -- 0 c1("s:select{1}") c2("s:select{1}") c3("s:select{1}") c4("s:select{1}") box.info.vinyl().tx.read_views -- 0 (no read views needed) box.info.vinyl().tx.transactions -- 4 c4("s:replace{1, 0, 0}") box.info.vinyl().tx.read_views -- 0 (no read views needed) box.info.vinyl().tx.transactions -- 4 c4:commit() box.info.vinyl().tx.read_views -- 1 (one read view for all TXs) box.info.vinyl().tx.transactions -- 3 c1:commit() box.info.vinyl().tx.read_views -- 1 (one read view for all TXs) box.info.vinyl().tx.transactions -- 2 c2:rollback() box.info.vinyl().tx.read_views -- 1 (one read view for all TXs) 
box.info.vinyl().tx.transactions -- 1 c3:commit() box.info.vinyl().tx.read_views -- 0 (no read views needed) box.info.vinyl().tx.transactions -- 0 (all done) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/select_consistency.result0000664000000000000000000000474013306560010023465 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require 'fiber' --- ... math.randomseed(os.time()) --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {parts = {1, 'unsigned'}, page_size = 64, range_size = 256}) --- ... _ = s:create_index('i1', {unique = true, parts = {2, 'unsigned', 3, 'unsigned'}, page_size = 64, range_size = 256}) --- ... _ = s:create_index('i2', {unique = true, parts = {2, 'unsigned', 4, 'unsigned'}, page_size = 64, range_size = 256}) --- ... -- -- If called from a transaction, i1:select({k}) and i2:select({k}) -- must yield the same result. Let's check that under a stress load. -- MAX_KEY = 100 --- ... MAX_VAL = 10 --- ... PADDING = string.rep('x', 100) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function gen_insert() pcall(s.insert, s, {math.random(MAX_KEY), math.random(MAX_VAL), math.random(MAX_VAL), math.random(MAX_VAL), PADDING}) end; --- ... function gen_delete() pcall(s.delete, s, math.random(MAX_KEY)) end; --- ... function gen_update() pcall(s.update, s, math.random(MAX_KEY), {{'+', 5, 1}}) end; --- ... function dml_loop() while not stop do gen_insert() gen_update() gen_delete() fiber.sleep(0) end ch:put(true) end; --- ... function snap_loop() while not stop do box.snapshot() fiber.sleep(0.1) end ch:put(true) end; --- ... stop = false; --- ... ch = fiber.channel(3); --- ... _ = fiber.create(dml_loop); --- ... _ = fiber.create(dml_loop); --- ... _ = fiber.create(snap_loop); --- ... failed = {}; --- ... 
for i = 1, 10000 do local val = math.random(MAX_VAL) box.begin() local res1 = s.index.i1:select({val}) local res2 = s.index.i2:select({val}) box.commit() local equal = true if #res1 == #res2 then for _, t1 in ipairs(res1) do local found = false for _, t2 in ipairs(res2) do if t1[1] == t2[1] then found = true break end end if not found then equal = false break end end else equal = false end if not equal then table.insert(failed, {res1, res2}) end fiber.sleep(0) end; --- ... stop = true; --- ... for i = 1, ch:size() do ch:get() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... #failed == 0 or failed --- - true ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/parallel.result0000664000000000000000000000170713306560010021361 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... fiber = require('fiber') --- ... s = box.schema.space.create('vinyl', {engine = 'vinyl'}) --- ... i = s:create_index('primary', {type = 'tree'}) --- ... n = 10 --- ... m = 10 --- ... ch = fiber.channel(m) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function stest(l, m, n) for i = 0, n - 1 do s:insert({i * m + l, 'tuple ' .. tostring(i * m + l)}) end ch:put(1) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... for i = 0, m - 1 do f = fiber.create(stest, i, m, n) end --- ... cnt = 0 --- ... start_time = fiber.time() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 0, m - 1 do tm = start_time + 2 - fiber.time() if tm < 0 then tm = 0 end cnt = cnt + (ch:get(tm) or 0) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... cnt == m --- - true ... i:count() == m * n --- - true ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/errinj_vylog.test.lua0000664000000000000000000000534513306565107022535 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') -- -- Check that an error to commit a new run to vylog does not -- break vinyl permanently. 
-- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') _ = s:insert{1, 'x'} SCHED_TIMEOUT = 0.05 box.error.injection.set('ERRINJ_VY_SCHED_TIMEOUT', SCHED_TIMEOUT) box.error.injection.set('ERRINJ_VY_LOG_FLUSH', true); box.snapshot() box.error.injection.set('ERRINJ_VY_LOG_FLUSH', false); fiber.sleep(2 * SCHED_TIMEOUT) box.error.injection.set('ERRINJ_VY_SCHED_TIMEOUT', 0) _ = s:insert{2, 'y'} box.snapshot() _ = s:insert{3, 'z'} test_run:cmd('restart server default') s = box.space.test s:select() s:drop() -- -- Check that an index drop/truncate/create record we failed to -- write to vylog is flushed along with the next record. -- s1 = box.schema.space.create('test1', {engine = 'vinyl'}) _ = s1:create_index('pk') _ = s1:insert{1, 'a'} s2 = box.schema.space.create('test2', {engine = 'vinyl'}) _ = s2:create_index('pk') _ = s2:insert{2, 'b'} box.snapshot() _ = s1:insert{3, 'c'} _ = s2:insert{4, 'd'} box.error.injection.set('ERRINJ_VY_LOG_FLUSH', true); s1:drop() s2:truncate() _ = s2:insert{5, 'e'} s3 = box.schema.space.create('test3', {engine = 'vinyl'}) _ = s3:create_index('pk') _ = s3:insert{6, 'f'} box.error.injection.set('ERRINJ_VY_LOG_FLUSH', false); box.snapshot() _ = s2:insert{7, 'g'} _ = s3:insert{8, 'h'} test_run:cmd('restart server default') s1 = box.space.test1 s1 == nil s2 = box.space.test2 s2:select() s2:drop() s3 = box.space.test3 s3:select() s3:drop() -- -- Check that if a buffered index drop/truncate/create record -- does not make it to the vylog before restart, it will be -- replayed on recovery. 
-- s1 = box.schema.space.create('test1', {engine = 'vinyl'}) _ = s1:create_index('pk') _ = s1:insert{111, 'aaa'} s2 = box.schema.space.create('test2', {engine = 'vinyl'}) _ = s2:create_index('pk') _ = s2:insert{222, 'bbb'} box.snapshot() _ = s1:insert{333, 'ccc'} _ = s2:insert{444, 'ddd'} box.error.injection.set('ERRINJ_VY_LOG_FLUSH', true); s1:drop() s2:truncate() _ = s2:insert{555, 'eee'} s3 = box.schema.space.create('test3', {engine = 'vinyl'}) _ = s3:create_index('pk') _ = s3:insert{666, 'fff'} -- gh-2532: replaying create/drop from xlog crashes tarantool test_run:cmd("setopt delimiter ';'") for i = 1, 10 do s = box.schema.space.create('test', {engine = 'vinyl'}) s:create_index('primary') s:create_index('secondary', {unique = false, parts = {2, 'string'}}) s:insert{i, 'test' .. i} s:truncate() s:drop() end test_run:cmd("setopt delimiter ''"); test_run:cmd('restart server default') s1 = box.space.test1 s1 == nil s2 = box.space.test2 s2:select() s2:drop() s3 = box.space.test3 s3:select() s3:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/hermitage.test.lua0000664000000000000000000001764313306560010021761 0ustar rootroot-- -- hermitage: Testing transaction isolation levels. -- github.com/ept/hermitage -- -- Testing Vinyl transactional isolation in Tarantool. 
-- -- ************************************************************************* -- 1.7 setup begins -- ************************************************************************* test_run = require('test_run').new() txn_proxy = require('txn_proxy') _ = box.schema.space.create('test', {engine = 'vinyl'}) _ = box.space.test:create_index('pk') c1 = txn_proxy.new() c2 = txn_proxy.new() c3 = txn_proxy.new() t = box.space.test -- ************************************************************************* -- 1.7 setup up marker: end of test setup -- ************************************************************************* -- ------------------------------------------------------------------------ -- READ COMMITTED basic requirements: G0 -- ------------------------------------------------------------------------ -- -- REPLACE -- -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:replace{1, 11}") c2("t:replace{1, 12}") c1("t:replace{2, 21}") c1:commit() c2("t:replace{2, 22}") c2:commit() -- success, the last writer wins t:get{1} -- {1, 12} t:get{2} -- {2, 22} -- teardown t:truncate() -- -- UPDATE -- -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:update(1, {{'=', 2, 11}})") c2("t:update(1, {{'=', 2, 12}})") c1("t:update(2, {{'=', 2, 21}})") c1:commit() c2("t:update(2, {{'=', 2, 22}})") c2:commit() -- rollback t:get{1} -- {1, 11} t:get{2} -- {2, 21} -- teardown t:truncate() -- ------------------------------------------------------------------------ -- READ COMMITTED basic requirements: G1A -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:replace{1, 101}") c2("t:replace{1, 10}") c1:rollback() c2("t:get{1}") -- {1, 10} c2:commit() -- true -- teardown t:truncate() -- ------------------------------------------------------------------------ -- READ COMMITTED basic requirements: G1B -- 
------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:replace{1, 101}") c2("t:get{1}") -- {1, 10} c1("t:replace{1, 11}") c1:commit() -- ok c2("t:get{1}") -- {1, 10} c2:commit() -- ok -- teardown t:truncate() -- ------------------------------------------------------------------------ -- Circular information flow: G1C -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:replace{1, 11}") c2("t:replace{2, 22}") c1("t:get{2}") -- {2, 20} c2("t:get{1}") -- {1, 10} c1:commit() -- ok c2:commit() -- rollback (@fixme: not necessary) -- teardown t:truncate() -- ------------------------------------------------------------------------ -- OTV: observable transaction vanishes -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c3:begin() c1("t:replace{1, 11}") c1("t:replace{2, 19}") c2("t:replace{1, 12}") c1:commit() -- ok c3("t:get{1}") -- {1, 11} c2("t:replace{2, 18}") c3("t:get{2}") -- {2, 19} c2:commit() -- write only transaction - OK to commit c3("t:get{2}") -- {2, 19} c3("t:get{1}") -- {1, 11} c3:commit() -- read only transaction - OK to commit, stays with its read view -- teardown t:truncate() -- ------------------------------------------------------------------------ -- PMP: Predicate with many preceders -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:select()") -- {1, 10}, {2, 20} c2("t:replace{3, 30}") c2:commit() -- ok c1("t:select()") -- still {1, 10}, {2, 20} c1:commit() -- ok -- teardown t:truncate() -- ------------------------------------------------------------------------ -- PMP write: predicate many preceders for write predicates -- 
------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:replace{1, 20}") c1("t:replace{2, 30}") c2("t:get{1}") -- {1, 10} c2("t:get{2}") -- {2, 20} c2("t:delete{2}") c1:commit() -- ok c2("t:get{1}") -- {1, 10} c2:commit() -- rollback -- conflict t:get{1} -- {1, 20} t:get{2} -- {2, 30} -- teardown t:truncate() -- ------------------------------------------------------------------------ -- P4: lost update: don't allow a subsequent commit to lose update -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:get{1}") -- {1, 10} c2("t:get{1}") -- {1, 10} c1("t:replace{1, 11}") c2("t:replace{1, 12}") c1:commit() -- ok c2:commit() -- rollback -- conflict -- teardown t:truncate() ------------------------------------------------------------------------ -- G-single: read skew -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:get{1}") -- {1, 10} c2("t:get{1}") -- {1, 10} c2("t:get{2}") -- {2, 20} c2("t:replace{1, 12}") c2("t:replace{2, 18}") c2:commit() -- ok c1("t:get{2}") -- {2, 20} c1:commit() -- ok -- teardown t:truncate() ------------------------------------------------------------------------ -- G-single: read skew, test with write predicate -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:get{1}") -- {1, 10} c2("t:get{1}") -- {1, 10} c2("t:get{2}") -- {2, 20} c2("t:replace{1, 12}") c2("t:replace{2, 18}") c2:commit() -- T2 c1("t:delete{2}") c1("t:get{2}") -- finds nothing c1:commit() -- rollback -- teardown t:truncate() -- ------------------------------------------------------------------------ -- G2-item: write skew -- ------------------------------------------------------------------------ -- setup 
t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() c1("t:get{1}") -- {1, 10} c1("t:get{2}") -- {2, 20} c2("t:get{1}") -- {1, 10} c2("t:get{2}") -- {2, 20} c1("t:replace{1, 11}") c2("t:replace{1, 21}") c1:commit() -- ok c2:commit() -- rollback -- conflict -- teardown t:truncate() -- ------------------------------------------------------------------------ -- G2: anti-dependency cycles -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c2:begin() -- select * from test where value % 3 = 0 c1("t:select()") -- {1, 10}, {2, 20} c2("t:select()") -- {1, 10}, {2, 20} c1("t:replace{3, 30}") c2("t:replace{4, 42}") c1:commit() -- ok c2:commit() -- rollback -- teardown t:truncate() -- ------------------------------------------------------------------------ -- G2: anti-dependency cycles with two items -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c1("t:get{1}") -- {1, 10} c1("t:get{2}") -- {2, 20} c2:begin() c2("t:replace{2, 25}") c2:commit() -- ok c3:begin() c3("t:get{1}") -- {1, 10} c3("t:get{2}") -- {2, 25} c3:commit() -- ok c1("t:replace{1, 0}") c1:commit() -- rollback -- teardown t:truncate() -- ------------------------------------------------------------------------ -- G2: anti-dependency cycles with two items (no replace) -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} t:replace{2, 20} c1:begin() c1("t:get{1}") -- {1, 10} c1("t:get{2}") -- {2, 20} c2:begin() c2("t:replace{2, 25}") c2:commit() -- ok c3:begin() c3("t:get{1}") -- {1, 10} c3("t:get{2}") -- {2, 25} c3:commit() -- ok -- c1("t:replace{1, 0)") c1:commit() -- ok -- teardown t:truncate() -- ************************************************************************* -- 1.7 cleanup marker: end of test cleanup -- ************************************************************************* -- 
box.space.test:drop() c1 = nil c2 = nil c3 = nil tarantool_1.9.1.26.g63eb81e3c/test/vinyl/info.test.lua0000664000000000000000000002051013306565107020746 0ustar rootroottest_run = require('test_run').new() -- Since we store LSNs in data files, the data size may differ -- from run to run. Deploy a new server to make sure it will be -- the same so that we can check it. test_run:cmd('create server test with script = "vinyl/info.lua"') test_run:cmd('start server test') test_run:cmd('switch test') fiber = require('fiber') s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {page_size = 4096, range_size = 16384, run_count_per_level = 1, run_size_ratio = 1000}) -- -- Helper functions. -- test_run:cmd("setopt delimiter ';'") -- Generate random 1K padding. function pad() local t = {} for i = 1, 1024 do t[i] = string.char(math.random(65, 90)) end return table.concat(t) end; -- Insert a tuple into the test space. function put(val) box.space.test:replace{val, pad()} end; -- Compute the difference between two tables containing stats. -- If a field value is the same, it will be set to nil in the -- resulting table. If 'path' is not 'nil', compare statistics -- starting from 'path'. function stat_diff(stat1, stat2, path) while path ~= nil and path ~= '' do local i = path:find('%.') or path:len() + 1 local node = path:sub(1, i - 1) path = path:sub(i + 1, path:len()) stat1 = stat1[node] stat2 = stat2[node] end if type(stat1) == 'string' then return nil end if type(stat1) == 'number' then return stat1 ~= stat2 and stat1 - stat2 or nil end assert(type(stat1) == 'table') local diff for k, v1 in pairs(stat1) do local v2 = stat2[k] local d = stat_diff(v1, v2) if d ~= nil then if diff == nil then diff = {} end diff[k] = d end end return diff end; -- Return index statistics. -- -- Note, latency measurement is beyond the scope of this test -- so we just filter it out. 
function istat() local st = box.space.test.index.pk:info() st.latency = nil return st end; -- Return global statistics. -- -- Note, quota watermark checking is beyond the scope of this -- test so we just filter out related statistics. function gstat() local st = box.info.vinyl() st.quota.use_rate = nil st.quota.dump_bandwidth = nil st.quota.watermark = nil return st end; -- Wait until a stat counter changes. function wait(stat_func, stat, path, diff) while (stat_diff(stat_func(), stat, path) or 0) < diff do fiber.sleep(0.01) end end; test_run:cmd("setopt delimiter ''"); -- initially stats are empty istat() gstat() -- -- Index statistics. -- -- Compressed data size may differ as padding is random. -- Besides, it may depend on the zstd version so let's -- filter it out. test_run:cmd("push filter 'bytes_compressed: .*' to 'bytes_compressed: '") -- put + dump st = istat() for i = 1, 100, 4 do put(i) end box.snapshot() wait(istat, st, 'disk.dump.count', 1) stat_diff(istat(), st) -- put + dump + compaction st = istat() for i = 1, 100, 2 do put(i) end box.snapshot() wait(istat, st, 'disk.compact.count', 1) stat_diff(istat(), st) -- point lookup from disk + cache put st = istat() s:get(1) ~= nil stat_diff(istat(), st) -- point lookup from cache st = istat() s:get(1) ~= nil stat_diff(istat(), st) -- put in memory + cache invalidate st = istat() put(1) stat_diff(istat(), st) -- point lookup from memory st = istat() s:get(1) ~= nil stat_diff(istat(), st) -- put in txw + point lookup from txw st = istat() box.begin() put(1) s:get(1) ~= nil stat_diff(istat(), st) box.rollback() -- apply upsert in txw st = istat() box.begin() _ = s:replace{1} _ = s:upsert({1}, {{'=', 2, pad()}}) stat_diff(istat(), st, 'upsert') box.rollback() -- apply upsert on get st = istat() _ = s:upsert({5}, {{'=', 2, pad()}}) s:get(5) ~= nil stat_diff(istat(), st, 'upsert') -- cache eviction assert(box.cfg.vinyl_cache < 100 * 1024) for i = 1, 100 do put(i) end st = istat() for i = 1, 100 do s:get(i) end 
stat_diff(istat(), st, 'cache') -- range split for i = 1, 100 do put(i) end st = istat() box.snapshot() wait(istat, st, 'disk.compact.count', 2) st = istat() st.range_count -- 2 st.run_count -- 2 st.run_avg -- 1 st.run_histogram -- [1]:2 -- range lookup for i = 1, 100 do put(i) end box.begin() for i = 1, 100, 2 do put(i) end st = istat() #s:select() stat_diff(istat(), st) box.rollback() -- range lookup from cache assert(box.cfg.vinyl_cache > 10 * 1024) for i = 1, 100 do put(i) end box.begin() #s:select({}, {limit = 5}) st = istat() #s:select({}, {limit = 5}) stat_diff(istat(), st) box.rollback() -- -- Global statistics. -- -- use quota st = gstat() put(1) stat_diff(gstat(), st, 'quota') -- use cache st = gstat() _ = s:get(1) stat_diff(gstat(), st, 'cache') s:delete(1) -- rollback st = gstat() box.begin() _ = s:insert{1} box.rollback() stat_diff(gstat(), st, 'tx') -- conflict st = gstat() ch1 = fiber.channel(1) ch2 = fiber.channel(1) test_run:cmd("setopt delimiter ';'") _ = fiber.create(function() box.begin() s:insert{1} ch1:put(true) ch2:get() pcall(box.commit) ch1:put(true) end); test_run:cmd("setopt delimiter ''"); ch1:get() _ = s:insert{1} ch2:put(true) ch1:get() stat_diff(gstat(), st, 'tx') s:delete(1) -- tx statements st = gstat() box.begin() for i = 1, 10 do s:replace{i} end stat_diff(gstat(), st, 'tx') box.rollback() stat_diff(gstat(), st, 'tx') -- transactions st = gstat() ch1 = fiber.channel(5) ch2 = fiber.channel(5) test_run:cmd("setopt delimiter ';'") for i = 1, 5 do fiber.create(function() box.begin() s:replace{i} ch1:put(true) ch2:get() box.rollback() ch1:put(true) end) end; test_run:cmd("setopt delimiter ''"); for i = 1, 5 do ch1:get() end stat_diff(gstat(), st, 'tx') for i = 1, 5 do ch2:put(true) end for i = 1, 5 do ch1:get() end stat_diff(gstat(), st, 'tx') -- read view st = gstat() ch1 = fiber.channel(1) ch2 = fiber.channel(1) test_run:cmd("setopt delimiter ';'") _ = fiber.create(function() box.begin() s:select() ch1:put(true) ch2:get() 
pcall(box.commit) ch1:put(true) end); test_run:cmd("setopt delimiter ''"); ch1:get() _ = s:insert{1} stat_diff(gstat(), st, 'tx') ch2:put(true) ch1:get() stat_diff(gstat(), st, 'tx') s:delete(1) -- gap locks st = gstat() box.begin() _ = s:select({10}, {iterator = 'LT'}) _ = s:select({20}, {iterator = 'GT'}) stat_diff(gstat(), st, 'tx') box.commit() stat_diff(gstat(), st, 'tx') s:drop() -- -- space.bsize, index.len, index.bsize -- s = box.schema.space.create('test', {engine = 'vinyl'}) s:bsize() i1 = s:create_index('i1', {parts = {1, 'unsigned'}, run_count_per_level = 1}) i2 = s:create_index('i2', {parts = {2, 'unsigned'}, run_count_per_level = 1}) s:bsize() i1:len(), i2:len() i1:bsize(), i2:bsize() for i = 1, 100, 2 do s:replace{i, i, pad()} end st1 = i1:info() st2 = i2:info() s:bsize() i1:len(), i2:len() i1:bsize(), i2:bsize() s:bsize() == st1.memory.bytes i1:len() == st1.memory.rows i2:len() == st2.memory.rows i1:bsize() == st1.memory.index_size i2:bsize() == st2.memory.index_size box.snapshot() st1 = i1:info() st2 = i2:info() s:bsize() i1:len(), i2:len() i1:bsize(), i2:bsize() s:bsize() == st1.disk.bytes i1:len() == st1.disk.rows i2:len() == st2.disk.rows i1:bsize() == st1.disk.index_size + st1.disk.bloom_size i2:bsize() == st2.disk.index_size + st2.disk.bloom_size + st2.disk.bytes for i = 1, 100, 2 do s:delete(i) end for i = 2, 100, 2 do s:replace{i, i, pad()} end st1 = i1:info() st2 = i2:info() s:bsize() i1:len(), i2:len() i1:bsize(), i2:bsize() s:bsize() == st1.memory.bytes + st1.disk.bytes i1:len() == st1.memory.rows + st1.disk.rows i2:len() == st2.memory.rows + st2.disk.rows i1:bsize() == st1.memory.index_size + st1.disk.index_size + st1.disk.bloom_size i2:bsize() == st2.memory.index_size + st2.disk.index_size + st2.disk.bloom_size + st2.disk.bytes box.snapshot() wait(function() return i1:info() end, st1, 'disk.compact.count', 1) wait(function() return i2:info() end, st2, 'disk.compact.count', 1) st1 = i1:info() st2 = i2:info() s:bsize() i1:len(), i2:len() 
i1:bsize(), i2:bsize() s:bsize() == st1.disk.bytes i1:len() == st1.disk.rows i2:len() == st2.disk.rows i1:bsize() == st1.disk.index_size + st1.disk.bloom_size i2:bsize() == st2.disk.index_size + st2.disk.bloom_size + st2.disk.bytes s:drop() test_run:cmd('switch default') test_run:cmd('stop server test') test_run:cmd('cleanup server test') test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/vinyl/info.result0000664000000000000000000003561413306565107020540 0ustar rootroottest_run = require('test_run').new() --- ... -- Since we store LSNs in data files, the data size may differ -- from run to run. Deploy a new server to make sure it will be -- the same so that we can check it. test_run:cmd('create server test with script = "vinyl/info.lua"') --- - true ... test_run:cmd('start server test') --- - true ... test_run:cmd('switch test') --- - true ... fiber = require('fiber') --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {page_size = 4096, range_size = 16384, run_count_per_level = 1, run_size_ratio = 1000}) --- ... -- -- Helper functions. -- test_run:cmd("setopt delimiter ';'") --- - true ... -- Generate random 1K padding. function pad() local t = {} for i = 1, 1024 do t[i] = string.char(math.random(65, 90)) end return table.concat(t) end; --- ... -- Insert a tuple into the test space. function put(val) box.space.test:replace{val, pad()} end; --- ... -- Compute the difference between two tables containing stats. -- If a field value is the same, it will be set to nil in the -- resulting table. If 'path' is not 'nil', compare statistics -- starting from 'path'. 
function stat_diff(stat1, stat2, path) while path ~= nil and path ~= '' do local i = path:find('%.') or path:len() + 1 local node = path:sub(1, i - 1) path = path:sub(i + 1, path:len()) stat1 = stat1[node] stat2 = stat2[node] end if type(stat1) == 'string' then return nil end if type(stat1) == 'number' then return stat1 ~= stat2 and stat1 - stat2 or nil end assert(type(stat1) == 'table') local diff for k, v1 in pairs(stat1) do local v2 = stat2[k] local d = stat_diff(v1, v2) if d ~= nil then if diff == nil then diff = {} end diff[k] = d end end return diff end; --- ... -- Return index statistics. -- -- Note, latency measurement is beyond the scope of this test -- so we just filter it out. function istat() local st = box.space.test.index.pk:info() st.latency = nil return st end; --- ... -- Return global statistics. -- -- Note, quota watermark checking is beyond the scope of this -- test so we just filter out related statistics. function gstat() local st = box.info.vinyl() st.quota.use_rate = nil st.quota.dump_bandwidth = nil st.quota.watermark = nil return st end; --- ... -- Wait until a stat counter changes. function wait(stat_func, stat, path, diff) while (stat_diff(stat_func(), stat, path) or 0) < diff do fiber.sleep(0.01) end end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... 
-- initially stats are empty istat() --- - rows: 0 run_avg: 0 bytes: 0 upsert: squashed: 0 applied: 0 lookup: 0 run_count: 0 cache: invalidate: rows: 0 bytes: 0 index_size: 0 rows: 0 evict: rows: 0 bytes: 0 put: rows: 0 bytes: 0 lookup: 0 bytes: 0 get: rows: 0 bytes: 0 range_count: 1 put: rows: 0 bytes: 0 disk: index_size: 0 rows: 0 bytes: 0 dump: in: rows: 0 bytes: 0 count: 0 out: rows: 0 bytes: 0 compact: in: rows: 0 bytes: 0 count: 0 out: rows: 0 bytes: 0 iterator: read: bytes_compressed: 0 pages: 0 rows: 0 bytes: 0 bloom: hit: 0 miss: 0 lookup: 0 get: rows: 0 bytes: 0 pages: 0 bytes_compressed: 0 bloom_size: 0 txw: bytes: 0 rows: 0 iterator: lookup: 0 get: rows: 0 bytes: 0 run_histogram: '[0]:1' memory: bytes: 0 index_size: 0 rows: 0 iterator: lookup: 0 get: rows: 0 bytes: 0 get: rows: 0 bytes: 0 ... gstat() --- - cache: limit: 15360 tuples: 0 used: 0 tx: conflict: 0 commit: 0 rollback: 0 statements: 0 transactions: 0 gap_locks: 0 read_views: 0 quota: limit: 134217728 used: 0 ... -- -- Index statistics. -- -- Compressed data size may differ as padding is random. -- Besides, it may depend on the zstd version so let's -- filter it out. test_run:cmd("push filter 'bytes_compressed: .*' to 'bytes_compressed: '") --- - true ... -- put + dump st = istat() --- ... for i = 1, 100, 4 do put(i) end --- ... box.snapshot() --- - ok ... wait(istat, st, 'disk.dump.count', 1) --- ... stat_diff(istat(), st) --- - rows: 25 run_avg: 1 run_count: 1 disk: dump: in: rows: 25 bytes: 26525 count: 1 out: rows: 25 bytes: 26049 index_size: 294 rows: 25 bloom_size: 4096 pages: 7 bytes: 26049 bytes_compressed: bytes: 26049 put: rows: 25 bytes: 26525 ... -- put + dump + compaction st = istat() --- ... for i = 1, 100, 2 do put(i) end --- ... box.snapshot() --- - ok ... wait(istat, st, 'disk.compact.count', 1) --- ... 
stat_diff(istat(), st) --- - disk: dump: in: rows: 50 bytes: 53050 count: 1 out: rows: 50 bytes: 52091 index_size: 252 rows: 25 bytes_compressed: pages: 6 bytes: 26042 compact: in: rows: 75 bytes: 78140 count: 1 out: rows: 50 bytes: 52091 put: rows: 50 bytes: 53050 rows: 25 bytes: 26042 ... -- point lookup from disk + cache put st = istat() --- ... s:get(1) ~= nil --- - true ... stat_diff(istat(), st) --- - cache: index_size: 49152 rows: 1 bytes: 1061 lookup: 1 put: rows: 1 bytes: 1061 lookup: 1 disk: iterator: read: bytes: 4167 pages: 1 bytes_compressed: rows: 4 lookup: 1 get: rows: 1 bytes: 1061 memory: iterator: lookup: 1 get: rows: 1 bytes: 1061 ... -- point lookup from cache st = istat() --- ... s:get(1) ~= nil --- - true ... stat_diff(istat(), st) --- - cache: lookup: 1 put: rows: 1 bytes: 1061 get: rows: 1 bytes: 1061 lookup: 1 get: rows: 1 bytes: 1061 ... -- put in memory + cache invalidate st = istat() --- ... put(1) --- ... stat_diff(istat(), st) --- - cache: invalidate: rows: 1 bytes: 1061 rows: -1 bytes: -1061 rows: 1 memory: index_size: 49152 bytes: 1061 rows: 1 put: rows: 1 bytes: 1061 bytes: 1061 ... -- point lookup from memory st = istat() --- ... s:get(1) ~= nil --- - true ... stat_diff(istat(), st) --- - cache: bytes: 1061 lookup: 1 rows: 1 put: rows: 1 bytes: 1061 memory: iterator: lookup: 1 get: rows: 1 bytes: 1061 lookup: 1 get: rows: 1 bytes: 1061 ... -- put in txw + point lookup from txw st = istat() --- ... box.begin() --- ... put(1) --- ... s:get(1) ~= nil --- - true ... stat_diff(istat(), st) --- - txw: rows: 1 bytes: 1061 iterator: lookup: 1 get: rows: 1 bytes: 1061 lookup: 1 get: rows: 1 bytes: 1061 ... box.rollback() --- ... -- apply upsert in txw st = istat() --- ... box.begin() --- ... _ = s:replace{1} --- ... _ = s:upsert({1}, {{'=', 2, pad()}}) --- ... stat_diff(istat(), st, 'upsert') --- - squashed: 1 applied: 1 ... box.rollback() --- ... -- apply upsert on get st = istat() --- ... _ = s:upsert({5}, {{'=', 2, pad()}}) --- ... 
s:get(5) ~= nil --- - true ... stat_diff(istat(), st, 'upsert') --- - applied: 1 ... -- cache eviction assert(box.cfg.vinyl_cache < 100 * 1024) --- - true ... for i = 1, 100 do put(i) end --- ... st = istat() --- ... for i = 1, 100 do s:get(i) end --- ... stat_diff(istat(), st, 'cache') --- - rows: 14 bytes: 14854 evict: rows: 86 bytes: 91246 lookup: 100 put: rows: 100 bytes: 106100 ... -- range split for i = 1, 100 do put(i) end --- ... st = istat() --- ... box.snapshot() --- - ok ... wait(istat, st, 'disk.compact.count', 2) --- ... st = istat() --- ... st.range_count -- 2 --- - 2 ... st.run_count -- 2 --- - 2 ... st.run_avg -- 1 --- - 1 ... st.run_histogram -- [1]:2 --- - '[1]:2' ... -- range lookup for i = 1, 100 do put(i) end --- ... box.begin() --- ... for i = 1, 100, 2 do put(i) end --- ... st = istat() --- ... #s:select() --- - 100 ... stat_diff(istat(), st) --- - cache: rows: 13 bytes: 13793 evict: rows: 37 bytes: 39257 lookup: 1 put: rows: 51 bytes: 54111 disk: iterator: read: bytes: 104300 pages: 25 bytes_compressed: rows: 100 lookup: 2 get: rows: 100 bytes: 106100 txw: iterator: lookup: 1 get: rows: 50 bytes: 53050 memory: iterator: lookup: 1 get: rows: 100 bytes: 106100 lookup: 1 get: rows: 100 bytes: 106100 ... box.rollback() --- ... -- range lookup from cache assert(box.cfg.vinyl_cache > 10 * 1024) --- - true ... for i = 1, 100 do put(i) end --- ... box.begin() --- ... #s:select({}, {limit = 5}) --- - 5 ... st = istat() --- ... #s:select({}, {limit = 5}) --- - 5 ... stat_diff(istat(), st) --- - cache: lookup: 1 put: rows: 5 bytes: 5305 get: rows: 9 bytes: 9549 txw: iterator: lookup: 1 lookup: 1 get: rows: 5 bytes: 5305 ... box.rollback() --- ... -- -- Global statistics. -- -- use quota st = gstat() --- ... put(1) --- ... stat_diff(gstat(), st, 'quota') --- - used: 1061 ... -- use cache st = gstat() --- ... _ = s:get(1) --- ... stat_diff(gstat(), st, 'cache') --- - used: 1101 tuples: 1 ... s:delete(1) --- ... -- rollback st = gstat() --- ... 
box.begin() --- ... _ = s:insert{1} --- ... box.rollback() --- ... stat_diff(gstat(), st, 'tx') --- - rollback: 1 ... -- conflict st = gstat() --- ... ch1 = fiber.channel(1) --- ... ch2 = fiber.channel(1) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... _ = fiber.create(function() box.begin() s:insert{1} ch1:put(true) ch2:get() pcall(box.commit) ch1:put(true) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... ch1:get() --- - true ... _ = s:insert{1} --- ... ch2:put(true) --- - true ... ch1:get() --- - true ... stat_diff(gstat(), st, 'tx') --- - conflict: 1 commit: 1 rollback: 1 ... s:delete(1) --- ... -- tx statements st = gstat() --- ... box.begin() --- ... for i = 1, 10 do s:replace{i} end --- ... stat_diff(gstat(), st, 'tx') --- - statements: 10 transactions: 1 ... box.rollback() --- ... stat_diff(gstat(), st, 'tx') --- - rollback: 1 ... -- transactions st = gstat() --- ... ch1 = fiber.channel(5) --- ... ch2 = fiber.channel(5) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 5 do fiber.create(function() box.begin() s:replace{i} ch1:put(true) ch2:get() box.rollback() ch1:put(true) end) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... for i = 1, 5 do ch1:get() end --- ... stat_diff(gstat(), st, 'tx') --- - statements: 5 transactions: 5 ... for i = 1, 5 do ch2:put(true) end --- ... for i = 1, 5 do ch1:get() end --- ... stat_diff(gstat(), st, 'tx') --- - rollback: 5 ... -- read view st = gstat() --- ... ch1 = fiber.channel(1) --- ... ch2 = fiber.channel(1) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... _ = fiber.create(function() box.begin() s:select() ch1:put(true) ch2:get() pcall(box.commit) ch1:put(true) end); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... ch1:get() --- - true ... _ = s:insert{1} --- ... stat_diff(gstat(), st, 'tx') --- - transactions: 1 gap_locks: 1 commit: 1 read_views: 1 ... ch2:put(true) --- - true ... ch1:get() --- - true ... 
stat_diff(gstat(), st, 'tx') --- - commit: 2 ... s:delete(1) --- ... -- gap locks st = gstat() --- ... box.begin() --- ... _ = s:select({10}, {iterator = 'LT'}) --- ... _ = s:select({20}, {iterator = 'GT'}) --- ... stat_diff(gstat(), st, 'tx') --- - transactions: 1 gap_locks: 2 ... box.commit() --- ... stat_diff(gstat(), st, 'tx') --- - commit: 1 ... s:drop() --- ... -- -- space.bsize, index.len, index.bsize -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... s:bsize() --- - 0 ... i1 = s:create_index('i1', {parts = {1, 'unsigned'}, run_count_per_level = 1}) --- ... i2 = s:create_index('i2', {parts = {2, 'unsigned'}, run_count_per_level = 1}) --- ... s:bsize() --- - 0 ... i1:len(), i2:len() --- - 0 - 0 ... i1:bsize(), i2:bsize() --- - 0 - 0 ... for i = 1, 100, 2 do s:replace{i, i, pad()} end --- ... st1 = i1:info() --- ... st2 = i2:info() --- ... s:bsize() --- - 53300 ... i1:len(), i2:len() --- - 50 - 50 ... i1:bsize(), i2:bsize() --- - 49152 - 49152 ... s:bsize() == st1.memory.bytes --- - true ... i1:len() == st1.memory.rows --- - true ... i2:len() == st2.memory.rows --- - true ... i1:bsize() == st1.memory.index_size --- - true ... i2:bsize() == st2.memory.index_size --- - true ... box.snapshot() --- - ok ... st1 = i1:info() --- ... st2 = i2:info() --- ... s:bsize() --- - 52199 ... i1:len(), i2:len() --- - 50 - 50 ... i1:bsize(), i2:bsize() --- - 4390 - 4946 ... s:bsize() == st1.disk.bytes --- - true ... i1:len() == st1.disk.rows --- - true ... i2:len() == st2.disk.rows --- - true ... i1:bsize() == st1.disk.index_size + st1.disk.bloom_size --- - true ... i2:bsize() == st2.disk.index_size + st2.disk.bloom_size + st2.disk.bytes --- - true ... for i = 1, 100, 2 do s:delete(i) end --- ... for i = 2, 100, 2 do s:replace{i, i, pad()} end --- ... st1 = i1:info() --- ... st2 = i2:info() --- ... s:bsize() --- - 107449 ... i1:len(), i2:len() --- - 150 - 150 ... i1:bsize(), i2:bsize() --- - 53542 - 54098 ... 
s:bsize() == st1.memory.bytes + st1.disk.bytes --- - true ... i1:len() == st1.memory.rows + st1.disk.rows --- - true ... i2:len() == st2.memory.rows + st2.disk.rows --- - true ... i1:bsize() == st1.memory.index_size + st1.disk.index_size + st1.disk.bloom_size --- - true ... i2:bsize() == st2.memory.index_size + st2.disk.index_size + st2.disk.bloom_size + st2.disk.bytes --- - true ... box.snapshot() --- - ok ... wait(function() return i1:info() end, st1, 'disk.compact.count', 1) --- ... wait(function() return i2:info() end, st2, 'disk.compact.count', 1) --- ... st1 = i1:info() --- ... st2 = i2:info() --- ... s:bsize() --- - 52199 ... i1:len(), i2:len() --- - 50 - 50 ... i1:bsize(), i2:bsize() --- - 4390 - 4946 ... s:bsize() == st1.disk.bytes --- - true ... i1:len() == st1.disk.rows --- - true ... i2:len() == st2.disk.rows --- - true ... i1:bsize() == st1.disk.index_size + st1.disk.bloom_size --- - true ... i2:bsize() == st2.disk.index_size + st2.disk.bloom_size + st2.disk.bytes --- - true ... s:drop() --- ... test_run:cmd('switch default') --- - true ... test_run:cmd('stop server test') --- - true ... test_run:cmd('cleanup server test') --- - true ... test_run:cmd("clear filter") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/bloom.test.lua0000664000000000000000000000326313306565107021131 0ustar rootroottest_run = require('test_run').new() -- -- Setting bloom_fpr to 1 disables bloom filter. 
-- s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {bloom_fpr = 1}) for i = 1, 10, 2 do s:insert{i} end box.snapshot() for i = 1, 10 do s:get{i} end stat = s.index.pk:info() stat.disk.bloom_size -- 0 stat.disk.iterator.bloom.hit -- 0 stat.disk.iterator.bloom.miss -- 0 s:drop() s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') reflects = 0 function cur_reflects() return box.space.test.index.pk:info().disk.iterator.bloom.hit end function new_reflects() local o = reflects reflects = cur_reflects() return reflects - o end seeks = 0 function cur_seeks() return box.space.test.index.pk:info().disk.iterator.lookup end function new_seeks() local o = seeks seeks = cur_seeks() return seeks - o end for i = 1,1000 do s:replace{i} end box.snapshot() _ = new_reflects() _ = new_seeks() for i = 1,1000 do s:select{i} end new_reflects() == 0 new_seeks() == 1000 for i = 1001,2000 do s:select{i} end new_reflects() > 980 new_seeks() < 20 test_run:cmd('restart server default') s = box.space.test reflects = 0 function cur_reflects() return box.space.test.index.pk:info().disk.iterator.bloom.hit end function new_reflects() local o = reflects reflects = cur_reflects() return reflects - o end seeks = 0 function cur_seeks() return box.space.test.index.pk:info().disk.iterator.lookup end function new_seeks() local o = seeks seeks = cur_seeks() return seeks - o end _ = new_reflects() _ = new_seeks() for i = 1,1000 do s:select{i} end new_reflects() == 0 new_seeks() == 1000 for i = 1001,2000 do s:select{i} end new_reflects() > 980 new_seeks() < 20 s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/parallel.test.lua0000664000000000000000000000140313306560010021573 0ustar rootrootenv = require('test_run') test_run = env.new() fiber = require('fiber') s = box.schema.space.create('vinyl', {engine = 'vinyl'}) i = s:create_index('primary', {type = 'tree'}) n = 10 m = 10 ch = fiber.channel(m) test_run:cmd("setopt delimiter ';'") function 
stest(l, m, n) for i = 0, n - 1 do s:insert({i * m + l, 'tuple ' .. tostring(i * m + l)}) end ch:put(1) end; test_run:cmd("setopt delimiter ''"); for i = 0, m - 1 do f = fiber.create(stest, i, m, n) end cnt = 0 start_time = fiber.time() test_run:cmd("setopt delimiter ';'") for i = 0, m - 1 do tm = start_time + 2 - fiber.time() if tm < 0 then tm = 0 end cnt = cnt + (ch:get(tm) or 0) end; test_run:cmd("setopt delimiter ''"); cnt == m i:count() == m * n s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/vinyl.lua0000664000000000000000000000161613306560010020170 0ustar rootroot#!/usr/bin/env tarantool box.cfg { listen = os.getenv("LISTEN"), memtx_memory = 512 * 1024 * 1024, memtx_max_tuple_size = 4 * 1024 * 1024, rows_per_wal = 1000000, vinyl_read_threads = 2, vinyl_write_threads = 3, vinyl_memory = 512 * 1024 * 1024, vinyl_range_size = 1024 * 64, vinyl_page_size = 1024, vinyl_run_count_per_level = 1, vinyl_run_size_ratio = 2, vinyl_cache = 10240, -- 10kB vinyl_max_tuple_size = 1024 * 1024 * 6, } function box_info_sort(data) if type(data)~='table' then return data end local keys = {} for k in pairs(data) do table.insert(keys, k) end table.sort(keys) local result = {} for _,k in pairs(keys) do local v = data[k] table.insert(result, {[k] = box_info_sort(v) }) end return result end require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upsert.test.lua0000664000000000000000000001534113306565107021343 0ustar rootroottest_run = require('test_run').new() -- gh-1671 upsert is broken in a transaction -- upsert after upsert space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1, 1, 2}) space:insert({2}) space:insert({3, 4, 'abc'}) box.begin() space:upsert({1}, {{'#', 3, 1}}) space:upsert({1}, {{'!', 2, 20}}) space:upsert({1}, {{'+', 3, 20}}) box.commit() space:select{} box.begin() space:upsert({2}, {{'!', 2, 10}}) space:upsert({3, 4, 5}, {{'+', 2, 1}}) space:upsert({2, 2, 2, 2}, {{'+', 2, 
10.5}}) space:upsert({3}, {{'-', 2, 2}}) box.commit() space:select{} space:drop() -- upsert after replace space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert{1} space:insert{2} box.begin() space:replace({3, 4}) space:upsert({3, 3, 3, 3}, {{'+', 2, 1}}) box.commit() space:select{} box.begin() space:replace({2, 2}) space:upsert({2}, {{'!', 2, 1}}) space:upsert({2}, {{'!', 2, 3}}) box.commit() space:select{} box.begin() space:replace({4}) space:upsert({4}, {{'!', 2, 1}}) space:replace({5}) space:upsert({4}, {{'!', 2, 3}}) space:upsert({5}, {{'!', 2, 1}, {'+', 2, 1}}) box.commit() space:select{} space:drop() -- upsert after delete space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert{1} space:insert{2} space:insert{3} space:insert{4} box.begin() space:delete({1}) space:upsert({1, 2}, {{'!', 2, 100}}) box.commit() space:select{} box.begin() space:delete({2}) space:upsert({1}, {{'+', 2, 1}}) space:upsert({2, 200}, {{'!', 2, 1000}}) space:upsert({2}, {{'!', 2, 1005}}) box.commit() space:select{} space:drop() -- replace after upsert space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert{1} space:insert{2} space:insert{3} space:insert{4} box.begin() space:upsert({1, 2}, {{'!', 2, 100}}) space:replace({1, 2, 3}) box.commit() space:select{} box.begin() space:upsert({2}, {{'!', 2, 2}}) space:upsert({3}, {{'!', 2, 3}}) space:replace({2, 20}) space:replace({3, 30}) box.commit() space:select{} space:drop() -- delete after upsert box.cfg{} space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert{1} space:insert{2} space:insert{3} space:insert{4} box.begin() space:upsert({1, 2}, {{'!', 2, 100}}) space:delete({1}) box.commit() space:select{} box.begin() space:upsert({5}, {{'!', 2, 100}}) space:delete({5}) box.commit() space:select{} box.begin() 
space:upsert({5}, {{'!', 2, 100}}) space:delete({4}) space:upsert({4}, {{'!', 2, 100}}) space:delete({5}) space:upsert({4}, {{'!', 2, 105}}) box.commit() space:select{} space:drop() -- -- gh-1829: vinyl: merge hot UPSERTS in the background -- gh-1828: Automatically convert UPSERT into REPLACE -- gh-1826: vinyl: memory explosion on UPSERT -- clock = require 'clock' space = box.schema.space.create('test', { engine = 'vinyl' }) _ = space:create_index('primary', { type = 'tree', range_size = 250 * 1024 * 1024 } ) test_run:cmd("setopt delimiter ';'") -- add a lot of UPSERT statements to the space function gen() for i=1,2000 do space:upsert({0, 0}, {{'+', 2, 1}}) end end; -- check that 'get' takes reasonable time function check() local start = clock.monotonic() for i=1,1000 do space:get(0) end return clock.monotonic() - start < 1 end; test_run:cmd("setopt delimiter ''"); -- No runs gen() check() -- exploded before #1826 -- Mem has DELETE box.snapshot() space:delete({0}) gen() check() -- exploded before #1826 -- Mem has REPLACE box.snapshot() space:replace({0, 0}) gen() check() -- exploded before #1826 -- Mem has only UPSERTS box.snapshot() gen() check() -- exploded before #1829 space:drop() -- test upsert statistic against some upsert scenarous test_run:cmd("setopt delimiter ';'") function upsert_stat_diff(stat2, stat1) return { squashed = stat2.upsert.squashed - stat1.upsert.squashed, applied = stat2.upsert.applied - stat1.upsert.applied } end; test_run:cmd("setopt delimiter ''"); space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') stat1 = index:info() -- separate upserts w/o on disk data space:upsert({1, 1, 1}, {{'+', 2, 10}}) space:upsert({1, 1, 1}, {{'-', 2, 20}}) space:upsert({1, 1, 1}, {{'=', 2, 20}}) stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 stat1.rows -- in-tx upserts box.begin() space:upsert({2, 1, 1}, {{'+', 2, 10}}) space:upsert({2, 1, 1}, {{'-', 2, 20}}) space:upsert({2, 1, 1}, {{'=', 2, 
20}}) box.commit() stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 stat1.rows box.snapshot() index:info().rows -- upsert with on disk data space:upsert({1, 1, 1}, {{'+', 2, 10}}) space:upsert({1, 1, 1}, {{'-', 2, 20}}) stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 stat1.rows -- count of applied apserts space:get({1}) stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 space:get({2}) stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 space:select({}) stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 -- start upsert optimizer for i = 0, 999 do space:upsert({3, 0, 0}, {{'+', 2, 1}}) end stat2 = index:info() upsert_stat_diff(stat2, stat1) stat1 = stat2 space:get{3} stat1.rows space:drop() -- fix behaviour after https://github.com/tarantool/tarantool/issues/2104 s = box.schema.space.create('test', {engine = 'vinyl'}) i = s:create_index('test', { run_count_per_level = 20 }) s:replace({1, 1}) box.snapshot() s:upsert({1, 1}, {{'+', 1, 1}}) s:upsert({1, 1}, {{'+', 2, 1}}) s:select() --both upserts are ignored due to primary key change -- -- gh-2520 use cache as a hint when applying upserts. 
-- old_stat = s.index.test:info() -- insert the first upsert s:upsert({100}, {{'=', 2, 200}}) -- force a dump, the inserted upsert is now on disk box.snapshot() -- populate the cache s:get{100} -- a lookup in a run was done to populate the cache new_stat = s.index.test:info() upsert_stat_diff(new_stat, old_stat) new_stat.disk.iterator.lookup - old_stat.disk.iterator.lookup old_stat = new_stat -- Add another upsert: the cached REPLACE will be used and the upsert will -- be applied immediately s:upsert({100}, {{'=', 2, 300}}) -- force a new dump box.snapshot() -- lookup the key s:get{100} -- -- since we converted upsert to replace on insert, we had to -- go no further than the latest dump to locate the latest -- value of the key -- new_stat = s.index.test:info() upsert_stat_diff(new_stat, old_stat) new_stat.disk.iterator.lookup - old_stat.disk.iterator.lookup -- -- gh-3003: crash in read iterator if upsert exactly matches -- the search key. -- s:truncate() s:insert{100, 100} box.snapshot() s:upsert({100}, {{'+', 2, 100}}) s:select({100}, 'GE') s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/dump_stress.result0000664000000000000000000000314313306560010022131 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd("create server test with script='vinyl/low_quota.lua'") --- - true ... test_run:cmd("start server test with args='1048576'") --- - true ... test_run:cmd('switch test') --- - true ... fiber = require 'fiber' --- ... pad_size = 1000 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function gen_tuple(k) local pad = {} for i = 1,pad_size do pad[i] = string.char(math.random(65, 90)) end return {k, k + 1, k + 2, k + 3, table.concat(pad)} end test_run:cmd("setopt delimiter ''"); --- ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s:create_index('i2', {unique = false, parts = {2, 'unsigned'}}) --- ... 
_ = s:create_index('i3', {unique = false, parts = {3, 'unsigned'}}) --- ... _ = s:create_index('i4', {unique = false, parts = {4, 'unsigned'}}) --- ... -- -- Schedule dump caused by snapshot and memory shortage concurrently. -- _ = fiber.create(function() while true do box.snapshot() fiber.sleep(0.01) end end) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for i = 1, 10 * box.cfg.vinyl_memory / pad_size do s:replace(gen_tuple(i)) if i % 100 == 0 then box.commit() box.begin() end end test_run:cmd("setopt delimiter ''"); --- ... s.index.i1:count() --- - 10485 ... s.index.i2:count() --- - 10485 ... s.index.i3:count() --- - 10485 ... s.index.i4:count() --- - 10485 ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server test") --- - true ... test_run:cmd("cleanup server test") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/quota_timeout.test.lua0000664000000000000000000000473513306565107022725 0ustar rootroottest_run = require('test_run').new() test_run:cmd("create server test with script='vinyl/low_quota.lua'") test_run:cmd("start server test with args='1048576'") test_run:cmd('switch test') fiber = require 'fiber' box.cfg{vinyl_timeout=0.01} box.error.injection.set('ERRINJ_VY_SCHED_TIMEOUT', 0.01) -- -- Check that a transaction is aborted on timeout if it exceeds -- quota and the scheduler doesn't manage to free memory. -- box.error.injection.set('ERRINJ_VY_RUN_WRITE', true) s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk') pad = string.rep('x', 2 * box.cfg.vinyl_memory / 3) _ = s:auto_increment{pad} s:count() box.info.vinyl().quota.used -- Since the following operation requires more memory than configured -- and dump is disabled, it should fail with ER_VY_QUOTA_TIMEOUT. 
_ = s:auto_increment{pad} s:count() box.info.vinyl().quota.used box.error.injection.set('ERRINJ_VY_RUN_WRITE', false) fiber.sleep(0.01) -- wait for scheduler to unthrottle -- -- Check that there's a warning in the log if a transaction -- waits for quota for more than too_long_threshold seconds. -- box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0.01) box.cfg{vinyl_timeout=60} box.cfg{too_long_threshold=0.01} _ = s:auto_increment{pad} _ = s:auto_increment{pad} test_run:cmd("push filter '[0-9.]+ sec' to ' sec'") test_run:grep_log('test', 'waited for .* quota for too long.*') test_run:cmd("clear filter") box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0) s:truncate() box.snapshot() -- -- Check that exceeding quota doesn't hang the scheduler -- in case there's nothing to dump. -- -- The following operation should fail instantly irrespective -- of the value of 'vinyl_timeout' (gh-3291). -- box.info.vinyl().quota.used == 0 box.cfg{vinyl_timeout = 9000} pad = string.rep('x', box.cfg.vinyl_memory) _ = s:auto_increment{pad} s:drop() box.snapshot() -- -- Check that exceeding quota triggers dump of all spaces. 
-- s1 = box.schema.space.create('test1', {engine = 'vinyl'}) _ = s1:create_index('pk') s2 = box.schema.space.create('test2', {engine = 'vinyl'}) _ = s2:create_index('pk') pad = string.rep('x', 64) _ = s1:auto_increment{pad} s1.index.pk:info().memory.bytes > 0 pad = string.rep('x', box.cfg.vinyl_memory - string.len(pad)) _ = s2:auto_increment{pad} while s1.index.pk:info().disk.dump.count == 0 do fiber.sleep(0.01) end s1.index.pk:info().memory.bytes == 0 test_run:cmd('switch default') test_run:cmd("stop server test") test_run:cmd("cleanup server test") tarantool_1.9.1.26.g63eb81e3c/test/vinyl/write_iterator_rand.test.lua0000664000000000000000000001001013306560010024040 0ustar rootrootenv = require('test_run') test_run = env.new() test_run:cmd("setopt delimiter ';'") function clean_space(sp, cnt) for i = 1, cnt do sp:delete({i}) end box.snapshot() return sp:count() == 0 end; function check_tuples_len(sp, cnt, len) for i = 1, cnt do if not (#sp:get({i}) == len) then return false end end return true end; function fill_space(sp, cnt) local err = 'delete after upsert error' for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end for i = 1, cnt do sp:delete({i}) end box.snapshot() if not (sp:count() == 0) then return err end err = 'upsert after delete error' for i = 1, cnt do sp:insert({i}) end for i = 1, cnt do sp:delete({i}) end for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not (sp:count() == cnt) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'upsert before upsert error' for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'replace before upsert error' for i = 1, cnt do sp:replace({i}) end for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not 
check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'upsert before replace error' for i = 1, cnt do sp:upsert({i, i}, {{'!', 2, i}}) end for i = 1, cnt do sp:replace({i}) end box.snapshot() if not check_tuples_len(sp, cnt, 1) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'delete before replace error' for i = 1, cnt do sp:insert({i}) end box.snapshot() for i = 1, cnt do sp:delete({i}) end for i = 1, cnt do sp:replace({i, i}) end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'replace before delete error' for i = 1, cnt do sp:replace({i}) end for i = 1, cnt do sp:delete({i}) end box.snapshot() if not (sp:count() == 0) then return err end err = 'replace before replace error' for i = 1, cnt do sp:replace({i}) end for i = 1, cnt do sp:replace({i, i}) end box.snapshot() if not check_tuples_len(sp, cnt, 2) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'single upserts error' for i = 1, cnt do sp:upsert({i}, {{'!', 2, i}}) end box.snapshot() if not check_tuples_len(sp, cnt, 1) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end err = 'single replaces error' for i = 1, cnt do sp:replace({i}) end box.snapshot() if not check_tuples_len(sp, cnt, 1) then return err end err = 'clean after "'..err..'" error' if not clean_space(sp, cnt) then return err end return 'ok' end; function fill_space_with_sizes(page_size, range_size, cnt) local space = box.schema.space.create('test', { engine = 'vinyl' }) local pk = space:create_index('primary', { page_size = page_size, range_size = range_size }) local ret = fill_space(space, cnt) space:drop() return ret end; test_run:cmd("setopt delimiter ''"); -- Tests 
on write iterator with random combinations of page_size and range_size page_size = math.random(128, 256) range_size = page_size * math.random(10, 20) fill_space_with_sizes(page_size, range_size, 300) page_size = math.random(256, 512) range_size = page_size * math.random(10, 20) fill_space_with_sizes(page_size, range_size, 500) page_size = math.random(512, 1024) range_size = page_size * math.random(10, 20) fill_space_with_sizes(page_size, range_size, 700) page_size = math.random(1024, 2048) range_size = page_size * math.random(10, 20) fill_space_with_sizes(page_size, range_size, 900) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/errinj.result0000664000000000000000000005263013306565107021073 0ustar rootroot-- -- gh-1681: vinyl: crash in vy_rollback on ER_WAL_WRITE -- test_run = require('test_run').new() --- ... fio = require('fio') --- ... fiber = require('fiber') --- ... errinj = box.error.injection --- ... errinj.set("ERRINJ_VY_SCHED_TIMEOUT", 0.040) --- - ok ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... function f() box.begin() s:insert{1, 'hi'} s:insert{2, 'bye'} box.commit() end --- ... errinj.set("ERRINJ_WAL_WRITE", true) --- - ok ... f() --- - error: Failed to write to disk ... s:select{} --- - [] ... errinj.set("ERRINJ_WAL_WRITE", false) --- - ok ... f() --- ... s:select{} --- - - [1, 'hi'] - [2, 'bye'] ... s:drop() --- ... -- -- Lost data in case of dump error -- -- test_run:cmd("setopt delimiter ';'") --- - true ... if box.cfg.vinyl_page_size > 1024 or box.cfg.vinyl_range_size > 65536 then error("This test relies on splits and dumps") end; --- ... s = box.schema.space.create('test', {engine='vinyl'}); --- ... _ = s:create_index('pk'); --- ... 
-- fill up a range value = string.rep('a', 1024) last_id = 1 function range() local range_size = box.cfg.vinyl_range_size local page_size = box.cfg.vinyl_page_size local s = box.space.test local num_rows = 0 for i=1,range_size/page_size do for j=1, page_size/#value do s:replace({last_id, value}) last_id = last_id + 1 num_rows = num_rows + 1 end end return num_rows end; --- ... num_rows = 0; --- ... num_rows = num_rows + range(); --- ... box.snapshot(); --- - ok ... errinj.set("ERRINJ_VY_RUN_WRITE", true); --- - ok ... num_rows = num_rows + range(); --- ... -- fails due to error injection box.snapshot(); --- - error: Error injection 'vinyl dump' ... errinj.set("ERRINJ_VY_RUN_WRITE", false); --- - ok ... -- fails due to scheduler timeout box.snapshot(); --- - error: Error injection 'vinyl dump' ... fiber.sleep(0.06); --- ... num_rows = num_rows + range(); --- ... box.snapshot(); --- - ok ... num_rows = num_rows + range(); --- ... box.snapshot(); --- - ok ... num_rows; --- - 256 ... for i=1,num_rows do if s:get{i} == nil then error("Row "..i.."not found") end end; --- ... #s:select{} == num_rows; --- - true ... s:drop(); --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- Disable the cache so that we can check that disk errors -- are handled properly. vinyl_cache = box.cfg.vinyl_cache --- ... box.cfg{vinyl_cache = 0} --- ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... for i = 1, 10 do s:insert({i, 'test str' .. tostring(i)}) end --- ... box.snapshot() --- - ok ... s:select() --- - - [1, 'test str1'] - [2, 'test str2'] - [3, 'test str3'] - [4, 'test str4'] - [5, 'test str5'] - [6, 'test str6'] - [7, 'test str7'] - [8, 'test str8'] - [9, 'test str9'] - [10, 'test str10'] ... errinj.set("ERRINJ_VY_READ_PAGE", true) --- - ok ... s:select() --- - error: Error injection 'vinyl page read' ... errinj.set("ERRINJ_VY_READ_PAGE", false) --- - ok ... 
s:select() --- - - [1, 'test str1'] - [2, 'test str2'] - [3, 'test str3'] - [4, 'test str4'] - [5, 'test str5'] - [6, 'test str6'] - [7, 'test str7'] - [8, 'test str8'] - [9, 'test str9'] - [10, 'test str10'] ... errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) --- - ok ... function test_cancel_read () k = s:select() return #k end --- ... f1 = fiber.create(test_cancel_read) --- ... fiber.cancel(f1) --- ... -- task should be done fiber.sleep(0.1) --- ... errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); --- - ok ... s:select() --- - - [1, 'test str1'] - [2, 'test str2'] - [3, 'test str3'] - [4, 'test str4'] - [5, 'test str5'] - [6, 'test str6'] - [7, 'test str7'] - [8, 'test str8'] - [9, 'test str9'] - [10, 'test str10'] ... -- error after timeout for canceled fiber errinj.set("ERRINJ_VY_READ_PAGE", true) --- - ok ... errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) --- - ok ... f1 = fiber.create(test_cancel_read) --- ... fiber.cancel(f1) --- ... fiber.sleep(0.1) --- ... errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); --- - ok ... errinj.set("ERRINJ_VY_READ_PAGE", false); --- - ok ... s:select() --- - - [1, 'test str1'] - [2, 'test str2'] - [3, 'test str3'] - [4, 'test str4'] - [5, 'test str5'] - [6, 'test str6'] - [7, 'test str7'] - [8, 'test str8'] - [9, 'test str9'] - [10, 'test str10'] ... -- index is dropped while a read task is in progress errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) --- - ok ... f1 = fiber.create(test_cancel_read) --- ... fiber.cancel(f1) --- ... s:drop() --- ... fiber.sleep(0.1) --- ... errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); --- - ok ... box.cfg{vinyl_cache = vinyl_cache} --- ... -- gh-2871: check that long reads are logged s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... for i = 1, 10 do s:insert{i, i * 2} end --- ... box.snapshot() --- - ok ... too_long_threshold = box.cfg.too_long_threshold --- ... box.cfg{too_long_threshold = 0.01} --- ... 
errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0.05) --- - ok ... s:get(10) ~= nil --- - true ... #s:select(5, {iterator = 'LE'}) == 5 --- - true ... errinj.set("ERRINJ_VY_READ_PAGE_TIMEOUT", 0); --- - ok ... test_run:cmd("push filter 'lsn=[0-9]+' to 'lsn='") --- - true ... test_run:grep_log('default', 'get.* took too long') --- - get([10]) => INSERT([10, 20], lsn=) took too long ... test_run:grep_log('default', 'select.* took too long') --- - select([5], LE) => INSERT([5, 10], lsn=) took too long ... test_run:cmd("clear filter") --- - true ... box.cfg{too_long_threshold = too_long_threshold} --- ... s:drop() --- ... s = box.schema.space.create('test', {engine='vinyl'}); --- ... _ = s:create_index('pk'); --- ... _ = s:replace({1, string.rep('a', 128000)}) --- ... errinj.set("ERRINJ_WAL_WRITE_DISK", true) --- - ok ... box.snapshot() --- - error: Error injection 'xlog write injection' ... errinj.set("ERRINJ_WAL_WRITE_DISK", false) --- - ok ... fiber.sleep(0.06) --- ... _ = s:replace({2, string.rep('b', 128000)}) --- ... box.snapshot(); --- - ok ... #s:select({1}) --- - 1 ... s:drop() --- ... errinj.set("ERRINJ_VY_SCHED_TIMEOUT", 0) --- - ok ... -- -- Check that upsert squash fiber does not crash if index or -- in-memory tree is gone. -- errinj.set("ERRINJ_VY_SQUASH_TIMEOUT", 0.050) --- - ok ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... s:insert{0, 0} --- - [0, 0] ... box.snapshot() --- - ok ... for i=1,256 do s:upsert({0, 0}, {{'+', 2, 1}}) end --- ... box.snapshot() -- in-memory tree is gone --- - ok ... fiber.sleep(0.05) --- ... s:select() --- - - [0, 256] ... s:replace{0, 0} --- - [0, 0] ... box.snapshot() --- - ok ... for i=1,256 do s:upsert({0, 0}, {{'+', 2, 1}}) end --- ... s:drop() -- index is gone --- ... fiber.sleep(0.05) --- ... errinj.set("ERRINJ_VY_SQUASH_TIMEOUT", 0) --- - ok ... 
--https://github.com/tarantool/tarantool/issues/1842 --test error injection s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... s:replace{0, 0} --- - [0, 0] ... s:replace{1, 0} --- - [1, 0] ... s:replace{2, 0} --- - [2, 0] ... errinj.set("ERRINJ_WAL_WRITE", true) --- - ok ... s:replace{3, 0} --- - error: Failed to write to disk ... s:replace{4, 0} --- - error: Failed to write to disk ... s:replace{5, 0} --- - error: Failed to write to disk ... s:replace{6, 0} --- - error: Failed to write to disk ... errinj.set("ERRINJ_WAL_WRITE", false) --- - ok ... s:replace{7, 0} --- - [7, 0] ... s:replace{8, 0} --- - [8, 0] ... s:select{} --- - - [0, 0] - [1, 0] - [2, 0] - [7, 0] - [8, 0] ... s:drop() --- ... create_iterator = require('utils').create_iterator --- ... --iterator test test_run:cmd("setopt delimiter ';'") --- - true ... fiber_status = 0 function fiber_func() box.begin() s:replace{5, 5} fiber_status = 1 local res = {pcall(box.commit) } fiber_status = 2 return unpack(res) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... fiber = require('fiber') --- ... _ = s:replace{0, 0} --- ... _ = s:replace{10, 0} --- ... _ = s:replace{20, 0} --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... faced_trash = false for i = 1,100 do errinj.set("ERRINJ_WAL_WRITE", true) local f = fiber.create(fiber_func) local itr = create_iterator(s, {0}, {iterator='GE'}) local first = itr.next() local second = itr.next() if (second[1] ~= 5 and second[1] ~= 10) then faced_trash = true end while fiber_status <= 1 do fiber.sleep(0.001) end local _,next = pcall(itr.next) _,next = pcall(itr.next) _,next = pcall(itr.next) errinj.set("ERRINJ_WAL_WRITE", false) s:delete{5} end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... faced_trash --- - false ... s:drop() --- ... 
-- TX in prepared but not committed state s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk') --- ... fiber = require('fiber') --- ... txn_proxy = require('txn_proxy') --- ... s:replace{1, "original"} --- - [1, 'original'] ... s:replace{2, "original"} --- - [2, 'original'] ... s:replace{3, "original"} --- - [3, 'original'] ... c0 = txn_proxy.new() --- ... c0:begin() --- - ... c1 = txn_proxy.new() --- ... c1:begin() --- - ... c2 = txn_proxy.new() --- ... c2:begin() --- - ... c3 = txn_proxy.new() --- ... c3:begin() --- - ... -- -- Prepared transactions -- -- Pause WAL writer to cause all further calls to box.commit() to move -- transactions into prepared, but not committed yet state. errinj.set("ERRINJ_WAL_DELAY", true) --- - ok ... lsn = box.info.lsn --- ... c0('s:replace{1, "c0"}') --- - - [1, 'c0'] ... c0('s:replace{2, "c0"}') --- - - [2, 'c0'] ... c0('s:replace{3, "c0"}') --- - - [3, 'c0'] ... _ = fiber.create(c0.commit, c0) --- ... box.info.lsn == lsn --- - true ... c1('s:replace{1, "c1"}') --- - - [1, 'c1'] ... c1('s:replace{2, "c1"}') --- - - [2, 'c1'] ... _ = fiber.create(c1.commit, c1) --- ... box.info.lsn == lsn --- - true ... c3('s:select{1}') -- c1 is visible --- - - [[1, 'c1']] ... c2('s:replace{1, "c2"}') --- - - [1, 'c2'] ... c2('s:replace{3, "c2"}') --- - - [3, 'c2'] ... _ = fiber.create(c2.commit, c2) --- ... box.info.lsn == lsn --- - true ... c3('s:select{1}') -- c1 is visible, c2 is not --- - - [[1, 'c1']] ... c3('s:select{2}') -- c1 is visible --- - - [[2, 'c1']] ... c3('s:select{3}') -- c2 is not visible --- - - [[3, 'c0']] ... -- Resume WAL writer and wait until all transactions will been committed errinj.set("ERRINJ_WAL_DELAY", false) --- - ok ... REQ_COUNT = 7 --- ... while box.info.lsn - lsn < REQ_COUNT do fiber.sleep(0.01) end --- ... box.info.lsn == lsn + REQ_COUNT --- - true ... c3('s:select{1}') -- c1 is visible, c2 is not --- - - [[1, 'c1']] ... c3('s:select{2}') -- c1 is visible --- - - [[2, 'c1']] ... 
c3('s:select{3}') -- c2 is not visible --- - - [[3, 'c0']] ... c3:commit() --- - ... s:drop() --- ... -- -- Test mem restoration on a prepared and not commited statement -- after moving iterator into read view. -- space = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = space:create_index('pk') --- ... space:replace{1} --- - [1] ... space:replace{2} --- - [2] ... space:replace{3} --- - [3] ... last_read = nil --- ... errinj.set("ERRINJ_WAL_DELAY", true) --- - ok ... test_run:cmd("setopt delimiter ';'") --- - true ... -- block until wal_delay = false -- send iterator to read view -- flush mem and update index version to trigger iterator restore function fill_space() box.begin() space:replace{1} space:replace{2} space:replace{3} box.commit() space:replace{1, 1} box.snapshot() end; --- ... function iterate_in_read_view() local i = create_iterator(space) last_read = i.next() fiber.sleep(100000) last_read = i.next() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... f1 = fiber.create(fill_space) --- ... -- Prepared transaction is blocked due to wal_delay. -- Start iterator with vlsn = INT64_MAX f2 = fiber.create(iterate_in_read_view) --- ... last_read --- - [1] ... -- Finish prepared transaction and send to read view the iterator. errinj.set("ERRINJ_WAL_DELAY", false) --- - ok ... while f1:status() ~= 'dead' do fiber.sleep(0.01) end --- ... f2:wakeup() --- ... while f2:status() ~= 'dead' do fiber.sleep(0.01) end --- ... last_read --- - [2] ... space:drop() --- ... -- -- Space drop in the middle of dump. -- test_run:cmd("create server test with script='vinyl/low_quota.lua'") --- - true ... test_run:cmd("start server test with args='1048576'") --- - true ... test_run:cmd('switch test') --- - true ... fiber = require 'fiber' --- ... box.cfg{vinyl_timeout = 0.001} --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... 
_ = s:create_index('i2', {parts = {2, 'unsigned'}}) --- ... _ = s:insert{1, 1} --- ... -- Delay dump so that we can manage to drop the space -- while it is still being dumped. box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0.1) --- - ok ... -- Before failing on quota timeout, the following fiber -- will trigger dump due to memory shortage. _ = fiber.create(function() s:insert{2, 2, string.rep('x', box.cfg.vinyl_memory)} end) --- ... -- Let the fiber run. fiber.sleep(0) --- ... -- Drop the space while the dump task is still running. s:drop() --- ... -- Wait for the dump task to complete. box.snapshot() --- - ok ... box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0) --- - ok ... -- -- Check that all dump/compact tasks that are in progress at -- the time when the server stops are aborted immediately. -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s:create_index('i2', {parts = {2, 'unsigned'}}) --- ... box.error.injection.set('ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT', 0.01) --- - ok ... for i = 1, 1000 do s:replace{i, i} end --- ... _ = fiber.create(function() box.snapshot() end) --- ... fiber.sleep(0.01) --- ... test_run:cmd('switch default') --- - true ... t1 = fiber.time() --- ... test_run:cmd("stop server test") --- - true ... t2 = fiber.time() --- ... t2 - t1 < 1 --- - true ... test_run:cmd("cleanup server test") --- - true ... -- -- If we logged an index creation in the metadata log before WAL write, -- WAL failure would result in leaving the index record in vylog forever. -- Since we use LSN to identify indexes in vylog, retrying index creation -- would then lead to a duplicate index id in vylog and hence inability -- to make a snapshot or recover. -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... errinj.set('ERRINJ_WAL_IO', true) --- - ok ... _ = s:create_index('pk') --- - error: Failed to write to disk ... errinj.set('ERRINJ_WAL_IO', false) --- - ok ... 
_ = s:create_index('pk') --- ... box.snapshot() --- - ok ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... c = 10 --- ... errinj.set("ERRINJ_WAL_WRITE_DISK", true) --- - ok ... for i = 1,10 do fiber.create(function() pcall(s.replace, s, {i}) c = c - 1 end) end --- ... while c ~= 0 do fiber.sleep(0.001) end --- ... s:select{} --- - [] ... errinj.set("ERRINJ_WAL_WRITE_DISK", false) --- - ok ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... for i = 0, 9 do s:replace({i, i + 1}) end --- ... box.snapshot() --- - ok ... errinj.set("ERRINJ_XLOG_GARBAGE", true) --- - ok ... s:select() --- - error: tx checksum mismatch ... errinj.set("ERRINJ_XLOG_GARBAGE", false) --- - ok ... errinj.set("ERRINJ_VYRUN_DATA_READ", true) --- - ok ... s:select() --- - error: failed to read from file ... errinj.set("ERRINJ_VYRUN_DATA_READ", false) --- - ok ... s:select() --- - - [0, 1] - [1, 2] - [2, 3] - [3, 4] - [4, 5] - [5, 6] - [6, 7] - [7, 8] - [8, 9] - [9, 10] ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... for i = 0, 9 do s:replace({i, i + 1}) end --- ... errinj.set("ERRINJ_XLOG_GARBAGE", true) --- - ok ... box.snapshot() --- - error: tx checksum mismatch ... for i = 10, 19 do s:replace({i, i + 1}) end --- ... errinj.set("ERRINJ_XLOG_GARBAGE", false) --- - ok ... box.snapshot() --- - ok ... s:select() --- - - [0, 1] - [1, 2] - [2, 3] - [3, 4] - [4, 5] - [5, 6] - [6, 7] - [7, 8] - [8, 9] - [9, 10] - [10, 11] - [11, 12] - [12, 13] - [13, 14] - [14, 15] - [15, 16] - [16, 17] - [17, 18] - [18, 19] - [19, 20] ... s:drop() --- ... -- Point select from secondary index during snapshot. -- Once upon time that leaded to crash. s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... 
i1 = s:create_index('pk', {parts = {1, 'uint'}, bloom_fpr = 0.5}) --- ... i2 = s:create_index('sk', {parts = {2, 'uint'}, bloom_fpr = 0.5}) --- ... for i = 1,10 do s:replace{i, i, 0} end --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function worker() for i = 11,20,2 do s:upsert({i, i}, {{'=', 3, 1}}) errinj.set("ERRINJ_VY_POINT_ITER_WAIT", true) i1:select{i} s:upsert({i + 1 ,i + 1}, {{'=', 3, 1}}) errinj.set("ERRINJ_VY_POINT_ITER_WAIT", true) i2:select{i + 1} end end test_run:cmd("setopt delimiter ''"); --- ... f = fiber.create(worker) --- ... while f:status() ~= 'dead' do box.snapshot() fiber.sleep(0.01) end --- ... errinj.set("ERRINJ_VY_POINT_ITER_WAIT", false) --- - ok ... s:drop() --- ... -- vinyl: vy_cache_add: Assertion `0' failed -- https://github.com/tarantool/tarantool/issues/2685 s = box.schema.create_space('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... s:replace{2, 0} --- - [2, 0] ... box.snapshot() --- - ok ... s:replace{1, 0} --- - [1, 0] ... box.snapshot() --- - ok ... s:replace{0, 0} --- - [0, 0] ... s:select{0} --- - - [0, 0] ... errinj.set("ERRINJ_WAL_DELAY", true) --- - ok ... wait_replace = true --- ... _ = fiber.create(function() s:replace{1, 1} wait_replace = false end) --- ... gen,param,state = s:pairs({1}, {iterator = 'GE'}) --- ... state, value = gen(param, state) --- ... value --- - [1, 1] ... errinj.set("ERRINJ_WAL_DELAY", false) --- - ok ... while wait_replace do fiber.sleep(0.01) end --- ... state, value = gen(param, state) --- ... value --- - [2, 0] ... s:drop() --- ... -- -- gh-2442: secondary index cursor must skip key update, made -- after the secondary index scan, but before a primary index -- lookup. It is ok, and the test checks this. -- s = box.schema.create_space('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {{2, 'unsigned'}}}) --- ... s:replace{1, 1} --- - [1, 1] ... s:replace{3, 3} --- - [3, 3] ... box.snapshot() --- - ok ... 
ret = nil --- ... function do_read() ret = sk:select({2}, {iterator = 'GE'}) end --- ... errinj.set("ERRINJ_VY_DELAY_PK_LOOKUP", true) --- - ok ... f = fiber.create(do_read) --- ... f:status() --- - suspended ... ret --- - null ... s:replace{2, 2} --- - [2, 2] ... errinj.set("ERRINJ_VY_DELAY_PK_LOOKUP", false) --- - ok ... while ret == nil do fiber.sleep(0.01) end --- ... ret --- - - [3, 3] ... s:drop() --- ... -- -- gh-3412 - assertion failure at exit in case: -- * there is a fiber waiting for quota -- * there is a pending vylog write -- test_run:cmd("create server low_quota with script='vinyl/low_quota.lua'") --- - true ... test_run:cmd("start server low_quota with args='1048576'") --- - true ... test_run:cmd('switch low_quota') --- - true ... _ = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = box.space.test:create_index('pk') --- ... box.error.injection.set('ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT', 0.01) --- - ok ... fiber = require('fiber') --- ... pad = string.rep('x', 100 * 1024) --- ... _ = fiber.create(function() for i = 1, 11 do box.space.test:replace{i, pad} end end) --- ... repeat fiber.sleep(0.001) q = box.info.vinyl().quota until q.limit - q.used < pad:len() --- ... test_run:cmd("restart server low_quota with args='1048576'") box.error.injection.set('ERRINJ_VY_LOG_FLUSH_DELAY', true) --- - ok ... fiber = require('fiber') --- ... pad = string.rep('x', 100 * 1024) --- ... _ = fiber.create(function() for i = 1, 11 do box.space.test:replace{i, pad} end end) --- ... repeat fiber.sleep(0.001) q = box.info.vinyl().quota until q.limit - q.used < pad:len() --- ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server low_quota") --- - true ... test_run:cmd("cleanup server low_quota") --- - true ... -- -- gh-3437: if compaction races with checkpointing, it may remove -- files needed for backup. -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {run_count_per_level = 1}) --- ... 
-- Create a run file. _ = s:replace{1} --- ... box.snapshot() --- - ok ... -- Create another run file. This will trigger compaction -- as run_count_per_level is set to 1. Due to the error -- injection compaction will finish before snapshot. _ = s:replace{2} --- ... errinj.set('ERRINJ_SNAP_COMMIT_DELAY', true) --- - ok ... c = fiber.channel(1) --- ... _ = fiber.create(function() box.snapshot() c:put(true) end) --- ... while s.index.pk:info().disk.compact.count == 0 do fiber.sleep(0.001) end --- ... errinj.set('ERRINJ_SNAP_COMMIT_DELAY', false) --- - ok ... c:get() --- - true ... -- Check that all files corresponding to the last checkpoint -- are present. files = box.backup.start() --- ... missing = {} --- ... for _, f in pairs(files) do if not fio.path.exists(f) then table.insert(missing, f) end end --- ... missing --- - [] ... box.backup.stop() --- ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/savepoint.result0000664000000000000000000002224213306560010021572 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = 'vinyl' --- ... -- test duplicate conflict in the primary index space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... space:insert({1}) --- - [1] ... space:insert({2}) --- - [2] ... space:insert({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... space:insert({1}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:select{} --- - - [1] - [2] - [3] ... box.begin() --- ... space:insert({5}) --- - [5] ... space:insert({6}) --- - [6] ... space:insert({7}) --- - [7] ... space:insert({7}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:insert({8}) --- - [8] ... box.commit() --- ... index:select{} --- - - [1] - [2] - [3] - [5] - [6] - [7] - [8] ... index:get({1}) --- - [1] ... index:get({2}) --- - [2] ... index:get({3}) --- - [3] ... index:get({4}) --- ... 
index:get({5}) --- - [5] ... index:get({6}) --- - [6] ... index:get({7}) --- - [7] ... index:get({8}) --- - [8] ... space:drop() --- ... -- test duplicate conflict in the secondary index space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { parts = {1, 'uint'} }) --- ... index2 = space:create_index('secondary', { parts = {2, 'int', 3, 'str'} }) --- ... space:insert({1}) --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 3) ... space:insert({1, 1, 'a'}) --- - [1, 1, 'a'] ... space:insert({2, 2, 'a'}) --- - [2, 2, 'a'] ... space:insert({3, 2, 'b'}) --- - [3, 2, 'b'] ... space:insert({2, 3, 'c'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... index:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] ... -- fail all box.begin() --- ... space:insert({1, 10, '10'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:insert({2, 10, '10'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:insert({3, 10, '10'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... box.commit() --- ... index:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] ... -- fail at the begining box.begin() --- ... space:insert({1, 1, '1'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:insert({4, 4, 'd'}) --- - [4, 4, 'd'] ... space:insert({5, 5, 'd'}) --- - [5, 5, 'd'] ... box.commit() --- ... index:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] ... -- fail at the end box.begin() --- ... space:insert({6, 6, 'd'}) --- - [6, 6, 'd'] ... 
space:insert({7, 6, 'e'}) --- - [7, 6, 'e'] ... space:insert({1, 1, '1'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... box.commit() --- ... index:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] - [6, 6, 'd'] - [7, 6, 'e'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] - [6, 6, 'd'] - [7, 6, 'e'] ... -- fail pk box.begin() --- ... space:insert({1, 100, '100'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... box.commit() --- ... index:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] - [6, 6, 'd'] - [7, 6, 'e'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] - [6, 6, 'd'] - [7, 6, 'e'] ... -- fail secondary box.begin() --- ... space:insert({8, 6, 'd'}) --- - error: Duplicate key exists in unique index 'secondary' in space 'test' ... box.commit() --- ... index:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] - [6, 6, 'd'] - [7, 6, 'e'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 2, 'a'] - [3, 2, 'b'] - [4, 4, 'd'] - [5, 5, 'd'] - [6, 6, 'd'] - [7, 6, 'e'] ... space:drop() --- ... -- test other operations (update, delete, upsert) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... space:insert({1}) --- - [1] ... space:insert({2}) --- - [2] ... space:insert({3}) --- - [3] ... space:select{} --- - - [1] - [2] - [3] ... box.begin() --- ... space:insert({5}) --- - [5] ... index:update({1}, {{'+', 1, 3}}) --- - error: Attempt to modify a tuple field which is part of index 'primary' in space 'test' ... box.commit() --- ... index:select{} --- - - [1] - [2] - [3] - [5] ... box.begin() --- ... space:delete({5}) --- ... space:update({1}, {{'=', 2, 43}}) --- - [1, 43] ... space:insert({10}) --- - [10] ... 
space:upsert({3}, {{}, {'='}}) -- incorrect ops --- - error: Illegal parameters, update operation must be an array {op,..}, got empty array ... space:insert({15}) --- - [15] ... box.commit() --- ... index:select{} --- - - [1, 43] - [2] - [3] - [10] - [15] ... box.begin() --- ... space:delete({15}) --- ... space:delete({10}) --- ... space:insert({11}) --- - [11] ... space:upsert({12}, {}) --- ... space:insert({'abc'}) --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:update({1}, {{'#', 2, 1}}) --- - [1] ... box.commit() --- ... space:select{} --- - - [1] - [2] - [3] - [11] - [12] ... space:drop() --- ... -- test same on several indexes space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { parts = {1, 'unsigned', 2, 'string'} }) --- ... index2 = space:create_index('secondary', { parts = {2, 'string', 3, 'scalar'}, unique = false }) --- ... index3 = space:create_index('third', { parts = {4, 'integer', 2, 'string'} }) --- ... space:insert({1, 'a', 'sclr1', 20}) --- - [1, 'a', 'sclr1', 20] ... space:insert({1, 'b', 'sclr1', 20}) --- - [1, 'b', 'sclr1', 20] ... space:insert({1, 'c', 'sclr1', -30}) --- - [1, 'c', 'sclr1', -30] ... space:insert({2, 'a', true, 15}) --- - [2, 'a', true, 15] ... index:select{} --- - - [1, 'a', 'sclr1', 20] - [1, 'b', 'sclr1', 20] - [1, 'c', 'sclr1', -30] - [2, 'a', true, 15] ... index2:select{} --- - - [2, 'a', true, 15] - [1, 'a', 'sclr1', 20] - [1, 'b', 'sclr1', 20] - [1, 'c', 'sclr1', -30] ... index3:select{} --- - - [1, 'c', 'sclr1', -30] - [2, 'a', true, 15] - [1, 'a', 'sclr1', 20] - [1, 'b', 'sclr1', 20] ... box.begin() --- ... space:insert({1, 'a', 'sclr1', 20}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:update({2, 'a'}, {{'=', 3, 3.14}}) --- - [2, 'a', 3.14, 15] ... box.commit() --- ... 
index:select{} --- - - [1, 'a', 'sclr1', 20] - [1, 'b', 'sclr1', 20] - [1, 'c', 'sclr1', -30] - [2, 'a', 3.14, 15] ... index2:select{} --- - - [2, 'a', 3.14, 15] - [1, 'a', 'sclr1', 20] - [1, 'b', 'sclr1', 20] - [1, 'c', 'sclr1', -30] ... index3:select{} --- - - [1, 'c', 'sclr1', -30] - [2, 'a', 3.14, 15] - [1, 'a', 'sclr1', 20] - [1, 'b', 'sclr1', 20] ... box.begin() --- ... space:delete({1, 'a'}) --- ... space:insert({100, '100', '100', 100}) --- - [100, '100', '100', 100] ... space:update({2, 'a'}, {{}}) --- - error: Illegal parameters, update operation must be an array {op,..}, got empty array ... box.commit() --- ... index:select{} --- - - [1, 'b', 'sclr1', 20] - [1, 'c', 'sclr1', -30] - [2, 'a', 3.14, 15] - [100, '100', '100', 100] ... index2:select{} --- - - [100, '100', '100', 100] - [2, 'a', 3.14, 15] - [1, 'b', 'sclr1', 20] - [1, 'c', 'sclr1', -30] ... index3:select{} --- - - [1, 'c', 'sclr1', -30] - [2, 'a', 3.14, 15] - [1, 'b', 'sclr1', 20] - [100, '100', '100', 100] ... space:drop() --- ... -- test rollback space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { parts = {2, 'unsigned'}, unique = false }) --- ... index3 = space:create_index('third', { parts = {2, 'unsigned', 3, 'scalar'} }) --- ... space:insert({1, 1, 'a'}) --- - [1, 1, 'a'] ... space:insert({2, 1, 'b'}) --- - [2, 1, 'b'] ... space:insert({3, 2, 'a'}) --- - [3, 2, 'a'] ... index:select{} --- - - [1, 1, 'a'] - [2, 1, 'b'] - [3, 2, 'a'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 1, 'b'] - [3, 2, 'a'] ... index3:select{} --- - - [1, 1, 'a'] - [2, 1, 'b'] - [3, 2, 'a'] ... box.begin() --- ... space:insert({4, 2, 'b'}) --- - [4, 2, 'b'] ... space:upsert({2}, {{'=', 4, 1000}}) --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 3) ... index3:delete({3, 'a'}) --- ... 
space:insert({4, 100, 100}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... box.rollback() --- ... index:select{} --- - - [1, 1, 'a'] - [2, 1, 'b'] - [3, 2, 'a'] ... index2:select{} --- - - [1, 1, 'a'] - [2, 1, 'b'] - [3, 2, 'a'] ... index3:select{} --- - - [1, 1, 'a'] - [2, 1, 'b'] - [3, 2, 'a'] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/recovery_quota.result0000664000000000000000000000562713306565107022655 0ustar rootroottest_run = require('test_run').new() --- ... -- Upon start the test server creates a space and populates it with -- more tuples than can be stored in memory, which results in dumping -- some of them to disk. If on restart, during recovery from WAL, -- it replayed the dumped statements, it would exceed memory quota. -- Check that it does not. test_run:cmd('create server test with script = "vinyl/low_quota.lua"') --- - true ... test_run:cmd('start server test with args="2097152"') --- - true ... test_run:cmd('switch test') --- - true ... -- Create a vinyl space and trigger dump by exceeding memory quota. s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {run_count_per_level = 10}) --- ... pad_size = 1000 --- ... pad = string.rep('x', pad_size) --- ... for i = 1, 2 * box.cfg.vinyl_memory / pad_size do s:insert{i, pad} end --- ... -- Save the total number of committed and dumped statements. -- Make sure no task is completed after we saved stats. box.error.injection.set('ERRINJ_VY_TASK_COMPLETE', true) --- - ok ... var = box.schema.space.create('var') --- ... _ = var:create_index('pk', {parts = {1, 'string'}}) --- ... stat = box.space.test.index.pk:info() --- ... _ = var:insert{'put', stat.put.rows} --- ... _ = var:insert{'dump', stat.disk.dump.out.rows} --- ... test_run:cmd('restart server test with args="2097152"') -- Check that we do not exceed quota. stat = box.info.vinyl() --- ... 
stat.quota.used <= stat.quota.limit or {stat.quota.used, stat.quota.limit} --- - true ... -- Check that we did not replay statements dumped before restart. stat = box.space.test.index.pk:info() --- ... var = box.space.var --- ... dump_before = var:get('dump')[2] --- ... dump_after = stat.disk.dump.out.rows --- ... put_before = var:get('put')[2] --- ... put_after = stat.put.rows --- ... dump_after == 0 or dump_after --- - true ... put_before - dump_before == put_after or {dump_before, dump_after, put_before, put_after} --- - true ... -- Disable dump and use all memory up to the limit. box.error.injection.set('ERRINJ_VY_RUN_WRITE', true) --- - ok ... box.cfg{vinyl_timeout=0.001} --- ... pad_size = 1000 --- ... pad = string.rep('x', pad_size) --- ... for i = 1, box.cfg.vinyl_memory / pad_size do box.space.test:replace{i, pad} end --- - error: Timed out waiting for Vinyl memory quota ... box.info.vinyl().quota.used > 1024 * 1024 --- - true ... -- Check that tarantool can recover with a smaller memory limit. test_run:cmd('restart server test with args="1048576"') fiber = require 'fiber' --- ... -- All memory above the limit must be dumped after recovery. while box.space.test.index.pk:info().disk.dump.count == 0 do fiber.sleep(0.001) end --- ... stat = box.info.vinyl() --- ... stat.quota.used <= stat.quota.limit or {stat.quota.used, stat.quota.limit} --- - true ... _ = test_run:cmd('switch default') --- ... test_run:cmd('stop server test') --- - true ... test_run:cmd('cleanup server test') --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/ddl.result0000664000000000000000000004057513306565107020352 0ustar rootrootfiber = require('fiber') --- ... test_run = require('test_run').new() --- ... -- sanity checks space = box.schema.space.create('test', {engine = 'vinyl' }) --- ... space:create_index('pk', {range_size = 0}) --- - error: 'Wrong index options (field 4): range_size must be greater than 0' ... 
space:create_index('pk', {page_size = 0}) --- - error: 'Wrong index options (field 4): page_size must be greater than 0 and less than or equal to range_size' ... space:create_index('pk', {page_size = 8192, range_size = 4096}) --- - error: 'Wrong index options (field 4): page_size must be greater than 0 and less than or equal to range_size' ... space:create_index('pk', {run_count_per_level = 0}) --- - error: 'Wrong index options (field 4): run_count_per_level must be greater than 0' ... space:create_index('pk', {run_size_ratio = 1}) --- - error: 'Wrong index options (field 4): run_size_ratio must be greater than 1' ... space:create_index('pk', {bloom_fpr = 0}) --- - error: 'Wrong index options (field 4): bloom_fpr must be greater than 0 and less than or equal to 1' ... space:create_index('pk', {bloom_fpr = 1.1}) --- - error: 'Wrong index options (field 4): bloom_fpr must be greater than 0 and less than or equal to 1' ... space:drop() --- ... -- space secondary index create space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index1 = space:create_index('primary') --- ... index2 = space:create_index('secondary') --- ... space:drop() --- ... -- space index create hash space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary', {type = 'hash'}) --- - error: Unsupported index type supplied for index 'primary' in space 'test' ... space:drop() --- ... -- creation of a new index and altering the definition of an existing -- index are unsupported for non-empty spaces space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1}) --- - [1] ... -- fail because of wrong tuple format {1}, but need {1, ...} index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- - error: Vinyl does not support adding an index to a non-empty space ... 
space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... #box.space._index:select({space.id}) --- - 1 ... box.space._index:get{space.id, 0}[6] --- - [[0, 'unsigned']] ... space:drop() --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1, 2}) --- - [1, 2] ... index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- - error: Vinyl does not support adding an index to a non-empty space ... space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... #box.space._index:select({space.id}) --- - 1 ... box.space._index:get{space.id, 0}[6] --- - [[0, 'unsigned']] ... space:drop() --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary') --- ... space:insert({1, 2}) --- - [1, 2] ... index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- - error: Vinyl does not support adding an index to a non-empty space ... space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... #box.space._index:select({space.id}) --- - 1 ... box.space._index:get{space.id, 0}[6] --- - [[0, 'unsigned']] ... space:delete({1}) --- ... -- must fail because vy_mems have data index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- - error: Vinyl does not support adding an index to a non-empty space ... space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... box.snapshot() --- - ok ... while space.index.primary:info().rows ~= 0 do fiber.sleep(0.01) end --- ... -- After a dump REPLACE + DELETE = nothing, so the space is empty -- but an index can not be altered. 
index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- ... space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... -- Space format still can be altered. format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'unsigned'} --- ... space:format(format) --- ... space:drop() --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... index = space:create_index('primary', { run_count_per_level = 2 }) --- ... space:insert({1, 2}) --- - [1, 2] ... box.snapshot() --- - ok ... space:delete({1}) --- ... box.snapshot() --- - ok ... while space.index.primary:info().run_count ~= 2 do fiber.sleep(0.01) end --- ... -- must fail because vy_runs have data index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- - error: Vinyl does not support adding an index to a non-empty space ... space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... -- After compaction the REPLACE + DELETE + DELETE = nothing, so -- the space is now empty and can be altered. space:delete({1}) --- ... -- Make sure the run is big enough to trigger compaction. space:replace({2, 3}) --- - [2, 3] ... space:delete({2}) --- ... box.snapshot() --- - ok ... -- Wait until the dump is finished. while space.index.primary:info().rows ~= 0 do fiber.sleep(0.01) end --- ... index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) --- ... -- Can not alter an index even if it becames empty after dump. space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) --- - error: Vinyl does not support changing the definition of an index ... space:drop() --- ... -- -- gh-1709: need error on altering space -- space = box.schema.space.create('test', {engine='vinyl'}) --- ... 
pk = space:create_index('pk', {parts = {1, 'unsigned'}}) --- ... space:auto_increment{1} --- - [1, 1] ... space:auto_increment{2} --- - [2, 2] ... space:auto_increment{3} --- - [3, 3] ... box.space._index:replace{space.id, 0, 'pk', 'tree', {unique=true}, {{0, 'unsigned'}, {1, 'unsigned'}}} --- - error: Vinyl does not support changing the definition of an index ... space:select{} --- - - [1, 1] - [2, 2] - [3, 3] ... space:drop() --- ... -- Allow to specify various bloom fprs per index. space = box.schema.space.create('test', {engine='vinyl'}) --- ... pk = space:create_index('pk', {bloom_fpr = 0.1}) --- ... sec = space:create_index('sec', {bloom_fpr = 0.2}) --- ... third = space:create_index('third', {bloom_fpr = 0.3}) --- ... pk.options.bloom_fpr --- - 0.1 ... sec.options.bloom_fpr --- - 0.2 ... third.options.bloom_fpr --- - 0.3 ... space:drop() --- ... -- -- gh-2109: allow alter some opts of not empty indexes -- -- Forst, check that we can decrease run_count_per_level and it -- triggers compaction after next box.snapshot(). Ensure that the -- runs with different page_sizes and bloom_fprs are compacted -- correctly. -- space = box.schema.space.create('test', {engine='vinyl'}) --- ... page_size = 8192 --- ... range_size = 1024 * 1024 * 1024 --- ... bloom_fpr = 0.1 --- ... pk = space:create_index('pk', {run_count_per_level = 10, page_size = page_size, range_size = range_size, bloom_fpr = bloom_fpr}) --- ... pad_size = page_size / 5 --- ... pad = string.rep('I', pad_size) --- ... -- Create 4 pages with sizes 'page_size' for i = 1, 20 do space:replace{i, pad} end --- ... est_bsize = pad_size * 20 --- ... box.snapshot() --- - ok ... pk:info().disk.pages --- - 4 ... space.index.pk.options.page_size --- - 8192 ... pk:info().run_count --- - 1 ... space.index.pk.options.bloom_fpr --- - 0.1 ... -- Change page_size and trigger compaction page_size = page_size * 2 --- ... bloom_fpr = bloom_fpr * 2 --- ... 
pk:alter({page_size = page_size, run_count_per_level = 1, bloom_fpr = bloom_fpr}) --- ... pad_size = page_size / 5 --- ... pad = string.rep('I', pad_size) --- ... -- Create 4 pages with new sizes in new run for i = 1, 20 do space:replace{i + 20, pad} end --- ... est_bsize = est_bsize + pad_size * 20 --- ... box.snapshot() --- - ok ... -- Wait for compaction while pk:info().run_count ~= 1 do fiber.sleep(0.01) end --- ... pk:info().disk.pages --- - 6 ... space.index.pk.options.page_size --- - 16384 ... pk:info().run_count --- - 1 ... space.index.pk.options.bloom_fpr --- - 0.2 ... est_bsize / page_size == pk:info().disk.pages --- - true ... space:drop() --- ... -- -- Change range size to trigger split. -- space = box.schema.space.create('test', {engine = 'vinyl'}) --- ... page_size = 64 --- ... range_size = page_size * 15 --- ... pk = space:create_index('pk', {page_size = page_size, range_size = range_size, run_count_per_level = 1}) --- ... pad = '' --- ... for i = 1, 64 do pad = pad..(i % 10) end --- ... for i = 1, 8 do space:replace{i, pad} end --- ... box.snapshot() --- - ok ... -- Decrease the range_size and dump many runs to trigger split. pk:alter({range_size = page_size * 2}) --- ... while pk:info().range_count < 2 do space:replace{1, pad} box.snapshot() fiber.sleep(0.01) end --- ... space:drop() --- ... -- gh-2673 vinyl cursor uses already freed VinylIndex and vy_index s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... i0 = s:create_index('i0', {parts = {1, 'string'}}) --- ... i1 = s:create_index('i1', {unique = false, parts = {2, 'string', 3, 'string', 4, 'string'}}) --- ... i2 = s:create_index('i2', {parts = {2, 'string', 4, 'string', 3, 'string', 1, 'string'}}) --- ... i3 = s:create_index('i3', {parts = {2, 'string', 4, 'string', 6, 'unsigned', 1, 'string'}}) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... 
for j = 1, 60 do s:truncate() self = {} self.end2018 = os.time{year=2018, month=12, day=31, hour=23, min=59, sec=59} self.start2019 = os.time{year=2019, month=1, day=1, hour=0, min=0, sec=0} self.week1end = os.time{year=2019, month=1, day=6, hour=23, min=59, sec=59} self.week2start = os.time{year=2019, month=1, day=7, hour=0, min=0, sec=0} local iface1 = s:insert{'id1', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.end2018} local iface2 = s:insert{'id2', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.start2019} local iface3 = s:insert{'id3', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.week1end} local iface4 = s:insert{'id4', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.week2start} local f, ctx, state = s.index.i3:pairs({'uid1', 'fid1', 0x7FFFFFFF}, { iterator='LE' }) state, tup = f(ctx, state) state, tup = f(ctx, state) end ; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s:drop() --- ... -- gh-2342 cursors after death of index create_iterator = require('utils').create_iterator --- ... s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = s:create_index('primary', { parts = { 1, 'uint' } }) --- ... sk = s:create_index('sec', { parts = { 2, 'uint' } }) --- ... s:replace{1, 2, 3} --- - [1, 2, 3] ... s:replace{4, 5, 6} --- - [4, 5, 6] ... s:replace{7, 8, 9} --- - [7, 8, 9] ... itr = create_iterator(s, {}) --- ... f, ctx, state = s.index.sec:pairs({5}, { iterator='LE' }) --- ... itr.next() --- - [1, 2, 3] ... f(ctx, state) --- - - [4, 5, 6] ... s:drop() --- ... itr.next() --- - null ... f(ctx, state) --- - null ... f = nil --- ... ctx = nil --- ... state = nil --- ... itr = nil --- ... collectgarbage('collect') --- - 0 ... -- gh-2342 drop space if transaction is in progress ch = fiber.channel(1) --- ... s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = s:create_index('primary', { parts = { 1, 'uint' } }) --- ... sk = s:create_index('sec', { parts = { 2, 'uint' } }) --- ... box.begin() --- ... 
s:replace({1, 2, 3}) --- - [1, 2, 3] ... s:replace({4, 5, 6}) --- - [4, 5, 6] ... s:replace({7, 8, 9}) --- - [7, 8, 9] ... s:upsert({10, 11, 12}, {}) --- ... _ = fiber.create(function () s:drop() ch:put(true) end) --- ... ch:get() --- - true ... box.commit() --- - error: Transaction has been aborted by conflict ... s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = s:create_index('primary', { parts = { 1, 'uint' } }) --- ... sk = s:create_index('sec', { parts = { 2, 'uint' } }) --- ... box.begin() --- ... s:replace{1, 2, 3} --- - [1, 2, 3] ... s:replace{4, 5, 6} --- - [4, 5, 6] ... s:replace{7, 8, 9} --- - [7, 8, 9] ... _ = fiber.create(function () s:drop() ch:put(true) end) --- ... ch:get() --- - true ... box.commit() --- - error: Transaction has been aborted by conflict ... -- check invalid field types space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary') --- ... index = space:create_index('test', { type = 'tree', parts = { 2, 'nosuchtype' }}) --- - error: 'Wrong index parts: unknown field type; expected field1 id (number), field1 type (string), ...' ... index = space:create_index('test', { type = 'tree', parts = { 2, 'any' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field type ''any'' is not supported' ... index = space:create_index('test', { type = 'tree', parts = { 2, 'array' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field type ''array'' is not supported' ... index = space:create_index('test', { type = 'tree', parts = { 2, 'map' }}) --- - error: 'Can''t create or modify index ''test'' in space ''test'': field type ''map'' is not supported' ... space:drop() --- ... -- -- Allow compatible changes of a non-empty vinyl space. -- space = box.schema.create_space('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary') --- ... space:replace{1} --- - [1] ... space:replace{2} --- - [2] ... format = {} --- ... 
format[1] = {name = 'field1'} --- ... format[2] = {name = 'field2', is_nullable = true} --- ... format[3] = {name = 'field3', is_nullable = true} --- ... space:format(format) --- ... t1 = space:replace{3,4,5} --- ... t2 = space:replace{4,5} --- ... t1.field1, t1.field2, t1.field3 --- - 3 - 4 - 5 ... t2.field1, t2.field2, t2.field3 --- - 4 - 5 - null ... t1 = pk:get{1} --- ... t1.field1, t1.field2, t1.field3 --- - 1 - null - null ... box.snapshot() --- - ok ... t1 = pk:get{2} --- ... t1.field1, t1.field2, t1.field3 --- - 2 - null - null ... -- Forbid incompatible change. format[2].is_nullable = false --- ... space:format(format) --- - error: Vinyl does not support changing space format of a non-empty space ... space:drop() --- ... -- gh-3019 default index options box.space._space:insert{512, 1, 'test', 'vinyl', 0, setmetatable({}, {__serialize = 'map'}), {}} --- - [512, 1, 'test', 'vinyl', 0, {}, []] ... box.space._index:insert{512, 0, 'pk', 'tree', {unique = true}, {{0, 'unsigned'}}} --- - [512, 0, 'pk', 'tree', {'unique': true}, [[0, 'unsigned']]] ... box.space.test.index.pk --- - unique: true parts: - type: unsigned is_nullable: false fieldno: 1 id: 0 space_id: 512 options: page_size: 8192 run_count_per_level: 2 run_size_ratio: 3.5 bloom_fpr: 0.05 range_size: 1073741824 name: pk type: TREE ... box.space.test:drop() --- ... -- gh-2449 change 'unique' index property from true to false s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... _ = s:create_index('primary') --- ... _ = s:create_index('secondary', {unique = true, parts = {2, 'unsigned'}}) --- ... s:insert{1, 10} --- - [1, 10] ... s.index.secondary:alter{unique = false} -- ok --- ... s.index.secondary.unique --- - false ... s.index.secondary:alter{unique = true} -- error --- - error: Vinyl does not support changing the definition of an index ... s.index.secondary.unique --- - false ... s:insert{2, 10} --- - [2, 10] ... s.index.secondary:select(10) --- - - [1, 10] - [2, 10] ... s:drop() --- ... 
-- -- gh-3169: vinyl index key definition can not be altered even if -- the index is empty. -- s = box.schema.space.create('vinyl', {engine = 'vinyl'}) --- ... i = s:create_index('pk') --- ... i:alter{parts = {1, 'integer'}} --- - error: Vinyl does not support changing the definition of an index ... _ = s:replace{-1} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... _ = s:replace{1} --- ... _ = s:replace{-2} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... _ = s:replace{3} --- ... _ = s:replace{-3} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... s:select{} --- - - [1] - [3] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/replica_quota.lua0000664000000000000000000000042413306560010021653 0ustar rootroot#!/usr/bin/env tarantool box.cfg({ listen = os.getenv("LISTEN"), replication = os.getenv("MASTER"), vinyl_memory = 1024 * 1024, vinyl_timeout = 0.001, }) require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/gc.result0000664000000000000000000000637713306565107020202 0ustar rootroottest_run = require('test_run').new() --- ... fiber = require('fiber') --- ... fio = require('fio') --- ... test_run:cleanup_cluster() --- ... -- Make each snapshot trigger garbage collection. default_checkpoint_count = box.cfg.checkpoint_count --- ... box.cfg{checkpoint_count = 1} --- ... -- Temporary space for bumping lsn. temp = box.schema.space.create('temp') --- ... _ = temp:create_index('pk') --- ... s = box.schema.space.create('test', {engine='vinyl'}) --- ... _ = s:create_index('pk', {run_count_per_level=1}) --- ... path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) --- ... function ls_data() return fio.glob(fio.pathjoin(path, '*')) end --- ... function ls_vylog() return fio.glob(fio.pathjoin(box.cfg.vinyl_dir, '*.vylog')) end --- ... 
function gc_info() return box.internal.gc.info() end --- ... function gc() temp:auto_increment{} box.snapshot() end --- ... -- Check that run files are deleted by gc. s:insert{1} box.snapshot() -- dump --- ... s:insert{2} box.snapshot() -- dump + compaction --- ... while s.index.pk:info().run_count > 1 do fiber.sleep(0.01) end -- wait for compaction --- ... gc() --- ... files = ls_data() --- ... #files == 2 or {files, gc_info()} --- - true ... -- Check that gc keeps the current and previous log files. files = ls_vylog() --- ... #files == 2 or {files, gc_info()} --- - true ... -- Check that files left from dropped indexes are deleted by gc. s:drop() --- ... gc() --- ... files = ls_data() --- ... #files == 0 or {files, gc_info()} --- - true ... -- -- Check that vylog files are removed if vinyl is not used. -- files = ls_vylog() --- ... #files == 2 or {files, gc_info()} --- - true ... -- All records should have been purged from the log by now -- so we should only keep the previous log file. gc() --- ... files = ls_vylog() --- ... #files == 1 or {files, gc_info()} --- - true ... -- The previous log file should be removed by the next gc. gc() --- ... files = ls_vylog() --- ... #files == 0 or {files, gc_info()} --- - true ... temp:drop() --- ... box.cfg{checkpoint_count = default_checkpoint_count} --- ... -- -- Check that compacted run files that are not referenced -- by any checkpoint are deleted immediately (gh-3407). -- test_run:cmd("create server test with script='vinyl/low_quota.lua'") --- - true ... test_run:cmd("start server test with args='1048576'") --- - true ... test_run:cmd('switch test') --- - true ... box.cfg{checkpoint_count = 2} --- ... fio = require('fio') --- ... fiber = require('fiber') --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {run_count_per_level = 3}) --- ... function count_runs() return #fio.glob(fio.pathjoin(box.cfg.vinyl_dir, s.id, s.index.pk.id, '*.run')) end --- ... _ = s:replace{1} --- ... 
box.snapshot() --- - ok ... _ = s:replace{2} --- ... box.snapshot() --- - ok ... count_runs() -- 2 --- - 2 ... for i = 1, 20 do s:replace{i, string.rep('x', 100 * 1024)} end --- ... while s.index.pk:info().disk.compact.count < 1 do fiber.sleep(0.001) end --- ... s.index.pk:info().disk.compact.count -- 1 --- - 1 ... count_runs() -- 3 (compacted runs created after checkpoint are deleted) --- - 3 ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server test") --- - true ... test_run:cmd("cleanup server test") --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/stress.result0000664000000000000000000000034613306560010021106 0ustar rootroot#!/usr/bin/env tarantool --- ... test_run = require('test_run').new() --- ... require('stress').stress(10) --- ... test_run:cmd('restart server default') require('stress').stress(10) --- ... test_run:cmd('restart server default') tarantool_1.9.1.26.g63eb81e3c/test/vinyl/recover.result0000664000000000000000000001634113306565107021246 0ustar rootrootfiber = require('fiber') --- ... test_run = require('test_run').new() --- ... -- Temporary table to restore variables after restart. var = box.schema.space.create('var') --- ... _ = var:create_index('primary', {parts = {1, 'string'}}) --- ... -- Empty space. s1 = box.schema.space.create('test1', {engine = 'vinyl'}) --- ... _ = s1:create_index('pk') --- ... -- Truncated space. s2 = box.schema.space.create('test2', {engine = 'vinyl'}) --- ... _ = s2:create_index('pk') --- ... _ = s2:insert{123} --- ... s2:truncate() --- ... -- Data space. s3 = box.schema.space.create('test3', {engine='vinyl'}) --- ... _ = s3:create_index('primary') --- ... _ = s3:create_index('secondary', {unique = false, parts = {2, 'string'}}) --- ... for i = 0, 4 do s3:insert{i, 'test' .. i} end --- ... -- Flush data to disk. box.snapshot() --- - ok ... -- Write some data to memory. for i = 5, 9 do s3:insert{i, 'test' .. i} end --- ... -- Concurrent index creation (gh-2288). 
ch = fiber.channel(2) --- ... s4 = box.schema.space.create('test4', {engine = 'vinyl'}) --- ... s5 = box.schema.space.create('test5', {engine = 'vinyl'}) --- ... _ = fiber.create(function() s4:create_index('i1') s4:create_index('i2') ch:put(true) end) --- ... _ = fiber.create(function() s5:create_index('i1') s5:create_index('i2') ch:put(true) end) --- ... ch:get() --- - true ... ch:get() --- - true ... s4:insert{44} --- - [44] ... s5:insert{55} --- - [55] ... -- Remember stats before restarting the server. _ = var:insert{'vyinfo', s3.index.primary:info()} --- ... test_run:cmd('restart server default') s1 = box.space.test1 --- ... s2 = box.space.test2 --- ... s3 = box.space.test3 --- ... s4 = box.space.test4 --- ... s5 = box.space.test5 --- ... var = box.space.var --- ... -- Check space contents. s1:select() --- - [] ... s2:select() --- - [] ... s3.index.primary:select() --- - - [0, 'test0'] - [1, 'test1'] - [2, 'test2'] - [3, 'test3'] - [4, 'test4'] - [5, 'test5'] - [6, 'test6'] - [7, 'test7'] - [8, 'test8'] - [9, 'test9'] ... s3.index.secondary:select() --- - - [0, 'test0'] - [1, 'test1'] - [2, 'test2'] - [3, 'test3'] - [4, 'test4'] - [5, 'test5'] - [6, 'test6'] - [7, 'test7'] - [8, 'test8'] - [9, 'test9'] ... s4.index.i1:select() --- - - [44] ... s4.index.i2:select() --- - - [44] ... s5.index.i1:select() --- - - [55] ... s5.index.i2:select() --- - - [55] ... -- Check that stats didn't change after recovery. vyinfo1 = var:get('vyinfo')[2] --- ... vyinfo2 = s3.index.primary:info() --- ... vyinfo1.memory.rows == vyinfo2.memory.rows --- - true ... vyinfo1.memory.bytes == vyinfo2.memory.bytes --- - true ... vyinfo1.disk.rows == vyinfo2.disk.rows --- - true ... vyinfo1.disk.bytes == vyinfo2.disk.bytes --- - true ... vyinfo1.disk.bytes_compressed == vyinfo2.disk.bytes_compressed --- - true ... vyinfo1.disk.pages == vyinfo2.disk.pages --- - true ... vyinfo1.run_count == vyinfo2.run_count --- - true ... vyinfo1.range_count == vyinfo2.range_count --- - true ... 
s1:drop() --- ... s2:drop() --- ... s3:drop() --- ... s4:drop() --- ... s5:drop() --- ... var:drop() --- ... test_run:cmd('create server force_recovery with script="vinyl/force_recovery.lua"') --- - true ... test_run:cmd('start server force_recovery') --- - true ... test_run:cmd('switch force_recovery') --- - true ... fio = require'fio' --- ... test = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = test:create_index('pk') --- ... for i = 0, 9999 do test:replace({i, i, string.rep('a', 512)}) end --- ... box.snapshot() --- - ok ... for i = 10000, 11999 do test:delete({i - 10000}) end --- ... box.snapshot() --- - ok ... for i = 12000, 13999 do test:upsert({i - 10000, i, string.rep('a', 128)}, {{'+', 2, 5}}) end --- ... box.snapshot() --- - ok ... for _, f in pairs(fio.glob(box.cfg.vinyl_dir .. '/' .. test.id .. '/0/*.index')) do fio.unlink(f) end --- ... _ = box.schema.space.create('info') --- ... _ = box.space.info:create_index('pk') --- ... _ = box.space.info:insert{1, box.space.test.index.pk:info()} --- ... test2 = box.schema.space.create('test2', {engine = 'vinyl'}) --- ... _ = test2:create_index('pk') --- ... _ = test2:create_index('sec', {parts = {4, 'unsigned', 2, 'string'}}) --- ... test2:replace({1, 'a', 2, 3}) --- - [1, 'a', 2, 3] ... test2:replace({2, 'd', 4, 1}) --- - [2, 'd', 4, 1] ... test2:replace({3, 'c', 6, 7}) --- - [3, 'c', 6, 7] ... test2:replace({4, 'b', 6, 3}) --- - [4, 'b', 6, 3] ... box.snapshot() --- - ok ... for _, f in pairs(fio.glob(box.cfg.vinyl_dir .. '/' .. test2.id .. '/0/*.index')) do fio.unlink(f) end --- ... for _, f in pairs(fio.glob(box.cfg.vinyl_dir .. '/' .. test2.id .. '/1/*.index')) do fio.unlink(f) end --- ... test_run = require('test_run').new() --- ... test_run:cmd('switch default') --- - true ... test_run:cmd('stop server force_recovery') --- - true ... test_run:cmd('start server force_recovery') --- - true ... test_run:cmd('switch force_recovery') --- - true ... sum = 0 --- ... 
for k, v in pairs(box.space.test:select()) do sum = sum + v[2] end --- ... -- should be a sum(2005 .. 4004) + sum(4000 .. 9999) = 48006000 sum --- - 48006000 ... -- Check that disk stats are restored after index rebuild (gh-3173). old_info = box.space.info:get(1)[2] --- ... new_info = box.space.test.index.pk:info() --- ... new_info.disk.index_size == old_info.disk.index_size --- - true ... new_info.disk.bloom_size == old_info.disk.bloom_size --- - true ... new_info.disk.rows == old_info.disk.rows --- - true ... new_info.disk.bytes == old_info.disk.bytes --- - true ... new_info.disk.bytes_compressed == old_info.disk.bytes_compressed --- - true ... new_info.disk.pages == old_info.disk.pages --- - true ... new_info.run_count == old_info.run_count --- - true ... new_info.range_count == old_info.range_count --- - true ... box.space.test2:select() --- - - [1, 'a', 2, 3] - [2, 'd', 4, 1] - [3, 'c', 6, 7] - [4, 'b', 6, 3] ... box.space.test2.index.sec:select() --- - - [2, 'd', 4, 1] - [1, 'a', 2, 3] - [4, 'b', 6, 3] - [3, 'c', 6, 7] ... test_run:cmd('switch default') --- - true ... test_run:cmd('stop server force_recovery') --- - true ... test_run:cmd('delete server force_recovery') --- - true ... -- garbaged vy run indexes test_run:cmd('create server force_recovery with script="vinyl/bad_run_indexes.lua"') --- - true ... test_run:cmd('start server force_recovery') --- - true ... test_run:cmd('switch force_recovery') --- - true ... test = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = test:create_index('pk') --- ... for i = 0, 9999 do test:replace({i, i, string.rep('a', 512)}) end --- ... box.snapshot() --- - ok ... for i = 10000, 11999 do test:delete({i - 10000}) end --- ... box.snapshot() --- - ok ... for i = 12000, 13999 do test:upsert({i - 10000, i, string.rep('a', 128)}, {{'+', 2, 5}}) end --- ... box.snapshot() --- - ok ... test_run:cmd('switch default') --- - true ... test_run:cmd('stop server force_recovery') --- - true ... 
test_run:cmd('start server force_recovery') --- - true ... test_run:cmd('switch force_recovery') --- - true ... sum = 0 --- ... for k, v in pairs(box.space.test:select()) do sum = sum + v[2] end --- ... -- should be a sum(2005 .. 4004) + sum(4000 .. 9999) = 48006000 sum --- - 48006000 ... test_run:cmd('switch default') --- - true ... test_run:cmd('stop server force_recovery') --- - true ... test_run:cmd('cleanup server force_recovery') --- - true ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/errinj_gc.test.lua0000664000000000000000000000437213306565107021765 0ustar rootroottest_run = require('test_run').new() fiber = require('fiber') fio = require('fio') errinj = box.error.injection test_run:cleanup_cluster() -- Make each snapshot trigger garbage collection. box.cfg{checkpoint_count = 1} -- Temporary space for bumping lsn. temp = box.schema.space.create('temp') _ = temp:create_index('pk') s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk', {run_count_per_level=1}) path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) function file_count() return #fio.glob(fio.pathjoin(path, '*')) end function gc() temp:auto_increment{} box.snapshot() end -- -- Check that gc retries to delete files left -- from compacted runs. -- errinj.set('ERRINJ_VY_GC', true) s:insert{12345, 'abcdef'} box.snapshot() -- dump s:insert{67890, 'ghijkl'} box.snapshot() -- dump + compaction while s.index.pk:info().run_count > 1 do fiber.sleep(0.01) end -- wait for compaction file_count() gc() file_count() errinj.set('ERRINJ_VY_GC', false) gc() file_count() -- -- Check that gc retries to delete files left -- from dropped indexes. -- errinj.set('ERRINJ_VY_GC', true) s:drop() gc() file_count() errinj.set('ERRINJ_VY_GC', false) gc() file_count() -- -- Check that files left from incomplete runs are deleted -- upon recovery completion. 
-- s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('pk', {run_count_per_level=1}) path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) s:insert{100, '12345'} box.snapshot() -- dump file_count() errinj.set('ERRINJ_VY_RUN_DISCARD', true) errinj.set('ERRINJ_VY_TASK_COMPLETE', true) s:insert{200, '67890'} box.snapshot() -- run file created, but dump fails file_count() test_run:cmd('restart server default') test_run = require('test_run').new() fio = require('fio') default_checkpoint_count = box.cfg.checkpoint_count box.cfg{checkpoint_count = 1} s = box.space.test temp = box.space.temp path = fio.pathjoin(box.cfg.vinyl_dir, tostring(s.id), tostring(s.index.pk.id)) function file_count() return #fio.glob(fio.pathjoin(path, '*')) end function gc() temp:auto_increment{} box.snapshot() end file_count() s:select() -- -- Cleanup. -- s:drop() gc() file_count() temp:drop() box.cfg{checkpoint_count = default_checkpoint_count} tarantool_1.9.1.26.g63eb81e3c/test/vinyl/ddl.test.lua0000664000000000000000000002604113306565107020563 0ustar rootrootfiber = require('fiber') test_run = require('test_run').new() -- sanity checks space = box.schema.space.create('test', {engine = 'vinyl' }) space:create_index('pk', {range_size = 0}) space:create_index('pk', {page_size = 0}) space:create_index('pk', {page_size = 8192, range_size = 4096}) space:create_index('pk', {run_count_per_level = 0}) space:create_index('pk', {run_size_ratio = 1}) space:create_index('pk', {bloom_fpr = 0}) space:create_index('pk', {bloom_fpr = 1.1}) space:drop() -- space secondary index create space = box.schema.space.create('test', { engine = 'vinyl' }) index1 = space:create_index('primary') index2 = space:create_index('secondary') space:drop() -- space index create hash space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', {type = 'hash'}) space:drop() -- creation of a new index and altering the definition of an existing -- 
index are unsupported for non-empty spaces space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1}) -- fail because of wrong tuple format {1}, but need {1, ...} index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) #box.space._index:select({space.id}) box.space._index:get{space.id, 0}[6] space:drop() space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1, 2}) index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) #box.space._index:select({space.id}) box.space._index:get{space.id, 0}[6] space:drop() space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary') space:insert({1, 2}) index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) #box.space._index:select({space.id}) box.space._index:get{space.id, 0}[6] space:delete({1}) -- must fail because vy_mems have data index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) box.snapshot() while space.index.primary:info().rows ~= 0 do fiber.sleep(0.01) end -- After a dump REPLACE + DELETE = nothing, so the space is empty -- but an index can not be altered. index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) -- Space format still can be altered. 
format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'unsigned'} space:format(format) space:drop() space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', { run_count_per_level = 2 }) space:insert({1, 2}) box.snapshot() space:delete({1}) box.snapshot() while space.index.primary:info().run_count ~= 2 do fiber.sleep(0.01) end -- must fail because vy_runs have data index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) -- After compaction the REPLACE + DELETE + DELETE = nothing, so -- the space is now empty and can be altered. space:delete({1}) -- Make sure the run is big enough to trigger compaction. space:replace({2, 3}) space:delete({2}) box.snapshot() -- Wait until the dump is finished. while space.index.primary:info().rows ~= 0 do fiber.sleep(0.01) end index2 = space:create_index('secondary', { parts = {2, 'unsigned'} }) -- Can not alter an index even if it becames empty after dump. space.index.primary:alter({parts = {1, 'unsigned', 2, 'unsigned'}}) space:drop() -- -- gh-1709: need error on altering space -- space = box.schema.space.create('test', {engine='vinyl'}) pk = space:create_index('pk', {parts = {1, 'unsigned'}}) space:auto_increment{1} space:auto_increment{2} space:auto_increment{3} box.space._index:replace{space.id, 0, 'pk', 'tree', {unique=true}, {{0, 'unsigned'}, {1, 'unsigned'}}} space:select{} space:drop() -- Allow to specify various bloom fprs per index. 
space = box.schema.space.create('test', {engine='vinyl'}) pk = space:create_index('pk', {bloom_fpr = 0.1}) sec = space:create_index('sec', {bloom_fpr = 0.2}) third = space:create_index('third', {bloom_fpr = 0.3}) pk.options.bloom_fpr sec.options.bloom_fpr third.options.bloom_fpr space:drop() -- -- gh-2109: allow alter some opts of not empty indexes -- -- Forst, check that we can decrease run_count_per_level and it -- triggers compaction after next box.snapshot(). Ensure that the -- runs with different page_sizes and bloom_fprs are compacted -- correctly. -- space = box.schema.space.create('test', {engine='vinyl'}) page_size = 8192 range_size = 1024 * 1024 * 1024 bloom_fpr = 0.1 pk = space:create_index('pk', {run_count_per_level = 10, page_size = page_size, range_size = range_size, bloom_fpr = bloom_fpr}) pad_size = page_size / 5 pad = string.rep('I', pad_size) -- Create 4 pages with sizes 'page_size' for i = 1, 20 do space:replace{i, pad} end est_bsize = pad_size * 20 box.snapshot() pk:info().disk.pages space.index.pk.options.page_size pk:info().run_count space.index.pk.options.bloom_fpr -- Change page_size and trigger compaction page_size = page_size * 2 bloom_fpr = bloom_fpr * 2 pk:alter({page_size = page_size, run_count_per_level = 1, bloom_fpr = bloom_fpr}) pad_size = page_size / 5 pad = string.rep('I', pad_size) -- Create 4 pages with new sizes in new run for i = 1, 20 do space:replace{i + 20, pad} end est_bsize = est_bsize + pad_size * 20 box.snapshot() -- Wait for compaction while pk:info().run_count ~= 1 do fiber.sleep(0.01) end pk:info().disk.pages space.index.pk.options.page_size pk:info().run_count space.index.pk.options.bloom_fpr est_bsize / page_size == pk:info().disk.pages space:drop() -- -- Change range size to trigger split. 
-- space = box.schema.space.create('test', {engine = 'vinyl'}) page_size = 64 range_size = page_size * 15 pk = space:create_index('pk', {page_size = page_size, range_size = range_size, run_count_per_level = 1}) pad = '' for i = 1, 64 do pad = pad..(i % 10) end for i = 1, 8 do space:replace{i, pad} end box.snapshot() -- Decrease the range_size and dump many runs to trigger split. pk:alter({range_size = page_size * 2}) while pk:info().range_count < 2 do space:replace{1, pad} box.snapshot() fiber.sleep(0.01) end space:drop() -- gh-2673 vinyl cursor uses already freed VinylIndex and vy_index s = box.schema.space.create('test', {engine = 'vinyl'}) i0 = s:create_index('i0', {parts = {1, 'string'}}) i1 = s:create_index('i1', {unique = false, parts = {2, 'string', 3, 'string', 4, 'string'}}) i2 = s:create_index('i2', {parts = {2, 'string', 4, 'string', 3, 'string', 1, 'string'}}) i3 = s:create_index('i3', {parts = {2, 'string', 4, 'string', 6, 'unsigned', 1, 'string'}}) test_run:cmd("setopt delimiter ';'") for j = 1, 60 do s:truncate() self = {} self.end2018 = os.time{year=2018, month=12, day=31, hour=23, min=59, sec=59} self.start2019 = os.time{year=2019, month=1, day=1, hour=0, min=0, sec=0} self.week1end = os.time{year=2019, month=1, day=6, hour=23, min=59, sec=59} self.week2start = os.time{year=2019, month=1, day=7, hour=0, min=0, sec=0} local iface1 = s:insert{'id1', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.end2018} local iface2 = s:insert{'id2', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.start2019} local iface3 = s:insert{'id3', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.week1end} local iface4 = s:insert{'id4', 'uid1', 'iid1', 'fid1', {1, 2, 3, 4}, self.week2start} local f, ctx, state = s.index.i3:pairs({'uid1', 'fid1', 0x7FFFFFFF}, { iterator='LE' }) state, tup = f(ctx, state) state, tup = f(ctx, state) end ; test_run:cmd("setopt delimiter ''"); s:drop() -- gh-2342 cursors after death of index create_iterator = require('utils').create_iterator s = 
box.schema.space.create('test', { engine = 'vinyl' }) pk = s:create_index('primary', { parts = { 1, 'uint' } }) sk = s:create_index('sec', { parts = { 2, 'uint' } }) s:replace{1, 2, 3} s:replace{4, 5, 6} s:replace{7, 8, 9} itr = create_iterator(s, {}) f, ctx, state = s.index.sec:pairs({5}, { iterator='LE' }) itr.next() f(ctx, state) s:drop() itr.next() f(ctx, state) f = nil ctx = nil state = nil itr = nil collectgarbage('collect') -- gh-2342 drop space if transaction is in progress ch = fiber.channel(1) s = box.schema.space.create('test', { engine = 'vinyl' }) pk = s:create_index('primary', { parts = { 1, 'uint' } }) sk = s:create_index('sec', { parts = { 2, 'uint' } }) box.begin() s:replace({1, 2, 3}) s:replace({4, 5, 6}) s:replace({7, 8, 9}) s:upsert({10, 11, 12}, {}) _ = fiber.create(function () s:drop() ch:put(true) end) ch:get() box.commit() s = box.schema.space.create('test', { engine = 'vinyl' }) pk = s:create_index('primary', { parts = { 1, 'uint' } }) sk = s:create_index('sec', { parts = { 2, 'uint' } }) box.begin() s:replace{1, 2, 3} s:replace{4, 5, 6} s:replace{7, 8, 9} _ = fiber.create(function () s:drop() ch:put(true) end) ch:get() box.commit() -- check invalid field types space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('primary') index = space:create_index('test', { type = 'tree', parts = { 2, 'nosuchtype' }}) index = space:create_index('test', { type = 'tree', parts = { 2, 'any' }}) index = space:create_index('test', { type = 'tree', parts = { 2, 'array' }}) index = space:create_index('test', { type = 'tree', parts = { 2, 'map' }}) space:drop() -- -- Allow compatible changes of a non-empty vinyl space. 
-- space = box.schema.create_space('test', { engine = 'vinyl' }) pk = space:create_index('primary') space:replace{1} space:replace{2} format = {} format[1] = {name = 'field1'} format[2] = {name = 'field2', is_nullable = true} format[3] = {name = 'field3', is_nullable = true} space:format(format) t1 = space:replace{3,4,5} t2 = space:replace{4,5} t1.field1, t1.field2, t1.field3 t2.field1, t2.field2, t2.field3 t1 = pk:get{1} t1.field1, t1.field2, t1.field3 box.snapshot() t1 = pk:get{2} t1.field1, t1.field2, t1.field3 -- Forbid incompatible change. format[2].is_nullable = false space:format(format) space:drop() -- gh-3019 default index options box.space._space:insert{512, 1, 'test', 'vinyl', 0, setmetatable({}, {__serialize = 'map'}), {}} box.space._index:insert{512, 0, 'pk', 'tree', {unique = true}, {{0, 'unsigned'}}} box.space.test.index.pk box.space.test:drop() -- gh-2449 change 'unique' index property from true to false s = box.schema.space.create('test', { engine = 'vinyl' }) _ = s:create_index('primary') _ = s:create_index('secondary', {unique = true, parts = {2, 'unsigned'}}) s:insert{1, 10} s.index.secondary:alter{unique = false} -- ok s.index.secondary.unique s.index.secondary:alter{unique = true} -- error s.index.secondary.unique s:insert{2, 10} s.index.secondary:select(10) s:drop() -- -- gh-3169: vinyl index key definition can not be altered even if -- the index is empty. 
-- s = box.schema.space.create('vinyl', {engine = 'vinyl'}) i = s:create_index('pk') i:alter{parts = {1, 'integer'}} _ = s:replace{-1} _ = s:replace{1} _ = s:replace{-2} _ = s:replace{3} _ = s:replace{-3} s:select{} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/bad_run_indexes.lua0000664000000000000000000000036513306560010022160 0ustar rootroot#!/usr/bin/env tarantool box.error.injection.set('ERRINJ_VYRUN_INDEX_GARBAGE', true) box.cfg { listen = os.getenv("LISTEN"), vinyl_memory = 128 * 1024 * 1024, force_recovery = true, } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/quota_timeout.result0000664000000000000000000000625213306565107022500 0ustar rootroottest_run = require('test_run').new() --- ... test_run:cmd("create server test with script='vinyl/low_quota.lua'") --- - true ... test_run:cmd("start server test with args='1048576'") --- - true ... test_run:cmd('switch test') --- - true ... fiber = require 'fiber' --- ... box.cfg{vinyl_timeout=0.01} --- ... box.error.injection.set('ERRINJ_VY_SCHED_TIMEOUT', 0.01) --- - ok ... -- -- Check that a transaction is aborted on timeout if it exceeds -- quota and the scheduler doesn't manage to free memory. -- box.error.injection.set('ERRINJ_VY_RUN_WRITE', true) --- - ok ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... pad = string.rep('x', 2 * box.cfg.vinyl_memory / 3) --- ... _ = s:auto_increment{pad} --- ... s:count() --- - 1 ... box.info.vinyl().quota.used --- - 748241 ... -- Since the following operation requires more memory than configured -- and dump is disabled, it should fail with ER_VY_QUOTA_TIMEOUT. _ = s:auto_increment{pad} --- - error: Timed out waiting for Vinyl memory quota ... s:count() --- - 1 ... box.info.vinyl().quota.used --- - 748241 ... box.error.injection.set('ERRINJ_VY_RUN_WRITE', false) --- - ok ... fiber.sleep(0.01) -- wait for scheduler to unthrottle --- ... 
-- -- Check that there's a warning in the log if a transaction -- waits for quota for more than too_long_threshold seconds. -- box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0.01) --- - ok ... box.cfg{vinyl_timeout=60} --- ... box.cfg{too_long_threshold=0.01} --- ... _ = s:auto_increment{pad} --- ... _ = s:auto_increment{pad} --- ... test_run:cmd("push filter '[0-9.]+ sec' to ' sec'") --- - true ... test_run:grep_log('test', 'waited for .* quota for too long.*') --- - 'waited for 699089 bytes of vinyl memory quota for too long: sec' ... test_run:cmd("clear filter") --- - true ... box.error.injection.set('ERRINJ_VY_RUN_WRITE_TIMEOUT', 0) --- - ok ... s:truncate() --- ... box.snapshot() --- - ok ... -- -- Check that exceeding quota doesn't hang the scheduler -- in case there's nothing to dump. -- -- The following operation should fail instantly irrespective -- of the value of 'vinyl_timeout' (gh-3291). -- box.info.vinyl().quota.used == 0 --- - true ... box.cfg{vinyl_timeout = 9000} --- ... pad = string.rep('x', box.cfg.vinyl_memory) --- ... _ = s:auto_increment{pad} --- - error: Failed to allocate 1048615 bytes in lsregion for vinyl transaction ... s:drop() --- ... box.snapshot() --- - ok ... -- -- Check that exceeding quota triggers dump of all spaces. -- s1 = box.schema.space.create('test1', {engine = 'vinyl'}) --- ... _ = s1:create_index('pk') --- ... s2 = box.schema.space.create('test2', {engine = 'vinyl'}) --- ... _ = s2:create_index('pk') --- ... pad = string.rep('x', 64) --- ... _ = s1:auto_increment{pad} --- ... s1.index.pk:info().memory.bytes > 0 --- - true ... pad = string.rep('x', box.cfg.vinyl_memory - string.len(pad)) --- ... _ = s2:auto_increment{pad} --- ... while s1.index.pk:info().disk.dump.count == 0 do fiber.sleep(0.01) end --- ... s1.index.pk:info().memory.bytes == 0 --- - true ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server test") --- - true ... test_run:cmd("cleanup server test") --- - true ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/layout.test.lua0000664000000000000000000000341013306565107021330 0ustar rootroottest_run = require('test_run').new() test_run:cmd('restart server default with cleanup=1') fiber = require 'fiber' fio = require 'fio' xlog = require 'xlog' fun = require 'fun' space = box.schema.space.create('test', {engine='vinyl'}) _ = space:create_index('pk', {parts = {{1, 'string', collation = 'unicode'}}, run_count_per_level=3}) _ = space:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}, run_count_per_level=3}) -- Empty run space:insert{'ЁЁЁ', 777} space:delete{'ЁЁЁ'} box.snapshot() space:replace{'ЭЭЭ', box.NULL} space:replace{'эээ', box.NULL} space:replace{'ёёё', box.NULL} box.snapshot() space:replace{'ёёё', 123} space:replace{'ЮЮЮ', 456} space:replace{'ююю', 789} box.snapshot() space:drop() -- Get the list of files from the last checkpoint. -- convert names to relative -- work_dir = fio.cwd() files = box.backup.start() -- use abspath to work correclty with symlinks -- for i, name in pairs(files) do files[i] = fio.abspath(files[i]):sub(#work_dir + 2) end table.sort(files) -- files result = {} test_run:cmd("setopt delimiter ';'") for i, path in pairs(files) do local suffix = string.gsub(path, '.*%.', '') if suffix ~= 'snap' and suffix ~= 'xlog' then local rows = {} local i = 1 for lsn, row in xlog.pairs(path) do rows[i] = row i = i + 1 end table.insert(result, { fio.basename(path), rows }) end end; test_run:cmd("setopt delimiter ''"); box.backup.stop() -- resume the garbage collection process test_run:cmd("push filter 'timestamp: .*' to 'timestamp: '") test_run:cmd("push filter 'offset: .*' to 'offset: '") test_run:cmd("push filter 'bloom_filter: .*' to 'bloom_filter: '") result test_run:cmd("clear filter") tarantool_1.9.1.26.g63eb81e3c/test/vinyl/misc.result0000664000000000000000000000566513306565107020543 0ustar rootrootfiber = require('fiber') --- ... 
-- -- gh-2784: do not validate space formatted but not indexed fields -- in surrogate statements. -- -- At first, test simple surrogate delete generated from a key. format = {{name = 'a', type = 'unsigned'}, {name = 'b', type = 'unsigned'}} --- ... s = box.schema.space.create('test', {engine = 'vinyl', format = format}) --- ... _ = s:create_index('pk') --- ... s:insert{1, 1} --- - [1, 1] ... -- Type of a second field in a surrogate tuple must be NULL but -- with UNSIGNED type, specified in a tuple_format. It is -- possible, because only indexed fields are used in surrogate -- tuples. s:delete(1) --- ... s:drop() --- ... -- Test select after snapshot. This select gets surrogate -- tuples from a disk. Here NULL also can be stored in formatted, -- but not indexed field. format = {} --- ... format[1] = {name = 'a', type = 'unsigned'} --- ... format[2] = {name = 'b', type = 'unsigned'} --- ... format[3] = {name = 'c', type = 'unsigned'} --- ... s = box.schema.space.create('test', {engine = 'vinyl', format = format}) --- ... _ = s:create_index('pk') --- ... _ = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... s:insert{1, 1, 1} --- - [1, 1, 1] ... box.snapshot() --- - ok ... s:delete(1) --- ... box.snapshot() --- - ok ... s:select() --- - [] ... s:drop() --- ... -- -- gh-2983: ensure the transaction associated with a fiber -- is automatically rolled back if the fiber stops. -- s = box.schema.create_space('test', { engine = 'vinyl' }) --- ... _ = s:create_index('pk') --- ... tx1 = box.info.vinyl().tx --- ... ch = fiber.channel(1) --- ... _ = fiber.create(function() box.begin() s:insert{1} ch:put(true) end) --- ... ch:get() --- - true ... tx2 = box.info.vinyl().tx --- ... tx2.commit - tx1.commit -- 0 --- - 0 ... tx2.rollback - tx1.rollback -- 1 --- - 1 ... s:drop() --- ... -- -- gh-3158: check of duplicates is skipped if the index -- is contained by another unique index which is checked. -- s = box.schema.create_space('test', {engine = 'vinyl'}) --- ... 
i1 = s:create_index('i1', {unique = true, parts = {1, 'unsigned', 2, 'unsigned'}}) --- ... i2 = s:create_index('i2', {unique = true, parts = {2, 'unsigned', 1, 'unsigned'}}) --- ... i3 = s:create_index('i3', {unique = true, parts = {3, 'unsigned', 4, 'unsigned', 5, 'unsigned'}}) --- ... i4 = s:create_index('i4', {unique = true, parts = {5, 'unsigned', 4, 'unsigned'}}) --- ... i5 = s:create_index('i5', {unique = true, parts = {4, 'unsigned', 5, 'unsigned', 1, 'unsigned'}}) --- ... i6 = s:create_index('i6', {unique = true, parts = {4, 'unsigned', 6, 'unsigned', 5, 'unsigned'}}) --- ... i7 = s:create_index('i7', {unique = true, parts = {6, 'unsigned'}}) --- ... s:insert{1, 1, 1, 1, 1, 1} --- - [1, 1, 1, 1, 1, 1] ... i1:info().lookup -- 1 --- - 1 ... i2:info().lookup -- 0 --- - 0 ... i3:info().lookup -- 0 --- - 0 ... i4:info().lookup -- 1 --- - 1 ... i5:info().lookup -- 0 --- - 0 ... i6:info().lookup -- 0 --- - 0 ... i7:info().lookup -- 1 --- - 1 ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/force_recovery.lua0000664000000000000000000000027013306560010022036 0ustar rootroot#!/usr/bin/env tarantool box.cfg { listen = os.getenv("LISTEN"), vinyl_memory = 128 * 1024 * 1024, force_recovery = true, } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/cache.result0000664000000000000000000003154513306565107020647 0ustar rootroot#!/usr/bin/env tarantool --- ... test_run = require('test_run').new() --- ... test_run:cmd("setopt delimiter ';'") --- - true ... stat = nil function stat_changed() local old_stat = stat local new_stat = box.space.test.index.pk:info() stat = new_stat return (old_stat == nil or old_stat.memory.iterator.lookup ~= new_stat.memory.iterator.lookup or old_stat.memory.iterator.get.rows ~= new_stat.memory.iterator.get.rows or old_stat.disk.iterator.lookup ~= new_stat.disk.iterator.lookup or old_stat.disk.iterator.get.rows ~= new_stat.disk.iterator.get.rows) end; --- ... 
test_run:cmd("setopt delimiter ''"); --- - true ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... str = string.rep('!', 100) --- ... for i = 1,1000 do s:insert{i, str} end --- ... box.begin() --- ... t = s:select{} --- ... box.commit() --- ... #t --- - 1000 ... t = s:replace{100, str} --- ... for i = 1,10 do box.begin() t = s:select{} box.commit() end --- ... t = s:replace{200, str} --- ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... str = '' --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{1, 1, 1, str} --- - [1, 1, 1, ''] ... s:replace{1, 2, 1, str} --- - [1, 2, 1, ''] ... s:replace{1, 3, 1, str} --- - [1, 3, 1, ''] ... s:replace{1, 4, 1, str} --- - [1, 4, 1, ''] ... s:replace{2, 1, 2, str} --- - [2, 1, 2, ''] ... s:replace{2, 2, 2, str} --- - [2, 2, 2, ''] ... s:replace{2, 3, 2, str} --- - [2, 3, 2, ''] ... s:replace{2, 4, 2, str} --- - [2, 4, 2, ''] ... s:replace{3, 3, 4} --- - [3, 3, 4] ... box.snapshot() --- - ok ... _ = stat_changed() -- init --- ... box.begin() --- ... s:get{1, 2} --- - [1, 2, 1, ''] ... box.commit() --- ... stat_changed() -- cache miss, true --- - true ... s:get{1, 2} --- - [1, 2, 1, ''] ... stat_changed() -- cache hit, false --- - false ... box.begin() --- ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... box.commit() --- ... stat_changed() -- cache miss, true --- - true ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... stat_changed() -- cache hit, false --- - false ... box.begin() --- ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... box.commit() --- ... stat_changed() -- cache miss, true --- - true ... 
s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... stat_changed() -- cache hit, false --- - false ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... str = '' --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{1, 1, 1, str} --- - [1, 1, 1, ''] ... s:replace{1, 2, 1, str} --- - [1, 2, 1, ''] ... s:replace{1, 3, 1, str} --- - [1, 3, 1, ''] ... s:replace{1, 4, 1, str} --- - [1, 4, 1, ''] ... s:replace{2, 1, 2, str} --- - [2, 1, 2, ''] ... s:replace{2, 2, 2, str} --- - [2, 2, 2, ''] ... s:replace{2, 3, 2, str} --- - [2, 3, 2, ''] ... s:replace{2, 4, 2, str} --- - [2, 4, 2, ''] ... s:replace{3, 3, 4} --- - [3, 3, 4] ... box.snapshot() --- - ok ... _ = stat_changed() -- init --- ... box.begin() --- ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... box.commit() --- ... stat_changed() -- cache miss, true --- - true ... s:get{1, 2} --- - [1, 2, 1, ''] ... stat_changed() -- cache hit, false --- - false ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... stat_changed() -- cache hit, false --- - false ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... stat_changed() -- cache hit, false --- - false ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... str = '' --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{1, 2, 1, str} --- - [1, 2, 1, ''] ... s:replace{1, 3, 1, str} --- - [1, 3, 1, ''] ... s:replace{1, 4, 1, str} --- - [1, 4, 1, ''] ... 
s:replace{2, 1, 2, str} --- - [2, 1, 2, ''] ... s:replace{2, 2, 2, str} --- - [2, 2, 2, ''] ... s:replace{2, 3, 2, str} --- - [2, 3, 2, ''] ... s:replace{2, 4, 2, str} --- - [2, 4, 2, ''] ... s:replace{3, 3, 4} --- - [3, 3, 4] ... box.begin() --- ... s:select{1} --- - - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... box.commit() --- ... s:replace{1, 1, 1, str} --- - [1, 1, 1, ''] ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... s:replace{1, 1, 1} --- - [1, 1, 1] ... s:replace{2, 2, 2} --- - [2, 2, 2] ... s:replace{3, 3, 3} --- - [3, 3, 3] ... s:replace{4, 4, 4} --- - [4, 4, 4] ... s:replace{5, 5, 5} --- - [5, 5, 5] ... box.begin() --- ... pk:min() --- - [1, 1, 1] ... pk:max() --- - [5, 5, 5] ... box.commit() --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{6, 6, 6} --- - [6, 6, 6] ... pk:min() --- - [0, 0, 0] ... pk:max() --- - [6, 6, 6] ... s:drop() --- ... -- Same test w/o begin/end s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... str = string.rep('!', 100) --- ... for i = 1,1000 do s:insert{i, str} end --- ... box.snapshot() --- - ok ... t = s:select{} --- ... #t --- - 1000 ... t = s:replace{100, str} --- ... for i = 1,10 do t = s:select{} end --- ... t = s:replace{200, str} --- ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... str = '' --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{1, 1, 1, str} --- - [1, 1, 1, ''] ... s:replace{1, 2, 1, str} --- - [1, 2, 1, ''] ... s:replace{1, 3, 1, str} --- - [1, 3, 1, ''] ... s:replace{1, 4, 1, str} --- - [1, 4, 1, ''] ... s:replace{2, 1, 2, str} --- - [2, 1, 2, ''] ... s:replace{2, 2, 2, str} --- - [2, 2, 2, ''] ... s:replace{2, 3, 2, str} --- - [2, 3, 2, ''] ... 
s:replace{2, 4, 2, str} --- - [2, 4, 2, ''] ... s:replace{3, 3, 4} --- - [3, 3, 4] ... box.snapshot() --- - ok ... _ = stat_changed() -- init --- ... s:get{1, 2} --- - [1, 2, 1, ''] ... stat_changed() -- cache miss, true --- - true ... s:get{1, 2} --- - [1, 2, 1, ''] ... stat_changed() -- cache hit, false --- - false ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... stat_changed() -- cache miss, true --- - true ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... stat_changed() -- cache hit, false --- - false ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... stat_changed() -- cache miss, true --- - true ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... stat_changed() -- cache hit, false --- - false ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... str = '' --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{1, 1, 1, str} --- - [1, 1, 1, ''] ... s:replace{1, 2, 1, str} --- - [1, 2, 1, ''] ... s:replace{1, 3, 1, str} --- - [1, 3, 1, ''] ... s:replace{1, 4, 1, str} --- - [1, 4, 1, ''] ... s:replace{2, 1, 2, str} --- - [2, 1, 2, ''] ... s:replace{2, 2, 2, str} --- - [2, 2, 2, ''] ... s:replace{2, 3, 2, str} --- - [2, 3, 2, ''] ... s:replace{2, 4, 2, str} --- - [2, 4, 2, ''] ... s:replace{3, 3, 4} --- - [3, 3, 4] ... box.snapshot() --- - ok ... _ = stat_changed() -- init --- ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... stat_changed() -- cache miss, true --- - true ... s:get{1, 2} --- - [1, 2, 1, ''] ... 
stat_changed() -- cache hit, false --- - false ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... stat_changed() -- cache hit, false --- - false ... s:select{} --- - - [0, 0, 0] - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] - [2, 1, 2, ''] - [2, 2, 2, ''] - [2, 3, 2, ''] - [2, 4, 2, ''] - [3, 3, 4] ... stat_changed() -- cache hit, false --- - false ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... str = '' --- ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{1, 2, 1, str} --- - [1, 2, 1, ''] ... s:replace{1, 3, 1, str} --- - [1, 3, 1, ''] ... s:replace{1, 4, 1, str} --- - [1, 4, 1, ''] ... s:replace{2, 1, 2, str} --- - [2, 1, 2, ''] ... s:replace{2, 2, 2, str} --- - [2, 2, 2, ''] ... s:replace{2, 3, 2, str} --- - [2, 3, 2, ''] ... s:replace{2, 4, 2, str} --- - [2, 4, 2, ''] ... s:replace{3, 3, 4} --- - [3, 3, 4] ... s:select{1} --- - - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... s:replace{1, 1, 1, str} --- - [1, 1, 1, ''] ... s:select{1} --- - - [1, 1, 1, ''] - [1, 2, 1, ''] - [1, 3, 1, ''] - [1, 4, 1, ''] ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk', {parts = {1, 'uint', 2, 'uint'}}) --- ... s:replace{1, 1, 1} --- - [1, 1, 1] ... s:replace{2, 2, 2} --- - [2, 2, 2] ... s:replace{3, 3, 3} --- - [3, 3, 3] ... s:replace{4, 4, 4} --- - [4, 4, 4] ... s:replace{5, 5, 5} --- - [5, 5, 5] ... pk:min() --- - [1, 1, 1] ... pk:max() --- - [5, 5, 5] ... s:replace{0, 0, 0} --- - [0, 0, 0] ... s:replace{6, 6, 6} --- - [6, 6, 6] ... pk:min() --- - [0, 0, 0] ... pk:max() --- - [6, 6, 6] ... s:drop() --- ... -- https://github.com/tarantool/tarantool/issues/2189 local_space = box.schema.space.create('test', {engine='vinyl'}) --- ... pk = local_space:create_index('pk') --- ... local_space:replace({1, 1}) --- - [1, 1] ... 
local_space:replace({2, 2}) --- - [2, 2] ... local_space:select{} --- - - [1, 1] - [2, 2] ... box.begin() --- ... local_space:replace({1}) --- - [1] ... local_space:select{} --- - - [1] - [2, 2] ... box.commit() --- ... local_space:select{} --- - - [1] - [2, 2] ... local_space:drop() --- ... -- -- gh-2661: vy_cache_next_key after version change returns the -- same statement as before. -- s = box.schema.create_space('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sec', {parts = {2, 'string'}, unique = false}) --- ... s:insert{1, 'key1'} --- - [1, 'key1'] ... sk:select('key1') --- - - [1, 'key1'] ... s:insert{3, 'key2'} --- - [3, 'key2'] ... sk:select('key2') --- - - [3, 'key2'] ... s:insert{5, 'key1'} --- - [5, 'key1'] ... sk:select('key1') --- - - [1, 'key1'] - [5, 'key1'] ... s:drop() --- ... -- -- gh-2789: vy_cache_iterator must stop iteration, if a sought -- statement does not exist and is between chained statements. -- s = box.schema.create_space('test', {engine = 'vinyl'}) --- ... pk = s:create_index('pk') --- ... s:replace{1} --- - [1] ... s:replace{2} --- - [2] ... s:replace{4} --- - [4] ... s:replace{5} --- - [5] ... box.snapshot() --- - ok ... -- Cache is not updated in autocommit mode. box.begin() s:select{} box.commit() --- ... info = pk:info().cache --- ... info.lookup --- - 1 ... info.get.rows --- - 0 ... pk:info().disk.iterator.lookup --- - 1 ... s:get{3} --- ... info = pk:info().cache --- ... info.lookup --- - 2 ... info.get.rows --- - 0 ... pk:info().disk.iterator.lookup --- - 1 ... s:drop() --- ... -- -- Cache resize -- vinyl_cache = box.cfg.vinyl_cache --- ... box.cfg{vinyl_cache = 1000 * 1000} --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... for i = 1, 100 do s:replace{i, string.rep('x', 1000)} end --- ... for i = 1, 100 do s:get{i} end --- ... box.info.vinyl().cache.used --- - 107700 ... box.cfg{vinyl_cache = 50 * 1000} --- ... 
box.info.vinyl().cache.used --- - 49542 ... box.cfg{vinyl_cache = 0} --- ... box.info.vinyl().cache.used --- - 0 ... -- Make sure cache is not populated if box.cfg.vinyl_cache is set to 0 st1 = s.index.pk:info().cache --- ... #s:select() --- - 100 ... for i = 1, 100 do s:get{i} end --- ... st2 = s.index.pk:info().cache --- ... st2.put.rows - st1.put.rows --- - 0 ... box.info.vinyl().cache.used --- - 0 ... s:drop() --- ... box.cfg{vinyl_cache = vinyl_cache} --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/write_iterator.result0000664000000000000000000003324113306565107022642 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... fiber = require('fiber') --- ... -- -- Tests on data integrity after dump of memory runs or range -- compaction. -- -- The aim is to test vy_write_iterator. There are several combinations -- of various commands that can occur: -- 1) delete followed by upsert : write iterator should convert -- upsert to replace (insert) -- 2) upsert followed by delete: the upsert is filtered out, -- delete can be filtered out or not depending on whether it's -- compaction (filtered out) or dump (preserved) -- 3) upsert followed by upsert: two upserts are folded together -- into one -- 4) upsert followed by replace: upsert is replaced -- 5) replace followed by upsert: two commands are folded -- into a single replace with upsert ops applied -- 6) replace followed by delete: -- both are eliminated in case of compaction; -- replace is filtered out if it's dump -- 7) delete followed by replace: delete is filtered out -- 8) replace followed by replace: the first replace is filtered -- out -- 9) single upsert (for completeness) -- 10) single replace (for completeness) space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... -- -- pk = space:create_index('primary', { page_size = 12 * 1024, range_size = 12 * 1024 }) --- ... -- Insert many big tuples and then call snapshot to -- force dumping and compacting. 
big_val = string.rep('1', 2000) --- ... _ = space:insert{1} --- ... _ = space:insert{2, big_val} --- ... _ = space:insert{3, big_val} --- ... _ = space:insert{5, big_val} --- ... _ = space:insert{6, big_val} --- ... _ = space:insert{7, big_val} --- ... _ = space:insert{8, big_val} --- ... _ = space:insert{9, big_val} --- ... _ = space:insert{10, big_val} --- ... _ = space:insert{11, big_val} --- ... space:count() --- - 10 ... box.snapshot() --- - ok ... -- -- Create a couple of tiny runs on disk, to increate the "number of runs" -- heuristic of hte planner and trigger compaction -- space:insert{12} --- - [12] ... box.snapshot() --- - ok ... space:insert{13} --- - [13] ... box.snapshot() --- - ok ... #space:select{} --- - 12 ... space:drop() --- ... -- -- Create a vinyl index with small page_size parameter, so that -- big tuples will not fit in a single page. -- space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary', { page_size = 256, range_size = 3 * 1024 }) --- ... space:insert({1}) --- - [1] ... box.snapshot() --- - ok ... big_val = string.rep('1', 2000) --- ... _ = space:insert{2, big_val} --- ... _ = space:insert{3, big_val} --- ... _ = space:insert{5, big_val} --- ... _ = space:insert{6, big_val} --- ... _ = space:insert{7, big_val} --- ... _ = space:insert{8, big_val} --- ... _ = space:insert{9, big_val} --- ... _ = space:insert{10, big_val} --- ... _ = space:insert{11, big_val} --- ... -- Increate the number of runs, trigger compaction space:count() --- - 10 ... box.snapshot() --- - ok ... space:insert{12} --- - [12] ... box.snapshot() --- - ok ... space:insert{13} --- - [13] ... box.snapshot() --- - ok ... #space:select{} --- - 12 ... space:drop() --- ... -- Test dumping and compacting a space with more than one index. space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary', { page_size = 512, range_size = 1024 * 12 }) --- ... 
index2 = space:create_index('secondary', { parts = {2, 'string'}, page_size = 512, range_size = 1024 * 12 }) --- ... for i = 1, 100 do space:insert{i, ''..i} if i % 2 == 0 then box.snapshot() end end --- ... space:delete{1} --- ... space:delete{10} --- ... space:delete{100} --- ... box.snapshot() --- - ok ... index2:delete{'9'} --- ... index2:delete{'99'} --- ... box.snapshot() --- - ok ... space:select{2} --- - - [2, '2'] ... -- Test that not dumped changes are visible. space:upsert({2, '2'}, {{'=', 3, 22}}) --- ... space:select{2} --- - - [2, '2', 22] ... space:upsert({2, '2'}, {{'!', 3, 222}}) --- ... space:select{2} --- - - [2, '2', 222, 22] ... space:upsert({2, '2'}, {{'!', 3, 2222}}) --- ... space:select{2} --- - - [2, '2', 2222, 222, 22] ... box.snapshot() --- - ok ... space:select{2} --- - - [2, '2', 2222, 222, 22] ... space:update({2}, {{'!', 3, 22222}}) --- - [2, '2', 22222, 2222, 222, 22] ... box.snapshot() --- - ok ... space:select{2} --- - - [2, '2', 22222, 2222, 222, 22] ... space:drop() --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... pk = space:create_index('primary', { page_size = 128, range_size = 1024 }) --- ... -- Test that snaphot() inside a transaction doesn't lose data -- and that upserts are successfully merged. box.begin() --- ... space:upsert({2}, {{'=', 2, 22}}) --- ... space:upsert({2}, {{'!', 2, 222}}) --- ... space:upsert({2}, {{'!', 2, 2222}}) --- ... space:select{} --- - - [2, 2222, 222] ... box.snapshot() --- - ok ... box.commit() --- ... space:select{} --- - - [2, 2222, 222] ... space:insert({3}) --- - [3] ... box.snapshot() --- - ok ... space:select{} --- - - [2, 2222, 222] - [3] ... -- -- Verify that deletion of tuples with key 2 and 3 is -- successfully dumped and compacted. -- box.begin() --- ... space:delete{2} --- ... space:delete{3} --- ... box.commit() --- ... space:upsert({10}, {{'!', 2, 10}}) --- ... box.snapshot() --- - ok ... space:select{} --- - - [10] ... 
-- Test that deletion is successfully dumped and compacted. space:delete{10} --- ... space:upsert({10}, {{'!', 2, 10}}) --- ... space:upsert({10}, {{'!', 2, 10}}) --- ... box.snapshot() --- - ok ... space:select{} --- - - [10, 10] ... space:delete{10} --- ... space:upsert({10}, {{'!', 2, 10}}) --- ... space:delete({10}) --- ... box.snapshot() --- - ok ... space:select{} --- - [] ... -- Test that if replace is met then previous upsert is ignored. space:upsert({10}, {{'!', 2, 10}}) --- ... space:replace({10, 100}) --- - [10, 100] ... box.snapshot() --- - ok ... space:select{} --- - - [10, 100] ... space:delete{10} --- ... -- Test that dumping and compacting didn't lose single upsert. space:upsert({100}, {{'!', 2, 100}}) --- ... box.snapshot() --- - ok ... space:select{} --- - - [100] ... space:delete{100} --- ... -- Verify that if upsert goes after replace then they will be merged. space:replace({200}) --- - [200] ... space:upsert({200}, {{'!', 2, 200}}) --- ... box.snapshot() --- - ok ... space:select{} --- - - [200, 200] ... space:delete{200} --- ... -- Insert more tuples than can fit in range_size big_val = string.rep('1', 400) --- ... _ = space:replace({1, big_val}) --- ... _ = space:replace({2, big_val}) --- ... _ = space:replace({3, big_val}) --- ... _ = space:replace({4, big_val}) --- ... _ = space:replace({5, big_val}) --- ... _ = space:replace({6, big_val}) --- ... _ = space:replace({7, big_val}) --- ... space:count() --- - 7 ... box.snapshot() --- - ok ... space:count() --- - 7 ... space:delete({1}) --- ... space:delete({2}) --- ... space:delete({3}) --- ... space:delete({4}) --- ... space:delete({5}) --- ... space:delete({6}) --- ... space:delete({7}) --- ... space:select{} --- - [] ... box.snapshot() --- - ok ... space:select{} --- - [] ... -- Test that update successfully merged with replace and other updates space:insert({1}) --- - [1] ... space:update({1}, {{'=', 2, 111}}) --- - [1, 111] ... space:update({1}, {{'!', 2, 11}}) --- - [1, 11, 111] ... 
space:update({1}, {{'+', 3, 1}, {'!', 4, 444}}) --- - [1, 11, 112, 444] ... space:select{} --- - - [1, 11, 112, 444] ... box.snapshot() --- - ok ... space:select{} --- - - [1, 11, 112, 444] ... space:delete{1} --- ... box.snapshot() --- - ok ... space:select{} --- - [] ... -- Test upsert after deletion space:insert({1}) --- - [1] ... box.snapshot() --- - ok ... space:select{} --- - - [1] ... space:delete({1}) --- ... space:upsert({1}, {{'!', 2, 111}}) --- ... space:select{} --- - - [1] ... box.snapshot() --- - ok ... space:select{} --- - - [1] ... space:delete({1}) --- ... -- Test upsert before deletion space:insert({1}) --- - [1] ... box.snapshot() --- - ok ... space:select{} --- - - [1] ... space:upsert({1}, {{'!', 2, 111}}) --- ... space:delete({1}) --- ... box.snapshot() --- - ok ... space:select{} --- - [] ... -- Test deletion before replace space:insert({1}) --- - [1] ... box.snapshot() --- - ok ... space:select{} --- - - [1] ... space:delete({1}) --- ... space:replace({1, 1}) --- - [1, 1] ... box.snapshot() --- - ok ... space:select{} --- - - [1, 1] ... space:delete({1}) --- ... -- Test replace before deletion space:replace({5, 5}) --- - [5, 5] ... space:delete({5}) --- ... box.snapshot() --- - ok ... space:select{} --- - [] ... -- Test many replaces space:replace{6} --- - [6] ... space:replace{6, 6, 6} --- - [6, 6, 6] ... space:replace{6, 6, 6, 6} --- - [6, 6, 6, 6] ... space:replace{6, 6, 6, 6, 6} --- - [6, 6, 6, 6, 6] ... space:replace{6, 6, 6, 6, 6, 6} --- - [6, 6, 6, 6, 6, 6] ... space:replace{6, 6, 6, 6, 6, 6, 6} --- - [6, 6, 6, 6, 6, 6, 6] ... box.snapshot() --- - ok ... space:select{} --- - - [6, 6, 6, 6, 6, 6, 6] ... space:delete({6}) --- ... space:drop() --- ... -- gh-1725 merge iterator can't merge more than two runs space = box.schema.space.create('tweedledum', {engine = 'vinyl'}) --- ... pk = space:create_index('primary') --- ... -- integer keys space:replace{1, 'tuple'} --- - [1, 'tuple'] ... box.snapshot() --- - ok ... 
space:replace{2, 'tuple 2'} --- - [2, 'tuple 2'] ... box.snapshot() --- - ok ... space:replace{3, 'tuple 3'} --- - [3, 'tuple 3'] ... pk:get{1} or {'none'} --- - [1, 'tuple'] ... pk:get{2} --- - [2, 'tuple 2'] ... pk:get{3} --- - [3, 'tuple 3'] ... space:drop() --- ... -- gh-2875 INSERT+DELETE pairs are annihilated on compaction s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('primary', {run_count_per_level = 1}) --- ... sk = s:create_index('secondary', {run_count_per_level = 1, parts = {2, 'unsigned'}}) --- ... PAD1 = 100 --- ... PAD2 = 10 --- ... -- Create a big run to prevent major compaction. for i = 1001, 1000 + PAD1 do s:replace{i, i} end --- ... box.snapshot() --- - ok ... -- Generate some INSERT statements and dump them to disk. _ = s:insert{1, 1} -- insert --- ... _ = s:replace{2, 2} -- replace, no old tuple --- ... _ = s:upsert({3, 3}, {{'!', 1, 1}}) -- upsert, no old tuple --- ... box.begin() s:insert{4, 4} s:delete(4) box.commit() --- ... box.begin() s:insert{5, 5} s:replace{5, 5, 5} box.commit() --- ... box.begin() s:insert{6, 6} s:update(6, {{'!', 2, 6}}) box.commit() --- ... _ = s:insert{7, 7} --- ... _ = s:insert{8, 8} --- ... box.snapshot() --- - ok ... -- Delete the inserted tuples and trigger compaction. s:delete{1} --- ... s:delete{2} --- ... s:delete{3} --- ... s:delete{4} --- ... s:delete{5} --- ... s:delete{6} --- ... -- Check that a REPLACE in a secondary index generated by -- an update operation is converted into an INSERT on dump -- and hence gets annihilated by the next DELETE. _ = s:update(7, {{'=', 2, 77}}) --- ... s:delete{7} --- ... _ = s:upsert({8, 8}, {{'=', 2, 88}}) --- ... s:delete{8} --- ... -- Some padding to trigger minor compaction. for i = 1001, 1000 + PAD2 do s:replace{i, i} end --- ... box.snapshot() --- - ok ... -- Wait for compaction. while pk:info().disk.compact.count == 0 do fiber.sleep(0.001) end --- ... while sk:info().disk.compact.count == 0 do fiber.sleep(0.001) end --- ... 
pk:info().disk.compact.count -- 1 --- - 1 ... sk:info().disk.compact.count -- 1 --- - 1 ... -- All INSERT+DELETE pairs should have been annihilated, -- only padding is left. pk:info().disk.compact.out.rows - PAD2 -- 0 --- - 0 ... sk:info().disk.compact.out.rows - PAD2 -- 0 --- - 0 ... pk:select(1000, {iterator = 'LE'}) -- empty --- - [] ... sk:select(1000, {iterator = 'LE'}) -- empty --- - [] ... s:drop() --- ... -- Check that an INSERT+DELETE pair is annihilated on compaction -- only if the first statement among all sources was an INSERT. s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... pk = s:create_index('primary', {run_count_per_level = 1}) --- ... sk = s:create_index('secondary', {run_count_per_level = 1, parts = {2, 'unsigned'}}) --- ... PAD1 = 100 --- ... PAD2 = 10 --- ... -- Create a big run to prevent major compaction. for i = 1001, 1000 + PAD1 do s:insert{i, i} end --- ... _ = s:insert{1, 1} --- ... _ = s:insert{2, 2} --- ... _ = s:insert{3, 3} --- ... _ = s:insert{4, 4} --- ... _ = s:insert{5, 5} --- ... _ = s:insert{6, 6} --- ... _ = s:insert{7, 7} --- ... _ = s:insert{8, 8} --- ... box.snapshot() --- - ok ... -- Generate DELETE+INSERT statements and write them to disk. s:delete{1} s:insert{1, 100} --- ... box.begin() s:delete{2} s:insert{2, 200} box.commit() --- ... s:replace{3, 30} s:delete{3} s:insert{3, 300} --- ... box.begin() s:replace{4, 40} s:delete{4} s:insert{4, 400} box.commit() --- ... s:delete{5} s:upsert({5, 500}, {{'=', 2, 500}}) --- ... box.begin() s:delete{6} s:upsert({6, 600}, {{'=', 2, 600}}) box.commit() --- ... s:replace{7, 70} s:delete{7} s:upsert({7, 700}, {{'=', 2, 700}}) --- ... box.begin() s:replace{8, 80} s:delete{8} s:upsert({8, 800}, {{'=', 2, 800}}) box.commit() --- ... box.snapshot() --- - ok ... -- Generate DELETE statements and trigger compaction. s:delete{1} --- ... s:delete{2} --- ... s:delete{3} --- ... s:delete{4} --- ... s:delete{5} --- ... s:delete{6} --- ... s:delete{7} --- ... s:delete{8} --- ... 
-- Some padding to trigger minor compaction. for i = 1001, 1000 + PAD2 do s:replace{i, i} end --- ... box.snapshot() --- - ok ... -- Wait for compaction. while pk:info().disk.compact.count == 0 do fiber.sleep(0.001) end --- ... while sk:info().disk.compact.count == 0 do fiber.sleep(0.001) end --- ... pk:info().disk.compact.count -- 1 --- - 1 ... sk:info().disk.compact.count -- 1 --- - 1 ... -- If INSERT+DELETE statements stored in the two compacted runs -- were annihilated we would see tuples stored in the first run. pk:select(1000, {iterator = 'LE'}) -- empty --- - [] ... sk:select(1000, {iterator = 'LE'}) -- empty --- - [] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/replica_quota.result0000664000000000000000000000404013306560010022406 0ustar rootroottest_run = require('test_run').new() --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... box.schema.user.grant('guest', 'replication') --- ... s = box.schema.space.create('test', { engine = 'vinyl' }) --- ... _ = s:create_index('pk', {run_count_per_level = 1}) --- ... -- Send > 2 MB to replica. pad = string.rep('x', 1100) --- ... for i = 1,1000 do s:insert{i, pad} end --- ... box.snapshot() --- - ok ... for i = 1001,2000 do s:insert{i, pad} end --- ... -- Replica has memory limit set to 1 MB so replication would hang -- if the scheduler didn't work on the destination. -- -- Also check that quota timeout isn't taken into account while -- the replica is joining (see gh-2873). To do that, we set -- vinyl_timeout to 1 ms on the replica, which isn't enough for -- a dump to complete and hence would result in bootstrap failure -- were the timeout not ignored. -- _ = test_run:cmd("create server replica with rpl_master=default, script='vinyl/replica_quota.lua'") --- ... _ = test_run:cmd("start server replica") --- ... _ = test_run:wait_lsn('replica', 'default') --- ... -- Check vinyl_timeout is ignored on 'subscribe' (gh-3087). _ = test_run:cmd("stop server replica") --- ... 
for i = 2001,3000 do s:insert{i, pad} end --- ... _ = test_run:cmd("start server replica") --- ... _ = test_run:wait_lsn('replica', 'default') --- ... -- During join we remove compacted run files immediately (gh-3162). -- Check that we don't delete files that are still in use. _ = test_run:cmd("stop server replica") --- ... _ = test_run:cmd("cleanup server replica") --- ... box.snapshot() --- - ok ... for i = 3001,4000 do s:insert{i, pad} end --- ... _ = test_run:cmd("start server replica") -- join --- ... _ = test_run:cmd("stop server replica") --- ... _ = test_run:cmd("start server replica") -- recovery --- ... _ = test_run:cmd("stop server replica") --- ... _ = test_run:cmd("cleanup server replica") --- ... s:drop() --- ... box.schema.user.revoke('guest', 'replication') --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/recovery_quota.test.lua0000664000000000000000000000475113306565107023073 0ustar rootroottest_run = require('test_run').new() -- Upon start the test server creates a space and populates it with -- more tuples than can be stored in memory, which results in dumping -- some of them to disk. If on restart, during recovery from WAL, -- it replayed the dumped statements, it would exceed memory quota. -- Check that it does not. test_run:cmd('create server test with script = "vinyl/low_quota.lua"') test_run:cmd('start server test with args="2097152"') test_run:cmd('switch test') -- Create a vinyl space and trigger dump by exceeding memory quota. s = box.schema.space.create('test', {engine = 'vinyl'}) _ = s:create_index('pk', {run_count_per_level = 10}) pad_size = 1000 pad = string.rep('x', pad_size) for i = 1, 2 * box.cfg.vinyl_memory / pad_size do s:insert{i, pad} end -- Save the total number of committed and dumped statements. -- Make sure no task is completed after we saved stats. 
box.error.injection.set('ERRINJ_VY_TASK_COMPLETE', true) var = box.schema.space.create('var') _ = var:create_index('pk', {parts = {1, 'string'}}) stat = box.space.test.index.pk:info() _ = var:insert{'put', stat.put.rows} _ = var:insert{'dump', stat.disk.dump.out.rows} test_run:cmd('restart server test with args="2097152"') -- Check that we do not exceed quota. stat = box.info.vinyl() stat.quota.used <= stat.quota.limit or {stat.quota.used, stat.quota.limit} -- Check that we did not replay statements dumped before restart. stat = box.space.test.index.pk:info() var = box.space.var dump_before = var:get('dump')[2] dump_after = stat.disk.dump.out.rows put_before = var:get('put')[2] put_after = stat.put.rows dump_after == 0 or dump_after put_before - dump_before == put_after or {dump_before, dump_after, put_before, put_after} -- Disable dump and use all memory up to the limit. box.error.injection.set('ERRINJ_VY_RUN_WRITE', true) box.cfg{vinyl_timeout=0.001} pad_size = 1000 pad = string.rep('x', pad_size) for i = 1, box.cfg.vinyl_memory / pad_size do box.space.test:replace{i, pad} end box.info.vinyl().quota.used > 1024 * 1024 -- Check that tarantool can recover with a smaller memory limit. test_run:cmd('restart server test with args="1048576"') fiber = require 'fiber' -- All memory above the limit must be dumped after recovery. while box.space.test.index.pk:info().disk.dump.count == 0 do fiber.sleep(0.001) end stat = box.info.vinyl() stat.quota.used <= stat.quota.limit or {stat.quota.used, stat.quota.limit} _ = test_run:cmd('switch default') test_run:cmd('stop server test') test_run:cmd('cleanup server test') tarantool_1.9.1.26.g63eb81e3c/test/vinyl/mvcc.result0000664000000000000000000013035413306565107020532 0ustar rootroottest_run = require('test_run').new() --- ... -- need to restart in order to reset box.info.vinyl() stats test_run:cmd("restart server default") txn_proxy = require('txn_proxy') --- ... _ = box.schema.space.create('test', {engine = 'vinyl'}) --- ... 
_ = box.space.test:create_index('pk') --- ... c1 = txn_proxy.new() --- ... c2 = txn_proxy.new() --- ... c3 = txn_proxy.new() --- ... c4 = txn_proxy.new() --- ... c5 = txn_proxy.new() --- ... c6 = txn_proxy.new() --- ... c7 = txn_proxy.new() --- ... t = box.space.test --- ... -- -- empty transaction commit -- c1:begin() --- - ... c1:commit() --- - ... -- -- empty transaction rollback -- c1:begin() --- - ... c1:rollback() --- - ... -- -- single-statement transaction commit -- c1:begin() --- - ... c1("t:replace{1}") --- - - [1] ... c1:commit() --- - ... c1("t:get{1}") --- - - [1] ... -- cleanup c1("t:delete{1}") --- - ... -- -- single-statement transaction rollback -- c1:begin() --- - ... c1("t:replace{1}") --- - - [1] ... c1:rollback() --- - ... c1("t:get{1}") --- - ... -- -- basic effects: if a transaction is rolled back, it has no effect -- c1:begin() --- - ... c1("t:insert{1}") --- - - [1] ... c1("t:get{1}") --- - - [1] ... c1:rollback() --- - ... c1("t:get{1}") --- - ... c2("t:get{1}") --- - ... -- -- multi-statement transaction -- test_run:cmd("setopt delimiter ';'") --- - true ... c1:begin(); --- - ... for i = 1,100 do c1(string.format("t:insert{%d}", i)) assert(c1(string.format("t:get{%d}", i))[1][1] == i) end; --- ... c1:commit(); --- - ... for i = 1,100 do c1(string.format("t:delete{%d}", i)) end; --- ... for i = 1,100 do assert(#c1(string.format("t:get{%d}", i)) == 0) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- -- multi-statement transaction rollback -- test_run:cmd("setopt delimiter ';'") --- - true ... c1:begin(); --- - ... for i = 1,100 do c1(string.format("t:insert{%d}", i)) assert(c1(string.format("t:get{%d}", i))[1][1] == i) end; --- ... c1:rollback(); --- - ... for i = 1,100 do assert(#c1(string.format("t:get{%d}", i)) == 0) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- transaction_set_set_get_commit(void) c1:begin() --- - ... c1("t:replace{1, 1}") --- - - [1, 1] ... 
c1("t:replace{1, 2}") --- - - [1, 2] ... c1("t:get{1}") --- - - [1, 2] ... c1:commit() --- - ... c1("t:get{1}") --- - - [1, 2] ... c1("t:delete{1}") --- - ... -- transaction_set_set_commit_get(void) c1:begin() --- - ... c1("t:replace{1}") --- - - [1] ... c1("t:replace{1, 2}") --- - - [1, 2] ... c1:commit() --- - ... c2:begin() --- - ... c2("t:get{1}") --- - - [1, 2] ... c2:rollback() --- - ... c1("t:delete{1}") --- - ... -- transaction_set_set_rollback_get(void) c1:begin() --- - ... c1("t:replace{1}") --- - - [1] ... c1("t:replace{1, 2}") --- - - [1, 2] ... c1:rollback() --- - ... c2:begin() --- - ... c2("t:get{1}") --- - ... c2:rollback() --- - ... -- transaction_set_delete_get_commit(void) c1:begin() --- - ... c1("t:insert{1}") --- - - [1] ... c1("t:delete{1}") --- - ... c1("t:get{1}") --- - ... c1:commit() --- - ... -- transaction_set_delete_get_commit_get(void) c1:begin() --- - ... c1("t:insert{1}") --- - - [1] ... c1("t:delete{1}") --- - ... c1("t:get{1}") --- - ... c1:commit() --- - ... c1("t:get{1}") --- - ... -- -- transaction_set_delete_set_commit_get(void) -- c1:begin() --- - ... c1("t:insert{1, 1}") --- - - [1, 1] ... c1("t:delete{1}") --- - ... c1("t:insert{1, 2}") --- - - [1, 2] ... c1("t:get{1}") --- - - [1, 2] ... c1:commit() --- - ... c2("t:get{1}") --- - - [1, 2] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- transaction_set_delete_commit_get_set(void) -- c1:begin() --- - ... c1("t:insert{1}") --- - - [1] ... c1("t:delete{1}") --- - ... c1:commit() --- - ... c1("t:get{1}") --- - ... c1("t:insert{1}") --- - - [1] ... c1("t:get{1}") --- - - [1] ... c1("t:delete{1}") --- - ... c1("t:get{1}") --- - ... -- -- transaction_p_set_commit(void) -- c1:begin() --- - ... c2:begin() --- - ... c1("t:replace{1, 10}") --- - - [1, 10] ... c1:commit() --- - ... c2("t:replace{2, 15}"); --- - - [2, 15] ... c2:commit() --- - ... c1("t:get{1}") --- - - [1, 10] ... c1("t:get{2}") --- - - [2, 15] ... c1("t:delete{1}") --- - ... c1("t:delete{2}") --- - ... 
-- -- no dirty reads: if a transaction is not committed, its effects are not -- visible -- c1:begin() --- - ... c1("t:insert{1}") --- - - [1] ... c1("t:get{1}") --- - - [1] ... -- -- not visible in c2 -- c2("t:get{1}") --- - ... c1:commit() --- - ... -- -- become visible in c2 after c1 commits (c2 runs in autocommit) -- c2("t:get{1}") --- - - [1] ... -- -- repeatable read: if c1 has read X, and there was -- another transaction, which modified X after c1 started, -- and c1 reads X again, it gets the same result -- c1:begin() --- - ... c1("t:get{1}") --- - - [1] ... -- -- not visible in c1 -- c2("t:replace{1, 'c2'}") --- - - [1, 'c2'] ... c1("t:get{1}") --- - - [1] ... c2:commit() --- - ... -- -- still not visible, even though c2 has committed -- c1("t:get{1}") --- - - [1] ... -- commits ok since is a read only transaction c1:commit() --- - ... -- -- now visible -- c1("t:get{1}") --- - - [1, 'c2'] ... c1("t:delete{1}") --- - ... -- ******************************* -- tx manager tests from sophia * -- ******************************* -- -------------------------------------------------------------------------- -- transaction_p_set_get_commit(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c1("t:replace{1, 10}") --- - - [1, 10] ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- c1:commit() --- - ... -- -- c2("t:replace{2, 15}") --- - - [2, 15] ... -- c2("t:get{2}") -- {2, 15} --- - - [2, 15] ... -- c2:commit() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... c1("t:delete{2}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_p_set_commit_get0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... 
c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c1:commit() --- - ... -- c2("t:replace{2, 15}") --- - - [2, 15] ... c2:commit() --- - ... -- c1:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- c1("t:get{2}") -- {2, 15} --- - - [2, 15] ... c1:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... c1("t:delete{2}") --- - ... -- -------------------------------------------------------------------------- -- transaction_p_set_commit_get1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") --- - ... c2("t:get{200}") --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:commit() --- - ... -- -- try writing an unrelated key -- c1("t:replace{2, 15}") --- - - [2, 15] ... c1:commit() --- - ... -- c2:begin() --- - ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... c1("t:delete{2}") --- - ... -- -- -- now try the same key -- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") --- - ... c2("t:get{200}") --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:commit() --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... c1:commit() --- - ... -- c2:begin() --- - ... c2("t:get{1}") -- {1, 15} --- - - [1, 15] ... c2:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_p_set_commit_get2(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") --- - ... c2("t:get{200}") --- - ... -- -- c1("t:replace{2, 15}") --- - - [2, 15] ... c1:commit() --- - ... -- -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:commit() -- commits successfully --- - ... 
-- c1:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- c1("t:get{2}") -- {2, 15} --- - - [2, 15] ... c1:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... c1("t:delete{2}") --- - ... -- -------------------------------------------------------------------------- -- transaction_p_set_rollback_get0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") --- - ... c2("t:get{200}") --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... c1:rollback() --- - ... -- c2("t:replace{2, 15}") --- - - [2, 15] ... c2:rollback() --- - ... -- c3:begin() --- - ... c3("t:get{1}") -- finds nothing --- - ... c3("t:get{2}") -- finds nothing --- - ... c3:rollback() --- - ... -- -------------------------------------------------------------------------- -- transaction_p_set_rollback_get1(void) -- -------------------------------------------------------------------------- -- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:rollback() --- - ... -- c1("t:replace{2, 15}") --- - - [2, 15] ... c1:rollback() --- - ... -- c3:begin() --- - ... c3("t:get{1}") -- finds nothing --- - ... c3("t:get{2}") -- finds nothing --- - ... c3:rollback() --- - ... -- -- -------------------------------------------------------------------------- -- transaction_p_set_rollback_get2(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:rollback() --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... c1:rollback() --- - ... -- c3("t:get{1}") -- finds nothing --- - ... 
-- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:rollback() --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... c1:commit() --- - ... -- c3("t:get{1}") -- {1, 15} --- - - [1, 15] ... -- -- cleanup -- c3("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_c_set_commit0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c1("t:replace{1, 10}") --- - - [1, 10] ... c1:commit() --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c2:commit() --- - ... -- c2("t:get{1}") -- {1,15} --- - - [1, 15] ... -- cleanup -- c1("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_c_set_commit1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:commit() --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... c1:commit() --- - ... -- c3("t:get{1}") -- {1, 15} --- - - [1, 15] ... -- -- cleanup -- c3("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_c_set_commit2(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... 
-- c2("t:replace{1, 10}") --- - - [1, 10] ... -- c2:commit() --- - ... c1:commit() --- - ... -- c3("t:get{1}") -- {1, 15} --- - - [1, 15] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... -- -- sic: commit order c1:commit() --- - ... c2:commit() -- write after write is ok, the last writer to commit wins --- - ... -- c3("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_a0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... -- c2:rollback() --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... -- c1:commit() --- - ... -- c3("t:get{1}") --- - - [1, 15] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -- statement order is irrelevant, rollback order is important c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:rollback() --- - ... c1:commit() --- - ... -- c3("t:get{1}") --- - - [1, 10] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_a1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... 
c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c1("t:replace{1, 15}") --- - - [1, 15] ... -- c2:rollback() --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- statements in different order now -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:rollback() --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_b0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c2:commit() -- success --- - ... -- c1("t:replace{1, 15}") --- - - [1, 15] ... c1:rollback() -- success --- - ... -- c3("t:get{1}") --- - - [1, 10] ... -- cleanup -- c1("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_b1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2:commit() --- - ... c1:rollback() --- - ... -- c3("t:get{1}") --- - - [1, 15] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- now commit the second transaction -- c1:begin() --- - ... 
c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2:commit() --- - ... c1:commit() -- ok, the last committer wins --- - ... -- c3("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_ab0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c2:rollback() --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... c1:rollback() --- - ... -- c3("t:get{1}") --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- now commit the second transaction -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c2:rollback() --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... c1:commit() --- - ... -- c3("t:get{1}") --- - - [1, 10] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_rollback_ab1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 10}") --- - - [1, 10] ... c1("t:replace{1, 15}") --- - - [1, 15] ... -- c2:rollback() --- - ... c1:rollback() --- - ... -- c3("t:get{1}") --- - ... 
-- -- cleanup -- c2("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_a0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c1:commit() -- success --- - ... c2:commit() -- success, the last writer wins --- - ... -- c2("t:get{1}") -- {1, 15} --- - - [1, 15] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_a1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c1:commit() -- success, the last writer wins --- - ... -- -- cleanup -- c2("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_b0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... 
-- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_b1(void) -- -------------------------------------------------------------------------- -- c2:begin() --- - ... c1:begin() --- - ... c2("t:get{100}") -- start transaction in the engine --- - ... c1("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_a0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_a1(void) -- -------------------------------------------------------------------------- -- c2:begin() --- - ... c1:begin() --- - ... c2("t:get{100}") -- start transaction in the engine --- - ... c1("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c1:rollback() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_b0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... 
c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c2:rollback() -- not in transaction --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_b1(void) -- -------------------------------------------------------------------------- -- c2:begin() --- - ... c1:begin() --- - ... c2("t:get{100}") -- start transaction in the engine --- - ... c1("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c2:rollback() -- not in transaction --- - ... c1:commit() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... c1("t:delete{2}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_n0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c3:begin() --- - ... c3("t:get{300}") -- start transaction in the engine --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c3("t:replace{1, 20}") --- - - [1, 20] ... -- c2:commit() -- success --- - ... c3:commit() -- success --- - ... c1:commit() -- success, the last committer wins --- - ... c2:commit() -- not in transaction --- - ... c3:commit() -- not in transaction --- - ... -- c3:get{1} -- {1, 20} --- - error: '[string "return c3:get{1} -- {1, 20} "]:1: attempt to call method ''get'' (a nil value)' ... 
-- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_n1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c3:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c3("t:get{200}") -- start transaction in the engine --- - ... c2("t:get{300}") -- start transaction in the engine --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 20}") --- - - [1, 20] ... -- c3("t:replace{1, 30}") --- - - [1, 30] ... -- c1:commit() -- success --- - ... c2:commit() -- success --- - ... c3:commit() -- success --- - ... -- c3("t:get{1}") -- {1, 30} --- - - [1, 30] ... -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c3("t:get{300}") -- start transaction in the engine --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c3("t:replace{1, 20}") --- - - [1, 20] ... -- c2:commit() -- success --- - ... c3:commit() -- rollback --- - ... c1:rollback() -- success --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... 
c3("t:get{300}") -- start transaction in the engine --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c3("t:replace{1, 20}") --- - - [1, 20] ... -- c2:commit() -- success --- - ... c3:commit() -- rollback --- - ... c2:rollback() -- success, not in transaction in tarantool --- - ... c3:commit() -- success, not in transaction in tarantool --- - ... c1:commit() -- rollback --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n2(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c3("t:get{300}") -- start transaction in the engine --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c3("t:replace{1, 20}") --- - - [1, 20] ... -- c3:rollback() --- - ... c2:commit() --- - ... c1:commit() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n3(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c3("t:get{300}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c3("t:replace{1, 20}") --- - - [1, 20] ... -- c2:commit() --- - ... c3:rollback() --- - ... c1:commit() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... 
-- -- -------------------------------------------------------------------------- -- transaction_c_set_commit_wait_rollback_n4(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c3("t:get{300}") -- start transaction in the engine --- - ... -- -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c3("t:replace{1, 20}") --- - - [1, 20] ... -- c2:commit() --- - ... c3:rollback() --- - ... c1:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_get0(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... c1:commit() --- - ... -- c2("t:get{1}") -- find newest {1, 10} --- - - [1, 10] ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c2:commit() -- rollback --- - ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 10} --- - - [1, 15] ... c3:commit() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_get1(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- c1:rollback() --- - ... -- c2("t:get{1}") -- finds nothing --- - ... -- c2("t:replace{1, 15}") --- - - [1, 15] ... c2:commit() --- - ... -- c3:begin() --- - ... 
c3("t:get{1}") -- {1, 15} --- - - [1, 15] ... c3:commit() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_get2(void) -- -------------------------------------------------------------------------- -- c7:begin() --- - ... c7("t:get{100}") -- start transaction in the engine --- - ... c1:begin() --- - ... -- c1("t:replace{1, 1}") --- - - [1, 1] ... -- c2:begin() --- - ... -- c2("t:replace{1, 2}") --- - - [1, 2] ... -- c4:begin() --- - ... c4("t:replace{1, 4}") --- - - [1, 4] ... -- c5:begin() --- - ... c5("t:replace{1, 5}") --- - - [1, 5] ... -- c6:begin() --- - ... c6("t:get{100}") -- start transaction in the engine --- - ... -- c1("t:get{1}") -- {1, 1} --- - - [1, 1] ... -- c2("t:get{1}") -- {1, 2} --- - - [1, 2] ... -- c4("t:get{1}") -- {1, 4} --- - - [1, 4] ... -- c5("t:get{1}") -- {1, 5} --- - - [1, 5] ... -- c6("t:get{1}") -- nothing --- - ... -- c7("t:get{1}") -- nothing --- - ... -- c3:begin() --- - ... -- c3("t:get{1}") -- nothing --- - ... c3:rollback() --- - ... -- c1:rollback() --- - ... c2:rollback() --- - ... c3:rollback() --- - ... c4:rollback() --- - ... c5:rollback() --- - ... c6:rollback() --- - ... c7:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_get3(void) -- -------------------------------------------------------------------------- -- c7:begin() --- - ... c1:begin() --- - ... c7("t:get{100}") -- start transaction in the engine --- - ... c1("t:get{1}") -- start transaction in the engine --- - ... -- c3:begin() --- - ... c3("t:replace{1, 3}") --- - - [1, 3] ... c3:commit() --- - ... -- c2:begin() --- - ... c3:begin() --- - ... c2("t:get{500}") -- start transaction in the engine --- - ... c3("t:get{600}") -- start transaction in the engine --- - ... c2("t:get{1}") -- {1, 3} --- - - [1, 3] ... 
-- c3("t:replace{1, 6}") --- - - [1, 6] ... c3:commit() -- c2 goes to read view now --- - ... -- c4:begin() --- - ... c3:begin() --- - ... -- c3("t:replace{1, 9}") --- - - [1, 9] ... c3:commit() --- - ... -- c5:begin() --- - ... c3:begin() --- - ... c5("t:get{800}") -- start transaction in the engine --- - ... c3("t:get{900}") -- start transaction in the engine --- - ... -- c3("t:replace{1, 12}") --- - - [1, 12] ... c3:commit() --- - ... -- c6:begin() --- - ... c6("t:get{1000}") -- start transaction in the engine --- - ... -- c2("t:get{1}") -- {1, 3} --- - - [1, 3] ... -- c4("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c5("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c6("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 12} --- - - [1, 12] ... c3:rollback() --- - ... -- c1("t:get{1}") -- nothing --- - ... -- c7("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c2:rollback() --- - ... -- c4("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c5("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c6("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 12} --- - - [1, 12] ... c3:rollback() --- - ... -- c1("t:get{1}") -- nothing --- - ... -- c7("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c4:rollback() --- - ... -- c5("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c6("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 12} --- - - [1, 12] ... c3:rollback() --- - ... -- c1("t:get{1}") -- nothing --- - ... -- c7("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c5:rollback() --- - ... -- c6("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 12} --- - - [1, 12] ... c3:rollback() --- - ... -- c1("t:get{1}") -- nothing --- - ... -- c7("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c6:rollback() --- - ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 12} --- - - [1, 12] ... c3:rollback() --- - ... -- c1("t:get{1}") -- nothing --- - ... 
-- c7("t:get{1}") -- {1, 12} --- - - [1, 12] ... -- c1:rollback() --- - ... c7:rollback() --- - ... -- c3:begin() --- - ... c3("t:get{1}") -- {1, 12} --- - - [1, 12] ... c3:rollback() --- - ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_c_set_conflict_derive(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... c2:begin() --- - ... c1("t:get{100}") -- start transaction in the engine --- - ... c2("t:get{200}") -- start transaction in the engine --- - ... c1("t:replace{1, 10}") --- - - [1, 10] ... c2("t:replace{1, 15}") --- - - [1, 15] ... -- c1:commit() --- - ... -- c2("t:replace{1, 20}") -- should not reset conflict flag --- - - [1, 20] ... -- c2:commit() -- rollback --- - ... -- c3("t:get{1}") --- - - [1, 20] ... -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- transaction_sc_set_wait(void) -- -------------------------------------------------------------------------- -- c1:begin() --- - ... -- c1("t:replace{1, 10}") --- - - [1, 10] ... -- -- sic: runs in autocommit mode -- c2("t:replace{1, 15}") --- - - [1, 15] ... -- c1:commit() --- - ... -- c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- c1("t:delete{1}") --- - ... -- -------------------------------------------------------------------------- -- transaction_sc_get(void) -- -------------------------------------------------------------------------- -- c1("t:replace{1, 7}") --- - - [1, 7] ... -- c2:begin() --- - ... -- c2("t:replace{1, 8}") --- - - [1, 8] ... -- c1("t:get{1}") -- {1, 7} --- - - [1, 7] ... -- c2:commit() --- - ... -- c1("t:get{1}") -- {1, 8} --- - - [1, 8] ... -- c3("t:get{1}") -- {1, 8} --- - - [1, 8] ... -- -- cleanup -- c1("t:delete{1}") --- - ... 
-- -------------------------------------------------------------------------- -- two conflicting inserts -- -------------------------------------------------------------------------- c1:begin() --- - ... c2:begin() --- - ... -- c1("t:insert{1, 10}") --- - - [1, 10] ... -- c2("t:insert{1, 15}") --- - - [1, 15] ... -- c1:commit() -- success --- - ... c2:commit() -- rollback, c2 reads {1} before writing it --- - - {'error': 'Transaction has been aborted by conflict'} ... -- c3("t:get{1}") -- {1, 10} --- - - [1, 10] ... -- -- -- cleanup -- c1("t:delete{1}") --- - ... -- c1:begin() --- - ... c2:begin() --- - ... -- c1("t:insert{1, 10}") --- - - [1, 10] ... -- c2("t:insert{1, 15}") --- - - [1, 15] ... -- c2:commit() -- success --- - ... c1:commit() -- rollback, c1 reads {1} before writing it --- - - {'error': 'Transaction has been aborted by conflict'} ... -- c3("t:get{1}") -- {1, 15} --- - - [1, 15] ... -- -- -- cleanup -- c1("t:delete{1}") --- - ... -- -- -------------------------------------------------------------------------- -- Transaction spuriously abort based on CSN clock -- -------------------------------------------------------------------------- t:insert{1, 10} --- - [1, 10] ... t:insert{2, 20} --- - [2, 20] ... c7:begin() --- - ... c7("t:insert{8, 800}") --- - - [8, 800] ... c3:begin() --- - ... c3("t:get{1}") --- - - [1, 10] ... c3:commit() --- - ... c1:begin() --- - ... c2:begin() --- - ... -- c1("t:replace{4, 40}") --- - - [4, 40] ... -- c2("t:get{1}") --- - - [1, 10] ... -- c3:begin() --- - ... c3("t:insert{3, 30}") --- - - [3, 30] ... c3:commit() --- - ... -- c2("t:replace{5, 50}") --- - - [5, 50] ... c1("t:get{1}") --- - - [1, 10] ... c1:commit() --- - ... c2:commit() --- - ... c7:rollback() --- - ... -- -- cleanup -- t:delete{1} --- ... t:delete{2} --- ... t:delete{3} --- ... t:delete{4} --- ... t:delete{5} --- ... 
-- -------------------------------------------------------------------------- -- Conflict manager works for iterators -- -------------------------------------------------------------------------- t:insert{1, 10} --- - [1, 10] ... t:insert{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:select{}") --- - - [[1, 10], [2, 20]] ... c2("t:select{}") --- - - [[1, 10], [2, 20]] ... c1("t:replace{1, 'new'}") --- - - [1, 'new'] ... c2("t:replace{2, 'new'}") --- - - [2, 'new'] ... c1:commit() --- - ... c2:commit() -- rollback --- - - {'error': 'Transaction has been aborted by conflict'} ... -- -- -- gh-1606 visibility of changes in transaction in range queries -- c1:begin() --- - ... c1("t:select{}") --- - - [[1, 'new'], [2, 20]] ... c1("t:replace{3, 30}") --- - - [3, 30] ... c1("t:select{}") --- - - [[1, 'new'], [2, 20], [3, 30]] ... c1("t:select({3}, {iterator='ge'})") --- - - [[3, 30]] ... c1("t:select({3}, {iterator='lt'})") --- - - [[2, 20], [1, 'new']] ... c1("t:select({3}, {iterator='gt'})") --- - - [] ... c1("t:select({3}, {iterator='eq'})") --- - - [[3, 30]] ... c1("t:replace{3, 'new'}") --- - - [3, 'new'] ... c1("t:select({3}, {iterator='ge'})") --- - - [[3, 'new']] ... c1("t:select({3}, {iterator='lt'})") --- - - [[2, 20], [1, 'new']] ... c1("t:select({3}, {iterator='gt'})") --- - - [] ... c1("t:select({3}, {iterator='eq'})") --- - - [[3, 'new']] ... c1("t:delete{3}") --- - ... c1("t:select({3}, {iterator='ge'})") --- - - [] ... c1("t:select({3}, {iterator='lt'})") --- - - [[2, 20], [1, 'new']] ... c1("t:select({3}, {iterator='gt'})") --- - - [] ... c1("t:select({3}, {iterator='eq'})") --- - - [] ... c1("t:replace{3}") --- - - [3] ... c1("t:delete{2}") --- - ... c1("t:select({3}, {iterator='lt'})") --- - - [[1, 'new']] ... c1("t:select({3}, {iterator='le'})") --- - - [[3], [1, 'new']] ... c1("t:replace{2}") --- - - [2] ... c1("t:delete{1}") --- - ... c1("t:select({3}, {iterator='lt'})") --- - - [[2]] ... 
c1("t:select({3}, {iterator='le'})") --- - - [[3], [2]] ... c1("t:delete{3}") --- - ... c1("t:select({3}, {iterator='lt'})") --- - - [[2]] ... c1("t:select({3}, {iterator='le'})") --- - - [[2]] ... c1:rollback() --- - ... c1("t:select{}") --- - - [[1, 'new'], [2, 20]] ... -- -- -- Check that a cursor is closed automatically when a transaction -- is committed or rolled back -- c1:begin() --- - ... c1("t:select{1}") --- - - [[1, 'new']] ... c1("for k, v in box.space.test:pairs() do box.commit() end") --- - - {'error': 'The transaction the cursor belongs to has ended'} ... c1:rollback() --- - ... c1:begin() --- - ... c1("t:select{1}") --- - - [[1, 'new']] ... c1("for k, v in box.space.test:pairs() do box.rollback() end") --- - - {'error': 'The transaction the cursor belongs to has ended'} ... c1:rollback() --- - ... t:truncate() --- ... -- -- Check that min/max/count transactions stay within a read view -- t:replace{1} --- - [1] ... c1:begin() --- - ... c1("t.index.pk:max()") -- {1} --- - - [1] ... c1("t.index.pk:min()") -- {1} --- - - [1] ... c1("t.index.pk:count()") -- 1 --- - - 1 ... c2:begin() --- - ... c2("t:replace{2}") -- conflicts with c1 so c1 starts using a read view --- - - [2] ... c2:commit() --- - ... c1("t.index.pk:max()") -- {1} --- - - [1] ... c1("t.index.pk:min()") -- {1} --- - - [1] ... c1("t.index.pk:count()") -- 1 --- - - 1 ... c1:commit() --- - ... -- -- Convert the reader to a read view: in this test we have -- an explicit conflict between c1 and c2, so c1 begins -- using a read view -- c1:begin() --- - ... c1("t.index.pk:max()") -- {2} --- - - [2] ... c1("t.index.pk:min()") -- {1} --- - - [1] ... c1("t.index.pk:count()") -- 2 --- - - 2 ... c2:begin() --- - ... c2("t:replace{1, 'new'}") -- conflits with c1 so c1 starts using a read view --- - - [1, 'new'] ... c2("t:replace{3}") --- - - [3] ... c2:commit() --- - ... c1("t.index.pk:max()") -- {2} --- - - [2] ... c1("t.index.pk:min()") -- {1} --- - - [1] ... 
c1("t.index.pk:count()") -- 2 --- - - 2 ... c1:commit() --- - ... t:truncate() --- ... -- -- Check that select() does not add the key following -- the last returned key to the conflict manager. -- t:replace{1} --- - [1] ... t:replace{2} --- - [2] ... c1:begin() --- - ... c1("t:select({}, {limit = 0})") -- none --- - - [] ... c2:begin() --- - ... c2("t:replace{1, 'new'}") --- - - [1, 'new'] ... c2:commit() --- - ... c1("t:select({}, {limit = 1})") -- {1, 'new'} --- - - [[1, 'new']] ... c2:begin() --- - ... c2("t:replace{2, 'new'}") --- - - [2, 'new'] ... c2:commit() --- - ... c1("t:select()") -- {1, 'new'}, {2, 'new'} --- - - [[1, 'new'], [2, 'new']] ... c1:commit() --- - ... t:truncate() --- ... -- -- gh-2716 uniqueness check for secondary indexes -- _ = t:create_index('sk', {parts = {2, 'unsigned'}, unique = true}) --- ... c1:begin() --- - ... c2:begin() --- - ... c1("t:insert{1, 2}") --- - - [1, 2] ... c2("t:insert{2, 2}") --- - - [2, 2] ... c1:commit() --- - ... c2:commit() -- rollback --- - - {'error': 'Transaction has been aborted by conflict'} ... t:select() -- {1, 2} --- - - [1, 2] ... t:truncate() --- ... t.index.sk:drop() --- ... -- ************************************************************************* -- 1.7 cleanup marker: end of tests cleanup -- ************************************************************************* -- box.space.test:drop() --- ... c1 = nil --- ... c2 = nil --- ... c3 = nil --- ... c4 = nil --- ... c5 = nil --- ... c6 = nil --- ... c7 = nil --- ... collectgarbage() --- - 0 ... -- check of read views proper allocation/deallocation s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... i = box.space.test:create_index('pk') --- ... s:replace{1, 2, 3} --- - [1, 2, 3] ... s:replace{4, 5, 6} --- - [4, 5, 6] ... s:replace{7, 8, 9} --- - [7, 8, 9] ... box.info.vinyl().tx.read_views -- 0 (no read views needed) --- - 0 ... box.info.vinyl().tx.transactions -- 0 --- - 0 ... c1 = txn_proxy.new() --- ... c2 = txn_proxy.new() --- ... 
c3 = txn_proxy.new() --- ... c4 = txn_proxy.new() --- ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c4:begin() --- - ... box.info.vinyl().tx.read_views -- 0 (no read views needed) --- - 0 ... box.info.vinyl().tx.transactions -- 0 --- - 0 ... c1("s:select{1}") --- - - [[1, 2, 3]] ... c2("s:select{1}") --- - - [[1, 2, 3]] ... c3("s:select{1}") --- - - [[1, 2, 3]] ... c4("s:select{1}") --- - - [[1, 2, 3]] ... box.info.vinyl().tx.read_views -- 0 (no read views needed) --- - 0 ... box.info.vinyl().tx.transactions -- 4 --- - 4 ... c4("s:replace{1, 0, 0}") --- - - [1, 0, 0] ... box.info.vinyl().tx.read_views -- 0 (no read views needed) --- - 0 ... box.info.vinyl().tx.transactions -- 4 --- - 4 ... c4:commit() --- - ... box.info.vinyl().tx.read_views -- 1 (one read view for all TXs) --- - 1 ... box.info.vinyl().tx.transactions -- 3 --- - 3 ... c1:commit() --- - ... box.info.vinyl().tx.read_views -- 1 (one read view for all TXs) --- - 1 ... box.info.vinyl().tx.transactions -- 2 --- - 2 ... c2:rollback() --- - ... box.info.vinyl().tx.read_views -- 1 (one read view for all TXs) --- - 1 ... box.info.vinyl().tx.transactions -- 1 --- - 1 ... c3:commit() --- - ... box.info.vinyl().tx.read_views -- 0 (no read views needed) --- - 0 ... box.info.vinyl().tx.transactions -- 0 (all done) --- - 0 ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/0000775000000000000000000000000013306565107017763 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/0000775000000000000000000000000013306565107020432 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/0000775000000000000000000000000013306565107020743 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/0/0000775000000000000000000000000013306565107021102 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/0/00000000000000000023.run0000664000000000000000000000021613306565107023613 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ k?٧! aaftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/0/00000000000000000023.index0000664000000000000000000000027213306565107024120 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ AΚK(/Pd  @ e`*  9>Wtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/1/0000775000000000000000000000000013306565107021103 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/1/00000000000000000021.run0000664000000000000000000000021613306565107023612 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ L3R!aa ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/514/1/00000000000000000021.index0000664000000000000000000000032013306565107024111 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ Wh<(/Pudaa @e`* !p_ |wh?&.F(Wtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/00000000000000000010.snap0000664000000000000000000000250213306565107023274 0ustar rootrootSNAP 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {1: 10} պ  6_54\3 hg?rC}>9[GmP" F^BYVbk\6'mTc|Rϙnԥy%|wriTRx"@r\ ܺo0mV!s^% 
[2~P렔ИtX@d$!b%j.i6SIy|3NQ@-: PHk(0*k; m$E3ֆBj4lnvD\*#SQ5 EX;AuU"Q$W 0PG)|tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/00000000000000000000.vylog0000664000000000000000000000300213306565107023466 0ustar rootrootVYLOG 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ A ! AJ0{!unsignedAJ0{!պ ;wkݧAJ0!unsignedAJ0!պ ;Τ)AJ0~!stringAJ0~!պ OSJAJ0e!պ F YAJ0s! AJ0s!AJ0s!  պ RA9AJ0uR!պ FUCAJ0ـ߁! AJ0ـ߁!AJ0ـ߁!  պ Π AJ0,!պ F;yߏAJ0\! AJ0\!AJ0\!  պ Ρ|%AJ0X! պ FSqAJ0ց! AJ0ց!  AJ0ց!  պ ΗAJ0! պ ̀έ[&AJ0~!  AJ0~! AJ0~!AJ0~! AJ0~! AJ0~!  պ HAJ0恩! պ FΘJAJ0.! AJ0.! AJ0.!  պ ΕcAJ0f!պ F9@AJ0棯! AJ0棯!AJ0棯!  պ Ά~[AJ0؁!պ ̨΅ǫ AJ0涉! AJ0涉! AJ0涉! AJ0涉!AJ0涉!AJ0涉! AJ0涉! AJ0涉!tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0000775000000000000000000000000013306565107020745 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/0000775000000000000000000000000013306565107021104 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000040.run0000664000000000000000000000064413306565107023621 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ  *!fպ Α*Ч+!fպ  ,!fպ Δݧ-!fպ u0$i.!fպ cKx/!fպ 0! fպ s1! 
ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000077.index0000664000000000000000000000027213306565107024133 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ AȒe(/Pd::@ e`( >tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000056.index0000664000000000000000000000036213306565107024130 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ y")(/PDd:A@ e`( ̈(Px `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000075.run0000664000000000000000000000033413306565107023625 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ fݧ?! fպ  G@! fպ 2jA! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000044.run0000664000000000000000000000064413306565107023625 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ ·{2!fպ θ?3!fպ Af4!fպ ν!25!fպ uW6!fպ J 7! fպ Gԧ8! fպ 80Y9! 
ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000035.index0000664000000000000000000000036213306565107024125 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ yMЧ(/PDd")@ e`( ̈(Px `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000044.index0000664000000000000000000000036213306565107024125 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ y;(/PDd29@ e`( ̈(Px `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000042.index0000664000000000000000000000036213306565107024123 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ y(/PDd*1@ e`( ̈(Px `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000073.run0000664000000000000000000000026413306565107023625 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ Qɧ=!fպ ΰfc}>! 
ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000052.run0000664000000000000000000000033413306565107023620 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ ·{2!fպ θ?3!fպ Af4!ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000052.index0000664000000000000000000000032113306565107024117 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ X5Jk(/P}dd24@ e`( ̈ `ط47ЇP tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000073.index0000664000000000000000000000031313306565107024123 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ R=(/PMDd=>@ e`( ̈ 5ymAO1tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000071.run0000664000000000000000000000026413306565107023623 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ u;!fպ ΄eL! fպ fݧ?! fպ  G@! fպ 2jA! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000071.index0000664000000000000000000000031313306565107024121 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ R:(/PMDd;<@ e`( ̈ 5ymA` tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000054.run0000664000000000000000000000045413306565107023625 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ ν!25!fպ uW6!fպ J 7! fպ Gԧ8! fպ 80Y9! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/516/0/00000000000000000035.run0000664000000000000000000000064413306565107023625 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ OI"!fպ  Z#!fպ y~ç$!fպ 3ߗ%!fպ ΰC&!fպ Ώ\'!fպ q(!fպ ζ)! 
ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/version0000664000000000000000000000002313306565107022035 0ustar rootroot1.7.4-52-g980d3009 tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/00000000000000000010.vylog0000664000000000000000000001150013306565107023471 0ustar rootrootVYLOG 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {1: 10} պ _΋!unsigned!!unsigned!  ! ! ! ! ! !!!  !!string!  ! ! ! ! !! !!!! պ Pi-AJ05Ɂ!պ IXAJ08! պ λ AJ09k!պ 8AJ0:+!պ YPçAJ0:! պ ;LвAJ0DK! unsignedAJ0DK! պ ;"AJ0I! stringAJ0I! պ AJ0A! պ FA\#AJ0ہ!  AJ0ہ!AJ0ہ! պ ΢;&AJ0֘! պ F[KAJ0!  AJ0!AJ0! պ R΀AJ0! AJ0!AJ0! AJ0! պ R͍AJ0! AJ0!AJ0! AJ0! պ ; AJ0/́!unsignedAJ0/́!պ ;D;ZAJ0:!stringAJ0:!պ ΂%AJ0!պ F$DAJ0I! AJ0I!AJ0I!  պ )AJ0΁!պ FΣ跧AJ0ޟ! AJ0ޟ!AJ0ޟ!  պ Rί9AJ0ԙ! AJ0ԙ!AJ0ԙ! AJ0ԙ!պ R첺NAJ0h! AJ0h!AJ0h! AJ0h!պ ;s2AJ0!unsignedAJ0!պ ;KAJ03!stringAJ03! պ ;tڧAJ0! unsignedAJ0! !պ VAJ0v!"պ adϰAJ0! #պ F}AJ0m!" AJ0m! "$AJ0m!  պ F΅ P'AJ0! # )AJ0!!#%AJ0! )պ ]AJ0!&պ FUاAJ0 !& AJ0 !&'AJ0 !  պ AJ1Vρ! (պ FfAJ1s! ( 1AJ1s!!()AJ1s! 1պ NJAJ1v! *պ ̄9AJ1:! )AJ1:! %AJ1:!# AJ1:!( AJ1:! * 1AJ1:!!*+պ /BAJ1! ,պ F؏AJ1^! , 9AJ1^!!,-AJ1^! 9պ Μk\AJ1ҏ! -AJ1ҏ! +AJ1ҏ!!AJ1ҏ! .AJ1ҏ!.,0AJ1ҏ!.*/AJ1ҏ! 1AJ1ҏ!1,3AJ1ҏ!1*2պ "AJ1ӈ! 4պ XhAJ12! 0AJ12! /AJ12! 4 9AJ12!.45պ ؟AJ1f! 6պ ̄AJ1! 3AJ1! 2AJ1!* AJ1!, AJ1! 6 9AJ1!167պ ·yAJ1! 8պ d)AJ1! 8 AAJ1!.89AJ1!18:AJ1! Aպ gzAJ1y! 9AJ1y! 5AJ1y!.AJ1y! ;AJ1y!;8=AJ1y!;4<AJ1y! >AJ1y!>8@AJ1y!>4?պ /˦AJ1! :AJ1! 7AJ1!1AJ1! AAJ1!A8CAJ1!A6BAJ1! DAJ1!D8FAJ1!D6Eպ AJ1r! Gպ Xk^}oAJ1׼! @AJ1׼! ?AJ1׼! G AAJ1׼!>GHպ JolAJ1! Iպ X&aAJ1! CAJ1! BAJ1! I AAJ1!AIJպ ΏAJ1! Kպ nΫAJ11! FAJ11! EAJ11!6 AJ11! K AAJ11!DKLպ KAJ1B! Mպ ̄OAJ1(! =AJ1(! <AJ1(!4 AJ1(!8 AJ1(! 
M AAJ1(!;MNպ &|)AJ1!AJ1!tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/00000000000000000010.xlog0000664000000000000000000000673013306565107023313 0ustar rootrootXLOG 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {1: 10} պ (bZd AJ0<> max_id!+պ /Ϊ AJ0@!test_dropvinylպ ̛΋Ja AJ0D !i1treepage_size run_count_per_levelrun_size_ratio@ uniqueébloom_fpr?lsn range_size@unsignedպ ̙΄ AJ0JI !i2treepage_size run_count_per_levelrun_size_ratio@ uniqueébloom_fpr?lsn range_size@stringպ VAJ0L! aaպ ΙM AJ0!bbպ <AJ07 պ ΜAJ0 @ պ $@ AJ0 ( պ (9'AJ0#< max_id!+պ 3Αr{_AJ0)2!test_truncatevinylպ ̛yk:AJ00 !i1treepage_size run_count_per_levelrun_size_ratio@ uniqueébloom_fpr?lsnrange_size@unsignedպ ̙2BUAJ0= !i2treepage_size run_count_per_levelrun_size_ratio@ uniqueébloom_fpr?lsnrange_size@stringպ οRKAJ0D!aպ  )AJ0n! abպ ΅WAJ0ς պ ΩݦAJ0ܫ պ S RAJ0  !i1treerange_size@page_size lsnunsignedպ Q;\nAJ0R !i2treerange_size@page_size lsnstringպ Ί\&AJ0 !{abcպ (dطAJ0  max_id!+պ 0Μ>% AJ0!test_splitvinylպ ̏۔!AJ0 !pktreepage_sizerun_count_per_levelrun_size_ratiouniqueébloom_fpr?lsn range_sizeunsignedպ S "AJ0y!պ E%#AJ0 !պ ȈE$AJ0!!պ tŧ%AJ0"!պ a&AJ0#!պ ]'AJ0%!պ Y7(AJ0'-!պ ]Y)AJ0('! պ )H*AJ1S!պ Ψq+AJ1x!պ ΤU̧,AJ1!պ ̝屧-AJ1!պ aU.AJ1<!պ :/AJ1Ż!պ Fw0AJ1ɑ! պ D1AJ1r! պ Q2AJ1\!պ ljW3AJ1_!պ t#٧4AJ1a!պ λ?65AJ1d=!պ 5~d>6AJ1f!պ Θ77AJ1h! պ Γ=8AJ1k;! պ Eo9AJ1l! պ Vl:AJ1!պ Β:M;AJ1 I!պ νl<AJ1 u!պ _젧=AJ1 !պ ^kE>AJ1U! պ Ξ>?AJ1! պ Cl@AJ1! պ H_JAAJ1B! 
պ 0ABAJ1҂ պ /0 CAJ1Z tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/0000775000000000000000000000000013306565107020744 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/0/0000775000000000000000000000000013306565107021103 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/0/00000000000000000038.index0000664000000000000000000000027213306565107024127 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ A9(/Pd{{@e`+{ 9@%ctarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/0/00000000000000000029.run0000664000000000000000000000021513306565107023621 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ o<:!aftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/0/00000000000000000038.run0000664000000000000000000000021713306565107023623 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ r7!{abcftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/0/00000000000000000029.index0000664000000000000000000000027213306565107024127 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ A h(/Pd@ e`) >tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/1/0000775000000000000000000000000013306565107021104 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/1/00000000000000000034.index0000664000000000000000000000031413306565107024121 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ S(/PUdabc{@ e`+ "p2&# ?ab8$tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/1/00000000000000000027.run0000664000000000000000000000021513306565107023620 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ 
!aftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/1/00000000000000000027.index0000664000000000000000000000031413306565107024123 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ SQ(/PU4daa@ e`)a w@\~tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/515/1/00000000000000000034.run0000664000000000000000000000021713306565107023620 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ -$$N!abc{ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/0000775000000000000000000000000013306565107020742 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/0/0000775000000000000000000000000013306565107021101 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/0/00000000000000000011.run0000664000000000000000000000023413306565107023607 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ % I,!a !bf tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/0/00000000000000000015.index0000664000000000000000000000027213306565107024120 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ AΒD(/Pd  @ e`) 9>G1tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/0/00000000000000000015.run0000664000000000000000000000021513306565107023612 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ M0 !cftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/0/00000000000000000011.index0000664000000000000000000000027713306565107024121 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ Fg8ܧ(/Pd @ e`8%=: HJ` tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/1/0000775000000000000000000000000013306565107021102 5ustar 
rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/1/00000000000000000017.index0000664000000000000000000000035213306565107024122 0ustar rootrootINDEX 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ qCa(/PEdac @ !@@e`Ga4!>A%;0YP[0 `OV"(H.L#rNtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.4/513/1/00000000000000000017.run0000664000000000000000000000025313306565107023617 0ustar rootrootRUN 0.13 Version: 1.7.4-52-g980d3009 Instance: f9303557-4a86-4f34-8eb0-1b9a5efb4de6 VClock: {} պ 4ո5!a !b !cf tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/fill.lua0000664000000000000000000000545413306565107021424 0ustar rootroot-- -- This script generates a vinyl metadata log -- containing all possible record types. -- fiber = require 'fiber' box.cfg{vinyl_memory = 1024 * 1024, vinyl_timeout = 0.1, checkpoint_count = 1} dump_trigger = box.schema.space.create('dump_trigger', {engine = 'vinyl'}) dump_trigger:create_index('pk') -- Trigger dump of all indexes and wait for it to finish. -- -- We trigger dump by attempting to insert a huge (> memory limit) -- tuple into a vinyl memory space. Before failing on timeout this -- makes the scheduler force dump. 
function dump() pcall(dump_trigger.insert, dump_trigger, {1, string.rep('x', 1024 * 1024)}) while box.info.vinyl().quota.used > 0 do fiber.sleep(0.1) end end -- -- Create a space: -- -- VY_LOG_CREATE_INDEX -- VY_LOG_INSERT_RANGE -- s = box.schema.space.create('test', {engine = 'vinyl'}) s:create_index('i1', {parts = {1, 'unsigned'}, run_count_per_level = 1}) s:create_index('i2', {parts = {2, 'string'}, run_count_per_level = 2}) -- -- Trigger compaction: -- -- VY_LOG_PREPARE_RUN -- VY_LOG_CREATE_RUN -- VY_LOG_DROP_RUN -- VY_LOG_INSERT_SLICE -- VY_LOG_DELETE_SLICE -- s:insert{1, 'a'} dump() s:insert{2, 'b'} dump() s:insert{3, 'c'} dump() -- -- Make a snapshot and collect garbage: -- -- VY_LOG_SNAPSHOT -- VY_LOG_FORGET_RUN -- -- Note, this purges: -- -- VY_LOG_PREPARE_RUN -- VY_LOG_DELETE_SLICE -- box.snapshot() -- -- Space drop: -- -- VY_LOG_CREATE_INDEX -- VY_LOG_DROP_INDEX -- VY_LOG_PREPARE_RUN -- VY_LOG_CREATE_RUN -- VY_LOG_DROP_RUN -- VY_LOG_INSERT_RANGE -- VY_LOG_DELETE_RANGE -- VY_LOG_INSERT_SLICE -- VY_LOG_DELETE_SLICE -- s = box.schema.space.create('test_drop', {engine = 'vinyl'}) s:create_index('i1', {parts = {1, 'unsigned'}}) s:create_index('i2', {parts = {2, 'string'}}) s:insert{11, 'aa'} dump() s:insert{22, 'bb'} s:drop() -- -- Space truncation. -- -- Before 1.7.4-126-g2ba51ab2, implemented as index drop/create. -- In newer versions, writes a special record: -- -- VY_LOG_TRUNCATE_INDEX -- s = box.schema.space.create('test_truncate', {engine = 'vinyl'}) s:create_index('i1', {parts = {1, 'unsigned'}}) s:create_index('i2', {parts = {2, 'string'}}) s:insert{1, 'a'} dump() s:insert{12, 'ab'} s:truncate() s:insert{123, 'abc'} -- -- Create a space and trigger range splitting: -- -- VY_LOG_CREATE_INDEX -- VY_LOG_PREPARE_RUN -- VY_LOG_CREATE_RUN -- VY_LOG_DROP_RUN -- VY_LOG_INSERT_RANGE with finite begin/end. 
-- VY_LOG_DELETE_RANGE -- VY_LOG_INSERT_SLICE with finite begin/end -- VY_LOG_DELETE_SLICE -- s = box.schema.space.create('test_split', {engine = 'vinyl'}) s:create_index('pk', {page_size = 4, range_size = 16, run_count_per_level = 1, run_size_ratio = 1000}) for i = 1, 4 do for k = 1, 8 do s:replace{k, i + k} end dump() end assert(s.index.pk:info().range_count >= 2) dump_trigger:drop() os.exit(0) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/0000775000000000000000000000000013306565107020433 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/0000775000000000000000000000000013306565107020744 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/0/0000775000000000000000000000000013306565107021103 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/0/00000000000000000023.run0000664000000000000000000000022013306565107023607 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ k?٧! 
aaftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/0/00000000000000000023.index0000664000000000000000000000027413306565107024123 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ A΀"(/Pd  @ eb*  9>Wtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/1/0000775000000000000000000000000013306565107021104 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/1/00000000000000000021.run0000664000000000000000000000022013306565107023606 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ L3R!aa ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/514/1/00000000000000000021.index0000664000000000000000000000032213306565107024114 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ WNw|(/Pudaa @eb* !p_ |wh?&.F(Wtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/00000000000000000010.snap0000664000000000000000000000251013306565107023274 0ustar rootrootSNAP 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {1: 10} պ [(/P&F8BdşKvA]r^N%޸CA;g3)rP h>8 yRrsxC~ҾKPk~ #ߟ&w63;#,J Tn}[N~TJ4eO [O&voh𭧽;(egPԵh$ʸD\歈P($ʥlX$;Y NlGNf'Nɭ36yox5_`޲V!SAf4( |-Ǿ$e p =Mǜw/}gL-$^fR;"@37 eۥ$3yL(MṂ\Iȗl*RE%FX@ nIK6՚j#;s_sz1Go Er{Ko>]r!7f6 zZteĞF&Իıe"gɭe!{48>a[rtٛ[OCTNm$>/5HЅŊËrG# 7S&RN1ɲ/V6b.0ZX1lFF df0   4@i0Z6j <hCgRX"ٳGA>TkJ j(T=@)I{ |t\Yy"ztu@vbVA [$Se_V+@a;n\6Nb:PEєU6Yj=!&(9B(iøvx! fպ kW?! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000044.run0000664000000000000000000000064613306565107023630 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ 8aA0!fպ Q!1!fպ ΄<U2!fպ x3!fպ  4!fպ @)5! fպ v`6! fպ  xtp7! 
ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000035.index0000664000000000000000000000036413306565107024130 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ yٷ(/PDd '@ eb( ̊*Rz `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000044.index0000664000000000000000000000036413306565107024130 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ y+d(/PDd07@ eb( ̊*Rz `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000042.index0000664000000000000000000000036413306565107024126 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ y΂{t&(/PDd(/@ eb( ̊*Rz `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000073.run0000664000000000000000000000026613306565107023630 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ Π=R˧9!fպ A:!ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000052.run0000664000000000000000000000033613306565107023623 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ 8aA0!fպ Q!1!fպ ΄<U2!ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000052.index0000664000000000000000000000032313306565107024122 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ X(/P}dd02@ eb( ̊ `ط47ЇP tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000073.index0000664000000000000000000000031513306565107024126 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ RHFݧ(/PMDd9:@ eb( ̊ 5ymA` 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000071.run0000664000000000000000000000021613306565107023621 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ =k8!ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000042.run0000664000000000000000000000064613306565107023626 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ ζ *(!fպ .Βn)!fպ @ڧ*!fպ Q+!fպ ק,!fպ \Ƨ-!fպ ={r.! fպ  ǟ/! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000040.index0000664000000000000000000000036413306565107024124 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ y΂{t&(/PDd(/@ eb( ̊*Rz `0Fj ([8Yii^[wjtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000077.run0000664000000000000000000000026613306565107023634 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ Δ^T;!fպ ç! fպ kW?! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000071.index0000664000000000000000000000027413306565107024130 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ A΅}(/Pd88@ eb( >tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000054.run0000664000000000000000000000045613306565107023630 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ x3!fպ  4!fպ @)5! fպ v`6! fպ  xtp7! ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/516/0/00000000000000000035.run0000664000000000000000000000064613306565107023630 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ s$ !fպ Le!!fպ  "!fպ d`#!fպ vG$!fպ 0=%!fպ ]X&!fպ ·է'! 
ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/00000000000000000010.vylog0000664000000000000000000001130313306565107023473 0ustar rootrootVYLOG 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {1: 10} պ _h¥!unsigned!!unsigned!  ! ! ! ! ! !!!  !!string!  ! ! ! ! !! !!!! պ dȧAfˌ!պ ΀'XAfˌ! պ r#Afˌ g!պ e!Afˌ!պ )7Afˌʁ! պ ;=AfˌGR! unsignedAfˌGR! պ ;$ߧAfˌL!stringAfˌL!պ ۔Afˌ!պ FAfˌ! Afˌ!Afˌ!  պ lY%Afˌ! պ F1vAfˌ!  Afˌ!Afˌ! պ RΘAfˌ! Afˌ!Afˌ! Afˌ!պ RMAfˌ! Afˌ!Afˌ! Afˌ! պ ;Y䕧Afˌ-!unsignedAfˌ-!պ ;tAfˌ6"!stringAfˌ6"!պ Q=ԧAfˌwG!պ FW:KAfˌ! Afˌ!Afˌ!  պ ΰAfˌ!պ F_Afˌ! Afˌ!Afˌ!  պ  MAfˌ`! Afˌ`!Afˌ`! Afˌ`!Afˌ`!  Afˌ`! Afˌ`!Afˌ`! Afˌ`! Afˌ`!  պ ;oAfˌ!unsignedAfˌ!!պ ΚzAfˌ_l!"պ T AƧAfˌ`p!#պ FI@VAfˌ{)!# 'Afˌ{)!!#$Afˌ{)!  'պ FsespAfˌ'!" Afˌ'! "%Afˌ'!  պ SҧAfˌ!&պ FAfˌ!& Afˌ!&'Afˌ!  պ >yAfˌ!(պ FΓD"Afˌ7!( /Afˌ7!!()Afˌ7!  /պ rYAfˌ8Ł!*պ ̄AfˌS3! )AfˌS3! $AfˌS3!# AfˌS3!( AfˌS3!* /AfˌS3!!*+պ Afˌ)!,պ FWtAfˌx!, 7Afˌx!!,-Afˌx!  7պ pAfˌÁ! -AfˌÁ! +AfˌÁ!!AfˌÁ!.AfˌÁ!.,0AfˌÁ!.*/AfˌÁ!1AfˌÁ!1,3AfˌÁ!1*2պ ΐv%Afˌ!4պ XPYAfˌ! 0Afˌ! /Afˌ!4 7Afˌ!.45պ CAfˌ#!6պ ̄ΙߨAfˌ4! 3Afˌ4! 2Afˌ4!* Afˌ4!, Afˌ4!6 7Afˌ4!167պ Φ%HAfˌ.!8պ dawdAfˌ=!!8 ?Afˌ=!!.89Afˌ=!!18:Afˌ=!!  ?պ Afˌ>ف! 9Afˌ>ف! 5Afˌ>ف!.Afˌ>ف!;Afˌ>ف!;8=Afˌ>ف!;4<Afˌ>ف!>Afˌ>ف!>8@Afˌ>ف!>4?պ Γ%BjAfˌ?! :Afˌ?! 7Afˌ?!1Afˌ?!AAfˌ?!A8CAfˌ?!A6BAfˌ?!DAfˌ?!D8FAfˌ?!D6Eպ 9[ Afˌ@!Gպ XU.OAfˌ^݁! =Afˌ^݁! <Afˌ^݁!G ?Afˌ^݁!;GHպ ّAfˌ`5!Iպ nm$Afˌ! @Afˌ! ?Afˌ!4 Afˌ!I ?Afˌ!>IJպ ΝGUoAfˌ!Kպ XCnAfˌ! FAfˌ! EAfˌ!K ?Afˌ!DKLպ μPAfˌ!Mպ ̄ҦAfˌځ! CAfˌځ! 
BAfˌځ!6 Afˌځ!8 Afˌځ!M ?Afˌځ!AMNպ &=>Afˌ߁!Afˌ߁!tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/00000000000000000010.xlog0000664000000000000000000000645313306565107023316 0ustar rootrootXLOG 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {1: 10} պ (6F Afˌi max_id!+պ /Ό Afˌ'9!test_dropvinylպ ̖XY AfˌF !i1treeuniqueórun_count_per_levelrun_size_ratio@ bloom_fpr?page_size range_size@unsignedպ ̔ιDTAfˌL !i2treeuniqueórun_count_per_levelrun_size_ratio@ bloom_fpr?page_size range_size@stringպ ~AfˌM! aaպ ΐOAfˌA!bbպ 7PAfˌ պ  Afˌ\ պ }[Afˌ!6 պ θoAfˌ")J պ (NAfˌ# max_id!+պ 3<Afˌ&p!test_truncatevinylպ ̖ΞʼnAfˌ- !i1treeuniqueórun_count_per_levelrun_size_ratio@ bloom_fpr?page_size range_size@unsignedպ ̔έ?KئAfˌ5v !i2treeuniqueórun_count_per_levelrun_size_ratio@ bloom_fpr?page_size range_size@stringպ Τn!Afˌ7!aպ 4LKAfˌ ! abպ #O AfˌTJ(+!պ !ӧAfˌނ!{abcպ (8AfˌՄ max_id!+պ 0ѕeFAfˌ !test_splitvinylպ ̊J-AfˌM !pktreeuniqueórun_count_per_levelrun_size_ratiobloom_fpr?page_sizerange_sizeunsignedպ R AfˌD!պ ʺM!AfˌU!պ ΅"AfˌD!պ Ε6#Afˌ!պ s$Afˌ!պ > %Afˌe!պ -&&Afˌʂ!պ XE'Afˌm! պ (AfˌԀ!պ  })Afˌ!պ **Afˌי!պ ΋i ܧ+Afˌk!պ ZwTݧ,Afˌo!պ 6^v-Afˌ0!պ Ε1.Afˌ۾! պ T޻/AfˌƂ! պ "fX0AfˌRn!պ ]?1AfˌS!պ I\2AfˌU!պ Ώ>~3AfˌWi!պ -4AfˌXu!պ '5AfˌY! պ Ύ6AfˌY! պ >27AfˌZW! պ ΄8Afˌ !պ V!9Afˌһ!պ ΒQ:AfˌA!պ =c ;Afˌ!պ Γ/<AfˌQ! պ 6=Afˌ! պ txr>Afˌ! պ ΢$?Afˌ{! 
պ A@Afˌ պ εKyAAfˌւ պ ΦQBAfˌqJ tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/0000775000000000000000000000000013306565107020745 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/0/0000775000000000000000000000000013306565107021104 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/0/00000000000000000038.index0000664000000000000000000000027413306565107024132 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ A΢^(/Pd{{@eb+{ 9@%ctarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/0/00000000000000000029.run0000664000000000000000000000021713306565107023624 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ `U!aftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/0/00000000000000000038.run0000664000000000000000000000022113306565107023617 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ ]!{abcftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/0/00000000000000000029.index0000664000000000000000000000027413306565107024132 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ A](/Pd@ eb) >tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/1/0000775000000000000000000000000013306565107021105 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/1/00000000000000000034.index0000664000000000000000000000031613306565107024124 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ S]K(/PUdabc{@ eb+ "p2&# ?ab8$tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/1/00000000000000000027.run0000664000000000000000000000021713306565107023623 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ 
!aftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/1/00000000000000000027.index0000664000000000000000000000031613306565107024126 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ S$V(/PU4daa@ eb)a w@\~tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/515/1/00000000000000000034.run0000664000000000000000000000022113306565107023614 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ C4!abc{ftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/0000775000000000000000000000000013306565107020743 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/0/0000775000000000000000000000000013306565107021102 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/0/00000000000000000011.run0000664000000000000000000000023613306565107023612 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ % I,!a !bf tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/0/00000000000000000015.index0000664000000000000000000000027413306565107024123 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ AΈq(/Pd  @ eb) 9>G1tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/0/00000000000000000015.run0000664000000000000000000000021713306565107023615 0ustar rootrootRUN 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ M0 !cftarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/0/00000000000000000011.index0000664000000000000000000000030113306565107024106 0ustar rootrootINDEX 0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ FA%;0YP[0 `OV"(H.L#rNtarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade/1.7.5/513/1/00000000000000000017.run0000664000000000000000000000025513306565107023622 0ustar rootrootRUN 
0.13 Version: 1.7.4-462-g833895bd4 Instance: 3206ae74-b369-4bb1-bbe6-abf35a334192 VClock: {} պ 4ո5!a !b !cf tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade.lua0000664000000000000000000000016613306560010020455 0ustar rootroot#!/usr/bin/env tarantool box.cfg{ listen = os.getenv("LISTEN"), } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/dump_stress.test.lua0000664000000000000000000000253413306560010022355 0ustar rootroottest_run = require('test_run').new() test_run:cmd("create server test with script='vinyl/low_quota.lua'") test_run:cmd("start server test with args='1048576'") test_run:cmd('switch test') fiber = require 'fiber' pad_size = 1000 test_run:cmd("setopt delimiter ';'") function gen_tuple(k) local pad = {} for i = 1,pad_size do pad[i] = string.char(math.random(65, 90)) end return {k, k + 1, k + 2, k + 3, table.concat(pad)} end test_run:cmd("setopt delimiter ''"); s = box.schema.space.create('test', {engine='vinyl'}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) _ = s:create_index('i2', {unique = false, parts = {2, 'unsigned'}}) _ = s:create_index('i3', {unique = false, parts = {3, 'unsigned'}}) _ = s:create_index('i4', {unique = false, parts = {4, 'unsigned'}}) -- -- Schedule dump caused by snapshot and memory shortage concurrently. -- _ = fiber.create(function() while true do box.snapshot() fiber.sleep(0.01) end end) test_run:cmd("setopt delimiter ';'") for i = 1, 10 * box.cfg.vinyl_memory / pad_size do s:replace(gen_tuple(i)) if i % 100 == 0 then box.commit() box.begin() end end test_run:cmd("setopt delimiter ''"); s.index.i1:count() s.index.i2:count() s.index.i3:count() s.index.i4:count() test_run:cmd('switch default') test_run:cmd("stop server test") test_run:cmd("cleanup server test") tarantool_1.9.1.26.g63eb81e3c/test/vinyl/partial_dump.test.lua0000664000000000000000000000306513306560010022466 0ustar rootroot-- -- 1. 
Create a space which has more indexes that can be scheduled -- for dump simultaneously (> vinyl_write_threads). -- -- 2. Insert tuples and then update values of secondary keys. -- -- 3. Inject a dump error for a random index. Try to make a snapshot. -- -- 4. Restart and check the space. -- test_run = require('test_run').new() INDEX_COUNT = box.cfg.vinyl_write_threads * 3 assert(INDEX_COUNT < 100) s = box.schema.space.create('test', {engine='vinyl'}) for i = 1, INDEX_COUNT do s:create_index('i' .. i, {parts = {i, 'unsigned'}}) end test_run:cmd("setopt delimiter ';'") function make_tuple(key, val) local tuple = {} tuple[1] = key for i = 2, INDEX_COUNT do tuple[i] = val * (i - 1) end return tuple end test_run:cmd("setopt delimiter ''"); for i = 1, 5 do s:insert(make_tuple(i, i)) end for i = 1, 5 do s:replace(make_tuple(i, i * 100)) end math.randomseed(os.time()) box.error.injection.set('ERRINJ_VY_INDEX_DUMP', math.random(INDEX_COUNT) - 1) box.snapshot() box.error.injection.set('ERRINJ_VY_INDEX_DUMP', -1) test_run:cmd('restart server default') INDEX_COUNT = box.cfg.vinyl_write_threads * 3 assert(INDEX_COUNT < 100) s = box.space.test s:select() bad_index = -1 test_run:cmd("setopt delimiter ';'") for i = 1, INDEX_COUNT - 1 do if s:count() ~= s.index[i]:count() then bad_index = i end for _, v in s.index[i]:pairs() do if v ~= s:get(v[1]) then bad_index = i end end end test_run:cmd("setopt delimiter ''"); bad_index < 0 or {bad_index, s.index[bad_index]:select()} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/large.test.lua0000664000000000000000000000033313306560010021072 0ustar rootroot#!/usr/bin/env tarantool test_run = require('test_run').new() large = require('large') large.prepare() large.large(500) test_run:cmd('restart server default') large = require('large') large.check() large.teardown() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/hermitage.result0000664000000000000000000002647113306560010021537 0ustar rootroot-- -- hermitage: Testing transaction isolation levels. 
-- github.com/ept/hermitage -- -- Testing Vinyl transactional isolation in Tarantool. -- -- ************************************************************************* -- 1.7 setup begins -- ************************************************************************* test_run = require('test_run').new() --- ... txn_proxy = require('txn_proxy') --- ... _ = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = box.space.test:create_index('pk') --- ... c1 = txn_proxy.new() --- ... c2 = txn_proxy.new() --- ... c3 = txn_proxy.new() --- ... t = box.space.test --- ... -- ************************************************************************* -- 1.7 setup up marker: end of test setup -- ************************************************************************* -- ------------------------------------------------------------------------ -- READ COMMITTED basic requirements: G0 -- ------------------------------------------------------------------------ -- -- REPLACE -- -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:replace{1, 11}") --- - - [1, 11] ... c2("t:replace{1, 12}") --- - - [1, 12] ... c1("t:replace{2, 21}") --- - - [2, 21] ... c1:commit() --- - ... c2("t:replace{2, 22}") --- - - [2, 22] ... c2:commit() -- success, the last writer wins --- - ... t:get{1} -- {1, 12} --- - [1, 12] ... t:get{2} -- {2, 22} --- - [2, 22] ... -- teardown t:truncate() --- ... -- -- UPDATE -- -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:update(1, {{'=', 2, 11}})") --- - - [1, 11] ... c2("t:update(1, {{'=', 2, 12}})") --- - - [1, 12] ... c1("t:update(2, {{'=', 2, 21}})") --- - - [2, 21] ... c1:commit() --- - ... c2("t:update(2, {{'=', 2, 22}})") --- - - [2, 22] ... c2:commit() -- rollback --- - - {'error': 'Transaction has been aborted by conflict'} ... t:get{1} -- {1, 11} --- - [1, 11] ... t:get{2} -- {2, 21} --- - [2, 21] ... 
-- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- READ COMMITTED basic requirements: G1A -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:replace{1, 101}") --- - - [1, 101] ... c2("t:replace{1, 10}") --- - - [1, 10] ... c1:rollback() --- - ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2:commit() -- true --- - ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- READ COMMITTED basic requirements: G1B -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:replace{1, 101}") --- - - [1, 101] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c1("t:replace{1, 11}") --- - - [1, 11] ... c1:commit() -- ok --- - ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2:commit() -- ok --- - ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- Circular information flow: G1C -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:replace{1, 11}") --- - - [1, 11] ... c2("t:replace{2, 22}") --- - - [2, 22] ... c1("t:get{2}") -- {2, 20} --- - - [2, 20] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c1:commit() -- ok --- - ... c2:commit() -- rollback (@fixme: not necessary) --- - - {'error': 'Transaction has been aborted by conflict'} ... -- teardown t:truncate() --- ... 
-- ------------------------------------------------------------------------ -- OTV: observable transaction vanishes -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c3:begin() --- - ... c1("t:replace{1, 11}") --- - - [1, 11] ... c1("t:replace{2, 19}") --- - - [2, 19] ... c2("t:replace{1, 12}") --- - - [1, 12] ... c1:commit() -- ok --- - ... c3("t:get{1}") -- {1, 11} --- - - [1, 11] ... c2("t:replace{2, 18}") --- - - [2, 18] ... c3("t:get{2}") -- {2, 19} --- - - [2, 19] ... c2:commit() -- write only transaction - OK to commit --- - ... c3("t:get{2}") -- {2, 19} --- - - [2, 19] ... c3("t:get{1}") -- {1, 11} --- - - [1, 11] ... c3:commit() -- read only transaction - OK to commit, stays with its read view --- - ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- PMP: Predicate with many preceders -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:select()") -- {1, 10}, {2, 20} --- - - [[1, 10], [2, 20]] ... c2("t:replace{3, 30}") --- - - [3, 30] ... c2:commit() -- ok --- - ... c1("t:select()") -- still {1, 10}, {2, 20} --- - - [[1, 10], [2, 20]] ... c1:commit() -- ok --- - ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- PMP write: predicate many preceders for write predicates -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:replace{1, 20}") --- - - [1, 20] ... c1("t:replace{2, 30}") --- - - [2, 30] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{2}") -- {2, 20} --- - - [2, 20] ... 
c2("t:delete{2}") --- - ... c1:commit() -- ok --- - ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2:commit() -- rollback -- conflict --- - - {'error': 'Transaction has been aborted by conflict'} ... t:get{1} -- {1, 20} --- - [1, 20] ... t:get{2} -- {2, 30} --- - [2, 30] ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- P4: lost update: don't allow a subsequent commit to lose update -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c1("t:replace{1, 11}") --- - - [1, 11] ... c2("t:replace{1, 12}") --- - - [1, 12] ... c1:commit() -- ok --- - ... c2:commit() -- rollback -- conflict --- - - {'error': 'Transaction has been aborted by conflict'} ... -- teardown t:truncate() --- ... ------------------------------------------------------------------------ -- G-single: read skew -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{2}") -- {2, 20} --- - - [2, 20] ... c2("t:replace{1, 12}") --- - - [1, 12] ... c2("t:replace{2, 18}") --- - - [2, 18] ... c2:commit() -- ok --- - ... c1("t:get{2}") -- {2, 20} --- - - [2, 20] ... c1:commit() -- ok --- - ... -- teardown t:truncate() --- ... ------------------------------------------------------------------------ -- G-single: read skew, test with write predicate -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... 
c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{2}") -- {2, 20} --- - - [2, 20] ... c2("t:replace{1, 12}") --- - - [1, 12] ... c2("t:replace{2, 18}") --- - - [2, 18] ... c2:commit() -- T2 --- - ... c1("t:delete{2}") --- - ... c1("t:get{2}") -- finds nothing --- - ... c1:commit() -- rollback --- - - {'error': 'Transaction has been aborted by conflict'} ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- G2-item: write skew -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... c1("t:get{2}") -- {2, 20} --- - - [2, 20] ... c2("t:get{1}") -- {1, 10} --- - - [1, 10] ... c2("t:get{2}") -- {2, 20} --- - - [2, 20] ... c1("t:replace{1, 11}") --- - - [1, 11] ... c2("t:replace{1, 21}") --- - - [1, 21] ... c1:commit() -- ok --- - ... c2:commit() -- rollback -- conflict --- - - {'error': 'Transaction has been aborted by conflict'} ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- G2: anti-dependency cycles -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c2:begin() --- - ... -- select * from test where value % 3 = 0 c1("t:select()") -- {1, 10}, {2, 20} --- - - [[1, 10], [2, 20]] ... c2("t:select()") -- {1, 10}, {2, 20} --- - - [[1, 10], [2, 20]] ... c1("t:replace{3, 30}") --- - - [3, 30] ... c2("t:replace{4, 42}") --- - - [4, 42] ... c1:commit() -- ok --- - ... c2:commit() -- rollback --- - - {'error': 'Transaction has been aborted by conflict'} ... -- teardown t:truncate() --- ... 
-- ------------------------------------------------------------------------ -- G2: anti-dependency cycles with two items -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... c1("t:get{2}") -- {2, 20} --- - - [2, 20] ... c2:begin() --- - ... c2("t:replace{2, 25}") --- - - [2, 25] ... c2:commit() -- ok --- - ... c3:begin() --- - ... c3("t:get{1}") -- {1, 10} --- - - [1, 10] ... c3("t:get{2}") -- {2, 25} --- - - [2, 25] ... c3:commit() -- ok --- - ... c1("t:replace{1, 0}") --- - - [1, 0] ... c1:commit() -- rollback --- - - {'error': 'Transaction has been aborted by conflict'} ... -- teardown t:truncate() --- ... -- ------------------------------------------------------------------------ -- G2: anti-dependency cycles with two items (no replace) -- ------------------------------------------------------------------------ -- setup t:replace{1, 10} --- - [1, 10] ... t:replace{2, 20} --- - [2, 20] ... c1:begin() --- - ... c1("t:get{1}") -- {1, 10} --- - - [1, 10] ... c1("t:get{2}") -- {2, 20} --- - - [2, 20] ... c2:begin() --- - ... c2("t:replace{2, 25}") --- - - [2, 25] ... c2:commit() -- ok --- - ... c3:begin() --- - ... c3("t:get{1}") -- {1, 10} --- - - [1, 10] ... c3("t:get{2}") -- {2, 25} --- - - [2, 25] ... c3:commit() -- ok --- - ... -- c1("t:replace{1, 0)") c1:commit() -- ok --- - ... -- teardown t:truncate() --- ... -- ************************************************************************* -- 1.7 cleanup marker: end of test cleanup -- ************************************************************************* -- box.space.test:drop() --- ... c1 = nil --- ... c2 = nil --- ... c3 = nil --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/recover.test.lua0000664000000000000000000001252613306565107021470 0ustar rootrootfiber = require('fiber') test_run = require('test_run').new() -- Temporary table to restore variables after restart. var = box.schema.space.create('var') _ = var:create_index('primary', {parts = {1, 'string'}}) -- Empty space. s1 = box.schema.space.create('test1', {engine = 'vinyl'}) _ = s1:create_index('pk') -- Truncated space. s2 = box.schema.space.create('test2', {engine = 'vinyl'}) _ = s2:create_index('pk') _ = s2:insert{123} s2:truncate() -- Data space. s3 = box.schema.space.create('test3', {engine='vinyl'}) _ = s3:create_index('primary') _ = s3:create_index('secondary', {unique = false, parts = {2, 'string'}}) for i = 0, 4 do s3:insert{i, 'test' .. i} end -- Flush data to disk. box.snapshot() -- Write some data to memory. for i = 5, 9 do s3:insert{i, 'test' .. i} end -- Concurrent index creation (gh-2288). ch = fiber.channel(2) s4 = box.schema.space.create('test4', {engine = 'vinyl'}) s5 = box.schema.space.create('test5', {engine = 'vinyl'}) _ = fiber.create(function() s4:create_index('i1') s4:create_index('i2') ch:put(true) end) _ = fiber.create(function() s5:create_index('i1') s5:create_index('i2') ch:put(true) end) ch:get() ch:get() s4:insert{44} s5:insert{55} -- Remember stats before restarting the server. _ = var:insert{'vyinfo', s3.index.primary:info()} test_run:cmd('restart server default') s1 = box.space.test1 s2 = box.space.test2 s3 = box.space.test3 s4 = box.space.test4 s5 = box.space.test5 var = box.space.var -- Check space contents. s1:select() s2:select() s3.index.primary:select() s3.index.secondary:select() s4.index.i1:select() s4.index.i2:select() s5.index.i1:select() s5.index.i2:select() -- Check that stats didn't change after recovery. 
vyinfo1 = var:get('vyinfo')[2] vyinfo2 = s3.index.primary:info() vyinfo1.memory.rows == vyinfo2.memory.rows vyinfo1.memory.bytes == vyinfo2.memory.bytes vyinfo1.disk.rows == vyinfo2.disk.rows vyinfo1.disk.bytes == vyinfo2.disk.bytes vyinfo1.disk.bytes_compressed == vyinfo2.disk.bytes_compressed vyinfo1.disk.pages == vyinfo2.disk.pages vyinfo1.run_count == vyinfo2.run_count vyinfo1.range_count == vyinfo2.range_count s1:drop() s2:drop() s3:drop() s4:drop() s5:drop() var:drop() test_run:cmd('create server force_recovery with script="vinyl/force_recovery.lua"') test_run:cmd('start server force_recovery') test_run:cmd('switch force_recovery') fio = require'fio' test = box.schema.space.create('test', {engine = 'vinyl'}) _ = test:create_index('pk') for i = 0, 9999 do test:replace({i, i, string.rep('a', 512)}) end box.snapshot() for i = 10000, 11999 do test:delete({i - 10000}) end box.snapshot() for i = 12000, 13999 do test:upsert({i - 10000, i, string.rep('a', 128)}, {{'+', 2, 5}}) end box.snapshot() for _, f in pairs(fio.glob(box.cfg.vinyl_dir .. '/' .. test.id .. '/0/*.index')) do fio.unlink(f) end _ = box.schema.space.create('info') _ = box.space.info:create_index('pk') _ = box.space.info:insert{1, box.space.test.index.pk:info()} test2 = box.schema.space.create('test2', {engine = 'vinyl'}) _ = test2:create_index('pk') _ = test2:create_index('sec', {parts = {4, 'unsigned', 2, 'string'}}) test2:replace({1, 'a', 2, 3}) test2:replace({2, 'd', 4, 1}) test2:replace({3, 'c', 6, 7}) test2:replace({4, 'b', 6, 3}) box.snapshot() for _, f in pairs(fio.glob(box.cfg.vinyl_dir .. '/' .. test2.id .. '/0/*.index')) do fio.unlink(f) end for _, f in pairs(fio.glob(box.cfg.vinyl_dir .. '/' .. test2.id .. 
'/1/*.index')) do fio.unlink(f) end test_run = require('test_run').new() test_run:cmd('switch default') test_run:cmd('stop server force_recovery') test_run:cmd('start server force_recovery') test_run:cmd('switch force_recovery') sum = 0 for k, v in pairs(box.space.test:select()) do sum = sum + v[2] end -- should be a sum(2005 .. 4004) + sum(4000 .. 9999) = 48006000 sum -- Check that disk stats are restored after index rebuild (gh-3173). old_info = box.space.info:get(1)[2] new_info = box.space.test.index.pk:info() new_info.disk.index_size == old_info.disk.index_size new_info.disk.bloom_size == old_info.disk.bloom_size new_info.disk.rows == old_info.disk.rows new_info.disk.bytes == old_info.disk.bytes new_info.disk.bytes_compressed == old_info.disk.bytes_compressed new_info.disk.pages == old_info.disk.pages new_info.run_count == old_info.run_count new_info.range_count == old_info.range_count box.space.test2:select() box.space.test2.index.sec:select() test_run:cmd('switch default') test_run:cmd('stop server force_recovery') test_run:cmd('delete server force_recovery') -- garbaged vy run indexes test_run:cmd('create server force_recovery with script="vinyl/bad_run_indexes.lua"') test_run:cmd('start server force_recovery') test_run:cmd('switch force_recovery') test = box.schema.space.create('test', {engine = 'vinyl'}) _ = test:create_index('pk') for i = 0, 9999 do test:replace({i, i, string.rep('a', 512)}) end box.snapshot() for i = 10000, 11999 do test:delete({i - 10000}) end box.snapshot() for i = 12000, 13999 do test:upsert({i - 10000, i, string.rep('a', 128)}, {{'+', 2, 5}}) end box.snapshot() test_run:cmd('switch default') test_run:cmd('stop server force_recovery') test_run:cmd('start server force_recovery') test_run:cmd('switch force_recovery') sum = 0 for k, v in pairs(box.space.test:select()) do sum = sum + v[2] end -- should be a sum(2005 .. 4004) + sum(4000 .. 
9999) = 48006000 sum test_run:cmd('switch default') test_run:cmd('stop server force_recovery') test_run:cmd('cleanup server force_recovery') tarantool_1.9.1.26.g63eb81e3c/test/vinyl/bloom.result0000664000000000000000000000413413306565107020706 0ustar rootroottest_run = require('test_run').new() --- ... -- -- Setting bloom_fpr to 1 disables bloom filter. -- s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk', {bloom_fpr = 1}) --- ... for i = 1, 10, 2 do s:insert{i} end --- ... box.snapshot() --- - ok ... for i = 1, 10 do s:get{i} end --- ... stat = s.index.pk:info() --- ... stat.disk.bloom_size -- 0 --- - 0 ... stat.disk.iterator.bloom.hit -- 0 --- - 0 ... stat.disk.iterator.bloom.miss -- 0 --- - 0 ... s:drop() --- ... s = box.schema.space.create('test', {engine = 'vinyl'}) --- ... _ = s:create_index('pk') --- ... reflects = 0 --- ... function cur_reflects() return box.space.test.index.pk:info().disk.iterator.bloom.hit end --- ... function new_reflects() local o = reflects reflects = cur_reflects() return reflects - o end --- ... seeks = 0 --- ... function cur_seeks() return box.space.test.index.pk:info().disk.iterator.lookup end --- ... function new_seeks() local o = seeks seeks = cur_seeks() return seeks - o end --- ... for i = 1,1000 do s:replace{i} end --- ... box.snapshot() --- - ok ... _ = new_reflects() --- ... _ = new_seeks() --- ... for i = 1,1000 do s:select{i} end --- ... new_reflects() == 0 --- - true ... new_seeks() == 1000 --- - true ... for i = 1001,2000 do s:select{i} end --- ... new_reflects() > 980 --- - true ... new_seeks() < 20 --- - true ... test_run:cmd('restart server default') s = box.space.test --- ... reflects = 0 --- ... function cur_reflects() return box.space.test.index.pk:info().disk.iterator.bloom.hit end --- ... function new_reflects() local o = reflects reflects = cur_reflects() return reflects - o end --- ... seeks = 0 --- ... 
function cur_seeks() return box.space.test.index.pk:info().disk.iterator.lookup end --- ... function new_seeks() local o = seeks seeks = cur_seeks() return seeks - o end --- ... _ = new_reflects() --- ... _ = new_seeks() --- ... for i = 1,1000 do s:select{i} end --- ... new_reflects() == 0 --- - true ... new_seeks() == 1000 --- - true ... for i = 1001,2000 do s:select{i} end --- ... new_reflects() > 980 --- - true ... new_seeks() < 20 --- - true ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/vinyl/quota.test.lua0000664000000000000000000000161013306565107021144 0ustar rootrootenv = require('test_run') test_run = env.new() -- -- Restart the server because need to reset quota used memory -- after previous tests. -- test_run:cmd('restart server default') -- -- gh-1863 add BPS tree extents to memory quota -- box.info.vinyl().quota.used space = box.schema.space.create('test', { engine = 'vinyl' }) pk = space:create_index('pk') sec = space:create_index('sec', { parts = {2, 'unsigned'} }) space:insert({1, 1}) box.info.vinyl().quota.used space:insert({1, 1}) box.info.vinyl().quota.used space:update({1}, {{'!', 1, 100}}) -- try to modify the primary key box.info.vinyl().quota.used space:insert({2, 2}) space:insert({3, 3}) space:insert({4, 4}) box.info.vinyl().quota.used box.snapshot() box.info.vinyl().quota.used space:select{} box.info.vinyl().quota.used _ = space:replace{1, 1, string.rep('a', 1024 * 1024 * 5)} box.info.vinyl().quota.used space:drop() tarantool_1.9.1.26.g63eb81e3c/test/vinyl/options.test.lua0000664000000000000000000000055313306560010021477 0ustar rootrootutils = require('utils') test_run = require('test_run').new() index_options = test_run:get_cfg('index_options') index_options.type = 'TREE' index_options.parts = {1, 'unsigned'} space = box.schema.space.create('test', { engine = 'vinyl' }) _ = space:create_index('primary', index_options) utils.check_space(space, 1024) space:drop() test_run = nil utils = nil 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/low_quota.lua0000664000000000000000000000026413306560010021037 0ustar rootroot#!/usr/bin/env tarantool local LIMIT = tonumber(arg[1]) box.cfg{ vinyl_memory = LIMIT, vinyl_max_tuple_size = 2 * LIMIT, } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/vinyl/txn_proxy.lua0000664000000000000000000000320113306560010021071 0ustar rootroot-- A fiber can't use multiple transactions simultaneously; -- i.e. [fiber] --? [transaction] in UML parlor. -- -- This module provides a simple transaction proxy facility -- to control multiple transactions at once. A proxy executes -- statements in a worker fiber in order to overcome -- "one transaction per fiber" limitation. -- -- Ex: -- proxy = require('txn_proxy').new() -- proxy:begin() -- proxy('box.space.test:replace{1, 42}') -- proxy:commit() -- or proxy:rollback() local ffi = require('ffi') local yaml = require('yaml') local fiber = require('fiber') local console = require('console') local array_mt = { __serialize = 'array' } local mt = { __call = function(self, code_str) self.c1:put(code_str) local res = yaml.decode(self.c2:get()) return type(res) == 'table' and setmetatable(res, array_mt) or res end, __index = { begin = function(self) return self('box.begin()') end, commit = function(self) return self('box.commit()') end, rollback = function(self) return self('box.rollback()') end, close = function(self) self.c1:close(); self.c2:close() end } } local function fiber_main(c1, c2) local code_str = c1:get() if code_str then c2:put(console.eval(code_str)) return fiber_main(c1, c2) -- tail call end end local function new_txn_proxy() local c1, c2 = fiber.channel(), fiber.channel() local function on_gc() c1:close(); c2:close() end fiber.create(fiber_main, c1, c2) return setmetatable({ c1 = c1, c2 = c2, __gc = ffi.gc(ffi.new('char[1]'), on_gc) }, mt) end return { new = new_txn_proxy } 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/suite.cfg0000664000000000000000000000032713306565107020150 0ustar rootroot{ "options.test.lua": { "edge": {"index_options": {"range_size": 1, "page_size": 1}} }, "upgrade.test.lua": { "1.7.4": {"version": "1.7.4"}, "1.7.5": {"version": "1.7.5"} } } tarantool_1.9.1.26.g63eb81e3c/test/vinyl/upgrade.test.lua0000664000000000000000000000115513306560010021432 0ustar rootroottest_run = require('test_run').new() version = test_run:get_cfg('version') work_dir = 'vinyl/upgrade/' .. version test_run:cmd('create server upgrade with script="vinyl/upgrade.lua", workdir="' .. work_dir .. '"') test_run:cmd('start server upgrade') test_run:switch('upgrade') box.space.test.index.i1:select() box.space.test.index.i2:select() box.space.test_truncate.index.i1:select() box.space.test_truncate.index.i2:select() box.space.test_split:select() box.space.test_split:select() box.space.test_drop == nil test_run:switch('default') test_run:cmd('stop server upgrade') test_run:cmd('cleanup server upgrade') tarantool_1.9.1.26.g63eb81e3c/test/vinyl/options.result0000664000000000000000000000070513306560010021255 0ustar rootrootutils = require('utils') --- ... test_run = require('test_run').new() --- ... index_options = test_run:get_cfg('index_options') --- ... index_options.type = 'TREE' --- ... index_options.parts = {1, 'unsigned'} --- ... space = box.schema.space.create('test', { engine = 'vinyl' }) --- ... _ = space:create_index('primary', index_options) --- ... utils.check_space(space, 1024) --- - [] ... space:drop() --- ... test_run = nil --- ... utils = nil --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/vinyl/constraint.test.lua0000664000000000000000000000311613306560010022166 0ustar rootroot -- key type validations (str, num) space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) space:insert{1} space:replace{1} space:delete{1} space:update({1}, {{'=', 1, 101}}) space:upsert({1}, {{'+', 1, 10}}) space:get{1} index:pairs(1, {iterator = 'GE'}) space:drop() -- key type validations (num, str) space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space:insert{'A'} space:replace{'A'} space:delete{'A'} space:update({'A'}, {{'=', 1, 101}}) space:upsert({'A'}, {{'+', 1, 10}}) space:get{'A'} index:pairs('A', {iterator = 'GE'}) space:drop() -- ensure all key-parts are passed space = box.schema.space.create('test', { engine = 'vinyl' }) index = space:create_index('primary', { type = 'tree', parts = {1,'unsigned',2,'unsigned'} }) space:insert{1} space:replace{1} space:delete{1} space:update(1, {{'=', 1, 101}}) space:upsert({1}, {{'+', 1, 10}}) space:get{1} index:select({1}, {iterator = box.index.GT}) space:drop() ------------------------------------------------------------------------------- -- Key part length without limit ------------------------------------------------------------------------------- space = box.schema.space.create('single_part', { engine = 'vinyl' }) _ = space:create_index('primary', { type = 'tree', parts = {1, 'string'}}) t1 = space:insert({string.rep('x', 1020)}) t1 = space:insert({string.rep('x', 10210)}) t3 = space:insert({string.rep('x', 102200)}) space:drop() space = nil pk = nil tarantool_1.9.1.26.g63eb81e3c/test/vinyl/tx_conflict.result0000664000000000000000000002676013306560010022107 0ustar rootroot-- The test runs loop of given number of rounds. 
-- Every round does the following: -- The test starts several concurrent transactions in vinyl. -- The transactions make some read/write operations over several keys in -- a random order and commit at a random moment. -- After that all transactions are sorted in order of commit -- With the sublist of read-write transactions committed w/o conflict: -- Test tries to make these transactions in memtex, one tx after another, -- without interleaving and compares select results with vinyl to make sure -- if the transaction could be serialized in order of commit or not -- With the sublist of read-write transactions committed with conflict: -- Test tries to make the same operations end ensures that the read results -- are not possible in memtx. -- With the sublist of read only transactions: -- Test tries to insert these transactions between other transactions and checks -- that it possible to get same results. test_run = require('test_run').new() --- ... txn_proxy = require('txn_proxy') --- ... --settings num_tx = 10 --number of concurrent transactions --- ... num_key = 5 --number of keys that transactions use --- ... num_tests = 50 --number of test rounds to run --- ... txs = {} --- ... order_of_commit = {} --- ... num_committed = 0 --- ... stmts = {} --- ... errors = {} --- ... initial_data = {} --- ... initial_repro = "" --- ... ops = {'begin', 'commit', 'select', 'replace', 'upsert', 'delete'} --- ... -- ignore case of unnecessary conflict: -- s:delete{1} -- t1:begin() t1:select{1} t1:replace{2} s:replace{1} s:delete{1} t1:commit() ignore_unnecessary_conflict1 = true --- ... --avoid first upsert in transaction --fails if num_tests = 1000 ignore_unnecessary_conflict2 = true --- ... -- New point iterator introduced additional possible conflicts that -- happens during run page read yield. -- This flag disables 'could be serializable' checks. latest_broken = true --- ... test_run:cmd("setopt delimiter ';'") --- - true ... 
s1 = box.schema.create_space('test1', { engine = 'vinyl' }) i1 = s1:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) s2 = box.schema.create_space('test2', { engine = 'memtx' }) i2 = s2:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) if ignore_unnecessary_conflict1 then q1 = box.schema.create_space('testq1', { engine = 'vinyl' }) iq1 = q1:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) q2 = box.schema.create_space('testq2', { engine = 'memtx' }) iq2 = q2:create_index('test', { type = 'TREE', parts = {1, 'uint'} }) end; --- ... for i=1,num_tx do txs[i] = {con = txn_proxy.new()} end; --- ... function my_equal(a, b) local typea = box.tuple.is(a) and 'table' or type(a) local typeb = box.tuple.is(b) and 'table' or type(b) if typea ~= typeb then return false elseif typea ~= 'table' then return a == b end for k,v in pairs(a) do if not my_equal(b[k], v) then return false end end for k,v in pairs(b) do if not my_equal(a[k], v) then return false end end return true end; --- ... unique_value = 0 function get_unique_value() unique_value = unique_value + 1 return unique_value end; --- ... function prepare() order_of_commit = {} num_committed = 0 stmts = {} for i=1,num_tx do txs[i].started = false txs[i].ended = false if math.random(3) == 1 then txs[i].read_only = true else txs[i].read_only = false end txs[i].read_only_checked = false txs[i].conflicted = false txs[i].possible = nil txs[i].num_writes = 0 end s1:truncate() s2:truncate() if ignore_unnecessary_conflict1 then q1:truncate() q2:truncate() end for i=1,num_key do local r = math.random(5) local v = get_unique_value() if (r >= 2) then s1:replace{i, v} s2:replace{i, v } if ignore_unnecessary_conflict1 then q1:replace{i, v} q2:replace{i, v } end end if (r == 2) then s1:delete{i} s2:delete{i} end end initial_data = s1:select{} initial_repro = "" initial_repro = initial_repro .. 
"s = box.schema.space.create('test', {engine = 'vinyl', if_not_exists = true})\n" initial_repro = initial_repro .. "i1 = s:create_index('test', {parts = {1, 'uint'}, if_not_exists = true})\n" initial_repro = initial_repro .. "txn_proxy = require('txn_proxy')\n" for _,tuple in pairs(initial_data) do initial_repro = initial_repro .. "s:replace{" .. tuple[1] .. ", " .. tuple[2] .. "} " end end; --- ... function apply(t, k, op) local tx = txs[t] local v = nil local q = nil local k = k local repro = nil if op == 'begin' then if tx.started then table.insert(errors, "assert #1") end tx.started = true tx.con:begin() k = nil repro = "c" .. t .. " = txn_proxy.new() c" .. t .. ":begin()" repro = "p(\"c" .. t .. ":begin()\") " .. repro elseif op == 'commit' then if tx.ended or not tx.started then table.insert(errors, "assert #2") end tx.ended = true table.insert(order_of_commit, t) num_committed = num_committed + 1 local res = tx.con:commit() if res ~= "" and res[1]['error'] then tx.conflicted = true else tx.select_all = s1:select{} if tx.num_writes == 0 then tx.read_only = true end end k = nil repro = "c" .. t .. ":commit()" repro = "p(\"" .. repro .. "\", " .. repro .. ", s:select{})" elseif op == 'select' then v = tx.con('s1:select{'..k..'}') if ignore_unnecessary_conflict1 then q = tx.con('q1:select{'..k..'}') end repro = "c" .. t .. "('s:select{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'replace' then v = get_unique_value() tx.con('s1:replace{'..k..','..v..'}') if ignore_unnecessary_conflict1 then tx.con('q1:replace{'..k..','..v..'}') end tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:replace{" .. k .. ", " .. v .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'upsert' then v = math.random(100) tx.con('s1:upsert({'..k..','..v..'}, {{"+", 2,'..v..'}})') if ignore_unnecessary_conflict1 then tx.con('q1:upsert({'..k..','..v..'}, {{"+", 2,'..v..'}})') end tx.num_writes = tx.num_writes + 1 repro = "c" .. 
t .. "('s:upsert({" .. k .. ", " .. v .. "}, {{\\'+\\', 2, " .. v .. "}})')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" elseif op == 'delete' then tx.con('s1:delete{'..k..'}') tx.num_writes = tx.num_writes + 1 repro = "c" .. t .. "('s:delete{" .. k .. "}')" repro = "p(\"" .. repro .. "\", " .. repro .. ")" end table.insert(stmts, {t=t, k=k, op=op, v=v, q=q, repro=repro}) end; --- ... function act() while true do local t = math.random(num_tx) local k = math.random(num_key) local tx = txs[t] if not tx.ended then local op_no = 0 if (tx.read_only) then op_no = math.random(3) else op_no = math.random(6) end local op = ops[op_no] if ignore_unnecessary_conflict2 then local were_ops = false for i,st in ipairs(stmts) do if st.t == t and st.k == k and st.op ~= 'commit' then were_ops = true end end if op == 'upsert' and not were_ops then op = 'replace' end end if op ~= 'commit' or tx.started then if not tx.started then apply(t, k, 'begin') end if op ~= 'begin' then apply(t, k, op) end end return end end end; --- ... function is_rdonly_tx_possible(t) for _,s in pairs(stmts) do if s.t == t and s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end end end return true end; --- ... function try_to_apply_tx(t) for _,s in pairs(stmts) do if s.t == t then if s.op == 'select' then local cmp_with = {s2:select{s.k}} if not my_equal(s.v, cmp_with) then return false end if ignore_unnecessary_conflict1 then cmp_with = {q2:select{s.k}} if not my_equal(s.q, cmp_with) then return false end end elseif s.op == 'replace' then s2:replace{s.k, s.v} if ignore_unnecessary_conflict1 then q2:replace{s.k, s.v } end elseif s.op == 'upsert' then s2:upsert({s.k, s.v}, {{'+', 2, s.v}}) if ignore_unnecessary_conflict1 then q2:upsert({s.k, s.v}, {{'+', 2, s.v}}) end elseif s.op == 'delete' then s2:delete{s.k} end end end return true end; --- ... 
function check_rdonly_possibility() for i=1,num_tx do if txs[i].read_only and not txs[i].possible then if is_rdonly_tx_possible(i) then txs[i].possible = true end end end end; --- ... function check() local had_errors = (errors[1] ~= nil) for i=1,num_tx do if txs[i].read_only then if txs[i].conflicted then table.insert(errors, "read-only conflicted " .. i) end txs[i].possible = false end end check_rdonly_possibility() for _,t in ipairs(order_of_commit) do if not txs[t].read_only then if txs[t].conflicted then box.begin() if try_to_apply_tx(t) and not latest_broken then table.insert(errors, "could be serializable " .. t) end box.rollback() else if not try_to_apply_tx(t) then table.insert(errors, "not serializable " .. t) end if not my_equal(txs[t].select_all, s2:select{}) then table.insert(errors, "results are different " .. t) end check_rdonly_possibility() end end end for i=1,num_tx do if txs[i].read_only and not txs[i].possible then table.insert(errors, "not valid read view " .. i) end end if errors[1] and not had_errors then print("p(\"" .. errors[1] .. "\")") print(initial_repro) print("p(\"" .. initial_repro .. "\")") print('----------------------') for _,stmt in ipairs(stmts) do print(stmt.repro) end io.flush() end end; --- ... for i = 1, num_tests do prepare() while num_committed ~= num_tx do act() end check() end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... errors --- - [] ... s1:drop() --- ... s2:drop() --- ... if ignore_unnecessary_conflict1 then q1:drop() q2:drop() end --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/long_run-py/0000775000000000000000000000000013306560010017430 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/long_run-py/finalizers.test.py0000664000000000000000000000061313306560010023126 0ustar rootrootimport os import sys import re import yaml from lib.tarantool_server import TarantoolServer server = TarantoolServer(server.ini) server.script = 'long_run-py/lua/finalizers.lua' server.vardir = os.path.join(server.vardir, 'finalizers') server.crash_expected = True try: server.deploy() except: print "Expected error:", sys.exc_info()[0] else: print "Error! exception did not occur" tarantool_1.9.1.26.g63eb81e3c/test/long_run-py/suite.ini0000664000000000000000000000032413306560010021261 0ustar rootroot[default] core = tarantool description = long running tests script = box.lua long_run = finalizers.test.py valgrind_disabled = release_disabled = lua_libs = suite.lua use_unix_sockets = True is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/long_run-py/suite.lua0000664000000000000000000000736413306560010021276 0ustar rootroot function string_function() local random_number local random_string random_string = "" for x = 1,20,1 do random_number = math.random(65, 90) random_string = random_string .. 
string.char(random_number) end return random_string end function delete_replace_update(engine_name) local string_value if (box.space._space.index.name:select{'tester'}[1] ~= nil) then box.space.tester:drop() end box.schema.space.create('tester', {engine=engine_name}) box.space.tester:create_index('primary',{type = 'tree', parts = {1, 'STR'}}) local random_number local string_value_2 local string_value_3 local counter = 1 while counter < 100000 do local string_value = string_function() local string_table = box.space.tester.index.primary:select({string_value}, {iterator = 'GE', limit = 1}) if string_table[1] == nil then box.space.tester:insert{string_value, counter} string_value_2 = string_value else string_value_2 = string_table[1][1] end if string_value_2 == nil then box.space.tester:insert{string_value, counter} string_value_2 = string_value end random_number = math.random(1,6) string_value_3 = string_function() -- print('<'..counter..'> [' .. random_number .. '] value_2: ' .. string_value_2 .. ' value_3: ' .. 
string_value_3) if random_number == 1 then box.space.tester:delete{string_value_2} end if random_number == 2 then box.space.tester:replace{string_value_2, counter, string_value_3} end if random_number == 3 then box.space.tester:delete{string_value_2} box.space.tester:insert{string_value_2, counter} end if random_number == 4 then if counter < 1000000 then box.space.tester:delete{string_value_3} box.space.tester:insert{string_value_3, counter, string_value_2} end end if random_number == 5 then box.space.tester:update({string_value_2}, {{'=', 2, string_value_3}}) end if random_number == 6 then box.space.tester:update({string_value_2}, {{'=', 2, string_value_3}}) end counter = counter + 1 end box.space.tester:drop() return {counter, random_number, string_value_2, string_value_3} end function delete_insert(engine_name) local string_value if (box.space._space.index.name:select{'tester'}[1] ~= nil) then box.space.tester:drop() end box.schema.space.create('tester', {engine=engine_name}) box.space.tester:create_index('primary',{type = 'tree', parts = {1, 'STR'}}) local string_value_2 local counter = 1 while counter < 100000 do local string_value = string_function() local string_table = box.space.tester.index.primary:select({string_value}, {iterator = 'GE', limit = 1}) if string_table[1] == nil then -- print (1, ' insert', counter, string_value) box.space.tester:insert{string_value, counter} string_value_2 = string_value else string_value_2 = string_table[1][1] end if string_value_2 == nil then -- print (2, ' insert', counter, string_value) box.space.tester:insert{string_value, counter} string_value_2 = string_value end -- print (3, ' delete', counter, string_value_2) box.space.tester:delete{string_value_2} -- print (4, ' insert', counter, string_value_2) box.space.tester:insert{string_value_2, counter} counter = counter + 1 end box.space.tester:drop() return {counter, string_value_2} end 
tarantool_1.9.1.26.g63eb81e3c/test/long_run-py/box.lua0000664000000000000000000000064413306560010020727 0ustar rootroot#!/usr/bin/env tarantool require('suite') os.execute("rm -rf vinyl_test") os.execute("mkdir -p vinyl_test") box.cfg { listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", rows_per_wal = 500000, vinyl_dir = "./vinyl_test", vinyl_read_threads = 3, vinyl_write_threads = 5, } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/long_run-py/finalizers.result0000664000000000000000000000010313306560010023030 0ustar rootrootExpected error: tarantool_1.9.1.26.g63eb81e3c/test/long_run-py/lua/0000775000000000000000000000000013306560010020211 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/long_run-py/lua/finalizers.lua0000664000000000000000000000046413306560010023066 0ustar rootroot#!/usr/bin/env tarantool function on_gc(t) end; function test_finalizers() local result = {} local i = 1 local ffi = require('ffi') while true do result[i] = ffi.gc(ffi.cast('void *', 0), on_gc) i = i + 1 end return "done" end; test_finalizers() test_finalizers() tarantool_1.9.1.26.g63eb81e3c/test/share/0000775000000000000000000000000013306560010016261 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/share/tarantool.sup0000664000000000000000000000776413306560010021033 0ustar rootroot{ Memcheck:Leak match-leak-kinds: reachable fun:malloc ... obj:/bin/dash ... } { Memcheck:Cond fun:str_fastcmp fun:lj_str_new ... } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc fun:CRYPTO_*alloc ... fun:ERR_load_crypto_strings ... fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc fun:CRYPTO_*alloc ... fun:OpenSSL_add_all_* ... fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc fun:CRYPTO_*alloc ... fun:EVP_add_digest ... fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:CRYPTO_malloc ... fun:OBJ_NAME_add ... 
fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:CRYPTO_malloc fun:lh_insert obj:/lib/x86_64-linux-gnu/libcrypto.so.1.0.0 obj:/lib/x86_64-linux-gnu/libcrypto.so.1.0.0 ... } { Memcheck:Leak match-leak-kinds: reachable fun:calloc fun:mh_i32ptr_new fun:cord_create fun:fiber_init fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:malloc ... fun:title_init fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:strdup fun:say_init fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc ... fun:rl_initialize ... } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc ... fun:rl_redisplay ... } { Memcheck:Leak match-leak-kinds: reachable ... fun:*alloc ... fun:array_realloc ... fun:ev_* ... } { Memcheck:Leak match-leak-kinds: reachable ... fun:*alloc ... fun:ev_default_loop ... } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc ... fun:box_load_cfg fun:load_cfg ... } { Memcheck:Leak match-leak-kinds: definite fun:malloc fun:xmalloc fun:readline_internal_teardown fun:rl_callback_read_char fun:lbox_console_readline fun:lj_BC_FUNCC fun:lua_pcall fun:lbox_call fun:run_script_f fun:_ZL16fiber_cxx_invokePFiP13__va_list_tagES0_ fun:fiber_loop fun:coro_init } { Memcheck:Leak match-leak-kinds: reachable fun:realloc fun:xrealloc fun:add_history fun:read_history_range fun:main } { Memcheck:Leak match-leak-kinds: reachable fun:*alloc ... fun:cord_create fun:cord_thread_func fun:start_thread fun:clone } { Memcheck:Leak match-leak-kinds: reachable ... fun:_Z9space_newP9space_defP5rlist ... fun:trigger_run ... } { Memcheck:Leak match-leak-kinds: reachable ... fun:_Z12sc_space_newP9space_defP7key_defP7trigger ... 
} { Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:rmean_new fun:_ZL10net_cord_fP13__va_list_tag fun:_ZL16fiber_cxx_invokePFiP13__va_list_tagES0_ fun:fiber_loop fun:coro_init } { Memcheck:Leak match-leak-kinds: reachable fun:malloc fun:strbuf_init fun:json_create_tokens fun:luaopen_json fun:tarantool_lua_init fun:main } { Memcheck:Leak match-leak-kinds: reachable ... fun:mh_i32ptr_put fun:register_fid ... } { Memcheck:Leak match-leak-kinds: reachable fun:calloc fun:_dlerror_run fun:dlsym fun:clib_getsym fun:lj_clib_index fun:ffi_clib_index fun:lj_cf_ffi_clib___index fun:lj_BC_FUNCC fun:lua_call fun:tarantool_lua_init fun:main } tarantool_1.9.1.26.g63eb81e3c/test/engine_long/0000775000000000000000000000000013306560010017443 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/engine_long/delete_replace_update.test.lua0000664000000000000000000000057113306560010025426 0ustar rootrootengine_name = 'memtx' iterations = 100000 math.randomseed(1) delete_replace_update(engine_name, iterations) math.randomseed(2) delete_replace_update(engine_name, iterations) math.randomseed(3) delete_replace_update(engine_name, iterations) math.randomseed(4) delete_replace_update(engine_name, iterations) math.randomseed(5) delete_replace_update(engine_name, iterations) tarantool_1.9.1.26.g63eb81e3c/test/engine_long/delete_insert.test.lua0000664000000000000000000000025013306560010023747 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') iterations = 100000 math.randomseed(1) delete_insert(engine, iterations) tarantool_1.9.1.26.g63eb81e3c/test/engine_long/delete_replace_update.result0000664000000000000000000000144313306560010025204 0ustar rootrootengine_name = 'memtx' --- ... iterations = 100000 --- ... math.randomseed(1) --- ... delete_replace_update(engine_name, iterations) --- - - 100000 - 3 - EISMEJTLAMKKUKAGXHLK - BUPYVKWEHVEFKSZFAQIK ... math.randomseed(2) --- ... 
delete_replace_update(engine_name, iterations) --- - - 100000 - 3 - DTUNRKMNKTQDTBLYQVUQ - UHMBETMEPMQWOIHLAOSR ... math.randomseed(3) --- ... delete_replace_update(engine_name, iterations) --- - - 100000 - 1 - KORQQVUDKFSDPZVQOEZE - RSMFJYXLNSOILKDEFJZW ... math.randomseed(4) --- ... delete_replace_update(engine_name, iterations) --- - - 100000 - 3 - OKKQFSHPFMOLMOFOXEAN - RXXYWNPDXVVGONOGTEZK ... math.randomseed(5) --- ... delete_replace_update(engine_name, iterations) --- - - 100000 - 3 - LCUFGMXHLNHJBAQJDHGH - HKBELZCRRWNZHFIRAHJD ... tarantool_1.9.1.26.g63eb81e3c/test/engine_long/suite.ini0000664000000000000000000000035713306560010021302 0ustar rootroot[default] core = tarantool description = tarantool engine stress tests script = box.lua long_run = delete_replace_update.test.lua delete_insert.test.lua lua_libs = suite.lua use_unix_sockets = True config = engine.cfg is_parallel = False tarantool_1.9.1.26.g63eb81e3c/test/engine_long/suite.lua0000664000000000000000000000730013306560010021277 0ustar rootroot function string_function() local random_number local random_string random_string = "" for x = 1,20,1 do random_number = math.random(65, 90) random_string = random_string .. 
string.char(random_number) end return random_string end function delete_replace_update(engine_name, iterations) local string_value if (box.space._space.index.name:select{'tester'}[1] ~= nil) then box.space.tester:drop() end box.schema.space.create('tester', {engine=engine_name}) box.space.tester:create_index('primary',{type = 'tree', parts = {1, 'STR'}}) local random_number local string_value_2 local string_value_3 local counter = 1 while counter < iterations do local string_value = string_function() local string_table = box.space.tester.index.primary:select({string_value}, {iterator = 'EQ'}) if string_table[1] == nil then box.space.tester:insert{string_value, counter} string_value_2 = string_value else string_value_2 = string_table[1][1] end if string_value_2 == nil then box.space.tester:insert{string_value, counter} string_value_2 = string_value end random_number = math.random(1,6) string_value_3 = string_function() -- print('<'..counter..'> [' .. random_number .. '] value_2: ' .. string_value_2 .. ' value_3: ' .. 
string_value_3) if random_number == 1 then box.space.tester:delete{string_value_2} end if random_number == 2 then box.space.tester:replace{string_value_2, counter, string_value_3} end if random_number == 3 then box.space.tester:delete{string_value_2} box.space.tester:insert{string_value_2, counter} end if random_number == 4 then box.space.tester:delete{string_value_3} box.space.tester:insert{string_value_3, counter, string_value_2} end if random_number == 5 then box.space.tester:update({string_value_2}, {{'=', 2, string_value_3}}) end if random_number == 6 then box.space.tester:update({string_value_2}, {{'=', 2, string_value_3}}) end counter = counter + 1 end box.space.tester:drop() return {counter, random_number, string_value_2, string_value_3} end function delete_insert(engine_name, iterations) local string_value if (box.space._space.index.name:select{'tester'}[1] ~= nil) then box.space.tester:drop() end box.schema.space.create('tester', {engine=engine_name}) box.space.tester:create_index('primary',{type = 'tree', parts = {1, 'STR'}}) local string_value_2 local counter = 1 while counter < iterations do local string_value = string_function() local string_table = box.space.tester.index.primary:select({string_value}, {iterator = 'EQ'}) if string_table[1] == nil then -- print (1, ' insert', counter, string_value) box.space.tester:insert{string_value, counter} string_value_2 = string_value else string_value_2 = string_table[1][1] end if string_value_2 == nil then -- print (2, ' insert', counter, string_value) box.space.tester:insert{string_value, counter} string_value_2 = string_value end -- print (3, ' delete', counter, string_value_2) box.space.tester:delete{string_value_2} -- print (4, ' insert', counter, string_value_2) box.space.tester:insert{string_value_2, counter} counter = counter + 1 end box.space.tester:drop() return {counter, string_value_2} end tarantool_1.9.1.26.g63eb81e3c/test/engine_long/box.lua0000664000000000000000000000101613306560010020734 0ustar 
rootroot#!/usr/bin/env tarantool require('suite') os.execute("rm -rf vinyl_test") os.execute("mkdir -p vinyl_test") box.cfg { listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", rows_per_wal = 500000, vinyl_dir = "./vinyl_test", vinyl_memory = 107374182, vinyl_read_threads = 3, vinyl_write_threads = 5, vinyl_range_size = 1024 * 1024, vinyl_page_size = 4 * 1024, } require('console').listen(os.getenv('ADMIN')) tarantool_1.9.1.26.g63eb81e3c/test/engine_long/delete_insert.result0000664000000000000000000000037413306560010023535 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... iterations = 100000 --- ... math.randomseed(1) --- ... delete_insert(engine, iterations) --- - - 100000 - IAKGPQANAOSLARIFIBKB ... tarantool_1.9.1.26.g63eb81e3c/test/engine_long/engine.cfg0000664000000000000000000000014113306560010021365 0ustar rootroot{ "*": { "memtx": {"engine": "memtx"}, "vinyl": {"engine": "vinyl"} } } tarantool_1.9.1.26.g63eb81e3c/test/engine/0000775000000000000000000000000013306565107016440 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test/engine/snapshot.test.lua0000664000000000000000000000227413306560010021751 0ustar rootroot -- write data recover from latest snapshot env = require('test_run') test_run = env.new() test_run:cmd('restart server default') engine = test_run:get_cfg('engine') space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') for key = 1, 51 do space:insert({key}) end box.snapshot() test_run:cmd('restart server default') space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) for key = 52, 91 do space:insert({key}) end box.snapshot() test_run:cmd('restart server default') space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) box.space.test:drop() -- https://github.com/tarantool/tarantool/issues/1899 engine = 
test_run:get_cfg('engine') space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { parts = {1, 'unsigned'} } ) index2 = space:create_index('secondary', { parts = {2, 'unsigned'} } ) space:insert{1, 11, 21} space:insert{20, 10, 0} box.snapshot() test_run:cmd('restart server default') box.space.test:select{} box.space.test.index.primary:select{} box.space.test.index.secondary:select{} box.space.test:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/params.test.lua0000664000000000000000000000070113306560010021366 0ustar rootroot--init test_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') box.schema.user.grant('guest', 'read,write,execute', 'universe') s = box.schema.create_space('engine', {engine=engine}) i = s:create_index('primary') --test example for memtx and vinyl box.space.engine:insert{1,2,3} box.space.engine:select{} -- cleanup box.space.engine:drop() box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/engine/crossjoin.result0000664000000000000000000000353213306560010021700 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... inspector = env.new() --- ... engine = inspector:get_cfg('engine') --- ... space = box.schema.space.create('tweedledum', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree' }) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function crossjoin(space0, space1, limit) local result = {} for _,v0 in space0:pairs() do for _,v1 in space1:pairs() do if limit <= 0 then return result end local newtuple = v0:totable() for _, v in v1:pairs() do table.insert(newtuple, v) end table.insert(result, newtuple) limit = limit - 1 end end return result end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... crossjoin(space, space, 0) --- - [] ... crossjoin(space, space, 10000) --- - [] ... space:insert{1} --- - [1] ... 
crossjoin(space, space, 10000) --- - - [1, 1] ... space:insert{2} --- - [2] ... crossjoin(space, space, 10000) --- - - [1, 1] - [1, 2] - [2, 1] - [2, 2] ... space:insert{3, 'hello'} --- - [3, 'hello'] ... crossjoin(space, space, 10000) --- - - [1, 1] - [1, 2] - [1, 3, 'hello'] - [2, 1] - [2, 2] - [2, 3, 'hello'] - [3, 'hello', 1] - [3, 'hello', 2] - [3, 'hello', 3, 'hello'] ... space:insert{4, 'world'} --- - [4, 'world'] ... space[0]:insert{5, 'hello world'} --- - error: '[string "return space[0]:insert{5, ''hello world''} "]:1: attempt to index a nil value' ... crossjoin(space, space, 10000) --- - - [1, 1] - [1, 2] - [1, 3, 'hello'] - [1, 4, 'world'] - [2, 1] - [2, 2] - [2, 3, 'hello'] - [2, 4, 'world'] - [3, 'hello', 1] - [3, 'hello', 2] - [3, 'hello', 3, 'hello'] - [3, 'hello', 4, 'world'] - [4, 'world', 1] - [4, 'world', 2] - [4, 'world', 3, 'hello'] - [4, 'world', 4, 'world'] ... space:drop() --- ... crossjoin = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/iterator.test.lua0000664000000000000000000005034013306560010021740 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') inspector:cmd("push filter '"..engine.."' to 'engine'") -- iterator (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end t = {} for state, v in index:pairs({}, {iterator = 'ALL'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(tostring(44), {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(tostring(44), {iterator = 'GT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(tostring(77), {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v 
in index:pairs({}, {iterator = 'LT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(tostring(77), {iterator = 'LT'}) do table.insert(t, v) end t space:drop() -- iterator (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:replace({key}) end t = {} for state, v in index:pairs({}, {iterator = 'ALL'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(44, {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(44, {iterator = 'GT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(77, {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'LT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(77, {iterator = 'LT'}) do table.insert(t, v) end t space:drop() -- iterator multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:replace({key, key}) end t = {} for state, v in index:pairs({}, {iterator = 'ALL'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({44, 44}, {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({44, 44}, {iterator = 'GT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({77, 77}, {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({}, {iterator = 'LT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs({77, 77}, 
{iterator = 'LT'}) do table.insert(t, v) end t space:drop() -- iterator with tuple.new space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'ALL'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new(tostring(44)), {iterator = 'GE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new(tostring(44)), {iterator = 'GT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new(tostring(77)), {iterator = 'LE'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'LT'}) do table.insert(t, v) end t t = {} for state, v in index:pairs(box.tuple.new(tostring(77)), {iterator = 'LT'}) do table.insert(t, v) end t space:drop() iterate = dofile('utils.lua').iterate inspector:cmd("push filter '(error: .builtin/.*[.]lua):[0-9]+' to '\\1'") # Tree single-part unique space = box.schema.space.create('tweedledum', { engine = engine }) idx1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true}) -- Tree single-part non-unique idx2 = space:create_index('i1', { type = 'tree', parts = {2, 'string'}, unique = false}) -- Tree multi-part unique idx3 = space:create_index('i2', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true}) -- Tree multi-part non-unique idx4 = space:create_index('i3', { type = 'tree', parts = {3, 'string', 4, 'string'}, unique = false }) space:insert{'pid_001', 'sid_001', 'tid_998', 'a'} space:insert{'pid_002', 'sid_001', 'tid_997', 'a'} space:insert{'pid_003', 'sid_002', 'tid_997', 'b'} space:insert{'pid_005', 
'sid_002', 'tid_996', 'b'} space:insert{'pid_007', 'sid_003', 'tid_996', 'a'} space:insert{'pid_011', 'sid_004', 'tid_996', 'c'} space:insert{'pid_013', 'sid_005', 'tid_996', 'b'} space:insert{'pid_017', 'sid_006', 'tid_996', 'a'} space:insert{'pid_019', 'sid_005', 'tid_995', 'a'} space:insert{'pid_023', 'sid_005', 'tid_994', 'a'} ------------------------------------------------------------------------------- -- Iterator: tree single-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'primary', 0, 1) iterate('tweedledum', 'primary', 0, 1, box.index.ALL) iterate('tweedledum', 'primary', 0, 1, box.index.EQ) iterate('tweedledum', 'primary', 0, 1, box.index.REQ) iterate('tweedledum', 'primary', 0, 1, box.index.GE) iterate('tweedledum', 'primary', 0, 1, box.index.GT) iterate('tweedledum', 'primary', 0, 1, box.index.LE) iterate('tweedledum', 'primary', 0, 1, box.index.LT) iterate('tweedledum', 'primary', 0, 1, box.index.EQ, 'pid_003') iterate('tweedledum', 'primary', 0, 1, box.index.REQ, 'pid_003') iterate('tweedledum', 'primary', 0, 1, box.index.EQ, 'pid_666') iterate('tweedledum', 'primary', 0, 1, box.index.REQ, 'pid_666') iterate('tweedledum', 'primary', 0, 1, box.index.GE, 'pid_001') iterate('tweedledum', 'primary', 0, 1, box.index.GT, 'pid_001') iterate('tweedledum', 'primary', 0, 1, box.index.GE, 'pid_999') iterate('tweedledum', 'primary', 0, 1, box.index.GT, 'pid_999') iterate('tweedledum', 'primary', 0, 1, box.index.LE, 'pid_002') iterate('tweedledum', 'primary', 0, 1, box.index.LT, 'pid_002') iterate('tweedledum', 'primary', 0, 1, box.index.LE, 'pid_000') iterate('tweedledum', 'primary', 0, 1, box.index.LT, 'pid_000') ------------------------------------------------------------------------------- -- Iterator: tree single-part non-unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i1', 1, 2, box.index.ALL) iterate('tweedledum', 'i1', 1, 2, 
box.index.EQ) iterate('tweedledum', 'i1', 1, 2, box.index.REQ) iterate('tweedledum', 'i1', 1, 2, box.index.GE) iterate('tweedledum', 'i1', 1, 2, box.index.GT) iterate('tweedledum', 'i1', 1, 2, box.index.LE) iterate('tweedledum', 'i1', 1, 2, box.index.LT) iterate('tweedledum', 'i1', 1, 2, box.index.EQ, 'sid_005') iterate('tweedledum', 'i1', 1, 2, box.index.REQ, 'sid_005') iterate('tweedledum', 'i1', 1, 2, box.index.GE, 'sid_005') iterate('tweedledum', 'i1', 1, 2, box.index.GT, 'sid_005') iterate('tweedledum', 'i1', 1, 2, box.index.GE, 'sid_999') iterate('tweedledum', 'i1', 1, 2, box.index.GT, 'sid_999') iterate('tweedledum', 'i1', 1, 2, box.index.LE, 'sid_005') iterate('tweedledum', 'i1', 1, 2, box.index.LT, 'sid_005') iterate('tweedledum', 'i1', 1, 2, box.index.LE, 'sid_000') iterate('tweedledum', 'i1', 1, 2, box.index.LT, 'sid_000') ------------------------------------------------------------------------------- -- Iterator: tree multi-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i2', 1, 3, box.index.ALL) iterate('tweedledum', 'i2', 1, 3, box.index.EQ) iterate('tweedledum', 'i2', 1, 3, box.index.REQ) iterate('tweedledum', 'i2', 1, 3, box.index.GE) iterate('tweedledum', 'i2', 1, 3, box.index.GT) iterate('tweedledum', 'i2', 1, 3, box.index.LE) iterate('tweedledum', 'i2', 1, 3, box.index.LT) iterate('tweedledum', 'i2', 1, 3, box.index.EQ, 'sid_005') iterate('tweedledum', 'i2', 1, 3, box.index.EQ, 'sid_005', 'tid_995') iterate('tweedledum', 'i2', 1, 3, box.index.EQ, 'sid_005', 'tid_999') iterate('tweedledum', 'i2', 1, 3, box.index.REQ, 'sid_005') iterate('tweedledum', 'i2', 1, 3, box.index.REQ, 'sid_005', 'tid_995') iterate('tweedledum', 'i2', 1, 3, box.index.REQ, 'sid_005', 'tid_999') iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_005') iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_005') iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_005', 'tid_995') iterate('tweedledum', 'i2', 1, 
3, box.index.GT, 'sid_005', 'tid_995') iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_005', 'tid_999') iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_005', 'tid_999') iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_999') iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_999') iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_005') iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_005') iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_005', 'tid_997') iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_005', 'tid_997') iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_005', 'tid_000') iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_005', 'tid_000') iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_000') iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_000') ------------------------------------------------------------------------------- -- Iterator: tree multi-part non-unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i3', 2, 4, box.index.ALL) iterate('tweedledum', 'i3', 2, 4, box.index.EQ) iterate('tweedledum', 'i3', 2, 4, box.index.REQ) iterate('tweedledum', 'i3', 2, 4, box.index.GE) iterate('tweedledum', 'i3', 2, 4, box.index.GT) iterate('tweedledum', 'i3', 2, 4, box.index.LE) iterate('tweedledum', 'i3', 2, 4, box.index.LT) iterate('tweedledum', 'i3', 2, 4, box.index.EQ, 'tid_996') iterate('tweedledum', 'i3', 2, 4, box.index.EQ, 'tid_996', 'a') iterate('tweedledum', 'i3', 2, 4, box.index.EQ, 'tid_996', 'z') iterate('tweedledum', 'i3', 2, 4, box.index.REQ, 'tid_996') iterate('tweedledum', 'i3', 2, 4, box.index.REQ, 'tid_996', 'a') iterate('tweedledum', 'i3', 2, 4, box.index.REQ, 'tid_996', '0') iterate('tweedledum', 'i3', 2, 4, box.index.GE, 'tid_997') iterate('tweedledum', 'i3', 2, 4, box.index.GT, 'tid_997') iterate('tweedledum', 'i3', 2, 4, box.index.GE, 'tid_998') iterate('tweedledum', 'i3', 2, 4, box.index.GT, 'tid_998') iterate('tweedledum', 'i3', 2, 4, 
box.index.LE, 'tid_997') iterate('tweedledum', 'i3', 2, 4, box.index.LT, 'tid_997') iterate('tweedledum', 'i3', 2, 4, box.index.LE, 'tid_000') iterate('tweedledum', 'i3', 2, 4, box.index.LT, 'tid_000') iterate('tweedledum', 'i3', 2, 4, box.index.LT, 'tid_996', 'to', 'many', 'keys') ------------------------------------------------------------------------------- -- Iterator: various ------------------------------------------------------------------------------- space.index['primary']:pairs({}, {iterator = 666 }) -- Test cases for #123: box.index.count does not check arguments properly status, msg = pcall(function() space.index['primary']:pairs(function() end, { iterator = box.index.EQ }) end) msg:match('function') -- Check that iterators successfully invalidated when index deleted gen, param, state = space.index['i1']:pairs(nil, { iterator = box.index.GE }) index_space = box.space[box.schema.INDEX_ID] _ = index_space:delete{space.id, space.index['i1'].id} type(_) _, value = gen(param, state) value space:drop() -- gh-1801 space:pairs() don't pass arguments to index:pairs() space = box.schema.space.create('test') pk = space:create_index('primary') space:replace({1}) space:replace({2}) space:replace({3}) space:replace({4}) space:pairs(2, { iterator = 'GE' }):totable() space:drop() inspector:cmd("clear filter") -- -- gh-1875 Add support for index:pairs(key, iterator-type) syntax -- space = box.schema.space.create('test', {engine=engine}) pk = space:create_index('pk') space:auto_increment{1} space:auto_increment{2} space:auto_increment{3} space:auto_increment{4} space:auto_increment{5} -- -- test pairs() -- space:pairs(3, 'GE'):totable() pk:pairs(3, 'GE'):totable() space:pairs(3, {iterator = 'GE' }):totable() pk:pairs(3, {iterator = 'GE' }):totable() space:pairs(3, 'EQ'):totable() pk:pairs(3, 'EQ'):totable() space:pairs(3, {iterator = 'EQ' }):totable() pk:pairs(3, {iterator = 'EQ' }):totable() space:pairs(3, 'GT'):totable() pk:pairs(3, 'GT'):totable() space:pairs(3, 
{iterator = 'GT' }):totable() pk:pairs(3, {iterator = 'GT' }):totable() -- -- test select() -- pk:select({3}, 'LE') space:select({3}, 'LE') -- -- test count() -- pk:count({3}, 'GT') space:count({3}, 'GT') space:drop() -- vinyl: broken rollback to savepoint -- https://github.com/tarantool/tarantool/issues/2589 s = box.schema.create_space('s', { engine = engine}) i1 = s:create_index('i1', { type = 'tree', parts = {1,'unsigned'}, unique = true }) i2 = s:create_index('i2', { type = 'tree', parts = {2,'unsigned'}, unique = true }) _ = s:replace{2, 2} box.begin() _ = s:replace{1, 1} _ = pcall(s.upsert, s, {1, 1}, {{"+", 2, 1}}) -- failed in unique secondary box.commit() s:select{} s:drop{} -- implement lazy iterator positioning s = box.schema.space.create('test' ,{engine=engine}) i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for i = 1,3 do for j = 1,3 do s:replace{i, j} end end itr1,itr2,itr3 = s:pairs{2} _ = s:replace{1, 4} r = {} for k,v in itr1,itr2,itr3 do table.insert(r, v) end r itr1,itr2,itr3 = s:pairs({2}, {iterator = 'GE'}) _ = s:replace{1, 5} r = {} for k,v in itr1,itr2,itr3 do table.insert(r, v) end r itr1,itr2,itr3 = s:pairs({2}, {iterator = 'REQ'}) s:replace{2, 4} r = {} for k,v in itr1,itr2,itr3 do table.insert(r, v) end r r = nil s:drop() -- make tree iterators stable -- https://github.com/tarantool/tarantool/issues/1796 s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) for i = 1,10 do s:replace{i} end r = {} for k,v in s:pairs{} do table.insert(r, v[1]) s:delete(v[1]) end r s:select{} for i = 1,10 do s:replace{i} end r = {} for k,v in s:pairs({}, {iterator = 'REQ'}) do table.insert(r, v[1]) s:delete(v[1]) end r s:select{} s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for i = 1,3 do for j = 1,3 do s:replace{i, j} end end r = {} for k,v in s:pairs{2} do table.insert(r, v) s:delete{v[1], 
v[2]} end r s:select{} for i = 1,3 do for j = 1,3 do s:replace{i, j} end end r = {} for k,v in s:pairs({3}, {iterator = 'REQ'}) do table.insert(r, v) s:delete{v[1], v[2]} end r s:select{} r = nil s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({25}) s:replace{25} state, value = gen(param,state) value state, value = gen(param,state) value gen,param,state = i:pairs({35}) state, value = gen(param,state) value s:replace{35} state, value = gen(param,state) value s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({30}, {iterator = 'GE'}) state, value = gen(param, state) value s:replace{0} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value state, value = gen(param, state) value state, value = gen(param, state) state value s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({40}, {iterator = 'LE'}) state, value = gen(param, state) value s:replace{0} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{32} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value state, value = gen(param, state) state value s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} 
s:replace{60} gen,param,state = i:pairs({28}, {iterator = 'GE'}) s:replace{0} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{32} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value state, value = gen(param, state) value gen(param, state) -- test iterator dummy function, invoked when it's out of bounds gen(param, state) s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({42}, {iterator = 'LE'}) s:replace{0} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value s:replace{32} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value state, value = gen(param, state) value gen(param, state) -- test iterator dummy function, invoked when it's out of bounds gen(param, state) s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({20}, {iterator = 'GT'}) state, value = gen(param, state) value s:replace{0} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value state, value = gen(param, state) value gen(param, state) -- test iterator dummy function, invoked when it's out of bounds gen(param, state) s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({50}, {iterator = 
'LT'}) state, value = gen(param, state) value s:replace{0} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{32} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value gen(param, state) -- test iterator dummy function, invoked when it's out of bounds gen(param, state) s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({28}, {iterator = 'GT'}) s:replace{0} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{32} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value state, value = gen(param, state) value gen(param, state) -- test iterator dummy function, invoked when it's out of bounds gen(param, state) s:drop() s = box.schema.space.create('test') i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} gen,param,state = i:pairs({42}, {iterator = 'LT'}) s:replace{0} state, value = gen(param, state) value s:replace{42} state, value = gen(param, state) value s:replace{15} state, value = gen(param, state) value s:replace{32} state, value = gen(param, state) value s:replace{80} state, value = gen(param, state) value state, value = gen(param, state) value gen(param, state) -- test iterator dummy function, invoked when it's out of bounds gen(param, state) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/suite.ini0000664000000000000000000000037013306560010020256 0ustar rootroot[default] core = tarantool description = tarantool multiengine tests script = box.lua use_unix_sockets = True config = engine.cfg #disabled = 
replica_join.test.lua lua_libs = conflict.lua ../box/lua/utils.lua ../box/lua/push.lua is_parallel = True tarantool_1.9.1.26.g63eb81e3c/test/engine/transaction.result0000664000000000000000000000661413306560010022220 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- basic transaction tests space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... -- begin/rollback inspector:cmd("setopt delimiter ';'") --- - true ... box.begin() for key = 1, 10 do space:insert({key}) end box.rollback(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... for key = 1, 10 do assert(#space:select({key}) == 0) end --- ... t --- - [] ... -- begin/commit insert inspector:cmd("setopt delimiter ';'") --- - true ... box.begin() for key = 1, 10 do space:insert({key}) end box.commit(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... for key = 1, 10 do table.insert(t, space:select({key})[1]) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] ... -- begin/commit delete inspector:cmd("setopt delimiter ';'") --- - true ... box.begin() for key = 1, 10 do space:delete({key}) end box.commit(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... for key = 1, 10 do assert(#space:select({key}) == 0) end --- ... t --- - [] ... space:drop() --- ... -- multi-space transactions a = box.schema.space.create('test', { engine = engine }) --- ... index = a:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... b = box.schema.space.create('test_tmp', { engine = engine }) --- ... index = b:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... -- begin/rollback inspector:cmd("setopt delimiter ';'") --- - true ... 
box.begin() for key = 1, 10 do a:insert({key}) end for key = 1, 10 do b:insert({key}) end box.rollback(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... for key = 1, 10 do assert(#a:select({key}) == 0) end --- ... t --- - [] ... for key = 1, 10 do assert(#b:select({key}) == 0) end --- ... t --- - [] ... -- begin/commit insert inspector:cmd("setopt delimiter ';'") --- - true ... box.begin() for key = 1, 10 do a:insert({key}) end for key = 1, 10 do b:insert({key}) end box.commit(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... for key = 1, 10 do table.insert(t, a:select({key})[1]) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] ... t = {} --- ... for key = 1, 10 do table.insert(t, b:select({key})[1]) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] ... -- begin/commit delete inspector:cmd("setopt delimiter ';'") --- - true ... box.begin() for key = 1, 10 do a:delete({key}) end for key = 1, 10 do b:delete({key}) end box.commit(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... t = {} --- ... for key = 1, 10 do assert(#a:select({key}) == 0) end --- ... t --- - [] ... for key = 1, 10 do assert(#b:select({key}) == 0) end --- ... t --- - [] ... a:drop() --- ... b:drop() --- ... -- ensure findByKey works in empty transaction context space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... inspector:cmd("setopt delimiter ';'") --- - true ... box.begin() space:get({0}) box.rollback(); --- ... inspector:cmd("setopt delimiter ''"); --- - true ... space:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/delete.test.lua0000664000000000000000000000521113306560010021346 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- delete (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t for key = 1, 100 do space:delete({tostring(key)}) end for key = 1, 100 do assert(space:get({tostring(key)}) == nil) end space:delete({tostring(7)}) space:drop() -- delete (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:replace({key}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t for key = 1, 100 do space:delete({key}) end for key = 1, 100 do assert(space:get({key}) == nil) end space:delete({7}) space:drop() -- delete multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:replace({key, key}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t for key = 1, 100 do space:delete({key, key}) end for key = 1, 100 do assert(space:get({key, key}) == nil) end space:delete({7, 7}) space:drop() -- delete (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t for key = 1, 100 do space:delete(box.tuple.new{tostring(key)}) end for key = 1, 100 do assert(space:get({tostring(key)}) == nil) end space:delete(box.tuple.new{tostring(7)}) 
space:drop() -- delete with multiple indices space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string', 3, 'scalar'}}) index3 = space:create_index('third', { type = 'tree', parts = {1, 'unsigned', 3, 'scalar'}}) space:insert({1, 'abc', 100}) space:insert({3, 'weif', 345}) space:insert({2, 'gbot', '023'}) space:insert({10, 'dflgner', 532.123}) space:insert({0, 'igkkm', 4902}) index1:select{} index2:select{} index3:select{} tmp = index1:delete({1}) tmp = index2:delete({'weif'}) -- must fail tmp = index2:delete({'weif', 345}) tmp = index2:delete({'weif', 345}) tmp = index3:delete({2, '023'}) index1:select{} index2:select{} index3:select{} space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/upsert.result0000664000000000000000000012265313306560010021217 0ustar rootroottest_run = require('test_run').new() --- ... engine = test_run:get_cfg('engine') --- ... -- upsert (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:upsert({tostring(key), 0}, {{'+', 2, 1}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1', 0] - ['2', 0] - ['3', 0] - ['4', 0] - ['5', 0] - ['6', 0] - ['7', 0] - ['8', 0] - ['9', 0] - ['10', 0] - ['11', 0] - ['12', 0] - ['13', 0] - ['14', 0] - ['15', 0] - ['16', 0] - ['17', 0] - ['18', 0] - ['19', 0] - ['20', 0] - ['21', 0] - ['22', 0] - ['23', 0] - ['24', 0] - ['25', 0] - ['26', 0] - ['27', 0] - ['28', 0] - ['29', 0] - ['30', 0] - ['31', 0] - ['32', 0] - ['33', 0] - ['34', 0] - ['35', 0] - ['36', 0] - ['37', 0] - ['38', 0] - ['39', 0] - ['40', 0] - ['41', 0] - ['42', 0] - ['43', 0] - ['44', 0] - ['45', 0] - ['46', 0] - ['47', 0] - ['48', 0] - ['49', 0] - ['50', 0] - ['51', 0] - ['52', 0] - ['53', 0] - ['54', 0] - ['55', 0] - ['56', 0] - ['57', 0] - ['58', 0] - ['59', 0] - ['60', 0] - ['61', 0] - ['62', 0] - ['63', 0] - ['64', 0] - ['65', 0] - ['66', 0] - ['67', 0] - ['68', 0] - ['69', 0] - ['70', 0] - ['71', 0] - ['72', 0] - ['73', 0] - ['74', 0] - ['75', 0] - ['76', 0] - ['77', 0] - ['78', 0] - ['79', 0] - ['80', 0] - ['81', 0] - ['82', 0] - ['83', 0] - ['84', 0] - ['85', 0] - ['86', 0] - ['87', 0] - ['88', 0] - ['89', 0] - ['90', 0] - ['91', 0] - ['92', 0] - ['93', 0] - ['94', 0] - ['95', 0] - ['96', 0] - ['97', 0] - ['98', 0] - ['99', 0] - ['100', 0] ... for key = 1, 100 do space:upsert({tostring(key), 0}, {{'+', 2, 10}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1', 10] - ['2', 10] - ['3', 10] - ['4', 10] - ['5', 10] - ['6', 10] - ['7', 10] - ['8', 10] - ['9', 10] - ['10', 10] - ['11', 10] - ['12', 10] - ['13', 10] - ['14', 10] - ['15', 10] - ['16', 10] - ['17', 10] - ['18', 10] - ['19', 10] - ['20', 10] - ['21', 10] - ['22', 10] - ['23', 10] - ['24', 10] - ['25', 10] - ['26', 10] - ['27', 10] - ['28', 10] - ['29', 10] - ['30', 10] - ['31', 10] - ['32', 10] - ['33', 10] - ['34', 10] - ['35', 10] - ['36', 10] - ['37', 10] - ['38', 10] - ['39', 10] - ['40', 10] - ['41', 10] - ['42', 10] - ['43', 10] - ['44', 10] - ['45', 10] - ['46', 10] - ['47', 10] - ['48', 10] - ['49', 10] - ['50', 10] - ['51', 10] - ['52', 10] - ['53', 10] - ['54', 10] - ['55', 10] - ['56', 10] - ['57', 10] - ['58', 10] - ['59', 10] - ['60', 10] - ['61', 10] - ['62', 10] - ['63', 10] - ['64', 10] - ['65', 10] - ['66', 10] - ['67', 10] - ['68', 10] - ['69', 10] - ['70', 10] - ['71', 10] - ['72', 10] - ['73', 10] - ['74', 10] - ['75', 10] - ['76', 10] - ['77', 10] - ['78', 10] - ['79', 10] - ['80', 10] - ['81', 10] - ['82', 10] - ['83', 10] - ['84', 10] - ['85', 10] - ['86', 10] - ['87', 10] - ['88', 10] - ['89', 10] - ['90', 10] - ['91', 10] - ['92', 10] - ['93', 10] - ['94', 10] - ['95', 10] - ['96', 10] - ['97', 10] - ['98', 10] - ['99', 10] - ['100', 10] ... for key = 1, 100 do space:delete({tostring(key)}) end --- ... for key = 1, 100 do space:upsert({tostring(key), 0}, {{'+', 2, 1}, {'=', 3, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1', 0] - ['2', 0] - ['3', 0] - ['4', 0] - ['5', 0] - ['6', 0] - ['7', 0] - ['8', 0] - ['9', 0] - ['10', 0] - ['11', 0] - ['12', 0] - ['13', 0] - ['14', 0] - ['15', 0] - ['16', 0] - ['17', 0] - ['18', 0] - ['19', 0] - ['20', 0] - ['21', 0] - ['22', 0] - ['23', 0] - ['24', 0] - ['25', 0] - ['26', 0] - ['27', 0] - ['28', 0] - ['29', 0] - ['30', 0] - ['31', 0] - ['32', 0] - ['33', 0] - ['34', 0] - ['35', 0] - ['36', 0] - ['37', 0] - ['38', 0] - ['39', 0] - ['40', 0] - ['41', 0] - ['42', 0] - ['43', 0] - ['44', 0] - ['45', 0] - ['46', 0] - ['47', 0] - ['48', 0] - ['49', 0] - ['50', 0] - ['51', 0] - ['52', 0] - ['53', 0] - ['54', 0] - ['55', 0] - ['56', 0] - ['57', 0] - ['58', 0] - ['59', 0] - ['60', 0] - ['61', 0] - ['62', 0] - ['63', 0] - ['64', 0] - ['65', 0] - ['66', 0] - ['67', 0] - ['68', 0] - ['69', 0] - ['70', 0] - ['71', 0] - ['72', 0] - ['73', 0] - ['74', 0] - ['75', 0] - ['76', 0] - ['77', 0] - ['78', 0] - ['79', 0] - ['80', 0] - ['81', 0] - ['82', 0] - ['83', 0] - ['84', 0] - ['85', 0] - ['86', 0] - ['87', 0] - ['88', 0] - ['89', 0] - ['90', 0] - ['91', 0] - ['92', 0] - ['93', 0] - ['94', 0] - ['95', 0] - ['96', 0] - ['97', 0] - ['98', 0] - ['99', 0] - ['100', 0] ... space:drop() --- ... -- upsert (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:upsert({key, 0}, {{'+', 2, 1}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key})) end --- ... 
t --- - - [1, 0] - [2, 0] - [3, 0] - [4, 0] - [5, 0] - [6, 0] - [7, 0] - [8, 0] - [9, 0] - [10, 0] - [11, 0] - [12, 0] - [13, 0] - [14, 0] - [15, 0] - [16, 0] - [17, 0] - [18, 0] - [19, 0] - [20, 0] - [21, 0] - [22, 0] - [23, 0] - [24, 0] - [25, 0] - [26, 0] - [27, 0] - [28, 0] - [29, 0] - [30, 0] - [31, 0] - [32, 0] - [33, 0] - [34, 0] - [35, 0] - [36, 0] - [37, 0] - [38, 0] - [39, 0] - [40, 0] - [41, 0] - [42, 0] - [43, 0] - [44, 0] - [45, 0] - [46, 0] - [47, 0] - [48, 0] - [49, 0] - [50, 0] - [51, 0] - [52, 0] - [53, 0] - [54, 0] - [55, 0] - [56, 0] - [57, 0] - [58, 0] - [59, 0] - [60, 0] - [61, 0] - [62, 0] - [63, 0] - [64, 0] - [65, 0] - [66, 0] - [67, 0] - [68, 0] - [69, 0] - [70, 0] - [71, 0] - [72, 0] - [73, 0] - [74, 0] - [75, 0] - [76, 0] - [77, 0] - [78, 0] - [79, 0] - [80, 0] - [81, 0] - [82, 0] - [83, 0] - [84, 0] - [85, 0] - [86, 0] - [87, 0] - [88, 0] - [89, 0] - [90, 0] - [91, 0] - [92, 0] - [93, 0] - [94, 0] - [95, 0] - [96, 0] - [97, 0] - [98, 0] - [99, 0] - [100, 0] ... for key = 1, 100 do space:upsert({key, 0}, {{'+', 2, 10}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key})) end --- ... 
t --- - - [1, 10] - [2, 10] - [3, 10] - [4, 10] - [5, 10] - [6, 10] - [7, 10] - [8, 10] - [9, 10] - [10, 10] - [11, 10] - [12, 10] - [13, 10] - [14, 10] - [15, 10] - [16, 10] - [17, 10] - [18, 10] - [19, 10] - [20, 10] - [21, 10] - [22, 10] - [23, 10] - [24, 10] - [25, 10] - [26, 10] - [27, 10] - [28, 10] - [29, 10] - [30, 10] - [31, 10] - [32, 10] - [33, 10] - [34, 10] - [35, 10] - [36, 10] - [37, 10] - [38, 10] - [39, 10] - [40, 10] - [41, 10] - [42, 10] - [43, 10] - [44, 10] - [45, 10] - [46, 10] - [47, 10] - [48, 10] - [49, 10] - [50, 10] - [51, 10] - [52, 10] - [53, 10] - [54, 10] - [55, 10] - [56, 10] - [57, 10] - [58, 10] - [59, 10] - [60, 10] - [61, 10] - [62, 10] - [63, 10] - [64, 10] - [65, 10] - [66, 10] - [67, 10] - [68, 10] - [69, 10] - [70, 10] - [71, 10] - [72, 10] - [73, 10] - [74, 10] - [75, 10] - [76, 10] - [77, 10] - [78, 10] - [79, 10] - [80, 10] - [81, 10] - [82, 10] - [83, 10] - [84, 10] - [85, 10] - [86, 10] - [87, 10] - [88, 10] - [89, 10] - [90, 10] - [91, 10] - [92, 10] - [93, 10] - [94, 10] - [95, 10] - [96, 10] - [97, 10] - [98, 10] - [99, 10] - [100, 10] ... for key = 1, 100 do space:delete({key}) end --- ... for key = 1, 100 do space:upsert({key, 0}, {{'+', 2, 1}, {'=', 3, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key})) end --- ... 
t --- - - [1, 0] - [2, 0] - [3, 0] - [4, 0] - [5, 0] - [6, 0] - [7, 0] - [8, 0] - [9, 0] - [10, 0] - [11, 0] - [12, 0] - [13, 0] - [14, 0] - [15, 0] - [16, 0] - [17, 0] - [18, 0] - [19, 0] - [20, 0] - [21, 0] - [22, 0] - [23, 0] - [24, 0] - [25, 0] - [26, 0] - [27, 0] - [28, 0] - [29, 0] - [30, 0] - [31, 0] - [32, 0] - [33, 0] - [34, 0] - [35, 0] - [36, 0] - [37, 0] - [38, 0] - [39, 0] - [40, 0] - [41, 0] - [42, 0] - [43, 0] - [44, 0] - [45, 0] - [46, 0] - [47, 0] - [48, 0] - [49, 0] - [50, 0] - [51, 0] - [52, 0] - [53, 0] - [54, 0] - [55, 0] - [56, 0] - [57, 0] - [58, 0] - [59, 0] - [60, 0] - [61, 0] - [62, 0] - [63, 0] - [64, 0] - [65, 0] - [66, 0] - [67, 0] - [68, 0] - [69, 0] - [70, 0] - [71, 0] - [72, 0] - [73, 0] - [74, 0] - [75, 0] - [76, 0] - [77, 0] - [78, 0] - [79, 0] - [80, 0] - [81, 0] - [82, 0] - [83, 0] - [84, 0] - [85, 0] - [86, 0] - [87, 0] - [88, 0] - [89, 0] - [90, 0] - [91, 0] - [92, 0] - [93, 0] - [94, 0] - [95, 0] - [96, 0] - [97, 0] - [98, 0] - [99, 0] - [100, 0] ... space:drop() --- ... -- upsert multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:upsert({key, key, 0}, {{'+', 3, 1}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... 
t --- - - [1, 1, 0] - [2, 2, 0] - [3, 3, 0] - [4, 4, 0] - [5, 5, 0] - [6, 6, 0] - [7, 7, 0] - [8, 8, 0] - [9, 9, 0] - [10, 10, 0] - [11, 11, 0] - [12, 12, 0] - [13, 13, 0] - [14, 14, 0] - [15, 15, 0] - [16, 16, 0] - [17, 17, 0] - [18, 18, 0] - [19, 19, 0] - [20, 20, 0] - [21, 21, 0] - [22, 22, 0] - [23, 23, 0] - [24, 24, 0] - [25, 25, 0] - [26, 26, 0] - [27, 27, 0] - [28, 28, 0] - [29, 29, 0] - [30, 30, 0] - [31, 31, 0] - [32, 32, 0] - [33, 33, 0] - [34, 34, 0] - [35, 35, 0] - [36, 36, 0] - [37, 37, 0] - [38, 38, 0] - [39, 39, 0] - [40, 40, 0] - [41, 41, 0] - [42, 42, 0] - [43, 43, 0] - [44, 44, 0] - [45, 45, 0] - [46, 46, 0] - [47, 47, 0] - [48, 48, 0] - [49, 49, 0] - [50, 50, 0] - [51, 51, 0] - [52, 52, 0] - [53, 53, 0] - [54, 54, 0] - [55, 55, 0] - [56, 56, 0] - [57, 57, 0] - [58, 58, 0] - [59, 59, 0] - [60, 60, 0] - [61, 61, 0] - [62, 62, 0] - [63, 63, 0] - [64, 64, 0] - [65, 65, 0] - [66, 66, 0] - [67, 67, 0] - [68, 68, 0] - [69, 69, 0] - [70, 70, 0] - [71, 71, 0] - [72, 72, 0] - [73, 73, 0] - [74, 74, 0] - [75, 75, 0] - [76, 76, 0] - [77, 77, 0] - [78, 78, 0] - [79, 79, 0] - [80, 80, 0] - [81, 81, 0] - [82, 82, 0] - [83, 83, 0] - [84, 84, 0] - [85, 85, 0] - [86, 86, 0] - [87, 87, 0] - [88, 88, 0] - [89, 89, 0] - [90, 90, 0] - [91, 91, 0] - [92, 92, 0] - [93, 93, 0] - [94, 94, 0] - [95, 95, 0] - [96, 96, 0] - [97, 97, 0] - [98, 98, 0] - [99, 99, 0] - [100, 100, 0] ... for key = 1, 100 do space:upsert({key, key, 0}, {{'+', 3, 10}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... 
t --- - - [1, 1, 10] - [2, 2, 10] - [3, 3, 10] - [4, 4, 10] - [5, 5, 10] - [6, 6, 10] - [7, 7, 10] - [8, 8, 10] - [9, 9, 10] - [10, 10, 10] - [11, 11, 10] - [12, 12, 10] - [13, 13, 10] - [14, 14, 10] - [15, 15, 10] - [16, 16, 10] - [17, 17, 10] - [18, 18, 10] - [19, 19, 10] - [20, 20, 10] - [21, 21, 10] - [22, 22, 10] - [23, 23, 10] - [24, 24, 10] - [25, 25, 10] - [26, 26, 10] - [27, 27, 10] - [28, 28, 10] - [29, 29, 10] - [30, 30, 10] - [31, 31, 10] - [32, 32, 10] - [33, 33, 10] - [34, 34, 10] - [35, 35, 10] - [36, 36, 10] - [37, 37, 10] - [38, 38, 10] - [39, 39, 10] - [40, 40, 10] - [41, 41, 10] - [42, 42, 10] - [43, 43, 10] - [44, 44, 10] - [45, 45, 10] - [46, 46, 10] - [47, 47, 10] - [48, 48, 10] - [49, 49, 10] - [50, 50, 10] - [51, 51, 10] - [52, 52, 10] - [53, 53, 10] - [54, 54, 10] - [55, 55, 10] - [56, 56, 10] - [57, 57, 10] - [58, 58, 10] - [59, 59, 10] - [60, 60, 10] - [61, 61, 10] - [62, 62, 10] - [63, 63, 10] - [64, 64, 10] - [65, 65, 10] - [66, 66, 10] - [67, 67, 10] - [68, 68, 10] - [69, 69, 10] - [70, 70, 10] - [71, 71, 10] - [72, 72, 10] - [73, 73, 10] - [74, 74, 10] - [75, 75, 10] - [76, 76, 10] - [77, 77, 10] - [78, 78, 10] - [79, 79, 10] - [80, 80, 10] - [81, 81, 10] - [82, 82, 10] - [83, 83, 10] - [84, 84, 10] - [85, 85, 10] - [86, 86, 10] - [87, 87, 10] - [88, 88, 10] - [89, 89, 10] - [90, 90, 10] - [91, 91, 10] - [92, 92, 10] - [93, 93, 10] - [94, 94, 10] - [95, 95, 10] - [96, 96, 10] - [97, 97, 10] - [98, 98, 10] - [99, 99, 10] - [100, 100, 10] ... for key = 1, 100 do space:delete({key, key}) end --- ... for key = 1, 100 do space:upsert({key, key, 0}, {{'+', 3, 1}, {'=', 4, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... 
t --- - - [1, 1, 0] - [2, 2, 0] - [3, 3, 0] - [4, 4, 0] - [5, 5, 0] - [6, 6, 0] - [7, 7, 0] - [8, 8, 0] - [9, 9, 0] - [10, 10, 0] - [11, 11, 0] - [12, 12, 0] - [13, 13, 0] - [14, 14, 0] - [15, 15, 0] - [16, 16, 0] - [17, 17, 0] - [18, 18, 0] - [19, 19, 0] - [20, 20, 0] - [21, 21, 0] - [22, 22, 0] - [23, 23, 0] - [24, 24, 0] - [25, 25, 0] - [26, 26, 0] - [27, 27, 0] - [28, 28, 0] - [29, 29, 0] - [30, 30, 0] - [31, 31, 0] - [32, 32, 0] - [33, 33, 0] - [34, 34, 0] - [35, 35, 0] - [36, 36, 0] - [37, 37, 0] - [38, 38, 0] - [39, 39, 0] - [40, 40, 0] - [41, 41, 0] - [42, 42, 0] - [43, 43, 0] - [44, 44, 0] - [45, 45, 0] - [46, 46, 0] - [47, 47, 0] - [48, 48, 0] - [49, 49, 0] - [50, 50, 0] - [51, 51, 0] - [52, 52, 0] - [53, 53, 0] - [54, 54, 0] - [55, 55, 0] - [56, 56, 0] - [57, 57, 0] - [58, 58, 0] - [59, 59, 0] - [60, 60, 0] - [61, 61, 0] - [62, 62, 0] - [63, 63, 0] - [64, 64, 0] - [65, 65, 0] - [66, 66, 0] - [67, 67, 0] - [68, 68, 0] - [69, 69, 0] - [70, 70, 0] - [71, 71, 0] - [72, 72, 0] - [73, 73, 0] - [74, 74, 0] - [75, 75, 0] - [76, 76, 0] - [77, 77, 0] - [78, 78, 0] - [79, 79, 0] - [80, 80, 0] - [81, 81, 0] - [82, 82, 0] - [83, 83, 0] - [84, 84, 0] - [85, 85, 0] - [86, 86, 0] - [87, 87, 0] - [88, 88, 0] - [89, 89, 0] - [90, 90, 0] - [91, 91, 0] - [92, 92, 0] - [93, 93, 0] - [94, 94, 0] - [95, 95, 0] - [96, 96, 0] - [97, 97, 0] - [98, 98, 0] - [99, 99, 0] - [100, 100, 0] ... space:drop() --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... function less(a, b) if type(a[2]) ~= type(b[2]) then return type(a[2]) < type(b[2]) end if a[2] == b[2] then return a[1] < b[1] end if type(a[2]) == 'boolean' then return a[2] == false and b[2] == true end return a[2] < b[2] end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... function sort(t) table.sort(t, less) return t end --- ... -- upsert default tuple constraint space = box.schema.space.create('test', { engine = engine }) --- ... 
index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... space:upsert({0, 'key', 0}, {{'+', 3, 1}}) --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... space:drop() --- ... -- upsert primary key modify (skipped) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space:upsert({0, 0}, {{'+', 1, 1}, {'+', 2, 1}}) --- ... space:get({0}) --- - [0, 0] ... space:drop() --- ... -- upsert with box.tuple.new space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:upsert(box.tuple.new{key, key, 0}, box.tuple.new{{'+', 3, 1}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... t --- - - [1, 1, 0] - [2, 2, 0] - [3, 3, 0] - [4, 4, 0] - [5, 5, 0] - [6, 6, 0] - [7, 7, 0] - [8, 8, 0] - [9, 9, 0] - [10, 10, 0] - [11, 11, 0] - [12, 12, 0] - [13, 13, 0] - [14, 14, 0] - [15, 15, 0] - [16, 16, 0] - [17, 17, 0] - [18, 18, 0] - [19, 19, 0] - [20, 20, 0] - [21, 21, 0] - [22, 22, 0] - [23, 23, 0] - [24, 24, 0] - [25, 25, 0] - [26, 26, 0] - [27, 27, 0] - [28, 28, 0] - [29, 29, 0] - [30, 30, 0] - [31, 31, 0] - [32, 32, 0] - [33, 33, 0] - [34, 34, 0] - [35, 35, 0] - [36, 36, 0] - [37, 37, 0] - [38, 38, 0] - [39, 39, 0] - [40, 40, 0] - [41, 41, 0] - [42, 42, 0] - [43, 43, 0] - [44, 44, 0] - [45, 45, 0] - [46, 46, 0] - [47, 47, 0] - [48, 48, 0] - [49, 49, 0] - [50, 50, 0] - [51, 51, 0] - [52, 52, 0] - [53, 53, 0] - [54, 54, 0] - [55, 55, 0] - [56, 56, 0] - [57, 57, 0] - [58, 58, 0] - [59, 59, 0] - [60, 60, 0] - [61, 61, 0] - [62, 62, 0] - [63, 63, 0] - [64, 64, 0] - [65, 65, 0] - [66, 66, 0] - [67, 67, 0] - [68, 68, 0] - [69, 69, 0] - [70, 70, 0] - [71, 71, 0] - [72, 72, 0] - [73, 73, 0] - [74, 74, 0] - [75, 
75, 0] - [76, 76, 0] - [77, 77, 0] - [78, 78, 0] - [79, 79, 0] - [80, 80, 0] - [81, 81, 0] - [82, 82, 0] - [83, 83, 0] - [84, 84, 0] - [85, 85, 0] - [86, 86, 0] - [87, 87, 0] - [88, 88, 0] - [89, 89, 0] - [90, 90, 0] - [91, 91, 0] - [92, 92, 0] - [93, 93, 0] - [94, 94, 0] - [95, 95, 0] - [96, 96, 0] - [97, 97, 0] - [98, 98, 0] - [99, 99, 0] - [100, 100, 0] ... for key = 1, 100 do space:upsert(box.tuple.new{key, key, 0}, box.tuple.new{{'+', 3, 10}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... t --- - - [1, 1, 10] - [2, 2, 10] - [3, 3, 10] - [4, 4, 10] - [5, 5, 10] - [6, 6, 10] - [7, 7, 10] - [8, 8, 10] - [9, 9, 10] - [10, 10, 10] - [11, 11, 10] - [12, 12, 10] - [13, 13, 10] - [14, 14, 10] - [15, 15, 10] - [16, 16, 10] - [17, 17, 10] - [18, 18, 10] - [19, 19, 10] - [20, 20, 10] - [21, 21, 10] - [22, 22, 10] - [23, 23, 10] - [24, 24, 10] - [25, 25, 10] - [26, 26, 10] - [27, 27, 10] - [28, 28, 10] - [29, 29, 10] - [30, 30, 10] - [31, 31, 10] - [32, 32, 10] - [33, 33, 10] - [34, 34, 10] - [35, 35, 10] - [36, 36, 10] - [37, 37, 10] - [38, 38, 10] - [39, 39, 10] - [40, 40, 10] - [41, 41, 10] - [42, 42, 10] - [43, 43, 10] - [44, 44, 10] - [45, 45, 10] - [46, 46, 10] - [47, 47, 10] - [48, 48, 10] - [49, 49, 10] - [50, 50, 10] - [51, 51, 10] - [52, 52, 10] - [53, 53, 10] - [54, 54, 10] - [55, 55, 10] - [56, 56, 10] - [57, 57, 10] - [58, 58, 10] - [59, 59, 10] - [60, 60, 10] - [61, 61, 10] - [62, 62, 10] - [63, 63, 10] - [64, 64, 10] - [65, 65, 10] - [66, 66, 10] - [67, 67, 10] - [68, 68, 10] - [69, 69, 10] - [70, 70, 10] - [71, 71, 10] - [72, 72, 10] - [73, 73, 10] - [74, 74, 10] - [75, 75, 10] - [76, 76, 10] - [77, 77, 10] - [78, 78, 10] - [79, 79, 10] - [80, 80, 10] - [81, 81, 10] - [82, 82, 10] - [83, 83, 10] - [84, 84, 10] - [85, 85, 10] - [86, 86, 10] - [87, 87, 10] - [88, 88, 10] - [89, 89, 10] - [90, 90, 10] - [91, 91, 10] - [92, 92, 10] - [93, 93, 10] - [94, 94, 10] - [95, 95, 10] - [96, 96, 10] - [97, 97, 10] - 
[98, 98, 10] - [99, 99, 10] - [100, 100, 10] ... for key = 1, 100 do space:delete({key, key}) end --- ... for key = 1, 100 do space:upsert(box.tuple.new{key, key, 0}, box.tuple.new{{'+', 3, 1}, {'=', 4, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... t --- - - [1, 1, 0] - [2, 2, 0] - [3, 3, 0] - [4, 4, 0] - [5, 5, 0] - [6, 6, 0] - [7, 7, 0] - [8, 8, 0] - [9, 9, 0] - [10, 10, 0] - [11, 11, 0] - [12, 12, 0] - [13, 13, 0] - [14, 14, 0] - [15, 15, 0] - [16, 16, 0] - [17, 17, 0] - [18, 18, 0] - [19, 19, 0] - [20, 20, 0] - [21, 21, 0] - [22, 22, 0] - [23, 23, 0] - [24, 24, 0] - [25, 25, 0] - [26, 26, 0] - [27, 27, 0] - [28, 28, 0] - [29, 29, 0] - [30, 30, 0] - [31, 31, 0] - [32, 32, 0] - [33, 33, 0] - [34, 34, 0] - [35, 35, 0] - [36, 36, 0] - [37, 37, 0] - [38, 38, 0] - [39, 39, 0] - [40, 40, 0] - [41, 41, 0] - [42, 42, 0] - [43, 43, 0] - [44, 44, 0] - [45, 45, 0] - [46, 46, 0] - [47, 47, 0] - [48, 48, 0] - [49, 49, 0] - [50, 50, 0] - [51, 51, 0] - [52, 52, 0] - [53, 53, 0] - [54, 54, 0] - [55, 55, 0] - [56, 56, 0] - [57, 57, 0] - [58, 58, 0] - [59, 59, 0] - [60, 60, 0] - [61, 61, 0] - [62, 62, 0] - [63, 63, 0] - [64, 64, 0] - [65, 65, 0] - [66, 66, 0] - [67, 67, 0] - [68, 68, 0] - [69, 69, 0] - [70, 70, 0] - [71, 71, 0] - [72, 72, 0] - [73, 73, 0] - [74, 74, 0] - [75, 75, 0] - [76, 76, 0] - [77, 77, 0] - [78, 78, 0] - [79, 79, 0] - [80, 80, 0] - [81, 81, 0] - [82, 82, 0] - [83, 83, 0] - [84, 84, 0] - [85, 85, 0] - [86, 86, 0] - [87, 87, 0] - [88, 88, 0] - [89, 89, 0] - [90, 90, 0] - [91, 91, 0] - [92, 92, 0] - [93, 93, 0] - [94, 94, 0] - [95, 95, 0] - [96, 96, 0] - [97, 97, 0] - [98, 98, 0] - [99, 99, 0] - [100, 100, 0] ... space:drop() --- ... -- https://github.com/tarantool/tarantool/issues/1671 space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... 
index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) --- ... space:insert({1, 1}) --- - [1, 1] ... space:insert({2, 2}) --- - [2, 2] ... index1:select{} --- - - [1, 1] - [2, 2] ... index2:select{} --- - - [1, 1] - [2, 2] ... space:upsert({1, 1}, {{'=', 2, 2}}) --- ... sort(index1:select{}) --- - - [1, 2] - [2, 2] ... sort(index2:select{}) --- - - [1, 2] - [2, 2] ... space:drop() --- ... s = box.schema.space.create('tweedledum', { engine = engine }) --- ... index = s:create_index('pk') --- ... s:upsert({0, 0}, {{'+', 2, 2}}) --- ... s:select{0} --- - - [0, 0] ... tmp = s:delete{0} --- ... s:upsert({0, 0, 0}, {{'+', 2, 2}}) --- ... s:select{0} --- - - [0, 0, 0] ... tmp = s:delete{0} --- ... s:upsert({0}, {{'+', 2, 2}}) --- ... s:select{0} --- - - [0] ... s:replace{0, 1, 2, 4} --- - [0, 1, 2, 4] ... s:upsert({0, 0, "you will not see it"}, {{'+', 2, 2}}) --- ... s:select{0} --- - - [0, 3, 2, 4] ... s:replace{0, -0x4000000000000000ll} --- - [0, -4611686018427387904] ... s:upsert({0}, {{'+', 2, -0x4000000000000001ll}}) -- overflow --- ... s:select{0} --- - - [0, -4611686018427387904] ... s:replace{0, "thing"} --- - [0, 'thing'] ... s:upsert({0, "nothing"}, {{'+', 2, 2}}) --- ... s:select{0} --- - - [0, 'thing'] ... tmp = s:delete{0} --- ... s:upsert({0, "thing"}, {{'+', 2, 2}}) --- ... s:select{0} --- - - [0, 'thing'] ... s:replace{0, 1, 2} --- - [0, 1, 2] ... s:upsert({0}, {{'!', 42, 42}}) --- ... s:select{0} --- - - [0, 1, 2] ... s:upsert({0}, {{'#', 42, 42}}) --- ... s:select{0} --- - - [0, 1, 2] ... s:upsert({0}, {{'=', 42, 42}}) --- ... s:select{} --- - - [0, 1, 2] ... s:replace{0, 1.5} --- - [0, 1.5] ... s:select{} --- - - [0, 1.5] ... s:upsert({0}, {{'|', 1, 255}}) --- ... s:select{0} --- - - [0, 1.5] ... s:replace{0, 1.5} --- - [0, 1.5] ... s:replace{0, 'something to splice'} --- - [0, 'something to splice'] ... s:upsert({0}, {{':', 2, 1, 4, 'no'}}) --- ... s:select{0} --- - - [0, 'nothing to splice'] ... 
s:upsert({0}, {{':', 2, 1, 2, 'every'}}) --- ... s:select{0} --- - - [0, 'everything to splice'] ... s:upsert({0}, {{':', 2, -100, 2, 'every'}}) --- ... s:select{0} --- - - [0, 'everything to splice'] ... s:drop() --- ... space = box.schema.space.create('test', { engine = engine, field_count = 1 }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space:insert({1}) --- - [1] ... space:select{} --- - - [1] ... space:upsert({2, 2}, {{'+', 2, 1}}) --- - error: Tuple field count 2 does not match space field count 1 ... -- TODO: https://github.com/tarantool/tarantool/issues/1622 -- space:upsert({1}, {{'=', 2, 10}}) space:select{} --- - - [1] ... space:drop() --- ... space = box.schema.space.create('test', { engine = engine, field_count = 2 }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space:insert({1, 1}) --- - [1, 1] ... space:select{} --- - - [1, 1] ... space:upsert({2, 2, 2}, {{'+', 3, 1}}) --- - error: Tuple field count 3 does not match space field count 2 ... space:upsert({3, 3}, {{'+', 2, 1}}) --- ... -- TODO: https://github.com/tarantool/tarantool/issues/1622 --space:upsert({4}, {{'=', 2, 10}}) --space:upsert({1}, {{'#', 2}}) space:select{} --- - - [1, 1] - [3, 3] ... space:drop() --- ... --UPSERT https://github.com/tarantool/tarantool/issues/966 test_run:cmd("setopt delimiter ';'") --- - true ... function anything_to_string(tab) if tab == nil then return 'nil' end local str = '[' local first_route = true local t = 0 for k,f in pairs(tab) do if not first_route then str = str .. ',' end first_route = false t = t + 1 if k ~= t then str = str .. k .. '=' end if type(f) == 'string' then str = str .. "'" .. f .. "'" elseif type (f) == 'number' then str = str .. tostring(f) elseif type (f) == 'table' or type (f) == 'cdata' then str = str .. anything_to_string(f) else str = str .. '?' end end str = str .. ']' return str end; --- ... 
function things_equal(var1, var2) local type1 = type(var1) == 'cdata' and 'table' or type(var1) local type2 = type(var2) == 'cdata' and 'table' or type(var2) if type1 ~= type2 then return false end if type1 ~= 'table' then return var1 == var2 end for k,v in pairs(var1) do if not things_equal(v, var2[k]) then return false end end for k,v in pairs(var2) do if not things_equal(v, var1[k]) then return false end end return true end; --- ... function copy_thing(t) if type(t) ~= 'table' then return t end local res = {} for k,v in pairs(t) do res[copy_thing(k)] = copy_thing(v) end return res end; --- ... function test(key_tuple, ops, expect) box.space.s:upsert(key_tuple, ops) if (things_equal(box.space.s:select{}, expect)) then return 'upsert('.. anything_to_string(key_tuple) .. ', ' .. anything_to_string(ops) .. ', ' .. ') OK ' .. anything_to_string(box.space.s:select{}) end return 'upsert('.. anything_to_string(key_tuple) .. ', ' .. anything_to_string(ops) .. ', ' .. ') FAILED, got ' .. anything_to_string(box.space.s:select{}) .. ' expected ' .. anything_to_string(expect) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... -- https://github.com/tarantool/tarantool/issues/1671 -- test upserts without triggers -- test case with one index space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { parts = {1, 'string'} }) --- ... space:upsert({1}, {{'!', 2, 100}}) -- must fail on checking tuple --- - error: 'Tuple field 1 type does not match one required by operation: expected string' ... space:upsert({'a'}, {{'a', 2, 100}}) -- must fail on checking ops --- - error: Unknown UPDATE operation ... space:upsert({'a'}, {{'!', 2, 'ups1'}}) -- 'fast' upsert via insert in one index --- ... space:upsert({'a', 'b'}, {{'!', 2, 'ups2'}}) -- 'fast' upsert via update in one index --- ... space:select{} --- - - ['a', 'ups2'] ... space:drop() --- ... 
-- tests on multiple indexes space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { parts = {1, 'string'} }) --- ... index2 = space:create_index('secondary', { parts = {2, 'scalar', 3, 'unsigned'} }) --- ... -- test upsert that executes as insert in all indexes space:upsert({'a', 100, 100}, {{'!', 4, 200}}) --- ... space:upsert({'b', 100, 200}, {{'!', 4, 300}}) --- ... space:upsert({'c', 100, 300}, {{'!', 4, 400}}) --- ... index1:select{} --- - - ['a', 100, 100] - ['b', 100, 200] - ['c', 100, 300] ... index2:select{} --- - - ['a', 100, 100] - ['b', 100, 200] - ['c', 100, 300] ... -- test upsert that executes as update space:upsert({'a', 100, 100}, {{'=', 3, -200}}) -- must fail on cheking new tuple in secondary index --- - error: 'Tuple field 3 type does not match one required by operation: expected unsigned' ... space:upsert({'b', 100, 200}, {{'=', 1, 'd'}}) -- must fail with attempt to modify primary index --- ... index1:select{} --- - - ['a', 100, 100] - ['b', 100, 200] - ['c', 100, 300] ... index2:select{} --- - - ['a', 100, 100] - ['b', 100, 200] - ['c', 100, 300] ... space:drop() --- ... -- https://github.com/tarantool/tarantool/issues/1854 space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space:insert({1, 1, 1}) --- - [1, 1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3, 3, 3}) --- - [3, 3, 3] ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:upsert({2, 18, 76}, {}) --- ... space:upsert({4, 4, 4}, {}) --- ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] - [4, 4, 4] ... space:drop() --- ... s = box.schema.space.create('s', { engine = engine }) --- ... index1 = s:create_index('i1') --- ... index2 = s:create_index('i2', { parts = {2, 'string'}, unique = false }) --- ... t = {1, '1', 1, 'qwerty'} --- ... s:insert(t) --- - [1, '1', 1, 'qwerty'] ... 
-- all good operations, one op, equivalent to update test(t, {{'+', 3, 5}}, {{1, '1', 6, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['+',3,5]], ) OK [[1,'1',6,'qwerty']] ... test(t, {{'-', 3, 3}}, {{1, '1', 3, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['-',3,3]], ) OK [[1,'1',3,'qwerty']] ... test(t, {{'&', 3, 5}}, {{1, '1', 1, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['&',3,5]], ) OK [[1,'1',1,'qwerty']] ... test(t, {{'|', 3, 8}}, {{1, '1', 9, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['|',3,8]], ) OK [[1,'1',9,'qwerty']] ... test(t, {{'^', 3, 12}}, {{1, '1', 5, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['^',3,12]], ) OK [[1,'1',5,'qwerty']] ... test(t, {{':', 4, 2, 4, "uer"}}, {{1, '1', 5, 'query'}}) --- - upsert([1,'1',1,'qwerty'], [[':',4,2,4,'uer']], ) OK [[1,'1',5,'query']] ... test(t, {{'!', 4, 'answer'}}, {{1, '1', 5, 'answer', 'query'}}) --- - upsert([1,'1',1,'qwerty'], [['!',4,'answer']], ) OK [[1,'1',5,'answer','query']] ... test(t, {{'#', 5, 1}}, {{1, '1', 5, 'answer'}}) --- - upsert([1,'1',1,'qwerty'], [['#',5,1]], ) OK [[1,'1',5,'answer']] ... test(t, {{'!', -1, 1}}, {{1, '1', 5, 'answer', 1}}) --- - upsert([1,'1',1,'qwerty'], [['!',-1,1]], ) OK [[1,'1',5,'answer',1]] ... test(t, {{'!', -1, 2}}, {{1, '1', 5, 'answer', 1, 2}}) --- - upsert([1,'1',1,'qwerty'], [['!',-1,2]], ) OK [[1,'1',5,'answer',1,2]] ... test(t, {{'!', -1, 3}}, {{1, '1', 5, 'answer', 1, 2 ,3}}) --- - upsert([1,'1',1,'qwerty'], [['!',-1,3]], ) OK [[1,'1',5,'answer',1,2,3]] ... test(t, {{'#', 5, 100500}}, {{1, '1', 5, 'answer'}}) --- - upsert([1,'1',1,'qwerty'], [['#',5,100500]], ) OK [[1,'1',5,'answer']] ... test(t, {{'=', 4, 'qwerty'}}, {{1, '1', 5, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['=',4,'qwerty']], ) OK [[1,'1',5,'qwerty']] ... -- same check for negative posistion test(t, {{'+', -2, 5}}, {{1, '1', 10, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['+',-2,5]], ) OK [[1,'1',10,'qwerty']] ... 
test(t, {{'-', -2, 3}}, {{1, '1', 7, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['-',-2,3]], ) OK [[1,'1',7,'qwerty']] ... test(t, {{'&', -2, 5}}, {{1, '1', 5, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['&',-2,5]], ) OK [[1,'1',5,'qwerty']] ... test(t, {{'|', -2, 8}}, {{1, '1', 13, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['|',-2,8]], ) OK [[1,'1',13,'qwerty']] ... test(t, {{'^', -2, 12}}, {{1, '1', 1, 'qwerty'}}) --- - upsert([1,'1',1,'qwerty'], [['^',-2,12]], ) OK [[1,'1',1,'qwerty']] ... test(t, {{':', -1, 2, 4, "uer"}}, {{1, '1', 1, 'query'}}) --- - upsert([1,'1',1,'qwerty'], [[':',-1,2,4,'uer']], ) OK [[1,'1',1,'query']] ... test(t, {{'!', -2, 'answer'}}, {{1, '1', 1, 'answer', 'query'}}) --- - upsert([1,'1',1,'qwerty'], [['!',-2,'answer']], ) OK [[1,'1',1,'answer','query']] ... test(t, {{'#', -1, 1}}, {{1, '1', 1, 'answer'}}) --- - upsert([1,'1',1,'qwerty'], [['#',-1,1]], ) OK [[1,'1',1,'answer']] ... test(t, {{'=', -1, 'answer!'}}, {{1, '1', 1, 'answer!'}}) --- - upsert([1,'1',1,'qwerty'], [['=',-1,'answer!']], ) OK [[1,'1',1,'answer!']] ... -- selective test for good multiple ops test(t, {{'+', 3, 2}, {'!', 4, 42}}, {{1, '1', 3, 42, 'answer!'}}) --- - upsert([1,'1',1,'qwerty'], [['+',3,2],['!',4,42]], ) OK [[1,'1',3,42,'answer!']] ... test(t, {{'!', 1, 666}, {'#', 1, 1}, {'+', 3, 2}}, {{1, '1', 5, 42, 'answer!'}}) --- - upsert([1,'1',1,'qwerty'], [['!',1,666],['#',1,1],['+',3,2]], ) OK [[1,'1',5,42,'answer!']] ... test(t, {{'!', 3, 43}, {'+', 4, 2}}, {{1, '1', 43, 7, 42, 'answer!'}}) --- - upsert([1,'1',1,'qwerty'], [['!',3,43],['+',4,2]], ) OK [[1,'1',43,7,42,'answer!']] ... test(t, {{'#', 3, 2}, {'=', 3, 1}, {'=', 4, '1'}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['#',3,2],['=',3,1],['=',4,'1']], ) OK [[1,'1',1,'1']] ... -- all bad operations, one op, equivalent to update but error is supressed test(t, {{'+', 4, 3}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['+',4,3]], ) OK [[1,'1',1,'1']] ... 
test(t, {{'-', 4, 3}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['-',4,3]], ) OK [[1,'1',1,'1']] ... test(t, {{'&', 4, 1}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['&',4,1]], ) OK [[1,'1',1,'1']] ... test(t, {{'|', 4, 1}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['|',4,1]], ) OK [[1,'1',1,'1']] ... test(t, {{'^', 4, 1}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['^',4,1]], ) OK [[1,'1',1,'1']] ... test(t, {{':', 3, 2, 4, "uer"}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [[':',3,2,4,'uer']], ) OK [[1,'1',1,'1']] ... test(t, {{'!', 18, 'answer'}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['!',18,'answer']], ) OK [[1,'1',1,'1']] ... test(t, {{'#', 18, 1}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['#',18,1]], ) OK [[1,'1',1,'1']] ... test(t, {{'=', 18, 'qwerty'}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['=',18,'qwerty']], ) OK [[1,'1',1,'1']] ... -- selective test for good/bad multiple ops mix test(t, {{'+', 3, 1}, {'+', 4, 1}}, {{1, '1', 2, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['+',3,1],['+',4,1]], ) OK [[1,'1',2,'1']] ... test(t, {{'-', 4, 1}, {'-', 3, 1}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['-',4,1],['-',3,1]], ) OK [[1,'1',1,'1']] ... test(t, {{'#', 18, 1}, {'|', 3, 14}, {'!', 18, '!'}}, {{1, '1', 15, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['#',18,1],['|',3,14],['!',18,'!']], ) OK [[1,'1',15,'1']] ... test(t, {{'^', 42, 42}, {':', 1, 1, 1, ''}, {'^', 3, 8}}, {{1, '1', 7, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['^',42,42],[':',1,1,1,''],['^',3,8]], ) OK [[1,'1',7,'1']] ... test(t, {{'&', 3, 1}, {'&', 2, 1}, {'&', 4, 1}}, {{1, '1', 1, '1'}}) --- - upsert([1,'1',1,'qwerty'], [['&',3,1],['&',2,1],['&',4,1]], ) OK [[1,'1',1,'1']] ... -- broken ops must raise an exception and discarded 'dump ' .. anything_to_string(box.space.s:select{}) --- - dump [[1,'1',1,'1']] ... 
test(t, {{'&', 'a', 3}, {'+', 3, 3}}, {{1, '1', 1, '1'}}) --- - error: Illegal parameters, field id must be a number ... test(t, {{'+', 3, 3}, {'&', 3, 'a'}}, {{1, '1', 1, '1'}}) --- - error: 'Argument type in operation ''&'' on field 3 does not match field type: expected a positive integer' ... test(t, {{'+', 3}, {'&', 3, 'a'}}, {{1, '1', 1, '1'}}) --- - error: Unknown UPDATE operation ... test(t, {{':', 3, 3}}, {{1, '1', 1, '1'}}) --- - error: Unknown UPDATE operation ... test(t, {{':', 3, 3, 3}}, {{1, '1', 1, '1'}}) --- - error: Unknown UPDATE operation ... test(t, {{'?', 3, 3}}, {{1, '1', 1, '1'}}) --- - error: Unknown UPDATE operation ... 'dump ' .. anything_to_string(box.space.s:select{}) --- - dump [[1,'1',1,'1']] ... -- ignoring ops for insert upsert test({2, '2', 2, '2'}, {{}}, {{1, '1', 1, '1'}}) --- - error: Illegal parameters, update operation must be an array {op,..}, got empty array ... test({3, '3', 3, '3'}, {{'+', 3, 3}}, {{1, '1', 1, '1'}, {3, '3', 3, '3'}}) --- - upsert([3,'3',3,'3'], [['+',3,3]], ) OK [[1,'1',1,'1'],[3,'3',3,'3']] ... -- adding random ops t[1] = 1 --- ... test(t, {{'+', 3, 3}, {'+', 4, 3}}, {{1, '1', 4, '1'}, {3, '3', 3, '3'}}) --- - upsert([1,'1',1,'qwerty'], [['+',3,3],['+',4,3]], ) OK [[1,'1',4,'1'],[3,'3',3,'3']] ... t[1] = 2 --- ... test(t, {{'-', 4, 1}}, {{1, '1', 4, '1'}, {2, '1', 1, 'qwerty'}, {3, '3', 3, '3'}}) --- - upsert([2,'1',1,'qwerty'], [['-',4,1]], ) OK [[1,'1',4,'1'],[2,'1',1,'qwerty'],[3,'3',3,'3']] ... t[1] = 3 --- ... test(t, {{':', 3, 3, 3, ''}, {'|', 3, 4}}, {{1, '1', 4, '1'}, {2, '1', 1, 'qwerty'}, {3, '3', 7, '3'}}) --- - upsert([3,'1',1,'qwerty'], [[':',3,3,3,''],['|',3,4]], ) OK [[1,'1',4,'1'],[2,'1',1,'qwerty'],[3,'3',7,'3']] ... 'dump ' .. anything_to_string(box.space.s:select{}) -- (1) --- - dump [[1,'1',4,'1'],[2,'1',1,'qwerty'],[3,'3',7,'3']] ... test_run:cmd("restart server default") test_run = require('test_run').new() --- ... engine = test_run:get_cfg('engine') --- ... 
test_run:cmd("setopt delimiter ';'") --- - true ... function anything_to_string(tab) if tab == nil then return 'nil' end local str = '[' local first_route = true local t = 0 for k,f in pairs(tab) do if not first_route then str = str .. ',' end first_route = false t = t + 1 if k ~= t then str = str .. k .. '=' end if type(f) == 'string' then str = str .. "'" .. f .. "'" elseif type (f) == 'number' then str = str .. tostring(f) elseif type (f) == 'table' or type (f) == 'cdata' then str = str .. anything_to_string(f) else str = str .. '?' end end str = str .. ']' return str end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... s = box.space.s --- ... 'dump ' .. anything_to_string(box.space.s:select{})-- compare with (1) visually! --- - dump [[1,'1',4,'1'],[2,'1',1,'qwerty'],[3,'3',7,'3']] ... box.space.s:drop() --- ... -- -- gh-2104: vinyl: assert in tuple_upsert_squash -- s = box.schema.space.create('test', {engine = engine}) --- ... i = s:create_index('test') --- ... s:replace({1, 1, 1}) --- - [1, 1, 1] ... box.snapshot() --- - ok ... s:upsert({1, 1}, {{'+', 2, 2}}) --- ... s:upsert({1, 1}, {{'+', 3, 4}}) --- ... s:select() --- - - [1, 3, 5] ... s:drop() --- ... -- -- gh-2259: space:upsert() crashes in absence of indices -- s = box.schema.space.create('test', {engine = engine}) --- ... s:upsert({1}, {}) --- - error: 'No index #0 is defined in space ''test''' ... s:drop() --- ... -- -- gh-2461 - segfault on sparse or unordered keys. -- s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk', {parts = {1, 'unsigned', 3, 'unsigned'}}) --- ... s:upsert({100, 100, 100}, {{'+', 2, 200}}) --- ... s:upsert({200, 100, 200}, {{'+', 2, 300}}) --- ... s:upsert({300, 100, 300}, {{'+', 2, 400}}) --- ... pk:select{} --- - - [100, 100, 100] - [200, 100, 200] - [300, 100, 300] ... s:drop() --- ... -- test for non-spased and non-sequential index s = box.schema.space.create('test', {engine = engine}) --- ... 
pk = s:create_index('pk', {parts = {2, 'unsigned', 3, 'unsigned'}}) --- ... s:upsert({100, 100, 100}, {{'+', 1, 200}}) --- ... s:upsert({200, 100, 200}, {{'+', 1, 300}}) --- ... s:upsert({300, 100, 300}, {{'+', 1, 400}}) --- ... pk:select{} --- - - [100, 100, 100] - [200, 100, 200] - [300, 100, 300] ... s:drop() --- ... s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk', {parts = {3, 'unsigned', 2, 'unsigned'}}) --- ... s:upsert({100, 100, 100}, {{'+', 1, 200}}) --- ... s:upsert({200, 100, 200}, {{'+', 1, 300}}) --- ... s:upsert({300, 100, 300}, {{'+', 1, 400}}) --- ... pk:select{} --- - - [100, 100, 100] - [200, 100, 200] - [300, 100, 300] ... s:drop() --- ... s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk', {parts = {1, 'unsigned'}}) --- ... sec = s:create_index('sec', {parts = {4, 'unsigned', 2, 'unsigned', 3, 'unsigned'}}) --- ... s:replace{1, 301, 300, 300} --- - [1, 301, 300, 300] ... sec:select{} --- - - [1, 301, 300, 300] ... s:upsert({1, 301, 300, 300}, {{'+', 2, 1}, {'+', 3, 1}, {'+', 4, 1}}) --- ... sec:select{} --- - - [1, 302, 301, 301] ... s:upsert({1, 302, 301, 301}, {{'+', 2, 1}, {'+', 3, 1}, {'+', 4, 1}}) --- ... sec:select{} --- - - [1, 303, 302, 302] ... s:upsert({2, 203, 200, 200}, {{'+', 2, 1}, {'+', 3, 1}, {'+', 4, 1}}) --- ... sec:select{} --- - - [2, 203, 200, 200] - [1, 303, 302, 302] ... s:replace{3, 302, 50, 100} --- - [3, 302, 50, 100] ... sec:select{} --- - - [3, 302, 50, 100] - [2, 203, 200, 200] - [1, 303, 302, 302] ... sec:get{100, 302, 50} --- - [3, 302, 50, 100] ... sec:get{200, 203, 200} --- - [2, 203, 200, 200] ... sec:get{302, 303, 302} --- - [1, 303, 302, 302] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_snapshot_wal.result0000664000000000000000000002520213306560010024114 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... 
-- write data recover from latest snapshot and logs test_run:cmd("restart server default") engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... space:insert({0}) --- - [0] ... box.snapshot() --- - ok ... space:insert({1001}) --- - [1001] ... test_run:cmd("restart server default") space = box.space['test'] --- ... index = space.index['primary'] --- ... index:select({}, {iterator = box.index.ALL}) --- - - [0] - [1001] ... for key = 1, 351 do space:insert({key}) end --- ... box.snapshot() --- - ok ... for key = 352, 1000 do space:insert({key}) end --- ... test_run:cmd("restart server default") space = box.space['test'] --- ... index = space.index['primary'] --- ... index:select({}, {iterator = box.index.ALL}) --- - - [0] - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] - [101] - [102] - [103] - [104] - [105] - [106] - [107] - [108] - [109] - [110] - [111] - [112] - [113] - [114] - [115] - [116] - [117] - [118] - [119] - [120] - [121] - [122] - [123] - [124] - [125] - [126] - [127] - [128] - [129] - [130] - [131] - [132] - [133] - [134] - [135] - [136] - [137] - [138] - [139] - [140] - [141] - [142] - [143] - [144] - [145] - [146] - [147] - [148] - [149] - [150] - [151] - [152] - [153] - [154] - [155] - [156] - [157] - [158] - [159] - 
[160] - [161] - [162] - [163] - [164] - [165] - [166] - [167] - [168] - [169] - [170] - [171] - [172] - [173] - [174] - [175] - [176] - [177] - [178] - [179] - [180] - [181] - [182] - [183] - [184] - [185] - [186] - [187] - [188] - [189] - [190] - [191] - [192] - [193] - [194] - [195] - [196] - [197] - [198] - [199] - [200] - [201] - [202] - [203] - [204] - [205] - [206] - [207] - [208] - [209] - [210] - [211] - [212] - [213] - [214] - [215] - [216] - [217] - [218] - [219] - [220] - [221] - [222] - [223] - [224] - [225] - [226] - [227] - [228] - [229] - [230] - [231] - [232] - [233] - [234] - [235] - [236] - [237] - [238] - [239] - [240] - [241] - [242] - [243] - [244] - [245] - [246] - [247] - [248] - [249] - [250] - [251] - [252] - [253] - [254] - [255] - [256] - [257] - [258] - [259] - [260] - [261] - [262] - [263] - [264] - [265] - [266] - [267] - [268] - [269] - [270] - [271] - [272] - [273] - [274] - [275] - [276] - [277] - [278] - [279] - [280] - [281] - [282] - [283] - [284] - [285] - [286] - [287] - [288] - [289] - [290] - [291] - [292] - [293] - [294] - [295] - [296] - [297] - [298] - [299] - [300] - [301] - [302] - [303] - [304] - [305] - [306] - [307] - [308] - [309] - [310] - [311] - [312] - [313] - [314] - [315] - [316] - [317] - [318] - [319] - [320] - [321] - [322] - [323] - [324] - [325] - [326] - [327] - [328] - [329] - [330] - [331] - [332] - [333] - [334] - [335] - [336] - [337] - [338] - [339] - [340] - [341] - [342] - [343] - [344] - [345] - [346] - [347] - [348] - [349] - [350] - [351] - [352] - [353] - [354] - [355] - [356] - [357] - [358] - [359] - [360] - [361] - [362] - [363] - [364] - [365] - [366] - [367] - [368] - [369] - [370] - [371] - [372] - [373] - [374] - [375] - [376] - [377] - [378] - [379] - [380] - [381] - [382] - [383] - [384] - [385] - [386] - [387] - [388] - [389] - [390] - [391] - [392] - [393] - [394] - [395] - [396] - [397] - [398] - [399] - [400] - [401] - [402] - [403] - [404] - [405] - [406] - [407] - [408] - [409] - 
[410] - [411] - [412] - [413] - [414] - [415] - [416] - [417] - [418] - [419] - [420] - [421] - [422] - [423] - [424] - [425] - [426] - [427] - [428] - [429] - [430] - [431] - [432] - [433] - [434] - [435] - [436] - [437] - [438] - [439] - [440] - [441] - [442] - [443] - [444] - [445] - [446] - [447] - [448] - [449] - [450] - [451] - [452] - [453] - [454] - [455] - [456] - [457] - [458] - [459] - [460] - [461] - [462] - [463] - [464] - [465] - [466] - [467] - [468] - [469] - [470] - [471] - [472] - [473] - [474] - [475] - [476] - [477] - [478] - [479] - [480] - [481] - [482] - [483] - [484] - [485] - [486] - [487] - [488] - [489] - [490] - [491] - [492] - [493] - [494] - [495] - [496] - [497] - [498] - [499] - [500] - [501] - [502] - [503] - [504] - [505] - [506] - [507] - [508] - [509] - [510] - [511] - [512] - [513] - [514] - [515] - [516] - [517] - [518] - [519] - [520] - [521] - [522] - [523] - [524] - [525] - [526] - [527] - [528] - [529] - [530] - [531] - [532] - [533] - [534] - [535] - [536] - [537] - [538] - [539] - [540] - [541] - [542] - [543] - [544] - [545] - [546] - [547] - [548] - [549] - [550] - [551] - [552] - [553] - [554] - [555] - [556] - [557] - [558] - [559] - [560] - [561] - [562] - [563] - [564] - [565] - [566] - [567] - [568] - [569] - [570] - [571] - [572] - [573] - [574] - [575] - [576] - [577] - [578] - [579] - [580] - [581] - [582] - [583] - [584] - [585] - [586] - [587] - [588] - [589] - [590] - [591] - [592] - [593] - [594] - [595] - [596] - [597] - [598] - [599] - [600] - [601] - [602] - [603] - [604] - [605] - [606] - [607] - [608] - [609] - [610] - [611] - [612] - [613] - [614] - [615] - [616] - [617] - [618] - [619] - [620] - [621] - [622] - [623] - [624] - [625] - [626] - [627] - [628] - [629] - [630] - [631] - [632] - [633] - [634] - [635] - [636] - [637] - [638] - [639] - [640] - [641] - [642] - [643] - [644] - [645] - [646] - [647] - [648] - [649] - [650] - [651] - [652] - [653] - [654] - [655] - [656] - [657] - [658] - [659] - 
[660] - [661] - [662] - [663] - [664] - [665] - [666] - [667] - [668] - [669] - [670] - [671] - [672] - [673] - [674] - [675] - [676] - [677] - [678] - [679] - [680] - [681] - [682] - [683] - [684] - [685] - [686] - [687] - [688] - [689] - [690] - [691] - [692] - [693] - [694] - [695] - [696] - [697] - [698] - [699] - [700] - [701] - [702] - [703] - [704] - [705] - [706] - [707] - [708] - [709] - [710] - [711] - [712] - [713] - [714] - [715] - [716] - [717] - [718] - [719] - [720] - [721] - [722] - [723] - [724] - [725] - [726] - [727] - [728] - [729] - [730] - [731] - [732] - [733] - [734] - [735] - [736] - [737] - [738] - [739] - [740] - [741] - [742] - [743] - [744] - [745] - [746] - [747] - [748] - [749] - [750] - [751] - [752] - [753] - [754] - [755] - [756] - [757] - [758] - [759] - [760] - [761] - [762] - [763] - [764] - [765] - [766] - [767] - [768] - [769] - [770] - [771] - [772] - [773] - [774] - [775] - [776] - [777] - [778] - [779] - [780] - [781] - [782] - [783] - [784] - [785] - [786] - [787] - [788] - [789] - [790] - [791] - [792] - [793] - [794] - [795] - [796] - [797] - [798] - [799] - [800] - [801] - [802] - [803] - [804] - [805] - [806] - [807] - [808] - [809] - [810] - [811] - [812] - [813] - [814] - [815] - [816] - [817] - [818] - [819] - [820] - [821] - [822] - [823] - [824] - [825] - [826] - [827] - [828] - [829] - [830] - [831] - [832] - [833] - [834] - [835] - [836] - [837] - [838] - [839] - [840] - [841] - [842] - [843] - [844] - [845] - [846] - [847] - [848] - [849] - [850] - [851] - [852] - [853] - [854] - [855] - [856] - [857] - [858] - [859] - [860] - [861] - [862] - [863] - [864] - [865] - [866] - [867] - [868] - [869] - [870] - [871] - [872] - [873] - [874] - [875] - [876] - [877] - [878] - [879] - [880] - [881] - [882] - [883] - [884] - [885] - [886] - [887] - [888] - [889] - [890] - [891] - [892] - [893] - [894] - [895] - [896] - [897] - [898] - [899] - [900] - [901] - [902] - [903] - [904] - [905] - [906] - [907] - [908] - [909] - 
[910] - [911] - [912] - [913] - [914] - [915] - [916] - [917] - [918] - [919] - [920] - [921] - [922] - [923] - [924] - [925] - [926] - [927] - [928] - [929] - [930] - [931] - [932] - [933] - [934] - [935] - [936] - [937] - [938] - [939] - [940] - [941] - [942] - [943] - [944] - [945] - [946] - [947] - [948] - [949] - [950] - [951] - [952] - [953] - [954] - [955] - [956] - [957] - [958] - [959] - [960] - [961] - [962] - [963] - [964] - [965] - [966] - [967] - [968] - [969] - [970] - [971] - [972] - [973] - [974] - [975] - [976] - [977] - [978] - [979] - [980] - [981] - [982] - [983] - [984] - [985] - [986] - [987] - [988] - [989] - [990] - [991] - [992] - [993] - [994] - [995] - [996] - [997] - [998] - [999] - [1000] - [1001] ... space:drop() --- ... test_run:cmd("restart server default with cleanup=1") tarantool_1.9.1.26.g63eb81e3c/test/engine/params.result0000664000000000000000000000104713306560010021151 0ustar rootroot--init test_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... s = box.schema.create_space('engine', {engine=engine}) --- ... i = s:create_index('primary') --- ... --test example for memtx and vinyl box.space.engine:insert{1,2,3} --- - [1, 2, 3] ... box.space.engine:select{} --- - - [1, 2, 3] ... -- cleanup box.space.engine:drop() --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/conflict.lua0000664000000000000000000000121013306560010020722 0ustar rootroot function test_conflict() local test_run = require('test_run') local inspector = test_run.new() local engine = inspector:get_cfg('engine') local s = box.schema.space.create('tester', {engine=engine}); local i = s:create_index('test_index', {type = 'tree', parts = {1, 'string'}}); local commits = 0 local function conflict() box.begin() s:replace({'test'}) box.commit() commits = commits + 1 end; local fiber = require('fiber'); local f0 = fiber.create(conflict); local f1 = fiber.create(conflict); -- conflict fiber.sleep(0); s:drop(); return commits end tarantool_1.9.1.26.g63eb81e3c/test/engine/delete.result0000664000000000000000000002006013306560010021124 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- delete (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1'] - ['2'] - ['3'] - ['4'] - ['5'] - ['6'] - ['7'] - ['8'] - ['9'] - ['10'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] - ['100'] ... for key = 1, 100 do space:delete({tostring(key)}) end --- ... for key = 1, 100 do assert(space:get({tostring(key)}) == nil) end --- ... space:delete({tostring(7)}) --- ... space:drop() --- ... -- delete (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key})) end --- ... 
t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... for key = 1, 100 do space:delete({key}) end --- ... for key = 1, 100 do assert(space:get({key}) == nil) end --- ... space:delete({7}) --- ... space:drop() --- ... -- delete multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key, key}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... 
t --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... for key = 1, 100 do space:delete({key, key}) end --- ... for key = 1, 100 do assert(space:get({key, key}) == nil) end --- ... space:delete({7, 7}) --- ... space:drop() --- ... -- delete (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1'] - ['2'] - ['3'] - ['4'] - ['5'] - ['6'] - ['7'] - ['8'] - ['9'] - ['10'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] - ['100'] ... for key = 1, 100 do space:delete(box.tuple.new{tostring(key)}) end --- ... for key = 1, 100 do assert(space:get({tostring(key)}) == nil) end --- ... space:delete(box.tuple.new{tostring(7)}) --- ... space:drop() --- ... -- delete with multiple indices space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string', 3, 'scalar'}}) --- ... index3 = space:create_index('third', { type = 'tree', parts = {1, 'unsigned', 3, 'scalar'}}) --- ... space:insert({1, 'abc', 100}) --- - [1, 'abc', 100] ... space:insert({3, 'weif', 345}) --- - [3, 'weif', 345] ... space:insert({2, 'gbot', '023'}) --- - [2, 'gbot', '023'] ... space:insert({10, 'dflgner', 532.123}) --- - [10, 'dflgner', 532.123] ... space:insert({0, 'igkkm', 4902}) --- - [0, 'igkkm', 4902] ... index1:select{} --- - - [0, 'igkkm', 4902] - [1, 'abc', 100] - [2, 'gbot', '023'] - [3, 'weif', 345] - [10, 'dflgner', 532.123] ... 
index2:select{} --- - - [1, 'abc', 100] - [10, 'dflgner', 532.123] - [2, 'gbot', '023'] - [0, 'igkkm', 4902] - [3, 'weif', 345] ... index3:select{} --- - - [0, 'igkkm', 4902] - [1, 'abc', 100] - [2, 'gbot', '023'] - [3, 'weif', 345] - [10, 'dflgner', 532.123] ... tmp = index1:delete({1}) --- ... tmp = index2:delete({'weif'}) -- must fail --- - error: Invalid key part count in an exact match (expected 2, got 1) ... tmp = index2:delete({'weif', 345}) --- ... tmp = index2:delete({'weif', 345}) --- ... tmp = index3:delete({2, '023'}) --- ... index1:select{} --- - - [0, 'igkkm', 4902] - [10, 'dflgner', 532.123] ... index2:select{} --- - - [10, 'dflgner', 532.123] - [0, 'igkkm', 4902] ... index3:select{} --- - - [0, 'igkkm', 4902] - [10, 'dflgner', 532.123] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/tree_min_max_count.result0000664000000000000000000005721713306565107023573 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... -- one part indices -- int type space0 = box.schema.space.create('space0', { engine = engine }) --- ... index0 = space0:create_index('primary', { type = 'tree', parts = {1, 'INTEGER'} }) --- ... space0:insert({1, "AAAA"}) --- - [1, 'AAAA'] ... space0:insert({2, "AAAA"}) --- - [2, 'AAAA'] ... space0:insert({3, "AAAA"}) --- - [3, 'AAAA'] ... space0:insert({4, "AAAA"}) --- - [4, 'AAAA'] ... index0:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] ... index0:max(2) --- - [2, 'AAAA'] ... index0:min(2) --- - [2, 'AAAA'] ... index0:count(2) --- - 1 ... index0:max() --- - [4, 'AAAA'] ... index0:min() --- - [1, 'AAAA'] ... index0:count() --- - 4 ... space0:insert({20, "AAAA"}) --- - [20, 'AAAA'] ... space0:insert({30, "AAAA"}) --- - [30, 'AAAA'] ... space0:insert({40, "AAAA"}) --- - [40, 'AAAA'] ... index0:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... 
index0:max(15) --- - [4, 'AAAA'] ... index0:min(15) --- - [20, 'AAAA'] ... index0:count(15) --- - 0 ... index0:max() --- - [40, 'AAAA'] ... index0:min() --- - [1, 'AAAA'] ... index0:count() --- - 7 ... space0:insert({-2, "AAAA"}) --- - [-2, 'AAAA'] ... space0:insert({-3, "AAAA"}) --- - [-3, 'AAAA'] ... space0:insert({-4, "AAAA"}) --- - [-4, 'AAAA'] ... index0:select() --- - - [-4, 'AAAA'] - [-3, 'AAAA'] - [-2, 'AAAA'] - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... index0:max(0) --- - [-2, 'AAAA'] ... index0:min(0) --- - [1, 'AAAA'] ... index0:count(0) --- - 0 ... index0:max() --- - [40, 'AAAA'] ... index0:min() --- - [-4, 'AAAA'] ... index0:count() --- - 10 ... space0:drop() --- ... -- number type space1 = box.schema.space.create('space1', { engine = engine }) --- ... index1 = space1:create_index('primary', { type = 'tree', parts = {1, 'number'} }) --- ... space1:insert({1, "AAAA"}) --- - [1, 'AAAA'] ... space1:insert({2, "AAAA"}) --- - [2, 'AAAA'] ... space1:insert({3, "AAAA"}) --- - [3, 'AAAA'] ... space1:insert({4, "AAAA"}) --- - [4, 'AAAA'] ... index1:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] ... index1:max(2) --- - [2, 'AAAA'] ... index1:min(2) --- - [2, 'AAAA'] ... index1:count(2) --- - 1 ... index1:max() --- - [4, 'AAAA'] ... index1:min() --- - [1, 'AAAA'] ... index1:count() --- - 4 ... space1:insert({20, "AAAA"}) --- - [20, 'AAAA'] ... space1:insert({30, "AAAA"}) --- - [30, 'AAAA'] ... space1:insert({40, "AAAA"}) --- - [40, 'AAAA'] ... index1:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... index1:max(15) --- - [4, 'AAAA'] ... index1:min(15) --- - [20, 'AAAA'] ... index1:count(15) --- - 0 ... index1:max() --- - [40, 'AAAA'] ... index1:min() --- - [1, 'AAAA'] ... index1:count() --- - 7 ... space1:insert({-2, "AAAA"}) --- - [-2, 'AAAA'] ... space1:insert({-3, "AAAA"}) --- - [-3, 'AAAA'] ... 
space1:insert({-4, "AAAA"}) --- - [-4, 'AAAA'] ... index1:select() --- - - [-4, 'AAAA'] - [-3, 'AAAA'] - [-2, 'AAAA'] - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... index1:max(0) --- - [-2, 'AAAA'] ... index1:min(0) --- - [1, 'AAAA'] ... index1:count(0) --- - 0 ... index1:max() --- - [40, 'AAAA'] ... index1:min() --- - [-4, 'AAAA'] ... index1:count() --- - 10 ... space1:insert({1.5, "AAAA"}) --- - [1.5, 'AAAA'] ... space1:insert({2.5, "AAAA"}) --- - [2.5, 'AAAA'] ... space1:insert({3.5, "AAAA"}) --- - [3.5, 'AAAA'] ... space1:insert({4.5, "AAAA"}) --- - [4.5, 'AAAA'] ... index1:select() --- - - [-4, 'AAAA'] - [-3, 'AAAA'] - [-2, 'AAAA'] - [1, 'AAAA'] - [1.5, 'AAAA'] - [2, 'AAAA'] - [2.5, 'AAAA'] - [3, 'AAAA'] - [3.5, 'AAAA'] - [4, 'AAAA'] - [4.5, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... index1:max(1) --- - [1, 'AAAA'] ... index1:min(1) --- - [1, 'AAAA'] ... index1:count(1) --- - 1 ... index1:max() --- - [40, 'AAAA'] ... index1:min() --- - [-4, 'AAAA'] ... index1:count() --- - 14 ... space1:drop() --- ... -- str type space2 = box.schema.space.create('space2', { engine = engine }) --- ... index2 = space2:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... space2:insert({'1', "AAAA"}) --- - ['1', 'AAAA'] ... space2:insert({'2', "AAAA"}) --- - ['2', 'AAAA'] ... space2:insert({'3', "AAAA"}) --- - ['3', 'AAAA'] ... space2:insert({'4', "AAAA"}) --- - ['4', 'AAAA'] ... index2:select() --- - - ['1', 'AAAA'] - ['2', 'AAAA'] - ['3', 'AAAA'] - ['4', 'AAAA'] ... index2:max('2') --- - ['2', 'AAAA'] ... index2:min('2') --- - ['2', 'AAAA'] ... index2:count('2') --- - 1 ... index2:max() --- - ['4', 'AAAA'] ... index2:min() --- - ['1', 'AAAA'] ... index2:count() --- - 4 ... space2:insert({'20', "AAAA"}) --- - ['20', 'AAAA'] ... space2:insert({'30', "AAAA"}) --- - ['30', 'AAAA'] ... space2:insert({'40', "AAAA"}) --- - ['40', 'AAAA'] ... 
index2:select() --- - - ['1', 'AAAA'] - ['2', 'AAAA'] - ['20', 'AAAA'] - ['3', 'AAAA'] - ['30', 'AAAA'] - ['4', 'AAAA'] - ['40', 'AAAA'] ... index2:max('15') --- - ['1', 'AAAA'] ... index2:min('15') --- - ['2', 'AAAA'] ... index2:count('15') --- - 0 ... index2:max() --- - ['40', 'AAAA'] ... index2:min() --- - ['1', 'AAAA'] ... index2:count() --- - 7 ... space2:insert({'-2', "AAAA"}) --- - ['-2', 'AAAA'] ... space2:insert({'-3', "AAAA"}) --- - ['-3', 'AAAA'] ... space2:insert({'-4', "AAAA"}) --- - ['-4', 'AAAA'] ... index2:select() --- - - ['-2', 'AAAA'] - ['-3', 'AAAA'] - ['-4', 'AAAA'] - ['1', 'AAAA'] - ['2', 'AAAA'] - ['20', 'AAAA'] - ['3', 'AAAA'] - ['30', 'AAAA'] - ['4', 'AAAA'] - ['40', 'AAAA'] ... index2:max('0') --- - ['-4', 'AAAA'] ... index2:min('0') --- - ['1', 'AAAA'] ... index2:count('0') --- - 0 ... index2:max() --- - ['40', 'AAAA'] ... index2:min() --- - ['-2', 'AAAA'] ... index2:count() --- - 10 ... space2:drop() --- ... -- num type space3 = box.schema.space.create('space3', { engine = engine }) --- ... index3 = space3:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space3:insert({1, "AAAA"}) --- - [1, 'AAAA'] ... space3:insert({2, "AAAA"}) --- - [2, 'AAAA'] ... space3:insert({3, "AAAA"}) --- - [3, 'AAAA'] ... space3:insert({4, "AAAA"}) --- - [4, 'AAAA'] ... index3:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] ... index3:max(2) --- - [2, 'AAAA'] ... index3:min(2) --- - [2, 'AAAA'] ... index3:count(2) --- - 1 ... index3:max() --- - [4, 'AAAA'] ... index3:min() --- - [1, 'AAAA'] ... index3:count() --- - 4 ... space3:insert({20, "AAAA"}) --- - [20, 'AAAA'] ... space3:insert({30, "AAAA"}) --- - [30, 'AAAA'] ... space3:insert({40, "AAAA"}) --- - [40, 'AAAA'] ... index3:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... index3:max(15) --- - [4, 'AAAA'] ... index3:min(15) --- - [20, 'AAAA'] ... index3:count(15) --- - 0 ... 
index3:max() --- - [40, 'AAAA'] ... index3:min() --- - [1, 'AAAA'] ... index3:count() --- - 7 ... space3:drop() --- ... -- scalar type space4 = box.schema.space.create('space4', { engine = engine }) --- ... index4 = space4:create_index('primary', { type = 'tree', parts = {1, 'scalar'} }) --- ... space4:insert({1, "AAAA"}) --- - [1, 'AAAA'] ... space4:insert({2, "AAAA"}) --- - [2, 'AAAA'] ... space4:insert({3, "AAAA"}) --- - [3, 'AAAA'] ... space4:insert({4, "AAAA"}) --- - [4, 'AAAA'] ... index4:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] ... index4:max(2) --- - [2, 'AAAA'] ... index4:min(2) --- - [2, 'AAAA'] ... index4:count(2) --- - 1 ... index4:max() --- - [4, 'AAAA'] ... index4:min() --- - [1, 'AAAA'] ... index4:count() --- - 4 ... space4:insert({20, "AAAA"}) --- - [20, 'AAAA'] ... space4:insert({30, "AAAA"}) --- - [30, 'AAAA'] ... space4:insert({40, "AAAA"}) --- - [40, 'AAAA'] ... index4:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] ... index4:max(15) --- - [4, 'AAAA'] ... index4:min(15) --- - [20, 'AAAA'] ... index4:count(15) --- - 0 ... index4:max() --- - [40, 'AAAA'] ... index4:min() --- - [1, 'AAAA'] ... index4:count() --- - 7 ... space4:insert({'1', "AAAA"}) --- - ['1', 'AAAA'] ... space4:insert({'2', "AAAA"}) --- - ['2', 'AAAA'] ... space4:insert({'3', "AAAA"}) --- - ['3', 'AAAA'] ... space4:insert({'4', "AAAA"}) --- - ['4', 'AAAA'] ... index4:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] - ['1', 'AAAA'] - ['2', 'AAAA'] - ['3', 'AAAA'] - ['4', 'AAAA'] ... index4:max('2') --- - ['2', 'AAAA'] ... index4:min('2') --- - ['2', 'AAAA'] ... index4:count('2') --- - 1 ... index4:max() --- - ['4', 'AAAA'] ... index4:min() --- - [1, 'AAAA'] ... index4:count() --- - 11 ... space4:insert({'20', "AAAA"}) --- - ['20', 'AAAA'] ... space4:insert({'30', "AAAA"}) --- - ['30', 'AAAA'] ... 
space4:insert({'40', "AAAA"}) --- - ['40', 'AAAA'] ... index4:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] - ['1', 'AAAA'] - ['2', 'AAAA'] - ['20', 'AAAA'] - ['3', 'AAAA'] - ['30', 'AAAA'] - ['4', 'AAAA'] - ['40', 'AAAA'] ... index4:max('15') --- - ['1', 'AAAA'] ... index4:min('15') --- - ['2', 'AAAA'] ... index4:count('15') --- - 0 ... index4:max() --- - ['40', 'AAAA'] ... index4:min() --- - [1, 'AAAA'] ... index4:count() --- - 14 ... space4:insert({'-2', "AAAA"}) --- - ['-2', 'AAAA'] ... space4:insert({'-3', "AAAA"}) --- - ['-3', 'AAAA'] ... space4:insert({'-4', "AAAA"}) --- - ['-4', 'AAAA'] ... index4:select() --- - - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] - ['-2', 'AAAA'] - ['-3', 'AAAA'] - ['-4', 'AAAA'] - ['1', 'AAAA'] - ['2', 'AAAA'] - ['20', 'AAAA'] - ['3', 'AAAA'] - ['30', 'AAAA'] - ['4', 'AAAA'] - ['40', 'AAAA'] ... index4:max('0') --- - ['-4', 'AAAA'] ... index4:min('0') --- - ['1', 'AAAA'] ... index4:count('0') --- - 0 ... index4:max() --- - ['40', 'AAAA'] ... index4:min() --- - [1, 'AAAA'] ... index4:count() --- - 17 ... space4:insert({-2, "AAAA"}) --- - [-2, 'AAAA'] ... space4:insert({-3, "AAAA"}) --- - [-3, 'AAAA'] ... space4:insert({-4, "AAAA"}) --- - [-4, 'AAAA'] ... index4:select() --- - - [-4, 'AAAA'] - [-3, 'AAAA'] - [-2, 'AAAA'] - [1, 'AAAA'] - [2, 'AAAA'] - [3, 'AAAA'] - [4, 'AAAA'] - [20, 'AAAA'] - [30, 'AAAA'] - [40, 'AAAA'] - ['-2', 'AAAA'] - ['-3', 'AAAA'] - ['-4', 'AAAA'] - ['1', 'AAAA'] - ['2', 'AAAA'] - ['20', 'AAAA'] - ['3', 'AAAA'] - ['30', 'AAAA'] - ['4', 'AAAA'] - ['40', 'AAAA'] ... index4:max(0) --- - [-2, 'AAAA'] ... index4:min(0) --- - [1, 'AAAA'] ... index4:count(0) --- - 0 ... index4:max() --- - ['40', 'AAAA'] ... index4:min() --- - [-4, 'AAAA'] ... index4:count() --- - 20 ... space4:drop() --- ... 
-- multi filed indices -- scalar int space5 = box.schema.space.create('space5', { engine = engine }) --- ... index5 = space5:create_index('primary', { type = 'tree', parts = {1, 'scalar', 2, 'INTEGER'} }) --- ... space5:insert({1, 1}) --- - [1, 1] ... space5:insert({1, 2}) --- - [1, 2] ... space5:insert({1, 3}) --- - [1, 3] ... space5:insert({1, -4}) --- - [1, -4] ... index5:select() --- - - [1, -4] - [1, 1] - [1, 2] - [1, 3] ... index5:max({1}) --- - [1, 3] ... index5:min({1}) --- - [1, -4] ... index5:count({1}) --- - 4 ... index5:max({1, 0}) --- - [1, -4] ... index5:min({1, 1}) --- - [1, 1] ... index5:count({1}) --- - 4 ... index5:max() --- - [1, 3] ... index5:min() --- - [1, -4] ... index5:count() --- - 4 ... space5:insert({2, 1}) --- - [2, 1] ... space5:insert({2, 2}) --- - [2, 2] ... space5:insert({2, 3}) --- - [2, 3] ... space5:insert({2, -4}) --- - [2, -4] ... index5:select() --- - - [1, -4] - [1, 1] - [1, 2] - [1, 3] - [2, -4] - [2, 1] - [2, 2] - [2, 3] ... index5:max({2}) --- - [2, 3] ... index5:min({2}) --- - [2, -4] ... index5:count({2}) --- - 4 ... index5:max({2, 0}) --- - [2, -4] ... index5:min({2, 1}) --- - [2, 1] ... index5:count({2}) --- - 4 ... index5:max() --- - [2, 3] ... index5:min() --- - [1, -4] ... index5:count() --- - 8 ... space5:drop() --- ... -- scalar str space6 = box.schema.space.create('space6', { engine = engine }) --- ... index6 = space6:create_index('primary', { type = 'tree', parts = {1, 'scalar', 2, 'string'} }) --- ... space6:insert({1, '1'}) --- - [1, '1'] ... space6:insert({1, '2'}) --- - [1, '2'] ... space6:insert({1, '3'}) --- - [1, '3'] ... space6:insert({1, '-4'}) --- - [1, '-4'] ... index6:select() --- - - [1, '-4'] - [1, '1'] - [1, '2'] - [1, '3'] ... index6:max({1}) --- - [1, '3'] ... index6:min({1}) --- - [1, '-4'] ... index6:count({1}) --- - 4 ... index6:max({1, '0'}) --- - [1, '-4'] ... index6:min({1, '1'}) --- - [1, '1'] ... index6:count({1}) --- - 4 ... index6:max() --- - [1, '3'] ... 
index6:min() --- - [1, '-4'] ... index6:count() --- - 4 ... space6:insert({2, '1'}) --- - [2, '1'] ... space6:insert({2, '2'}) --- - [2, '2'] ... space6:insert({2, '3'}) --- - [2, '3'] ... space6:insert({2, '-4'}) --- - [2, '-4'] ... index6:select() --- - - [1, '-4'] - [1, '1'] - [1, '2'] - [1, '3'] - [2, '-4'] - [2, '1'] - [2, '2'] - [2, '3'] ... index6:max({2}) --- - [2, '3'] ... index6:min({2}) --- - [2, '-4'] ... index6:count({2}) --- - 4 ... index6:max({2, '0'}) --- - [2, '-4'] ... index6:min({2, '1'}) --- - [2, '1'] ... index6:count({2}) --- - 4 ... index6:max() --- - [2, '3'] ... index6:min() --- - [1, '-4'] ... index6:count() --- - 8 ... space6:drop() --- ... -- min max count after many inserts string = require('string') --- ... space7 = box.schema.space.create('space7', { engine = engine }) --- ... index7 = space7:create_index('primary', { type = 'tree', parts = {1, 'scalar'} }) --- ... long_string = string.rep('A', 650) --- ... for i = 1, 1000 do space7:insert({i, long_string}) end --- ... index7:max({100}) --- - [100, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index7:max({700}) --- - [700, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index7:min({100}) --- - [100, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index7:min({700}) --- - [700, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index7:count({2}) --- - 1 ... index7:max() --- - [1000, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index7:min() --- - [1, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index7:count() --- - 1000 ... space7:drop() --- ... space8 = box.schema.space.create('space8', { engine = engine }) --- ... index8 = space8:create_index('primary', { type = 'tree', parts = {1, 'scalar', 2, 'INTEGER'} }) --- ... for i = 1, 1000 do space8:insert({i % 10, i, long_string}) end --- ... index8:max({1, 100}) --- - [1, 91, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index8:max({2, 700}) --- - [2, 692, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index8:max({3}) --- - [3, 993, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index8:min({1, 10}) --- - [1, 11, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index8:min({1, 700}) --- - [1, 701, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index8:min({3}) --- - [3, 3, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index8:count({2}) --- - 100 ... index8:max() --- - [9, 999, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... 
index8:min() --- - [0, 10, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'] ... index8:count() --- - 1000 ... space8:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/update.test.lua0000664000000000000000000000676313306565107021417 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- update (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end for key = 1, 100 do space:update({tostring(key)}, {{'=', 2, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t space:update({tostring(101)}, {{'=', 2, 101}}) space:get({tostring(101)}) space:drop() -- update (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:replace({key}) end for key = 1, 100 do space:update({key}, {{'=', 2, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t space:update({101}, {{'=', 2, 101}}) space:get({101}) space:drop() -- update multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 
1, 100 do space:replace({key, key}) end for key = 1, 100 do space:update({key, key}, {{'=', 3, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t space:update({101, 101}, {{'=', 3, 101}}) space:get({101, 101}) space:drop() -- update with box.tuple.new space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:replace({key, key}) end for key = 1, 100 do space:update(box.tuple.new{key, key}, box.tuple.new{{'=', 3, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t space:update({101, 101}, {{'=', 3, 101}}) space:get({101, 101}) space:drop() -- update multiple indices space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'string'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string'}, unique = false }) index3 = space:create_index('third', { type = 'tree', parts = {3, 'scalar', 2, 'string', 1, 'unsigned'}, unique = false }) space:insert({1, 'fwoen', 324}) space:insert({2, 'fwoen', 123}) space:insert({3, 'fwoen', 324}) space:insert({4, '21qn2', 213}) space:insert({5, 'fgb', '231293'}) space:insert({6, 'nrhjrt', -1231.234}) index1:update({1}, {{'+', 3, 10}}) index1:update({1, 'fwoen'}, {{'+', 3, 10}}) index1:update({0, 'fwoen'}, {{'=', 3, 5}}) index2:update({'fwoen'}, {'=', 3, 1000}) index3:update({324, 'fwoen', 3}, {{'-', 3, 100}}) space:drop() space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) index3 = space:create_index('third', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) space:insert({1, 1, 1}) space:insert({2, 2, 2}) space:insert({3, 3, 3}) space:select{} space:update({1}, 
{{'=', 2, 2}, {'=', 3, 3}}) index1:select{} index2:select{} index3:select{} space:drop() -- https://github.com/tarantool/tarantool/issues/1854 space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space:insert({1, 1, 1}) space:insert({2, 2, 2}) space:insert({3, 3, 3}) space:select{} space:update({2}, {}) space:select{} space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/savepoint.test.lua0000664000000000000000000001332113306560010022115 0ustar rootrootenv = require('test_run') test_run = env.new() -- gh-2025 box.savepoint s1 = nil s1 = box.savepoint() box.rollback_to_savepoint(s1) box.begin() s1 = box.savepoint() box.rollback() box.begin() box.rollback_to_savepoint(s1) box.rollback() engine = test_run:get_cfg('engine') -- Test many savepoints on each statement. s = box.schema.space.create('test', {engine = engine}) p = s:create_index('pk') test_run:cmd("setopt delimiter ';'") box.begin() s:replace{1} save1 = box.savepoint() s:replace{2} save2 = box.savepoint() s:replace{3} save3 = box.savepoint() s:replace{4} select1 = s:select{} box.rollback_to_savepoint(save3) select2 = s:select{} box.rollback_to_savepoint(save2) select3 = s:select{} box.rollback_to_savepoint(save1) select4 = s:select{} box.commit() test_run:cmd("setopt delimiter ''"); select1 select2 select3 select4 s:truncate() -- Test rollback to savepoint on the current statement. test_run:cmd("setopt delimiter ';'") box.begin() s:replace{1} s:replace{2} s1 = box.savepoint() box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); s:select{} s:truncate() -- Test rollback to savepoint after multiple statements. test_run:cmd("setopt delimiter ';'") box.begin() s:replace{1} s1 = box.savepoint() s:replace{2} s:replace{3} s:replace{4} box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); s:select{} s:truncate() -- Test rollback to savepoint after failed statement. 
test_run:cmd("setopt delimiter ';'") box.begin() s:replace{1} s1 = box.savepoint() s:replace{3} pcall(s.replace, s, {'kek'}) s:replace{4} box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); s:select{} s:truncate() -- Test rollback to savepoint inside the trigger. select1 = nil select2 = nil select3 = nil select4 = nil test_run:cmd("setopt delimiter ';'") function on_replace(old, new) if new[1] > 10 then return end select1 = s:select{} s1 = box.savepoint() s:replace{100} box.rollback_to_savepoint(s1) select2 = s:select{} end; _ = s:on_replace(on_replace); box.begin() s:replace{1} select3 = select1 select4 = select2 s:replace{2} box.commit() test_run:cmd("setopt delimiter ''"); select4 select3 select2 select1 s:select{} s:drop() -- Test rollback to savepoint, created in trigger, -- from main tx stream. Fail, because of different substatement -- levels. s = box.schema.space.create('test', {engine = engine}) p = s:create_index('pk') test_run:cmd("setopt delimiter ';'") function on_replace2(old, new) if new[1] ~= 1 then return end s1 = box.savepoint() s:replace{100} end; _ = s:on_replace(on_replace2); box.begin() s:replace{1} select1 = s:select{} s:replace{2} s:replace{3} select2 = s:select{} ok1, errmsg1 = pcall(box.rollback_to_savepoint, s1) select3 = s:select{} s:replace{4} select4 = s:select{} box.commit() test_run:cmd("setopt delimiter ''"); select1 select2 select3 select4 ok1 errmsg1 s:drop() -- Test incorrect savepoints usage inside a transaction. 
s = box.schema.space.create('test', {engine = engine}) p = s:create_index('pk') test_run:cmd("setopt delimiter ';'") box.begin() s1 = box.savepoint() txn_id = s1.txn_id s:replace{1} ok1, errmsg1 = pcall(box.rollback_to_savepoint) ok2, errmsg2 = pcall(box.rollback_to_savepoint, {txn_id=txn_id}) ok3, errmsg3 = pcall(box.rollback_to_savepoint, {txn_id=txn_id, csavepoint=100}) fake_cdata = box.tuple.new({txn_id}) ok4, errmsg4 = pcall(box.rollback_to_savepoint, {txn_id=txn_id, csavepoint=fake_cdata}) ok5, errmsg5 = pcall(box.rollback_to_savepoint, {txn_id=fake_cdata, csavepoint=s1.csavepoint}) box.commit() test_run:cmd("setopt delimiter ''"); ok1, errmsg1 ok2, errmsg2 ok3, errmsg3 ok4, errmsg4 ok5, errmsg5 s:select{} -- Rollback to released savepoint. box.begin() ok1, errmsg1 = pcall(box.rollback_to_savepoint, s1) box.commit() ok1, errmsg1 s:select{} s:truncate() -- Rollback several savepoints at once. test_run:cmd("setopt delimiter ';'") box.begin() s0 = box.savepoint() s:replace{1} s1 = box.savepoint() s:replace{2} s2 = box.savepoint() s:replace{3} s3 = box.savepoint() s:replace{4} s4 = box.savepoint() s:replace{5} select1 = s:select{} box.rollback_to_savepoint(s2) select2 = s:select{} ok1, errmsg1 = pcall(box.rollback_to_savepoint, s3) select3 = s:select{} s5 = box.savepoint() s:replace{6} s6 = box.savepoint() s:replace{7} select4 = s:select{} ok2, errmsg2 = pcall(box.rollback_to_savepoint, s4) select5 = s:select{} box.rollback_to_savepoint(s6) select6 = s:select{} box.rollback_to_savepoint(s0) select7 = s:select{} box.rollback() test_run:cmd("setopt delimiter ''"); select1 select2 select3 select4 select5 select6 select7 ok1, errmsg1 ok2, errmsg2 s:truncate() -- Rollback to the same substatement level, but from different -- context. 
test_run:cmd("setopt delimiter ';'") function on_replace3(old_tuple, new_tuple) if new_tuple[2] == 'create savepoint' then s1 = box.savepoint() elseif new_tuple[2] == 'rollback to savepoint' then box.rollback_to_savepoint(s1) end end; _ = s:on_replace(on_replace3); box.begin() s:replace{1, 'create savepoint'} s:replace{2} s:replace{3} s:replace{4, 'rollback to savepoint'} s:replace{5} box.commit() test_run:cmd("setopt delimiter ''"); s:select{} s:truncate() -- Several savepoints on a same statement. test_run:cmd("setopt delimiter ';'") box.begin() s:replace{1} s1 = box.savepoint() s2 = box.savepoint() s3 = box.savepoint() s:replace{2} box.rollback_to_savepoint(s3) box.rollback_to_savepoint(s2) box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); s:select{} s:truncate() -- Test multiple rollback of a same savepoint. test_run:cmd("setopt delimiter ';'") box.begin() s1 = box.savepoint() s:replace{1} box.rollback_to_savepoint(s1) s:replace{2} box.rollback_to_savepoint(s1) s:replace{3} box.commit() test_run:cmd("setopt delimiter ''"); s:select{} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/iterator.result0000664000000000000000000017425013306565107021542 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... inspector:cmd("push filter '"..engine.."' to 'engine'") --- - true ... -- iterator (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... t = {} for state, v in index:pairs({}, {iterator = 'ALL'}) do table.insert(t, v) end --- ... 
t --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... t = {} for state, v in index:pairs({}, {iterator = 'GE'}) do table.insert(t, v) end --- ... t --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... 
t = {} for state, v in index:pairs(tostring(44), {iterator = 'GE'}) do table.insert(t, v) end --- ... t --- - - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... t = {} for state, v in index:pairs(tostring(44), {iterator = 'GT'}) do table.insert(t, v) end --- ... t --- - - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... t = {} for state, v in index:pairs({}, {iterator = 'LE'}) do table.insert(t, v) end --- ... 
t --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... t = {} for state, v in index:pairs(tostring(77), {iterator = 'LE'}) do table.insert(t, v) end --- ... t --- - - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... t = {} for state, v in index:pairs({}, {iterator = 'LT'}) do table.insert(t, v) end --- ... 
t --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... t = {} for state, v in index:pairs(tostring(77), {iterator = 'LT'}) do table.insert(t, v) end --- ... t --- - - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... space:drop() --- ... -- iterator (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key}) end --- ... 
t = {} for state, v in index:pairs({}, {iterator = 'ALL'}) do table.insert(t, v) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... t = {} for state, v in index:pairs({}, {iterator = 'GE'}) do table.insert(t, v) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... t = {} for state, v in index:pairs(44, {iterator = 'GE'}) do table.insert(t, v) end --- ... 
t --- - - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... t = {} for state, v in index:pairs(44, {iterator = 'GT'}) do table.insert(t, v) end --- ... t --- - - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... t = {} for state, v in index:pairs({}, {iterator = 'LE'}) do table.insert(t, v) end --- ... t --- - - [100] - [99] - [98] - [97] - [96] - [95] - [94] - [93] - [92] - [91] - [90] - [89] - [88] - [87] - [86] - [85] - [84] - [83] - [82] - [81] - [80] - [79] - [78] - [77] - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... t = {} for state, v in index:pairs(77, {iterator = 'LE'}) do table.insert(t, v) end --- ... 
t --- - - [77] - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... t = {} for state, v in index:pairs({}, {iterator = 'LT'}) do table.insert(t, v) end --- ... t --- - - [100] - [99] - [98] - [97] - [96] - [95] - [94] - [93] - [92] - [91] - [90] - [89] - [88] - [87] - [86] - [85] - [84] - [83] - [82] - [81] - [80] - [79] - [78] - [77] - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... t = {} for state, v in index:pairs(77, {iterator = 'LT'}) do table.insert(t, v) end --- ... t --- - - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... space:drop() --- ... 
-- iterator multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key, key}) end --- ... t = {} for state, v in index:pairs({}, {iterator = 'ALL'}) do table.insert(t, v) end --- ... t --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... t = {} for state, v in index:pairs({}, {iterator = 'GE'}) do table.insert(t, v) end --- ... 
t --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... t = {} for state, v in index:pairs({44, 44}, {iterator = 'GE'}) do table.insert(t, v) end --- ... t --- - - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... t = {} for state, v in index:pairs({44, 44}, {iterator = 'GT'}) do table.insert(t, v) end --- ... 
t --- - - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... t = {} for state, v in index:pairs({}, {iterator = 'LE'}) do table.insert(t, v) end --- ... t --- - - [100, 100] - [99, 99] - [98, 98] - [97, 97] - [96, 96] - [95, 95] - [94, 94] - [93, 93] - [92, 92] - [91, 91] - [90, 90] - [89, 89] - [88, 88] - [87, 87] - [86, 86] - [85, 85] - [84, 84] - [83, 83] - [82, 82] - [81, 81] - [80, 80] - [79, 79] - [78, 78] - [77, 77] - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - [3, 3] - [2, 2] - [1, 1] ... t = {} for state, v in index:pairs({77, 77}, {iterator = 'LE'}) do table.insert(t, v) end --- ... 
t --- - - [77, 77] - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - [3, 3] - [2, 2] - [1, 1] ... t = {} for state, v in index:pairs({}, {iterator = 'LT'}) do table.insert(t, v) end --- ... t --- - - [100, 100] - [99, 99] - [98, 98] - [97, 97] - [96, 96] - [95, 95] - [94, 94] - [93, 93] - [92, 92] - [91, 91] - [90, 90] - [89, 89] - [88, 88] - [87, 87] - [86, 86] - [85, 85] - [84, 84] - [83, 83] - [82, 82] - [81, 81] - [80, 80] - [79, 79] - [78, 78] - [77, 77] - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - 
[3, 3] - [2, 2] - [1, 1] ... t = {} for state, v in index:pairs({77, 77}, {iterator = 'LT'}) do table.insert(t, v) end --- ... t --- - - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - [3, 3] - [2, 2] - [1, 1] ... space:drop() --- ... -- iterator with tuple.new space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'ALL'}) do table.insert(t, v) end --- ... 
t --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'GE'}) do table.insert(t, v) end --- ... t --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... 
t = {} for state, v in index:pairs(box.tuple.new(tostring(44)), {iterator = 'GE'}) do table.insert(t, v) end --- ... t --- - - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... t = {} for state, v in index:pairs(box.tuple.new(tostring(44)), {iterator = 'GT'}) do table.insert(t, v) end --- ... t --- - - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'LE'}) do table.insert(t, v) end --- ... 
t --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... t = {} for state, v in index:pairs(box.tuple.new(tostring(77)), {iterator = 'LE'}) do table.insert(t, v) end --- ... t --- - - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... t = {} for state, v in index:pairs(box.tuple.new{}, {iterator = 'LT'}) do table.insert(t, v) end --- ... 
t --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... t = {} for state, v in index:pairs(box.tuple.new(tostring(77)), {iterator = 'LT'}) do table.insert(t, v) end --- ... t --- - - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... space:drop() --- ... iterate = dofile('utils.lua').iterate --- ... inspector:cmd("push filter '(error: .builtin/.*[.]lua):[0-9]+' to '\\1'") --- - true ... # Tree single-part unique --- ... space = box.schema.space.create('tweedledum', { engine = engine }) --- ... 
idx1 = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true}) --- ... -- Tree single-part non-unique idx2 = space:create_index('i1', { type = 'tree', parts = {2, 'string'}, unique = false}) --- ... -- Tree multi-part unique idx3 = space:create_index('i2', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true}) --- ... -- Tree multi-part non-unique idx4 = space:create_index('i3', { type = 'tree', parts = {3, 'string', 4, 'string'}, unique = false }) --- ... space:insert{'pid_001', 'sid_001', 'tid_998', 'a'} --- - ['pid_001', 'sid_001', 'tid_998', 'a'] ... space:insert{'pid_002', 'sid_001', 'tid_997', 'a'} --- - ['pid_002', 'sid_001', 'tid_997', 'a'] ... space:insert{'pid_003', 'sid_002', 'tid_997', 'b'} --- - ['pid_003', 'sid_002', 'tid_997', 'b'] ... space:insert{'pid_005', 'sid_002', 'tid_996', 'b'} --- - ['pid_005', 'sid_002', 'tid_996', 'b'] ... space:insert{'pid_007', 'sid_003', 'tid_996', 'a'} --- - ['pid_007', 'sid_003', 'tid_996', 'a'] ... space:insert{'pid_011', 'sid_004', 'tid_996', 'c'} --- - ['pid_011', 'sid_004', 'tid_996', 'c'] ... space:insert{'pid_013', 'sid_005', 'tid_996', 'b'} --- - ['pid_013', 'sid_005', 'tid_996', 'b'] ... space:insert{'pid_017', 'sid_006', 'tid_996', 'a'} --- - ['pid_017', 'sid_006', 'tid_996', 'a'] ... space:insert{'pid_019', 'sid_005', 'tid_995', 'a'} --- - ['pid_019', 'sid_005', 'tid_995', 'a'] ... space:insert{'pid_023', 'sid_005', 'tid_994', 'a'} --- - ['pid_023', 'sid_005', 'tid_994', 'a'] ... ------------------------------------------------------------------------------- -- Iterator: tree single-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'primary', 0, 1) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... 
iterate('tweedledum', 'primary', 0, 1, box.index.ALL) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... iterate('tweedledum', 'primary', 0, 1, box.index.EQ) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... iterate('tweedledum', 'primary', 0, 1, box.index.REQ) --- - - $pid_023$ - $pid_019$ - $pid_017$ - $pid_013$ - $pid_011$ - $pid_007$ - $pid_005$ - $pid_003$ - $pid_002$ - $pid_001$ ... iterate('tweedledum', 'primary', 0, 1, box.index.GE) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... iterate('tweedledum', 'primary', 0, 1, box.index.GT) --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... iterate('tweedledum', 'primary', 0, 1, box.index.LE) --- - - $pid_023$ - $pid_019$ - $pid_017$ - $pid_013$ - $pid_011$ - $pid_007$ - $pid_005$ - $pid_003$ - $pid_002$ - $pid_001$ ... iterate('tweedledum', 'primary', 0, 1, box.index.LT) --- - - $pid_023$ - $pid_019$ - $pid_017$ - $pid_013$ - $pid_011$ - $pid_007$ - $pid_005$ - $pid_003$ - $pid_002$ - $pid_001$ ... iterate('tweedledum', 'primary', 0, 1, box.index.EQ, 'pid_003') --- - - $pid_003$ ... iterate('tweedledum', 'primary', 0, 1, box.index.REQ, 'pid_003') --- - - $pid_003$ ... iterate('tweedledum', 'primary', 0, 1, box.index.EQ, 'pid_666') --- - [] ... iterate('tweedledum', 'primary', 0, 1, box.index.REQ, 'pid_666') --- - [] ... iterate('tweedledum', 'primary', 0, 1, box.index.GE, 'pid_001') --- - - $pid_001$ - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... iterate('tweedledum', 'primary', 0, 1, box.index.GT, 'pid_001') --- - - $pid_002$ - $pid_003$ - $pid_005$ - $pid_007$ - $pid_011$ - $pid_013$ - $pid_017$ - $pid_019$ - $pid_023$ ... 
iterate('tweedledum', 'primary', 0, 1, box.index.GE, 'pid_999') --- - [] ... iterate('tweedledum', 'primary', 0, 1, box.index.GT, 'pid_999') --- - [] ... iterate('tweedledum', 'primary', 0, 1, box.index.LE, 'pid_002') --- - - $pid_002$ - $pid_001$ ... iterate('tweedledum', 'primary', 0, 1, box.index.LT, 'pid_002') --- - - $pid_001$ ... iterate('tweedledum', 'primary', 0, 1, box.index.LE, 'pid_000') --- - [] ... iterate('tweedledum', 'primary', 0, 1, box.index.LT, 'pid_000') --- - [] ... ------------------------------------------------------------------------------- -- Iterator: tree single-part non-unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i1', 1, 2, box.index.ALL) --- - - $sid_001$ - $sid_001$ - $sid_002$ - $sid_002$ - $sid_003$ - $sid_004$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_006$ ... iterate('tweedledum', 'i1', 1, 2, box.index.EQ) --- - - $sid_001$ - $sid_001$ - $sid_002$ - $sid_002$ - $sid_003$ - $sid_004$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_006$ ... iterate('tweedledum', 'i1', 1, 2, box.index.REQ) --- - - $sid_006$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_004$ - $sid_003$ - $sid_002$ - $sid_002$ - $sid_001$ - $sid_001$ ... iterate('tweedledum', 'i1', 1, 2, box.index.GE) --- - - $sid_001$ - $sid_001$ - $sid_002$ - $sid_002$ - $sid_003$ - $sid_004$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_006$ ... iterate('tweedledum', 'i1', 1, 2, box.index.GT) --- - - $sid_001$ - $sid_001$ - $sid_002$ - $sid_002$ - $sid_003$ - $sid_004$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_006$ ... iterate('tweedledum', 'i1', 1, 2, box.index.LE) --- - - $sid_006$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_004$ - $sid_003$ - $sid_002$ - $sid_002$ - $sid_001$ - $sid_001$ ... iterate('tweedledum', 'i1', 1, 2, box.index.LT) --- - - $sid_006$ - $sid_005$ - $sid_005$ - $sid_005$ - $sid_004$ - $sid_003$ - $sid_002$ - $sid_002$ - $sid_001$ - $sid_001$ ... 
iterate('tweedledum', 'i1', 1, 2, box.index.EQ, 'sid_005') --- - - $sid_005$ - $sid_005$ - $sid_005$ ... iterate('tweedledum', 'i1', 1, 2, box.index.REQ, 'sid_005') --- - - $sid_005$ - $sid_005$ - $sid_005$ ... iterate('tweedledum', 'i1', 1, 2, box.index.GE, 'sid_005') --- - - $sid_005$ - $sid_005$ - $sid_005$ - $sid_006$ ... iterate('tweedledum', 'i1', 1, 2, box.index.GT, 'sid_005') --- - - $sid_006$ ... iterate('tweedledum', 'i1', 1, 2, box.index.GE, 'sid_999') --- - [] ... iterate('tweedledum', 'i1', 1, 2, box.index.GT, 'sid_999') --- - [] ... iterate('tweedledum', 'i1', 1, 2, box.index.LE, 'sid_005') --- - - $sid_005$ - $sid_005$ - $sid_005$ - $sid_004$ - $sid_003$ - $sid_002$ - $sid_002$ - $sid_001$ - $sid_001$ ... iterate('tweedledum', 'i1', 1, 2, box.index.LT, 'sid_005') --- - - $sid_004$ - $sid_003$ - $sid_002$ - $sid_002$ - $sid_001$ - $sid_001$ ... iterate('tweedledum', 'i1', 1, 2, box.index.LE, 'sid_000') --- - [] ... iterate('tweedledum', 'i1', 1, 2, box.index.LT, 'sid_000') --- - [] ... ------------------------------------------------------------------------------- -- Iterator: tree multi-part unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i2', 1, 3, box.index.ALL) --- - - $sid_001$tid_997$ - $sid_001$tid_998$ - $sid_002$tid_996$ - $sid_002$tid_997$ - $sid_003$tid_996$ - $sid_004$tid_996$ - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.EQ) --- - - $sid_001$tid_997$ - $sid_001$tid_998$ - $sid_002$tid_996$ - $sid_002$tid_997$ - $sid_003$tid_996$ - $sid_004$tid_996$ - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.REQ) --- - - $sid_006$tid_996$ - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... 
iterate('tweedledum', 'i2', 1, 3, box.index.GE) --- - - $sid_001$tid_997$ - $sid_001$tid_998$ - $sid_002$tid_996$ - $sid_002$tid_997$ - $sid_003$tid_996$ - $sid_004$tid_996$ - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.GT) --- - - $sid_001$tid_997$ - $sid_001$tid_998$ - $sid_002$tid_996$ - $sid_002$tid_997$ - $sid_003$tid_996$ - $sid_004$tid_996$ - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LE) --- - - $sid_006$tid_996$ - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LT) --- - - $sid_006$tid_996$ - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.EQ, 'sid_005') --- - - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.EQ, 'sid_005', 'tid_995') --- - - $sid_005$tid_995$ ... iterate('tweedledum', 'i2', 1, 3, box.index.EQ, 'sid_005', 'tid_999') --- - [] ... iterate('tweedledum', 'i2', 1, 3, box.index.REQ, 'sid_005') --- - - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ ... iterate('tweedledum', 'i2', 1, 3, box.index.REQ, 'sid_005', 'tid_995') --- - - $sid_005$tid_995$ ... iterate('tweedledum', 'i2', 1, 3, box.index.REQ, 'sid_005', 'tid_999') --- - [] ... iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_005') --- - - $sid_005$tid_994$ - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_005') --- - - $sid_006$tid_996$ ... 
iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_005', 'tid_995') --- - - $sid_005$tid_995$ - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_005', 'tid_995') --- - - $sid_005$tid_996$ - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_005', 'tid_999') --- - - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_005', 'tid_999') --- - - $sid_006$tid_996$ ... iterate('tweedledum', 'i2', 1, 3, box.index.GE, 'sid_999') --- - [] ... iterate('tweedledum', 'i2', 1, 3, box.index.GT, 'sid_999') --- - [] ... iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_005') --- - - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_005') --- - - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_005', 'tid_997') --- - - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_005', 'tid_997') --- - - $sid_005$tid_996$ - $sid_005$tid_995$ - $sid_005$tid_994$ - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_005', 'tid_000') --- - - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_005', 'tid_000') --- - - $sid_004$tid_996$ - $sid_003$tid_996$ - $sid_002$tid_997$ - $sid_002$tid_996$ - $sid_001$tid_998$ - $sid_001$tid_997$ ... 
iterate('tweedledum', 'i2', 1, 3, box.index.LE, 'sid_000') --- - [] ... iterate('tweedledum', 'i2', 1, 3, box.index.LT, 'sid_000') --- - [] ... ------------------------------------------------------------------------------- -- Iterator: tree multi-part non-unique ------------------------------------------------------------------------------- iterate('tweedledum', 'i3', 2, 4, box.index.ALL) --- - - $tid_994$a$ - $tid_995$a$ - $tid_996$a$ - $tid_996$a$ - $tid_996$b$ - $tid_996$b$ - $tid_996$c$ - $tid_997$a$ - $tid_997$b$ - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.EQ) --- - - $tid_994$a$ - $tid_995$a$ - $tid_996$a$ - $tid_996$a$ - $tid_996$b$ - $tid_996$b$ - $tid_996$c$ - $tid_997$a$ - $tid_997$b$ - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.REQ) --- - - $tid_998$a$ - $tid_997$b$ - $tid_997$a$ - $tid_996$c$ - $tid_996$b$ - $tid_996$b$ - $tid_996$a$ - $tid_996$a$ - $tid_995$a$ - $tid_994$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.GE) --- - - $tid_994$a$ - $tid_995$a$ - $tid_996$a$ - $tid_996$a$ - $tid_996$b$ - $tid_996$b$ - $tid_996$c$ - $tid_997$a$ - $tid_997$b$ - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.GT) --- - - $tid_994$a$ - $tid_995$a$ - $tid_996$a$ - $tid_996$a$ - $tid_996$b$ - $tid_996$b$ - $tid_996$c$ - $tid_997$a$ - $tid_997$b$ - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.LE) --- - - $tid_998$a$ - $tid_997$b$ - $tid_997$a$ - $tid_996$c$ - $tid_996$b$ - $tid_996$b$ - $tid_996$a$ - $tid_996$a$ - $tid_995$a$ - $tid_994$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.LT) --- - - $tid_998$a$ - $tid_997$b$ - $tid_997$a$ - $tid_996$c$ - $tid_996$b$ - $tid_996$b$ - $tid_996$a$ - $tid_996$a$ - $tid_995$a$ - $tid_994$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.EQ, 'tid_996') --- - - $tid_996$a$ - $tid_996$a$ - $tid_996$b$ - $tid_996$b$ - $tid_996$c$ ... iterate('tweedledum', 'i3', 2, 4, box.index.EQ, 'tid_996', 'a') --- - - $tid_996$a$ - $tid_996$a$ ... 
iterate('tweedledum', 'i3', 2, 4, box.index.EQ, 'tid_996', 'z') --- - [] ... iterate('tweedledum', 'i3', 2, 4, box.index.REQ, 'tid_996') --- - - $tid_996$c$ - $tid_996$b$ - $tid_996$b$ - $tid_996$a$ - $tid_996$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.REQ, 'tid_996', 'a') --- - - $tid_996$a$ - $tid_996$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.REQ, 'tid_996', '0') --- - [] ... iterate('tweedledum', 'i3', 2, 4, box.index.GE, 'tid_997') --- - - $tid_997$a$ - $tid_997$b$ - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.GT, 'tid_997') --- - - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.GE, 'tid_998') --- - - $tid_998$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.GT, 'tid_998') --- - [] ... iterate('tweedledum', 'i3', 2, 4, box.index.LE, 'tid_997') --- - - $tid_997$b$ - $tid_997$a$ - $tid_996$c$ - $tid_996$b$ - $tid_996$b$ - $tid_996$a$ - $tid_996$a$ - $tid_995$a$ - $tid_994$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.LT, 'tid_997') --- - - $tid_996$c$ - $tid_996$b$ - $tid_996$b$ - $tid_996$a$ - $tid_996$a$ - $tid_995$a$ - $tid_994$a$ ... iterate('tweedledum', 'i3', 2, 4, box.index.LE, 'tid_000') --- - [] ... iterate('tweedledum', 'i3', 2, 4, box.index.LT, 'tid_000') --- - [] ... iterate('tweedledum', 'i3', 2, 4, box.index.LT, 'tid_996', 'to', 'many', 'keys') --- - error: Invalid key part count (expected [0..2], got 4) ... ------------------------------------------------------------------------------- -- Iterator: various ------------------------------------------------------------------------------- space.index['primary']:pairs({}, {iterator = 666 }) --- - error: Illegal parameters, Invalid iterator type ... -- Test cases for #123: box.index.count does not check arguments properly status, msg = pcall(function() space.index['primary']:pairs(function() end, { iterator = box.index.EQ }) end) --- ... msg:match('function') --- - function ... 
-- Check that iterators successfully invalidated when index deleted gen, param, state = space.index['i1']:pairs(nil, { iterator = box.index.GE }) --- ... index_space = box.space[box.schema.INDEX_ID] --- ... _ = index_space:delete{space.id, space.index['i1'].id} --- ... type(_) --- - cdata ... _, value = gen(param, state) --- ... value --- - null ... space:drop() --- ... -- gh-1801 space:pairs() don't pass arguments to index:pairs() space = box.schema.space.create('test') --- ... pk = space:create_index('primary') --- ... space:replace({1}) --- - [1] ... space:replace({2}) --- - [2] ... space:replace({3}) --- - [3] ... space:replace({4}) --- - [4] ... space:pairs(2, { iterator = 'GE' }):totable() --- - - [2] - [3] - [4] ... space:drop() --- ... inspector:cmd("clear filter") --- - true ... -- -- gh-1875 Add support for index:pairs(key, iterator-type) syntax -- space = box.schema.space.create('test', {engine=engine}) --- ... pk = space:create_index('pk') --- ... space:auto_increment{1} --- - [1, 1] ... space:auto_increment{2} --- - [2, 2] ... space:auto_increment{3} --- - [3, 3] ... space:auto_increment{4} --- - [4, 4] ... space:auto_increment{5} --- - [5, 5] ... -- -- test pairs() -- space:pairs(3, 'GE'):totable() --- - - [3, 3] - [4, 4] - [5, 5] ... pk:pairs(3, 'GE'):totable() --- - - [3, 3] - [4, 4] - [5, 5] ... space:pairs(3, {iterator = 'GE' }):totable() --- - - [3, 3] - [4, 4] - [5, 5] ... pk:pairs(3, {iterator = 'GE' }):totable() --- - - [3, 3] - [4, 4] - [5, 5] ... space:pairs(3, 'EQ'):totable() --- - - [3, 3] ... pk:pairs(3, 'EQ'):totable() --- - - [3, 3] ... space:pairs(3, {iterator = 'EQ' }):totable() --- - - [3, 3] ... pk:pairs(3, {iterator = 'EQ' }):totable() --- - - [3, 3] ... space:pairs(3, 'GT'):totable() --- - - [4, 4] - [5, 5] ... pk:pairs(3, 'GT'):totable() --- - - [4, 4] - [5, 5] ... space:pairs(3, {iterator = 'GT' }):totable() --- - - [4, 4] - [5, 5] ... pk:pairs(3, {iterator = 'GT' }):totable() --- - - [4, 4] - [5, 5] ... 
-- -- test select() -- pk:select({3}, 'LE') --- - - [3, 3] - [2, 2] - [1, 1] ... space:select({3}, 'LE') --- - - [3, 3] - [2, 2] - [1, 1] ... -- -- test count() -- pk:count({3}, 'GT') --- - 2 ... space:count({3}, 'GT') --- - 2 ... space:drop() --- ... -- vinyl: broken rollback to savepoint -- https://github.com/tarantool/tarantool/issues/2589 s = box.schema.create_space('s', { engine = engine}) --- ... i1 = s:create_index('i1', { type = 'tree', parts = {1,'unsigned'}, unique = true }) --- ... i2 = s:create_index('i2', { type = 'tree', parts = {2,'unsigned'}, unique = true }) --- ... _ = s:replace{2, 2} --- ... box.begin() --- ... _ = s:replace{1, 1} --- ... _ = pcall(s.upsert, s, {1, 1}, {{"+", 2, 1}}) -- failed in unique secondary --- ... box.commit() --- ... s:select{} --- - - [1, 1] - [2, 2] ... s:drop{} --- ... -- implement lazy iterator positioning s = box.schema.space.create('test' ,{engine=engine}) --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for i = 1,3 do for j = 1,3 do s:replace{i, j} end end --- ... itr1,itr2,itr3 = s:pairs{2} --- ... _ = s:replace{1, 4} --- ... r = {} --- ... for k,v in itr1,itr2,itr3 do table.insert(r, v) end --- ... r --- - - [2, 1] - [2, 2] - [2, 3] ... itr1,itr2,itr3 = s:pairs({2}, {iterator = 'GE'}) --- ... _ = s:replace{1, 5} --- ... r = {} --- ... for k,v in itr1,itr2,itr3 do table.insert(r, v) end --- ... r --- - - [2, 1] - [2, 2] - [2, 3] - [3, 1] - [3, 2] - [3, 3] ... itr1,itr2,itr3 = s:pairs({2}, {iterator = 'REQ'}) --- ... s:replace{2, 4} --- - [2, 4] ... r = {} --- ... for k,v in itr1,itr2,itr3 do table.insert(r, v) end --- ... r --- - - [2, 4] - [2, 3] - [2, 2] - [2, 1] ... r = nil --- ... s:drop() --- ... -- make tree iterators stable -- https://github.com/tarantool/tarantool/issues/1796 s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for i = 1,10 do s:replace{i} end --- ... r = {} --- ... 
for k,v in s:pairs{} do table.insert(r, v[1]) s:delete(v[1]) end --- ... r --- - - 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10 ... s:select{} --- - [] ... for i = 1,10 do s:replace{i} end --- ... r = {} --- ... for k,v in s:pairs({}, {iterator = 'REQ'}) do table.insert(r, v[1]) s:delete(v[1]) end --- ... r --- - - 10 - 9 - 8 - 7 - 6 - 5 - 4 - 3 - 2 - 1 ... s:select{} --- - [] ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for i = 1,3 do for j = 1,3 do s:replace{i, j} end end --- ... r = {} --- ... for k,v in s:pairs{2} do table.insert(r, v) s:delete{v[1], v[2]} end --- ... r --- - - [2, 1] - [2, 2] - [2, 3] ... s:select{} --- - - [1, 1] - [1, 2] - [1, 3] - [3, 1] - [3, 2] - [3, 3] ... for i = 1,3 do for j = 1,3 do s:replace{i, j} end end --- ... r = {} --- ... for k,v in s:pairs({3}, {iterator = 'REQ'}) do table.insert(r, v) s:delete{v[1], v[2]} end --- ... r --- - - [3, 3] - [3, 2] - [3, 1] ... s:select{} --- - - [1, 1] - [1, 2] - [1, 3] - [2, 1] - [2, 2] - [2, 3] ... r = nil --- ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({25}) --- ... s:replace{25} --- - [25] ... state, value = gen(param,state) --- ... value --- - [25] ... state, value = gen(param,state) --- ... value --- - null ... gen,param,state = i:pairs({35}) --- ... state, value = gen(param,state) --- ... value --- - null ... s:replace{35} --- - [35] ... state, value = gen(param,state) --- - error: 'builtin/box/schema.lua:983: usage: next(param, state)' ... value --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... 
s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({30}, {iterator = 'GE'}) --- ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [42] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [50] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [60] ... state, value = gen(param, state) --- ... value --- - [80] ... state, value = gen(param, state) --- ... state --- - null ... value --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({40}, {iterator = 'LE'}) --- ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [20] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [15] ... s:replace{32} --- - [32] ... state, value = gen(param, state) --- ... value --- - [10] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [0] ... state, value = gen(param, state) --- ... state --- - null ... value --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({28}, {iterator = 'GE'}) --- ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [30] ... 
s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [42] ... s:replace{32} --- - [32] ... state, value = gen(param, state) --- ... value --- - [50] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [60] ... state, value = gen(param, state) --- ... value --- - [80] ... gen(param, state) --- - null ... -- test iterator dummy function, invoked when it's out of bounds gen(param, state) --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({42}, {iterator = 'LE'}) --- ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [20] ... s:replace{32} --- - [32] ... state, value = gen(param, state) --- ... value --- - [15] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [10] ... state, value = gen(param, state) --- ... value --- - [0] ... gen(param, state) --- - null ... -- test iterator dummy function, invoked when it's out of bounds gen(param, state) --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({20}, {iterator = 'GT'}) --- ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... 
value --- - [42] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [50] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [60] ... state, value = gen(param, state) --- ... value --- - [80] ... gen(param, state) --- - null ... -- test iterator dummy function, invoked when it's out of bounds gen(param, state) --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({50}, {iterator = 'LT'}) --- ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [20] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [15] ... s:replace{32} --- - [32] ... state, value = gen(param, state) --- ... value --- - [10] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [0] ... gen(param, state) --- - null ... -- test iterator dummy function, invoked when it's out of bounds gen(param, state) --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({28}, {iterator = 'GT'}) --- ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [42] ... s:replace{32} --- - [32] ... state, value = gen(param, state) --- ... value --- - [50] ... 
s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [60] ... state, value = gen(param, state) --- ... value --- - [80] ... gen(param, state) --- - null ... -- test iterator dummy function, invoked when it's out of bounds gen(param, state) --- - null ... s:drop() --- ... s = box.schema.space.create('test') --- ... i = s:create_index('i', { type = 'tree', parts = {1, 'unsigned'} }) --- ... s:replace{10} s:replace{20} s:replace{30} s:replace{40} s:replace{50} s:replace{60} --- ... gen,param,state = i:pairs({42}, {iterator = 'LT'}) --- ... s:replace{0} --- - [0] ... state, value = gen(param, state) --- ... value --- - [40] ... s:replace{42} --- - [42] ... state, value = gen(param, state) --- ... value --- - [30] ... s:replace{15} --- - [15] ... state, value = gen(param, state) --- ... value --- - [20] ... s:replace{32} --- - [32] ... state, value = gen(param, state) --- ... value --- - [15] ... s:replace{80} --- - [80] ... state, value = gen(param, state) --- ... value --- - [10] ... state, value = gen(param, state) --- ... value --- - [0] ... gen(param, state) --- - null ... -- test iterator dummy function, invoked when it's out of bounds gen(param, state) --- - null ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/misc.test.lua0000664000000000000000000000216613306560010021045 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- https://github.com/tarantool/tarantool/issues/1109 -- Update via a secondary key breaks recovery s = box.schema.create_space('test', { engine = engine }) i1 = s:create_index('test1', {parts = {1, 'unsigned'}}) i2 = s:create_index('test2', {parts = {2, 'unsigned'}}) s:insert{1, 2, 3} s:insert{5, 8, 13} i2:update({2}, {{'+', 3, 3}}) tmp = i2:delete{8} inspector:cmd("restart server default") test_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') box.space.test:select{} box.space.test:drop() -- https://github.com/tarantool/tarantool/issues/1435 -- Truncate does not work _ = box.schema.space.create('t5',{engine=engine}) _ = box.space.t5:create_index('primary') box.space.t5:insert{44} box.space.t5:truncate() box.space.t5:insert{55} box.space.t5:drop() -- https://github.com/tarantool/tarantool/issues/2257 -- crash somewhere in bsize s = box.schema.space.create('test',{engine=engine}) _ = s:create_index('primary') s:replace{1} box.begin() _ = s:delete{1} box.rollback() _ = s:delete{1} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/snapshot.result0000664000000000000000000000540113306560010021523 0ustar rootroot-- write data recover from latest snapshot env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... for key = 1, 51 do space:insert({key}) end --- ... box.snapshot() --- - ok ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... 
index:select({}, {iterator = box.index.ALL}) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] ... for key = 52, 91 do space:insert({key}) end --- ... box.snapshot() --- - ok ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... index:select({}, {iterator = box.index.ALL}) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] ... box.space.test:drop() --- ... -- https://github.com/tarantool/tarantool/issues/1899 engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { parts = {1, 'unsigned'} } ) --- ... index2 = space:create_index('secondary', { parts = {2, 'unsigned'} } ) --- ... space:insert{1, 11, 21} --- - [1, 11, 21] ... space:insert{20, 10, 0} --- - [20, 10, 0] ... box.snapshot() --- - ok ... test_run:cmd('restart server default') box.space.test:select{} --- - - [1, 11, 21] - [20, 10, 0] ... box.space.test.index.primary:select{} --- - - [1, 11, 21] - [20, 10, 0] ... 
box.space.test.index.secondary:select{} --- - - [20, 10, 0] - [1, 11, 21] ... box.space.test:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/replica_join.test.lua0000664000000000000000000001116513306565107022563 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') index = test_run:get_cfg('index') box.schema.user.grant('guest', 'read,write,execute', 'universe') box.schema.user.grant('guest', 'replication') space = box.schema.space.create('test', { id = 99999, engine = engine }) _ = space:create_index('primary', { type = index}) _ = space:create_index('secondary', { type = index, unique = false, parts = {2, 'unsigned'}}) space2 = box.schema.space.create('test2', { id = 99998, engine = engine}) _ = space2:create_index('primary', { parts = {1, 'unsigned', 2, 'string'}}) space3 = box.schema.space.create('test3', { id = 99997, engine = engine}) _ = space3:create_index('primary', { parts = {{1, 'string', collation = 'unicode_ci'}}}) box.snapshot() -- replica join test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") test_run:cmd("start server replica") test_run:wait_lsn('replica', 'default') test_run:cmd('switch replica') box.space.test:select() box.space.test.index.secondary:select() box.space.test2:select() box.space.test3:select() test_run:cmd('switch default') test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") -- new data for k = 1, 8 do box.space.test:insert{k, 17 - k} end for k = 16, 9, -1 do box.space.test:insert{k, 17 - k} end _ = box.space.test2:insert{1, 'test1', 1} _ = box.space.test2:upsert({1, 'test1', 10}, {{'=', 3, 10}}) _ = box.space.test2:upsert({2, 'test2', 20}, {{'=', 3, 20}}) _ = box.space.test2:insert{3, 'test3', 30} _ = box.space.test3:insert{'Ёж'} _ = box.space.test3:insert{'ель'} _ = box.space.test3:insert{'Юла'} _ = box.space.test3:insert{'Эль'} _ = box.space.test3:insert{'ёлка'} _ = box.space.test3:insert{'йогурт'} -- 
replica join test_run:cmd("deploy server replica") test_run:cmd("start server replica") test_run:wait_lsn('replica', 'default') test_run:cmd('switch replica') box.space.test:select() box.space.test.index.secondary:select() box.space.test2:select() box.space.test3:select() test_run:cmd('switch default') test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") -- add snapshot box.snapshot() -- replica join test_run:cmd("deploy server replica") test_run:cmd("start server replica") test_run:wait_lsn('replica', 'default') test_run:cmd('switch replica') box.space.test:select() box.space.test.index.secondary:select() box.space.test2:select() box.space.test3:select() test_run:cmd('switch default') test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") -- new data for k = 8, 1, -1 do box.space.test:update(k, {{'-', 2, 8}}) end for k = 9, 16 do box.space.test:delete(k) end _ = box.space.test2:upsert({1, 'test1', 11}, {{'+', 3, 1}}) _ = box.space.test2:update({2, 'test2'}, {{'+', 3, 2}}) _ = box.space.test2:delete{3, 'test3'} _ = box.space.test3:upsert({'ёж', 123}, {{'!', 2, 123}}) _ = box.space.test3:update('ЭЛЬ', {{'!', 2, 456}}) _ = box.space.test3:delete('ёлка') box.snapshot() -- replica join test_run:cmd("deploy server replica") test_run:cmd("start server replica") test_run:wait_lsn('replica', 'default') test_run:cmd('switch replica') box.space.test:select() box.space.test.index.secondary:select() box.space.test2:select() box.space.test3:select() test_run:cmd('switch default') test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") -- recreate space space:drop() space = box.schema.space.create('test', { id = 12345, engine = engine }) _ = space:create_index('primary', { type = index}) _ = space:insert{12345} -- truncate space space3:truncate() -- replica join test_run:cmd("deploy server replica") test_run:cmd("start server replica") test_run:wait_lsn('replica', 'default') test_run:cmd('switch replica') 
box.space.test.id box.space.test:select() box.space.test2:select() box.space.test3:select() test_run:cmd('switch default') test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") space:drop() space2:drop() space3:drop() box.snapshot() space = box.schema.space.create('test', { id = 99998, engine = engine }) index = space:create_index('primary', { type = test_run:get_cfg('index')}) for i = 0, 9 do space:insert({i, 'test' .. tostring(i)}) end test_run:cmd("deploy server replica") test_run:cmd("start server replica") test_run:wait_lsn('replica', 'default') test_run:cmd('restart server replica') test_run:cmd('switch replica') box.space.test:select() test_run:cmd('switch default') test_run:cmd("stop server replica") _ = test_run:cmd("cleanup server replica") space:drop() box.snapshot() box.schema.user.revoke('guest', 'replication') box.schema.user.revoke('guest', 'read,write,execute', 'universe') tarantool_1.9.1.26.g63eb81e3c/test/engine/truncate.result0000664000000000000000000001775513306565107021544 0ustar rootroottest_run = require('test_run').new() --- ... engine = test_run:get_cfg('engine') --- ... fiber = require('fiber') --- ... -- -- Check that space truncation is forbidden in a transaction. -- s = box.schema.create_space('test', {engine = engine}) --- ... _ = s:create_index('pk') --- ... _ = s:insert{123} --- ... box.begin() --- ... s:truncate() --- - error: DDL does not support multi-statement transactions ... box.commit() --- ... s:select() --- - - [123] ... s:drop() --- ... -- -- Check that space truncation works for spaces created via -- the internal API. -- _ = box.space._space:insert{512, 1, 'test', engine, 0, {temporary = false}, {}} --- ... _ = box.space._index:insert{512, 0, 'pk', 'tree', {unique = true}, {{0, 'unsigned'}}} --- ... _ = box.space.test:insert{123} --- ... box.space.test:select() --- - - [123] ... box.space.test:truncate() --- ... box.space.test:select() --- - [] ... box.space.test:drop() --- ... 
-- -- Check that a space cannot be dropped if it has a record -- in _truncate space. -- s = box.schema.create_space('test', {engine = engine}) --- ... s:truncate() --- ... _ = box.space._space:delete{s.id} -- error --- - error: 'Can''t drop space ''test'': the space has truncate record' ... _ = box.space._truncate:delete{s.id} --- ... _ = box.space._space:delete{s.id} -- ok --- ... -- -- Check that truncation of system spaces is not permitted. -- box.space._space:truncate() --- - error: Can't truncate a system space, space '_space' ... box.space._index:truncate() --- - error: Can't truncate a system space, space '_index' ... -- -- Truncate space with no indexes. -- s = box.schema.create_space('test', {engine = engine}) --- ... s:truncate() --- ... s:drop() --- ... -- -- Truncate empty space. -- s = box.schema.create_space('test', {engine = engine}) --- ... _ = s:create_index('pk') --- ... s:truncate() --- ... s:select() --- - [] ... s:drop() --- ... -- -- Truncate non-empty space. -- s = box.schema.create_space('test', {engine = engine}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s:create_index('i2', {parts = {2, 'unsigned'}}) --- ... _ = s:create_index('i3', {parts = {3, 'string'}}) --- ... _ = s:insert{1, 3, 'a'} --- ... _ = s:insert{2, 2, 'b'} --- ... _ = s:insert{3, 1, 'c'} --- ... s:truncate() --- ... s.index.i1:select() --- - [] ... s.index.i2:select() --- - [] ... s.index.i3:select() --- - [] ... _ = s:insert{10, 30, 'x'} --- ... _ = s:insert{20, 20, 'y'} --- ... _ = s:insert{30, 10, 'z'} --- ... s.index.i1:select() --- - - [10, 30, 'x'] - [20, 20, 'y'] - [30, 10, 'z'] ... s.index.i2:select() --- - - [30, 10, 'z'] - [20, 20, 'y'] - [10, 30, 'x'] ... s.index.i3:select() --- - - [10, 30, 'x'] - [20, 20, 'y'] - [30, 10, 'z'] ... s:drop() --- ... -- -- Check that space truncation is linearizable. -- -- Create a space with several indexes and start three fibers: -- 1st and 3rd update the space, 2nd truncates it. 
Then wait -- until all fibers are done. The space should contain data -- inserted by the 3rd fiber. -- -- Note, this is guaranteed to be true only if space updates -- don't yield, which is always true for memtx and is true -- for vinyl in case there's no data on disk, as in this case. -- s = box.schema.create_space('test', {engine = engine}) --- ... _ = s:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s:create_index('i2', {parts = {2, 'unsigned'}}) --- ... _ = s:create_index('i3', {parts = {3, 'string'}}) --- ... _ = s:insert{1, 1, 'a'} --- ... _ = s:insert{2, 2, 'b'} --- ... _ = s:insert{3, 3, 'c'} --- ... c = fiber.channel(3) --- ... test_run:cmd("setopt delimiter ';'") --- - true ... fiber.create(function() box.begin() s:replace{1, 10, 'aa'} s:replace{2, 20, 'bb'} s:replace{3, 30, 'cc'} box.commit() c:put(true) end) fiber.create(function() s:truncate() c:put(true) end) fiber.create(function() box.begin() s:replace{1, 100, 'aaa'} s:replace{2, 200, 'bbb'} s:replace{3, 300, 'ccc'} box.commit() c:put(true) end) test_run:cmd("setopt delimiter ''"); --- ... for i = 1, 3 do c:get() end --- ... s.index.i1:select() --- - - [1, 100, 'aaa'] - [2, 200, 'bbb'] - [3, 300, 'ccc'] ... s.index.i2:select() --- - - [1, 100, 'aaa'] - [2, 200, 'bbb'] - [3, 300, 'ccc'] ... s.index.i3:select() --- - - [1, 100, 'aaa'] - [2, 200, 'bbb'] - [3, 300, 'ccc'] ... s:drop() --- ... -- -- Calling space.truncate concurrently. -- s = box.schema.create_space('test', {engine = engine}) --- ... _ = s:create_index('pk') --- ... c = fiber.channel(5) --- ... for i = 1, 5 do fiber.create(function() s:truncate() c:put(true) end) end --- ... for i = 1, 5 do c:get() end --- ... s:drop() --- ... -- -- Check that space truncation is persistent. -- -- The test checks the following cases: -- - Create and truncate before snapshot -- - Create before snapshot, truncate after snapshot -- - Create and truncate after snapshot -- s1 = box.schema.create_space('test1', {engine = engine}) --- ... 
_ = s1:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s1:create_index('i2', {parts = {2, 'unsigned'}}) --- ... _ = s1:insert{1, 3} --- ... _ = s1:insert{2, 2} --- ... _ = s1:insert{3, 1} --- ... s1:truncate() --- ... _ = s1:insert{123, 321} --- ... s2 = box.schema.create_space('test2', {engine = engine}) --- ... _ = s2:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s2:create_index('i2', {parts = {2, 'unsigned'}}) --- ... _ = s2:insert{10, 30} --- ... _ = s2:insert{20, 20} --- ... _ = s2:insert{30, 10} --- ... box.snapshot() --- - ok ... _ = s1:insert{321, 123} --- ... s2:truncate() --- ... _ = s2:insert{456, 654} --- ... s3 = box.schema.create_space('test3', {engine = engine}) --- ... _ = s3:create_index('i1', {parts = {1, 'unsigned'}}) --- ... _ = s3:create_index('i2', {parts = {2, 'unsigned'}}) --- ... _ = s3:insert{100, 300} --- ... _ = s3:insert{200, 200} --- ... _ = s3:insert{300, 100} --- ... s3:truncate() --- ... _ = s3:insert{789, 987} --- ... -- Check that index drop, create, and alter called after space -- truncate do not break recovery (gh-2615) s4 = box.schema.create_space('test4', {engine = 'memtx'}) --- ... _ = s4:create_index('i1', {parts = {1, 'string'}}) --- ... _ = s4:create_index('i3', {parts = {3, 'string'}}) --- ... _ = s4:insert{'zzz', 111, 'yyy'} --- ... s4:truncate() --- ... s4.index.i3:drop() --- ... _ = s4:create_index('i2', {parts = {2, 'string'}}) --- ... s4.index.i1:alter({parts = {1, 'string', 2, 'string'}}) --- ... _ = s4:insert{'abc', 'cba'} --- ... test_run:cmd('restart server default') s1 = box.space.test1 --- ... s2 = box.space.test2 --- ... s3 = box.space.test3 --- ... s4 = box.space.test4 --- ... s1.index.i1:select() --- - - [123, 321] - [321, 123] ... s1.index.i2:select() --- - - [321, 123] - [123, 321] ... s2.index.i1:select() --- - - [456, 654] ... s2.index.i2:select() --- - - [456, 654] ... s3.index.i1:select() --- - - [789, 987] ... s3.index.i2:select() --- - - [789, 987] ... 
s4.index.i1:select() --- - - ['abc', 'cba'] ... s4.index.i2:select() --- - - ['abc', 'cba'] ... s1:drop() --- ... s2:drop() --- ... s3:drop() --- ... s4:drop() --- ... -- Truncate should fail in no write access for the space engine = test_run:get_cfg('engine') --- ... s = box.schema.create_space('access_truncate', {engine = engine}) --- ... _ = s:create_index('pk') --- ... s:insert({1, 2, 3}) --- - [1, 2, 3] ... s:insert({3, 2, 1}) --- - [3, 2, 1] ... box.schema.user.grant('guest', 'execute', 'universe') --- ... box.schema.user.grant('guest', 'read', 'space', 'access_truncate') --- ... con = require('net.box').connect(box.cfg.listen) --- ... con:eval([[box.space.access_truncate:truncate()]]) --- - error: Write access to space 'access_truncate' is denied for user 'guest' ... con.space.access_truncate:select() --- - - [1, 2, 3] - [3, 2, 1] ... box.schema.user.grant('guest', 'write', 'space', 'access_truncate') --- ... con:eval([[box.space.access_truncate:truncate()]]) --- ... con.space.access_truncate:select() --- - [] ... con:close() --- ... box.schema.user.revoke('guest', 'execute', 'universe') --- ... box.schema.user.revoke('guest', 'read,write', 'space', 'access_truncate') --- ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/indices_any_type.test.lua0000664000000000000000000001301413306560010023432 0ustar rootroot -- Test for unique indices -- Tests for TREE index type env = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') s0 = box.schema.space.create('my_space1', { engine = engine }) i0 = s0:create_index('my_space1_idx1', {type='TREE', parts={1, 'number'}, unique=true}) s0:insert({10}) s0:insert({11}) s0:insert({12}) s0:insert({13}) s0:select{} s0:insert({3}) s0:insert({4}) s0:insert({5}) s0:insert({6}) s0:select{} s0:insert({-5}) s0:insert({-6}) s0:insert({-7}) s0:insert({-8}) s0:select{} s0:insert({-10}) s0:insert({-11}) s0:insert({-12}) s0:insert({-13}) s0:select{} s0:insert({3.5}) s0:insert({4.5}) s0:insert({5.5}) s0:select{} s0:insert({-3.5}) s0:insert({-4.5}) s0:insert({-5.5}) s0:select{} s0:drop() s1 = box.schema.space.create('my_space2', { engine = engine }) i1 = s1:create_index('my_space2_idx1', {type='TREE', parts={1, 'scalar'}, unique=true}) s1:insert({10}) s1:insert({11}) s1:insert({12}) s1:insert({13}) s1:select{} s1:insert({3}) s1:insert({4}) s1:insert({5}) s1:insert({6}) s1:select{} s1:insert({'ffff'}) s1:insert({'gggg'}) s1:insert({'hhhh'}) s1:select{} s1:insert({'aaaa'}) s1:insert({'bbbb'}) s1:insert({'cccc'}) s1:select{} s1:insert({3.5}) s1:insert({4.5}) s1:insert({5.5}) s1:select{} s1:insert({-3.5}) s1:insert({-4.5}) s1:insert({-5.5}) s1:select{} s1:insert({true}) s1:insert({false}) s1:insert({1}) s1:insert({0}) s1:insert({'!!!!'}) s1:insert({'????'}) s1:select{} s1:drop() s2 = box.schema.space.create('my_space3', { engine = engine }) i2_1 = s2:create_index('my_space3_idx1', {type='TREE', parts={1, 'scalar', 2, 'integer', 3, 'number'}, unique=true}) s2:insert({10, 1, -1, 'z', true}) s2:insert({11, 2, 2, 'g', false}) s2:insert({12, 3, -3, 'e', -100.5}) s2:insert({13, 4, 4, 'h', 200}) s2:select{} s2:insert({3, 5, -5, 'w', 'strstr'}) s2:insert({4, 6, 6, 'q', ';;;;'}) s2:insert({5, 7, -7, 'c', '???'}) 
s2:insert({6, 8, 8, 'k', '!!!'}) s2:select{} s2:insert({'ffff', 9, -9, 'm', '123'}) s2:insert({'gggg', 10, 10, 'r', '456'}) s2:insert({'hhhh', 11, -11, 'i', 55555}) s2:insert({'hhhh', 11, -10, 'i', 55556}) s2:insert({'hhhh', 11, -12, 'i', 55554}) s2:select{} s2:insert({'aaaa', 12, 12, 'o', 333}) s2:insert({'bbbb', 13, -13, 'p', '123'}) s2:insert({'cccc', 14, 14, 'l', 123}) s2:select{} s2:insert({3.5, 15, -15, 'n', 500}) s2:insert({4.5, 16, 16, 'b', 'ghtgtg'}) s2:insert({5.5, 17, -17, 'v', '"""""'}) s2:select{} s2:insert({-3.5, 18, 18, 'x', '---'}) s2:insert({-4.5, 19, -19, 'a', 56.789}) s2:insert({-5.5, 20, 20, 'f', -138.4}) s2:select{} s2:insert({true, 21, -21, 'y', 50}) s2:insert({false, 22, 22, 's', 60}) s2:insert({'!!!!', 23, -23, 'u', 0}) s2:insert({'????', 24, 24, 'j', 70}) s2:select{} s2.index.my_space3_idx2:select{} s2:drop() -- Tests for NULL mp = require('msgpack') s4 = box.schema.space.create('my_space5', { engine = engine }) i4_1 = s4:create_index('my_space5_idx1', {type='TREE', parts={1, 'scalar', 2, 'integer', 3, 'number'}, unique=true}) s4:insert({mp.NULL, 1, 1, 1}) s4:insert({2, mp.NULL, 2, 2}) -- all nulls must fail s4:insert({3, 3, mp.NULL, 3}) s4:insert({4, 4, 4, mp.NULL}) s4:drop() -- Test for nonunique indices s5 = box.schema.space.create('my_space6', { engine = engine }) i5_1 = s5:create_index('my_space6_idx1', {type='TREE', parts={1, 'unsigned'}, unique=true}) i5_2 = s5:create_index('my_space6_idx2', {type='TREE', parts={2, 'scalar'}, unique=false}) test_run:cmd("setopt delimiter ';'"); function less(a, b) if type(a[2]) ~= type(b[2]) then return type(a[2]) < type(b[2]) end if type(a[2]) == 'boolean' then if a[2] == false and b[2] == true then return true end end if a[2] == b[2] then return a[1] < b[1] end return a[2] < b[2] end; test_run:cmd("setopt delimiter ''"); function sort(t) table.sort(t, less) return t end s5:insert({1, "123"}) s5:insert({2, "123"}) s5:insert({3, "123"}) s5:insert({4, 123}) s5:insert({5, 123}) s5:insert({6, true}) 
s5:insert({7, true}) s5:insert({8, mp.NULL}) -- must fail s5:insert({9, -40.5}) s5:insert({10, -39.5}) s5:insert({11, -38.5}) s5:insert({12, 100.5}) s5:select{} sort(i5_2:select({123})) sort(i5_2:select({"123"})) sort(i5_2:select({true})) sort(i5_2:select({false})) sort(i5_2:select({true})) sort(i5_2:select({-38.5})) sort(i5_2:select({-40}, {iterator = 'GE'})) s5:drop() -- gh-1897 Crash on index field type 'any' space = box.schema.space.create('test', {engine = engine}) pk = space:create_index('primary', { parts = {1, 'any'} }) -- space:insert({1}) -- must fail space:insert({2}) -- space:drop() -- gh-1701 allow NaN rnd = math.random(2147483648) ffi = require('ffi') ffi.cdef(string.format("union nan_%s { double d; uint64_t i; }", rnd)) nan_ffi = ffi.new(string.format('union nan_%s', rnd)) nan_ffi.i = 0x7ff4000000000000 sNaN = nan_ffi.d nan_ffi.i = 0x7ff8000000000000 qNaN = nan_ffi.d -- basic test space = box.schema.space.create('test', { engine = engine }) pk = space:create_index('primary', {parts = {1, 'number'}}) space:replace({sNaN, 'signaling NaN'}) space:replace({qNaN, 'quiet NaN'}) space:get{sNaN} space:get{qNaN} space:get{1/0} space:get{1/0 - 1/0} space:get{0/0} space:select{} space:truncate() -- test ordering of special values space:replace({1/0, '+inf'}) space:replace({sNaN, 'snan'}) space:replace({100}) space:replace({-1/0, '-inf'}) space:replace({50}) space:replace({qNaN, 'qnan'}) pk:get{100/0} pk:get{sNaN} pk:get{100} pk:get{-100/0} pk:get{50} pk:get{qNaN} pk:select({sNaN}, {iterator = 'GE'}) pk:select({1/0}, {iterator = 'LT'}) space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/box.lua0000664000000000000000000000170513306560010017722 0ustar rootroot#!/usr/bin/env tarantool os = require('os') local vinyl = { threads = 3, range_size=1024*64, page_size=1024, } box.cfg{ listen = os.getenv("LISTEN"), memtx_memory = 107374182, pid_file = "tarantool.pid", rows_per_wal = 50, vinyl_read_threads = 2, vinyl_write_threads = 3, vinyl_range_size = 64 * 1024, 
vinyl_page_size = 1024, memtx_max_tuple_size = 1024 * 1024 * 100, vinyl_max_tuple_size = 1024 * 1024 * 100, } require('console').listen(os.getenv('ADMIN')) _to_exclude = { 'pid_file', 'log', 'vinyl_dir', 'memtx_dir', 'wal_dir', 'memtx_min_tuple_size', 'memtx_max_tuple_size' } _exclude = {} for _, f in pairs(_to_exclude) do _exclude[f] = 1 end function cfg_filter(data) local result = {} for field, val in pairs(data) do if _exclude[field] == nil then result[field] = val end end return result end tarantool_1.9.1.26.g63eb81e3c/test/engine/conflict.result0000664000000000000000000000007313306560010021465 0ustar rootrootdofile('conflict.lua') --- ... test_conflict() --- - 2 ... tarantool_1.9.1.26.g63eb81e3c/test/engine/lua.test.lua0000664000000000000000000000414613306560010020673 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- -- Lua select_reverse_range -- -- lua select_reverse_range() testing -- https://blueprints.launchpad.net/tarantool/+spec/backward-tree-index-iterator space = box.schema.space.create('tweedledum', { engine = engine }) tmp = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) tmp = space:create_index('range', { type = 'tree', parts = {2, 'unsigned', 1, 'unsigned'}, unique = true }) space:insert{0, 0} space:insert{1, 0} space:insert{2, 0} space:insert{3, 0} space:insert{4, 0} space:insert{5, 0} space:insert{6, 0} space:insert{7, 0} space:insert{8, 0} space:insert{9, 0} space.index['range']:select({}, { limit = 10, iterator = 'GE' }) space.index['range']:select({}, { limit = 10, iterator = 'LE' }) space.index['range']:select({}, { limit = 4, iterator = 'LE' }) space:drop() -- -- Tests for box.index iterators -- space = box.schema.space.create('tweedledum', { engine = engine }) tmp = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) tmp = space:create_index('i1', { type = 'tree', parts = {2, 'string', 3, 'string'}, 
unique = true }) pid = 1 tid = 999 inspector:cmd("setopt delimiter ';'") for sid = 1, 2 do for i = 1, 3 do space:insert{'pid_'..pid, 'sid_'..sid, 'tid_'..tid} pid = pid + 1 tid = tid - 1 end end; inspector:cmd("setopt delimiter ''"); index = space.index['i1'] t = {} for state, v in index:pairs('sid_1', { iterator = 'GE' }) do table.insert(t, v) end t t = {} for state, v in index:pairs('sid_2', { iterator = 'LE' }) do table.insert(t, v) end t t = {} for state, v in index:pairs('sid_1', { iterator = 'EQ' }) do table.insert(t, v) end t t = {} for state, v in index:pairs('sid_1', { iterator = 'REQ' }) do table.insert(t, v) end t t = {} for state, v in index:pairs('sid_2', { iterator = 'EQ' }) do table.insert(t, v) end t t = {} for state, v in index:pairs('sid_2', { iterator = 'REQ' }) do table.insert(t, v) end t t = {} index:pairs('sid_t', { iterator = 'wrong_iterator_type' }) index = nil space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/engine.cfg0000664000000000000000000000014313306560010020350 0ustar rootroot{ "*": { "memtx": {"engine": "memtx"}, "vinyl": {"engine": "vinyl"} } } tarantool_1.9.1.26.g63eb81e3c/test/engine/update.result0000664000000000000000000002552613306565107021174 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- update (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... for key = 1, 100 do space:update({tostring(key)}, {{'=', 2, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1', 1] - ['2', 2] - ['3', 3] - ['4', 4] - ['5', 5] - ['6', 6] - ['7', 7] - ['8', 8] - ['9', 9] - ['10', 10] - ['11', 11] - ['12', 12] - ['13', 13] - ['14', 14] - ['15', 15] - ['16', 16] - ['17', 17] - ['18', 18] - ['19', 19] - ['20', 20] - ['21', 21] - ['22', 22] - ['23', 23] - ['24', 24] - ['25', 25] - ['26', 26] - ['27', 27] - ['28', 28] - ['29', 29] - ['30', 30] - ['31', 31] - ['32', 32] - ['33', 33] - ['34', 34] - ['35', 35] - ['36', 36] - ['37', 37] - ['38', 38] - ['39', 39] - ['40', 40] - ['41', 41] - ['42', 42] - ['43', 43] - ['44', 44] - ['45', 45] - ['46', 46] - ['47', 47] - ['48', 48] - ['49', 49] - ['50', 50] - ['51', 51] - ['52', 52] - ['53', 53] - ['54', 54] - ['55', 55] - ['56', 56] - ['57', 57] - ['58', 58] - ['59', 59] - ['60', 60] - ['61', 61] - ['62', 62] - ['63', 63] - ['64', 64] - ['65', 65] - ['66', 66] - ['67', 67] - ['68', 68] - ['69', 69] - ['70', 70] - ['71', 71] - ['72', 72] - ['73', 73] - ['74', 74] - ['75', 75] - ['76', 76] - ['77', 77] - ['78', 78] - ['79', 79] - ['80', 80] - ['81', 81] - ['82', 82] - ['83', 83] - ['84', 84] - ['85', 85] - ['86', 86] - ['87', 87] - ['88', 88] - ['89', 89] - ['90', 90] - ['91', 91] - ['92', 92] - ['93', 93] - ['94', 94] - ['95', 95] - ['96', 96] - ['97', 97] - ['98', 98] - ['99', 99] - ['100', 100] ... space:update({tostring(101)}, {{'=', 2, 101}}) --- ... space:get({tostring(101)}) --- ... space:drop() --- ... -- update (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key}) end --- ... for key = 1, 100 do space:update({key}, {{'=', 2, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key})) end --- ... 
t --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... space:update({101}, {{'=', 2, 101}}) --- ... space:get({101}) --- ... space:drop() --- ... -- update multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key, key}) end --- ... for key = 1, 100 do space:update({key, key}, {{'=', 3, key}}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... 
t --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] - [4, 4, 4] - [5, 5, 5] - [6, 6, 6] - [7, 7, 7] - [8, 8, 8] - [9, 9, 9] - [10, 10, 10] - [11, 11, 11] - [12, 12, 12] - [13, 13, 13] - [14, 14, 14] - [15, 15, 15] - [16, 16, 16] - [17, 17, 17] - [18, 18, 18] - [19, 19, 19] - [20, 20, 20] - [21, 21, 21] - [22, 22, 22] - [23, 23, 23] - [24, 24, 24] - [25, 25, 25] - [26, 26, 26] - [27, 27, 27] - [28, 28, 28] - [29, 29, 29] - [30, 30, 30] - [31, 31, 31] - [32, 32, 32] - [33, 33, 33] - [34, 34, 34] - [35, 35, 35] - [36, 36, 36] - [37, 37, 37] - [38, 38, 38] - [39, 39, 39] - [40, 40, 40] - [41, 41, 41] - [42, 42, 42] - [43, 43, 43] - [44, 44, 44] - [45, 45, 45] - [46, 46, 46] - [47, 47, 47] - [48, 48, 48] - [49, 49, 49] - [50, 50, 50] - [51, 51, 51] - [52, 52, 52] - [53, 53, 53] - [54, 54, 54] - [55, 55, 55] - [56, 56, 56] - [57, 57, 57] - [58, 58, 58] - [59, 59, 59] - [60, 60, 60] - [61, 61, 61] - [62, 62, 62] - [63, 63, 63] - [64, 64, 64] - [65, 65, 65] - [66, 66, 66] - [67, 67, 67] - [68, 68, 68] - [69, 69, 69] - [70, 70, 70] - [71, 71, 71] - [72, 72, 72] - [73, 73, 73] - [74, 74, 74] - [75, 75, 75] - [76, 76, 76] - [77, 77, 77] - [78, 78, 78] - [79, 79, 79] - [80, 80, 80] - [81, 81, 81] - [82, 82, 82] - [83, 83, 83] - [84, 84, 84] - [85, 85, 85] - [86, 86, 86] - [87, 87, 87] - [88, 88, 88] - [89, 89, 89] - [90, 90, 90] - [91, 91, 91] - [92, 92, 92] - [93, 93, 93] - [94, 94, 94] - [95, 95, 95] - [96, 96, 96] - [97, 97, 97] - [98, 98, 98] - [99, 99, 99] - [100, 100, 100] ... space:update({101, 101}, {{'=', 3, 101}}) --- ... space:get({101, 101}) --- ... space:drop() --- ... -- update with box.tuple.new space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key, key}) end --- ... for key = 1, 100 do space:update(box.tuple.new{key, key}, box.tuple.new{{'=', 3, key}}) end --- ... t = {} --- ... 
for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... t --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] - [4, 4, 4] - [5, 5, 5] - [6, 6, 6] - [7, 7, 7] - [8, 8, 8] - [9, 9, 9] - [10, 10, 10] - [11, 11, 11] - [12, 12, 12] - [13, 13, 13] - [14, 14, 14] - [15, 15, 15] - [16, 16, 16] - [17, 17, 17] - [18, 18, 18] - [19, 19, 19] - [20, 20, 20] - [21, 21, 21] - [22, 22, 22] - [23, 23, 23] - [24, 24, 24] - [25, 25, 25] - [26, 26, 26] - [27, 27, 27] - [28, 28, 28] - [29, 29, 29] - [30, 30, 30] - [31, 31, 31] - [32, 32, 32] - [33, 33, 33] - [34, 34, 34] - [35, 35, 35] - [36, 36, 36] - [37, 37, 37] - [38, 38, 38] - [39, 39, 39] - [40, 40, 40] - [41, 41, 41] - [42, 42, 42] - [43, 43, 43] - [44, 44, 44] - [45, 45, 45] - [46, 46, 46] - [47, 47, 47] - [48, 48, 48] - [49, 49, 49] - [50, 50, 50] - [51, 51, 51] - [52, 52, 52] - [53, 53, 53] - [54, 54, 54] - [55, 55, 55] - [56, 56, 56] - [57, 57, 57] - [58, 58, 58] - [59, 59, 59] - [60, 60, 60] - [61, 61, 61] - [62, 62, 62] - [63, 63, 63] - [64, 64, 64] - [65, 65, 65] - [66, 66, 66] - [67, 67, 67] - [68, 68, 68] - [69, 69, 69] - [70, 70, 70] - [71, 71, 71] - [72, 72, 72] - [73, 73, 73] - [74, 74, 74] - [75, 75, 75] - [76, 76, 76] - [77, 77, 77] - [78, 78, 78] - [79, 79, 79] - [80, 80, 80] - [81, 81, 81] - [82, 82, 82] - [83, 83, 83] - [84, 84, 84] - [85, 85, 85] - [86, 86, 86] - [87, 87, 87] - [88, 88, 88] - [89, 89, 89] - [90, 90, 90] - [91, 91, 91] - [92, 92, 92] - [93, 93, 93] - [94, 94, 94] - [95, 95, 95] - [96, 96, 96] - [97, 97, 97] - [98, 98, 98] - [99, 99, 99] - [100, 100, 100] ... space:update({101, 101}, {{'=', 3, 101}}) --- ... space:get({101, 101}) --- ... space:drop() --- ... -- update multiple indices space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'string'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string'}, unique = false }) --- ... 
index3 = space:create_index('third', { type = 'tree', parts = {3, 'scalar', 2, 'string', 1, 'unsigned'}, unique = false }) --- ... space:insert({1, 'fwoen', 324}) --- - [1, 'fwoen', 324] ... space:insert({2, 'fwoen', 123}) --- - [2, 'fwoen', 123] ... space:insert({3, 'fwoen', 324}) --- - [3, 'fwoen', 324] ... space:insert({4, '21qn2', 213}) --- - [4, '21qn2', 213] ... space:insert({5, 'fgb', '231293'}) --- - [5, 'fgb', '231293'] ... space:insert({6, 'nrhjrt', -1231.234}) --- - [6, 'nrhjrt', -1231.234] ... index1:update({1}, {{'+', 3, 10}}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... index1:update({1, 'fwoen'}, {{'+', 3, 10}}) --- - [1, 'fwoen', 334] ... index1:update({0, 'fwoen'}, {{'=', 3, 5}}) --- ... index2:update({'fwoen'}, {'=', 3, 1000}) --- - error: Get() doesn't support partial keys and non-unique indexes ... index3:update({324, 'fwoen', 3}, {{'-', 3, 100}}) --- - error: Get() doesn't support partial keys and non-unique indexes ... space:drop() --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) --- ... index3 = space:create_index('third', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) --- ... space:insert({1, 1, 1}) --- - [1, 1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3, 3, 3}) --- - [3, 3, 3] ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:update({1}, {{'=', 2, 2}, {'=', 3, 3}}) --- - error: Duplicate key exists in unique index 'secondary' in space 'test' ... index1:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... index2:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... index3:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:drop() --- ... 
-- https://github.com/tarantool/tarantool/issues/1854 space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... space:insert({1, 1, 1}) --- - [1, 1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3, 3, 3}) --- - [3, 3, 3] ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:update({2}, {}) --- - [2, 2, 2] ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/tuple.test.lua0000664000000000000000000001563313306565107021262 0ustar rootroottest_run = require('test_run').new() engine = test_run:get_cfg('engine') test_run:cmd("push filter 'Failed to allocate [0-9]+' to 'Failed to allocate '") test_run:cmd("push filter '"..engine.."_max_tuple_size' to '_max_tuple_size'") -- https://github.com/tarantool/tarantool/issues/2667 -- Allow to insert tuples bigger than `max_tuple_size' s = box.schema.space.create('test', { engine = engine }) _ = s:create_index('primary') engine_max_tuple_size = engine ..'_max_tuple_size' engine_tuple_size = engine == 'memtx' and 16 or 32 box.cfg{[engine_max_tuple_size] = 1024 * 1024} -- check max_tuple_size limit max_tuple_size = box.cfg[engine_max_tuple_size] _ = s:replace({1, string.rep('x', max_tuple_size)}) -- check max_tuple_size dynamic configuration box.cfg { [engine_max_tuple_size] = 2 * max_tuple_size } _ = s:replace({1, string.rep('x', max_tuple_size)}) -- check tuple sie box.cfg { [engine_max_tuple_size] = engine_tuple_size + 2 } _ = s:replace({1}) -- check large tuples allocated on malloc box.cfg { [engine_max_tuple_size] = 32 * 1024 * 1024 } _ = s:replace({1, string.rep('x', 32 * 1024 * 1024 - engine_tuple_size - 8)}) -- decrease max_tuple_size limit box.cfg { [engine_max_tuple_size] = 1 * 1024 * 1024 } _ = s:replace({1, string.rep('x', 2 * 1024 * 1024 )}) _ = s:replace({1, string.rep('x', 1 * 1024 * 1024 - engine_tuple_size - 
8)}) -- gh-2698 Tarantool crashed on 4M tuple max_item_size = 0 test_run:cmd("setopt delimiter ';'") for _, v in pairs(box.slab.stats()) do max_item_size = math.max(max_item_size, v.item_size) end; test_run:cmd("setopt delimiter ''"); box.cfg { [engine_max_tuple_size] = max_item_size + engine_tuple_size + 8 } _ = box.space.test:replace{1, 1, string.rep('a', max_item_size)} -- reset to original value box.cfg { [engine_max_tuple_size] = max_tuple_size } s:drop(); collectgarbage('collect') -- collect all large tuples box.snapshot() -- discard xlogs with large tuples test_run:cmd("clear filter") -- -- gh-1014: tuple field names. -- format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'string'} format[3] = {name = 'field3', type = 'array'} format[4] = {name = 'field4', type = 'number'} format[5] = {name = 'field5', type = 'integer'} format[6] = {name = 'field6', type = 'scalar'} format[7] = {name = 'field7', type = 'map'} format[8] = {name = 'field8', type = 'any'} format[9] = {name = 'field9'} format[10] = {name = 'bsize'} format[11] = {name = 'totable'} format[12] = {name = 'longlonglonglonglonglongname'} s = box.schema.space.create('test', {engine = engine, format = format}) pk = s:create_index('pk') t = {1, '2', {3, 3}, 4.4, -5, true, {key = 7}, 8, 9, 10, 11, 12} t = s:replace(t) t t.field1, t.field2, t.field3, t.field4, t.field5, t.field6, t.field7, t.field8, t.field9, t.bsize, t.totable t.longlonglonglonglonglongname box.tuple.bsize(t) box.tuple.totable(t) s:drop() -- -- Increase collisions number and make juajit use second hash -- function. 
-- format = {} for i = 1, 100 do format[i] = {name = "skwjhfjwhfwfhwkhfwkjh"..i.."avjnbknwkvbwekjf"} end s = box.schema.space.create('test', { engine = engine, format = format }) p = s:create_index('pk') to_insert = {} for i = 1, 100 do to_insert[i] = i end t = s:replace(to_insert) format = nil name = nil s = nil p = nil to_insert = nil collectgarbage('collect') -- Print many many strings (> 40 to reach max_collisions limit in luajit). t.skwjhfjwhfwfhwkhfwkjh01avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh02avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh03avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh04avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh05avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh06avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh07avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh08avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh09avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh10avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh11avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh12avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh13avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh14avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh15avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh16avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh17avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh18avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh19avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh20avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh21avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh22avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh23avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh24avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh25avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh26avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh27avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh28avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh29avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh30avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh31avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh32avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh33avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh34avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh35avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh36avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh37avjnbknwkvbwekjf 
t.skwjhfjwhfwfhwkhfwkjh38avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh39avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh40avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh41avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh42avjnbknwkvbwekjf t.skwjhfjwhfwfhwkhfwkjh43avjnbknwkvbwekjf box.space.test:drop() -- -- gh-2773: correctly reset max tuple size on restart. -- box.cfg{[engine_max_tuple_size] = 1024 * 1024 * 100} s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk') _ = s:replace({1, string.rep('*', 1024 * 1024)}) _ = s:replace({2, string.rep('*', 1024 * 1024 * 2)}) pk:count() test_run:cmd('restart server default') engine = test_run:get_cfg('engine') s = box.space.test s:count() s:drop() -- -- gh-2821: tuple:tomap(). -- format = {} format[1] = {'field1', 'unsigned'} format[2] = {'field2', 'unsigned'} format[3] = {'field3', 'unsigned'} format[4] = {'field4', 'array'} s = box.schema.space.create('test', {format = format, engine = engine}) pk = s:create_index('pk') t1 = s:replace{1, 2, 3, {'a', 'b', 'c'}} t1map = t1:tomap() function maplen(map) local count = 0 for _ in pairs(map) do count = count + 1 end return count end maplen(t1map), t1map.field1, t1map.field2, t1map.field3, t1map.field4 t1map[1], t1map[2], t1map[3], t1map[4] -- Fields with table type are stored once for name and for index. t1map[4] == t1map.field4 t2 = s:replace{4, 5, 6, {'a', 'b', 'c'}, 'extra1'} t2map = t2:tomap() maplen(t2map), t2map.field1, t2map.field2, t2map.field3, t2map.field4 t1map[1], t1map[2], t1map[3], t2map[4], t2map[5] -- Use box.tuple.tomap alias. t3 = s:replace{7, 8, 9, {'a', 'b', 'c'}, 'extra1', 'extra2'} t3map = box.tuple.tomap(t3) maplen(t3map), t3map.field1, t3map.field2, t3map.field3, t3map.field4 t1map[1], t1map[2], t1map[3], t3map[4], t3map[5], t3map[6] -- Invalid arguments. t3.tomap('123') box.tuple.tomap('456') s:drop() -- No names, no format. 
s = box.schema.space.create('test', { engine = engine }) pk = s:create_index('pk') t1 = s:replace{1, 2, 3} t1map = t1:tomap() maplen(t1map), t1map[1], t1map[2], t1map[3] s:drop() engine = nil test_run = nil tarantool_1.9.1.26.g63eb81e3c/test/engine/null.result0000664000000000000000000005631313306565107020662 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... msgpack = require('msgpack') --- ... -- -- gh-1557: box.NULL in indexes. -- box.NULL == msgpack.NULL --- - true ... box.NULL == nil --- - true ... msgpack.decode(msgpack.encode({box.NULL})) --- - [null] - 3 ... format = {} --- ... format[1] = { name = 'field1', type = 'unsigned' } --- ... format[2] = { name = 'field2', type = 'unsigned', is_nullable = true } --- ... s = box.schema.space.create('test', { engine = engine, format = format }) --- ... -- Bad nullable value. format[2].is_nullable = 100 --- ... s:format(format) -- Fail. --- - error: 'Wrong space format (field 2): ''is_nullable'' must be boolean' ... -- Primary can not be nullable. parts = {} --- ... parts[1] = {field = 2, type = 'unsigned', is_nullable = true} --- ... pk = s:create_index('pk', { parts = parts }) -- Fail. --- - error: Primary index of the space 'test' can not contain nullable parts ... pk = s:create_index('pk') --- ... -- Not TREE nullable. -- Do not print errmsg, because Vinyl's one is different - it does -- not support HASH. ok = pcall(s.create_index, s, 'sk', { parts = parts, type = 'hash' }) -- Fail. --- ... ok --- - false ... -- Conflict of is_nullable in format and in parts. parts[1].is_nullable = false --- ... sk = s:create_index('sk', { parts = parts }) -- Fail. --- - error: Field 2 is nullable in space format, but not nullable in index parts ... -- Try skip nullable in format and specify in part. parts[1].is_nullable = true --- ... sk = s:create_index('sk', { parts = parts }) -- Ok. --- ... format[2].is_nullable = nil --- ... s:format(format) -- Fail. 
--- - error: Field 2 is not nullable in space format, but nullable in index parts ... sk:drop() --- ... -- Try to set nullable in part with no format. s:format({}) --- ... sk = s:create_index('sk', { parts = parts }) --- ... -- And then set format with no nullable. s:format(format) -- Fail. --- - error: Field 2 is not nullable in space format, but nullable in index parts ... format[2].is_nullable = true --- ... s:format(format) -- Ok. --- ... -- Test insert. s:insert{1, 1} --- - [1, 1] ... s:insert{2, box.NULL} --- - [2, null] ... s:insert{3, box.NULL} --- - [3, null] ... s:insert{4, 1} -- Fail. --- - error: Duplicate key exists in unique index 'sk' in space 'test' ... s:insert{4, 4} --- - [4, 4] ... s:insert{5, box.NULL} --- - [5, null] ... pk:select{} --- - - [1, 1] - [2, null] - [3, null] - [4, 4] - [5, null] ... sk:select{} --- - - [2, null] - [3, null] - [5, null] - [1, 1] - [4, 4] ... -- Test exact match. sk:get({1}) --- - [1, 1] ... sk:get({box.NULL}) -- Fail. --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... sk:update({1}, {}) --- - [1, 1] ... sk:update({box.NULL}, {}) -- Fail. --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... _ = sk:delete({1}) --- ... sk:delete({box.NULL}) -- Fail. --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... s:insert({1, 1}) --- - [1, 1] ... -- Test iterators. sk:select{box.NULL} --- - - [2, null] - [3, null] - [5, null] ... sk:select({box.NULL}, {iterator = 'LE'}) --- - - [5, null] - [3, null] - [2, null] ... sk:select({box.NULL}, {iterator = 'LT'}) --- - [] ... sk:select({box.NULL}, {iterator = 'GE'}) --- - - [2, null] - [3, null] - [5, null] - [1, 1] - [4, 4] ... sk:select({box.NULL}, {iterator = 'GT'}) --- - - [1, 1] - [4, 4] ... _ = sk:delete{box.NULL} --- - error: 'Supplied key type of part 0 does not match index part type: expected unsigned' ... 
sk:select{} --- - - [2, null] - [3, null] - [5, null] - [1, 1] - [4, 4] ... pk:select{} --- - - [1, 1] - [2, null] - [3, null] - [4, 4] - [5, null] ... -- Test snapshot during iterator (Vinyl restore). create_iterator = require('utils').create_iterator --- ... iter = create_iterator(sk, {box.NULL}) --- ... iter.next() --- - [2, null] ... box.snapshot() --- - ok ... iter.iterate_over() --- - 0: [3, null] 1: [5, null] ... sk:select{} --- - - [2, null] - [3, null] - [5, null] - [1, 1] - [4, 4] ... pk:select{} --- - - [1, 1] - [2, null] - [3, null] - [4, 4] - [5, null] ... -- Test replace. s:replace{2, 2} --- - [2, 2] ... s:replace{3, box.NULL} -- no changes. --- - [3, null] ... s:replace{6, box.NULL} --- - [6, null] ... pk:select{} --- - - [1, 1] - [2, 2] - [3, null] - [4, 4] - [5, null] - [6, null] ... sk:select{} --- - - [3, null] - [5, null] - [6, null] - [1, 1] - [2, 2] - [4, 4] ... -- Test not unique indexes. s:truncate() --- ... sk:drop() --- ... sk = s:create_index('sk', { parts = parts, unique = false }) --- ... s:insert{1, 1} --- - [1, 1] ... s:insert{2, box.NULL} --- - [2, null] ... s:insert{3, box.NULL} --- - [3, null] ... s:insert{4, 1} --- - [4, 1] ... s:insert{5, box.NULL} --- - [5, null] ... pk:select{} --- - - [1, 1] - [2, null] - [3, null] - [4, 1] - [5, null] ... sk:select{} --- - - [2, null] - [3, null] - [5, null] - [1, 1] - [4, 1] ... -- Test several secondary indexes. s:truncate() --- ... format[2].is_nullable = true --- ... format[3] = { name = 'field3', type = 'unsigned', is_nullable = true } --- ... s:format(format) --- ... parts[1].field = 3 --- ... sk2 = s:create_index('sk2', { parts = parts }) --- ... s:replace{4, 3, 4} --- - [4, 3, 4] ... s:replace{3, 3, 3} --- - [3, 3, 3] ... s:replace{2, box.NULL, box.NULL} --- - [2, null, null] ... s:replace{1, box.NULL, 1} --- - [1, null, 1] ... s:replace{0, 0, box.NULL} --- - [0, 0, null] ... pk:select{} --- - - [0, 0, null] - [1, null, 1] - [2, null, null] - [3, 3, 3] - [4, 3, 4] ... 
sk:select{} --- - - [1, null, 1] - [2, null, null] - [0, 0, null] - [3, 3, 3] - [4, 3, 4] ... sk2:select{} --- - - [0, 0, null] - [2, null, null] - [1, null, 1] - [3, 3, 3] - [4, 3, 4] ... -- Check duplicate conflict on replace. s:replace{4, 4, 3} -- fail --- - error: Duplicate key exists in unique index 'sk2' in space 'test' ... s:replace{4, 4, box.NULL} -- ok --- - [4, 4, null] ... pk:select{} --- - - [0, 0, null] - [1, null, 1] - [2, null, null] - [3, 3, 3] - [4, 4, null] ... sk:select{} --- - - [1, null, 1] - [2, null, null] - [0, 0, null] - [3, 3, 3] - [4, 4, null] ... sk2:select{} --- - - [0, 0, null] - [2, null, null] - [4, 4, null] - [1, null, 1] - [3, 3, 3] ... _ = pk:delete{2} --- ... pk:select{} --- - - [0, 0, null] - [1, null, 1] - [3, 3, 3] - [4, 4, null] ... sk:select{} --- - - [1, null, 1] - [0, 0, null] - [3, 3, 3] - [4, 4, null] ... sk2:select{} --- - - [0, 0, null] - [4, 4, null] - [1, null, 1] - [3, 3, 3] ... s:drop() --- ... -- -- gh-2880: allow to store less field count than specified in a -- format. -- format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'unsigned'} --- ... format[3] = {name = 'field3'} --- ... format[4] = {name = 'field4', is_nullable = true} --- ... s = box.schema.create_space('test', {engine = engine, format = format}) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... s:replace{1, 2} -- error --- - error: Tuple field count 2 is less than required by space format or defined indexes (expected at least 3) ... t1 = s:replace{2, 3, 4} --- ... t2 = s:replace{3, 4, 5, 6} --- ... t1.field1, t1.field2, t1.field3, t1.field4 --- - 2 - 3 - 4 - null ... t2.field1, t2.field2, t2.field3, t2.field4 --- - 3 - 4 - 5 - 6 ... -- Ensure the tuple is read ok from disk in a case of vinyl. --- ... if engine == 'vinyl' then box.snapshot() end --- ... s:select{2} --- - - [2, 3, 4] ... s:drop() --- ... 
-- Check the case when not contiguous format tail is nullable. format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'unsigned'} --- ... format[3] = {name = 'field3'} --- ... format[4] = {name = 'field4', is_nullable = true} --- ... format[5] = {name = 'field5'} --- ... format[6] = {name = 'field6', is_nullable = true} --- ... format[7] = {name = 'field7', is_nullable = true} --- ... s = box.schema.create_space('test', {engine = engine, format = format}) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... s:replace{1, 2} -- error --- - error: Tuple field count 2 is less than required by space format or defined indexes (expected at least 5) ... s:replace{2, 3, 4} -- error --- - error: Tuple field count 3 is less than required by space format or defined indexes (expected at least 5) ... s:replace{3, 4, 5, 6} -- error --- - error: Tuple field count 4 is less than required by space format or defined indexes (expected at least 5) ... t1 = s:replace{4, 5, 6, 7, 8} --- ... t2 = s:replace{5, 6, 7, 8, 9, 10} --- ... t3 = s:replace{6, 7, 8, 9, 10, 11, 12} --- ... t1.field1, t1.field2, t1.field3, t1.field4, t1.field5, t1.field6, t1.field7 --- - 4 - 5 - 6 - 7 - 8 - null - null ... t2.field1, t2.field2, t2.field3, t2.field4, t2.field5, t2.field6, t2.field7 --- - 5 - 6 - 7 - 8 - 9 - 10 - null ... t3.field1, t3.field2, t3.field3, t3.field4, t3.field5, t3.field6, t3.field7 --- - 6 - 7 - 8 - 9 - 10 - 11 - 12 ... s:select{} --- - - [4, 5, 6, 7, 8] - [5, 6, 7, 8, 9, 10] - [6, 7, 8, 9, 10, 11, 12] ... s:drop() --- ... -- Check nullable indexes with other types s = box.schema.space.create('test', {engine = engine}) --- ... _ = s:create_index('pk') --- ... _ = s:create_index('i1', {parts = {{2, 'string', is_nullable = true}}}) --- ... _ = s:create_index('i2', {parts = {{3, 'number', is_nullable = true}}}) --- ... 
_ = s:create_index('i3', {parts = {{4, 'integer', is_nullable = true}}}) --- ... _ = s:create_index('i4', {parts = {{5, 'boolean', is_nullable = true}}, unique = false}) --- ... _ = s:create_index('i5', {parts = {{6, 'scalar', is_nullable = true}}}) --- ... _ = s:auto_increment{box.NULL, 1.11, -111, false, '111'} --- ... _ = s:auto_increment{'222', box.NULL, -222, true, 222} --- ... _ = s:auto_increment{'333', 3.33, box.NULL, false, 3.33} --- ... _ = s:auto_increment{'444', 4.44, -444, box.NULL, true} --- ... _ = s:auto_increment{'555', 5.55, -555, false, box.NULL} --- ... box.snapshot() --- - ok ... _ = s:auto_increment{box.NULL, 6.66, -666, true, '666'} --- ... _ = s:auto_increment{'777', box.NULL, -777, false, 777} --- ... _ = s:auto_increment{'888', 8.88, box.NULL, true, 8.88} --- ... _ = s:auto_increment{'999', 9.99, -999, box.NULL, false} --- ... _ = s:auto_increment{'000', 0.00, -000, true, box.NULL} --- ... s.index.i1:select() --- - - [1, null, 1.11, -111, false, '111'] - [6, null, 6.66, -666, true, '666'] - [10, '000', 0, 0, true, null] - [2, '222', null, -222, true, 222] - [3, '333', 3.33, null, false, 3.33] - [4, '444', 4.44, -444, null, true] - [5, '555', 5.55, -555, false, null] - [7, '777', null, -777, false, 777] - [8, '888', 8.88, null, true, 8.88] - [9, '999', 9.99, -999, null, false] ... s.index.i2:select() --- - - [2, '222', null, -222, true, 222] - [7, '777', null, -777, false, 777] - [10, '000', 0, 0, true, null] - [1, null, 1.11, -111, false, '111'] - [3, '333', 3.33, null, false, 3.33] - [4, '444', 4.44, -444, null, true] - [5, '555', 5.55, -555, false, null] - [6, null, 6.66, -666, true, '666'] - [8, '888', 8.88, null, true, 8.88] - [9, '999', 9.99, -999, null, false] ... 
s.index.i3:select() --- - - [3, '333', 3.33, null, false, 3.33] - [8, '888', 8.88, null, true, 8.88] - [9, '999', 9.99, -999, null, false] - [7, '777', null, -777, false, 777] - [6, null, 6.66, -666, true, '666'] - [5, '555', 5.55, -555, false, null] - [4, '444', 4.44, -444, null, true] - [2, '222', null, -222, true, 222] - [1, null, 1.11, -111, false, '111'] - [10, '000', 0, 0, true, null] ... s.index.i4:select() --- - - [4, '444', 4.44, -444, null, true] - [9, '999', 9.99, -999, null, false] - [1, null, 1.11, -111, false, '111'] - [3, '333', 3.33, null, false, 3.33] - [5, '555', 5.55, -555, false, null] - [7, '777', null, -777, false, 777] - [2, '222', null, -222, true, 222] - [6, null, 6.66, -666, true, '666'] - [8, '888', 8.88, null, true, 8.88] - [10, '000', 0, 0, true, null] ... s.index.i5:select() --- - - [5, '555', 5.55, -555, false, null] - [10, '000', 0, 0, true, null] - [9, '999', 9.99, -999, null, false] - [4, '444', 4.44, -444, null, true] - [3, '333', 3.33, null, false, 3.33] - [8, '888', 8.88, null, true, 8.88] - [2, '222', null, -222, true, 222] - [7, '777', null, -777, false, 777] - [1, null, 1.11, -111, false, '111'] - [6, null, 6.66, -666, true, '666'] ... s:drop() --- ... -- -- gh-2973: allow to enable nullable on a non-empty space. -- format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'unsigned'} --- ... s = box.schema.create_space('test', {format = format}) --- ... pk = s:create_index('pk') --- ... s:replace{1, 1} --- - [1, 1] ... s:replace{100, 100} --- - [100, 100] ... s:replace{50, 50} --- - [50, 50] ... s:replace{25, box.NULL} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... format[2].is_nullable = true --- ... s:format(format) --- ... s:replace{25, box.NULL} --- - [25, null] ... s:replace{10, box.NULL} --- - [10, null] ... s:replace{150, box.NULL} --- - [150, null] ... 
s:select{} --- - - [1, 1] - [10, null] - [25, null] - [50, 50] - [100, 100] - [150, null] ... s:drop() --- ... s = box.schema.create_space('test') --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = false}}}) --- ... s:replace{1, 1} --- - [1, 1] ... s:replace{100, 100} --- - [100, 100] ... s:replace{50, 50} --- - [50, 50] ... s:replace{25, box.NULL} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... sk:alter({parts = {{2, 'unsigned', is_nullable = true}}}) --- ... s:replace{25, box.NULL} --- - [25, null] ... s:replace{10, box.NULL} --- - [10, null] ... s:replace{150, box.NULL} --- - [150, null] ... sk:select{} --- - - [10, null] - [25, null] - [150, null] - [1, 1] - [50, 50] - [100, 100] ... s:drop() --- ... -- -- gh-2988: allow absense of tail nullable indexed fields. -- s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) --- ... -- Test tuple_compare_slowpath, tuple_compare_with_key_slowpath. s:replace{} -- Fail --- - error: Tuple field count 0 is less than required by space format or defined indexes (expected at least 1) ... -- Compare full vs not full. s:replace{2} --- - [2] ... s:replace{1, 2} --- - [1, 2] ... s:select{} --- - - [1, 2] - [2] ... sk:select{box.NULL} --- - - [2] ... sk:select{2} --- - - [1, 2] ... -- Compare not full vs full. s:replace{4, 5} --- - [4, 5] ... s:replace{3} --- - [3] ... s:select{} --- - - [1, 2] - [2] - [3] - [4, 5] ... sk:select{box.NULL} --- - - [2] - [3] ... sk:select{5} --- - - [4, 5] ... -- Compare extended keys. s:replace{7} --- - [7] ... s:replace{6} --- - [6] ... s:select{} --- - - [1, 2] - [2] - [3] - [4, 5] - [6] - [7] ... sk:select{box.NULL} --- - - [2] - [3] - [6] - [7] ... sk:select{} --- - - [2] - [3] - [6] - [7] - [1, 2] - [4, 5] ... -- Test tuple extract key during dump for vinyl. 
box.snapshot() --- - ok ... sk:select{} --- - - [2] - [3] - [6] - [7] - [1, 2] - [4, 5] ... s:select{} --- - - [1, 2] - [2] - [3] - [4, 5] - [6] - [7] ... -- Test tuple_compare_sequential_nullable, -- tuple_compare_with_key_sequential. s:drop() --- ... s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk') --- ... parts = {} --- ... parts[1] = {1, 'unsigned'} --- ... parts[2] = {2, 'unsigned', is_nullable = true} --- ... parts[3] = {3, 'unsigned', is_nullable = true} --- ... sk = s:create_index('sk', {parts = parts}) --- ... -- Compare full vs not full. s:replace{1, 2, 3} --- - [1, 2, 3] ... s:replace{3} --- - [3] ... s:replace{2, 3} --- - [2, 3] ... sk:select{} --- - - [1, 2, 3] - [2, 3] - [3] ... sk:select{3, box.NULL} --- - - [3] ... sk:select{3, box.NULL, box.NULL} --- - - [3] ... sk:select{2} --- - - [2, 3] ... sk:select{2, 3} --- - - [2, 3] ... sk:select{3, 100} --- - [] ... sk:select{3, box.NULL, 100} --- - [] ... sk:select({3, box.NULL}, {iterator = 'GE'}) --- - - [3] ... sk:select({3, box.NULL}, {iterator = 'LE'}) --- - - [3] - [2, 3] - [1, 2, 3] ... s:select{} --- - - [1, 2, 3] - [2, 3] - [3] ... -- Test tuple extract key for vinyl. box.snapshot() --- - ok ... sk:select{} --- - - [1, 2, 3] - [2, 3] - [3] ... sk:select{3, box.NULL} --- - - [3] ... sk:select{3, box.NULL, box.NULL} --- - - [3] ... sk:select{2} --- - - [2, 3] ... sk:select{2, 3} --- - - [2, 3] ... sk:select{3, 100} --- - [] ... sk:select{3, box.NULL, 100} --- - [] ... sk:select({3, box.NULL}, {iterator = 'GE'}) --- - - [3] ... sk:select({3, box.NULL}, {iterator = 'LE'}) --- - - [3] - [2, 3] - [1, 2, 3] ... -- Test a tuple_compare_sequential() for a case, when there are -- two equal tuples, but in one of them field count < unique field -- count. s:replace{1, box.NULL} --- - [1, null] ... s:replace{1, box.NULL, box.NULL} --- - [1, null, null] ... s:select{1} --- - - [1, null, null] ... -- -- Partially sequential keys. 
See tuple_extract_key.cc and -- contains_sequential_parts template flag. -- s:drop() --- ... s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk') --- ... parts = {} --- ... parts[1] = {2, 'unsigned', is_nullable = true} --- ... parts[2] = {3, 'unsigned', is_nullable = true} --- ... parts[3] = {5, 'unsigned', is_nullable = true} --- ... parts[4] = {6, 'unsigned', is_nullable = true} --- ... parts[5] = {4, 'unsigned', is_nullable = true} --- ... parts[6] = {7, 'unsigned', is_nullable = true} --- ... sk = s:create_index('sk', {parts = parts}) --- ... s:insert{1, 1, 1, 1, 1, 1, 1} --- - [1, 1, 1, 1, 1, 1, 1] ... s:insert{8, 1, 1, 1, 1, box.NULL} --- - [8, 1, 1, 1, 1, null] ... s:insert{9, 1, 1, 1, box.NULL} --- - [9, 1, 1, 1, null] ... s:insert{6, 6} --- - [6, 6] ... s:insert{10, 6, box.NULL} --- - [10, 6, null] ... s:insert{2, 2, 2, 2, 2, 2} --- - [2, 2, 2, 2, 2, 2] ... s:insert{7} --- - [7] ... s:insert{5, 5, 5} --- - [5, 5, 5] ... s:insert{3, 5, box.NULL, box.NULL, box.NULL} --- - [3, 5, null, null, null] ... s:insert{4, 5, 5, 5, box.NULL} --- - [4, 5, 5, 5, null] ... s:insert{11, 4, 4, 4} --- - [11, 4, 4, 4] ... s:insert{12, 4, box.NULL, 4} --- - [12, 4, null, 4] ... s:insert{13, 3, 3, 3, 3} --- - [13, 3, 3, 3, 3] ... s:insert{14, box.NULL, 3, box.NULL, 3} --- - [14, null, 3, null, 3] ... s:select{} --- - - [1, 1, 1, 1, 1, 1, 1] - [2, 2, 2, 2, 2, 2] - [3, 5, null, null, null] - [4, 5, 5, 5, null] - [5, 5, 5] - [6, 6] - [7] - [8, 1, 1, 1, 1, null] - [9, 1, 1, 1, null] - [10, 6, null] - [11, 4, 4, 4] - [12, 4, null, 4] - [13, 3, 3, 3, 3] - [14, null, 3, null, 3] ... sk:select{} --- - - [7] - [14, null, 3, null, 3] - [9, 1, 1, 1, null] - [8, 1, 1, 1, 1, null] - [1, 1, 1, 1, 1, 1, 1] - [2, 2, 2, 2, 2, 2] - [13, 3, 3, 3, 3] - [12, 4, null, 4] - [11, 4, 4, 4] - [3, 5, null, null, null] - [5, 5, 5] - [4, 5, 5, 5, null] - [6, 6] - [10, 6, null] ... sk:select{5, 5, box.NULL} --- - - [5, 5, 5] - [4, 5, 5, 5, null] ... 
sk:select{5, 5, box.NULL, 100} --- - [] ... sk:select({7, box.NULL}, {iterator = 'LT'}) --- - - [10, 6, null] - [6, 6] - [4, 5, 5, 5, null] - [5, 5, 5] - [3, 5, null, null, null] - [11, 4, 4, 4] - [12, 4, null, 4] - [13, 3, 3, 3, 3] - [2, 2, 2, 2, 2, 2] - [1, 1, 1, 1, 1, 1, 1] - [8, 1, 1, 1, 1, null] - [9, 1, 1, 1, null] - [14, null, 3, null, 3] - [7] ... box.snapshot() --- - ok ... sk:select{} --- - - [7] - [14, null, 3, null, 3] - [9, 1, 1, 1, null] - [8, 1, 1, 1, 1, null] - [1, 1, 1, 1, 1, 1, 1] - [2, 2, 2, 2, 2, 2] - [13, 3, 3, 3, 3] - [12, 4, null, 4] - [11, 4, 4, 4] - [3, 5, null, null, null] - [5, 5, 5] - [4, 5, 5, 5, null] - [6, 6] - [10, 6, null] ... sk:select{5, 5, box.NULL} --- - - [5, 5, 5] - [4, 5, 5, 5, null] ... sk:select{5, 5, box.NULL, 100} --- - [] ... sk:select({7, box.NULL}, {iterator = 'LT'}) --- - - [10, 6, null] - [6, 6] - [4, 5, 5, 5, null] - [5, 5, 5] - [3, 5, null, null, null] - [11, 4, 4, 4] - [12, 4, null, 4] - [13, 3, 3, 3, 3] - [2, 2, 2, 2, 2, 2] - [1, 1, 1, 1, 1, 1, 1] - [8, 1, 1, 1, 1, null] - [9, 1, 1, 1, null] - [14, null, 3, null, 3] - [7] ... s:drop() --- ... -- -- The main case of absent nullable fields - create an index over -- them on not empty space (available on memtx only). -- s = box.schema.space.create('test', {engine = 'memtx'}) --- ... pk = s:create_index('pk') --- ... s:replace{1} --- - [1] ... s:replace{2} --- - [2] ... s:replace{3} --- - [3] ... sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) --- ... s:replace{4} --- - [4] ... s:replace{5, 6} --- - [5, 6] ... s:replace{7, 8} --- - [7, 8] ... s:replace{9, box.NULL} --- - [9, null] ... s:select{} --- - - [1] - [2] - [3] - [4] - [5, 6] - [7, 8] - [9, null] ... sk:select{} --- - - [1] - [2] - [3] - [4] - [9, null] - [5, 6] - [7, 8] ... sk:select{box.NULL} --- - - [1] - [2] - [3] - [4] - [9, null] ... s:drop() --- ... 
-- -- The complex case: when an index part is_nullable is set to, -- false and it changes min_field_count, this part must become -- optional and turn on comparators for optional fields. See the -- big comment in alter.cc in index_def_new_from_tuple(). -- s = box.schema.create_space('test', {engine = 'memtx'}) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {2, 'unsigned'}}) --- ... s:replace{1, 1} --- - [1, 1] ... s:replace{2, box.NULL} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... s:select{} --- - - [1, 1] ... sk:alter({parts = {{2, 'unsigned', is_nullable = true}}}) --- ... s:replace{20, box.NULL} --- - [20, null] ... sk:select{} --- - - [20, null] - [1, 1] ... s:replace{10} --- - [10] ... sk:select{} --- - - [10] - [20, null] - [1, 1] ... s:replace{40} --- - [40] ... sk:select{} --- - - [10] - [20, null] - [40] - [1, 1] ... s:drop() --- ... -- -- Check that if an index alter makes a field be optional, and -- this field is used in another index, then this another index -- is updated too. Case of @locker. -- s = box.schema.space.create('test', {engine = 'memtx'}) --- ... _ = s:create_index('pk') --- ... i1 = s:create_index('i1', {parts = {2, 'unsigned', 3, 'unsigned'}}) --- ... i2 = s:create_index('i2', {parts = {3, 'unsigned', 2, 'unsigned'}}) --- ... i1:alter{parts = {{2, 'unsigned'}, {3, 'unsigned', is_nullable = true}}} --- ... -- i2 alter makes i1 contain optional part. Its key_def and -- comparators must be updated. i2:alter{parts = {{3, 'unsigned', is_nullable = true}, {2, 'unsigned'}}} --- ... s:insert{1, 1} --- - [1, 1] ... s:insert{100, 100} --- - [100, 100] ... s:insert{50, 50} --- - [50, 50] ... s:insert{25, 25, 25} --- - [25, 25, 25] ... s:insert{75, 75, 75} --- - [75, 75, 75] ... s:select{} --- - - [1, 1] - [25, 25, 25] - [50, 50] - [75, 75, 75] - [100, 100] ... i1:select{} --- - - [1, 1] - [25, 25, 25] - [50, 50] - [75, 75, 75] - [100, 100] ... 
i2:select{} --- - - [1, 1] - [50, 50] - [100, 100] - [25, 25, 25] - [75, 75, 75] ... i2:select{box.NULL, 50} --- - - [50, 50] ... i2:select{} --- - - [1, 1] - [50, 50] - [100, 100] - [25, 25, 25] - [75, 75, 75] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/select.test.lua0000664000000000000000000001007313306560010021365 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- select (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end index:select({}, {iterator = box.index.ALL}) index:select({}, {iterator = box.index.GE}) index:select(tostring(44), {iterator = box.index.GE}) index:select({}, {iterator = box.index.GT}) index:select(tostring(44), {iterator = box.index.GT}) index:select({}, {iterator = box.index.LE}) index:select(tostring(77), {iterator = box.index.LE}) index:select({}, {iterator = box.index.LT}) index:select(tostring(77), {iterator = box.index.LT}) space:drop() -- select (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:replace({key}) end index:select({}, {iterator = box.index.ALL}) index:select({}, {iterator = box.index.GE}) index:select(44, {iterator = box.index.GE}) index:select({}, {iterator = box.index.GT}) index:select(44, {iterator = box.index.GT}) index:select({}, {iterator = box.index.LE}) index:select(77, {iterator = box.index.LE}) index:select({}, {iterator = box.index.LT}) index:select(77, {iterator = box.index.LT}) space:drop() -- select multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:replace({key, key}) end index:select({}, {iterator = 
box.index.ALL}) index:select({}, {iterator = box.index.GE}) index:select({44, 44}, {iterator = box.index.GE}) index:select({}, {iterator = box.index.GT}) index:select({44, 44}, {iterator = box.index.GT}) index:select({}, {iterator = box.index.LE}) index:select({77, 77}, {iterator = box.index.LE}) index:select({}, {iterator = box.index.LT}) index:select({77, 77}, {iterator = box.index.LT}) space:drop() -- select with box.tuple.new space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end index:select(box.tuple.new{}, {iterator = box.index.ALL}) index:select(box.tuple.new{}, {iterator = box.index.GE}) index:select(box.tuple.new(tostring(44)), {iterator = box.index.GE}) index:select(box.tuple.new{}, {iterator = box.index.GT}) index:select(box.tuple.new(tostring(44)), {iterator = box.index.GT}) index:select(box.tuple.new{}, {iterator = box.index.LE}) index:select(box.tuple.new(tostring(77)), {iterator = box.index.LE}) index:select(box.tuple.new{}, {iterator = box.index.LT}) index:select(box.tuple.new(tostring(77)), {iterator = box.index.LT}) space:drop() -- select multiple indices -- two indices space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'number'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string'} }) space:insert({1, 'a'}) space:insert({2, 'd'}) space:insert({3, 'c'}) space:insert({4, 'b'}) space:insert({5, 'bbbb'}) space:insert({5, 'cbcb'}) space:insert({6, 'bbbb'}) space:insert({-45.2, 'waerwe'}) index1:select{} index2:select{} space:get{5} index1:get{5} space:select{5} index1:get{5} index2:get{'a'} index2:select{'a'} space:drop() -- three indices space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = 
space:create_index('secondary', { type = 'tree', parts = {2, 'integer'} }) index3 = space:create_index('third', { type = 'tree', parts = {3, 'integer'} }) space:insert({1, -30, 9}) space:insert({5, 234, 9789}) space:insert({10, -56, 212}) space:insert({2, 762, 1235}) space:insert({4, 7873, 67545}) space:insert({9, 103, 1232}) index1:select{} index2:select{} index3:select{} index1:select{10} index1:get{9} index2:select{-56} index2:select{-57} index2:get{103} index2:get{104} index3:get{9} index3:select{1235} space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_wal.result0000664000000000000000000002471113306560010022201 0ustar rootroot-- write data recover from logs only env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... space:insert({0}) --- - [0] ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... index:select({}, {iterator = box.index.ALL}) --- - - [0] ... for key = 1, 1000 do space:insert({key}) end --- ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... 
index:select({}, {iterator = box.index.ALL}) --- - - [0] - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] - [101] - [102] - [103] - [104] - [105] - [106] - [107] - [108] - [109] - [110] - [111] - [112] - [113] - [114] - [115] - [116] - [117] - [118] - [119] - [120] - [121] - [122] - [123] - [124] - [125] - [126] - [127] - [128] - [129] - [130] - [131] - [132] - [133] - [134] - [135] - [136] - [137] - [138] - [139] - [140] - [141] - [142] - [143] - [144] - [145] - [146] - [147] - [148] - [149] - [150] - [151] - [152] - [153] - [154] - [155] - [156] - [157] - [158] - [159] - [160] - [161] - [162] - [163] - [164] - [165] - [166] - [167] - [168] - [169] - [170] - [171] - [172] - [173] - [174] - [175] - [176] - [177] - [178] - [179] - [180] - [181] - [182] - [183] - [184] - [185] - [186] - [187] - [188] - [189] - [190] - [191] - [192] - [193] - [194] - [195] - [196] - [197] - [198] - [199] - [200] - [201] - [202] - [203] - [204] - [205] - [206] - [207] - [208] - [209] - [210] - [211] - [212] - [213] - [214] - [215] - [216] - [217] - [218] - [219] - [220] - [221] - [222] - [223] - [224] - [225] - [226] - [227] - [228] - [229] - [230] - [231] - [232] - [233] - [234] - [235] - [236] - [237] - [238] - [239] - [240] - [241] - [242] - [243] - [244] - [245] - [246] - [247] - [248] - [249] - [250] - [251] - [252] - [253] - [254] - [255] - [256] - 
[257] - [258] - [259] - [260] - [261] - [262] - [263] - [264] - [265] - [266] - [267] - [268] - [269] - [270] - [271] - [272] - [273] - [274] - [275] - [276] - [277] - [278] - [279] - [280] - [281] - [282] - [283] - [284] - [285] - [286] - [287] - [288] - [289] - [290] - [291] - [292] - [293] - [294] - [295] - [296] - [297] - [298] - [299] - [300] - [301] - [302] - [303] - [304] - [305] - [306] - [307] - [308] - [309] - [310] - [311] - [312] - [313] - [314] - [315] - [316] - [317] - [318] - [319] - [320] - [321] - [322] - [323] - [324] - [325] - [326] - [327] - [328] - [329] - [330] - [331] - [332] - [333] - [334] - [335] - [336] - [337] - [338] - [339] - [340] - [341] - [342] - [343] - [344] - [345] - [346] - [347] - [348] - [349] - [350] - [351] - [352] - [353] - [354] - [355] - [356] - [357] - [358] - [359] - [360] - [361] - [362] - [363] - [364] - [365] - [366] - [367] - [368] - [369] - [370] - [371] - [372] - [373] - [374] - [375] - [376] - [377] - [378] - [379] - [380] - [381] - [382] - [383] - [384] - [385] - [386] - [387] - [388] - [389] - [390] - [391] - [392] - [393] - [394] - [395] - [396] - [397] - [398] - [399] - [400] - [401] - [402] - [403] - [404] - [405] - [406] - [407] - [408] - [409] - [410] - [411] - [412] - [413] - [414] - [415] - [416] - [417] - [418] - [419] - [420] - [421] - [422] - [423] - [424] - [425] - [426] - [427] - [428] - [429] - [430] - [431] - [432] - [433] - [434] - [435] - [436] - [437] - [438] - [439] - [440] - [441] - [442] - [443] - [444] - [445] - [446] - [447] - [448] - [449] - [450] - [451] - [452] - [453] - [454] - [455] - [456] - [457] - [458] - [459] - [460] - [461] - [462] - [463] - [464] - [465] - [466] - [467] - [468] - [469] - [470] - [471] - [472] - [473] - [474] - [475] - [476] - [477] - [478] - [479] - [480] - [481] - [482] - [483] - [484] - [485] - [486] - [487] - [488] - [489] - [490] - [491] - [492] - [493] - [494] - [495] - [496] - [497] - [498] - [499] - [500] - [501] - [502] - [503] - [504] - [505] - [506] - 
[507] - [508] - [509] - [510] - [511] - [512] - [513] - [514] - [515] - [516] - [517] - [518] - [519] - [520] - [521] - [522] - [523] - [524] - [525] - [526] - [527] - [528] - [529] - [530] - [531] - [532] - [533] - [534] - [535] - [536] - [537] - [538] - [539] - [540] - [541] - [542] - [543] - [544] - [545] - [546] - [547] - [548] - [549] - [550] - [551] - [552] - [553] - [554] - [555] - [556] - [557] - [558] - [559] - [560] - [561] - [562] - [563] - [564] - [565] - [566] - [567] - [568] - [569] - [570] - [571] - [572] - [573] - [574] - [575] - [576] - [577] - [578] - [579] - [580] - [581] - [582] - [583] - [584] - [585] - [586] - [587] - [588] - [589] - [590] - [591] - [592] - [593] - [594] - [595] - [596] - [597] - [598] - [599] - [600] - [601] - [602] - [603] - [604] - [605] - [606] - [607] - [608] - [609] - [610] - [611] - [612] - [613] - [614] - [615] - [616] - [617] - [618] - [619] - [620] - [621] - [622] - [623] - [624] - [625] - [626] - [627] - [628] - [629] - [630] - [631] - [632] - [633] - [634] - [635] - [636] - [637] - [638] - [639] - [640] - [641] - [642] - [643] - [644] - [645] - [646] - [647] - [648] - [649] - [650] - [651] - [652] - [653] - [654] - [655] - [656] - [657] - [658] - [659] - [660] - [661] - [662] - [663] - [664] - [665] - [666] - [667] - [668] - [669] - [670] - [671] - [672] - [673] - [674] - [675] - [676] - [677] - [678] - [679] - [680] - [681] - [682] - [683] - [684] - [685] - [686] - [687] - [688] - [689] - [690] - [691] - [692] - [693] - [694] - [695] - [696] - [697] - [698] - [699] - [700] - [701] - [702] - [703] - [704] - [705] - [706] - [707] - [708] - [709] - [710] - [711] - [712] - [713] - [714] - [715] - [716] - [717] - [718] - [719] - [720] - [721] - [722] - [723] - [724] - [725] - [726] - [727] - [728] - [729] - [730] - [731] - [732] - [733] - [734] - [735] - [736] - [737] - [738] - [739] - [740] - [741] - [742] - [743] - [744] - [745] - [746] - [747] - [748] - [749] - [750] - [751] - [752] - [753] - [754] - [755] - [756] - 
[757] - [758] - [759] - [760] - [761] - [762] - [763] - [764] - [765] - [766] - [767] - [768] - [769] - [770] - [771] - [772] - [773] - [774] - [775] - [776] - [777] - [778] - [779] - [780] - [781] - [782] - [783] - [784] - [785] - [786] - [787] - [788] - [789] - [790] - [791] - [792] - [793] - [794] - [795] - [796] - [797] - [798] - [799] - [800] - [801] - [802] - [803] - [804] - [805] - [806] - [807] - [808] - [809] - [810] - [811] - [812] - [813] - [814] - [815] - [816] - [817] - [818] - [819] - [820] - [821] - [822] - [823] - [824] - [825] - [826] - [827] - [828] - [829] - [830] - [831] - [832] - [833] - [834] - [835] - [836] - [837] - [838] - [839] - [840] - [841] - [842] - [843] - [844] - [845] - [846] - [847] - [848] - [849] - [850] - [851] - [852] - [853] - [854] - [855] - [856] - [857] - [858] - [859] - [860] - [861] - [862] - [863] - [864] - [865] - [866] - [867] - [868] - [869] - [870] - [871] - [872] - [873] - [874] - [875] - [876] - [877] - [878] - [879] - [880] - [881] - [882] - [883] - [884] - [885] - [886] - [887] - [888] - [889] - [890] - [891] - [892] - [893] - [894] - [895] - [896] - [897] - [898] - [899] - [900] - [901] - [902] - [903] - [904] - [905] - [906] - [907] - [908] - [909] - [910] - [911] - [912] - [913] - [914] - [915] - [916] - [917] - [918] - [919] - [920] - [921] - [922] - [923] - [924] - [925] - [926] - [927] - [928] - [929] - [930] - [931] - [932] - [933] - [934] - [935] - [936] - [937] - [938] - [939] - [940] - [941] - [942] - [943] - [944] - [945] - [946] - [947] - [948] - [949] - [950] - [951] - [952] - [953] - [954] - [955] - [956] - [957] - [958] - [959] - [960] - [961] - [962] - [963] - [964] - [965] - [966] - [967] - [968] - [969] - [970] - [971] - [972] - [973] - [974] - [975] - [976] - [977] - [978] - [979] - [980] - [981] - [982] - [983] - [984] - [985] - [986] - [987] - [988] - [989] - [990] - [991] - [992] - [993] - [994] - [995] - [996] - [997] - [998] - [999] - [1000] ... space:drop() --- ... 
test_run:cmd('restart server default with cleanup=1') tarantool_1.9.1.26.g63eb81e3c/test/engine/tree_variants.test.lua0000664000000000000000000001044413306560010022756 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') space = box.schema.space.create('tweedledum', { engine = engine }) i0 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) i1 = space:create_index('i1', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) i2 = space:create_index('i2', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) i3 = space:create_index('i3', { type = 'tree', parts = {4, 'string', 5, 'string'}, unique = false }) i4 = space:create_index('i4', { type = 'tree', parts = {7, 'string', 6, 'string'}, unique = false }) i5 = space:create_index('i5', { type = 'tree', parts = {9, 'unsigned'}, unique = false }) i6 = space:create_index('i6', { type = 'tree', parts = {7, 'string', 6, 'string', 4, 'string', 5, 'string', 9, 'unsigned'}, unique = true }) space:insert{0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000} space:insert{1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001} space:insert{2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002} space:insert{3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003} space:insert{4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004} space:insert{5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005} space:insert{6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} space:insert{7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} space:insert{8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} space:insert{9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} -- In non-unique indexes select output order is undefined, -- so it's better to additionally sort output to receive same order every time. 
function sort_cmp(a, b) return a[1] < b[1] and true or false end function sort(t) table.sort(t, sort_cmp) return t end space.index['primary']:get{1} sort(space.index['i1']:select{2}) sort(space.index[2]:select({300})) #space.index['i3']:select({'Joe', 'Sixpack'}) #space.index['i3']:select('John') #space.index['i4']:select('A Pipe') {sort(space.index['i4']:select{'Miller Genuine Draft', 'Drinks'})} sort(space.index['i5']:select{2007}) sort(space.index[6]:select{'Miller Genuine Draft', 'Drinks'}) tmp = space:delete{6} tmp = space:delete{7} tmp = space:delete{8} tmp = space:delete{9} space:insert{6, 6ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} space:insert{7, 7ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} space:insert{8, 8ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} space:insert{9, 9ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} sort(space.index['i1']:select{6ULL}) sort(space.index['i1']:select{6}) sort(space.index['i2']:select(400ULL)) sort(space.index['i2']:select(400)) sort(space:select{}) -- Test incorrect keys - supplied key field type does not match index type -- https://bugs.launchpad.net/tarantool/+bug/1072624 space:insert{'', 1, 2, '', '', '', '', '', 0} space:insert{'xxxxxxxx', 1, 2, '', '', '', '', '', 0} space:insert{1, '', 2, '', '', '', '', '', 0} space:insert{1, 'xxxxxxxxxxx', 2, '', '', '', '', '', 0} space:drop() sort = nil sort_cmp = nil -- test index bsize space = box.schema.space.create('test', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string', 3, 'scalar'} }) pk:bsize() == 0 index2:bsize() == 0 space:insert({1, 'a', 3}) pk:bsize() > 0 index2:bsize() > 0 space:insert({2, 'b', 4}) old_pk_size = pk:bsize() old_index2_size = index2:bsize() space:insert({2, 'b', 4}) old_pk_size == pk:bsize() old_index2_size == index2:bsize() tmp = pk:delete({1}) pk:bsize() 
> 0 index2:bsize() > 0 tmp = index2:delete({'b', 4}) pk:bsize() > 0 index2:bsize() > 0 space:drop() --range queries space = box.schema.space.create('test', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) str = require('string').rep('A', 500) c1 = 100 c2 = 10 for i = 1,c1 do for j=1,c2 do space:insert{j, i, str} end end good = true for i = 1,c2 do if #space:select{i} ~= c1 then good = false end end good --true total_count = 0 for i = 1,c2 do total_count = total_count + #space:select{i} end total_count --c1 * c2 space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/upsert.test.lua0000664000000000000000000004020213306560010021425 0ustar rootroot test_run = require('test_run').new() engine = test_run:get_cfg('engine') -- upsert (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:upsert({tostring(key), 0}, {{'+', 2, 1}}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t for key = 1, 100 do space:upsert({tostring(key), 0}, {{'+', 2, 10}}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t for key = 1, 100 do space:delete({tostring(key)}) end for key = 1, 100 do space:upsert({tostring(key), 0}, {{'+', 2, 1}, {'=', 3, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t space:drop() -- upsert (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:upsert({key, 0}, {{'+', 2, 1}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t for key = 1, 100 do space:upsert({key, 0}, {{'+', 2, 10}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t for key = 1, 100 do space:delete({key}) end for key = 1, 100 do space:upsert({key, 0}, {{'+', 2, 1}, 
{'=', 3, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t space:drop() -- upsert multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:upsert({key, key, 0}, {{'+', 3, 1}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t for key = 1, 100 do space:upsert({key, key, 0}, {{'+', 3, 10}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t for key = 1, 100 do space:delete({key, key}) end for key = 1, 100 do space:upsert({key, key, 0}, {{'+', 3, 1}, {'=', 4, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t space:drop() test_run:cmd("setopt delimiter ';'"); function less(a, b) if type(a[2]) ~= type(b[2]) then return type(a[2]) < type(b[2]) end if a[2] == b[2] then return a[1] < b[1] end if type(a[2]) == 'boolean' then return a[2] == false and b[2] == true end return a[2] < b[2] end; test_run:cmd("setopt delimiter ''"); function sort(t) table.sort(t, less) return t end -- upsert default tuple constraint space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) space:upsert({0, 'key', 0}, {{'+', 3, 1}}) space:drop() -- upsert primary key modify (skipped) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space:upsert({0, 0}, {{'+', 1, 1}, {'+', 2, 1}}) space:get({0}) space:drop() -- upsert with box.tuple.new space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:upsert(box.tuple.new{key, key, 0}, box.tuple.new{{'+', 3, 1}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t 
for key = 1, 100 do space:upsert(box.tuple.new{key, key, 0}, box.tuple.new{{'+', 3, 10}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t for key = 1, 100 do space:delete({key, key}) end for key = 1, 100 do space:upsert(box.tuple.new{key, key, 0}, box.tuple.new{{'+', 3, 1}, {'=', 4, key}}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t space:drop() -- https://github.com/tarantool/tarantool/issues/1671 space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) space:insert({1, 1}) space:insert({2, 2}) index1:select{} index2:select{} space:upsert({1, 1}, {{'=', 2, 2}}) sort(index1:select{}) sort(index2:select{}) space:drop() s = box.schema.space.create('tweedledum', { engine = engine }) index = s:create_index('pk') s:upsert({0, 0}, {{'+', 2, 2}}) s:select{0} tmp = s:delete{0} s:upsert({0, 0, 0}, {{'+', 2, 2}}) s:select{0} tmp = s:delete{0} s:upsert({0}, {{'+', 2, 2}}) s:select{0} s:replace{0, 1, 2, 4} s:upsert({0, 0, "you will not see it"}, {{'+', 2, 2}}) s:select{0} s:replace{0, -0x4000000000000000ll} s:upsert({0}, {{'+', 2, -0x4000000000000001ll}}) -- overflow s:select{0} s:replace{0, "thing"} s:upsert({0, "nothing"}, {{'+', 2, 2}}) s:select{0} tmp = s:delete{0} s:upsert({0, "thing"}, {{'+', 2, 2}}) s:select{0} s:replace{0, 1, 2} s:upsert({0}, {{'!', 42, 42}}) s:select{0} s:upsert({0}, {{'#', 42, 42}}) s:select{0} s:upsert({0}, {{'=', 42, 42}}) s:select{} s:replace{0, 1.5} s:select{} s:upsert({0}, {{'|', 1, 255}}) s:select{0} s:replace{0, 1.5} s:replace{0, 'something to splice'} s:upsert({0}, {{':', 2, 1, 4, 'no'}}) s:select{0} s:upsert({0}, {{':', 2, 1, 2, 'every'}}) s:select{0} s:upsert({0}, {{':', 2, -100, 2, 'every'}}) s:select{0} s:drop() space = box.schema.space.create('test', { engine = engine, field_count = 1 }) index 
= space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space:insert({1}) space:select{} space:upsert({2, 2}, {{'+', 2, 1}}) -- TODO: https://github.com/tarantool/tarantool/issues/1622 -- space:upsert({1}, {{'=', 2, 10}}) space:select{} space:drop() space = box.schema.space.create('test', { engine = engine, field_count = 2 }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space:insert({1, 1}) space:select{} space:upsert({2, 2, 2}, {{'+', 3, 1}}) space:upsert({3, 3}, {{'+', 2, 1}}) -- TODO: https://github.com/tarantool/tarantool/issues/1622 --space:upsert({4}, {{'=', 2, 10}}) --space:upsert({1}, {{'#', 2}}) space:select{} space:drop() --UPSERT https://github.com/tarantool/tarantool/issues/966 test_run:cmd("setopt delimiter ';'") function anything_to_string(tab) if tab == nil then return 'nil' end local str = '[' local first_route = true local t = 0 for k,f in pairs(tab) do if not first_route then str = str .. ',' end first_route = false t = t + 1 if k ~= t then str = str .. k .. '=' end if type(f) == 'string' then str = str .. "'" .. f .. "'" elseif type (f) == 'number' then str = str .. tostring(f) elseif type (f) == 'table' or type (f) == 'cdata' then str = str .. anything_to_string(f) else str = str .. '?' end end str = str .. 
']' return str end; function things_equal(var1, var2) local type1 = type(var1) == 'cdata' and 'table' or type(var1) local type2 = type(var2) == 'cdata' and 'table' or type(var2) if type1 ~= type2 then return false end if type1 ~= 'table' then return var1 == var2 end for k,v in pairs(var1) do if not things_equal(v, var2[k]) then return false end end for k,v in pairs(var2) do if not things_equal(v, var1[k]) then return false end end return true end; function copy_thing(t) if type(t) ~= 'table' then return t end local res = {} for k,v in pairs(t) do res[copy_thing(k)] = copy_thing(v) end return res end; function test(key_tuple, ops, expect) box.space.s:upsert(key_tuple, ops) if (things_equal(box.space.s:select{}, expect)) then return 'upsert('.. anything_to_string(key_tuple) .. ', ' .. anything_to_string(ops) .. ', ' .. ') OK ' .. anything_to_string(box.space.s:select{}) end return 'upsert('.. anything_to_string(key_tuple) .. ', ' .. anything_to_string(ops) .. ', ' .. ') FAILED, got ' .. anything_to_string(box.space.s:select{}) .. ' expected ' .. 
anything_to_string(expect) end; test_run:cmd("setopt delimiter ''"); -- https://github.com/tarantool/tarantool/issues/1671 -- test upserts without triggers -- test case with one index space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { parts = {1, 'string'} }) space:upsert({1}, {{'!', 2, 100}}) -- must fail on checking tuple space:upsert({'a'}, {{'a', 2, 100}}) -- must fail on checking ops space:upsert({'a'}, {{'!', 2, 'ups1'}}) -- 'fast' upsert via insert in one index space:upsert({'a', 'b'}, {{'!', 2, 'ups2'}}) -- 'fast' upsert via update in one index space:select{} space:drop() -- tests on multiple indexes space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { parts = {1, 'string'} }) index2 = space:create_index('secondary', { parts = {2, 'scalar', 3, 'unsigned'} }) -- test upsert that executes as insert in all indexes space:upsert({'a', 100, 100}, {{'!', 4, 200}}) space:upsert({'b', 100, 200}, {{'!', 4, 300}}) space:upsert({'c', 100, 300}, {{'!', 4, 400}}) index1:select{} index2:select{} -- test upsert that executes as update space:upsert({'a', 100, 100}, {{'=', 3, -200}}) -- must fail on cheking new tuple in secondary index space:upsert({'b', 100, 200}, {{'=', 1, 'd'}}) -- must fail with attempt to modify primary index index1:select{} index2:select{} space:drop() -- https://github.com/tarantool/tarantool/issues/1854 space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space:insert({1, 1, 1}) space:insert({2, 2, 2}) space:insert({3, 3, 3}) space:select{} space:upsert({2, 18, 76}, {}) space:upsert({4, 4, 4}, {}) space:select{} space:drop() s = box.schema.space.create('s', { engine = engine }) index1 = s:create_index('i1') index2 = s:create_index('i2', { parts = {2, 'string'}, unique = false }) t = {1, '1', 1, 'qwerty'} s:insert(t) -- all good operations, one op, equivalent to 
update test(t, {{'+', 3, 5}}, {{1, '1', 6, 'qwerty'}}) test(t, {{'-', 3, 3}}, {{1, '1', 3, 'qwerty'}}) test(t, {{'&', 3, 5}}, {{1, '1', 1, 'qwerty'}}) test(t, {{'|', 3, 8}}, {{1, '1', 9, 'qwerty'}}) test(t, {{'^', 3, 12}}, {{1, '1', 5, 'qwerty'}}) test(t, {{':', 4, 2, 4, "uer"}}, {{1, '1', 5, 'query'}}) test(t, {{'!', 4, 'answer'}}, {{1, '1', 5, 'answer', 'query'}}) test(t, {{'#', 5, 1}}, {{1, '1', 5, 'answer'}}) test(t, {{'!', -1, 1}}, {{1, '1', 5, 'answer', 1}}) test(t, {{'!', -1, 2}}, {{1, '1', 5, 'answer', 1, 2}}) test(t, {{'!', -1, 3}}, {{1, '1', 5, 'answer', 1, 2 ,3}}) test(t, {{'#', 5, 100500}}, {{1, '1', 5, 'answer'}}) test(t, {{'=', 4, 'qwerty'}}, {{1, '1', 5, 'qwerty'}}) -- same check for negative posistion test(t, {{'+', -2, 5}}, {{1, '1', 10, 'qwerty'}}) test(t, {{'-', -2, 3}}, {{1, '1', 7, 'qwerty'}}) test(t, {{'&', -2, 5}}, {{1, '1', 5, 'qwerty'}}) test(t, {{'|', -2, 8}}, {{1, '1', 13, 'qwerty'}}) test(t, {{'^', -2, 12}}, {{1, '1', 1, 'qwerty'}}) test(t, {{':', -1, 2, 4, "uer"}}, {{1, '1', 1, 'query'}}) test(t, {{'!', -2, 'answer'}}, {{1, '1', 1, 'answer', 'query'}}) test(t, {{'#', -1, 1}}, {{1, '1', 1, 'answer'}}) test(t, {{'=', -1, 'answer!'}}, {{1, '1', 1, 'answer!'}}) -- selective test for good multiple ops test(t, {{'+', 3, 2}, {'!', 4, 42}}, {{1, '1', 3, 42, 'answer!'}}) test(t, {{'!', 1, 666}, {'#', 1, 1}, {'+', 3, 2}}, {{1, '1', 5, 42, 'answer!'}}) test(t, {{'!', 3, 43}, {'+', 4, 2}}, {{1, '1', 43, 7, 42, 'answer!'}}) test(t, {{'#', 3, 2}, {'=', 3, 1}, {'=', 4, '1'}}, {{1, '1', 1, '1'}}) -- all bad operations, one op, equivalent to update but error is supressed test(t, {{'+', 4, 3}}, {{1, '1', 1, '1'}}) test(t, {{'-', 4, 3}}, {{1, '1', 1, '1'}}) test(t, {{'&', 4, 1}}, {{1, '1', 1, '1'}}) test(t, {{'|', 4, 1}}, {{1, '1', 1, '1'}}) test(t, {{'^', 4, 1}}, {{1, '1', 1, '1'}}) test(t, {{':', 3, 2, 4, "uer"}}, {{1, '1', 1, '1'}}) test(t, {{'!', 18, 'answer'}}, {{1, '1', 1, '1'}}) test(t, {{'#', 18, 1}}, {{1, '1', 1, '1'}}) test(t, {{'=', 18, 
'qwerty'}}, {{1, '1', 1, '1'}}) -- selective test for good/bad multiple ops mix test(t, {{'+', 3, 1}, {'+', 4, 1}}, {{1, '1', 2, '1'}}) test(t, {{'-', 4, 1}, {'-', 3, 1}}, {{1, '1', 1, '1'}}) test(t, {{'#', 18, 1}, {'|', 3, 14}, {'!', 18, '!'}}, {{1, '1', 15, '1'}}) test(t, {{'^', 42, 42}, {':', 1, 1, 1, ''}, {'^', 3, 8}}, {{1, '1', 7, '1'}}) test(t, {{'&', 3, 1}, {'&', 2, 1}, {'&', 4, 1}}, {{1, '1', 1, '1'}}) -- broken ops must raise an exception and discarded 'dump ' .. anything_to_string(box.space.s:select{}) test(t, {{'&', 'a', 3}, {'+', 3, 3}}, {{1, '1', 1, '1'}}) test(t, {{'+', 3, 3}, {'&', 3, 'a'}}, {{1, '1', 1, '1'}}) test(t, {{'+', 3}, {'&', 3, 'a'}}, {{1, '1', 1, '1'}}) test(t, {{':', 3, 3}}, {{1, '1', 1, '1'}}) test(t, {{':', 3, 3, 3}}, {{1, '1', 1, '1'}}) test(t, {{'?', 3, 3}}, {{1, '1', 1, '1'}}) 'dump ' .. anything_to_string(box.space.s:select{}) -- ignoring ops for insert upsert test({2, '2', 2, '2'}, {{}}, {{1, '1', 1, '1'}}) test({3, '3', 3, '3'}, {{'+', 3, 3}}, {{1, '1', 1, '1'}, {3, '3', 3, '3'}}) -- adding random ops t[1] = 1 test(t, {{'+', 3, 3}, {'+', 4, 3}}, {{1, '1', 4, '1'}, {3, '3', 3, '3'}}) t[1] = 2 test(t, {{'-', 4, 1}}, {{1, '1', 4, '1'}, {2, '1', 1, 'qwerty'}, {3, '3', 3, '3'}}) t[1] = 3 test(t, {{':', 3, 3, 3, ''}, {'|', 3, 4}}, {{1, '1', 4, '1'}, {2, '1', 1, 'qwerty'}, {3, '3', 7, '3'}}) 'dump ' .. anything_to_string(box.space.s:select{}) -- (1) test_run:cmd("restart server default") test_run = require('test_run').new() engine = test_run:get_cfg('engine') test_run:cmd("setopt delimiter ';'") function anything_to_string(tab) if tab == nil then return 'nil' end local str = '[' local first_route = true local t = 0 for k,f in pairs(tab) do if not first_route then str = str .. ',' end first_route = false t = t + 1 if k ~= t then str = str .. k .. '=' end if type(f) == 'string' then str = str .. "'" .. f .. "'" elseif type (f) == 'number' then str = str .. tostring(f) elseif type (f) == 'table' or type (f) == 'cdata' then str = str .. 
anything_to_string(f) else str = str .. '?' end end str = str .. ']' return str end; test_run:cmd("setopt delimiter ''"); s = box.space.s 'dump ' .. anything_to_string(box.space.s:select{})-- compare with (1) visually! box.space.s:drop() -- -- gh-2104: vinyl: assert in tuple_upsert_squash -- s = box.schema.space.create('test', {engine = engine}) i = s:create_index('test') s:replace({1, 1, 1}) box.snapshot() s:upsert({1, 1}, {{'+', 2, 2}}) s:upsert({1, 1}, {{'+', 3, 4}}) s:select() s:drop() -- -- gh-2259: space:upsert() crashes in absence of indices -- s = box.schema.space.create('test', {engine = engine}) s:upsert({1}, {}) s:drop() -- -- gh-2461 - segfault on sparse or unordered keys. -- s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk', {parts = {1, 'unsigned', 3, 'unsigned'}}) s:upsert({100, 100, 100}, {{'+', 2, 200}}) s:upsert({200, 100, 200}, {{'+', 2, 300}}) s:upsert({300, 100, 300}, {{'+', 2, 400}}) pk:select{} s:drop() -- test for non-spased and non-sequential index s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk', {parts = {2, 'unsigned', 3, 'unsigned'}}) s:upsert({100, 100, 100}, {{'+', 1, 200}}) s:upsert({200, 100, 200}, {{'+', 1, 300}}) s:upsert({300, 100, 300}, {{'+', 1, 400}}) pk:select{} s:drop() s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk', {parts = {3, 'unsigned', 2, 'unsigned'}}) s:upsert({100, 100, 100}, {{'+', 1, 200}}) s:upsert({200, 100, 200}, {{'+', 1, 300}}) s:upsert({300, 100, 300}, {{'+', 1, 400}}) pk:select{} s:drop() s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk', {parts = {1, 'unsigned'}}) sec = s:create_index('sec', {parts = {4, 'unsigned', 2, 'unsigned', 3, 'unsigned'}}) s:replace{1, 301, 300, 300} sec:select{} s:upsert({1, 301, 300, 300}, {{'+', 2, 1}, {'+', 3, 1}, {'+', 4, 1}}) sec:select{} s:upsert({1, 302, 301, 301}, {{'+', 2, 1}, {'+', 3, 1}, {'+', 4, 1}}) sec:select{} s:upsert({2, 203, 200, 200}, {{'+', 
2, 1}, {'+', 3, 1}, {'+', 4, 1}}) sec:select{} s:replace{3, 302, 50, 100} sec:select{} sec:get{100, 302, 50} sec:get{200, 203, 200} sec:get{302, 303, 302} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/lua.result0000664000000000000000000000731213306560010020450 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- -- Lua select_reverse_range -- -- lua select_reverse_range() testing -- https://blueprints.launchpad.net/tarantool/+spec/backward-tree-index-iterator space = box.schema.space.create('tweedledum', { engine = engine }) --- ... tmp = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... tmp = space:create_index('range', { type = 'tree', parts = {2, 'unsigned', 1, 'unsigned'}, unique = true }) --- ... space:insert{0, 0} --- - [0, 0] ... space:insert{1, 0} --- - [1, 0] ... space:insert{2, 0} --- - [2, 0] ... space:insert{3, 0} --- - [3, 0] ... space:insert{4, 0} --- - [4, 0] ... space:insert{5, 0} --- - [5, 0] ... space:insert{6, 0} --- - [6, 0] ... space:insert{7, 0} --- - [7, 0] ... space:insert{8, 0} --- - [8, 0] ... space:insert{9, 0} --- - [9, 0] ... space.index['range']:select({}, { limit = 10, iterator = 'GE' }) --- - - [0, 0] - [1, 0] - [2, 0] - [3, 0] - [4, 0] - [5, 0] - [6, 0] - [7, 0] - [8, 0] - [9, 0] ... space.index['range']:select({}, { limit = 10, iterator = 'LE' }) --- - - [9, 0] - [8, 0] - [7, 0] - [6, 0] - [5, 0] - [4, 0] - [3, 0] - [2, 0] - [1, 0] - [0, 0] ... space.index['range']:select({}, { limit = 4, iterator = 'LE' }) --- - - [9, 0] - [8, 0] - [7, 0] - [6, 0] ... space:drop() --- ... -- -- Tests for box.index iterators -- space = box.schema.space.create('tweedledum', { engine = engine }) --- ... tmp = space:create_index('primary', { type = 'tree', parts = {1, 'string'}, unique = true }) --- ... tmp = space:create_index('i1', { type = 'tree', parts = {2, 'string', 3, 'string'}, unique = true }) --- ... 
pid = 1 --- ... tid = 999 --- ... inspector:cmd("setopt delimiter ';'") --- - true ... for sid = 1, 2 do for i = 1, 3 do space:insert{'pid_'..pid, 'sid_'..sid, 'tid_'..tid} pid = pid + 1 tid = tid - 1 end end; --- ... inspector:cmd("setopt delimiter ''"); --- - true ... index = space.index['i1'] --- ... t = {} --- ... for state, v in index:pairs('sid_1', { iterator = 'GE' }) do table.insert(t, v) end --- ... t --- - - ['pid_3', 'sid_1', 'tid_997'] - ['pid_2', 'sid_1', 'tid_998'] - ['pid_1', 'sid_1', 'tid_999'] - ['pid_6', 'sid_2', 'tid_994'] - ['pid_5', 'sid_2', 'tid_995'] - ['pid_4', 'sid_2', 'tid_996'] ... t = {} --- ... for state, v in index:pairs('sid_2', { iterator = 'LE' }) do table.insert(t, v) end --- ... t --- - - ['pid_4', 'sid_2', 'tid_996'] - ['pid_5', 'sid_2', 'tid_995'] - ['pid_6', 'sid_2', 'tid_994'] - ['pid_1', 'sid_1', 'tid_999'] - ['pid_2', 'sid_1', 'tid_998'] - ['pid_3', 'sid_1', 'tid_997'] ... t = {} --- ... for state, v in index:pairs('sid_1', { iterator = 'EQ' }) do table.insert(t, v) end --- ... t --- - - ['pid_3', 'sid_1', 'tid_997'] - ['pid_2', 'sid_1', 'tid_998'] - ['pid_1', 'sid_1', 'tid_999'] ... t = {} --- ... for state, v in index:pairs('sid_1', { iterator = 'REQ' }) do table.insert(t, v) end --- ... t --- - - ['pid_1', 'sid_1', 'tid_999'] - ['pid_2', 'sid_1', 'tid_998'] - ['pid_3', 'sid_1', 'tid_997'] ... t = {} --- ... for state, v in index:pairs('sid_2', { iterator = 'EQ' }) do table.insert(t, v) end --- ... t --- - - ['pid_6', 'sid_2', 'tid_994'] - ['pid_5', 'sid_2', 'tid_995'] - ['pid_4', 'sid_2', 'tid_996'] ... t = {} --- ... for state, v in index:pairs('sid_2', { iterator = 'REQ' }) do table.insert(t, v) end --- ... t --- - - ['pid_4', 'sid_2', 'tid_996'] - ['pid_5', 'sid_2', 'tid_995'] - ['pid_6', 'sid_2', 'tid_994'] ... t = {} --- ... index:pairs('sid_t', { iterator = 'wrong_iterator_type' }) --- - error: Unknown iterator type 'wrong_iterator_type' ... index = nil --- ... space:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/indices_any_type.result0000664000000000000000000003515713306560010023225 0ustar rootroot-- Test for unique indices -- Tests for TREE index type env = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... s0 = box.schema.space.create('my_space1', { engine = engine }) --- ... i0 = s0:create_index('my_space1_idx1', {type='TREE', parts={1, 'number'}, unique=true}) --- ... s0:insert({10}) --- - [10] ... s0:insert({11}) --- - [11] ... s0:insert({12}) --- - [12] ... s0:insert({13}) --- - [13] ... s0:select{} --- - - [10] - [11] - [12] - [13] ... s0:insert({3}) --- - [3] ... s0:insert({4}) --- - [4] ... s0:insert({5}) --- - [5] ... s0:insert({6}) --- - [6] ... s0:select{} --- - - [3] - [4] - [5] - [6] - [10] - [11] - [12] - [13] ... s0:insert({-5}) --- - [-5] ... s0:insert({-6}) --- - [-6] ... s0:insert({-7}) --- - [-7] ... s0:insert({-8}) --- - [-8] ... s0:select{} --- - - [-8] - [-7] - [-6] - [-5] - [3] - [4] - [5] - [6] - [10] - [11] - [12] - [13] ... s0:insert({-10}) --- - [-10] ... s0:insert({-11}) --- - [-11] ... s0:insert({-12}) --- - [-12] ... s0:insert({-13}) --- - [-13] ... s0:select{} --- - - [-13] - [-12] - [-11] - [-10] - [-8] - [-7] - [-6] - [-5] - [3] - [4] - [5] - [6] - [10] - [11] - [12] - [13] ... s0:insert({3.5}) --- - [3.5] ... s0:insert({4.5}) --- - [4.5] ... s0:insert({5.5}) --- - [5.5] ... s0:select{} --- - - [-13] - [-12] - [-11] - [-10] - [-8] - [-7] - [-6] - [-5] - [3] - [3.5] - [4] - [4.5] - [5] - [5.5] - [6] - [10] - [11] - [12] - [13] ... s0:insert({-3.5}) --- - [-3.5] ... s0:insert({-4.5}) --- - [-4.5] ... s0:insert({-5.5}) --- - [-5.5] ... s0:select{} --- - - [-13] - [-12] - [-11] - [-10] - [-8] - [-7] - [-6] - [-5.5] - [-5] - [-4.5] - [-3.5] - [3] - [3.5] - [4] - [4.5] - [5] - [5.5] - [6] - [10] - [11] - [12] - [13] ... s0:drop() --- ... s1 = box.schema.space.create('my_space2', { engine = engine }) --- ... 
i1 = s1:create_index('my_space2_idx1', {type='TREE', parts={1, 'scalar'}, unique=true}) --- ... s1:insert({10}) --- - [10] ... s1:insert({11}) --- - [11] ... s1:insert({12}) --- - [12] ... s1:insert({13}) --- - [13] ... s1:select{} --- - - [10] - [11] - [12] - [13] ... s1:insert({3}) --- - [3] ... s1:insert({4}) --- - [4] ... s1:insert({5}) --- - [5] ... s1:insert({6}) --- - [6] ... s1:select{} --- - - [3] - [4] - [5] - [6] - [10] - [11] - [12] - [13] ... s1:insert({'ffff'}) --- - ['ffff'] ... s1:insert({'gggg'}) --- - ['gggg'] ... s1:insert({'hhhh'}) --- - ['hhhh'] ... s1:select{} --- - - [3] - [4] - [5] - [6] - [10] - [11] - [12] - [13] - ['ffff'] - ['gggg'] - ['hhhh'] ... s1:insert({'aaaa'}) --- - ['aaaa'] ... s1:insert({'bbbb'}) --- - ['bbbb'] ... s1:insert({'cccc'}) --- - ['cccc'] ... s1:select{} --- - - [3] - [4] - [5] - [6] - [10] - [11] - [12] - [13] - ['aaaa'] - ['bbbb'] - ['cccc'] - ['ffff'] - ['gggg'] - ['hhhh'] ... s1:insert({3.5}) --- - [3.5] ... s1:insert({4.5}) --- - [4.5] ... s1:insert({5.5}) --- - [5.5] ... s1:select{} --- - - [3] - [3.5] - [4] - [4.5] - [5] - [5.5] - [6] - [10] - [11] - [12] - [13] - ['aaaa'] - ['bbbb'] - ['cccc'] - ['ffff'] - ['gggg'] - ['hhhh'] ... s1:insert({-3.5}) --- - [-3.5] ... s1:insert({-4.5}) --- - [-4.5] ... s1:insert({-5.5}) --- - [-5.5] ... s1:select{} --- - - [-5.5] - [-4.5] - [-3.5] - [3] - [3.5] - [4] - [4.5] - [5] - [5.5] - [6] - [10] - [11] - [12] - [13] - ['aaaa'] - ['bbbb'] - ['cccc'] - ['ffff'] - ['gggg'] - ['hhhh'] ... s1:insert({true}) --- - [true] ... s1:insert({false}) --- - [false] ... s1:insert({1}) --- - [1] ... s1:insert({0}) --- - [0] ... s1:insert({'!!!!'}) --- - ['!!!!'] ... s1:insert({'????'}) --- - ['????'] ... s1:select{} --- - - [false] - [true] - [-5.5] - [-4.5] - [-3.5] - [0] - [1] - [3] - [3.5] - [4] - [4.5] - [5] - [5.5] - [6] - [10] - [11] - [12] - [13] - ['!!!!'] - ['????'] - ['aaaa'] - ['bbbb'] - ['cccc'] - ['ffff'] - ['gggg'] - ['hhhh'] ... s1:drop() --- ... 
s2 = box.schema.space.create('my_space3', { engine = engine }) --- ... i2_1 = s2:create_index('my_space3_idx1', {type='TREE', parts={1, 'scalar', 2, 'integer', 3, 'number'}, unique=true}) --- ... s2:insert({10, 1, -1, 'z', true}) --- - [10, 1, -1, 'z', true] ... s2:insert({11, 2, 2, 'g', false}) --- - [11, 2, 2, 'g', false] ... s2:insert({12, 3, -3, 'e', -100.5}) --- - [12, 3, -3, 'e', -100.5] ... s2:insert({13, 4, 4, 'h', 200}) --- - [13, 4, 4, 'h', 200] ... s2:select{} --- - - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] ... s2:insert({3, 5, -5, 'w', 'strstr'}) --- - [3, 5, -5, 'w', 'strstr'] ... s2:insert({4, 6, 6, 'q', ';;;;'}) --- - [4, 6, 6, 'q', ';;;;'] ... s2:insert({5, 7, -7, 'c', '???'}) --- - [5, 7, -7, 'c', '???'] ... s2:insert({6, 8, 8, 'k', '!!!'}) --- - [6, 8, 8, 'k', '!!!'] ... s2:select{} --- - - [3, 5, -5, 'w', 'strstr'] - [4, 6, 6, 'q', ';;;;'] - [5, 7, -7, 'c', '???'] - [6, 8, 8, 'k', '!!!'] - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] ... s2:insert({'ffff', 9, -9, 'm', '123'}) --- - ['ffff', 9, -9, 'm', '123'] ... s2:insert({'gggg', 10, 10, 'r', '456'}) --- - ['gggg', 10, 10, 'r', '456'] ... s2:insert({'hhhh', 11, -11, 'i', 55555}) --- - ['hhhh', 11, -11, 'i', 55555] ... s2:insert({'hhhh', 11, -10, 'i', 55556}) --- - ['hhhh', 11, -10, 'i', 55556] ... s2:insert({'hhhh', 11, -12, 'i', 55554}) --- - ['hhhh', 11, -12, 'i', 55554] ... s2:select{} --- - - [3, 5, -5, 'w', 'strstr'] - [4, 6, 6, 'q', ';;;;'] - [5, 7, -7, 'c', '???'] - [6, 8, 8, 'k', '!!!'] - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] - ['ffff', 9, -9, 'm', '123'] - ['gggg', 10, 10, 'r', '456'] - ['hhhh', 11, -12, 'i', 55554] - ['hhhh', 11, -11, 'i', 55555] - ['hhhh', 11, -10, 'i', 55556] ... s2:insert({'aaaa', 12, 12, 'o', 333}) --- - ['aaaa', 12, 12, 'o', 333] ... 
s2:insert({'bbbb', 13, -13, 'p', '123'}) --- - ['bbbb', 13, -13, 'p', '123'] ... s2:insert({'cccc', 14, 14, 'l', 123}) --- - ['cccc', 14, 14, 'l', 123] ... s2:select{} --- - - [3, 5, -5, 'w', 'strstr'] - [4, 6, 6, 'q', ';;;;'] - [5, 7, -7, 'c', '???'] - [6, 8, 8, 'k', '!!!'] - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] - ['aaaa', 12, 12, 'o', 333] - ['bbbb', 13, -13, 'p', '123'] - ['cccc', 14, 14, 'l', 123] - ['ffff', 9, -9, 'm', '123'] - ['gggg', 10, 10, 'r', '456'] - ['hhhh', 11, -12, 'i', 55554] - ['hhhh', 11, -11, 'i', 55555] - ['hhhh', 11, -10, 'i', 55556] ... s2:insert({3.5, 15, -15, 'n', 500}) --- - [3.5, 15, -15, 'n', 500] ... s2:insert({4.5, 16, 16, 'b', 'ghtgtg'}) --- - [4.5, 16, 16, 'b', 'ghtgtg'] ... s2:insert({5.5, 17, -17, 'v', '"""""'}) --- - [5.5, 17, -17, 'v', '"""""'] ... s2:select{} --- - - [3, 5, -5, 'w', 'strstr'] - [3.5, 15, -15, 'n', 500] - [4, 6, 6, 'q', ';;;;'] - [4.5, 16, 16, 'b', 'ghtgtg'] - [5, 7, -7, 'c', '???'] - [5.5, 17, -17, 'v', '"""""'] - [6, 8, 8, 'k', '!!!'] - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] - ['aaaa', 12, 12, 'o', 333] - ['bbbb', 13, -13, 'p', '123'] - ['cccc', 14, 14, 'l', 123] - ['ffff', 9, -9, 'm', '123'] - ['gggg', 10, 10, 'r', '456'] - ['hhhh', 11, -12, 'i', 55554] - ['hhhh', 11, -11, 'i', 55555] - ['hhhh', 11, -10, 'i', 55556] ... s2:insert({-3.5, 18, 18, 'x', '---'}) --- - [-3.5, 18, 18, 'x', '---'] ... s2:insert({-4.5, 19, -19, 'a', 56.789}) --- - [-4.5, 19, -19, 'a', 56.789] ... s2:insert({-5.5, 20, 20, 'f', -138.4}) --- - [-5.5, 20, 20, 'f', -138.4] ... 
s2:select{} --- - - [-5.5, 20, 20, 'f', -138.4] - [-4.5, 19, -19, 'a', 56.789] - [-3.5, 18, 18, 'x', '---'] - [3, 5, -5, 'w', 'strstr'] - [3.5, 15, -15, 'n', 500] - [4, 6, 6, 'q', ';;;;'] - [4.5, 16, 16, 'b', 'ghtgtg'] - [5, 7, -7, 'c', '???'] - [5.5, 17, -17, 'v', '"""""'] - [6, 8, 8, 'k', '!!!'] - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] - ['aaaa', 12, 12, 'o', 333] - ['bbbb', 13, -13, 'p', '123'] - ['cccc', 14, 14, 'l', 123] - ['ffff', 9, -9, 'm', '123'] - ['gggg', 10, 10, 'r', '456'] - ['hhhh', 11, -12, 'i', 55554] - ['hhhh', 11, -11, 'i', 55555] - ['hhhh', 11, -10, 'i', 55556] ... s2:insert({true, 21, -21, 'y', 50}) --- - [true, 21, -21, 'y', 50] ... s2:insert({false, 22, 22, 's', 60}) --- - [false, 22, 22, 's', 60] ... s2:insert({'!!!!', 23, -23, 'u', 0}) --- - ['!!!!', 23, -23, 'u', 0] ... s2:insert({'????', 24, 24, 'j', 70}) --- - ['????', 24, 24, 'j', 70] ... s2:select{} --- - - [false, 22, 22, 's', 60] - [true, 21, -21, 'y', 50] - [-5.5, 20, 20, 'f', -138.4] - [-4.5, 19, -19, 'a', 56.789] - [-3.5, 18, 18, 'x', '---'] - [3, 5, -5, 'w', 'strstr'] - [3.5, 15, -15, 'n', 500] - [4, 6, 6, 'q', ';;;;'] - [4.5, 16, 16, 'b', 'ghtgtg'] - [5, 7, -7, 'c', '???'] - [5.5, 17, -17, 'v', '"""""'] - [6, 8, 8, 'k', '!!!'] - [10, 1, -1, 'z', true] - [11, 2, 2, 'g', false] - [12, 3, -3, 'e', -100.5] - [13, 4, 4, 'h', 200] - ['!!!!', 23, -23, 'u', 0] - ['????', 24, 24, 'j', 70] - ['aaaa', 12, 12, 'o', 333] - ['bbbb', 13, -13, 'p', '123'] - ['cccc', 14, 14, 'l', 123] - ['ffff', 9, -9, 'm', '123'] - ['gggg', 10, 10, 'r', '456'] - ['hhhh', 11, -12, 'i', 55554] - ['hhhh', 11, -11, 'i', 55555] - ['hhhh', 11, -10, 'i', 55556] ... s2.index.my_space3_idx2:select{} --- - error: '[string "return s2.index.my_space3_idx2:select{} "]:1: attempt to index field ''my_space3_idx2'' (a nil value)' ... s2:drop() --- ... -- Tests for NULL mp = require('msgpack') --- ... s4 = box.schema.space.create('my_space5', { engine = engine }) --- ... 
i4_1 = s4:create_index('my_space5_idx1', {type='TREE', parts={1, 'scalar', 2, 'integer', 3, 'number'}, unique=true}) --- ... s4:insert({mp.NULL, 1, 1, 1}) --- - error: 'Tuple field 1 type does not match one required by operation: expected scalar' ... s4:insert({2, mp.NULL, 2, 2}) -- all nulls must fail --- - error: 'Tuple field 2 type does not match one required by operation: expected integer' ... s4:insert({3, 3, mp.NULL, 3}) --- - error: 'Tuple field 3 type does not match one required by operation: expected number' ... s4:insert({4, 4, 4, mp.NULL}) --- - [4, 4, 4, null] ... s4:drop() --- ... -- Test for nonunique indices s5 = box.schema.space.create('my_space6', { engine = engine }) --- ... i5_1 = s5:create_index('my_space6_idx1', {type='TREE', parts={1, 'unsigned'}, unique=true}) --- ... i5_2 = s5:create_index('my_space6_idx2', {type='TREE', parts={2, 'scalar'}, unique=false}) --- ... test_run:cmd("setopt delimiter ';'"); --- - true ... function less(a, b) if type(a[2]) ~= type(b[2]) then return type(a[2]) < type(b[2]) end if type(a[2]) == 'boolean' then if a[2] == false and b[2] == true then return true end end if a[2] == b[2] then return a[1] < b[1] end return a[2] < b[2] end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... function sort(t) table.sort(t, less) return t end --- ... s5:insert({1, "123"}) --- - [1, '123'] ... s5:insert({2, "123"}) --- - [2, '123'] ... s5:insert({3, "123"}) --- - [3, '123'] ... s5:insert({4, 123}) --- - [4, 123] ... s5:insert({5, 123}) --- - [5, 123] ... s5:insert({6, true}) --- - [6, true] ... s5:insert({7, true}) --- - [7, true] ... s5:insert({8, mp.NULL}) -- must fail --- - error: 'Tuple field 2 type does not match one required by operation: expected scalar' ... s5:insert({9, -40.5}) --- - [9, -40.5] ... s5:insert({10, -39.5}) --- - [10, -39.5] ... s5:insert({11, -38.5}) --- - [11, -38.5] ... s5:insert({12, 100.5}) --- - [12, 100.5] ... 
s5:select{} --- - - [1, '123'] - [2, '123'] - [3, '123'] - [4, 123] - [5, 123] - [6, true] - [7, true] - [9, -40.5] - [10, -39.5] - [11, -38.5] - [12, 100.5] ... sort(i5_2:select({123})) --- - - [4, 123] - [5, 123] ... sort(i5_2:select({"123"})) --- - - [1, '123'] - [2, '123'] - [3, '123'] ... sort(i5_2:select({true})) --- - - [6, true] - [7, true] ... sort(i5_2:select({false})) --- - [] ... sort(i5_2:select({true})) --- - - [6, true] - [7, true] ... sort(i5_2:select({-38.5})) --- - - [11, -38.5] ... sort(i5_2:select({-40}, {iterator = 'GE'})) --- - - [10, -39.5] - [11, -38.5] - [12, 100.5] - [4, 123] - [5, 123] - [1, '123'] - [2, '123'] - [3, '123'] ... s5:drop() --- ... -- gh-1897 Crash on index field type 'any' space = box.schema.space.create('test', {engine = engine}) --- ... pk = space:create_index('primary', { parts = {1, 'any'} }) -- --- - error: 'Can''t create or modify index ''primary'' in space ''test'': field type ''any'' is not supported' ... space:insert({1}) -- must fail --- - error: 'No index #0 is defined in space ''test''' ... space:insert({2}) -- --- - error: 'No index #0 is defined in space ''test''' ... space:drop() --- ... -- gh-1701 allow NaN rnd = math.random(2147483648) --- ... ffi = require('ffi') --- ... ffi.cdef(string.format("union nan_%s { double d; uint64_t i; }", rnd)) --- ... nan_ffi = ffi.new(string.format('union nan_%s', rnd)) --- ... nan_ffi.i = 0x7ff4000000000000 --- ... sNaN = nan_ffi.d --- ... nan_ffi.i = 0x7ff8000000000000 --- ... qNaN = nan_ffi.d --- ... -- basic test space = box.schema.space.create('test', { engine = engine }) --- ... pk = space:create_index('primary', {parts = {1, 'number'}}) --- ... space:replace({sNaN, 'signaling NaN'}) --- - [nan, 'signaling NaN'] ... space:replace({qNaN, 'quiet NaN'}) --- - [nan, 'quiet NaN'] ... space:get{sNaN} --- - [nan, 'signaling NaN'] ... space:get{qNaN} --- - [nan, 'quiet NaN'] ... space:get{1/0} --- ... space:get{1/0 - 1/0} --- - [nan, 'quiet NaN'] ... 
space:get{0/0} --- - [nan, 'quiet NaN'] ... space:select{} --- - - [nan, 'signaling NaN'] - [nan, 'quiet NaN'] ... space:truncate() --- ... -- test ordering of special values space:replace({1/0, '+inf'}) --- - [inf, '+inf'] ... space:replace({sNaN, 'snan'}) --- - [nan, 'snan'] ... space:replace({100}) --- - [100] ... space:replace({-1/0, '-inf'}) --- - [-inf, '-inf'] ... space:replace({50}) --- - [50] ... space:replace({qNaN, 'qnan'}) --- - [nan, 'qnan'] ... pk:get{100/0} --- - [inf, '+inf'] ... pk:get{sNaN} --- - [nan, 'snan'] ... pk:get{100} --- - [100] ... pk:get{-100/0} --- - [-inf, '-inf'] ... pk:get{50} --- - [50] ... pk:get{qNaN} --- - [nan, 'qnan'] ... pk:select({sNaN}, {iterator = 'GE'}) --- - - [nan, 'snan'] - [nan, 'qnan'] - [-inf, '-inf'] - [50] - [100] - [inf, '+inf'] ... pk:select({1/0}, {iterator = 'LT'}) --- - - [100] - [50] - [-inf, '-inf'] - [nan, 'qnan'] - [nan, 'snan'] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/conflict.test.lua0000664000000000000000000000005113306560010021702 0ustar rootroot dofile('conflict.lua') test_conflict() tarantool_1.9.1.26.g63eb81e3c/test/engine/replace.test.lua0000664000000000000000000000440313306560010021521 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- replace (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t _ = space:replace({tostring(7)}) space:drop() -- replace (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:replace({key}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t _ = space:replace({7}) space:drop() -- replace multi-part (num, num) space = 
box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:replace({key, key}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t _ = space:replace({7, 7}) space:drop() -- replace with box.tuple.new space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:replace({tostring(key)}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t _ = space:replace(box.tuple.new{tostring(7)}) space:drop() -- replace multiple indices space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) space:replace({1, 1}) space:replace({1, 2}) index1:select{} index2:select{} space:drop() space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) index3 = space:create_index('third', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) space:insert({1, 1, 1}) space:insert({2, 2, 2}) space:insert({3, 3, 3}) space:select{} space:replace({1, 2, 3}) index1:select{} index2:select{} index3:select{} space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/savepoint.result0000664000000000000000000001772313306565107021722 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... -- gh-2025 box.savepoint s1 = nil --- ... s1 = box.savepoint() --- - error: Can not set a savepoint in absence of active transaction ... box.rollback_to_savepoint(s1) --- - error: 'builtin/box/schema.lua:298: Usage: box.rollback_to_savepoint(savepoint)' ... 
box.begin() s1 = box.savepoint() --- ... box.rollback() --- ... box.begin() box.rollback_to_savepoint(s1) --- - error: 'Can not rollback to savepoint: the savepoint does not exist' ... box.rollback() --- ... engine = test_run:get_cfg('engine') --- ... -- Test many savepoints on each statement. s = box.schema.space.create('test', {engine = engine}) --- ... p = s:create_index('pk') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s:replace{1} save1 = box.savepoint() s:replace{2} save2 = box.savepoint() s:replace{3} save3 = box.savepoint() s:replace{4} select1 = s:select{} box.rollback_to_savepoint(save3) select2 = s:select{} box.rollback_to_savepoint(save2) select3 = s:select{} box.rollback_to_savepoint(save1) select4 = s:select{} box.commit() test_run:cmd("setopt delimiter ''"); --- ... select1 --- - - [1] - [2] - [3] - [4] ... select2 --- - - [1] - [2] - [3] ... select3 --- - - [1] - [2] ... select4 --- - - [1] ... s:truncate() --- ... -- Test rollback to savepoint on the current statement. test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s:replace{1} s:replace{2} s1 = box.savepoint() box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); --- ... s:select{} --- - - [1] - [2] ... s:truncate() --- ... -- Test rollback to savepoint after multiple statements. test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s:replace{1} s1 = box.savepoint() s:replace{2} s:replace{3} s:replace{4} box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); --- ... s:select{} --- - - [1] ... s:truncate() --- ... -- Test rollback to savepoint after failed statement. test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s:replace{1} s1 = box.savepoint() s:replace{3} pcall(s.replace, s, {'kek'}) s:replace{4} box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); --- ... s:select{} --- - - [1] ... s:truncate() --- ... 
-- Test rollback to savepoint inside the trigger. select1 = nil --- ... select2 = nil --- ... select3 = nil --- ... select4 = nil --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function on_replace(old, new) if new[1] > 10 then return end select1 = s:select{} s1 = box.savepoint() s:replace{100} box.rollback_to_savepoint(s1) select2 = s:select{} end; --- ... _ = s:on_replace(on_replace); --- ... box.begin() s:replace{1} select3 = select1 select4 = select2 s:replace{2} box.commit() test_run:cmd("setopt delimiter ''"); --- ... select4 --- - - [1] ... select3 --- - - [1] ... select2 --- - - [1] - [2] ... select1 --- - - [1] - [2] ... s:select{} --- - - [1] - [2] ... s:drop() --- ... -- Test rollback to savepoint, created in trigger, -- from main tx stream. Fail, because of different substatement -- levels. s = box.schema.space.create('test', {engine = engine}) --- ... p = s:create_index('pk') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... function on_replace2(old, new) if new[1] ~= 1 then return end s1 = box.savepoint() s:replace{100} end; --- ... _ = s:on_replace(on_replace2); --- ... box.begin() s:replace{1} select1 = s:select{} s:replace{2} s:replace{3} select2 = s:select{} ok1, errmsg1 = pcall(box.rollback_to_savepoint, s1) select3 = s:select{} s:replace{4} select4 = s:select{} box.commit() test_run:cmd("setopt delimiter ''"); --- ... select1 --- - - [1] - [100] ... select2 --- - - [1] - [2] - [3] - [100] ... select3 --- - - [1] - [2] - [3] - [100] ... select4 --- - - [1] - [2] - [3] - [4] - [100] ... ok1 --- - false ... errmsg1 --- - 'Can not rollback to savepoint: the savepoint does not exist' ... s:drop() --- ... -- Test incorrect savepoints usage inside a transaction. s = box.schema.space.create('test', {engine = engine}) --- ... p = s:create_index('pk') --- ... test_run:cmd("setopt delimiter ';'") --- - true ... 
box.begin() s1 = box.savepoint() txn_id = s1.txn_id s:replace{1} ok1, errmsg1 = pcall(box.rollback_to_savepoint) ok2, errmsg2 = pcall(box.rollback_to_savepoint, {txn_id=txn_id}) ok3, errmsg3 = pcall(box.rollback_to_savepoint, {txn_id=txn_id, csavepoint=100}) fake_cdata = box.tuple.new({txn_id}) ok4, errmsg4 = pcall(box.rollback_to_savepoint, {txn_id=txn_id, csavepoint=fake_cdata}) ok5, errmsg5 = pcall(box.rollback_to_savepoint, {txn_id=fake_cdata, csavepoint=s1.csavepoint}) box.commit() test_run:cmd("setopt delimiter ''"); --- ... ok1, errmsg1 --- - false - 'builtin/box/schema.lua:298: Usage: box.rollback_to_savepoint(savepoint)' ... ok2, errmsg2 --- - false - 'builtin/box/schema.lua:298: Usage: box.rollback_to_savepoint(savepoint)' ... ok3, errmsg3 --- - false - 'builtin/box/schema.lua:298: Usage: box.rollback_to_savepoint(savepoint)' ... ok4, errmsg4 --- - false - 'builtin/box/schema.lua:298: Usage: box.rollback_to_savepoint(savepoint)' ... ok5, errmsg5 --- - false - 'builtin/box/schema.lua:298: Usage: box.rollback_to_savepoint(savepoint)' ... s:select{} --- - - [1] ... -- Rollback to released savepoint. box.begin() ok1, errmsg1 = pcall(box.rollback_to_savepoint, s1) box.commit() --- ... ok1, errmsg1 --- - false - 'Can not rollback to savepoint: the savepoint does not exist' ... s:select{} --- - - [1] ... s:truncate() --- ... -- Rollback several savepoints at once. test_run:cmd("setopt delimiter ';'") --- - true ... 
box.begin() s0 = box.savepoint() s:replace{1} s1 = box.savepoint() s:replace{2} s2 = box.savepoint() s:replace{3} s3 = box.savepoint() s:replace{4} s4 = box.savepoint() s:replace{5} select1 = s:select{} box.rollback_to_savepoint(s2) select2 = s:select{} ok1, errmsg1 = pcall(box.rollback_to_savepoint, s3) select3 = s:select{} s5 = box.savepoint() s:replace{6} s6 = box.savepoint() s:replace{7} select4 = s:select{} ok2, errmsg2 = pcall(box.rollback_to_savepoint, s4) select5 = s:select{} box.rollback_to_savepoint(s6) select6 = s:select{} box.rollback_to_savepoint(s0) select7 = s:select{} box.rollback() test_run:cmd("setopt delimiter ''"); --- ... select1 --- - - [1] - [2] - [3] - [4] - [5] ... select2 --- - - [1] - [2] ... select3 --- - - [1] - [2] ... select4 --- - - [1] - [2] - [6] - [7] ... select5 --- - - [1] - [2] - [6] - [7] ... select6 --- - - [1] - [2] - [6] ... select7 --- - [] ... ok1, errmsg1 --- - false - 'Can not rollback to savepoint: the savepoint does not exist' ... ok2, errmsg2 --- - false - 'Can not rollback to savepoint: the savepoint does not exist' ... s:truncate() --- ... -- Rollback to the same substatement level, but from different -- context. test_run:cmd("setopt delimiter ';'") --- - true ... function on_replace3(old_tuple, new_tuple) if new_tuple[2] == 'create savepoint' then s1 = box.savepoint() elseif new_tuple[2] == 'rollback to savepoint' then box.rollback_to_savepoint(s1) end end; --- ... _ = s:on_replace(on_replace3); --- ... box.begin() s:replace{1, 'create savepoint'} s:replace{2} s:replace{3} s:replace{4, 'rollback to savepoint'} s:replace{5} box.commit() test_run:cmd("setopt delimiter ''"); --- ... s:select{} --- - - [1, 'create savepoint'] - [5] ... s:truncate() --- ... -- Several savepoints on a same statement. test_run:cmd("setopt delimiter ';'") --- - true ... 
box.begin() s:replace{1} s1 = box.savepoint() s2 = box.savepoint() s3 = box.savepoint() s:replace{2} box.rollback_to_savepoint(s3) box.rollback_to_savepoint(s2) box.rollback_to_savepoint(s1) box.commit() test_run:cmd("setopt delimiter ''"); --- ... s:select{} --- - - [1] ... s:truncate() --- ... -- Test multiple rollback of a same savepoint. test_run:cmd("setopt delimiter ';'") --- - true ... box.begin() s1 = box.savepoint() s:replace{1} box.rollback_to_savepoint(s1) s:replace{2} box.rollback_to_savepoint(s1) s:replace{3} box.commit() test_run:cmd("setopt delimiter ''"); --- ... s:select{} --- - - [3] ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_drop.result0000664000000000000000000001322513306560010022360 0ustar rootroot-- recover dropped spaces env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... for key = 1, 351 do space:insert({key}) end --- ... space:drop() --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... for key = 500, 1000 do space:insert({key}) end --- ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... 
index:select({}, {iterator = box.index.ALL}) --- - - [500] - [501] - [502] - [503] - [504] - [505] - [506] - [507] - [508] - [509] - [510] - [511] - [512] - [513] - [514] - [515] - [516] - [517] - [518] - [519] - [520] - [521] - [522] - [523] - [524] - [525] - [526] - [527] - [528] - [529] - [530] - [531] - [532] - [533] - [534] - [535] - [536] - [537] - [538] - [539] - [540] - [541] - [542] - [543] - [544] - [545] - [546] - [547] - [548] - [549] - [550] - [551] - [552] - [553] - [554] - [555] - [556] - [557] - [558] - [559] - [560] - [561] - [562] - [563] - [564] - [565] - [566] - [567] - [568] - [569] - [570] - [571] - [572] - [573] - [574] - [575] - [576] - [577] - [578] - [579] - [580] - [581] - [582] - [583] - [584] - [585] - [586] - [587] - [588] - [589] - [590] - [591] - [592] - [593] - [594] - [595] - [596] - [597] - [598] - [599] - [600] - [601] - [602] - [603] - [604] - [605] - [606] - [607] - [608] - [609] - [610] - [611] - [612] - [613] - [614] - [615] - [616] - [617] - [618] - [619] - [620] - [621] - [622] - [623] - [624] - [625] - [626] - [627] - [628] - [629] - [630] - [631] - [632] - [633] - [634] - [635] - [636] - [637] - [638] - [639] - [640] - [641] - [642] - [643] - [644] - [645] - [646] - [647] - [648] - [649] - [650] - [651] - [652] - [653] - [654] - [655] - [656] - [657] - [658] - [659] - [660] - [661] - [662] - [663] - [664] - [665] - [666] - [667] - [668] - [669] - [670] - [671] - [672] - [673] - [674] - [675] - [676] - [677] - [678] - [679] - [680] - [681] - [682] - [683] - [684] - [685] - [686] - [687] - [688] - [689] - [690] - [691] - [692] - [693] - [694] - [695] - [696] - [697] - [698] - [699] - [700] - [701] - [702] - [703] - [704] - [705] - [706] - [707] - [708] - [709] - [710] - [711] - [712] - [713] - [714] - [715] - [716] - [717] - [718] - [719] - [720] - [721] - [722] - [723] - [724] - [725] - [726] - [727] - [728] - [729] - [730] - [731] - [732] - [733] - [734] - [735] - [736] - [737] - [738] - [739] - [740] - [741] - [742] - 
[743] - [744] - [745] - [746] - [747] - [748] - [749] - [750] - [751] - [752] - [753] - [754] - [755] - [756] - [757] - [758] - [759] - [760] - [761] - [762] - [763] - [764] - [765] - [766] - [767] - [768] - [769] - [770] - [771] - [772] - [773] - [774] - [775] - [776] - [777] - [778] - [779] - [780] - [781] - [782] - [783] - [784] - [785] - [786] - [787] - [788] - [789] - [790] - [791] - [792] - [793] - [794] - [795] - [796] - [797] - [798] - [799] - [800] - [801] - [802] - [803] - [804] - [805] - [806] - [807] - [808] - [809] - [810] - [811] - [812] - [813] - [814] - [815] - [816] - [817] - [818] - [819] - [820] - [821] - [822] - [823] - [824] - [825] - [826] - [827] - [828] - [829] - [830] - [831] - [832] - [833] - [834] - [835] - [836] - [837] - [838] - [839] - [840] - [841] - [842] - [843] - [844] - [845] - [846] - [847] - [848] - [849] - [850] - [851] - [852] - [853] - [854] - [855] - [856] - [857] - [858] - [859] - [860] - [861] - [862] - [863] - [864] - [865] - [866] - [867] - [868] - [869] - [870] - [871] - [872] - [873] - [874] - [875] - [876] - [877] - [878] - [879] - [880] - [881] - [882] - [883] - [884] - [885] - [886] - [887] - [888] - [889] - [890] - [891] - [892] - [893] - [894] - [895] - [896] - [897] - [898] - [899] - [900] - [901] - [902] - [903] - [904] - [905] - [906] - [907] - [908] - [909] - [910] - [911] - [912] - [913] - [914] - [915] - [916] - [917] - [918] - [919] - [920] - [921] - [922] - [923] - [924] - [925] - [926] - [927] - [928] - [929] - [930] - [931] - [932] - [933] - [934] - [935] - [936] - [937] - [938] - [939] - [940] - [941] - [942] - [943] - [944] - [945] - [946] - [947] - [948] - [949] - [950] - [951] - [952] - [953] - [954] - [955] - [956] - [957] - [958] - [959] - [960] - [961] - [962] - [963] - [964] - [965] - [966] - [967] - [968] - [969] - [970] - [971] - [972] - [973] - [974] - [975] - [976] - [977] - [978] - [979] - [980] - [981] - [982] - [983] - [984] - [985] - [986] - [987] - [988] - [989] - [990] - [991] - [992] - 
[993] - [994] - [995] - [996] - [997] - [998] - [999] - [1000] ... space:drop() --- ... test_run:cmd('restart server default with cleanup=1') tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_snapshot_wal.test.lua0000664000000000000000000000142713306560010024340 0ustar rootrootenv = require('test_run') test_run = env.new() -- write data recover from latest snapshot and logs test_run:cmd("restart server default") engine = test_run:get_cfg('engine') space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') space:insert({0}) box.snapshot() space:insert({1001}) test_run:cmd("restart server default") space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) for key = 1, 351 do space:insert({key}) end box.snapshot() for key = 352, 1000 do space:insert({key}) end test_run:cmd("restart server default") space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) space:drop() test_run:cmd("restart server default with cleanup=1") tarantool_1.9.1.26.g63eb81e3c/test/engine/tree.result0000664000000000000000000014170113306560010020627 0ustar rootroottest_run = require('test_run').new() --- ... engine = test_run:get_cfg('engine') --- ... test_run:cmd("push filter '"..engine.."' to 'engine'") --- - true ... -------------------------------------------------------------------------------- -- Prefix search (test partially specified keys in TREE indexes) -------------------------------------------------------------------------------- s1 = box.schema.space.create('tree_prefix_search', { engine = engine }) --- ... _ = s1:create_index('primary', { type = 'tree', parts = {1, 'string'}}) --- ... _ = s1:replace{''} --- ... _ = s1:replace{'abcd'} --- ... _ = s1:replace{'abcda'} --- ... _ = s1:replace{'abcda_'} --- ... _ = s1:replace{'abcdb'} --- ... _ = s1:replace{'abcdb_'} --- ... _ = s1:replace{'abcdb__'} --- ... _ = s1:replace{'abcdb___'} --- ... 
_ = s1:replace{'abcdc'} --- ... _ = s1:replace{'abcdc_'} --- ... s1.index.primary:select('abcdb', { iterator = 'GE' }) --- - - ['abcdb'] - ['abcdb_'] - ['abcdb__'] - ['abcdb___'] - ['abcdc'] - ['abcdc_'] ... s1.index.primary:select('', { iterator = 'GE' }) --- - - [''] - ['abcd'] - ['abcda'] - ['abcda_'] - ['abcdb'] - ['abcdb_'] - ['abcdb__'] - ['abcdb___'] - ['abcdc'] - ['abcdc_'] ... s1.index.primary:select('', { iterator = 'GT' }) --- - - ['abcd'] - ['abcda'] - ['abcda_'] - ['abcdb'] - ['abcdb_'] - ['abcdb__'] - ['abcdb___'] - ['abcdc'] - ['abcdc_'] ... s1.index.primary:select('', { iterator = 'LE' }) --- - - [''] ... s1.index.primary:select('', { iterator = 'LT' }) --- - [] ... s1:drop() --- ... s1 = nil --- ... ------------------------------------------------------------------------------- -- single-part (unsigned) ------------------------------------------------------------------------------- space = box.schema.space.create('uint', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) --- ... for i=1,9 do space:replace{i} end --- ... pk:select({}, { iterator = 'ALL' }) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] ... pk:select({}, { iterator = 'EQ' }) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] ... pk:select({}, { iterator = 'REQ' }) --- - - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... pk:select({}, { iterator = 'GE' }) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] ... pk:select({}, { iterator = 'GT' }) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] ... pk:select({}, { iterator = 'LE' }) --- - - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... pk:select({}, { iterator = 'LT' }) --- - - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... pk:select({0}, { iterator = 'EQ' }) --- - [] ... pk:select({0}, { iterator = 'REQ' }) --- - [] ... pk:select({0}, { iterator = 'LE' }) --- - [] ... 
pk:select({0}, { iterator = 'LT' }) --- - [] ... pk:select({1}, { iterator = 'EQ' }) --- - - [1] ... pk:select({1}, { iterator = 'REQ' }) --- - - [1] ... pk:select({1}, { iterator = 'LE' }) --- - - [1] ... pk:select({1}, { iterator = 'LT' }) --- - [] ... pk:select({5}, { iterator = 'EQ' }) --- - - [5] ... pk:select({5}, { iterator = 'REQ' }) --- - - [5] ... pk:select({5}, { iterator = 'GE' }) --- - - [5] - [6] - [7] - [8] - [9] ... pk:select({5}, { iterator = 'GT' }) --- - - [6] - [7] - [8] - [9] ... pk:select({5}, { iterator = 'LE' }) --- - - [5] - [4] - [3] - [2] - [1] ... pk:select({5}, { iterator = 'LT' }) --- - - [4] - [3] - [2] - [1] ... pk:select({9}, { iterator = 'EQ' }) --- - - [9] ... pk:select({9}, { iterator = 'REQ' }) --- - - [9] ... pk:select({9}, { iterator = 'GE' }) --- - - [9] ... pk:select({9}, { iterator = 'GT' }) --- - [] ... pk:select({10}, { iterator = 'EQ' }) --- - [] ... pk:select({10}, { iterator = 'REQ' }) --- - [] ... pk:select({10}, { iterator = 'GE' }) --- - [] ... pk:select({10}, { iterator = 'GT' }) --- - [] ... pk:get({}) --- - error: Invalid key part count in an exact match (expected 1, got 0) ... pk:get({0}) --- ... pk:get({5}) --- - [5] ... pk:get({10}) --- ... pk:get({10, 15}) --- - error: Invalid key part count in an exact match (expected 1, got 2) ... space:drop() --- ... space = nil --- ... pk = nil --- ... ------------------------------------------------------------------------------- -- single-part sparse (unsigned) ------------------------------------------------------------------------------- space = box.schema.space.create('sparse_uint', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {3, 'unsigned'}}) --- ... for i=1,9 do space:replace{'', 0, i} end --- ... space:insert{'', 0, 1} -- conflict --- - error: Duplicate key exists in unique index 'primary' in space 'sparse_uint' ... 
pk:select({}, { iterator = 'ALL' }) --- - - ['', 0, 1] - ['', 0, 2] - ['', 0, 3] - ['', 0, 4] - ['', 0, 5] - ['', 0, 6] - ['', 0, 7] - ['', 0, 8] - ['', 0, 9] ... pk:select({}, { iterator = 'EQ' }) --- - - ['', 0, 1] - ['', 0, 2] - ['', 0, 3] - ['', 0, 4] - ['', 0, 5] - ['', 0, 6] - ['', 0, 7] - ['', 0, 8] - ['', 0, 9] ... pk:select({}, { iterator = 'REQ' }) --- - - ['', 0, 9] - ['', 0, 8] - ['', 0, 7] - ['', 0, 6] - ['', 0, 5] - ['', 0, 4] - ['', 0, 3] - ['', 0, 2] - ['', 0, 1] ... pk:select({}, { iterator = 'GE' }) --- - - ['', 0, 1] - ['', 0, 2] - ['', 0, 3] - ['', 0, 4] - ['', 0, 5] - ['', 0, 6] - ['', 0, 7] - ['', 0, 8] - ['', 0, 9] ... pk:select({}, { iterator = 'GT' }) --- - - ['', 0, 1] - ['', 0, 2] - ['', 0, 3] - ['', 0, 4] - ['', 0, 5] - ['', 0, 6] - ['', 0, 7] - ['', 0, 8] - ['', 0, 9] ... pk:select({}, { iterator = 'LE' }) --- - - ['', 0, 9] - ['', 0, 8] - ['', 0, 7] - ['', 0, 6] - ['', 0, 5] - ['', 0, 4] - ['', 0, 3] - ['', 0, 2] - ['', 0, 1] ... pk:select({}, { iterator = 'LT' }) --- - - ['', 0, 9] - ['', 0, 8] - ['', 0, 7] - ['', 0, 6] - ['', 0, 5] - ['', 0, 4] - ['', 0, 3] - ['', 0, 2] - ['', 0, 1] ... pk:select({0}, { iterator = 'EQ' }) --- - [] ... pk:select({0}, { iterator = 'REQ' }) --- - [] ... pk:select({0}, { iterator = 'LE' }) --- - [] ... pk:select({0}, { iterator = 'LT' }) --- - [] ... pk:select({1}, { iterator = 'EQ' }) --- - - ['', 0, 1] ... pk:select({1}, { iterator = 'REQ' }) --- - - ['', 0, 1] ... pk:select({1}, { iterator = 'LE' }) --- - - ['', 0, 1] ... pk:select({1}, { iterator = 'LT' }) --- - [] ... pk:select({5}, { iterator = 'EQ' }) --- - - ['', 0, 5] ... pk:select({5}, { iterator = 'REQ' }) --- - - ['', 0, 5] ... pk:select({5}, { iterator = 'GE' }) --- - - ['', 0, 5] - ['', 0, 6] - ['', 0, 7] - ['', 0, 8] - ['', 0, 9] ... pk:select({5}, { iterator = 'GT' }) --- - - ['', 0, 6] - ['', 0, 7] - ['', 0, 8] - ['', 0, 9] ... pk:select({5}, { iterator = 'LE' }) --- - - ['', 0, 5] - ['', 0, 4] - ['', 0, 3] - ['', 0, 2] - ['', 0, 1] ... 
pk:select({5}, { iterator = 'LT' }) --- - - ['', 0, 4] - ['', 0, 3] - ['', 0, 2] - ['', 0, 1] ... pk:select({9}, { iterator = 'EQ' }) --- - - ['', 0, 9] ... pk:select({9}, { iterator = 'REQ' }) --- - - ['', 0, 9] ... pk:select({9}, { iterator = 'GE' }) --- - - ['', 0, 9] ... pk:select({9}, { iterator = 'GT' }) --- - [] ... pk:select({10}, { iterator = 'EQ' }) --- - [] ... pk:select({10}, { iterator = 'REQ' }) --- - [] ... pk:select({10}, { iterator = 'GE' }) --- - [] ... pk:select({10}, { iterator = 'GT' }) --- - [] ... pk:get({}) --- - error: Invalid key part count in an exact match (expected 1, got 0) ... pk:get({0}) --- ... pk:get({5}) --- - ['', 0, 5] ... pk:get({10}) --- ... pk:get({10, 15}) --- - error: Invalid key part count in an exact match (expected 1, got 2) ... space:drop() --- ... space = nil --- ... pk = nil --- ... ------------------------------------------------------------------------------- -- single-part (string) ------------------------------------------------------------------------------- space = box.schema.space.create('string', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'string'}}) --- ... for i=1,9 do space:replace{'0'..i} end --- ... pk:select({}, { iterator = 'ALL' }) --- - - ['01'] - ['02'] - ['03'] - ['04'] - ['05'] - ['06'] - ['07'] - ['08'] - ['09'] ... pk:select({}, { iterator = 'EQ' }) --- - - ['01'] - ['02'] - ['03'] - ['04'] - ['05'] - ['06'] - ['07'] - ['08'] - ['09'] ... pk:select({}, { iterator = 'REQ' }) --- - - ['09'] - ['08'] - ['07'] - ['06'] - ['05'] - ['04'] - ['03'] - ['02'] - ['01'] ... pk:select({}, { iterator = 'GE' }) --- - - ['01'] - ['02'] - ['03'] - ['04'] - ['05'] - ['06'] - ['07'] - ['08'] - ['09'] ... pk:select({}, { iterator = 'GT' }) --- - - ['01'] - ['02'] - ['03'] - ['04'] - ['05'] - ['06'] - ['07'] - ['08'] - ['09'] ... pk:select({}, { iterator = 'LE' }) --- - - ['09'] - ['08'] - ['07'] - ['06'] - ['05'] - ['04'] - ['03'] - ['02'] - ['01'] ... 
pk:select({}, { iterator = 'LT' }) --- - - ['09'] - ['08'] - ['07'] - ['06'] - ['05'] - ['04'] - ['03'] - ['02'] - ['01'] ... pk:select({'00'}, { iterator = 'EQ' }) --- - [] ... pk:select({'00'}, { iterator = 'REQ' }) --- - [] ... pk:select({'00'}, { iterator = 'LE' }) --- - [] ... pk:select({'00'}, { iterator = 'LT' }) --- - [] ... pk:select({'01'}, { iterator = 'EQ' }) --- - - ['01'] ... pk:select({'01'}, { iterator = 'REQ' }) --- - - ['01'] ... pk:select({'01'}, { iterator = 'LE' }) --- - - ['01'] ... pk:select({'01'}, { iterator = 'LT' }) --- - [] ... pk:select({'05'}, { iterator = 'EQ' }) --- - - ['05'] ... pk:select({'05'}, { iterator = 'REQ' }) --- - - ['05'] ... pk:select({'05'}, { iterator = 'GE' }) --- - - ['05'] - ['06'] - ['07'] - ['08'] - ['09'] ... pk:select({'05'}, { iterator = 'GT' }) --- - - ['06'] - ['07'] - ['08'] - ['09'] ... pk:select({'05'}, { iterator = 'LE' }) --- - - ['05'] - ['04'] - ['03'] - ['02'] - ['01'] ... pk:select({'05'}, { iterator = 'LT' }) --- - - ['04'] - ['03'] - ['02'] - ['01'] ... pk:select({'09'}, { iterator = 'EQ' }) --- - - ['09'] ... pk:select({'09'}, { iterator = 'REQ' }) --- - - ['09'] ... pk:select({'09'}, { iterator = 'GE' }) --- - - ['09'] ... pk:select({'09'}, { iterator = 'GT' }) --- - [] ... pk:select({'10'}, { iterator = 'EQ' }) --- - [] ... pk:select({'10'}, { iterator = 'REQ' }) --- - [] ... pk:select({'10'}, { iterator = 'GE' }) --- - [] ... pk:select({'10'}, { iterator = 'GT' }) --- - [] ... pk:get({}) --- - error: Invalid key part count in an exact match (expected 1, got 0) ... pk:get({'0'}) --- ... pk:get({'5'}) --- ... pk:get({'10'}) --- ... pk:get({'10', '15'}) --- - error: Invalid key part count in an exact match (expected 1, got 2) ... space:drop() --- ... space = nil --- ... pk = nil --- ... 
------------------------------------------------------------------------------- -- multi-part (unsigned + string) ------------------------------------------------------------------------------- space = box.schema.space.create('uint_str', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'string'}}) --- ... for i=1,9 do for j=1,3 do space:replace({i, '0'..j}) end end --- ... -- -- one part -- pk:select({}, { iterator = 'ALL' }) --- - - [1, '01'] - [1, '02'] - [1, '03'] - [2, '01'] - [2, '02'] - [2, '03'] - [3, '01'] - [3, '02'] - [3, '03'] - [4, '01'] - [4, '02'] - [4, '03'] - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({}, { iterator = 'EQ' }) --- - - [1, '01'] - [1, '02'] - [1, '03'] - [2, '01'] - [2, '02'] - [2, '03'] - [3, '01'] - [3, '02'] - [3, '03'] - [4, '01'] - [4, '02'] - [4, '03'] - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({}, { iterator = 'REQ' }) --- - - [9, '03'] - [9, '02'] - [9, '01'] - [8, '03'] - [8, '02'] - [8, '01'] - [7, '03'] - [7, '02'] - [7, '01'] - [6, '03'] - [6, '02'] - [6, '01'] - [5, '03'] - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({}, { iterator = 'GE' }) --- - - [1, '01'] - [1, '02'] - [1, '03'] - [2, '01'] - [2, '02'] - [2, '03'] - [3, '01'] - [3, '02'] - [3, '03'] - [4, '01'] - [4, '02'] - [4, '03'] - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... 
pk:select({}, { iterator = 'GT' }) --- - - [1, '01'] - [1, '02'] - [1, '03'] - [2, '01'] - [2, '02'] - [2, '03'] - [3, '01'] - [3, '02'] - [3, '03'] - [4, '01'] - [4, '02'] - [4, '03'] - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({}, { iterator = 'LE' }) --- - - [9, '03'] - [9, '02'] - [9, '01'] - [8, '03'] - [8, '02'] - [8, '01'] - [7, '03'] - [7, '02'] - [7, '01'] - [6, '03'] - [6, '02'] - [6, '01'] - [5, '03'] - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({}, { iterator = 'LT' }) --- - - [9, '03'] - [9, '02'] - [9, '01'] - [8, '03'] - [8, '02'] - [8, '01'] - [7, '03'] - [7, '02'] - [7, '01'] - [6, '03'] - [6, '02'] - [6, '01'] - [5, '03'] - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({0}, { iterator = 'EQ' }) --- - [] ... pk:select({0}, { iterator = 'REQ' }) --- - [] ... pk:select({0}, { iterator = 'LE' }) --- - [] ... pk:select({0}, { iterator = 'LT' }) --- - [] ... pk:select({1}, { iterator = 'EQ' }) --- - - [1, '01'] - [1, '02'] - [1, '03'] ... pk:select({1}, { iterator = 'REQ' }) --- - - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({1}, { iterator = 'LE' }) --- - - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({1}, { iterator = 'LT' }) --- - [] ... pk:select({9}, { iterator = 'EQ' }) --- - - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({9}, { iterator = 'REQ' }) --- - - [9, '03'] - [9, '02'] - [9, '01'] ... pk:select({9}, { iterator = 'GE' }) --- - - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({9}, { iterator = 'GT' }) --- - [] ... pk:select({10}, { iterator = 'EQ' }) --- - [] ... pk:select({10}, { iterator = 'REQ' }) --- - [] ... 
pk:select({10}, { iterator = 'GE' }) --- - [] ... pk:select({10}, { iterator = 'GT' }) --- - [] ... pk:get({}) --- - error: Invalid key part count in an exact match (expected 2, got 0) ... pk:get({'5'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... -- -- two parts -- pk:select({5, '00'}, { iterator = 'EQ' }) --- - [] ... pk:select({5, '00'}, { iterator = 'REQ' }) --- - [] ... pk:select({5, '00'}, { iterator = 'GE' }) --- - - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({5, '00'}, { iterator = 'GT' }) --- - - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({5, '00'}, { iterator = 'LE' }) --- - - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '00'}, { iterator = 'LT' }) --- - - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '01'}, { iterator = 'EQ' }) --- - - [5, '01'] ... pk:select({5, '01'}, { iterator = 'REQ' }) --- - - [5, '01'] ... pk:select({5, '01'}, { iterator = 'GE' }) --- - - [5, '01'] - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({5, '01'}, { iterator = 'GT' }) --- - - [5, '02'] - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... 
pk:select({5, '01'}, { iterator = 'LE' }) --- - - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '01'}, { iterator = 'LT' }) --- - - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '03'}, { iterator = 'EQ' }) --- - - [5, '03'] ... pk:select({5, '03'}, { iterator = 'REQ' }) --- - - [5, '03'] ... pk:select({5, '03'}, { iterator = 'GE' }) --- - - [5, '03'] - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({5, '03'}, { iterator = 'GT' }) --- - - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({5, '03'}, { iterator = 'LE' }) --- - - [5, '03'] - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '03'}, { iterator = 'LT' }) --- - - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '04'}, { iterator = 'EQ' }) --- - [] ... pk:select({5, '04'}, { iterator = 'REQ' }) --- - [] ... pk:select({5, '04'}, { iterator = 'GE' }) --- - - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... pk:select({5, '04'}, { iterator = 'GT' }) --- - - [6, '01'] - [6, '02'] - [6, '03'] - [7, '01'] - [7, '02'] - [7, '03'] - [8, '01'] - [8, '02'] - [8, '03'] - [9, '01'] - [9, '02'] - [9, '03'] ... 
pk:select({5, '04'}, { iterator = 'LE' }) --- - - [5, '03'] - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:select({5, '04'}, { iterator = 'LT' }) --- - - [5, '03'] - [5, '02'] - [5, '01'] - [4, '03'] - [4, '02'] - [4, '01'] - [3, '03'] - [3, '02'] - [3, '01'] - [2, '03'] - [2, '02'] - [2, '01'] - [1, '03'] - [1, '02'] - [1, '01'] ... pk:get({4, '05'}) --- ... pk:get({4, '03'}) --- - [4, '03'] ... pk:get({4, '03', 100}) --- - error: Invalid key part count in an exact match (expected 2, got 3) ... space:drop() --- ... space = nil --- ... pk = nil --- ... ------------------------------------------------------------------------------- -- multi-part (string + unsigned) ------------------------------------------------------------------------------- space = box.schema.space.create('str_uint', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'string', 2, 'unsigned'}}) --- ... for i=1,9 do for j=1,3 do space:replace({'0'..i, j}) end end --- ... -- -- one part -- pk:select({}, { iterator = 'ALL' }) --- - - ['01', 1] - ['01', 2] - ['01', 3] - ['02', 1] - ['02', 2] - ['02', 3] - ['03', 1] - ['03', 2] - ['03', 3] - ['04', 1] - ['04', 2] - ['04', 3] - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({}, { iterator = 'EQ' }) --- - - ['01', 1] - ['01', 2] - ['01', 3] - ['02', 1] - ['02', 2] - ['02', 3] - ['03', 1] - ['03', 2] - ['03', 3] - ['04', 1] - ['04', 2] - ['04', 3] - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... 
pk:select({}, { iterator = 'REQ' }) --- - - ['09', 3] - ['09', 2] - ['09', 1] - ['08', 3] - ['08', 2] - ['08', 1] - ['07', 3] - ['07', 2] - ['07', 1] - ['06', 3] - ['06', 2] - ['06', 1] - ['05', 3] - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({}, { iterator = 'GE' }) --- - - ['01', 1] - ['01', 2] - ['01', 3] - ['02', 1] - ['02', 2] - ['02', 3] - ['03', 1] - ['03', 2] - ['03', 3] - ['04', 1] - ['04', 2] - ['04', 3] - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({}, { iterator = 'GT' }) --- - - ['01', 1] - ['01', 2] - ['01', 3] - ['02', 1] - ['02', 2] - ['02', 3] - ['03', 1] - ['03', 2] - ['03', 3] - ['04', 1] - ['04', 2] - ['04', 3] - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({}, { iterator = 'LE' }) --- - - ['09', 3] - ['09', 2] - ['09', 1] - ['08', 3] - ['08', 2] - ['08', 1] - ['07', 3] - ['07', 2] - ['07', 1] - ['06', 3] - ['06', 2] - ['06', 1] - ['05', 3] - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({}, { iterator = 'LT' }) --- - - ['09', 3] - ['09', 2] - ['09', 1] - ['08', 3] - ['08', 2] - ['08', 1] - ['07', 3] - ['07', 2] - ['07', 1] - ['06', 3] - ['06', 2] - ['06', 1] - ['05', 3] - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'00'}, { iterator = 'EQ' }) --- - [] ... pk:select({'00'}, { iterator = 'REQ' }) --- - [] ... 
pk:select({'00'}, { iterator = 'LE' }) --- - [] ... pk:select({'00'}, { iterator = 'LT' }) --- - [] ... pk:select({'01'}, { iterator = 'EQ' }) --- - - ['01', 1] - ['01', 2] - ['01', 3] ... pk:select({'01'}, { iterator = 'REQ' }) --- - - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'01'}, { iterator = 'LE' }) --- - - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'01'}, { iterator = 'LT' }) --- - [] ... pk:select({'09'}, { iterator = 'EQ' }) --- - - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'09'}, { iterator = 'REQ' }) --- - - ['09', 3] - ['09', 2] - ['09', 1] ... pk:select({'09'}, { iterator = 'GE' }) --- - - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'09'}, { iterator = 'GT' }) --- - [] ... pk:select({'10'}, { iterator = 'EQ' }) --- - [] ... pk:select({'10'}, { iterator = 'REQ' }) --- - [] ... pk:select({'10'}, { iterator = 'GE' }) --- - [] ... pk:select({'10'}, { iterator = 'GT' }) --- - [] ... pk:get({}) --- - error: Invalid key part count in an exact match (expected 2, got 0) ... pk:get({'00'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... pk:get({'05'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... pk:get({'10'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... -- -- two parts -- pk:select({'05', 0}, { iterator = 'EQ' }) --- - [] ... pk:select({'05', 0}, { iterator = 'REQ' }) --- - [] ... pk:select({'05', 0}, { iterator = 'GE' }) --- - - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'05', 0}, { iterator = 'GT' }) --- - - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... 
pk:select({'05', 0}, { iterator = 'LE' }) --- - - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 0}, { iterator = 'LT' }) --- - - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 1}, { iterator = 'EQ' }) --- - - ['05', 1] ... pk:select({'05', 1}, { iterator = 'REQ' }) --- - - ['05', 1] ... pk:select({'05', 1}, { iterator = 'GE' }) --- - - ['05', 1] - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'05', 1}, { iterator = 'GT' }) --- - - ['05', 2] - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'05', 1}, { iterator = 'LE' }) --- - - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 1}, { iterator = 'LT' }) --- - - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 3}, { iterator = 'EQ' }) --- - - ['05', 3] ... pk:select({'05', 3}, { iterator = 'REQ' }) --- - - ['05', 3] ... pk:select({'05', 3}, { iterator = 'GE' }) --- - - ['05', 3] - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'05', 3}, { iterator = 'GT' }) --- - - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... 
pk:select({'05', 3}, { iterator = 'LE' }) --- - - ['05', 3] - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 3}, { iterator = 'LT' }) --- - - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 4}, { iterator = 'EQ' }) --- - [] ... pk:select({'05', 4}, { iterator = 'REQ' }) --- - [] ... pk:select({'05', 4}, { iterator = 'GE' }) --- - - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'05', 4}, { iterator = 'GT' }) --- - - ['06', 1] - ['06', 2] - ['06', 3] - ['07', 1] - ['07', 2] - ['07', 3] - ['08', 1] - ['08', 2] - ['08', 3] - ['09', 1] - ['09', 2] - ['09', 3] ... pk:select({'05', 4}, { iterator = 'LE' }) --- - - ['05', 3] - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:select({'05', 4}, { iterator = 'LT' }) --- - - ['05', 3] - ['05', 2] - ['05', 1] - ['04', 3] - ['04', 2] - ['04', 1] - ['03', 3] - ['03', 2] - ['03', 1] - ['02', 3] - ['02', 2] - ['02', 1] - ['01', 3] - ['01', 2] - ['01', 1] ... pk:get({'04', 5}) --- ... pk:get({'04', 3}) --- - ['04', 3] ... pk:get({'04', 3, 100}) --- - error: Invalid key part count in an exact match (expected 2, got 3) ... space:drop() --- ... space = nil --- ... pk = nil --- ... ------------------------------------------------------------------------------- -- multi-part sparse (string + unsigned) ------------------------------------------------------------------------------- space = box.schema.space.create('sparse_str_uint', { engine = engine }) --- ... 
pk = space:create_index('primary', { type = 'tree', parts = {3, 'string', 1, 'unsigned'}}) --- ... for i=1,9 do for j=1,3 do space:replace({i, '', '0'..j}) end end --- ... -- conflicts space:insert({9, '', '01'}) --- - error: Duplicate key exists in unique index 'primary' in space 'sparse_str_uint' ... -- -- one part -- pk:select({}, { iterator = 'ALL' }) --- - - [1, '', '01'] - [2, '', '01'] - [3, '', '01'] - [4, '', '01'] - [5, '', '01'] - [6, '', '01'] - [7, '', '01'] - [8, '', '01'] - [9, '', '01'] - [1, '', '02'] - [2, '', '02'] - [3, '', '02'] - [4, '', '02'] - [5, '', '02'] - [6, '', '02'] - [7, '', '02'] - [8, '', '02'] - [9, '', '02'] - [1, '', '03'] - [2, '', '03'] - [3, '', '03'] - [4, '', '03'] - [5, '', '03'] - [6, '', '03'] - [7, '', '03'] - [8, '', '03'] - [9, '', '03'] ... pk:select({}, { iterator = 'EQ' }) --- - - [1, '', '01'] - [2, '', '01'] - [3, '', '01'] - [4, '', '01'] - [5, '', '01'] - [6, '', '01'] - [7, '', '01'] - [8, '', '01'] - [9, '', '01'] - [1, '', '02'] - [2, '', '02'] - [3, '', '02'] - [4, '', '02'] - [5, '', '02'] - [6, '', '02'] - [7, '', '02'] - [8, '', '02'] - [9, '', '02'] - [1, '', '03'] - [2, '', '03'] - [3, '', '03'] - [4, '', '03'] - [5, '', '03'] - [6, '', '03'] - [7, '', '03'] - [8, '', '03'] - [9, '', '03'] ... pk:select({}, { iterator = 'REQ' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... 
pk:select({}, { iterator = 'GE' }) --- - - [1, '', '01'] - [2, '', '01'] - [3, '', '01'] - [4, '', '01'] - [5, '', '01'] - [6, '', '01'] - [7, '', '01'] - [8, '', '01'] - [9, '', '01'] - [1, '', '02'] - [2, '', '02'] - [3, '', '02'] - [4, '', '02'] - [5, '', '02'] - [6, '', '02'] - [7, '', '02'] - [8, '', '02'] - [9, '', '02'] - [1, '', '03'] - [2, '', '03'] - [3, '', '03'] - [4, '', '03'] - [5, '', '03'] - [6, '', '03'] - [7, '', '03'] - [8, '', '03'] - [9, '', '03'] ... pk:select({}, { iterator = 'GT' }) --- - - [1, '', '01'] - [2, '', '01'] - [3, '', '01'] - [4, '', '01'] - [5, '', '01'] - [6, '', '01'] - [7, '', '01'] - [8, '', '01'] - [9, '', '01'] - [1, '', '02'] - [2, '', '02'] - [3, '', '02'] - [4, '', '02'] - [5, '', '02'] - [6, '', '02'] - [7, '', '02'] - [8, '', '02'] - [9, '', '02'] - [1, '', '03'] - [2, '', '03'] - [3, '', '03'] - [4, '', '03'] - [5, '', '03'] - [6, '', '03'] - [7, '', '03'] - [8, '', '03'] - [9, '', '03'] ... pk:select({}, { iterator = 'LE' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({}, { iterator = 'LT' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'00'}, { iterator = 'EQ' }) --- - [] ... 
pk:select({'00'}, { iterator = 'REQ' }) --- - [] ... pk:select({'00'}, { iterator = 'LE' }) --- - [] ... pk:select({'00'}, { iterator = 'LT' }) --- - [] ... pk:select({'01'}, { iterator = 'EQ' }) --- - - [1, '', '01'] - [2, '', '01'] - [3, '', '01'] - [4, '', '01'] - [5, '', '01'] - [6, '', '01'] - [7, '', '01'] - [8, '', '01'] - [9, '', '01'] ... pk:select({'01'}, { iterator = 'REQ' }) --- - - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'01'}, { iterator = 'LE' }) --- - - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'01'}, { iterator = 'LT' }) --- - [] ... pk:select({'09'}, { iterator = 'EQ' }) --- - [] ... pk:select({'09'}, { iterator = 'REQ' }) --- - [] ... pk:select({'09'}, { iterator = 'GE' }) --- - [] ... pk:select({'09'}, { iterator = 'GT' }) --- - [] ... pk:select({'10'}, { iterator = 'EQ' }) --- - [] ... pk:select({'10'}, { iterator = 'REQ' }) --- - [] ... pk:select({'10'}, { iterator = 'GE' }) --- - [] ... pk:select({'10'}, { iterator = 'GT' }) --- - [] ... pk:get({}) --- - error: Invalid key part count in an exact match (expected 2, got 0) ... pk:get({'00'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... pk:get({'05'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... pk:get({'10'}) --- - error: Invalid key part count in an exact match (expected 2, got 1) ... -- -- two parts -- pk:select({'05', 0}, { iterator = 'EQ' }) --- - [] ... pk:select({'05', 0}, { iterator = 'REQ' }) --- - [] ... pk:select({'05', 0}, { iterator = 'GE' }) --- - [] ... pk:select({'05', 0}, { iterator = 'GT' }) --- - [] ... 
pk:select({'05', 0}, { iterator = 'LE' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'05', 0}, { iterator = 'LT' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'05', 1}, { iterator = 'EQ' }) --- - [] ... pk:select({'05', 1}, { iterator = 'REQ' }) --- - [] ... pk:select({'05', 1}, { iterator = 'GE' }) --- - [] ... pk:select({'05', 1}, { iterator = 'GT' }) --- - [] ... pk:select({'05', 1}, { iterator = 'LE' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... 
pk:select({'05', 1}, { iterator = 'LT' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'05', 3}, { iterator = 'EQ' }) --- - [] ... pk:select({'05', 3}, { iterator = 'REQ' }) --- - [] ... pk:select({'05', 3}, { iterator = 'GE' }) --- - [] ... pk:select({'05', 3}, { iterator = 'GT' }) --- - [] ... pk:select({'05', 3}, { iterator = 'LE' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'05', 3}, { iterator = 'LT' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'05', 4}, { iterator = 'EQ' }) --- - [] ... pk:select({'05', 4}, { iterator = 'REQ' }) --- - [] ... pk:select({'05', 4}, { iterator = 'GE' }) --- - [] ... pk:select({'05', 4}, { iterator = 'GT' }) --- - [] ... 
pk:select({'05', 4}, { iterator = 'LE' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:select({'05', 4}, { iterator = 'LT' }) --- - - [9, '', '03'] - [8, '', '03'] - [7, '', '03'] - [6, '', '03'] - [5, '', '03'] - [4, '', '03'] - [3, '', '03'] - [2, '', '03'] - [1, '', '03'] - [9, '', '02'] - [8, '', '02'] - [7, '', '02'] - [6, '', '02'] - [5, '', '02'] - [4, '', '02'] - [3, '', '02'] - [2, '', '02'] - [1, '', '02'] - [9, '', '01'] - [8, '', '01'] - [7, '', '01'] - [6, '', '01'] - [5, '', '01'] - [4, '', '01'] - [3, '', '01'] - [2, '', '01'] - [1, '', '01'] ... pk:get({'04', 5}) --- ... pk:get({'04', 3}) --- ... pk:get({'04', 3, 100}) --- - error: Invalid key part count in an exact match (expected 2, got 3) ... space:drop() --- ... space = nil --- ... pk = nil --- ... ------------------------------------------------------------------------------- -- multiple indices ------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum', { engine = engine }) --- ... i0 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... i1 = space:create_index('i1', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) --- ... i2 = space:create_index('i2', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) --- ... i3 = space:create_index('i3', { type = 'tree', parts = {4, 'string', 5, 'string'}, unique = false }) --- ... i4 = space:create_index('i4', { type = 'tree', parts = {7, 'string', 6, 'string'}, unique = false }) --- ... 
i5 = space:create_index('i5', { type = 'tree', parts = {9, 'unsigned'}, unique = false }) --- ... i6 = space:create_index('i6', { type = 'tree', parts = {7, 'string', 6, 'string', 4, 'string', 5, 'string', 9, 'unsigned'}, unique = true }) --- ... space:insert{0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000} --- - [0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000] ... space:insert{1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001} --- - [1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001] ... space:insert{2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002} --- - [2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002] ... space:insert{3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003} --- - [3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003] ... space:insert{4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004} --- - [4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004] ... space:insert{5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005} --- - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... space:insert{6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} --- - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... space:insert{7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} --- - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] ... space:insert{8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} --- - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] ... space:insert{9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} --- - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... -- In non-unique indexes select output order is undefined, -- so it's better to additionally sort output to receive same order every time. function sort_cmp(a, b) return a[1] < b[1] and true or false end --- ... 
function sort(t) table.sort(t, sort_cmp) return t end --- ... space.index['primary']:get{1} --- - [1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001] ... sort(space.index['i1']:select{2}) --- - - [2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002] ... sort(space.index[2]:select({300})) --- - - [3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003] - [4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004] - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... #space.index['i3']:select({'Joe', 'Sixpack'}) --- - 6 ... #space.index['i3']:select('John') --- - 4 ... #space.index['i4']:select('A Pipe') --- - 1 ... {sort(space.index['i4']:select{'Miller Genuine Draft', 'Drinks'})} --- - - - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... sort(space.index['i5']:select{2007}) --- - - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] ... sort(space.index[6]:select{'Miller Genuine Draft', 'Drinks'}) --- - - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... tmp = space:delete{6} --- ... tmp = space:delete{7} --- ... tmp = space:delete{8} --- ... tmp = space:delete{9} --- ... space:insert{6, 6ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} --- - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... space:insert{7, 7ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} --- - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] ... space:insert{8, 8ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} --- - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] ... space:insert{9, 9ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} --- - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... sort(space.index['i1']:select{6ULL}) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... 
sort(space.index['i1']:select{6}) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... sort(space.index['i2']:select(400ULL)) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... sort(space.index['i2']:select(400)) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... sort(space:select{}) --- - - [0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000] - [1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001] - [2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002] - [3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003] - [4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004] - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... -- Test incorrect keys - supplied key field type does not match index type -- https://bugs.launchpad.net/tarantool/+bug/1072624 space:insert{'', 1, 2, '', '', '', '', '', 0} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:insert{'xxxxxxxx', 1, 2, '', '', '', '', '', 0} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:insert{1, '', 2, '', '', '', '', '', 0} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... 
space:insert{1, 'xxxxxxxxxxx', 2, '', '', '', '', '', 0} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... space:drop() --- ... sort = nil --- ... sort_cmp = nil --- ... ------------------------------------------------------------------------------- -- gh-1467: invalid iterator type ------------------------------------------------------------------------------- space = box.schema.space.create('invalid', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'string'}}) --- ... pk:select({}, {iterator = 'BITS_ALL_SET'}) --- - error: Index 'primary' (TREE) of space 'invalid' (engine) does not support requested iterator type ... space:drop() --- ... space = nil --- ... pk = nil --- ... -- Create and drop several indices space = box.schema.space.create('test', { engine = engine }) --- ... pk = space:create_index('primary') --- ... index2 = space:create_index('secondary', { parts = {2, 'str'} }) --- ... index3 = space:create_index('third', { parts = {3, 'str'}, unique = false }) --- ... index2:drop() --- ... #box.space._index:select{space.id} --- - 2 ... index4 = space:create_index('fourth', { parts = {2, 'str', 3, 'str'} }) --- ... index2 = space:create_index('secondary', { parts = {4, 'str'} }) --- ... #box.space._index:select{space.id} --- - 4 ... index3:drop() --- ... index2:drop() --- ... index4:drop() --- ... #box.space._index:select{space.id} --- - 1 ... index2 = space:create_index('secondary', { parts = {2, 'str'} }) --- ... index3 = space:create_index('third', { parts = {3, 'str'}, unique = false }) --- ... index4 = space:create_index('fourth', { parts = {2, 'str', 3, 'str'} }) --- ... #box.space._index:select{space.id} --- - 4 ... space:drop() --- ... --Collation s = box.schema.space.create('test') --- ... i1 = s:create_index('i1', { type = 'tree', parts = {{1, 'str', collation='unicode'}}, unique = true }) --- ... _ = s:replace{"ааа"} --- ... _ = s:replace{"еее"} --- ... 
_ = s:replace{"ёёё"} --- ... _ = s:replace{"жжж"} --- ... _ = s:replace{"яяя"} --- ... _ = s:replace{"ААА"} --- ... _ = s:replace{"ЯЯЯ"} --- ... -- good output s:select{} --- - - ['ааа'] - ['ААА'] - ['еее'] - ['ёёё'] - ['жжж'] - ['яяя'] - ['ЯЯЯ'] ... s:drop() --- ... ------------------------------------------------------------------------------- -- Cleanup ------------------------------------------------------------------------------- test_run:cmd("clear filter") --- - true ... engine = nil --- ... test_run = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/ddl.result0000664000000000000000000002353713306565107020455 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- space create/drop space = box.schema.space.create('test', { engine = engine }) --- ... space:drop() --- ... -- space index create/drop space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... space:drop() --- ... -- space index create/drop alter space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... _index = box.space[box.schema.INDEX_ID] --- ... _index:delete{102, 0} --- ... space:drop() --- ... -- space index create/drop tree string space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', {type = 'tree', parts = {1, 'string'}}) --- ... space:insert({'test'}) --- - ['test'] ... space:drop() --- ... -- space index create/drop tree num space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', {type = 'tree', parts = {1, 'unsigned'}}) --- ... space:insert({13}) --- - [13] ... space:drop() --- ... -- space index create/drop tree multi-part num space = box.schema.space.create('test', { engine = engine }) --- ... 
index = space:create_index('primary', {type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'}}) --- ... space:insert({13}) --- - error: Tuple field count 1 is less than required by space format or defined indexes (expected at least 2) ... space:drop() --- ... -- space index size space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... primary = space.index[0] --- ... primary:count() --- - 0 ... space:insert({13}) --- - [13] ... space:insert({14}) --- - [14] ... space:insert({15}) --- - [15] ... primary:count() --- - 3 ... space:drop() --- ... -- Key part max parts = {} --- ... for i=1,box.schema.INDEX_PART_MAX,1 do parts[2 * i - 1] = i; parts[2 * i] = 'unsigned' end --- ... space = box.schema.space.create('test', { engine = engine }) --- ... _ = space:create_index('primary', { type = 'tree', parts = parts }) --- ... tuple = {} --- ... for i=1,box.schema.INDEX_PART_MAX,1 do tuple[i] = i; end --- ... space:replace(tuple) --- - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 
223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] ... -- https://github.com/tarantool/tarantool/issues/1651 and https://github.com/tarantool/tarantool/issues/1671 -- space:upsert(tuple, {{'=', box.schema.INDEX_PART_MAX + 1, 100500}}) space:get(tuple) --- - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] ... 
space:select(tuple) --- - - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] ... _ = space:delete(tuple) --- ... space:drop() --- ... -- Too many key parts parts = {} --- ... for i=1,box.schema.INDEX_PART_MAX + 1,1 do parts[2 * i - 1] = i; parts[2 * i] = 'unsigned' end --- ... space = box.schema.space.create('test', { engine = engine }) --- ... _ = space:create_index('primary', { type = 'tree', parts = parts }) --- - error: 'Can''t create or modify index ''primary'' in space ''test'': too many key parts' ... space:drop() --- ... -- -- vy_mem of primary index contains statements with two formats. -- space = box.schema.space.create('test1', { engine = engine }) --- ... pk = space:create_index('primary1') --- ... idx2 = space:create_index('idx2', { parts = {2, 'unsigned'} }) --- ... space:replace({3, 8, 1}) --- - [3, 8, 1] ... idx2:select{} --- - - [3, 8, 1] ... 
space:get{3} --- - [3, 8, 1] ... iter_obj = space:pairs(2, {iterator = 'GT'}) --- ... idx2:drop() --- ... space:replace({4, 5, 6}) --- - [4, 5, 6] ... space:select{} --- - - [3, 8, 1] - [4, 5, 6] ... space:drop() --- ... -- Change index name space = box.schema.space.create('test', {engine = engine}) --- ... pk = space:create_index('pk') --- ... space:replace{1} --- - [1] ... space:replace{2} --- - [2] ... space:replace{3} --- - [3] ... box.space._index:select{space.id}[1][3] --- - pk ... pk:alter({name = 'altered_pk'}) --- ... box.space._index:select{space.id}[1][3] --- - altered_pk ... space:drop() --- ... --new index format space = box.schema.space.create('test', {engine = engine}) --- ... pk = space:create_index('pk', {parts={{field1 = 1, type = 'unsigned'}}}) --- - error: 'Illegal parameters, options.parts[1]: field (name or number) is expected' ... pk = space:create_index('pk', {parts={{field = 0, type = 'unsigned'}}}) --- - error: 'Illegal parameters, options.parts[1]: field (number) must be one-based' ... pk = space:create_index('pk', {parts={{field = 1, type = 'const char *'}}}) --- - error: 'Wrong index parts: unknown field type; expected field1 id (number), field1 type (string), ...' ... pk = space:create_index('pk', {parts={{field = 1, type = 'unsigned'}}}) --- ... pk.parts --- - - type: unsigned is_nullable: false fieldno: 1 ... pk:drop() --- ... pk = space:create_index('pk', {parts={{1, 'unsigned'}}}) --- ... pk.parts --- - - type: unsigned is_nullable: false fieldno: 1 ... pk:drop() --- ... pk = space:create_index('pk', {parts={{1, type='unsigned'}}}) --- ... pk.parts --- - - type: unsigned is_nullable: false fieldno: 1 ... space:insert{1, 2, 3} --- - [1, 2, 3] ... pk:drop() --- ... space:drop() --- ... -- -- gh-2893: inherit index part type from a format, if a parts array -- is omited. -- format = {{'field1', 'scalar'}} --- ... s = box.schema.create_space('test', {format = format}) --- ... pk = s:create_index('pk') --- ... 
pk.parts[1].type --- - scalar ... s:drop() --- ... -- Ensure type 'any' to be not inherited. format = {{'field1'}} --- ... s = box.schema.create_space('test', {format = format}) --- ... pk = s:create_index('pk') --- ... pk.parts[1].type --- - unsigned ... s:drop() --- ... -- -- gh-3229: update optionality if a space format is changed too, -- not only when indexes are updated. -- box.cfg{} --- ... s = box.schema.create_space('test', {engine = engine}) --- ... format = {} --- ... format[1] = {'field1', 'unsigned'} --- ... format[2] = {'field2', 'unsigned', is_nullable = true} --- ... format[3] = {'field3', 'unsigned'} --- ... s:format(format) --- ... pk = s:create_index('pk') --- ... sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) --- ... s:replace{2, 3, 4} --- - [2, 3, 4] ... s:format({}) --- ... s:insert({1}) --- - [1] ... s:insert({4, 5}) --- - [4, 5] ... s:insert({3, 4}) --- - [3, 4] ... s:insert({0}) --- - [0] ... _ = s:delete({1}) --- ... s:select({}) --- - - [0] - [2, 3, 4] - [3, 4] - [4, 5] ... pk:get({4}) --- - [4, 5] ... sk:select({box.NULL}) --- - - [0] ... sk:get({5}) --- - [4, 5] ... s:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/ddl.test.lua0000664000000000000000000001076013306565107020670 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- space create/drop space = box.schema.space.create('test', { engine = engine }) space:drop() -- space index create/drop space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') space:drop() -- space index create/drop alter space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') _index = box.space[box.schema.INDEX_ID] _index:delete{102, 0} space:drop() -- space index create/drop tree string space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', {type = 'tree', parts = {1, 'string'}}) space:insert({'test'}) space:drop() -- space index create/drop tree num space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', {type = 'tree', parts = {1, 'unsigned'}}) space:insert({13}) space:drop() -- space index create/drop tree multi-part num space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', {type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'}}) space:insert({13}) space:drop() -- space index size space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') primary = space.index[0] primary:count() space:insert({13}) space:insert({14}) space:insert({15}) primary:count() space:drop() -- Key part max parts = {} for i=1,box.schema.INDEX_PART_MAX,1 do parts[2 * i - 1] = i; parts[2 * i] = 'unsigned' end space = box.schema.space.create('test', { engine = engine }) _ = space:create_index('primary', { type = 'tree', parts = parts }) tuple = {} for i=1,box.schema.INDEX_PART_MAX,1 do tuple[i] = i; end space:replace(tuple) -- https://github.com/tarantool/tarantool/issues/1651 and 
https://github.com/tarantool/tarantool/issues/1671 -- space:upsert(tuple, {{'=', box.schema.INDEX_PART_MAX + 1, 100500}}) space:get(tuple) space:select(tuple) _ = space:delete(tuple) space:drop() -- Too many key parts parts = {} for i=1,box.schema.INDEX_PART_MAX + 1,1 do parts[2 * i - 1] = i; parts[2 * i] = 'unsigned' end space = box.schema.space.create('test', { engine = engine }) _ = space:create_index('primary', { type = 'tree', parts = parts }) space:drop() -- -- vy_mem of primary index contains statements with two formats. -- space = box.schema.space.create('test1', { engine = engine }) pk = space:create_index('primary1') idx2 = space:create_index('idx2', { parts = {2, 'unsigned'} }) space:replace({3, 8, 1}) idx2:select{} space:get{3} iter_obj = space:pairs(2, {iterator = 'GT'}) idx2:drop() space:replace({4, 5, 6}) space:select{} space:drop() -- Change index name space = box.schema.space.create('test', {engine = engine}) pk = space:create_index('pk') space:replace{1} space:replace{2} space:replace{3} box.space._index:select{space.id}[1][3] pk:alter({name = 'altered_pk'}) box.space._index:select{space.id}[1][3] space:drop() --new index format space = box.schema.space.create('test', {engine = engine}) pk = space:create_index('pk', {parts={{field1 = 1, type = 'unsigned'}}}) pk = space:create_index('pk', {parts={{field = 0, type = 'unsigned'}}}) pk = space:create_index('pk', {parts={{field = 1, type = 'const char *'}}}) pk = space:create_index('pk', {parts={{field = 1, type = 'unsigned'}}}) pk.parts pk:drop() pk = space:create_index('pk', {parts={{1, 'unsigned'}}}) pk.parts pk:drop() pk = space:create_index('pk', {parts={{1, type='unsigned'}}}) pk.parts space:insert{1, 2, 3} pk:drop() space:drop() -- -- gh-2893: inherit index part type from a format, if a parts array -- is omited. -- format = {{'field1', 'scalar'}} s = box.schema.create_space('test', {format = format}) pk = s:create_index('pk') pk.parts[1].type s:drop() -- Ensure type 'any' to be not inherited. 
format = {{'field1'}} s = box.schema.create_space('test', {format = format}) pk = s:create_index('pk') pk.parts[1].type s:drop() -- -- gh-3229: update optionality if a space format is changed too, -- not only when indexes are updated. -- box.cfg{} s = box.schema.create_space('test', {engine = engine}) format = {} format[1] = {'field1', 'unsigned'} format[2] = {'field2', 'unsigned', is_nullable = true} format[3] = {'field3', 'unsigned'} s:format(format) pk = s:create_index('pk') sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) s:replace{2, 3, 4} s:format({}) s:insert({1}) s:insert({4, 5}) s:insert({3, 4}) s:insert({0}) _ = s:delete({1}) s:select({}) pk:get({4}) sk:select({box.NULL}) sk:get({5}) s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/crossjoin.test.lua0000664000000000000000000000176313306560010022125 0ustar rootrootenv = require('test_run') test_run = env.new() inspector = env.new() engine = inspector:get_cfg('engine') space = box.schema.space.create('tweedledum', { engine = engine }) index = space:create_index('primary', { type = 'tree' }) test_run:cmd("setopt delimiter ';'") function crossjoin(space0, space1, limit) local result = {} for _,v0 in space0:pairs() do for _,v1 in space1:pairs() do if limit <= 0 then return result end local newtuple = v0:totable() for _, v in v1:pairs() do table.insert(newtuple, v) end table.insert(result, newtuple) limit = limit - 1 end end return result end; test_run:cmd("setopt delimiter ''"); crossjoin(space, space, 0) crossjoin(space, space, 10000) space:insert{1} crossjoin(space, space, 10000) space:insert{2} crossjoin(space, space, 10000) space:insert{3, 'hello'} crossjoin(space, space, 10000) space:insert{4, 'world'} space[0]:insert{5, 'hello world'} crossjoin(space, space, 10000) space:drop() crossjoin = nil tarantool_1.9.1.26.g63eb81e3c/test/engine/tuple.result0000664000000000000000000002227213306565107021036 0ustar rootroottest_run = require('test_run').new() --- ... 
engine = test_run:get_cfg('engine') --- ... test_run:cmd("push filter 'Failed to allocate [0-9]+' to 'Failed to allocate '") --- - true ... test_run:cmd("push filter '"..engine.."_max_tuple_size' to '_max_tuple_size'") --- - true ... -- https://github.com/tarantool/tarantool/issues/2667 -- Allow to insert tuples bigger than `max_tuple_size' s = box.schema.space.create('test', { engine = engine }) --- ... _ = s:create_index('primary') --- ... engine_max_tuple_size = engine ..'_max_tuple_size' --- ... engine_tuple_size = engine == 'memtx' and 16 or 32 --- ... box.cfg{[engine_max_tuple_size] = 1024 * 1024} --- ... -- check max_tuple_size limit max_tuple_size = box.cfg[engine_max_tuple_size] --- ... _ = s:replace({1, string.rep('x', max_tuple_size)}) --- - error: 'Failed to allocate bytes for tuple: tuple is too large. Check ''_max_tuple_size'' configuration option.' ... -- check max_tuple_size dynamic configuration box.cfg { [engine_max_tuple_size] = 2 * max_tuple_size } --- ... _ = s:replace({1, string.rep('x', max_tuple_size)}) --- ... -- check tuple sie box.cfg { [engine_max_tuple_size] = engine_tuple_size + 2 } --- ... _ = s:replace({1}) --- ... -- check large tuples allocated on malloc box.cfg { [engine_max_tuple_size] = 32 * 1024 * 1024 } --- ... _ = s:replace({1, string.rep('x', 32 * 1024 * 1024 - engine_tuple_size - 8)}) --- ... -- decrease max_tuple_size limit box.cfg { [engine_max_tuple_size] = 1 * 1024 * 1024 } --- ... _ = s:replace({1, string.rep('x', 2 * 1024 * 1024 )}) --- - error: 'Failed to allocate bytes for tuple: tuple is too large. Check ''_max_tuple_size'' configuration option.' ... _ = s:replace({1, string.rep('x', 1 * 1024 * 1024 - engine_tuple_size - 8)}) --- ... -- gh-2698 Tarantool crashed on 4M tuple max_item_size = 0 --- ... test_run:cmd("setopt delimiter ';'") --- - true ... for _, v in pairs(box.slab.stats()) do max_item_size = math.max(max_item_size, v.item_size) end; --- ... test_run:cmd("setopt delimiter ''"); --- - true ... 
box.cfg { [engine_max_tuple_size] = max_item_size + engine_tuple_size + 8 } --- ... _ = box.space.test:replace{1, 1, string.rep('a', max_item_size)} --- ... -- reset to original value box.cfg { [engine_max_tuple_size] = max_tuple_size } --- ... s:drop(); --- ... collectgarbage('collect') -- collect all large tuples --- - 0 ... box.snapshot() -- discard xlogs with large tuples --- - ok ... test_run:cmd("clear filter") --- - true ... -- -- gh-1014: tuple field names. -- format = {} --- ... format[1] = {name = 'field1', type = 'unsigned'} --- ... format[2] = {name = 'field2', type = 'string'} --- ... format[3] = {name = 'field3', type = 'array'} --- ... format[4] = {name = 'field4', type = 'number'} --- ... format[5] = {name = 'field5', type = 'integer'} --- ... format[6] = {name = 'field6', type = 'scalar'} --- ... format[7] = {name = 'field7', type = 'map'} --- ... format[8] = {name = 'field8', type = 'any'} --- ... format[9] = {name = 'field9'} --- ... format[10] = {name = 'bsize'} --- ... format[11] = {name = 'totable'} --- ... format[12] = {name = 'longlonglonglonglonglongname'} --- ... s = box.schema.space.create('test', {engine = engine, format = format}) --- ... pk = s:create_index('pk') --- ... t = {1, '2', {3, 3}, 4.4, -5, true, {key = 7}, 8, 9, 10, 11, 12} --- ... t = s:replace(t) --- ... t --- - [1, '2', [3, 3], 4.4, -5, true, {'key': 7}, 8, 9, 10, 11, 12] ... t.field1, t.field2, t.field3, t.field4, t.field5, t.field6, t.field7, t.field8, t.field9, t.bsize, t.totable --- - 1 - '2' - [3, 3] - 4.4 - -5 - true - {'key': 7} - 8 - 9 - 10 - 11 ... t.longlonglonglonglonglongname --- - 12 ... box.tuple.bsize(t) --- - 29 ... box.tuple.totable(t) --- - [1, '2', [3, 3], 4.4, -5, true, {'key': 7}, 8, 9, 10, 11, 12] ... s:drop() --- ... -- -- Increase collisions number and make juajit use second hash -- function. -- format = {} --- ... for i = 1, 100 do format[i] = {name = "skwjhfjwhfwfhwkhfwkjh"..i.."avjnbknwkvbwekjf"} end --- ... 
s = box.schema.space.create('test', { engine = engine, format = format }) --- ... p = s:create_index('pk') --- ... to_insert = {} --- ... for i = 1, 100 do to_insert[i] = i end --- ... t = s:replace(to_insert) --- ... format = nil --- ... name = nil --- ... s = nil --- ... p = nil --- ... to_insert = nil --- ... collectgarbage('collect') --- - 0 ... -- Print many many strings (> 40 to reach max_collisions limit in luajit). t.skwjhfjwhfwfhwkhfwkjh01avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh02avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh03avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh04avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh05avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh06avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh07avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh08avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh09avjnbknwkvbwekjf --- - null ... t.skwjhfjwhfwfhwkhfwkjh10avjnbknwkvbwekjf --- - 10 ... t.skwjhfjwhfwfhwkhfwkjh11avjnbknwkvbwekjf --- - 11 ... t.skwjhfjwhfwfhwkhfwkjh12avjnbknwkvbwekjf --- - 12 ... t.skwjhfjwhfwfhwkhfwkjh13avjnbknwkvbwekjf --- - 13 ... t.skwjhfjwhfwfhwkhfwkjh14avjnbknwkvbwekjf --- - 14 ... t.skwjhfjwhfwfhwkhfwkjh15avjnbknwkvbwekjf --- - 15 ... t.skwjhfjwhfwfhwkhfwkjh16avjnbknwkvbwekjf --- - 16 ... t.skwjhfjwhfwfhwkhfwkjh17avjnbknwkvbwekjf --- - 17 ... t.skwjhfjwhfwfhwkhfwkjh18avjnbknwkvbwekjf --- - 18 ... t.skwjhfjwhfwfhwkhfwkjh19avjnbknwkvbwekjf --- - 19 ... t.skwjhfjwhfwfhwkhfwkjh20avjnbknwkvbwekjf --- - 20 ... t.skwjhfjwhfwfhwkhfwkjh21avjnbknwkvbwekjf --- - 21 ... t.skwjhfjwhfwfhwkhfwkjh22avjnbknwkvbwekjf --- - 22 ... t.skwjhfjwhfwfhwkhfwkjh23avjnbknwkvbwekjf --- - 23 ... t.skwjhfjwhfwfhwkhfwkjh24avjnbknwkvbwekjf --- - 24 ... t.skwjhfjwhfwfhwkhfwkjh25avjnbknwkvbwekjf --- - 25 ... t.skwjhfjwhfwfhwkhfwkjh26avjnbknwkvbwekjf --- - 26 ... t.skwjhfjwhfwfhwkhfwkjh27avjnbknwkvbwekjf --- - 27 ... t.skwjhfjwhfwfhwkhfwkjh28avjnbknwkvbwekjf --- - 28 ... 
t.skwjhfjwhfwfhwkhfwkjh29avjnbknwkvbwekjf --- - 29 ... t.skwjhfjwhfwfhwkhfwkjh30avjnbknwkvbwekjf --- - 30 ... t.skwjhfjwhfwfhwkhfwkjh31avjnbknwkvbwekjf --- - 31 ... t.skwjhfjwhfwfhwkhfwkjh32avjnbknwkvbwekjf --- - 32 ... t.skwjhfjwhfwfhwkhfwkjh33avjnbknwkvbwekjf --- - 33 ... t.skwjhfjwhfwfhwkhfwkjh34avjnbknwkvbwekjf --- - 34 ... t.skwjhfjwhfwfhwkhfwkjh35avjnbknwkvbwekjf --- - 35 ... t.skwjhfjwhfwfhwkhfwkjh36avjnbknwkvbwekjf --- - 36 ... t.skwjhfjwhfwfhwkhfwkjh37avjnbknwkvbwekjf --- - 37 ... t.skwjhfjwhfwfhwkhfwkjh38avjnbknwkvbwekjf --- - 38 ... t.skwjhfjwhfwfhwkhfwkjh39avjnbknwkvbwekjf --- - 39 ... t.skwjhfjwhfwfhwkhfwkjh40avjnbknwkvbwekjf --- - 40 ... t.skwjhfjwhfwfhwkhfwkjh41avjnbknwkvbwekjf --- - 41 ... t.skwjhfjwhfwfhwkhfwkjh42avjnbknwkvbwekjf --- - 42 ... t.skwjhfjwhfwfhwkhfwkjh43avjnbknwkvbwekjf --- - 43 ... box.space.test:drop() --- ... -- -- gh-2773: correctly reset max tuple size on restart. -- box.cfg{[engine_max_tuple_size] = 1024 * 1024 * 100} --- ... s = box.schema.space.create('test', {engine = engine}) --- ... pk = s:create_index('pk') --- ... _ = s:replace({1, string.rep('*', 1024 * 1024)}) --- ... _ = s:replace({2, string.rep('*', 1024 * 1024 * 2)}) --- ... pk:count() --- - 2 ... test_run:cmd('restart server default') engine = test_run:get_cfg('engine') --- ... s = box.space.test --- ... s:count() --- - 2 ... s:drop() --- ... -- -- gh-2821: tuple:tomap(). -- format = {} --- ... format[1] = {'field1', 'unsigned'} --- ... format[2] = {'field2', 'unsigned'} --- ... format[3] = {'field3', 'unsigned'} --- ... format[4] = {'field4', 'array'} --- ... s = box.schema.space.create('test', {format = format, engine = engine}) --- ... pk = s:create_index('pk') --- ... t1 = s:replace{1, 2, 3, {'a', 'b', 'c'}} --- ... t1map = t1:tomap() --- ... function maplen(map) local count = 0 for _ in pairs(map) do count = count + 1 end return count end --- ... maplen(t1map), t1map.field1, t1map.field2, t1map.field3, t1map.field4 --- - 8 - 1 - 2 - 3 - ['a', 'b', 'c'] ... 
t1map[1], t1map[2], t1map[3], t1map[4] --- - 1 - 2 - 3 - ['a', 'b', 'c'] ... -- Fields with table type are stored once for name and for index. t1map[4] == t1map.field4 --- - true ... t2 = s:replace{4, 5, 6, {'a', 'b', 'c'}, 'extra1'} --- ... t2map = t2:tomap() --- ... maplen(t2map), t2map.field1, t2map.field2, t2map.field3, t2map.field4 --- - 9 - 4 - 5 - 6 - ['a', 'b', 'c'] ... t1map[1], t1map[2], t1map[3], t2map[4], t2map[5] --- - 1 - 2 - 3 - ['a', 'b', 'c'] - extra1 ... -- Use box.tuple.tomap alias. t3 = s:replace{7, 8, 9, {'a', 'b', 'c'}, 'extra1', 'extra2'} --- ... t3map = box.tuple.tomap(t3) --- ... maplen(t3map), t3map.field1, t3map.field2, t3map.field3, t3map.field4 --- - 10 - 7 - 8 - 9 - ['a', 'b', 'c'] ... t1map[1], t1map[2], t1map[3], t3map[4], t3map[5], t3map[6] --- - 1 - 2 - 3 - ['a', 'b', 'c'] - extra1 - extra2 ... -- Invalid arguments. t3.tomap('123') --- - error: 'Invalid argument #1 (box.tuple expected, got string)' ... box.tuple.tomap('456') --- - error: 'Invalid argument #1 (box.tuple expected, got string)' ... s:drop() --- ... -- No names, no format. s = box.schema.space.create('test', { engine = engine }) --- ... pk = s:create_index('pk') --- ... t1 = s:replace{1, 2, 3} --- ... t1map = t1:tomap() --- ... maplen(t1map), t1map[1], t1map[2], t1map[3] --- - 3 - 1 - 2 - 3 ... s:drop() --- ... engine = nil --- ... test_run = nil --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_wal.test.lua0000664000000000000000000000124713306560010022421 0ustar rootroot -- write data recover from logs only env = require('test_run') test_run = env.new() test_run:cmd('restart server default') engine = test_run:get_cfg('engine') space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') space:insert({0}) test_run:cmd('restart server default') space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) for key = 1, 1000 do space:insert({key}) end test_run:cmd('restart server default') space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) space:drop() test_run:cmd('restart server default with cleanup=1') tarantool_1.9.1.26.g63eb81e3c/test/engine/misc.result0000664000000000000000000000264213306560010020623 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- https://github.com/tarantool/tarantool/issues/1109 -- Update via a secondary key breaks recovery s = box.schema.create_space('test', { engine = engine }) --- ... i1 = s:create_index('test1', {parts = {1, 'unsigned'}}) --- ... i2 = s:create_index('test2', {parts = {2, 'unsigned'}}) --- ... s:insert{1, 2, 3} --- - [1, 2, 3] ... s:insert{5, 8, 13} --- - [5, 8, 13] ... i2:update({2}, {{'+', 3, 3}}) --- - [1, 2, 6] ... tmp = i2:delete{8} --- ... inspector:cmd("restart server default") test_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... box.space.test:select{} --- - - [1, 2, 6] ... box.space.test:drop() --- ... -- https://github.com/tarantool/tarantool/issues/1435 -- Truncate does not work _ = box.schema.space.create('t5',{engine=engine}) --- ... _ = box.space.t5:create_index('primary') --- ... box.space.t5:insert{44} --- - [44] ... box.space.t5:truncate() --- ... 
box.space.t5:insert{55} --- - [55] ... box.space.t5:drop() --- ... -- https://github.com/tarantool/tarantool/issues/2257 -- crash somewhere in bsize s = box.schema.space.create('test',{engine=engine}) --- ... _ = s:create_index('primary') --- ... s:replace{1} --- - [1] ... box.begin() --- ... _ = s:delete{1} --- ... box.rollback() --- ... _ = s:delete{1} --- ... s:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_snapshot.result0000664000000000000000000001016513306560010023253 0ustar rootroot-- write data recover from latest snapshot env = require('test_run') --- ... test_run = env.new() --- ... test_run:cmd('restart server default') engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary') --- ... space:insert({0}) --- - [0] ... box.snapshot() --- - ok ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... index:select({}, {iterator = box.index.ALL}) --- - - [0] ... for key = 1, 351 do space:insert({key}) end --- ... box.snapshot() --- - ok ... test_run:cmd('restart server default') space = box.space['test'] --- ... index = space.index['primary'] --- ... 
index:select({}, {iterator = box.index.ALL}) --- - - [0] - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] - [101] - [102] - [103] - [104] - [105] - [106] - [107] - [108] - [109] - [110] - [111] - [112] - [113] - [114] - [115] - [116] - [117] - [118] - [119] - [120] - [121] - [122] - [123] - [124] - [125] - [126] - [127] - [128] - [129] - [130] - [131] - [132] - [133] - [134] - [135] - [136] - [137] - [138] - [139] - [140] - [141] - [142] - [143] - [144] - [145] - [146] - [147] - [148] - [149] - [150] - [151] - [152] - [153] - [154] - [155] - [156] - [157] - [158] - [159] - [160] - [161] - [162] - [163] - [164] - [165] - [166] - [167] - [168] - [169] - [170] - [171] - [172] - [173] - [174] - [175] - [176] - [177] - [178] - [179] - [180] - [181] - [182] - [183] - [184] - [185] - [186] - [187] - [188] - [189] - [190] - [191] - [192] - [193] - [194] - [195] - [196] - [197] - [198] - [199] - [200] - [201] - [202] - [203] - [204] - [205] - [206] - [207] - [208] - [209] - [210] - [211] - [212] - [213] - [214] - [215] - [216] - [217] - [218] - [219] - [220] - [221] - [222] - [223] - [224] - [225] - [226] - [227] - [228] - [229] - [230] - [231] - [232] - [233] - [234] - [235] - [236] - [237] - [238] - [239] - [240] - [241] - [242] - [243] - [244] - [245] - [246] - [247] - [248] - [249] - [250] - [251] - [252] - [253] - [254] - [255] - [256] - 
[257] - [258] - [259] - [260] - [261] - [262] - [263] - [264] - [265] - [266] - [267] - [268] - [269] - [270] - [271] - [272] - [273] - [274] - [275] - [276] - [277] - [278] - [279] - [280] - [281] - [282] - [283] - [284] - [285] - [286] - [287] - [288] - [289] - [290] - [291] - [292] - [293] - [294] - [295] - [296] - [297] - [298] - [299] - [300] - [301] - [302] - [303] - [304] - [305] - [306] - [307] - [308] - [309] - [310] - [311] - [312] - [313] - [314] - [315] - [316] - [317] - [318] - [319] - [320] - [321] - [322] - [323] - [324] - [325] - [326] - [327] - [328] - [329] - [330] - [331] - [332] - [333] - [334] - [335] - [336] - [337] - [338] - [339] - [340] - [341] - [342] - [343] - [344] - [345] - [346] - [347] - [348] - [349] - [350] - [351] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/select.result0000664000000000000000000011511613306560010021150 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- select (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... 
index:select({}, {iterator = box.index.ALL}) --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... index:select({}, {iterator = box.index.GE}) --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... 
index:select(tostring(44), {iterator = box.index.GE}) --- - - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... index:select({}, {iterator = box.index.GT}) --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... 
index:select(tostring(44), {iterator = box.index.GT}) --- - - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... index:select({}, {iterator = box.index.LE}) --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... 
index:select(tostring(77), {iterator = box.index.LE}) --- - - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... index:select({}, {iterator = box.index.LT}) --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... 
index:select(tostring(77), {iterator = box.index.LT}) --- - - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... space:drop() --- ... -- select (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key}) end --- ... index:select({}, {iterator = box.index.ALL}) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... 
index:select({}, {iterator = box.index.GE}) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... index:select(44, {iterator = box.index.GE}) --- - - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... index:select({}, {iterator = box.index.GT}) --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... 
index:select(44, {iterator = box.index.GT}) --- - - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... index:select({}, {iterator = box.index.LE}) --- - - [100] - [99] - [98] - [97] - [96] - [95] - [94] - [93] - [92] - [91] - [90] - [89] - [88] - [87] - [86] - [85] - [84] - [83] - [82] - [81] - [80] - [79] - [78] - [77] - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... index:select(77, {iterator = box.index.LE}) --- - - [77] - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... 
index:select({}, {iterator = box.index.LT}) --- - - [100] - [99] - [98] - [97] - [96] - [95] - [94] - [93] - [92] - [91] - [90] - [89] - [88] - [87] - [86] - [85] - [84] - [83] - [82] - [81] - [80] - [79] - [78] - [77] - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... index:select(77, {iterator = box.index.LT}) --- - - [76] - [75] - [74] - [73] - [72] - [71] - [70] - [69] - [68] - [67] - [66] - [65] - [64] - [63] - [62] - [61] - [60] - [59] - [58] - [57] - [56] - [55] - [54] - [53] - [52] - [51] - [50] - [49] - [48] - [47] - [46] - [45] - [44] - [43] - [42] - [41] - [40] - [39] - [38] - [37] - [36] - [35] - [34] - [33] - [32] - [31] - [30] - [29] - [28] - [27] - [26] - [25] - [24] - [23] - [22] - [21] - [20] - [19] - [18] - [17] - [16] - [15] - [14] - [13] - [12] - [11] - [10] - [9] - [8] - [7] - [6] - [5] - [4] - [3] - [2] - [1] ... space:drop() --- ... -- select multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key, key}) end --- ... 
index:select({}, {iterator = box.index.ALL}) --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... 
index:select({}, {iterator = box.index.GE}) --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... index:select({44, 44}, {iterator = box.index.GE}) --- - - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... 
index:select({}, {iterator = box.index.GT}) --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... index:select({44, 44}, {iterator = box.index.GT}) --- - - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... 
index:select({}, {iterator = box.index.LE}) --- - - [100, 100] - [99, 99] - [98, 98] - [97, 97] - [96, 96] - [95, 95] - [94, 94] - [93, 93] - [92, 92] - [91, 91] - [90, 90] - [89, 89] - [88, 88] - [87, 87] - [86, 86] - [85, 85] - [84, 84] - [83, 83] - [82, 82] - [81, 81] - [80, 80] - [79, 79] - [78, 78] - [77, 77] - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - [3, 3] - [2, 2] - [1, 1] ... 
index:select({77, 77}, {iterator = box.index.LE}) --- - - [77, 77] - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - [3, 3] - [2, 2] - [1, 1] ... index:select({}, {iterator = box.index.LT}) --- - - [100, 100] - [99, 99] - [98, 98] - [97, 97] - [96, 96] - [95, 95] - [94, 94] - [93, 93] - [92, 92] - [91, 91] - [90, 90] - [89, 89] - [88, 88] - [87, 87] - [86, 86] - [85, 85] - [84, 84] - [83, 83] - [82, 82] - [81, 81] - [80, 80] - [79, 79] - [78, 78] - [77, 77] - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - 
[3, 3] - [2, 2] - [1, 1] ... index:select({77, 77}, {iterator = box.index.LT}) --- - - [76, 76] - [75, 75] - [74, 74] - [73, 73] - [72, 72] - [71, 71] - [70, 70] - [69, 69] - [68, 68] - [67, 67] - [66, 66] - [65, 65] - [64, 64] - [63, 63] - [62, 62] - [61, 61] - [60, 60] - [59, 59] - [58, 58] - [57, 57] - [56, 56] - [55, 55] - [54, 54] - [53, 53] - [52, 52] - [51, 51] - [50, 50] - [49, 49] - [48, 48] - [47, 47] - [46, 46] - [45, 45] - [44, 44] - [43, 43] - [42, 42] - [41, 41] - [40, 40] - [39, 39] - [38, 38] - [37, 37] - [36, 36] - [35, 35] - [34, 34] - [33, 33] - [32, 32] - [31, 31] - [30, 30] - [29, 29] - [28, 28] - [27, 27] - [26, 26] - [25, 25] - [24, 24] - [23, 23] - [22, 22] - [21, 21] - [20, 20] - [19, 19] - [18, 18] - [17, 17] - [16, 16] - [15, 15] - [14, 14] - [13, 13] - [12, 12] - [11, 11] - [10, 10] - [9, 9] - [8, 8] - [7, 7] - [6, 6] - [5, 5] - [4, 4] - [3, 3] - [2, 2] - [1, 1] ... space:drop() --- ... -- select with box.tuple.new space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... 
index:select(box.tuple.new{}, {iterator = box.index.ALL}) --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... index:select(box.tuple.new{}, {iterator = box.index.GE}) --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... 
index:select(box.tuple.new(tostring(44)), {iterator = box.index.GE}) --- - - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... index:select(box.tuple.new{}, {iterator = box.index.GT}) --- - - ['1'] - ['10'] - ['100'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['2'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['3'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['4'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... 
index:select(box.tuple.new(tostring(44)), {iterator = box.index.GT}) --- - - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['5'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['6'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['7'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['8'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['9'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] ... index:select(box.tuple.new{}, {iterator = box.index.LE}) --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... 
index:select(box.tuple.new(tostring(77)), {iterator = box.index.LE}) --- - - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... index:select(box.tuple.new{}, {iterator = box.index.LT}) --- - - ['99'] - ['98'] - ['97'] - ['96'] - ['95'] - ['94'] - ['93'] - ['92'] - ['91'] - ['90'] - ['9'] - ['89'] - ['88'] - ['87'] - ['86'] - ['85'] - ['84'] - ['83'] - ['82'] - ['81'] - ['80'] - ['8'] - ['79'] - ['78'] - ['77'] - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... 
index:select(box.tuple.new(tostring(77)), {iterator = box.index.LT}) --- - - ['76'] - ['75'] - ['74'] - ['73'] - ['72'] - ['71'] - ['70'] - ['7'] - ['69'] - ['68'] - ['67'] - ['66'] - ['65'] - ['64'] - ['63'] - ['62'] - ['61'] - ['60'] - ['6'] - ['59'] - ['58'] - ['57'] - ['56'] - ['55'] - ['54'] - ['53'] - ['52'] - ['51'] - ['50'] - ['5'] - ['49'] - ['48'] - ['47'] - ['46'] - ['45'] - ['44'] - ['43'] - ['42'] - ['41'] - ['40'] - ['4'] - ['39'] - ['38'] - ['37'] - ['36'] - ['35'] - ['34'] - ['33'] - ['32'] - ['31'] - ['30'] - ['3'] - ['29'] - ['28'] - ['27'] - ['26'] - ['25'] - ['24'] - ['23'] - ['22'] - ['21'] - ['20'] - ['2'] - ['19'] - ['18'] - ['17'] - ['16'] - ['15'] - ['14'] - ['13'] - ['12'] - ['11'] - ['100'] - ['10'] - ['1'] ... space:drop() --- ... -- select multiple indices -- two indices space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'number'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string'} }) --- ... space:insert({1, 'a'}) --- - [1, 'a'] ... space:insert({2, 'd'}) --- - [2, 'd'] ... space:insert({3, 'c'}) --- - [3, 'c'] ... space:insert({4, 'b'}) --- - [4, 'b'] ... space:insert({5, 'bbbb'}) --- - [5, 'bbbb'] ... space:insert({5, 'cbcb'}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:insert({6, 'bbbb'}) --- - error: Duplicate key exists in unique index 'secondary' in space 'test' ... space:insert({-45.2, 'waerwe'}) --- - [-45.2, 'waerwe'] ... index1:select{} --- - - [-45.2, 'waerwe'] - [1, 'a'] - [2, 'd'] - [3, 'c'] - [4, 'b'] - [5, 'bbbb'] ... index2:select{} --- - - [1, 'a'] - [4, 'b'] - [5, 'bbbb'] - [3, 'c'] - [2, 'd'] - [-45.2, 'waerwe'] ... space:get{5} --- - [5, 'bbbb'] ... index1:get{5} --- - [5, 'bbbb'] ... space:select{5} --- - - [5, 'bbbb'] ... index1:get{5} --- - [5, 'bbbb'] ... index2:get{'a'} --- - [1, 'a'] ... index2:select{'a'} --- - - [1, 'a'] ... 
space:drop() --- ... -- three indices space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'integer'} }) --- ... index3 = space:create_index('third', { type = 'tree', parts = {3, 'integer'} }) --- ... space:insert({1, -30, 9}) --- - [1, -30, 9] ... space:insert({5, 234, 9789}) --- - [5, 234, 9789] ... space:insert({10, -56, 212}) --- - [10, -56, 212] ... space:insert({2, 762, 1235}) --- - [2, 762, 1235] ... space:insert({4, 7873, 67545}) --- - [4, 7873, 67545] ... space:insert({9, 103, 1232}) --- - [9, 103, 1232] ... index1:select{} --- - - [1, -30, 9] - [2, 762, 1235] - [4, 7873, 67545] - [5, 234, 9789] - [9, 103, 1232] - [10, -56, 212] ... index2:select{} --- - - [10, -56, 212] - [1, -30, 9] - [9, 103, 1232] - [5, 234, 9789] - [2, 762, 1235] - [4, 7873, 67545] ... index3:select{} --- - - [1, -30, 9] - [10, -56, 212] - [9, 103, 1232] - [2, 762, 1235] - [5, 234, 9789] - [4, 7873, 67545] ... index1:select{10} --- - - [10, -56, 212] ... index1:get{9} --- - [9, 103, 1232] ... index2:select{-56} --- - - [10, -56, 212] ... index2:select{-57} --- - [] ... index2:get{103} --- - [9, 103, 1232] ... index2:get{104} --- ... index3:get{9} --- - [1, -30, 9] ... index3:select{1235} --- - - [2, 762, 1235] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/null.test.lua0000664000000000000000000002707613306565107021107 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') msgpack = require('msgpack') -- -- gh-1557: box.NULL in indexes. -- box.NULL == msgpack.NULL box.NULL == nil msgpack.decode(msgpack.encode({box.NULL})) format = {} format[1] = { name = 'field1', type = 'unsigned' } format[2] = { name = 'field2', type = 'unsigned', is_nullable = true } s = box.schema.space.create('test', { engine = engine, format = format }) -- Bad nullable value. 
format[2].is_nullable = 100 s:format(format) -- Fail. -- Primary can not be nullable. parts = {} parts[1] = {field = 2, type = 'unsigned', is_nullable = true} pk = s:create_index('pk', { parts = parts }) -- Fail. pk = s:create_index('pk') -- Not TREE nullable. -- Do not print errmsg, because Vinyl's one is different - it does -- not support HASH. ok = pcall(s.create_index, s, 'sk', { parts = parts, type = 'hash' }) -- Fail. ok -- Conflict of is_nullable in format and in parts. parts[1].is_nullable = false sk = s:create_index('sk', { parts = parts }) -- Fail. -- Try skip nullable in format and specify in part. parts[1].is_nullable = true sk = s:create_index('sk', { parts = parts }) -- Ok. format[2].is_nullable = nil s:format(format) -- Fail. sk:drop() -- Try to set nullable in part with no format. s:format({}) sk = s:create_index('sk', { parts = parts }) -- And then set format with no nullable. s:format(format) -- Fail. format[2].is_nullable = true s:format(format) -- Ok. -- Test insert. s:insert{1, 1} s:insert{2, box.NULL} s:insert{3, box.NULL} s:insert{4, 1} -- Fail. s:insert{4, 4} s:insert{5, box.NULL} pk:select{} sk:select{} -- Test exact match. sk:get({1}) sk:get({box.NULL}) -- Fail. sk:update({1}, {}) sk:update({box.NULL}, {}) -- Fail. _ = sk:delete({1}) sk:delete({box.NULL}) -- Fail. s:insert({1, 1}) -- Test iterators. sk:select{box.NULL} sk:select({box.NULL}, {iterator = 'LE'}) sk:select({box.NULL}, {iterator = 'LT'}) sk:select({box.NULL}, {iterator = 'GE'}) sk:select({box.NULL}, {iterator = 'GT'}) _ = sk:delete{box.NULL} sk:select{} pk:select{} -- Test snapshot during iterator (Vinyl restore). create_iterator = require('utils').create_iterator iter = create_iterator(sk, {box.NULL}) iter.next() box.snapshot() iter.iterate_over() sk:select{} pk:select{} -- Test replace. s:replace{2, 2} s:replace{3, box.NULL} -- no changes. s:replace{6, box.NULL} pk:select{} sk:select{} -- Test not unique indexes. 
s:truncate() sk:drop() sk = s:create_index('sk', { parts = parts, unique = false }) s:insert{1, 1} s:insert{2, box.NULL} s:insert{3, box.NULL} s:insert{4, 1} s:insert{5, box.NULL} pk:select{} sk:select{} -- Test several secondary indexes. s:truncate() format[2].is_nullable = true format[3] = { name = 'field3', type = 'unsigned', is_nullable = true } s:format(format) parts[1].field = 3 sk2 = s:create_index('sk2', { parts = parts }) s:replace{4, 3, 4} s:replace{3, 3, 3} s:replace{2, box.NULL, box.NULL} s:replace{1, box.NULL, 1} s:replace{0, 0, box.NULL} pk:select{} sk:select{} sk2:select{} -- Check duplicate conflict on replace. s:replace{4, 4, 3} -- fail s:replace{4, 4, box.NULL} -- ok pk:select{} sk:select{} sk2:select{} _ = pk:delete{2} pk:select{} sk:select{} sk2:select{} s:drop() -- -- gh-2880: allow to store less field count than specified in a -- format. -- format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'unsigned'} format[3] = {name = 'field3'} format[4] = {name = 'field4', is_nullable = true} s = box.schema.create_space('test', {engine = engine, format = format}) pk = s:create_index('pk') sk = s:create_index('sk', {parts = {2, 'unsigned'}}) s:replace{1, 2} -- error t1 = s:replace{2, 3, 4} t2 = s:replace{3, 4, 5, 6} t1.field1, t1.field2, t1.field3, t1.field4 t2.field1, t2.field2, t2.field3, t2.field4 -- Ensure the tuple is read ok from disk in a case of vinyl. if engine == 'vinyl' then box.snapshot() end s:select{2} s:drop() -- Check the case when not contiguous format tail is nullable. 
format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'unsigned'} format[3] = {name = 'field3'} format[4] = {name = 'field4', is_nullable = true} format[5] = {name = 'field5'} format[6] = {name = 'field6', is_nullable = true} format[7] = {name = 'field7', is_nullable = true} s = box.schema.create_space('test', {engine = engine, format = format}) pk = s:create_index('pk') sk = s:create_index('sk', {parts = {2, 'unsigned'}}) s:replace{1, 2} -- error s:replace{2, 3, 4} -- error s:replace{3, 4, 5, 6} -- error t1 = s:replace{4, 5, 6, 7, 8} t2 = s:replace{5, 6, 7, 8, 9, 10} t3 = s:replace{6, 7, 8, 9, 10, 11, 12} t1.field1, t1.field2, t1.field3, t1.field4, t1.field5, t1.field6, t1.field7 t2.field1, t2.field2, t2.field3, t2.field4, t2.field5, t2.field6, t2.field7 t3.field1, t3.field2, t3.field3, t3.field4, t3.field5, t3.field6, t3.field7 s:select{} s:drop() -- Check nullable indexes with other types s = box.schema.space.create('test', {engine = engine}) _ = s:create_index('pk') _ = s:create_index('i1', {parts = {{2, 'string', is_nullable = true}}}) _ = s:create_index('i2', {parts = {{3, 'number', is_nullable = true}}}) _ = s:create_index('i3', {parts = {{4, 'integer', is_nullable = true}}}) _ = s:create_index('i4', {parts = {{5, 'boolean', is_nullable = true}}, unique = false}) _ = s:create_index('i5', {parts = {{6, 'scalar', is_nullable = true}}}) _ = s:auto_increment{box.NULL, 1.11, -111, false, '111'} _ = s:auto_increment{'222', box.NULL, -222, true, 222} _ = s:auto_increment{'333', 3.33, box.NULL, false, 3.33} _ = s:auto_increment{'444', 4.44, -444, box.NULL, true} _ = s:auto_increment{'555', 5.55, -555, false, box.NULL} box.snapshot() _ = s:auto_increment{box.NULL, 6.66, -666, true, '666'} _ = s:auto_increment{'777', box.NULL, -777, false, 777} _ = s:auto_increment{'888', 8.88, box.NULL, true, 8.88} _ = s:auto_increment{'999', 9.99, -999, box.NULL, false} _ = s:auto_increment{'000', 0.00, -000, true, box.NULL} 
s.index.i1:select() s.index.i2:select() s.index.i3:select() s.index.i4:select() s.index.i5:select() s:drop() -- -- gh-2973: allow to enable nullable on a non-empty space. -- format = {} format[1] = {name = 'field1', type = 'unsigned'} format[2] = {name = 'field2', type = 'unsigned'} s = box.schema.create_space('test', {format = format}) pk = s:create_index('pk') s:replace{1, 1} s:replace{100, 100} s:replace{50, 50} s:replace{25, box.NULL} format[2].is_nullable = true s:format(format) s:replace{25, box.NULL} s:replace{10, box.NULL} s:replace{150, box.NULL} s:select{} s:drop() s = box.schema.create_space('test') pk = s:create_index('pk') sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = false}}}) s:replace{1, 1} s:replace{100, 100} s:replace{50, 50} s:replace{25, box.NULL} sk:alter({parts = {{2, 'unsigned', is_nullable = true}}}) s:replace{25, box.NULL} s:replace{10, box.NULL} s:replace{150, box.NULL} sk:select{} s:drop() -- -- gh-2988: allow absense of tail nullable indexed fields. -- s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk') sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) -- Test tuple_compare_slowpath, tuple_compare_with_key_slowpath. s:replace{} -- Fail -- Compare full vs not full. s:replace{2} s:replace{1, 2} s:select{} sk:select{box.NULL} sk:select{2} -- Compare not full vs full. s:replace{4, 5} s:replace{3} s:select{} sk:select{box.NULL} sk:select{5} -- Compare extended keys. s:replace{7} s:replace{6} s:select{} sk:select{box.NULL} sk:select{} -- Test tuple extract key during dump for vinyl. box.snapshot() sk:select{} s:select{} -- Test tuple_compare_sequential_nullable, -- tuple_compare_with_key_sequential. 
s:drop() s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk') parts = {} parts[1] = {1, 'unsigned'} parts[2] = {2, 'unsigned', is_nullable = true} parts[3] = {3, 'unsigned', is_nullable = true} sk = s:create_index('sk', {parts = parts}) -- Compare full vs not full. s:replace{1, 2, 3} s:replace{3} s:replace{2, 3} sk:select{} sk:select{3, box.NULL} sk:select{3, box.NULL, box.NULL} sk:select{2} sk:select{2, 3} sk:select{3, 100} sk:select{3, box.NULL, 100} sk:select({3, box.NULL}, {iterator = 'GE'}) sk:select({3, box.NULL}, {iterator = 'LE'}) s:select{} -- Test tuple extract key for vinyl. box.snapshot() sk:select{} sk:select{3, box.NULL} sk:select{3, box.NULL, box.NULL} sk:select{2} sk:select{2, 3} sk:select{3, 100} sk:select{3, box.NULL, 100} sk:select({3, box.NULL}, {iterator = 'GE'}) sk:select({3, box.NULL}, {iterator = 'LE'}) -- Test a tuple_compare_sequential() for a case, when there are -- two equal tuples, but in one of them field count < unique field -- count. s:replace{1, box.NULL} s:replace{1, box.NULL, box.NULL} s:select{1} -- -- Partially sequential keys. See tuple_extract_key.cc and -- contains_sequential_parts template flag. 
-- s:drop() s = box.schema.space.create('test', {engine = engine}) pk = s:create_index('pk') parts = {} parts[1] = {2, 'unsigned', is_nullable = true} parts[2] = {3, 'unsigned', is_nullable = true} parts[3] = {5, 'unsigned', is_nullable = true} parts[4] = {6, 'unsigned', is_nullable = true} parts[5] = {4, 'unsigned', is_nullable = true} parts[6] = {7, 'unsigned', is_nullable = true} sk = s:create_index('sk', {parts = parts}) s:insert{1, 1, 1, 1, 1, 1, 1} s:insert{8, 1, 1, 1, 1, box.NULL} s:insert{9, 1, 1, 1, box.NULL} s:insert{6, 6} s:insert{10, 6, box.NULL} s:insert{2, 2, 2, 2, 2, 2} s:insert{7} s:insert{5, 5, 5} s:insert{3, 5, box.NULL, box.NULL, box.NULL} s:insert{4, 5, 5, 5, box.NULL} s:insert{11, 4, 4, 4} s:insert{12, 4, box.NULL, 4} s:insert{13, 3, 3, 3, 3} s:insert{14, box.NULL, 3, box.NULL, 3} s:select{} sk:select{} sk:select{5, 5, box.NULL} sk:select{5, 5, box.NULL, 100} sk:select({7, box.NULL}, {iterator = 'LT'}) box.snapshot() sk:select{} sk:select{5, 5, box.NULL} sk:select{5, 5, box.NULL, 100} sk:select({7, box.NULL}, {iterator = 'LT'}) s:drop() -- -- The main case of absent nullable fields - create an index over -- them on not empty space (available on memtx only). -- s = box.schema.space.create('test', {engine = 'memtx'}) pk = s:create_index('pk') s:replace{1} s:replace{2} s:replace{3} sk = s:create_index('sk', {parts = {{2, 'unsigned', is_nullable = true}}}) s:replace{4} s:replace{5, 6} s:replace{7, 8} s:replace{9, box.NULL} s:select{} sk:select{} sk:select{box.NULL} s:drop() -- -- The complex case: when an index part is_nullable is set to, -- false and it changes min_field_count, this part must become -- optional and turn on comparators for optional fields. See the -- big comment in alter.cc in index_def_new_from_tuple(). 
-- s = box.schema.create_space('test', {engine = 'memtx'}) pk = s:create_index('pk') sk = s:create_index('sk', {parts = {2, 'unsigned'}}) s:replace{1, 1} s:replace{2, box.NULL} s:select{} sk:alter({parts = {{2, 'unsigned', is_nullable = true}}}) s:replace{20, box.NULL} sk:select{} s:replace{10} sk:select{} s:replace{40} sk:select{} s:drop() -- -- Check that if an index alter makes a field be optional, and -- this field is used in another index, then this another index -- is updated too. Case of @locker. -- s = box.schema.space.create('test', {engine = 'memtx'}) _ = s:create_index('pk') i1 = s:create_index('i1', {parts = {2, 'unsigned', 3, 'unsigned'}}) i2 = s:create_index('i2', {parts = {3, 'unsigned', 2, 'unsigned'}}) i1:alter{parts = {{2, 'unsigned'}, {3, 'unsigned', is_nullable = true}}} -- i2 alter makes i1 contain optional part. Its key_def and -- comparators must be updated. i2:alter{parts = {{3, 'unsigned', is_nullable = true}, {2, 'unsigned'}}} s:insert{1, 1} s:insert{100, 100} s:insert{50, 50} s:insert{25, 25, 25} s:insert{75, 75, 75} s:select{} i1:select{} i2:select{} i2:select{box.NULL, 50} i2:select{} s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/tree.test.lua0000664000000000000000000004366413306560010021061 0ustar rootroottest_run = require('test_run').new() engine = test_run:get_cfg('engine') test_run:cmd("push filter '"..engine.."' to 'engine'") -------------------------------------------------------------------------------- -- Prefix search (test partially specified keys in TREE indexes) -------------------------------------------------------------------------------- s1 = box.schema.space.create('tree_prefix_search', { engine = engine }) _ = s1:create_index('primary', { type = 'tree', parts = {1, 'string'}}) _ = s1:replace{''} _ = s1:replace{'abcd'} _ = s1:replace{'abcda'} _ = s1:replace{'abcda_'} _ = s1:replace{'abcdb'} _ = s1:replace{'abcdb_'} _ = s1:replace{'abcdb__'} _ = s1:replace{'abcdb___'} _ = s1:replace{'abcdc'} _ = 
s1:replace{'abcdc_'} s1.index.primary:select('abcdb', { iterator = 'GE' }) s1.index.primary:select('', { iterator = 'GE' }) s1.index.primary:select('', { iterator = 'GT' }) s1.index.primary:select('', { iterator = 'LE' }) s1.index.primary:select('', { iterator = 'LT' }) s1:drop() s1 = nil ------------------------------------------------------------------------------- -- single-part (unsigned) ------------------------------------------------------------------------------- space = box.schema.space.create('uint', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}}) for i=1,9 do space:replace{i} end pk:select({}, { iterator = 'ALL' }) pk:select({}, { iterator = 'EQ' }) pk:select({}, { iterator = 'REQ' }) pk:select({}, { iterator = 'GE' }) pk:select({}, { iterator = 'GT' }) pk:select({}, { iterator = 'LE' }) pk:select({}, { iterator = 'LT' }) pk:select({0}, { iterator = 'EQ' }) pk:select({0}, { iterator = 'REQ' }) pk:select({0}, { iterator = 'LE' }) pk:select({0}, { iterator = 'LT' }) pk:select({1}, { iterator = 'EQ' }) pk:select({1}, { iterator = 'REQ' }) pk:select({1}, { iterator = 'LE' }) pk:select({1}, { iterator = 'LT' }) pk:select({5}, { iterator = 'EQ' }) pk:select({5}, { iterator = 'REQ' }) pk:select({5}, { iterator = 'GE' }) pk:select({5}, { iterator = 'GT' }) pk:select({5}, { iterator = 'LE' }) pk:select({5}, { iterator = 'LT' }) pk:select({9}, { iterator = 'EQ' }) pk:select({9}, { iterator = 'REQ' }) pk:select({9}, { iterator = 'GE' }) pk:select({9}, { iterator = 'GT' }) pk:select({10}, { iterator = 'EQ' }) pk:select({10}, { iterator = 'REQ' }) pk:select({10}, { iterator = 'GE' }) pk:select({10}, { iterator = 'GT' }) pk:get({}) pk:get({0}) pk:get({5}) pk:get({10}) pk:get({10, 15}) space:drop() space = nil pk = nil ------------------------------------------------------------------------------- -- single-part sparse (unsigned) ------------------------------------------------------------------------------- space = 
box.schema.space.create('sparse_uint', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {3, 'unsigned'}}) for i=1,9 do space:replace{'', 0, i} end space:insert{'', 0, 1} -- conflict pk:select({}, { iterator = 'ALL' }) pk:select({}, { iterator = 'EQ' }) pk:select({}, { iterator = 'REQ' }) pk:select({}, { iterator = 'GE' }) pk:select({}, { iterator = 'GT' }) pk:select({}, { iterator = 'LE' }) pk:select({}, { iterator = 'LT' }) pk:select({0}, { iterator = 'EQ' }) pk:select({0}, { iterator = 'REQ' }) pk:select({0}, { iterator = 'LE' }) pk:select({0}, { iterator = 'LT' }) pk:select({1}, { iterator = 'EQ' }) pk:select({1}, { iterator = 'REQ' }) pk:select({1}, { iterator = 'LE' }) pk:select({1}, { iterator = 'LT' }) pk:select({5}, { iterator = 'EQ' }) pk:select({5}, { iterator = 'REQ' }) pk:select({5}, { iterator = 'GE' }) pk:select({5}, { iterator = 'GT' }) pk:select({5}, { iterator = 'LE' }) pk:select({5}, { iterator = 'LT' }) pk:select({9}, { iterator = 'EQ' }) pk:select({9}, { iterator = 'REQ' }) pk:select({9}, { iterator = 'GE' }) pk:select({9}, { iterator = 'GT' }) pk:select({10}, { iterator = 'EQ' }) pk:select({10}, { iterator = 'REQ' }) pk:select({10}, { iterator = 'GE' }) pk:select({10}, { iterator = 'GT' }) pk:get({}) pk:get({0}) pk:get({5}) pk:get({10}) pk:get({10, 15}) space:drop() space = nil pk = nil ------------------------------------------------------------------------------- -- single-part (string) ------------------------------------------------------------------------------- space = box.schema.space.create('string', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'string'}}) for i=1,9 do space:replace{'0'..i} end pk:select({}, { iterator = 'ALL' }) pk:select({}, { iterator = 'EQ' }) pk:select({}, { iterator = 'REQ' }) pk:select({}, { iterator = 'GE' }) pk:select({}, { iterator = 'GT' }) pk:select({}, { iterator = 'LE' }) pk:select({}, { iterator = 'LT' }) pk:select({'00'}, { iterator 
= 'EQ' }) pk:select({'00'}, { iterator = 'REQ' }) pk:select({'00'}, { iterator = 'LE' }) pk:select({'00'}, { iterator = 'LT' }) pk:select({'01'}, { iterator = 'EQ' }) pk:select({'01'}, { iterator = 'REQ' }) pk:select({'01'}, { iterator = 'LE' }) pk:select({'01'}, { iterator = 'LT' }) pk:select({'05'}, { iterator = 'EQ' }) pk:select({'05'}, { iterator = 'REQ' }) pk:select({'05'}, { iterator = 'GE' }) pk:select({'05'}, { iterator = 'GT' }) pk:select({'05'}, { iterator = 'LE' }) pk:select({'05'}, { iterator = 'LT' }) pk:select({'09'}, { iterator = 'EQ' }) pk:select({'09'}, { iterator = 'REQ' }) pk:select({'09'}, { iterator = 'GE' }) pk:select({'09'}, { iterator = 'GT' }) pk:select({'10'}, { iterator = 'EQ' }) pk:select({'10'}, { iterator = 'REQ' }) pk:select({'10'}, { iterator = 'GE' }) pk:select({'10'}, { iterator = 'GT' }) pk:get({}) pk:get({'0'}) pk:get({'5'}) pk:get({'10'}) pk:get({'10', '15'}) space:drop() space = nil pk = nil ------------------------------------------------------------------------------- -- multi-part (unsigned + string) ------------------------------------------------------------------------------- space = box.schema.space.create('uint_str', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'string'}}) for i=1,9 do for j=1,3 do space:replace({i, '0'..j}) end end -- -- one part -- pk:select({}, { iterator = 'ALL' }) pk:select({}, { iterator = 'EQ' }) pk:select({}, { iterator = 'REQ' }) pk:select({}, { iterator = 'GE' }) pk:select({}, { iterator = 'GT' }) pk:select({}, { iterator = 'LE' }) pk:select({}, { iterator = 'LT' }) pk:select({0}, { iterator = 'EQ' }) pk:select({0}, { iterator = 'REQ' }) pk:select({0}, { iterator = 'LE' }) pk:select({0}, { iterator = 'LT' }) pk:select({1}, { iterator = 'EQ' }) pk:select({1}, { iterator = 'REQ' }) pk:select({1}, { iterator = 'LE' }) pk:select({1}, { iterator = 'LT' }) pk:select({9}, { iterator = 'EQ' }) pk:select({9}, { iterator = 'REQ' }) pk:select({9}, { 
iterator = 'GE' }) pk:select({9}, { iterator = 'GT' }) pk:select({10}, { iterator = 'EQ' }) pk:select({10}, { iterator = 'REQ' }) pk:select({10}, { iterator = 'GE' }) pk:select({10}, { iterator = 'GT' }) pk:get({}) pk:get({'5'}) -- -- two parts -- pk:select({5, '00'}, { iterator = 'EQ' }) pk:select({5, '00'}, { iterator = 'REQ' }) pk:select({5, '00'}, { iterator = 'GE' }) pk:select({5, '00'}, { iterator = 'GT' }) pk:select({5, '00'}, { iterator = 'LE' }) pk:select({5, '00'}, { iterator = 'LT' }) pk:select({5, '01'}, { iterator = 'EQ' }) pk:select({5, '01'}, { iterator = 'REQ' }) pk:select({5, '01'}, { iterator = 'GE' }) pk:select({5, '01'}, { iterator = 'GT' }) pk:select({5, '01'}, { iterator = 'LE' }) pk:select({5, '01'}, { iterator = 'LT' }) pk:select({5, '03'}, { iterator = 'EQ' }) pk:select({5, '03'}, { iterator = 'REQ' }) pk:select({5, '03'}, { iterator = 'GE' }) pk:select({5, '03'}, { iterator = 'GT' }) pk:select({5, '03'}, { iterator = 'LE' }) pk:select({5, '03'}, { iterator = 'LT' }) pk:select({5, '04'}, { iterator = 'EQ' }) pk:select({5, '04'}, { iterator = 'REQ' }) pk:select({5, '04'}, { iterator = 'GE' }) pk:select({5, '04'}, { iterator = 'GT' }) pk:select({5, '04'}, { iterator = 'LE' }) pk:select({5, '04'}, { iterator = 'LT' }) pk:get({4, '05'}) pk:get({4, '03'}) pk:get({4, '03', 100}) space:drop() space = nil pk = nil ------------------------------------------------------------------------------- -- multi-part (string + unsigned) ------------------------------------------------------------------------------- space = box.schema.space.create('str_uint', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'string', 2, 'unsigned'}}) for i=1,9 do for j=1,3 do space:replace({'0'..i, j}) end end -- -- one part -- pk:select({}, { iterator = 'ALL' }) pk:select({}, { iterator = 'EQ' }) pk:select({}, { iterator = 'REQ' }) pk:select({}, { iterator = 'GE' }) pk:select({}, { iterator = 'GT' }) pk:select({}, { iterator = 'LE' }) 
pk:select({}, { iterator = 'LT' }) pk:select({'00'}, { iterator = 'EQ' }) pk:select({'00'}, { iterator = 'REQ' }) pk:select({'00'}, { iterator = 'LE' }) pk:select({'00'}, { iterator = 'LT' }) pk:select({'01'}, { iterator = 'EQ' }) pk:select({'01'}, { iterator = 'REQ' }) pk:select({'01'}, { iterator = 'LE' }) pk:select({'01'}, { iterator = 'LT' }) pk:select({'09'}, { iterator = 'EQ' }) pk:select({'09'}, { iterator = 'REQ' }) pk:select({'09'}, { iterator = 'GE' }) pk:select({'09'}, { iterator = 'GT' }) pk:select({'10'}, { iterator = 'EQ' }) pk:select({'10'}, { iterator = 'REQ' }) pk:select({'10'}, { iterator = 'GE' }) pk:select({'10'}, { iterator = 'GT' }) pk:get({}) pk:get({'00'}) pk:get({'05'}) pk:get({'10'}) -- -- two parts -- pk:select({'05', 0}, { iterator = 'EQ' }) pk:select({'05', 0}, { iterator = 'REQ' }) pk:select({'05', 0}, { iterator = 'GE' }) pk:select({'05', 0}, { iterator = 'GT' }) pk:select({'05', 0}, { iterator = 'LE' }) pk:select({'05', 0}, { iterator = 'LT' }) pk:select({'05', 1}, { iterator = 'EQ' }) pk:select({'05', 1}, { iterator = 'REQ' }) pk:select({'05', 1}, { iterator = 'GE' }) pk:select({'05', 1}, { iterator = 'GT' }) pk:select({'05', 1}, { iterator = 'LE' }) pk:select({'05', 1}, { iterator = 'LT' }) pk:select({'05', 3}, { iterator = 'EQ' }) pk:select({'05', 3}, { iterator = 'REQ' }) pk:select({'05', 3}, { iterator = 'GE' }) pk:select({'05', 3}, { iterator = 'GT' }) pk:select({'05', 3}, { iterator = 'LE' }) pk:select({'05', 3}, { iterator = 'LT' }) pk:select({'05', 4}, { iterator = 'EQ' }) pk:select({'05', 4}, { iterator = 'REQ' }) pk:select({'05', 4}, { iterator = 'GE' }) pk:select({'05', 4}, { iterator = 'GT' }) pk:select({'05', 4}, { iterator = 'LE' }) pk:select({'05', 4}, { iterator = 'LT' }) pk:get({'04', 5}) pk:get({'04', 3}) pk:get({'04', 3, 100}) space:drop() space = nil pk = nil ------------------------------------------------------------------------------- -- multi-part sparse (string + unsigned) 
------------------------------------------------------------------------------- space = box.schema.space.create('sparse_str_uint', { engine = engine }) pk = space:create_index('primary', { type = 'tree', parts = {3, 'string', 1, 'unsigned'}}) for i=1,9 do for j=1,3 do space:replace({i, '', '0'..j}) end end -- conflicts space:insert({9, '', '01'}) -- -- one part -- pk:select({}, { iterator = 'ALL' }) pk:select({}, { iterator = 'EQ' }) pk:select({}, { iterator = 'REQ' }) pk:select({}, { iterator = 'GE' }) pk:select({}, { iterator = 'GT' }) pk:select({}, { iterator = 'LE' }) pk:select({}, { iterator = 'LT' }) pk:select({'00'}, { iterator = 'EQ' }) pk:select({'00'}, { iterator = 'REQ' }) pk:select({'00'}, { iterator = 'LE' }) pk:select({'00'}, { iterator = 'LT' }) pk:select({'01'}, { iterator = 'EQ' }) pk:select({'01'}, { iterator = 'REQ' }) pk:select({'01'}, { iterator = 'LE' }) pk:select({'01'}, { iterator = 'LT' }) pk:select({'09'}, { iterator = 'EQ' }) pk:select({'09'}, { iterator = 'REQ' }) pk:select({'09'}, { iterator = 'GE' }) pk:select({'09'}, { iterator = 'GT' }) pk:select({'10'}, { iterator = 'EQ' }) pk:select({'10'}, { iterator = 'REQ' }) pk:select({'10'}, { iterator = 'GE' }) pk:select({'10'}, { iterator = 'GT' }) pk:get({}) pk:get({'00'}) pk:get({'05'}) pk:get({'10'}) -- -- two parts -- pk:select({'05', 0}, { iterator = 'EQ' }) pk:select({'05', 0}, { iterator = 'REQ' }) pk:select({'05', 0}, { iterator = 'GE' }) pk:select({'05', 0}, { iterator = 'GT' }) pk:select({'05', 0}, { iterator = 'LE' }) pk:select({'05', 0}, { iterator = 'LT' }) pk:select({'05', 1}, { iterator = 'EQ' }) pk:select({'05', 1}, { iterator = 'REQ' }) pk:select({'05', 1}, { iterator = 'GE' }) pk:select({'05', 1}, { iterator = 'GT' }) pk:select({'05', 1}, { iterator = 'LE' }) pk:select({'05', 1}, { iterator = 'LT' }) pk:select({'05', 3}, { iterator = 'EQ' }) pk:select({'05', 3}, { iterator = 'REQ' }) pk:select({'05', 3}, { iterator = 'GE' }) pk:select({'05', 3}, { iterator = 'GT' }) 
pk:select({'05', 3}, { iterator = 'LE' }) pk:select({'05', 3}, { iterator = 'LT' }) pk:select({'05', 4}, { iterator = 'EQ' }) pk:select({'05', 4}, { iterator = 'REQ' }) pk:select({'05', 4}, { iterator = 'GE' }) pk:select({'05', 4}, { iterator = 'GT' }) pk:select({'05', 4}, { iterator = 'LE' }) pk:select({'05', 4}, { iterator = 'LT' }) pk:get({'04', 5}) pk:get({'04', 3}) pk:get({'04', 3, 100}) space:drop() space = nil pk = nil ------------------------------------------------------------------------------- -- multiple indices ------------------------------------------------------------------------------- space = box.schema.space.create('tweedledum', { engine = engine }) i0 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) i1 = space:create_index('i1', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) i2 = space:create_index('i2', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) i3 = space:create_index('i3', { type = 'tree', parts = {4, 'string', 5, 'string'}, unique = false }) i4 = space:create_index('i4', { type = 'tree', parts = {7, 'string', 6, 'string'}, unique = false }) i5 = space:create_index('i5', { type = 'tree', parts = {9, 'unsigned'}, unique = false }) i6 = space:create_index('i6', { type = 'tree', parts = {7, 'string', 6, 'string', 4, 'string', 5, 'string', 9, 'unsigned'}, unique = true }) space:insert{0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000} space:insert{1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001} space:insert{2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002} space:insert{3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003} space:insert{4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004} space:insert{5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005} space:insert{6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} space:insert{7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 
'foo', 2007} space:insert{8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} space:insert{9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} -- In non-unique indexes select output order is undefined, -- so it's better to additionally sort output to receive same order every time. function sort_cmp(a, b) return a[1] < b[1] and true or false end function sort(t) table.sort(t, sort_cmp) return t end space.index['primary']:get{1} sort(space.index['i1']:select{2}) sort(space.index[2]:select({300})) #space.index['i3']:select({'Joe', 'Sixpack'}) #space.index['i3']:select('John') #space.index['i4']:select('A Pipe') {sort(space.index['i4']:select{'Miller Genuine Draft', 'Drinks'})} sort(space.index['i5']:select{2007}) sort(space.index[6]:select{'Miller Genuine Draft', 'Drinks'}) tmp = space:delete{6} tmp = space:delete{7} tmp = space:delete{8} tmp = space:delete{9} space:insert{6, 6ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} space:insert{7, 7ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} space:insert{8, 8ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} space:insert{9, 9ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} sort(space.index['i1']:select{6ULL}) sort(space.index['i1']:select{6}) sort(space.index['i2']:select(400ULL)) sort(space.index['i2']:select(400)) sort(space:select{}) -- Test incorrect keys - supplied key field type does not match index type -- https://bugs.launchpad.net/tarantool/+bug/1072624 space:insert{'', 1, 2, '', '', '', '', '', 0} space:insert{'xxxxxxxx', 1, 2, '', '', '', '', '', 0} space:insert{1, '', 2, '', '', '', '', '', 0} space:insert{1, 'xxxxxxxxxxx', 2, '', '', '', '', '', 0} space:drop() sort = nil sort_cmp = nil ------------------------------------------------------------------------------- -- gh-1467: invalid iterator type ------------------------------------------------------------------------------- space = box.schema.space.create('invalid', { engine = 
engine }) pk = space:create_index('primary', { type = 'tree', parts = {1, 'string'}}) pk:select({}, {iterator = 'BITS_ALL_SET'}) space:drop() space = nil pk = nil -- Create and drop several indices space = box.schema.space.create('test', { engine = engine }) pk = space:create_index('primary') index2 = space:create_index('secondary', { parts = {2, 'str'} }) index3 = space:create_index('third', { parts = {3, 'str'}, unique = false }) index2:drop() #box.space._index:select{space.id} index4 = space:create_index('fourth', { parts = {2, 'str', 3, 'str'} }) index2 = space:create_index('secondary', { parts = {4, 'str'} }) #box.space._index:select{space.id} index3:drop() index2:drop() index4:drop() #box.space._index:select{space.id} index2 = space:create_index('secondary', { parts = {2, 'str'} }) index3 = space:create_index('third', { parts = {3, 'str'}, unique = false }) index4 = space:create_index('fourth', { parts = {2, 'str', 3, 'str'} }) #box.space._index:select{space.id} space:drop() --Collation s = box.schema.space.create('test') i1 = s:create_index('i1', { type = 'tree', parts = {{1, 'str', collation='unicode'}}, unique = true }) _ = s:replace{"ааа"} _ = s:replace{"еее"} _ = s:replace{"ёёё"} _ = s:replace{"жжж"} _ = s:replace{"яяя"} _ = s:replace{"ААА"} _ = s:replace{"ЯЯЯ"} -- good output s:select{} s:drop() ------------------------------------------------------------------------------- -- Cleanup ------------------------------------------------------------------------------- test_run:cmd("clear filter") engine = nil test_run = nil tarantool_1.9.1.26.g63eb81e3c/test/engine/replica_join.result0000664000000000000000000001654513306565107022351 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... index = test_run:get_cfg('index') --- ... box.schema.user.grant('guest', 'read,write,execute', 'universe') --- ... box.schema.user.grant('guest', 'replication') --- ... 
space = box.schema.space.create('test', { id = 99999, engine = engine }) --- ... _ = space:create_index('primary', { type = index}) --- ... _ = space:create_index('secondary', { type = index, unique = false, parts = {2, 'unsigned'}}) --- ... space2 = box.schema.space.create('test2', { id = 99998, engine = engine}) --- ... _ = space2:create_index('primary', { parts = {1, 'unsigned', 2, 'string'}}) --- ... space3 = box.schema.space.create('test3', { id = 99997, engine = engine}) --- ... _ = space3:create_index('primary', { parts = {{1, 'string', collation = 'unicode_ci'}}}) --- ... box.snapshot() --- - ok ... -- replica join test_run:cmd("create server replica with rpl_master=default, script='replication/replica.lua'") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:wait_lsn('replica', 'default') --- ... test_run:cmd('switch replica') --- - true ... box.space.test:select() --- - [] ... box.space.test.index.secondary:select() --- - [] ... box.space.test2:select() --- - [] ... box.space.test3:select() --- - [] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server replica") --- - true ... _ = test_run:cmd("cleanup server replica") --- ... -- new data for k = 1, 8 do box.space.test:insert{k, 17 - k} end --- ... for k = 16, 9, -1 do box.space.test:insert{k, 17 - k} end --- ... _ = box.space.test2:insert{1, 'test1', 1} --- ... _ = box.space.test2:upsert({1, 'test1', 10}, {{'=', 3, 10}}) --- ... _ = box.space.test2:upsert({2, 'test2', 20}, {{'=', 3, 20}}) --- ... _ = box.space.test2:insert{3, 'test3', 30} --- ... _ = box.space.test3:insert{'Ёж'} --- ... _ = box.space.test3:insert{'ель'} --- ... _ = box.space.test3:insert{'Юла'} --- ... _ = box.space.test3:insert{'Эль'} --- ... _ = box.space.test3:insert{'ёлка'} --- ... _ = box.space.test3:insert{'йогурт'} --- ... -- replica join test_run:cmd("deploy server replica") --- - true ... test_run:cmd("start server replica") --- - true ... 
test_run:wait_lsn('replica', 'default') --- ... test_run:cmd('switch replica') --- - true ... box.space.test:select() --- - - [1, 16] - [2, 15] - [3, 14] - [4, 13] - [5, 12] - [6, 11] - [7, 10] - [8, 9] - [9, 8] - [10, 7] - [11, 6] - [12, 5] - [13, 4] - [14, 3] - [15, 2] - [16, 1] ... box.space.test.index.secondary:select() --- - - [16, 1] - [15, 2] - [14, 3] - [13, 4] - [12, 5] - [11, 6] - [10, 7] - [9, 8] - [8, 9] - [7, 10] - [6, 11] - [5, 12] - [4, 13] - [3, 14] - [2, 15] - [1, 16] ... box.space.test2:select() --- - - [1, 'test1', 10] - [2, 'test2', 20] - [3, 'test3', 30] ... box.space.test3:select() --- - - ['Ёж'] - ['ёлка'] - ['ель'] - ['йогурт'] - ['Эль'] - ['Юла'] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server replica") --- - true ... _ = test_run:cmd("cleanup server replica") --- ... -- add snapshot box.snapshot() --- - ok ... -- replica join test_run:cmd("deploy server replica") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:wait_lsn('replica', 'default') --- ... test_run:cmd('switch replica') --- - true ... box.space.test:select() --- - - [1, 16] - [2, 15] - [3, 14] - [4, 13] - [5, 12] - [6, 11] - [7, 10] - [8, 9] - [9, 8] - [10, 7] - [11, 6] - [12, 5] - [13, 4] - [14, 3] - [15, 2] - [16, 1] ... box.space.test.index.secondary:select() --- - - [16, 1] - [15, 2] - [14, 3] - [13, 4] - [12, 5] - [11, 6] - [10, 7] - [9, 8] - [8, 9] - [7, 10] - [6, 11] - [5, 12] - [4, 13] - [3, 14] - [2, 15] - [1, 16] ... box.space.test2:select() --- - - [1, 'test1', 10] - [2, 'test2', 20] - [3, 'test3', 30] ... box.space.test3:select() --- - - ['Ёж'] - ['ёлка'] - ['ель'] - ['йогурт'] - ['Эль'] - ['Юла'] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server replica") --- - true ... _ = test_run:cmd("cleanup server replica") --- ... -- new data for k = 8, 1, -1 do box.space.test:update(k, {{'-', 2, 8}}) end --- ... for k = 9, 16 do box.space.test:delete(k) end --- ... 
_ = box.space.test2:upsert({1, 'test1', 11}, {{'+', 3, 1}}) --- ... _ = box.space.test2:update({2, 'test2'}, {{'+', 3, 2}}) --- ... _ = box.space.test2:delete{3, 'test3'} --- ... _ = box.space.test3:upsert({'ёж', 123}, {{'!', 2, 123}}) --- ... _ = box.space.test3:update('ЭЛЬ', {{'!', 2, 456}}) --- ... _ = box.space.test3:delete('ёлка') --- ... box.snapshot() --- - ok ... -- replica join test_run:cmd("deploy server replica") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:wait_lsn('replica', 'default') --- ... test_run:cmd('switch replica') --- - true ... box.space.test:select() --- - - [1, 8] - [2, 7] - [3, 6] - [4, 5] - [5, 4] - [6, 3] - [7, 2] - [8, 1] ... box.space.test.index.secondary:select() --- - - [8, 1] - [7, 2] - [6, 3] - [5, 4] - [4, 5] - [3, 6] - [2, 7] - [1, 8] ... box.space.test2:select() --- - - [1, 'test1', 11] - [2, 'test2', 22] ... box.space.test3:select() --- - - ['Ёж', 123] - ['ель'] - ['йогурт'] - ['Эль', 456] - ['Юла'] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server replica") --- - true ... _ = test_run:cmd("cleanup server replica") --- ... -- recreate space space:drop() --- ... space = box.schema.space.create('test', { id = 12345, engine = engine }) --- ... _ = space:create_index('primary', { type = index}) --- ... _ = space:insert{12345} --- ... -- truncate space space3:truncate() --- ... -- replica join test_run:cmd("deploy server replica") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:wait_lsn('replica', 'default') --- ... test_run:cmd('switch replica') --- - true ... box.space.test.id --- - 12345 ... box.space.test:select() --- - - [12345] ... box.space.test2:select() --- - - [1, 'test1', 11] - [2, 'test2', 22] ... box.space.test3:select() --- - [] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server replica") --- - true ... _ = test_run:cmd("cleanup server replica") --- ... space:drop() --- ... space2:drop() --- ... 
space3:drop() --- ... box.snapshot() --- - ok ... space = box.schema.space.create('test', { id = 99998, engine = engine }) --- ... index = space:create_index('primary', { type = test_run:get_cfg('index')}) --- ... for i = 0, 9 do space:insert({i, 'test' .. tostring(i)}) end --- ... test_run:cmd("deploy server replica") --- - true ... test_run:cmd("start server replica") --- - true ... test_run:wait_lsn('replica', 'default') --- ... test_run:cmd('restart server replica') --- - true ... test_run:cmd('switch replica') --- - true ... box.space.test:select() --- - - [0, 'test0'] - [1, 'test1'] - [2, 'test2'] - [3, 'test3'] - [4, 'test4'] - [5, 'test5'] - [6, 'test6'] - [7, 'test7'] - [8, 'test8'] - [9, 'test9'] ... test_run:cmd('switch default') --- - true ... test_run:cmd("stop server replica") --- - true ... _ = test_run:cmd("cleanup server replica") --- ... space:drop() --- ... box.snapshot() --- - ok ... box.schema.user.revoke('guest', 'replication') --- ... box.schema.user.revoke('guest', 'read,write,execute', 'universe') --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/replace.result0000664000000000000000000001640013306560010021300 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- replace (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1'] - ['2'] - ['3'] - ['4'] - ['5'] - ['6'] - ['7'] - ['8'] - ['9'] - ['10'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] - ['100'] ... _ = space:replace({tostring(7)}) --- ... space:drop() --- ... -- replace (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key})) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... 
_ = space:replace({7}) --- ... space:drop() --- ... -- replace multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:replace({key, key}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... t --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... _ = space:replace({7, 7}) --- ... space:drop() --- ... -- replace with box.tuple.new space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:replace({tostring(key)}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1'] - ['2'] - ['3'] - ['4'] - ['5'] - ['6'] - ['7'] - ['8'] - ['9'] - ['10'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] - ['100'] ... _ = space:replace(box.tuple.new{tostring(7)}) --- ... space:drop() --- ... -- replace multiple indices space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) --- ... space:replace({1, 1}) --- - [1, 1] ... space:replace({1, 2}) --- - [1, 2] ... index1:select{} --- - - [1, 2] ... index2:select{} --- - - [1, 2] ... space:drop() --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) --- ... index3 = space:create_index('third', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) --- ... space:insert({1, 1, 1}) --- - [1, 1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3, 3, 3}) --- - [3, 3, 3] ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... 
space:replace({1, 2, 3}) --- - error: Duplicate key exists in unique index 'secondary' in space 'test' ... index1:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... index2:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... index3:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:drop() --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/tree_min_max_count.test.lua0000664000000000000000000001541713306560010023774 0ustar rootrootenv = require('test_run') test_run = env.new() engine = test_run:get_cfg('engine') -- one part indices -- int type space0 = box.schema.space.create('space0', { engine = engine }) index0 = space0:create_index('primary', { type = 'tree', parts = {1, 'INTEGER'} }) space0:insert({1, "AAAA"}) space0:insert({2, "AAAA"}) space0:insert({3, "AAAA"}) space0:insert({4, "AAAA"}) index0:select() index0:max(2) index0:min(2) index0:count(2) index0:max() index0:min() index0:count() space0:insert({20, "AAAA"}) space0:insert({30, "AAAA"}) space0:insert({40, "AAAA"}) index0:select() index0:max(15) index0:min(15) index0:count(15) index0:max() index0:min() index0:count() space0:insert({-2, "AAAA"}) space0:insert({-3, "AAAA"}) space0:insert({-4, "AAAA"}) index0:select() index0:max(0) index0:min(0) index0:count(0) index0:max() index0:min() index0:count() space0:drop() -- number type space1 = box.schema.space.create('space1', { engine = engine }) index1 = space1:create_index('primary', { type = 'tree', parts = {1, 'number'} }) space1:insert({1, "AAAA"}) space1:insert({2, "AAAA"}) space1:insert({3, "AAAA"}) space1:insert({4, "AAAA"}) index1:select() index1:max(2) index1:min(2) index1:count(2) index1:max() index1:min() index1:count() space1:insert({20, "AAAA"}) space1:insert({30, "AAAA"}) space1:insert({40, "AAAA"}) index1:select() index1:max(15) index1:min(15) index1:count(15) index1:max() index1:min() index1:count() space1:insert({-2, "AAAA"}) space1:insert({-3, "AAAA"}) space1:insert({-4, "AAAA"}) index1:select() index1:max(0) index1:min(0) 
index1:count(0) index1:max() index1:min() index1:count() space1:insert({1.5, "AAAA"}) space1:insert({2.5, "AAAA"}) space1:insert({3.5, "AAAA"}) space1:insert({4.5, "AAAA"}) index1:select() index1:max(1) index1:min(1) index1:count(1) index1:max() index1:min() index1:count() space1:drop() -- str type space2 = box.schema.space.create('space2', { engine = engine }) index2 = space2:create_index('primary', { type = 'tree', parts = {1, 'string'} }) space2:insert({'1', "AAAA"}) space2:insert({'2', "AAAA"}) space2:insert({'3', "AAAA"}) space2:insert({'4', "AAAA"}) index2:select() index2:max('2') index2:min('2') index2:count('2') index2:max() index2:min() index2:count() space2:insert({'20', "AAAA"}) space2:insert({'30', "AAAA"}) space2:insert({'40', "AAAA"}) index2:select() index2:max('15') index2:min('15') index2:count('15') index2:max() index2:min() index2:count() space2:insert({'-2', "AAAA"}) space2:insert({'-3', "AAAA"}) space2:insert({'-4', "AAAA"}) index2:select() index2:max('0') index2:min('0') index2:count('0') index2:max() index2:min() index2:count() space2:drop() -- num type space3 = box.schema.space.create('space3', { engine = engine }) index3 = space3:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) space3:insert({1, "AAAA"}) space3:insert({2, "AAAA"}) space3:insert({3, "AAAA"}) space3:insert({4, "AAAA"}) index3:select() index3:max(2) index3:min(2) index3:count(2) index3:max() index3:min() index3:count() space3:insert({20, "AAAA"}) space3:insert({30, "AAAA"}) space3:insert({40, "AAAA"}) index3:select() index3:max(15) index3:min(15) index3:count(15) index3:max() index3:min() index3:count() space3:drop() -- scalar type space4 = box.schema.space.create('space4', { engine = engine }) index4 = space4:create_index('primary', { type = 'tree', parts = {1, 'scalar'} }) space4:insert({1, "AAAA"}) space4:insert({2, "AAAA"}) space4:insert({3, "AAAA"}) space4:insert({4, "AAAA"}) index4:select() index4:max(2) index4:min(2) index4:count(2) index4:max() 
index4:min() index4:count() space4:insert({20, "AAAA"}) space4:insert({30, "AAAA"}) space4:insert({40, "AAAA"}) index4:select() index4:max(15) index4:min(15) index4:count(15) index4:max() index4:min() index4:count() space4:insert({'1', "AAAA"}) space4:insert({'2', "AAAA"}) space4:insert({'3', "AAAA"}) space4:insert({'4', "AAAA"}) index4:select() index4:max('2') index4:min('2') index4:count('2') index4:max() index4:min() index4:count() space4:insert({'20', "AAAA"}) space4:insert({'30', "AAAA"}) space4:insert({'40', "AAAA"}) index4:select() index4:max('15') index4:min('15') index4:count('15') index4:max() index4:min() index4:count() space4:insert({'-2', "AAAA"}) space4:insert({'-3', "AAAA"}) space4:insert({'-4', "AAAA"}) index4:select() index4:max('0') index4:min('0') index4:count('0') index4:max() index4:min() index4:count() space4:insert({-2, "AAAA"}) space4:insert({-3, "AAAA"}) space4:insert({-4, "AAAA"}) index4:select() index4:max(0) index4:min(0) index4:count(0) index4:max() index4:min() index4:count() space4:drop() -- multi filed indices -- scalar int space5 = box.schema.space.create('space5', { engine = engine }) index5 = space5:create_index('primary', { type = 'tree', parts = {1, 'scalar', 2, 'INTEGER'} }) space5:insert({1, 1}) space5:insert({1, 2}) space5:insert({1, 3}) space5:insert({1, -4}) index5:select() index5:max({1}) index5:min({1}) index5:count({1}) index5:max({1, 0}) index5:min({1, 1}) index5:count({1}) index5:max() index5:min() index5:count() space5:insert({2, 1}) space5:insert({2, 2}) space5:insert({2, 3}) space5:insert({2, -4}) index5:select() index5:max({2}) index5:min({2}) index5:count({2}) index5:max({2, 0}) index5:min({2, 1}) index5:count({2}) index5:max() index5:min() index5:count() space5:drop() -- scalar str space6 = box.schema.space.create('space6', { engine = engine }) index6 = space6:create_index('primary', { type = 'tree', parts = {1, 'scalar', 2, 'string'} }) space6:insert({1, '1'}) space6:insert({1, '2'}) space6:insert({1, '3'}) 
space6:insert({1, '-4'}) index6:select() index6:max({1}) index6:min({1}) index6:count({1}) index6:max({1, '0'}) index6:min({1, '1'}) index6:count({1}) index6:max() index6:min() index6:count() space6:insert({2, '1'}) space6:insert({2, '2'}) space6:insert({2, '3'}) space6:insert({2, '-4'}) index6:select() index6:max({2}) index6:min({2}) index6:count({2}) index6:max({2, '0'}) index6:min({2, '1'}) index6:count({2}) index6:max() index6:min() index6:count() space6:drop() -- min max count after many inserts string = require('string') space7 = box.schema.space.create('space7', { engine = engine }) index7 = space7:create_index('primary', { type = 'tree', parts = {1, 'scalar'} }) long_string = string.rep('A', 650) for i = 1, 1000 do space7:insert({i, long_string}) end index7:max({100}) index7:max({700}) index7:min({100}) index7:min({700}) index7:count({2}) index7:max() index7:min() index7:count() space7:drop() space8 = box.schema.space.create('space8', { engine = engine }) index8 = space8:create_index('primary', { type = 'tree', parts = {1, 'scalar', 2, 'INTEGER'} }) for i = 1, 1000 do space8:insert({i % 10, i, long_string}) end index8:max({1, 100}) index8:max({2, 700}) index8:max({3}) index8:min({1, 10}) index8:min({1, 700}) index8:min({3}) index8:count({2}) index8:max() index8:min() index8:count() space8:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_snapshot.test.lua0000664000000000000000000000122413306560010023470 0ustar rootroot -- write data recover from latest snapshot env = require('test_run') test_run = env.new() test_run:cmd('restart server default') engine = test_run:get_cfg('engine') space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') space:insert({0}) box.snapshot() test_run:cmd('restart server default') space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) for key = 1, 351 do space:insert({key}) end box.snapshot() test_run:cmd('restart server default') space 
= box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) space:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/insert.test.lua0000664000000000000000000000732013306560010021413 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- insert (str) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:insert({tostring(key)}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t space:insert({tostring(7)}) space:drop() -- insert (num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) for key = 1, 100 do space:insert({key}) end t = {} for key = 1, 100 do table.insert(t, space:get({key})) end t space:insert({7}) space:drop() -- insert multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) for key = 1, 100 do space:insert({key, key}) end t = {} for key = 1, 100 do table.insert(t, space:get({key, key})) end t space:insert({7, 7}) space:drop() -- insert with tuple.new space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) for key = 1, 100 do space:insert({tostring(key)}) end t = {} for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end t space:insert(box.tuple.new{tostring(7)}) space:drop() -- In non-unique indexes select output order is undefined, -- so it's better to additionally sort output to receive same order every time. 
function sort_cmp(a, b) return a[1] < b[1] and true or false end function sort(t) table.sort(t, sort_cmp) return t end -- insert in space with multiple indices space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'number', 2, 'scalar'}}) index2 = space:create_index('secondary', { type = 'tree', parts = {3, 'unsigned', 1, 'number'}}) index3 = space:create_index('third', { type = 'tree', parts = {2, 'scalar', 4, 'string'}, unique = false}) space:insert({50, 'fere', 3, 'rgrtht'}) space:insert({-14.645, true, 562, 'jknew'}) space:insert({533, 1293.352, 2132, 'hyorj'}) space:insert({4824, 1293.352, 684, 'hyorj'}) index1:select{} index2:select{} sort(index3:select{}) space:drop() space = box.schema.space.create('test', { engine = engine }) index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) index3 = space:create_index('third', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) space:insert({1, 1, 1}) space:insert({2, 2, 2}) space:insert({3, 3, 3}) space:select{} space:insert({1, 2, 3}) index1:select{} index2:select{} sort(index3:select{}) space:drop() -- gh-186 New implementation of box.replace does not check that tuple is -- array s = box.schema.space.create('s', { engine = engine }) index = s:create_index('pk') s:insert(1) s:insert(1, 2) s:insert(1, 2, 3) s:insert{1} s:insert{2, 3} -- xxx: silently truncates the tail - should warn perhaps tmp = s:delete(1, 2, 3) s:select{} s:drop() -- concurrent insert fail fiber = require('fiber') s = box.schema.space.create('s', { engine = engine }) index = s:create_index('pk') n_workers = 3 n_success = 0 n_failed = 0 c = fiber.channel(n_workers) inspector:cmd("setopt delimiter ';'") for i=1,n_workers do fiber.create(function() if pcall(s.insert, s, {42}) then n_success = n_success + 1 else n_failed = n_failed + 1 end c:put(true) end) 
end; inspector:cmd("setopt delimiter ''"); -- Join background fibers. for i=1,n_workers do c:get() end n_success n_failed s:select{} s:drop() fiber = nil tarantool_1.9.1.26.g63eb81e3c/test/engine/insert.result0000664000000000000000000002360013306560010021171 0ustar rootroottest_run = require('test_run') --- ... inspector = test_run.new() --- ... engine = inspector:get_cfg('engine') --- ... -- insert (str) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:insert({tostring(key)}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... t --- - - ['1'] - ['2'] - ['3'] - ['4'] - ['5'] - ['6'] - ['7'] - ['8'] - ['9'] - ['10'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] - ['100'] ... space:insert({tostring(7)}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:drop() --- ... -- insert (num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... for key = 1, 100 do space:insert({key}) end --- ... t = {} --- ... 
for key = 1, 100 do table.insert(t, space:get({key})) end --- ... t --- - - [1] - [2] - [3] - [4] - [5] - [6] - [7] - [8] - [9] - [10] - [11] - [12] - [13] - [14] - [15] - [16] - [17] - [18] - [19] - [20] - [21] - [22] - [23] - [24] - [25] - [26] - [27] - [28] - [29] - [30] - [31] - [32] - [33] - [34] - [35] - [36] - [37] - [38] - [39] - [40] - [41] - [42] - [43] - [44] - [45] - [46] - [47] - [48] - [49] - [50] - [51] - [52] - [53] - [54] - [55] - [56] - [57] - [58] - [59] - [60] - [61] - [62] - [63] - [64] - [65] - [66] - [67] - [68] - [69] - [70] - [71] - [72] - [73] - [74] - [75] - [76] - [77] - [78] - [79] - [80] - [81] - [82] - [83] - [84] - [85] - [86] - [87] - [88] - [89] - [90] - [91] - [92] - [93] - [94] - [95] - [96] - [97] - [98] - [99] - [100] ... space:insert({7}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:drop() --- ... -- insert multi-part (num, num) space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... for key = 1, 100 do space:insert({key, key}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({key, key})) end --- ... 
t --- - - [1, 1] - [2, 2] - [3, 3] - [4, 4] - [5, 5] - [6, 6] - [7, 7] - [8, 8] - [9, 9] - [10, 10] - [11, 11] - [12, 12] - [13, 13] - [14, 14] - [15, 15] - [16, 16] - [17, 17] - [18, 18] - [19, 19] - [20, 20] - [21, 21] - [22, 22] - [23, 23] - [24, 24] - [25, 25] - [26, 26] - [27, 27] - [28, 28] - [29, 29] - [30, 30] - [31, 31] - [32, 32] - [33, 33] - [34, 34] - [35, 35] - [36, 36] - [37, 37] - [38, 38] - [39, 39] - [40, 40] - [41, 41] - [42, 42] - [43, 43] - [44, 44] - [45, 45] - [46, 46] - [47, 47] - [48, 48] - [49, 49] - [50, 50] - [51, 51] - [52, 52] - [53, 53] - [54, 54] - [55, 55] - [56, 56] - [57, 57] - [58, 58] - [59, 59] - [60, 60] - [61, 61] - [62, 62] - [63, 63] - [64, 64] - [65, 65] - [66, 66] - [67, 67] - [68, 68] - [69, 69] - [70, 70] - [71, 71] - [72, 72] - [73, 73] - [74, 74] - [75, 75] - [76, 76] - [77, 77] - [78, 78] - [79, 79] - [80, 80] - [81, 81] - [82, 82] - [83, 83] - [84, 84] - [85, 85] - [86, 86] - [87, 87] - [88, 88] - [89, 89] - [90, 90] - [91, 91] - [92, 92] - [93, 93] - [94, 94] - [95, 95] - [96, 96] - [97, 97] - [98, 98] - [99, 99] - [100, 100] ... space:insert({7, 7}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:drop() --- ... -- insert with tuple.new space = box.schema.space.create('test', { engine = engine }) --- ... index = space:create_index('primary', { type = 'tree', parts = {1, 'string'} }) --- ... for key = 1, 100 do space:insert({tostring(key)}) end --- ... t = {} --- ... for key = 1, 100 do table.insert(t, space:get({tostring(key)})) end --- ... 
t --- - - ['1'] - ['2'] - ['3'] - ['4'] - ['5'] - ['6'] - ['7'] - ['8'] - ['9'] - ['10'] - ['11'] - ['12'] - ['13'] - ['14'] - ['15'] - ['16'] - ['17'] - ['18'] - ['19'] - ['20'] - ['21'] - ['22'] - ['23'] - ['24'] - ['25'] - ['26'] - ['27'] - ['28'] - ['29'] - ['30'] - ['31'] - ['32'] - ['33'] - ['34'] - ['35'] - ['36'] - ['37'] - ['38'] - ['39'] - ['40'] - ['41'] - ['42'] - ['43'] - ['44'] - ['45'] - ['46'] - ['47'] - ['48'] - ['49'] - ['50'] - ['51'] - ['52'] - ['53'] - ['54'] - ['55'] - ['56'] - ['57'] - ['58'] - ['59'] - ['60'] - ['61'] - ['62'] - ['63'] - ['64'] - ['65'] - ['66'] - ['67'] - ['68'] - ['69'] - ['70'] - ['71'] - ['72'] - ['73'] - ['74'] - ['75'] - ['76'] - ['77'] - ['78'] - ['79'] - ['80'] - ['81'] - ['82'] - ['83'] - ['84'] - ['85'] - ['86'] - ['87'] - ['88'] - ['89'] - ['90'] - ['91'] - ['92'] - ['93'] - ['94'] - ['95'] - ['96'] - ['97'] - ['98'] - ['99'] - ['100'] ... space:insert(box.tuple.new{tostring(7)}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... space:drop() --- ... -- In non-unique indexes select output order is undefined, -- so it's better to additionally sort output to receive same order every time. function sort_cmp(a, b) return a[1] < b[1] and true or false end --- ... function sort(t) table.sort(t, sort_cmp) return t end --- ... -- insert in space with multiple indices space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'number', 2, 'scalar'}}) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {3, 'unsigned', 1, 'number'}}) --- ... index3 = space:create_index('third', { type = 'tree', parts = {2, 'scalar', 4, 'string'}, unique = false}) --- ... space:insert({50, 'fere', 3, 'rgrtht'}) --- - [50, 'fere', 3, 'rgrtht'] ... space:insert({-14.645, true, 562, 'jknew'}) --- - [-14.645, true, 562, 'jknew'] ... space:insert({533, 1293.352, 2132, 'hyorj'}) --- - [533, 1293.352, 2132, 'hyorj'] ... 
space:insert({4824, 1293.352, 684, 'hyorj'}) --- - [4824, 1293.352, 684, 'hyorj'] ... index1:select{} --- - - [-14.645, true, 562, 'jknew'] - [50, 'fere', 3, 'rgrtht'] - [533, 1293.352, 2132, 'hyorj'] - [4824, 1293.352, 684, 'hyorj'] ... index2:select{} --- - - [50, 'fere', 3, 'rgrtht'] - [-14.645, true, 562, 'jknew'] - [4824, 1293.352, 684, 'hyorj'] - [533, 1293.352, 2132, 'hyorj'] ... sort(index3:select{}) --- - - [-14.645, true, 562, 'jknew'] - [50, 'fere', 3, 'rgrtht'] - [533, 1293.352, 2132, 'hyorj'] - [4824, 1293.352, 684, 'hyorj'] ... space:drop() --- ... space = box.schema.space.create('test', { engine = engine }) --- ... index1 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'unsigned'} }) --- ... index3 = space:create_index('third', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) --- ... space:insert({1, 1, 1}) --- - [1, 1, 1] ... space:insert({2, 2, 2}) --- - [2, 2, 2] ... space:insert({3, 3, 3}) --- - [3, 3, 3] ... space:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:insert({1, 2, 3}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... index1:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... index2:select{} --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... sort(index3:select{}) --- - - [1, 1, 1] - [2, 2, 2] - [3, 3, 3] ... space:drop() --- ... -- gh-186 New implementation of box.replace does not check that tuple is -- array s = box.schema.space.create('s', { engine = engine }) --- ... index = s:create_index('pk') --- ... s:insert(1) --- - error: Tuple/Key must be MsgPack array ... s:insert(1, 2) --- - error: Tuple/Key must be MsgPack array ... s:insert(1, 2, 3) --- - error: Tuple/Key must be MsgPack array ... s:insert{1} --- - [1] ... s:insert{2, 3} --- - [2, 3] ... -- xxx: silently truncates the tail - should warn perhaps tmp = s:delete(1, 2, 3) --- ... s:select{} --- - - [2, 3] ... 
s:drop() --- ... -- concurrent insert fail fiber = require('fiber') --- ... s = box.schema.space.create('s', { engine = engine }) --- ... index = s:create_index('pk') --- ... n_workers = 3 --- ... n_success = 0 --- ... n_failed = 0 --- ... c = fiber.channel(n_workers) --- ... inspector:cmd("setopt delimiter ';'") --- - true ... for i=1,n_workers do fiber.create(function() if pcall(s.insert, s, {42}) then n_success = n_success + 1 else n_failed = n_failed + 1 end c:put(true) end) end; --- ... inspector:cmd("setopt delimiter ''"); --- - true ... -- Join background fibers. for i=1,n_workers do c:get() end --- ... n_success --- - 1 ... n_failed --- - 2 ... s:select{} --- - - [42] ... s:drop() --- ... fiber = nil --- ... tarantool_1.9.1.26.g63eb81e3c/test/engine/truncate.test.lua0000664000000000000000000001377113306565107021757 0ustar rootroottest_run = require('test_run').new() engine = test_run:get_cfg('engine') fiber = require('fiber') -- -- Check that space truncation is forbidden in a transaction. -- s = box.schema.create_space('test', {engine = engine}) _ = s:create_index('pk') _ = s:insert{123} box.begin() s:truncate() box.commit() s:select() s:drop() -- -- Check that space truncation works for spaces created via -- the internal API. -- _ = box.space._space:insert{512, 1, 'test', engine, 0, {temporary = false}, {}} _ = box.space._index:insert{512, 0, 'pk', 'tree', {unique = true}, {{0, 'unsigned'}}} _ = box.space.test:insert{123} box.space.test:select() box.space.test:truncate() box.space.test:select() box.space.test:drop() -- -- Check that a space cannot be dropped if it has a record -- in _truncate space. -- s = box.schema.create_space('test', {engine = engine}) s:truncate() _ = box.space._space:delete{s.id} -- error _ = box.space._truncate:delete{s.id} _ = box.space._space:delete{s.id} -- ok -- -- Check that truncation of system spaces is not permitted. -- box.space._space:truncate() box.space._index:truncate() -- -- Truncate space with no indexes. 
-- s = box.schema.create_space('test', {engine = engine}) s:truncate() s:drop() -- -- Truncate empty space. -- s = box.schema.create_space('test', {engine = engine}) _ = s:create_index('pk') s:truncate() s:select() s:drop() -- -- Truncate non-empty space. -- s = box.schema.create_space('test', {engine = engine}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) _ = s:create_index('i2', {parts = {2, 'unsigned'}}) _ = s:create_index('i3', {parts = {3, 'string'}}) _ = s:insert{1, 3, 'a'} _ = s:insert{2, 2, 'b'} _ = s:insert{3, 1, 'c'} s:truncate() s.index.i1:select() s.index.i2:select() s.index.i3:select() _ = s:insert{10, 30, 'x'} _ = s:insert{20, 20, 'y'} _ = s:insert{30, 10, 'z'} s.index.i1:select() s.index.i2:select() s.index.i3:select() s:drop() -- -- Check that space truncation is linearizable. -- -- Create a space with several indexes and start three fibers: -- 1st and 3rd update the space, 2nd truncates it. Then wait -- until all fibers are done. The space should contain data -- inserted by the 3rd fiber. -- -- Note, this is guaranteed to be true only if space updates -- don't yield, which is always true for memtx and is true -- for vinyl in case there's no data on disk, as in this case. 
-- s = box.schema.create_space('test', {engine = engine}) _ = s:create_index('i1', {parts = {1, 'unsigned'}}) _ = s:create_index('i2', {parts = {2, 'unsigned'}}) _ = s:create_index('i3', {parts = {3, 'string'}}) _ = s:insert{1, 1, 'a'} _ = s:insert{2, 2, 'b'} _ = s:insert{3, 3, 'c'} c = fiber.channel(3) test_run:cmd("setopt delimiter ';'") fiber.create(function() box.begin() s:replace{1, 10, 'aa'} s:replace{2, 20, 'bb'} s:replace{3, 30, 'cc'} box.commit() c:put(true) end) fiber.create(function() s:truncate() c:put(true) end) fiber.create(function() box.begin() s:replace{1, 100, 'aaa'} s:replace{2, 200, 'bbb'} s:replace{3, 300, 'ccc'} box.commit() c:put(true) end) test_run:cmd("setopt delimiter ''"); for i = 1, 3 do c:get() end s.index.i1:select() s.index.i2:select() s.index.i3:select() s:drop() -- -- Calling space.truncate concurrently. -- s = box.schema.create_space('test', {engine = engine}) _ = s:create_index('pk') c = fiber.channel(5) for i = 1, 5 do fiber.create(function() s:truncate() c:put(true) end) end for i = 1, 5 do c:get() end s:drop() -- -- Check that space truncation is persistent. 
-- -- The test checks the following cases: -- - Create and truncate before snapshot -- - Create before snapshot, truncate after snapshot -- - Create and truncate after snapshot -- s1 = box.schema.create_space('test1', {engine = engine}) _ = s1:create_index('i1', {parts = {1, 'unsigned'}}) _ = s1:create_index('i2', {parts = {2, 'unsigned'}}) _ = s1:insert{1, 3} _ = s1:insert{2, 2} _ = s1:insert{3, 1} s1:truncate() _ = s1:insert{123, 321} s2 = box.schema.create_space('test2', {engine = engine}) _ = s2:create_index('i1', {parts = {1, 'unsigned'}}) _ = s2:create_index('i2', {parts = {2, 'unsigned'}}) _ = s2:insert{10, 30} _ = s2:insert{20, 20} _ = s2:insert{30, 10} box.snapshot() _ = s1:insert{321, 123} s2:truncate() _ = s2:insert{456, 654} s3 = box.schema.create_space('test3', {engine = engine}) _ = s3:create_index('i1', {parts = {1, 'unsigned'}}) _ = s3:create_index('i2', {parts = {2, 'unsigned'}}) _ = s3:insert{100, 300} _ = s3:insert{200, 200} _ = s3:insert{300, 100} s3:truncate() _ = s3:insert{789, 987} -- Check that index drop, create, and alter called after space -- truncate do not break recovery (gh-2615) s4 = box.schema.create_space('test4', {engine = 'memtx'}) _ = s4:create_index('i1', {parts = {1, 'string'}}) _ = s4:create_index('i3', {parts = {3, 'string'}}) _ = s4:insert{'zzz', 111, 'yyy'} s4:truncate() s4.index.i3:drop() _ = s4:create_index('i2', {parts = {2, 'string'}}) s4.index.i1:alter({parts = {1, 'string', 2, 'string'}}) _ = s4:insert{'abc', 'cba'} test_run:cmd('restart server default') s1 = box.space.test1 s2 = box.space.test2 s3 = box.space.test3 s4 = box.space.test4 s1.index.i1:select() s1.index.i2:select() s2.index.i1:select() s2.index.i2:select() s3.index.i1:select() s3.index.i2:select() s4.index.i1:select() s4.index.i2:select() s1:drop() s2:drop() s3:drop() s4:drop() -- Truncate should fail in no write access for the space engine = test_run:get_cfg('engine') s = box.schema.create_space('access_truncate', {engine = engine}) _ = 
s:create_index('pk') s:insert({1, 2, 3}) s:insert({3, 2, 1}) box.schema.user.grant('guest', 'execute', 'universe') box.schema.user.grant('guest', 'read', 'space', 'access_truncate') con = require('net.box').connect(box.cfg.listen) con:eval([[box.space.access_truncate:truncate()]]) con.space.access_truncate:select() box.schema.user.grant('guest', 'write', 'space', 'access_truncate') con:eval([[box.space.access_truncate:truncate()]]) con.space.access_truncate:select() con:close() box.schema.user.revoke('guest', 'execute', 'universe') box.schema.user.revoke('guest', 'read,write', 'space', 'access_truncate') s:drop() tarantool_1.9.1.26.g63eb81e3c/test/engine/tree_variants.result0000664000000000000000000002045113306560010022534 0ustar rootrootenv = require('test_run') --- ... test_run = env.new() --- ... engine = test_run:get_cfg('engine') --- ... space = box.schema.space.create('tweedledum', { engine = engine }) --- ... i0 = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'}, unique = true }) --- ... i1 = space:create_index('i1', { type = 'tree', parts = {2, 'unsigned'}, unique = false }) --- ... i2 = space:create_index('i2', { type = 'tree', parts = {3, 'unsigned'}, unique = false }) --- ... i3 = space:create_index('i3', { type = 'tree', parts = {4, 'string', 5, 'string'}, unique = false }) --- ... i4 = space:create_index('i4', { type = 'tree', parts = {7, 'string', 6, 'string'}, unique = false }) --- ... i5 = space:create_index('i5', { type = 'tree', parts = {9, 'unsigned'}, unique = false }) --- ... i6 = space:create_index('i6', { type = 'tree', parts = {7, 'string', 6, 'string', 4, 'string', 5, 'string', 9, 'unsigned'}, unique = true }) --- ... space:insert{0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000} --- - [0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000] ... space:insert{1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001} --- - [1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001] ... 
space:insert{2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002} --- - [2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002] ... space:insert{3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003} --- - [3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003] ... space:insert{4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004} --- - [4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004] ... space:insert{5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005} --- - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... space:insert{6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} --- - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... space:insert{7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} --- - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] ... space:insert{8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} --- - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] ... space:insert{9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} --- - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... -- In non-unique indexes select output order is undefined, -- so it's better to additionally sort output to receive same order every time. function sort_cmp(a, b) return a[1] < b[1] and true or false end --- ... function sort(t) table.sort(t, sort_cmp) return t end --- ... space.index['primary']:get{1} --- - [1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001] ... sort(space.index['i1']:select{2}) --- - - [2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002] ... sort(space.index[2]:select({300})) --- - - [3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003] - [4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004] - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... 
#space.index['i3']:select({'Joe', 'Sixpack'}) --- - 6 ... #space.index['i3']:select('John') --- - 4 ... #space.index['i4']:select('A Pipe') --- - 1 ... {sort(space.index['i4']:select{'Miller Genuine Draft', 'Drinks'})} --- - - - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... sort(space.index['i5']:select{2007}) --- - - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] ... sort(space.index[6]:select{'Miller Genuine Draft', 'Drinks'}) --- - - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] ... tmp = space:delete{6} --- ... tmp = space:delete{7} --- ... tmp = space:delete{8} --- ... tmp = space:delete{9} --- ... space:insert{6, 6ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006} --- - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... space:insert{7, 7ULL, 400ULL, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007} --- - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] ... space:insert{8, 8ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008} --- - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] ... space:insert{9, 9ULL, 400ULL, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009} --- - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... sort(space.index['i1']:select{6ULL}) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... sort(space.index['i1']:select{6}) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] ... sort(space.index['i2']:select(400ULL)) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... 
sort(space.index['i2']:select(400)) --- - - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... sort(space:select{}) --- - - [0, 0, 100, 'Joe', 'Sixpack', 'Drinks', 'Amstel', 'bar', 2000] - [1, 1, 200, 'Joe', 'Sixpack', 'Drinks', 'Heineken', 'bar', 2001] - [2, 2, 200, 'Joe', 'Sixpack', 'Drinks', 'Carlsberg', 'bar', 2002] - [3, 3, 300, 'Joe', 'Sixpack', 'Drinks', 'Corona Extra', 'bar', 2003] - [4, 4, 300, 'Joe', 'Sixpack', 'Drinks', 'Stella Artois', 'bar', 2004] - [5, 5, 300, 'Joe', 'Sixpack', 'Drinks', 'Miller Genuine Draft', 'bar', 2005] - [6, 6, 400, 'John', 'Smoker', 'Hits', 'A Pipe', 'foo', 2006] - [7, 7, 400, 'John', 'Smoker', 'Hits', 'A Bong', 'foo', 2007] - [8, 8, 400, 'John', 'Smoker', 'Rolls', 'A Joint', 'foo', 2008] - [9, 9, 400, 'John', 'Smoker', 'Rolls', 'A Blunt', 'foo', 2009] ... -- Test incorrect keys - supplied key field type does not match index type -- https://bugs.launchpad.net/tarantool/+bug/1072624 space:insert{'', 1, 2, '', '', '', '', '', 0} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:insert{'xxxxxxxx', 1, 2, '', '', '', '', '', 0} --- - error: 'Tuple field 1 type does not match one required by operation: expected unsigned' ... space:insert{1, '', 2, '', '', '', '', '', 0} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... space:insert{1, 'xxxxxxxxxxx', 2, '', '', '', '', '', 0} --- - error: 'Tuple field 2 type does not match one required by operation: expected unsigned' ... space:drop() --- ... sort = nil --- ... sort_cmp = nil --- ... -- test index bsize space = box.schema.space.create('test', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) --- ... 
index2 = space:create_index('secondary', { type = 'tree', parts = {2, 'string', 3, 'scalar'} }) --- ... pk:bsize() == 0 --- - true ... index2:bsize() == 0 --- - true ... space:insert({1, 'a', 3}) --- - [1, 'a', 3] ... pk:bsize() > 0 --- - true ... index2:bsize() > 0 --- - true ... space:insert({2, 'b', 4}) --- - [2, 'b', 4] ... old_pk_size = pk:bsize() --- ... old_index2_size = index2:bsize() --- ... space:insert({2, 'b', 4}) --- - error: Duplicate key exists in unique index 'primary' in space 'test' ... old_pk_size == pk:bsize() --- - true ... old_index2_size == index2:bsize() --- - true ... tmp = pk:delete({1}) --- ... pk:bsize() > 0 --- - true ... index2:bsize() > 0 --- - true ... tmp = index2:delete({'b', 4}) --- ... pk:bsize() > 0 --- - true ... index2:bsize() > 0 --- - true ... space:drop() --- ... --range queries space = box.schema.space.create('test', { engine = engine }) --- ... pk = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned', 2, 'unsigned'} }) --- ... str = require('string').rep('A', 500) --- ... c1 = 100 c2 = 10 --- ... for i = 1,c1 do for j=1,c2 do space:insert{j, i, str} end end --- ... good = true --- ... for i = 1,c2 do if #space:select{i} ~= c1 then good = false end end --- ... good --true --- - true ... total_count = 0 --- ... for i = 1,c2 do total_count = total_count + #space:select{i} end --- ... total_count --c1 * c2 --- - 1000 ... space:drop() --- ... 
tarantool_1.9.1.26.g63eb81e3c/test/engine/recover_drop.test.lua0000664000000000000000000000123013306560010022572 0ustar rootroot-- recover dropped spaces env = require('test_run') test_run = env.new() test_run:cmd('restart server default') engine = test_run:get_cfg('engine') space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') for key = 1, 351 do space:insert({key}) end space:drop() space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary') for key = 500, 1000 do space:insert({key}) end test_run:cmd('restart server default') space = box.space['test'] index = space.index['primary'] index:select({}, {iterator = box.index.ALL}) space:drop() test_run:cmd('restart server default with cleanup=1') tarantool_1.9.1.26.g63eb81e3c/test/engine/transaction.test.lua0000664000000000000000000000510213306560010022430 0ustar rootroottest_run = require('test_run') inspector = test_run.new() engine = inspector:get_cfg('engine') -- basic transaction tests space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) -- begin/rollback inspector:cmd("setopt delimiter ';'") box.begin() for key = 1, 10 do space:insert({key}) end box.rollback(); inspector:cmd("setopt delimiter ''"); t = {} for key = 1, 10 do assert(#space:select({key}) == 0) end t -- begin/commit insert inspector:cmd("setopt delimiter ';'") box.begin() for key = 1, 10 do space:insert({key}) end box.commit(); inspector:cmd("setopt delimiter ''"); t = {} for key = 1, 10 do table.insert(t, space:select({key})[1]) end t -- begin/commit delete inspector:cmd("setopt delimiter ';'") box.begin() for key = 1, 10 do space:delete({key}) end box.commit(); inspector:cmd("setopt delimiter ''"); t = {} for key = 1, 10 do assert(#space:select({key}) == 0) end t space:drop() -- multi-space transactions a = box.schema.space.create('test', { engine = engine }) index = 
a:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) b = box.schema.space.create('test_tmp', { engine = engine }) index = b:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) -- begin/rollback inspector:cmd("setopt delimiter ';'") box.begin() for key = 1, 10 do a:insert({key}) end for key = 1, 10 do b:insert({key}) end box.rollback(); inspector:cmd("setopt delimiter ''"); t = {} for key = 1, 10 do assert(#a:select({key}) == 0) end t for key = 1, 10 do assert(#b:select({key}) == 0) end t -- begin/commit insert inspector:cmd("setopt delimiter ';'") box.begin() for key = 1, 10 do a:insert({key}) end for key = 1, 10 do b:insert({key}) end box.commit(); inspector:cmd("setopt delimiter ''"); t = {} for key = 1, 10 do table.insert(t, a:select({key})[1]) end t t = {} for key = 1, 10 do table.insert(t, b:select({key})[1]) end t -- begin/commit delete inspector:cmd("setopt delimiter ';'") box.begin() for key = 1, 10 do a:delete({key}) end for key = 1, 10 do b:delete({key}) end box.commit(); inspector:cmd("setopt delimiter ''"); t = {} for key = 1, 10 do assert(#a:select({key}) == 0) end t for key = 1, 10 do assert(#b:select({key}) == 0) end t a:drop() b:drop() -- ensure findByKey works in empty transaction context space = box.schema.space.create('test', { engine = engine }) index = space:create_index('primary', { type = 'tree', parts = {1, 'unsigned'} }) inspector:cmd("setopt delimiter ';'") box.begin() space:get({0}) box.rollback(); inspector:cmd("setopt delimiter ''"); space:drop() tarantool_1.9.1.26.g63eb81e3c/test-run/0000775000000000000000000000000013306562360015773 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/test_run.lua0000644000000000000000000002251713306562360020346 0ustar rootrootlocal socket = require('socket') local json = require('json') local yaml = require('yaml') local log = require('log') local fiber = require('fiber') local fio = require('fio') local errno = require('errno') local function cmd(self, msg) local 
sock = socket.tcp_connect(self.host, self.port) local data = msg .. '\n' sock:send(data) local result = sock:read('\n...\n') sock:close() result = yaml.decode(result) if type(result) == 'table' and result.error ~= nil then error(result.error) end return result end local eval_cmd = 'eval %s "%s"' local function eval(self, node, expr) return self:cmd(eval_cmd:format(node, expr)) end local get_param_cmd = 'eval %s "return box.info%s"' local function get_param(self, node, param) -- if param is passed then append dot, otherwise make it empty param = param and '.' .. param or '' return self:cmd(get_param_cmd:format(node, param)) end local function get_lsn(self, node, sid) local nodes = self:get_param(node, 'vclock') return tonumber(nodes[1][tonumber(sid)]) end local function get_server_id(self, node) local server = self:get_param(node, "server")[1] if server ~= nil then -- Tarantool < 1.7.4 if server.id ~= nil and server.id <= 0 then return nil -- bootstrap in progress end return tonumber(server.id) end -- Tarantool 1.7.4+ local server_id = self:get_param(node, "id")[1] if server_id == nil then return nil -- bootstrap in progress end return tonumber(server_id) end local function wait_lsn(self, waiter, master) local sid = self:get_server_id(master) local lsn = self:get_lsn(master, sid) while self:get_lsn(waiter, sid) < lsn do fiber.sleep(0.001) end end local function get_vclock(self, node) return self:get_param(node, 'vclock')[1] end local function wait_vclock(self, node, to_vclock) while true do local vclock = self:get_vclock(node) local ok = true for server_id, to_lsn in pairs(to_vclock) do local lsn = vclock[server_id] if lsn < to_lsn then ok = false break end end if ok then return end log.info("wait vclock: %s to %s", yaml.encode(vclock), yaml.encode(to_vclock)) fiber.sleep(0.001) end end local create_cluster_cmd1 = 'create server %s with script="%s/%s.lua"' local create_cluster_cmd1_return_listen_uri = 'create server %s with script="%s/%s.lua", 
return_listen_uri=True' local create_cluster_cmd2 = 'start server %s with wait_load=False, wait=False' local function create_cluster(self, servers, test_suite, opts) local opts = opts or {} test_suite = test_suite or 'replication' local uris = {} for _, name in ipairs(servers) do if opts.return_listen_uri then local cmd1 = create_cluster_cmd1_return_listen_uri uris[#uris + 1] = self:cmd(cmd1:format(name, test_suite, name)) else self:cmd(create_cluster_cmd1:format(name, test_suite, name)) end self:cmd(create_cluster_cmd2:format(name)) end if opts.return_listen_uri then return uris end end local drop_cluster_cmd1 = 'stop server %s' local drop_cluster_cmd2 = 'cleanup server %s' local drop_cluster_cmd3 = 'delete server %s' local function drop_cluster(self, servers) for _, name in ipairs(servers) do self:cmd(drop_cluster_cmd1:format(name)) self:cmd(drop_cluster_cmd2:format(name)) self:cmd(drop_cluster_cmd3:format(name)) end end local function cleanup_cluster(self) local cluster = box.space._cluster:select() for _, tuple in pairs(cluster) do if tuple[1] ~= box.info.id then box.space._cluster:delete(tuple[1]) end end end local wait_fullmesh_cmd = 'box.info.replication[%s]' local function wait_fullmesh(self, servers) log.info("starting full mesh") for _, server in ipairs(servers) do -- wait bootstrap to finish log.info("%s: waiting bootstrap", server) local server_id while true do server_id = self:get_server_id(server) if server_id ~= nil then log.info("%s: bootstrapped", server) break end local info = self:eval(server, "box.info") fiber.sleep(0.01) end -- wait all for full mesh for _, server2 in ipairs(servers) do if server ~= server2 then log.info("%s -> %s: waiting for connection", server2, server) while true do local cmd = wait_fullmesh_cmd:format(server_id) local info = self:eval(server2, cmd)[1] if info ~= nil and (info.status == 'follow' or (info.upstream ~= nil and info.upstream.status == 'follow')) then log.info("%s -> %s: connected", server2, server) break end 
fiber.sleep(0.01) end end end end log.info("full mesh connected") end local function get_cluster_vclock(self, servers) local vclock = {} for _, name in pairs(servers) do for server_id, lsn in pairs(self:get_vclock(name)) do local prev_lsn = vclock[server_id] if prev_lsn == nil or prev_lsn < lsn then vclock[server_id] = lsn end end end return setmetatable(vclock, { __serialize = 'map' }) end local function wait_cluster_vclock(self, servers, vclock) for _, name in pairs(servers) do self:wait_vclock(name, vclock) end return vclock end local switch_cmd1 = "env = require('test_run')" local switch_cmd2 = "test_run = env.new('%s', '%s')" local switch_cmd3 = "set connection %s" local function switch(self, node) -- switch to other node and enable test_run self:eval(node, switch_cmd1) self:eval(node, switch_cmd2:format(self.host, self.port)) return self:cmd(switch_cmd3:format(node)) end local get_cfg_cmd = 'config %s' local function get_cfg(self, name) if self.run_conf == nil then self.run_conf = self:cmd(get_cfg_cmd:format(name)) end return self.run_conf[name] end local function grep_log(self, node, what, bytes) local filename = self:eval(node, "box.cfg.log")[1] local file = fio.open(filename, {'O_RDONLY', 'O_NONBLOCK'}) local function fail(msg) local err = errno.strerror() if file ~= nil then file:close() end error(string.format("%s: %s: %s", msg, filename, err)) end if file == nil then fail("Failed to open log file") end io.flush() -- attempt to flush stdout == log fd local filesize = file:seek(0, 'SEEK_END') if filesize == nil then fail("Failed to get log file size") end local bytes = bytes or 65536 -- don't read whole log - it can be huge bytes = bytes > filesize and filesize or bytes if file:seek(-bytes, 'SEEK_END') == nil then fail("Failed to seek log file") end local found, buf repeat -- read file in chunks local s = file:read(2048) if s == nil then fail("Failed to read log file") end local pos = 1 repeat -- split read string in lines local endpos = string.find(s, 
'\n', pos) endpos = endpos and endpos - 1 -- strip terminating \n local line = string.sub(s, pos, endpos) if endpos == nil and s ~= '' then -- line doesn't end with \n or eof, append it to buffer -- to be checked on next iteration buf = buf or {} table.insert(buf, line) else if buf ~= nil then -- prepend line with buffered data table.insert(buf, line) line = table.concat(buf) buf = nil end if string.match(line, "Starting instance") then found = nil -- server was restarted, reset search else found = string.match(line, what) or found end end pos = endpos and endpos + 2 -- jump to char after \n until pos == nil until s == '' file:close() return found end local inspector_methods = { cmd = cmd, eval = eval, -- get wrappers get_param = get_param, get_server_id = get_server_id, get_cfg = get_cfg, -- lsn get_lsn = get_lsn, wait_lsn = wait_lsn, -- vclock get_vclock = get_vclock, wait_vclock = wait_vclock, switch = switch, -- replication create_cluster = create_cluster, drop_cluster = drop_cluster, cleanup_cluster = cleanup_cluster, wait_fullmesh = wait_fullmesh, get_cluster_vclock = get_cluster_vclock, wait_cluster_vclock = wait_cluster_vclock, -- grep_log = grep_log, } local function inspector_new(host, port) local inspector = {} inspector.host = host or 'localhost' inspector.port = port or tonumber(os.getenv('INSPECTOR')) if inspector.port == nil then error('Inspector not started') end return setmetatable(inspector, { __index = inspector_methods }) end return { new = inspector_new; } tarantool_1.9.1.26.g63eb81e3c/test-run/lib/0000755000000000000000000000000013306562360016537 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/worker.py0000644000000000000000000002713213306562360020427 0ustar rootrootimport os import signal import traceback import yaml import copy import functools import collections import lib from lib.utils import safe_makedirs from lib.test_suite import TestSuite from lib.colorer import color_stdout, color_log from lib.tarantool_server import 
TarantoolServer # Utils ####### def find_suites(): suite_names = lib.Options().args.suites if suite_names == []: for root, dirs, names in os.walk(os.getcwd(), followlinks=True): if "suite.ini" in names: suite_names.append(os.path.basename(root)) suites = [TestSuite(suite_name, lib.Options().args) for suite_name in sorted(suite_names)] return suites def parse_reproduce_file(filepath): reproduce = [] if not filepath: return reproduce try: with open(filepath, 'r') as f: for task_id in yaml.load(f): task_name, task_conf = task_id reproduce.append((task_name, task_conf)) except IOError: color_stdout('Cannot read "%s" passed as --reproduce argument\n' % filepath, schema='error') exit(1) return reproduce def get_reproduce_file(worker_name): main_vardir = os.path.realpath(lib.Options().args.vardir) reproduce_dir = os.path.join(main_vardir, 'reproduce') return os.path.join(reproduce_dir, '%s.list.yaml' % worker_name) def print_greetings(): # print information about tarantool color_stdout('\n') TarantoolServer.print_exe() # Get tasks and worker generators ################################# def get_task_groups(): """Scan directories where tests files expected to reside, create the list of tests and group it by suites. Create workers generator for each of these group. """ suites = find_suites() res = collections.OrderedDict() for suite in suites: key = os.path.basename(suite.suite_path) gen_worker = functools.partial(Worker, suite) # get _id as an arg task_ids = [task.id for task in suite.find_tests()] if task_ids: res[key] = { 'gen_worker': gen_worker, 'task_ids': task_ids, 'is_parallel': suite.is_parallel(), } return res def reproduce_task_groups(task_groups): """Filter provided task_groups down to the one certain group. Sort tests in this group as in the reproduce file. 
""" found_keys = [] reproduce = parse_reproduce_file(lib.Options().args.reproduce) if not reproduce: raise ValueError('[reproduce] Tests list cannot be empty') for i, task_id in enumerate(reproduce): for key, task_group in task_groups.items(): if task_id in task_group['task_ids']: found_keys.append(key) break if len(found_keys) != i + 1: raise ValueError('[reproduce] Cannot find test "%s"' % str(task_id)) found_keys = list(set(found_keys)) if len(found_keys) < 1: raise ValueError('[reproduce] Cannot find any suite for given tests') elif len(found_keys) > 1: raise ValueError( '[reproduce] Given tests contained by different suites') res_key = found_keys[0] res_task_group = copy.deepcopy(task_groups[key]) res_task_group['task_ids'] = reproduce return {res_key: res_task_group} # Worker results ################ class BaseWorkerMessage(object): """Base class for all objects passed via result queues. It holds worker_id (int) and worker_name (string). Used as a structure, i.e. w/o data fields incapsulation. """ def __init__(self, worker_id, worker_name): super(BaseWorkerMessage, self).__init__() self.worker_id = worker_id self.worker_name = worker_name class WorkerTaskResult(BaseWorkerMessage): """ Passed into the result queue when a task processed (done) by the worker. The short_status (string) field intended to give short note whether the task processed successfully or not, but with little more flexibility than binary True/False. The task_id (any hashable object) field hold ID of the processed task. """ def __init__(self, worker_id, worker_name, task_id, short_status): super(WorkerTaskResult, self).__init__(worker_id, worker_name) self.short_status = short_status self.task_id = task_id class WorkerOutput(BaseWorkerMessage): """The output passed by worker processes via color_stdout/color_log functions. The output wrapped into objects of this class by setting queue and wrapper in the Colorer class (see lib/colorer.py). 
Check LogOutputWatcher and OutputWatcher classes in listeners.py file to see how the output multiplexed by the main process. """ def __init__(self, worker_id, worker_name, output, log_only): super(WorkerOutput, self).__init__(worker_id, worker_name) self.output = output self.log_only = log_only class WorkerDone(BaseWorkerMessage): """Report the worker as done its work.""" def __init__(self, worker_id, worker_name): super(WorkerDone, self).__init__(worker_id, worker_name) # Worker ######## class VoluntaryStopException(Exception): pass class Worker: def report_keyboard_interrupt(self): color_stdout('\n[Worker "%s"] Caught keyboard interrupt; stopping...\n' % self.name, schema='test_var') def wrap_output(self, output, log_only): return WorkerOutput(self.id, self.name, output, log_only) def done_marker(self): return WorkerDone(self.id, self.name) def wrap_result(self, task_id, short_status): return WorkerTaskResult(self.id, self.name, task_id, short_status) def sigterm_handler(self, signum, frame): self.sigterm_received = True def __init__(self, suite, _id): self.sigterm_received = False signal.signal(signal.SIGTERM, lambda x, y, z=self: z.sigterm_handler(x, y)) self.initialized = False self.server = None self.inspector = None self.id = _id self.suite = suite self.name = '%03d_%s' % (self.id, self.suite.suite_path) main_vardir = self.suite.ini['vardir'] self.suite.ini['vardir'] = os.path.join(main_vardir, self.name) self.reproduce_file = get_reproduce_file(self.name) safe_makedirs(os.path.dirname(self.reproduce_file)) color_stdout.queue_msg_wrapper = self.wrap_output self.last_task_done = True self.last_task_id = -1 try: self.server = suite.gen_server() self.inspector = suite.start_server(self.server) self.initialized = True except KeyboardInterrupt: self.report_keyboard_interrupt() self.stop_server(cleanup=False) except Exception as e: color_stdout('Worker "%s" cannot start tarantool server; ' 'the tasks will be ignored...\n' % self.name, schema='error') 
color_stdout("The raised exception is '%s' of type '%s'.\n" % (str(e), str(type(e))), schema='error') color_stdout('Worker "%s" received the following error:\n' % self.name + traceback.format_exc() + '\n', schema='error') self.stop_server(cleanup=False) def stop_server(self, rais=True, cleanup=True, silent=True): try: self.suite.stop_server(self.server, self.inspector, silent=silent, cleanup=cleanup) except (KeyboardInterrupt, Exception): if rais: raise # XXX: What if KeyboardInterrupt raised inside task_queue.get() and 'stop # worker' marker readed from the queue, but not returned to us? def task_get(self, task_queue): self.last_task_done = False self.last_task_id = task_queue.get() return self.last_task_id @staticmethod def is_joinable(task_queue): return 'task_done' in task_queue.__dict__.keys() def task_done(self, task_queue): if Worker.is_joinable(task_queue): task_queue.task_done() self.last_task_done = True def find_task(self, task_id): for cur_task in self.suite.tests: if cur_task.id == task_id: return cur_task raise ValueError('Cannot find test: %s' % str(task_id)) # Note: it's not exception safe def run_task(self, task_id): if not self.initialized: return self.done_marker() try: task = self.find_task(task_id) with open(self.reproduce_file, 'a') as f: f.write('- ' + yaml.safe_dump(task.id)) short_status = self.suite.run_test( task, self.server, self.inspector) except KeyboardInterrupt: self.report_keyboard_interrupt() raise except Exception as e: color_stdout( 'Worker "%s" received the following error; stopping...\n' % self.name + traceback.format_exc() + '\n', schema='error') raise return short_status def run_loop(self, task_queue, result_queue): """ called from 'run_all' """ while True: task_id = self.task_get(task_queue) # None is 'stop worker' marker if task_id is None: color_log('Worker "%s" exhausted task queue; ' 'stopping the server...\n' % self.name, schema='test_var') self.stop_worker(task_queue, result_queue) break short_status = 
self.run_task(task_id) result_queue.put(self.wrap_result(task_id, short_status)) if not lib.Options().args.is_force and short_status == 'fail': color_stdout( 'Worker "%s" got failed test; stopping the server...\n' % self.name, schema='test_var') raise VoluntaryStopException() if self.sigterm_received: color_stdout('Worker "%s" got signal to terminate; ' 'stopping the server...\n' % self.name, schema='test_var') raise VoluntaryStopException() self.task_done(task_queue) def run_all(self, task_queue, result_queue): if not self.initialized: self.flush_all_tasks(task_queue, result_queue) result_queue.put(self.done_marker()) return try: self.run_loop(task_queue, result_queue) except (KeyboardInterrupt, Exception): self.stop_worker(task_queue, result_queue, cleanup=False) result_queue.put(self.done_marker()) def stop_worker(self, task_queue, result_queue, cleanup=True): try: if not self.last_task_done: self.task_done(task_queue) self.flush_all_tasks(task_queue, result_queue) self.stop_server(cleanup=cleanup) except (KeyboardInterrupt, Exception): pass def flush_all_tasks(self, task_queue, result_queue): """ A queue flusing is necessary only for joinable queue (when runner controlling workers with using join() on task queues), so doesn't used in the current test-run implementation. """ if not Worker.is_joinable(task_queue): return # None is 'stop worker' marker while self.last_task_id is not None: task_id = self.task_get(task_queue) result_queue.put(self.wrap_result(task_id, 'not_run')) self.task_done(task_queue) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/yapps/0000755000000000000000000000000013306562360017673 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/yapps/runtime.py0000644000000000000000000002764213306562360021743 0ustar rootroot# Yapps 2 Runtime, part of Yapps 2 - yet another python parser system # Copyright 1999-2003 by Amit J. 
Patel # Enhancements copyright 2003-2004 by Matthias Urlichs # # This version of the Yapps 2 Runtime can be distributed under the # terms of the MIT open source license, either found in the LICENSE file # included with the Yapps distribution # or at # # """Run time libraries needed to run parsers generated by Yapps. This module defines parse-time exception classes, a scanner class, a base class for parsers produced by Yapps, and a context class that keeps track of the parse stack. """ import sys, re MIN_WINDOW=4096 # File lookup window class SyntaxError(Exception): """When we run into an unexpected token, this is the exception to use""" def __init__(self, pos=None, msg="Bad Token", context=None): Exception.__init__(self) self.pos = pos self.msg = msg self.context = context def __str__(self): if not self.pos: return 'SyntaxError' else: return 'SyntaxError@%s(%s)' % (repr(self.pos), self.msg) class NoMoreTokens(Exception): """Another exception object, for when we run out of tokens""" pass class Token(object): """Yapps token. This is a container for a scanned token. """ def __init__(self, type,value, pos=None): """Initialize a token.""" self.type = type self.value = value self.pos = pos def __repr__(self): output = '<%s: %s' % (self.type, repr(self.value)) if self.pos: output += " @ " if self.pos[0]: output += "%s:" % self.pos[0] if self.pos[1]: output += "%d" % self.pos[1] if self.pos[2] is not None: output += ".%d" % self.pos[2] output += ">" return output in_name=0 class Scanner(object): """Yapps scanner. The Yapps scanner can work in context sensitive or context insensitive modes. The token(i) method is used to retrieve the i-th token. It takes a restrict set that limits the set of tokens it is allowed to return. In context sensitive mode, this restrict set guides the scanner. In context insensitive mode, there is no restriction (the set is always the full set of tokens). 
""" def __init__(self, patterns, ignore, input="", file=None,filename=None,stacked=False): """Initialize the scanner. Parameters: patterns : [(terminal, uncompiled regex), ...] or None ignore : {terminal:None, ...} input : string If patterns is None, we assume that the subclass has defined self.patterns : [(terminal, compiled regex), ...]. Note that the patterns parameter expects uncompiled regexes, whereas the self.patterns field expects compiled regexes. The 'ignore' value is either None or a callable, which is called with the scanner and the to-be-ignored match object; this can be used for include file or comment handling. """ if not filename: global in_name filename="" % in_name in_name += 1 self.input = input self.ignore = ignore self.file = file self.filename = filename self.pos = 0 self.del_pos = 0 # skipped self.line = 1 self.del_line = 0 # skipped self.col = 0 self.tokens = [] self.stack = None self.stacked = stacked self.last_read_token = None self.last_token = None self.last_types = None if patterns is not None: # Compile the regex strings into regex objects self.patterns = [] for terminal, regex in patterns: self.patterns.append( (terminal, re.compile(regex)) ) def stack_input(self, input="", file=None, filename=None): """Temporarily parse from a second file.""" # Already reading from somewhere else: Go on top of that, please. 
if self.stack: # autogenerate a recursion-level-identifying filename if not filename: filename = 1 else: try: filename += 1 except TypeError: pass # now pass off to the include file self.stack.stack_input(input,file,filename) else: try: filename += 0 except TypeError: pass else: filename = "" % filename # self.stack = object.__new__(self.__class__) # Scanner.__init__(self.stack,self.patterns,self.ignore,input,file,filename, stacked=True) # Note that the pattern+ignore are added by the generated # scanner code self.stack = self.__class__(input,file,filename, stacked=True) def get_pos(self): """Return a file/line/char tuple.""" if self.stack: return self.stack.get_pos() return (self.filename, self.line+self.del_line, self.col) # def __repr__(self): # """Print the last few tokens that have been scanned in""" # output = '' # for t in self.tokens: # output += '%s\n' % (repr(t),) # return output def print_line_with_pointer(self, pos, length=0, out=sys.stderr): """Print the line of 'text' that includes position 'p', along with a second line with a single caret (^) at position p""" file,line,p = pos if file != self.filename: if self.stack: return self.stack.print_line_with_pointer(pos,length=length,out=out) print >>out, "(%s: not in input buffer)" % file return text = self.input p += length-1 # starts at pos 1 origline=line line -= self.del_line spos=0 if line > 0: while 1: line = line - 1 try: cr = text.index("\n",spos) except ValueError: if line: text = "" break if line == 0: text = text[spos:cr] break spos = cr+1 else: print >>out, "(%s:%d not in input buffer)" % (file,origline) return # Now try printing part of the line text = text[max(p-80, 0):p+80] p = p - max(p-80, 0) # Strip to the left i = text[:p].rfind('\n') j = text[:p].rfind('\r') if i < 0 or (0 <= j < i): i = j if 0 <= i < p: p = p - i - 1 text = text[i+1:] # Strip to the right i = text.find('\n', p) j = text.find('\r', p) if i < 0 or (0 <= j < i): i = j if i >= 0: text = text[:i] # Now shorten the text while 
len(text) > 70 and p > 60: # Cut off 10 chars text = "..." + text[10:] p = p - 7 # Now print the string, along with an indicator print >>out, '> ',text print >>out, '> ',' '*p + '^' def grab_input(self): """Get more input if possible.""" if not self.file: return if len(self.input) - self.pos >= MIN_WINDOW: return data = self.file.read(MIN_WINDOW) if data is None or data == "": self.file = None # Drop bytes from the start, if necessary. if self.pos > 2*MIN_WINDOW: self.del_pos += MIN_WINDOW self.del_line += self.input[:MIN_WINDOW].count("\n") self.pos -= MIN_WINDOW self.input = self.input[MIN_WINDOW:] + data else: self.input = self.input + data def getchar(self): """Return the next character.""" self.grab_input() c = self.input[self.pos] self.pos += 1 return c def token(self, restrict, context=None): """Scan for another token.""" while 1: if self.stack: try: return self.stack.token(restrict, context) except StopIteration: self.stack = None # Keep looking for a token, ignoring any in self.ignore self.grab_input() # special handling for end-of-file if self.stacked and self.pos==len(self.input): raise StopIteration # Search the patterns for the longest match, with earlier # tokens in the list having preference best_match = -1 best_pat = '(error)' best_m = None for p, regexp in self.patterns: # First check to see if we're ignoring this token if restrict and p not in restrict and p not in self.ignore: continue m = regexp.match(self.input, self.pos) if m and m.end()-m.start() > best_match: # We got a match that's better than the previous one best_pat = p best_match = m.end()-m.start() best_m = m # If we didn't find anything, raise an error if best_pat == '(error)' and best_match < 0: msg = 'Bad Token' if restrict: msg = 'Trying to find one of '+', '.join(restrict) raise SyntaxError(self.get_pos(), msg, context=context) ignore = best_pat in self.ignore value = self.input[self.pos:self.pos+best_match] if not ignore: tok=Token(type=best_pat, value=value, pos=self.get_pos()) 
self.pos += best_match npos = value.rfind("\n") if npos > -1: self.col = best_match-npos self.line += value.count("\n") else: self.col += best_match # If we found something that isn't to be ignored, return it if not ignore: if len(self.tokens) >= 10: del self.tokens[0] self.tokens.append(tok) self.last_read_token = tok # print repr(tok) return tok else: ignore = self.ignore[best_pat] if ignore: ignore(self, best_m) def peek(self, *types, **kw): """Returns the token type for lookahead; if there are any args then the list of args is the set of token types to allow""" context = kw.get("context",None) if self.last_token is None: self.last_types = types self.last_token = self.token(types,context) elif self.last_types: for t in types: if t not in self.last_types: raise NotImplementedError("Unimplemented: restriction set changed") return self.last_token.type def scan(self, type, **kw): """Returns the matched text, and moves to the next token""" context = kw.get("context",None) if self.last_token is None: tok = self.token([type],context) else: if self.last_types and type not in self.last_types: raise NotImplementedError("Unimplemented: restriction set changed") tok = self.last_token self.last_token = None if tok.type != type: if not self.last_types: self.last_types=[] raise SyntaxError(tok.pos, 'Trying to find '+type+': '+ ', '.join(self.last_types)+", got "+tok.type, context=context) return tok.value class Parser(object): """Base class for Yapps-generated parsers. 
""" def __init__(self, scanner): self._scanner = scanner def _stack(self, input="",file=None,filename=None): """Temporarily read from someplace else""" self._scanner.stack_input(input,file,filename) self._tok = None def _peek(self, *types, **kw): """Returns the token type for lookahead; if there are any args then the list of args is the set of token types to allow""" return self._scanner.peek(*types, **kw) def _scan(self, type, **kw): """Returns the matched text, and moves to the next token""" return self._scanner.scan(type, **kw) class Context(object): """Class to represent the parser's call stack. Every rule creates a Context that links to its parent rule. The contexts can be used for debugging. """ def __init__(self, parent, scanner, rule, args=()): """Create a new context. Args: parent: Context object or None scanner: Scanner object rule: string (name of the rule) args: tuple listing parameters to the rule """ self.parent = parent self.scanner = scanner self.rule = rule self.args = args while scanner.stack: scanner = scanner.stack self.token = scanner.last_read_token def __str__(self): output = '' if self.parent: output = str(self.parent) + ' > ' output += self.rule return output def print_error(err, scanner, max_ctx=None): """Print error messages, the parser stack, and the input text -- for human-readable error messages.""" # NOTE: this function assumes 80 columns :-( # Figure out the line number pos = err.pos if not pos: pos = scanner.get_pos() file_name, line_number, column_number = pos print >>sys.stderr, '%s:%d:%d: %s' % (file_name, line_number, column_number, err.msg) scanner.print_line_with_pointer(pos) context = err.context token = None while context: print >>sys.stderr, 'while parsing %s%s:' % (context.rule, tuple(context.args)) if context.token: token = context.token if token: scanner.print_line_with_pointer(token.pos, length=len(token.value)) context = context.parent if max_ctx: max_ctx = max_ctx-1 if not max_ctx: break def 
wrap_error_reporter(parser, rule, *args,**kw): try: return getattr(parser, rule)(*args,**kw) except SyntaxError, e: print_error(e, parser._scanner) except NoMoreTokens: print >>sys.stderr, 'Could not complete parsing; stopped around here:' print >>sys.stderr, parser._scanner tarantool_1.9.1.26.g63eb81e3c/test-run/lib/yapps/__init__.py0000644000000000000000000000000013306562360021772 0ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/box_connection.py0000644000000000000000000000641013306562360022121 0ustar rootroot__author__ = "Konstantin Osipov " # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
import os import sys import errno import ctypes import socket import struct import warnings from tarantool_connection import TarantoolConnection #monkey patch tarantool and msgpack from lib.utils import check_libs check_libs() from tarantool import Connection as tnt_connection from tarantool import Schema SEPARATOR = '\n' class BoxConnection(TarantoolConnection): def __init__(self, host, port): super(BoxConnection, self).__init__(host, port) self.py_con = tnt_connection(host, port, connect_now=False, socket_timeout=100) self.py_con.error = False self.sort = False def connect(self): self.py_con.connect() def authenticate(self, user, password): self.py_con.authenticate(user, password) def disconnect(self): self.py_con.close() def reconnect(self): if self.py_con.connected: self.disconnect() self.connect() def set_schema(self, schemadict): self.py_con.schema = Schema(schemadict) def check_connection(self): rc = self.py_con._sys_recv( self.py_con._socket.fileno(), ' ', 1, socket.MSG_DONTWAIT | socket.MSG_PEEK ) if ctypes.get_errno() == errno.EAGAIN: ctypes.set_errno(0) return True return False def execute_no_reconnect(self, command, silent=True): if not command: return if not silent: print command cmd = command.replace(SEPARATOR, ' ') + SEPARATOR response = self.py_con.call(cmd) result = str(response) if not silent: print response return response def execute(self, command, silent=True): return self.execute_no_reconnect(command, silent) def call(self, command, *args): if not command: return print 'call ', command, args response = self.py_con.call(command, *args) result = str(response) print result return result tarantool_1.9.1.26.g63eb81e3c/test-run/lib/preprocessor.py0000644000000000000000000003443613306562360021651 0ustar rootrootimport os import shlex import sys from ast import literal_eval from collections import deque import yaml from gevent import socket from lib.admin_connection import AdminAsyncConnection from lib.colorer import color_stdout, color_log class 
Namespace(object): pass class LuaPreprocessorException(Exception): def __init__(self, val): super(LuaPreprocessorException, self).__init__() self.value = val def __str__(self): return "lua preprocessor error: " + repr(self.value) class TestState(object): def __init__(self, suite_ini, default_server, create_server, params = {}, **kwargs): self.delimiter = '' self.suite_ini = suite_ini self.environ = Namespace() self.operation = False self.create_server = create_server self.servers = { 'default': default_server } self.connections = {} self.run_params = params if default_server is not None: self.connections = { 'default': default_server.admin } # curcon is an array since we may have many connections self.curcon = [self.connections['default']] nmsp = Namespace() setattr(nmsp, 'admin', default_server.admin.uri) setattr(nmsp, 'listen', default_server.iproto.uri) setattr(self.environ, 'default', nmsp) # for propagating 'current_test' to non-default servers self.default_server_no_connect = kwargs.get('default_server_no_connect') def parse_preprocessor(self, string): token_store = deque() lexer = shlex.shlex(string) lexer.commenters = [] token = lexer.get_token() if not token: return if token == 'setopt': option = lexer.get_token() if not option: raise LuaPreprocessorException("Wrong token for setopt: expected option name") value = lexer.get_token() if not value: raise LuaPreprocessorException("Wrong token for setopt: expected option value") return self.options(option, value) elif token == 'eval': name = lexer.get_token() expr = lexer.get_token() # token format: eval "" return self.lua_eval(name, expr[1:-1]) elif token == 'switch': server = lexer.get_token() return self.switch(server) elif token == 'config': var_name = lexer.get_token() return self.run_params token_store.append(token) token = lexer.get_token() if token == 'server': stype = token_store.popleft() sname = lexer.get_token() if not sname: raise LuaPreprocessorException("Wrong token for server: expected name") 
options = {} temp = lexer.get_token() if not temp: pass elif temp == 'with': while True: k = lexer.get_token() if not k: break v = lexer.get_token() if v == '=': v = lexer.get_token() options[k] = v lexer.get_token() else: raise LuaPreprocessorException("Wrong token for server: expected 'with', got " + repr(temp)) return self.server(stype, sname, options) elif token == 'connection': ctype = token_store.popleft() cname = [lexer.get_token()] if not cname[0]: raise LuaPreprocessorException("Wrong token for connection: expected name") cargs = None temp = lexer.get_token() if temp == 'to': cargs = lexer.get_token() elif temp == ',': while True: a = lexer.get_token() if not a: break if a == ',': continue cname.append(a) elif temp: raise LuaPreprocessorException("Wrong token for server: expected 'to' or ',', got " + repr(temp)) return self.connection(ctype, cname, cargs) elif token == 'filter': ftype = token_store.popleft() ref = None ret = None temp = lexer.get_token() if temp: ref = temp if not temp: raise LuaPreprocessorException("Wrong token for filter: expected filter1") if lexer.get_token() != 'to': raise LuaPreprocessorException("Wrong token for filter: expected 'to', got {0}".format(repr(temp))) temp = lexer.get_token() if not temp: raise LuaPreprocessorException("Wrong token for filter: expected filter2") ret = temp return self.filter(ftype, ref, ret) elif token == 'variable': ftype = token_store.popleft() ref = lexer.get_token() temp = lexer.get_token() if temp != 'to': raise LuaPreprocessorException("Wrong token for filter: exptected 'to', got {0}".format(repr(temp))) ret = lexer.get_token() return self.variable(ftype, ref, ret) else: raise LuaPreprocessorException("Wrong command: "+repr(lexer.instream.getvalue())) def options(self, key, value): if key == 'delimiter': self.delimiter = value[1:-1] else: raise LuaPreprocessorException("Wrong option: "+repr(key)) def server_start(self, ctype, sname, opts): color_log('\nDEBUG: TestState[%s].server_start(%s, %s, 
%s)\n' % ( hex(id(self)), str(ctype), str(sname), str(opts)), schema='test_var') if sname not in self.servers: raise LuaPreprocessorException('Can\'t start nonexistent server '+repr(sname)) wait = True if 'wait' in opts and opts['wait'] == 'False': wait = False wait_load = True if 'wait_load' in opts and opts['wait_load'] == 'False': wait_load = False args = [] if 'args' in opts: args = opts['args'][1:-1].split(' ') self.servers[sname].start(silent=True, rais=True, wait=wait, wait_load=wait_load, args=args) self.connections[sname] = self.servers[sname].admin try: self.connections[sname]('return true', silent=True) except socket.error as e: LuaPreprocessorException('Can\'t start server '+repr(sname)) def server_stop(self, ctype, sname, opts): color_log('\nDEBUG: TestState[%s].server_stop(%s, %s, %s)\n' % ( hex(id(self)), str(ctype), str(sname), str(opts)), schema='test_var') if sname not in self.servers: raise LuaPreprocessorException('Can\'t stop nonexistent server '+repr(sname)) self.connections[sname].disconnect() self.connections.pop(sname) self.servers[sname].stop() def server_create(self, ctype, sname, opts): color_log('\nDEBUG: TestState[%s].server_create(%s, %s, %s)\n' % ( hex(id(self)), str(ctype), str(sname), str(opts)), schema='test_var') if sname in self.servers: raise LuaPreprocessorException('Server {0} already exists'.format(repr(sname))) temp = self.create_server() temp.name = sname if 'need_init' in opts: temp.need_init = True if opts['need_init'] == 'True' else False if 'script' in opts: temp.script = opts['script'][1:-1] if 'lua_libs' in opts: temp.lua_libs = opts['lua_libs'][1:-1].split(' ') temp.rpl_master = None if 'rpl_master' in opts: temp.rpl_master = self.servers[opts['rpl_master']] temp.vardir = self.suite_ini['vardir'] temp.inspector_port = int(self.suite_ini.get( 'inspector_port', temp.DEFAULT_INSPECTOR )) if self.default_server_no_connect: temp.current_test = self.default_server_no_connect.current_test elif self.servers['default']: 
temp.current_test = self.servers['default'].current_test temp.install(silent=True) self.servers[sname] = temp if 'workdir' in opts: copy_from = opts['workdir'] copy_to = self.servers[sname].name os.system('rm -rf %s/%s' % ( self.servers[sname].vardir, copy_to )) os.system('cp -r %s %s/%s' % ( copy_from, self.servers[sname].vardir, copy_to )) nmsp = Namespace() setattr(nmsp, 'admin', temp.admin.port) setattr(nmsp, 'listen', temp.iproto.port) if temp.rpl_master: setattr(nmsp, 'master', temp.rpl_master.iproto.port) setattr(self.environ, sname, nmsp) if 'return_listen_uri' in opts and opts['return_listen_uri'] == 'True': return self.servers[sname].iproto.uri def server_deploy(self, ctype, sname, opts): self.servers[sname].install() def server_cleanup(self, ctype, sname, opts): if sname not in self.servers: raise LuaPreprocessorException('Can\'t cleanup nonexistent server '+repr(sname)) self.servers[sname].cleanup() if sname != 'default': if hasattr(self.environ, sname): delattr(self.environ, sname) else: self.servers[sname].install(silent=True) def server_delete(self, ctype, sname, opts): if sname not in self.servers: raise LuaPreprocessorException('Can\'t cleanup nonexistent server '+repr(sname)) self.servers[sname].cleanup() if sname != 'default': if hasattr(self.environ, sname): delattr(self.environ, sname) del self.servers[sname] def switch(self, server): self.lua_eval(server, "env=require('test_run')", silent=True) self.lua_eval( server, "test_run=env.new()", silent=True ) return self.connection('set', [server, ], None) def server_restart(self, ctype, sname, opts): # self restart from lua with proxy if 'proxy' not in self.servers: self.server_create( 'create', 'proxy', {'script': '"box/proxy.lua"'} ) self.server_start('start', 'proxy', {}) self.switch('proxy') # restart real server and switch back self.server_stop(ctype, sname, opts) if 'cleanup' in opts: self.server_cleanup(ctype, sname, opts) self.server_deploy(ctype, sname, opts) self.server_start(ctype, sname, 
opts) self.switch(sname) # remove proxy self.server_stop('stop', 'proxy', {}) def server(self, ctype, sname, opts): attr = 'server_%s' % ctype if hasattr(self, attr): return getattr(self, attr)(ctype, sname, opts) else: raise LuaPreprocessorException( 'Unknown command for server: %s' % ctype ) def connection(self, ctype, cnames, sname): # we always get a list of connections as input here cname = cnames[0] if ctype == 'create': if sname not in self.servers: raise LuaPreprocessorException('Can\'t create connection to nonexistent server '+repr(sname)) if cname in self.connections: raise LuaPreprocessorException('Connection {0} already exists'.format(repr(cname))) self.connections[cname] = AdminAsyncConnection('localhost', self.servers[sname].admin.port) self.connections[cname].connect() elif ctype == 'drop': if cname not in self.connections: raise LuaPreprocessorException('Can\'t drop nonexistent connection '+repr(cname)) self.connections[cname].disconnect() self.connections.pop(cname) elif ctype == 'set': for i in cnames: if not i in self.connections: raise LuaPreprocessorException('Can\'t set nonexistent connection '+repr(cname)) self.curcon = [self.connections[i] for i in cnames] else: raise LuaPreprocessorException('Unknown command for connection: '+repr(ctype)) def filter(self, ctype, ref, ret): if ctype == 'push': sys.stdout.push_filter(ref[1:-1], ret[1:-1]) elif ctype == 'pop': sys.stdout.pop_filter() elif ctype == 'clear': sys.stdout.clear_all_filters() else: raise LuaPreprocessorException("Wrong command for filters: " + repr(ctype)) def lua_eval(self, name, expr, silent=True): self.servers[name].admin.reconnect() result = self.servers[name].admin( '%s%s' % (expr, self.delimiter), silent=silent ) result = yaml.load(result) if not result: result = [] return result def variable(self, ctype, ref, ret): if ctype == 'set': self.curcon[0].reconnect() self.curcon[0](ref+'='+str(eval(ret[1:-1], {}, self.environ.__dict__)), silent=True) else: raise 
LuaPreprocessorException("Wrong command for variables: " + repr(ctype)) def __call__(self, string): string = string[3:].strip() self.parse_preprocessor(string) def stop_nondefault(self): color_log('\nDEBUG: TestState[%s].stop_nondefault()\n' % hex(id(self)), schema='test_var') if sys.stdout.__class__.__name__ == 'FilteredStream': sys.stdout.clear_all_filters() for k, v in self.servers.iteritems(): # don't stop the default server if k == 'default': continue v.stop(silent=True) if k in self.connections: self.connections[k].disconnect() self.connections.pop(k) def cleanup_nondefault(self): color_log('\nDEBUG: TestState[%s].cleanup()\n' % hex(id(self)), schema='test_var') for k, v in self.servers.iteritems(): # don't cleanup the default server if k == 'default': continue v.cleanup() def kill_current_test(self): self.servers['default'].kill_current_test() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/connpool.py0000644000000000000000000001121513306562360020740 0ustar rootrootimport logging import gevent try: from gevent.lock import BoundedSemaphore except: from gevent.coros import BoundedSemaphore # before gevent-1.0 from gevent import socket from collections import deque from contextlib import contextmanager from functools import wraps from test import TestRunGreenlet __all__ = ["ConnectionPool", "retry"] DEFAULT_EXC_CLASSES = (socket.error,) class ConnectionPool(object): """ Generic TCP connection pool, with the following features: * Configurable pool size * Auto-reconnection when a broken socket is detected * Optional periodic keepalive """ # Frequency at which the pool is populated at startup SPAWN_FREQUENCY = 0.1 def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None): self.size = size self.conn = deque() self.lock = BoundedSemaphore(size) self.keepalive = keepalive # Exceptions list must be in tuple form to be caught properly self.exc_classes = tuple(exc_classes) for i in xrange(size): self.lock.acquire() for i in xrange(size): greenlet = 
TestRunGreenlet(self._addOne) greenlet.start_later(self.SPAWN_FREQUENCY * i) if self.keepalive: greenlet = TestRunGreenlet(self._keepalive_periodic) greenlet.start_later() def _new_connection(self): """ Estabilish a new connection (to be implemented in subclasses). """ raise NotImplementedError def _keepalive(self, c): """ Implement actual application-level keepalive (to be reimplemented in subclasses). :raise: socket.error if the connection has been closed or is broken. """ raise NotImplementedError() def _keepalive_periodic(self): delay = float(self.keepalive) / self.size while 1: try: with self.get() as c: self._keepalive(c) except self.exc_classes: # Nothing to do, the pool will generate a new connection later pass gevent.sleep(delay) def _addOne(self): stime = 0.1 while 1: c = self._new_connection() if c: break gevent.sleep(stime) if stime < 400: stime *= 2 self.conn.append(c) self.lock.release() @contextmanager def get(self): """ Get a connection from the pool, to make and receive traffic. If the connection fails for any reason (socket.error), it is dropped and a new one is scheduled. Please use @retry as a way to automatically retry whatever operation you were performing. """ self.lock.acquire() try: c = self.conn.popleft() yield c except self.exc_classes: # The current connection has failed, drop it and create a new one greenlet = TestRunGreenlet(self._addOne) greenlet.start_later(1) raise except: self.conn.append(c) self.lock.release() raise else: # NOTE: cannot use finally because MUST NOT reuse the connection # if it failed (socket.error) self.conn.append(c) self.lock.release() def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None, retry_log_level=logging.INFO, retry_log_message="Connection broken in '{f}' (error: '{e}'); " "retrying with new connection.", max_failures=None, interval=0, max_failure_log_level=logging.ERROR, max_failure_log_message="Max retries reached for '{f}'. 
Aborting."): """ Decorator to automatically reexecute a function if the connection is broken for any reason. """ exc_classes = tuple(exc_classes) @wraps(f) def deco(*args, **kwargs): failures = 0 while True: try: return f(*args, **kwargs) except exc_classes as e: if logger is not None: logger.log(retry_log_level, retry_log_message.format(f=f.func_name, e=e)) gevent.sleep(interval) failures += 1 if max_failures is not None \ and failures > max_failures: if logger is not None: logger.log(max_failure_log_level, max_failure_log_message.format( f=f.func_name, e=e)) raise return deco tarantool_1.9.1.26.g63eb81e3c/test-run/lib/utils.py0000644000000000000000000001301413306562360020250 0ustar rootrootimport os import sys import collections import signal import random from gevent import socket from lib.colorer import color_stdout UNIX_SOCKET_LEN_LIMIT = 107 def check_libs(): deps = [ ('msgpack', 'msgpack-python'), ('tarantool', 'tarantool-python') ] base_path = os.path.dirname(os.path.abspath(__file__)) for (mod_name, mod_dir) in deps: mod_path = os.path.join(base_path, mod_dir) if mod_path not in sys.path: sys.path = [mod_path] + sys.path for (mod_name, _mod_dir) in deps: try: __import__(mod_name) except ImportError as e: color_stdout("\n\nNo %s library found\n" % mod_name, schema='error') print(e) sys.exit(1) def non_empty_valgrind_logs(paths_to_log): """ Check that there were no warnings in the log.""" non_empty_logs = [] for path_to_log in paths_to_log: if os.path.exists(path_to_log) and os.path.getsize(path_to_log) != 0: non_empty_logs.append(path_to_log) return non_empty_logs def print_tail_n(filename, num_lines): """Print N last lines of a file.""" with open(filename, "r+") as logfile: tail_n = collections.deque(logfile, num_lines) for line in tail_n: color_stdout(line, schema='tail') def check_port(port, rais=True, ipv4=True, ipv6=True): """ True -- it's possible to listen on this port for TCP/IPv4 or TCP/IPv6 connections (UNIX Sockets in case of file path). 
False -- otherwise. """ try: if isinstance(port, (int, long)): if ipv4: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('127.0.0.1', port)) sock.listen(5) sock.close() if ipv6: sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) sock.bind(('::1', port)) sock.listen(5) sock.close() else: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(port) except socket.error as e: if rais: raise RuntimeError( "The server is already running on port {0}".format(port)) return False return True # A list of ports used so far. Avoid reusing ports # to reduce race conditions between starting and stopping servers. # We're using tarantoolctl for instance control, and it reports # a successful stop of the server before it really closes its # network sockets ports = {} is_ipv6_supported = check_port(port=0, rais=False, ipv4=False, ipv6=True) def find_port(): global ports start_port = int(os.environ.get('TEST_RUN_TCP_PORT_START', '3000')) end_port = int(os.environ.get('TEST_RUN_TCP_PORT_END', '65535')) port = random.randrange(start_port, end_port + 1) while port <= end_port: is_free = check_port(port, False, ipv4=True, ipv6=is_ipv6_supported) if port not in ports and is_free: ports[port] = True return port port += 1 # We've made a full circle, clear the list of used ports and start # from scratch ports = {} return find_port() def find_in_path(name): path = os.curdir + os.pathsep + os.environ["PATH"] for _dir in path.split(os.pathsep): exe = os.path.join(_dir, name) if os.access(exe, os.X_OK): return exe return '' # http://stackoverflow.com/a/2549950 SIGNAMES = dict((v, k) for k, v in reversed(sorted(signal.__dict__.items())) if k.startswith('SIG') and not k.startswith('SIG_')) def signame(signum): return SIGNAMES[signum] def warn_unix_sockets_at_start(vardir): max_unix_socket_rel = '???_replication/autobootstrap_guest3.control' real_vardir = os.path.realpath(vardir) max_unix_socket_abs = os.path.join(real_vardir, max_unix_socket_rel) 
max_unix_socket_real = os.path.realpath(max_unix_socket_abs) if len(max_unix_socket_real) > UNIX_SOCKET_LEN_LIMIT: color_stdout( 'WARGING: unix sockets can become longer than %d symbols:\n' % UNIX_SOCKET_LEN_LIMIT, schema='error') color_stdout('WARNING: for example: "%s" has length %d\n' % (max_unix_socket_real, len(max_unix_socket_real)), schema='error') def warn_unix_socket(path): real_path = os.path.realpath(path) if len(real_path) <= UNIX_SOCKET_LEN_LIMIT or \ real_path in warn_unix_socket.warned: return color_stdout( '\nWARGING: unix socket\'s "%s" path has length %d symbols that is ' 'longer than %d. That likely will cause failing of tests.\n' % (real_path, len(real_path), UNIX_SOCKET_LEN_LIMIT), schema='error') warn_unix_socket.warned.add(real_path) warn_unix_socket.warned = set() def safe_makedirs(directory): if os.path.isdir(directory): return # try-except to prevent races btw processes try: os.makedirs(directory) except OSError: pass def format_process(pid): cmdline = 'unknown' try: with open('/proc/%d/cmdline' % pid, 'r') as f: cmdline = ' '.join(f.read().split('\0')).strip() or cmdline except (OSError, IOError): pass status = 'unknown' try: with open('/proc/%d/status' % pid, 'r') as f: for line in f: key, value = line.split(':', 1) if key == 'State': status = value.strip() except (OSError, IOError): pass return 'process %d [%s; %s]' % (pid, status, cmdline) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/Makefile0000644000000000000000000000003313306562360020173 0ustar rootrootsql.py: sql.g yapps sql.g tarantool_1.9.1.26.g63eb81e3c/test-run/lib/colorer.py0000644000000000000000000001642213306562360020563 0ustar rootrootimport os import sys from lib.singleton import Singleton # Use it to print messages on the screen and to the worker's log. color_stdout = None # = Colorer(); below the class definition def color_log(*args, **kwargs): """ Print the message only to log file, not on the screen. 
The intention is use this function only for regular, non-error output that appears every run and mostly not needed for a user (but useful when investigating occured problem). Don't hide errors and backtraces (or any other details of an exceptional circumstances) from the screen, because such details especially useful with CI bots. """ kwargs['log_only'] = True color_stdout(*args, **kwargs) class CSchema(object): objects = {} def __init__(self): self.main_objects = { 'diff_mark': {}, 'diff_in': {}, 'diff_out': {}, 'test_pass': {}, 'test_fail': {}, 'test_new': {}, 'test_skip': {}, 'test_disa': {}, 'error': {}, 'lerror': {}, 'tail': {}, 'ts_text': {}, 'path': {}, 'info': {}, 'separator': {}, 't_name': {}, 'serv_text': {}, 'version': {}, 'tr_text': {}, 'log': {}, } self.main_objects.update(self.objects) class SchemaAscetic(CSchema): objects = { 'diff_mark': {'fgcolor': 'magenta'}, 'diff_in': {'fgcolor': 'green'}, 'diff_out': {'fgcolor': 'red'}, 'test_pass': {'fgcolor': 'green'}, 'test_fail': {'fgcolor': 'red'}, 'test_new': {'fgcolor': 'lblue'}, 'test_skip': {'fgcolor': 'grey'}, 'test_disa': {'fgcolor': 'grey'}, 'error': {'fgcolor': 'red'}, 'test_var': {'fgcolor': 'yellow'}, } class SchemaPretty(CSchema): objects = { 'diff_mark': {'fgcolor': 'magenta'}, 'diff_in': {'fgcolor': 'blue'}, 'diff_out': {'fgcolor': 'red'}, 'test_pass': {'fgcolor': 'green'}, 'test_fail': {'fgcolor': 'red'}, 'test_new': {'fgcolor': 'lblue'}, 'test_skip': {'fgcolor': 'grey'}, 'test_disa': {'fgcolor': 'grey'}, 'error': {'fgcolor': 'red'}, 'lerror': {'fgcolor': 'lred'}, 'tail': {'fgcolor': 'lblue'}, 'ts_text': {'fgcolor': 'lmagenta'}, 'path': {'fgcolor': 'green', 'bold':True}, 'info': {'fgcolor': 'yellow', 'bold':True}, 'separator': {'fgcolor': 'blue'}, 't_name': {'fgcolor': 'lblue'}, 'serv_text': {'fgcolor': 'lmagenta'}, 'version': {'fgcolor': 'yellow', 'bold':True}, 'tr_text': {'fgcolor': 'green'}, 'log': {'fgcolor': 'grey'}, 'test_var': {'fgcolor': 'yellow'}, } class Colorer(object): """ 
Colorer/Styler based on VT220+ specifications (Not full). Based on: 1. ftp://ftp.cs.utk.edu/pub/shuford/terminal/dec_vt220_codes.txt 2. http://invisible-island.net/xterm/ctlseqs/ctlseqs.html """ __metaclass__ = Singleton fgcolor = { "black" : '0;30', "red" : '0;31', "green" : '0;32', "brown" : '0;33', "blue" : '0;34', "magenta" : '0;35', "cyan" : '0;36', "grey" : '0;37', "lgrey" : '1;30', "lred" : '1;31', "lgreen" : '1;32', "yellow" : '1;33', "lblue" : '1;34', "lmagenta" : '1;35', "lcyan" : '1;36', "white" : '1;37', } bgcolor = { "black" : '0;40', "red" : '0;41', "green" : '0;42', "brown" : '0;43', "blue" : '0;44', "magenta" : '0;45', "cyan" : '0;46', "grey" : '0;47', "lgrey" : '1;40', "lred" : '1;41', "lgreen" : '1;42', "yellow" : '1;43', "lblue" : '1;44', "lmagenta" : '1;45', "lcyan" : '1;46', "white" : '1;47', } attributes = { "bold" : '1', "underline" : '4', "blinking" : '5', "negative" : '7', "invisible" : '8', } begin = "\033[" end = "m" disable = begin+'0'+end def __init__(self): # These two fields can be filled later. It's for passing output from # workers via result queue. When worker initializes, it set these # fields and just use Colorer as before having multiplexed output. 
self.queue_msg_wrapper = None self.queue = None self.stdout = sys.stdout self.is_term = self.stdout.isatty() self.colors = None if self.is_term: try: p = os.popen('tput colors 2>/dev/null') self.colors = int(p.read()) except: pass finally: p.close() schema = os.getenv('TT_SCHEMA', 'ascetic') if schema == 'ascetic': self.schema = SchemaAscetic() elif schema == 'pretty': self.schema = SchemaPretty() else: self.schema = CSchema() self.schema = self.schema.main_objects def set_stdout(self): sys.stdout = self def ret_stdout(self): sys.stdout = self.stdout def _write(self, obj, log_only): if self.queue: if self.queue_msg_wrapper: obj = self.queue_msg_wrapper(obj, log_only) self.queue.put(obj) elif not log_only: self.stdout.write(obj) def _flush(self): if not self.queue: self.stdout.flush() def write(self, *args, **kwargs): flags = [] if 'schema' in kwargs: kwargs.update(self.schema[kwargs['schema']]) for i in self.attributes: if i in kwargs and kwargs[i] == True: flags.append(self.attributes[i]) flags.append(self.fgcolor[kwargs['fgcolor']]) if 'fgcolor' in kwargs else None flags.append(self.bgcolor[kwargs['bgcolor']]) if 'bgcolor' in kwargs else None data = '' if self.is_term and flags: data += self.begin + (';'.join(flags)) + self.end for i in args: data += str(i) if self.is_term: # write 'color disable' before newline to better work with parallel # processes writing signle stdout/stderr if data.endswith('\n'): data = data[:-1] + self.disable + '\n' else: data += self.disable if data: self._write(data, kwargs.get('log_only', False)) self._flush() def __call__(self, *args, **kwargs): self.write(*args, **kwargs) def writeout_unidiff(self, diff): for i in diff: if i.startswith('+'): self.write(i, schema='diff_in') elif i.startswith('-'): self.write(i, schema='diff_out') elif i.startswith('@'): self.write(i, schema='diff_mark') else: self.write(i) def flush(self): return self.stdout.flush() def fileno(self): return self.stdout.fileno() def isatty(self): return self.is_term 
# Globals ######### color_stdout = Colorer() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/server.py0000644000000000000000000001051113306562360020415 0ustar rootrootimport glob import os import shutil from itertools import product from lib.server_mixins import ValgrindMixin from lib.server_mixins import GdbMixin from lib.server_mixins import GdbServerMixin from lib.server_mixins import LLdbMixin from lib.server_mixins import StraceMixin from lib.colorer import color_stdout class Server(object): """Server represents a single server instance. Normally, the program operates with only one server, but in future we may add replication slaves. The server is started once at the beginning of each suite, and stopped at the end.""" DEFAULT_INSPECTOR = 0 TEST_RUN_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) @property def vardir(self): if not hasattr(self, '_vardir'): raise ValueError("No vardir specified") return self._vardir @vardir.setter def vardir(self, path): if path == None: return self._vardir = os.path.abspath(path) @staticmethod def get_mixed_class(cls, ini): if ini is None: return cls conflict_options = ('valgrind', 'gdb', 'gdbserver', 'lldb', 'strace') for op1, op2 in product(conflict_options, repeat=2): if op1 != op2 and \ (op1 in ini and ini[op1]) and \ (op2 in ini and ini[op2]): format_str = 'Can\'t run under {} and {} simultaniously' raise OSError(format_str.format(op1, op2)) lname = cls.__name__.lower() if ini.get('valgrind') and not 'valgrind' in lname: cls = type('Valgrind' + cls.__name__, (ValgrindMixin, cls), {}) elif ini.get('gdbserver') and not 'gdbserver' in lname: cls = type('GdbServer' + cls.__name__, (GdbServerMixin, cls), {}) elif ini.get('gdb') and not 'gdb' in lname: cls = type('Gdb' + cls.__name__, (GdbMixin, cls), {}) elif ini.get('lldb') and not 'lldb' in lname: cls = type('LLdb' + cls.__name__, (LLdbMixin, cls), {}) elif 'strace' in ini and ini['strace']: cls = type('Strace' + cls.__name__, (StraceMixin, cls), {}) return 
cls def __new__(cls, ini=None, *args, **kwargs): if ini == None or 'core' not in ini or ini['core'] is None: return object.__new__(cls) core = ini['core'].lower().strip() cls.mdlname = "lib.{0}_server".format(core.replace(' ', '_')) cls.clsname = "{0}Server".format(core.title().replace(' ', '')) corecls = __import__(cls.mdlname, fromlist=cls.clsname).__dict__[cls.clsname] return corecls.__new__(corecls, ini, *args, **kwargs) def __init__(self, ini, test_suite=None): self.core = ini['core'] self.ini = ini self.re_vardir_cleanup = ['*.core.*', 'core'] self.vardir = ini['vardir'] self.inspector_port = int(ini.get( 'inspector_port', self.DEFAULT_INSPECTOR )) # filled in {Test,FuncTest,LuaTest,PythonTest}.execute() # or passed through execfile() for PythonTest (see # TarantoolServer.__init__). self.current_test = None # Used in valgrind_log property. 'test_suite' is not None only for # default servers running in TestSuite.run_all() self.test_suite = test_suite def prepare_args(self, args=[]): return args def cleanup(self, full=False): if full: shutil.rmtree(self.vardir) return for re in self.re_vardir_cleanup: for f in glob.glob(os.path.join(self.vardir, re)): if os.path.isdir(f): shutil.rmtree(f) else: os.remove(f) def install(self, binary=None, vardir=None, mem=None, silent=True): pass def init(self): pass def start(self, silent=True): pass def stop(self, silent=True): pass def restart(self): pass def print_log(self, lines): color_stdout('\nLast {0} lines of Tarantool Log file [Instance "{1}"][{2}]:\n'.format( lines, self.name, self.logfile or 'null'), schema='error') if os.path.exists(self.logfile): with open(self.logfile, 'r') as log: color_stdout(''.join(log.readlines()[-lines:])) else: color_stdout(" Can't find log:\n", schema='error') tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/0000755000000000000000000000000013306562377021513 5ustar 
rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/tox.ini0000644000000000000000000000130213306562377023022 0ustar rootroot[tox] envlist = {py26,py27,py32,py33,py34}-{c,pure},{pypy,pypy3}-pure,py27-x86,py34-x86 [variants:pure] setenv= MSGPACK_PUREPYTHON=x [testenv] deps= pytest changedir=test commands= c,x86: python -c 'from msgpack import _packer, _unpacker' c,x86: py.test pure: py.test [testenv:py27-x86] basepython=python2.7-x86 deps= pytest changedir=test commands= python -c 'import sys; print(hex(sys.maxsize))' python -c 'from msgpack import _packer, _unpacker' py.test [testenv:py34-x86] basepython=python3.4-x86 deps= pytest changedir=test commands= python -c 'import sys; print(hex(sys.maxsize))' python -c 'from msgpack import _packer, _unpacker' py.test tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/0000755000000000000000000000000013306562377023140 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/pack_template.h0000644000000000000000000005034413306562377026130 0ustar rootroot/* * MessagePack packing routine template * * Copyright (C) 2008-2010 FURUHASHI Sadayuki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #if defined(__LITTLE_ENDIAN__) #define TAKE8_8(d) ((uint8_t*)&d)[0] #define TAKE8_16(d) ((uint8_t*)&d)[0] #define TAKE8_32(d) ((uint8_t*)&d)[0] #define TAKE8_64(d) ((uint8_t*)&d)[0] #elif defined(__BIG_ENDIAN__) #define TAKE8_8(d) ((uint8_t*)&d)[0] #define TAKE8_16(d) ((uint8_t*)&d)[1] #define TAKE8_32(d) ((uint8_t*)&d)[3] #define TAKE8_64(d) ((uint8_t*)&d)[7] #endif #ifndef msgpack_pack_append_buffer #error msgpack_pack_append_buffer callback is not defined #endif /* * Integer */ #define msgpack_pack_real_uint8(x, d) \ do { \ if(d < (1<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \ } else { \ /* unsigned 8 */ \ unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } \ } while(0) #define msgpack_pack_real_uint16(x, d) \ do { \ if(d < (1<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \ } else if(d < (1<<8)) { \ /* unsigned 8 */ \ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } else { \ /* unsigned 16 */ \ unsigned char buf[3]; \ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } \ } while(0) #define msgpack_pack_real_uint32(x, d) \ do { \ if(d < (1<<8)) { \ if(d < (1<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \ } else { \ /* unsigned 8 */ \ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } \ } else { \ if(d < (1<<16)) { \ /* unsigned 16 */ \ unsigned char buf[3]; \ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } else { \ /* unsigned 32 */ \ unsigned char buf[5]; \ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ msgpack_pack_append_buffer(x, buf, 5); \ } \ } \ } while(0) #define msgpack_pack_real_uint64(x, d) \ do { \ if(d < (1ULL<<8)) { \ if(d < (1ULL<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \ } else { \ /* unsigned 8 */ \ unsigned char 
buf[2] = {0xcc, TAKE8_64(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } \ } else { \ if(d < (1ULL<<16)) { \ /* unsigned 16 */ \ unsigned char buf[3]; \ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } else if(d < (1ULL<<32)) { \ /* unsigned 32 */ \ unsigned char buf[5]; \ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ msgpack_pack_append_buffer(x, buf, 5); \ } else { \ /* unsigned 64 */ \ unsigned char buf[9]; \ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \ msgpack_pack_append_buffer(x, buf, 9); \ } \ } \ } while(0) #define msgpack_pack_real_int8(x, d) \ do { \ if(d < -(1<<5)) { \ /* signed 8 */ \ unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } else { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \ } \ } while(0) #define msgpack_pack_real_int16(x, d) \ do { \ if(d < -(1<<5)) { \ if(d < -(1<<7)) { \ /* signed 16 */ \ unsigned char buf[3]; \ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } else { \ /* signed 8 */ \ unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } \ } else if(d < (1<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \ } else { \ if(d < (1<<8)) { \ /* unsigned 8 */ \ unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } else { \ /* unsigned 16 */ \ unsigned char buf[3]; \ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } \ } \ } while(0) #define msgpack_pack_real_int32(x, d) \ do { \ if(d < -(1<<5)) { \ if(d < -(1<<15)) { \ /* signed 32 */ \ unsigned char buf[5]; \ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \ msgpack_pack_append_buffer(x, buf, 5); \ } else if(d < -(1<<7)) { \ /* signed 16 */ \ unsigned char buf[3]; \ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } else { \ /* 
signed 8 */ \ unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } \ } else if(d < (1<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \ } else { \ if(d < (1<<8)) { \ /* unsigned 8 */ \ unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } else if(d < (1<<16)) { \ /* unsigned 16 */ \ unsigned char buf[3]; \ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } else { \ /* unsigned 32 */ \ unsigned char buf[5]; \ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ msgpack_pack_append_buffer(x, buf, 5); \ } \ } \ } while(0) #define msgpack_pack_real_int64(x, d) \ do { \ if(d < -(1LL<<5)) { \ if(d < -(1LL<<15)) { \ if(d < -(1LL<<31)) { \ /* signed 64 */ \ unsigned char buf[9]; \ buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \ msgpack_pack_append_buffer(x, buf, 9); \ } else { \ /* signed 32 */ \ unsigned char buf[5]; \ buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \ msgpack_pack_append_buffer(x, buf, 5); \ } \ } else { \ if(d < -(1<<7)) { \ /* signed 16 */ \ unsigned char buf[3]; \ buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } else { \ /* signed 8 */ \ unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } \ } \ } else if(d < (1<<7)) { \ /* fixnum */ \ msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \ } else { \ if(d < (1LL<<16)) { \ if(d < (1<<8)) { \ /* unsigned 8 */ \ unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \ msgpack_pack_append_buffer(x, buf, 2); \ } else { \ /* unsigned 16 */ \ unsigned char buf[3]; \ buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ msgpack_pack_append_buffer(x, buf, 3); \ } \ } else { \ if(d < (1LL<<32)) { \ /* unsigned 32 */ \ unsigned char buf[5]; \ buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ msgpack_pack_append_buffer(x, buf, 5); \ } else { \ /* unsigned 64 */ \ unsigned char buf[9]; 
\ buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \ msgpack_pack_append_buffer(x, buf, 9); \ } \ } \ } \ } while(0) static inline int msgpack_pack_uint8(msgpack_packer* x, uint8_t d) { msgpack_pack_real_uint8(x, d); } static inline int msgpack_pack_uint16(msgpack_packer* x, uint16_t d) { msgpack_pack_real_uint16(x, d); } static inline int msgpack_pack_uint32(msgpack_packer* x, uint32_t d) { msgpack_pack_real_uint32(x, d); } static inline int msgpack_pack_uint64(msgpack_packer* x, uint64_t d) { msgpack_pack_real_uint64(x, d); } static inline int msgpack_pack_int8(msgpack_packer* x, int8_t d) { msgpack_pack_real_int8(x, d); } static inline int msgpack_pack_int16(msgpack_packer* x, int16_t d) { msgpack_pack_real_int16(x, d); } static inline int msgpack_pack_int32(msgpack_packer* x, int32_t d) { msgpack_pack_real_int32(x, d); } static inline int msgpack_pack_int64(msgpack_packer* x, int64_t d) { msgpack_pack_real_int64(x, d); } //#ifdef msgpack_pack_inline_func_cint static inline int msgpack_pack_short(msgpack_packer* x, short d) { #if defined(SIZEOF_SHORT) #if SIZEOF_SHORT == 2 msgpack_pack_real_int16(x, d); #elif SIZEOF_SHORT == 4 msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #elif defined(SHRT_MAX) #if SHRT_MAX == 0x7fff msgpack_pack_real_int16(x, d); #elif SHRT_MAX == 0x7fffffff msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #else if(sizeof(short) == 2) { msgpack_pack_real_int16(x, d); } else if(sizeof(short) == 4) { msgpack_pack_real_int32(x, d); } else { msgpack_pack_real_int64(x, d); } #endif } static inline int msgpack_pack_int(msgpack_packer* x, int d) { #if defined(SIZEOF_INT) #if SIZEOF_INT == 2 msgpack_pack_real_int16(x, d); #elif SIZEOF_INT == 4 msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #elif defined(INT_MAX) #if INT_MAX == 0x7fff msgpack_pack_real_int16(x, d); #elif INT_MAX == 0x7fffffff msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #else 
if(sizeof(int) == 2) { msgpack_pack_real_int16(x, d); } else if(sizeof(int) == 4) { msgpack_pack_real_int32(x, d); } else { msgpack_pack_real_int64(x, d); } #endif } static inline int msgpack_pack_long(msgpack_packer* x, long d) { #if defined(SIZEOF_LONG) #if SIZEOF_LONG == 2 msgpack_pack_real_int16(x, d); #elif SIZEOF_LONG == 4 msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #elif defined(LONG_MAX) #if LONG_MAX == 0x7fffL msgpack_pack_real_int16(x, d); #elif LONG_MAX == 0x7fffffffL msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #else if(sizeof(long) == 2) { msgpack_pack_real_int16(x, d); } else if(sizeof(long) == 4) { msgpack_pack_real_int32(x, d); } else { msgpack_pack_real_int64(x, d); } #endif } static inline int msgpack_pack_long_long(msgpack_packer* x, long long d) { #if defined(SIZEOF_LONG_LONG) #if SIZEOF_LONG_LONG == 2 msgpack_pack_real_int16(x, d); #elif SIZEOF_LONG_LONG == 4 msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #elif defined(LLONG_MAX) #if LLONG_MAX == 0x7fffL msgpack_pack_real_int16(x, d); #elif LLONG_MAX == 0x7fffffffL msgpack_pack_real_int32(x, d); #else msgpack_pack_real_int64(x, d); #endif #else if(sizeof(long long) == 2) { msgpack_pack_real_int16(x, d); } else if(sizeof(long long) == 4) { msgpack_pack_real_int32(x, d); } else { msgpack_pack_real_int64(x, d); } #endif } static inline int msgpack_pack_unsigned_short(msgpack_packer* x, unsigned short d) { #if defined(SIZEOF_SHORT) #if SIZEOF_SHORT == 2 msgpack_pack_real_uint16(x, d); #elif SIZEOF_SHORT == 4 msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #elif defined(USHRT_MAX) #if USHRT_MAX == 0xffffU msgpack_pack_real_uint16(x, d); #elif USHRT_MAX == 0xffffffffU msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #else if(sizeof(unsigned short) == 2) { msgpack_pack_real_uint16(x, d); } else if(sizeof(unsigned short) == 4) { msgpack_pack_real_uint32(x, 
d); } else { msgpack_pack_real_uint64(x, d); } #endif } static inline int msgpack_pack_unsigned_int(msgpack_packer* x, unsigned int d) { #if defined(SIZEOF_INT) #if SIZEOF_INT == 2 msgpack_pack_real_uint16(x, d); #elif SIZEOF_INT == 4 msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #elif defined(UINT_MAX) #if UINT_MAX == 0xffffU msgpack_pack_real_uint16(x, d); #elif UINT_MAX == 0xffffffffU msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #else if(sizeof(unsigned int) == 2) { msgpack_pack_real_uint16(x, d); } else if(sizeof(unsigned int) == 4) { msgpack_pack_real_uint32(x, d); } else { msgpack_pack_real_uint64(x, d); } #endif } static inline int msgpack_pack_unsigned_long(msgpack_packer* x, unsigned long d) { #if defined(SIZEOF_LONG) #if SIZEOF_LONG == 2 msgpack_pack_real_uint16(x, d); #elif SIZEOF_LONG == 4 msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #elif defined(ULONG_MAX) #if ULONG_MAX == 0xffffUL msgpack_pack_real_uint16(x, d); #elif ULONG_MAX == 0xffffffffUL msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #else if(sizeof(unsigned long) == 2) { msgpack_pack_real_uint16(x, d); } else if(sizeof(unsigned long) == 4) { msgpack_pack_real_uint32(x, d); } else { msgpack_pack_real_uint64(x, d); } #endif } static inline int msgpack_pack_unsigned_long_long(msgpack_packer* x, unsigned long long d) { #if defined(SIZEOF_LONG_LONG) #if SIZEOF_LONG_LONG == 2 msgpack_pack_real_uint16(x, d); #elif SIZEOF_LONG_LONG == 4 msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #elif defined(ULLONG_MAX) #if ULLONG_MAX == 0xffffUL msgpack_pack_real_uint16(x, d); #elif ULLONG_MAX == 0xffffffffUL msgpack_pack_real_uint32(x, d); #else msgpack_pack_real_uint64(x, d); #endif #else if(sizeof(unsigned long long) == 2) { msgpack_pack_real_uint16(x, d); } else if(sizeof(unsigned long long) == 4) { msgpack_pack_real_uint32(x, d); } else { 
msgpack_pack_real_uint64(x, d); } #endif } //#undef msgpack_pack_inline_func_cint //#endif /* * Float */ static inline int msgpack_pack_float(msgpack_packer* x, float d) { union { float f; uint32_t i; } mem; mem.f = d; unsigned char buf[5]; buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i); msgpack_pack_append_buffer(x, buf, 5); } static inline int msgpack_pack_double(msgpack_packer* x, double d) { union { double f; uint64_t i; } mem; mem.f = d; unsigned char buf[9]; buf[0] = 0xcb; #if defined(__arm__) && !(__ARM_EABI__) // arm-oabi // https://github.com/msgpack/msgpack-perl/pull/1 mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL); #endif _msgpack_store64(&buf[1], mem.i); msgpack_pack_append_buffer(x, buf, 9); } /* * Nil */ static inline int msgpack_pack_nil(msgpack_packer* x) { static const unsigned char d = 0xc0; msgpack_pack_append_buffer(x, &d, 1); } /* * Boolean */ static inline int msgpack_pack_true(msgpack_packer* x) { static const unsigned char d = 0xc3; msgpack_pack_append_buffer(x, &d, 1); } static inline int msgpack_pack_false(msgpack_packer* x) { static const unsigned char d = 0xc2; msgpack_pack_append_buffer(x, &d, 1); } /* * Array */ static inline int msgpack_pack_array(msgpack_packer* x, unsigned int n) { if(n < 16) { unsigned char d = 0x90 | n; msgpack_pack_append_buffer(x, &d, 1); } else if(n < 65536) { unsigned char buf[3]; buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n); msgpack_pack_append_buffer(x, buf, 3); } else { unsigned char buf[5]; buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n); msgpack_pack_append_buffer(x, buf, 5); } } /* * Map */ static inline int msgpack_pack_map(msgpack_packer* x, unsigned int n) { if(n < 16) { unsigned char d = 0x80 | n; msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); } else if(n < 65536) { unsigned char buf[3]; buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n); msgpack_pack_append_buffer(x, buf, 3); } else { unsigned char buf[5]; buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n); 
msgpack_pack_append_buffer(x, buf, 5); } } /* * Raw */ static inline int msgpack_pack_raw(msgpack_packer* x, size_t l) { if (l < 32) { unsigned char d = 0xa0 | (uint8_t)l; msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); } else if (x->use_bin_type && l < 256) { // str8 is new format introduced with bin. unsigned char buf[2] = {0xd9, (uint8_t)l}; msgpack_pack_append_buffer(x, buf, 2); } else if (l < 65536) { unsigned char buf[3]; buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l); msgpack_pack_append_buffer(x, buf, 3); } else { unsigned char buf[5]; buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l); msgpack_pack_append_buffer(x, buf, 5); } } /* * bin */ static inline int msgpack_pack_bin(msgpack_packer *x, size_t l) { if (!x->use_bin_type) { return msgpack_pack_raw(x, l); } if (l < 256) { unsigned char buf[2] = {0xc4, (unsigned char)l}; msgpack_pack_append_buffer(x, buf, 2); } else if (l < 65536) { unsigned char buf[3] = {0xc5}; _msgpack_store16(&buf[1], (uint16_t)l); msgpack_pack_append_buffer(x, buf, 3); } else { unsigned char buf[5] = {0xc6}; _msgpack_store32(&buf[1], (uint32_t)l); msgpack_pack_append_buffer(x, buf, 5); } } static inline int msgpack_pack_raw_body(msgpack_packer* x, const void* b, size_t l) { if (l > 0) msgpack_pack_append_buffer(x, (const unsigned char*)b, l); return 0; } /* * Ext */ static inline int msgpack_pack_ext(msgpack_packer* x, char typecode, size_t l) { if (l == 1) { unsigned char buf[2]; buf[0] = 0xd4; buf[1] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 2); } else if(l == 2) { unsigned char buf[2]; buf[0] = 0xd5; buf[1] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 2); } else if(l == 4) { unsigned char buf[2]; buf[0] = 0xd6; buf[1] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 2); } else if(l == 8) { unsigned char buf[2]; buf[0] = 0xd7; buf[1] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 2); } else if(l == 16) { unsigned char buf[2]; buf[0] = 0xd8; buf[1] = 
(unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 2); } else if(l < 256) { unsigned char buf[3]; buf[0] = 0xc7; buf[1] = l; buf[2] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 3); } else if(l < 65536) { unsigned char buf[4]; buf[0] = 0xc8; _msgpack_store16(&buf[1], (uint16_t)l); buf[3] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 4); } else { unsigned char buf[6]; buf[0] = 0xc9; _msgpack_store32(&buf[1], (uint32_t)l); buf[5] = (unsigned char)typecode; msgpack_pack_append_buffer(x, buf, 6); } } #undef msgpack_pack_append_buffer #undef TAKE8_8 #undef TAKE8_16 #undef TAKE8_32 #undef TAKE8_64 #undef msgpack_pack_real_uint8 #undef msgpack_pack_real_uint16 #undef msgpack_pack_real_uint32 #undef msgpack_pack_real_uint64 #undef msgpack_pack_real_int8 #undef msgpack_pack_real_int16 #undef msgpack_pack_real_int32 #undef msgpack_pack_real_int64 tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/_unpacker.pyx0000644000000000000000000004031113306562377025650 0ustar rootroot# coding: utf-8 #cython: embedsignature=True from cpython cimport * cdef extern from "Python.h": ctypedef struct PyObject cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1 from libc.stdlib cimport * from libc.string cimport * from libc.limits cimport * from msgpack.exceptions import ( BufferFull, OutOfData, UnpackValueError, ExtraData, ) from msgpack import ExtType cdef extern from "unpack.h": ctypedef struct msgpack_user: bint use_list PyObject* object_hook bint has_pairs_hook # call object_hook with k-v pairs PyObject* list_hook PyObject* ext_hook char *encoding char *unicode_errors Py_ssize_t max_str_len Py_ssize_t max_bin_len Py_ssize_t max_array_len Py_ssize_t max_map_len Py_ssize_t max_ext_len ctypedef struct unpack_context: msgpack_user user PyObject* obj size_t count ctypedef int (*execute_fn)(unpack_context* ctx, const char* data, size_t len, size_t* off) except? 
-1 execute_fn unpack_construct execute_fn unpack_skip execute_fn read_array_header execute_fn read_map_header void unpack_init(unpack_context* ctx) object unpack_data(unpack_context* ctx) cdef inline init_ctx(unpack_context *ctx, object object_hook, object object_pairs_hook, object list_hook, object ext_hook, bint use_list, char* encoding, char* unicode_errors, Py_ssize_t max_str_len, Py_ssize_t max_bin_len, Py_ssize_t max_array_len, Py_ssize_t max_map_len, Py_ssize_t max_ext_len): unpack_init(ctx) ctx.user.use_list = use_list ctx.user.object_hook = ctx.user.list_hook = NULL ctx.user.max_str_len = max_str_len ctx.user.max_bin_len = max_bin_len ctx.user.max_array_len = max_array_len ctx.user.max_map_len = max_map_len ctx.user.max_ext_len = max_ext_len if object_hook is not None and object_pairs_hook is not None: raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") if object_hook is not None: if not PyCallable_Check(object_hook): raise TypeError("object_hook must be a callable.") ctx.user.object_hook = object_hook if object_pairs_hook is None: ctx.user.has_pairs_hook = False else: if not PyCallable_Check(object_pairs_hook): raise TypeError("object_pairs_hook must be a callable.") ctx.user.object_hook = object_pairs_hook ctx.user.has_pairs_hook = True if list_hook is not None: if not PyCallable_Check(list_hook): raise TypeError("list_hook must be a callable.") ctx.user.list_hook = list_hook if ext_hook is not None: if not PyCallable_Check(ext_hook): raise TypeError("ext_hook must be a callable.") ctx.user.ext_hook = ext_hook ctx.user.encoding = encoding ctx.user.unicode_errors = unicode_errors def default_read_extended_type(typecode, data): raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) def unpackb(object packed, object object_hook=None, object list_hook=None, bint use_list=1, encoding=None, unicode_errors="strict", object_pairs_hook=None, ext_hook=ExtType, Py_ssize_t max_str_len=2147483647, # 2**32-1 
Py_ssize_t max_bin_len=2147483647, Py_ssize_t max_array_len=2147483647, Py_ssize_t max_map_len=2147483647, Py_ssize_t max_ext_len=2147483647): """ Unpack packed_bytes to object. Returns an unpacked object. Raises `ValueError` when `packed` contains extra bytes. See :class:`Unpacker` for options. """ cdef unpack_context ctx cdef size_t off = 0 cdef int ret cdef char* buf cdef Py_ssize_t buf_len cdef char* cenc = NULL cdef char* cerr = NULL PyObject_AsReadBuffer(packed, &buf, &buf_len) if encoding is not None: if isinstance(encoding, unicode): encoding = encoding.encode('ascii') cenc = PyBytes_AsString(encoding) if unicode_errors is not None: if isinstance(unicode_errors, unicode): unicode_errors = unicode_errors.encode('ascii') cerr = PyBytes_AsString(unicode_errors) init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook, use_list, cenc, cerr, max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len) ret = unpack_construct(&ctx, buf, buf_len, &off) if ret == 1: obj = unpack_data(&ctx) if off < buf_len: raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) return obj else: raise UnpackValueError("Unpack failed: error = %d" % (ret,)) def unpack(object stream, object object_hook=None, object list_hook=None, bint use_list=1, encoding=None, unicode_errors="strict", object_pairs_hook=None, ): """ Unpack an object from `stream`. Raises `ValueError` when `stream` has extra bytes. See :class:`Unpacker` for options. """ return unpackb(stream.read(), use_list=use_list, object_hook=object_hook, object_pairs_hook=object_pairs_hook, list_hook=list_hook, encoding=encoding, unicode_errors=unicode_errors, ) cdef class Unpacker(object): """Streaming unpacker. arguments: :param file_like: File-like object having `.read(n)` method. If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. :param int read_size: Used as `file_like.read(read_size)`. 
(default: `min(1024**2, max_buffer_size)`) :param bool use_list: If true, unpack msgpack array to Python list. Otherwise, unpack to Python tuple. (default: True) :param callable object_hook: When specified, it should be callable. Unpacker calls it with a dict argument after unpacking msgpack map. (See also simplejson) :param callable object_pairs_hook: When specified, it should be callable. Unpacker calls it with a list of key-value pairs after unpacking msgpack map. (See also simplejson) :param str encoding: Encoding used for decoding msgpack raw. If it is None (default), msgpack raw is deserialized to Python bytes. :param str unicode_errors: Used for decoding msgpack raw with *encoding*. (default: `'strict'`) :param int max_buffer_size: Limits size of data waiting unpacked. 0 means system's INT_MAX (default). Raises `BufferFull` exception when it is insufficient. You shoud set this parameter when unpacking data from untrasted source. :param int max_str_len: Limits max length of str. (default: 2**31-1) :param int max_bin_len: Limits max length of bin. (default: 2**31-1) :param int max_array_len: Limits max length of array. (default: 2**31-1) :param int max_map_len: Limits max length of map. (default: 2**31-1) example of streaming deserialize from file-like object:: unpacker = Unpacker(file_like) for o in unpacker: process(o) example of streaming deserialize from socket:: unpacker = Unpacker() while True: buf = sock.recv(1024**2) if not buf: break unpacker.feed(buf) for o in unpacker: process(o) """ cdef unpack_context ctx cdef char* buf cdef size_t buf_size, buf_head, buf_tail cdef object file_like cdef object file_like_read cdef Py_ssize_t read_size # To maintain refcnt. 
cdef object object_hook, object_pairs_hook, list_hook, ext_hook cdef object encoding, unicode_errors cdef size_t max_buffer_size def __cinit__(self): self.buf = NULL def __dealloc__(self): free(self.buf) self.buf = NULL def __init__(self, file_like=None, Py_ssize_t read_size=0, bint use_list=1, object object_hook=None, object object_pairs_hook=None, object list_hook=None, encoding=None, unicode_errors='strict', int max_buffer_size=0, object ext_hook=ExtType, Py_ssize_t max_str_len=2147483647, # 2**32-1 Py_ssize_t max_bin_len=2147483647, Py_ssize_t max_array_len=2147483647, Py_ssize_t max_map_len=2147483647, Py_ssize_t max_ext_len=2147483647): cdef char *cenc=NULL, cdef char *cerr=NULL self.object_hook = object_hook self.object_pairs_hook = object_pairs_hook self.list_hook = list_hook self.ext_hook = ext_hook self.file_like = file_like if file_like: self.file_like_read = file_like.read if not PyCallable_Check(self.file_like_read): raise TypeError("`file_like.read` must be a callable.") if not max_buffer_size: max_buffer_size = INT_MAX if read_size > max_buffer_size: raise ValueError("read_size should be less or equal to max_buffer_size") if not read_size: read_size = min(max_buffer_size, 1024**2) self.max_buffer_size = max_buffer_size self.read_size = read_size self.buf = malloc(read_size) if self.buf == NULL: raise MemoryError("Unable to allocate internal buffer.") self.buf_size = read_size self.buf_head = 0 self.buf_tail = 0 if encoding is not None: if isinstance(encoding, unicode): self.encoding = encoding.encode('ascii') elif isinstance(encoding, bytes): self.encoding = encoding else: raise TypeError("encoding should be bytes or unicode") cenc = PyBytes_AsString(self.encoding) if unicode_errors is not None: if isinstance(unicode_errors, unicode): self.unicode_errors = unicode_errors.encode('ascii') elif isinstance(unicode_errors, bytes): self.unicode_errors = unicode_errors else: raise TypeError("unicode_errors should be bytes or unicode") cerr = 
PyBytes_AsString(self.unicode_errors) init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, ext_hook, use_list, cenc, cerr, max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len) def feed(self, object next_bytes): """Append `next_bytes` to internal buffer.""" cdef Py_buffer pybuff if self.file_like is not None: raise AssertionError( "unpacker.feed() is not be able to use with `file_like`.") PyObject_GetBuffer(next_bytes, &pybuff, PyBUF_SIMPLE) try: self.append_buffer(pybuff.buf, pybuff.len) finally: PyBuffer_Release(&pybuff) cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): cdef: char* buf = self.buf char* new_buf size_t head = self.buf_head size_t tail = self.buf_tail size_t buf_size = self.buf_size size_t new_size if tail + _buf_len > buf_size: if ((tail - head) + _buf_len) <= buf_size: # move to front. memmove(buf, buf + head, tail - head) tail -= head head = 0 else: # expand buffer. new_size = (tail-head) + _buf_len if new_size > self.max_buffer_size: raise BufferFull new_size = min(new_size*2, self.max_buffer_size) new_buf = malloc(new_size) if new_buf == NULL: # self.buf still holds old buffer and will be freed during # obj destruction raise MemoryError("Unable to enlarge internal buffer.") memcpy(new_buf, buf + head, tail - head) free(buf) buf = new_buf buf_size = new_size tail -= head head = 0 memcpy(buf + tail, (_buf), _buf_len) self.buf = buf self.buf_head = head self.buf_size = buf_size self.buf_tail = tail + _buf_len cdef read_from_file(self): next_bytes = self.file_like_read( min(self.read_size, self.max_buffer_size - (self.buf_tail - self.buf_head) )) if next_bytes: self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) else: self.file_like = None cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0): cdef int ret cdef object obj cdef size_t prev_head if self.buf_head >= self.buf_tail and self.file_like is not None: self.read_from_file() while 1: prev_head = self.buf_head if 
prev_head >= self.buf_tail: if iter: raise StopIteration("No more data to unpack.") else: raise OutOfData("No more data to unpack.") ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head) if write_bytes is not None: write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head)) if ret == 1: obj = unpack_data(&self.ctx) unpack_init(&self.ctx) return obj elif ret == 0: if self.file_like is not None: self.read_from_file() continue if iter: raise StopIteration("No more data to unpack.") else: raise OutOfData("No more data to unpack.") else: raise ValueError("Unpack failed: error = %d" % (ret,)) def read_bytes(self, Py_ssize_t nbytes): """Read a specified number of raw bytes from the stream""" cdef size_t nread nread = min(self.buf_tail - self.buf_head, nbytes) ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) self.buf_head += nread if len(ret) < nbytes and self.file_like is not None: ret += self.file_like.read(nbytes - len(ret)) return ret def unpack(self, object write_bytes=None): """Unpack one object If write_bytes is not None, it will be called with parts of the raw message as it is unpacked. Raises `OutOfData` when there are no more bytes to unpack. """ return self._unpack(unpack_construct, write_bytes) def skip(self, object write_bytes=None): """Read and ignore one object, returning None If write_bytes is not None, it will be called with parts of the raw message as it is unpacked. Raises `OutOfData` when there are no more bytes to unpack. """ return self._unpack(unpack_skip, write_bytes) def read_array_header(self, object write_bytes=None): """assuming the next object is an array, return its size n, such that the next n unpack() calls will iterate over its contents. Raises `OutOfData` when there are no more bytes to unpack. 
""" return self._unpack(read_array_header, write_bytes) def read_map_header(self, object write_bytes=None): """assuming the next object is a map, return its size n, such that the next n * 2 unpack() calls will iterate over its key-value pairs. Raises `OutOfData` when there are no more bytes to unpack. """ return self._unpack(read_map_header, write_bytes) def __iter__(self): return self def __next__(self): return self._unpack(unpack_construct, None, 1) # for debug. #def _buf(self): # return PyString_FromStringAndSize(self.buf, self.buf_tail) #def _off(self): # return self.buf_head tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/_version.py0000644000000000000000000000002413306562377025332 0ustar rootrootversion = (0, 4, 6) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/unpack_define.h0000644000000000000000000000447613306562377026117 0ustar rootroot/* * MessagePack unpacking routine template * * Copyright (C) 2008-2010 FURUHASHI Sadayuki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef MSGPACK_UNPACK_DEFINE_H__ #define MSGPACK_UNPACK_DEFINE_H__ #include "msgpack/sysdep.h" #include #include #include #include #ifdef __cplusplus extern "C" { #endif #ifndef MSGPACK_EMBED_STACK_SIZE #define MSGPACK_EMBED_STACK_SIZE 32 #endif // CS is first byte & 0x1f typedef enum { CS_HEADER = 0x00, // nil //CS_ = 0x01, //CS_ = 0x02, // false //CS_ = 0x03, // true CS_BIN_8 = 0x04, CS_BIN_16 = 0x05, CS_BIN_32 = 0x06, CS_EXT_8 = 0x07, CS_EXT_16 = 0x08, CS_EXT_32 = 0x09, CS_FLOAT = 0x0a, CS_DOUBLE = 0x0b, CS_UINT_8 = 0x0c, CS_UINT_16 = 0x0d, CS_UINT_32 = 0x0e, CS_UINT_64 = 0x0f, CS_INT_8 = 0x10, CS_INT_16 = 0x11, CS_INT_32 = 0x12, CS_INT_64 = 0x13, //CS_FIXEXT1 = 0x14, //CS_FIXEXT2 = 0x15, //CS_FIXEXT4 = 0x16, //CS_FIXEXT8 = 0x17, //CS_FIXEXT16 = 0x18, CS_RAW_8 = 0x19, CS_RAW_16 = 0x1a, CS_RAW_32 = 0x1b, CS_ARRAY_16 = 0x1c, CS_ARRAY_32 = 0x1d, CS_MAP_16 = 0x1e, CS_MAP_32 = 0x1f, ACS_RAW_VALUE, ACS_BIN_VALUE, ACS_EXT_VALUE, } msgpack_unpack_state; typedef enum { CT_ARRAY_ITEM, CT_MAP_KEY, CT_MAP_VALUE, } msgpack_container_type; #ifdef __cplusplus } #endif #endif /* msgpack/unpack_define.h */ tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/fallback.py0000644000000000000000000007622413306562377025264 0ustar rootroot"""Fallback pure Python implementation of msgpack""" import sys import array import struct if sys.version_info[0] == 3: PY3 = True int_types = int Unicode = str xrange = range def dict_iteritems(d): return d.items() else: PY3 = False int_types = (int, long) Unicode = unicode def dict_iteritems(d): return d.iteritems() if hasattr(sys, 'pypy_version_info'): # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own # StringBuilder is fastest. 
from __pypy__ import newlist_hint try: from __pypy__.builders import BytesBuilder as StringBuilder except ImportError: from __pypy__.builders import StringBuilder USING_STRINGBUILDER = True class StringIO(object): def __init__(self, s=b''): if s: self.builder = StringBuilder(len(s)) self.builder.append(s) else: self.builder = StringBuilder() def write(self, s): self.builder.append(s) def getvalue(self): return self.builder.build() else: USING_STRINGBUILDER = False from io import BytesIO as StringIO newlist_hint = lambda size: [] from msgpack.exceptions import ( BufferFull, OutOfData, UnpackValueError, PackValueError, ExtraData) from msgpack import ExtType EX_SKIP = 0 EX_CONSTRUCT = 1 EX_READ_ARRAY_HEADER = 2 EX_READ_MAP_HEADER = 3 TYPE_IMMEDIATE = 0 TYPE_ARRAY = 1 TYPE_MAP = 2 TYPE_RAW = 3 TYPE_BIN = 4 TYPE_EXT = 5 DEFAULT_RECURSE_LIMIT = 511 def unpack(stream, **kwargs): """ Unpack an object from `stream`. Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options. """ unpacker = Unpacker(stream, **kwargs) ret = unpacker._fb_unpack() if unpacker._fb_got_extradata(): raise ExtraData(ret, unpacker._fb_get_extradata()) return ret def unpackb(packed, **kwargs): """ Unpack an object from `packed`. Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options. """ unpacker = Unpacker(None, **kwargs) unpacker.feed(packed) try: ret = unpacker._fb_unpack() except OutOfData: raise UnpackValueError("Data is not enough.") if unpacker._fb_got_extradata(): raise ExtraData(ret, unpacker._fb_get_extradata()) return ret class Unpacker(object): """Streaming unpacker. arguments: :param file_like: File-like object having `.read(n)` method. If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. :param int read_size: Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`) :param bool use_list: If true, unpack msgpack array to Python list. Otherwise, unpack to Python tuple. 
(default: True) :param callable object_hook: When specified, it should be callable. Unpacker calls it with a dict argument after unpacking msgpack map. (See also simplejson) :param callable object_pairs_hook: When specified, it should be callable. Unpacker calls it with a list of key-value pairs after unpacking msgpack map. (See also simplejson) :param str encoding: Encoding used for decoding msgpack raw. If it is None (default), msgpack raw is deserialized to Python bytes. :param str unicode_errors: Used for decoding msgpack raw with *encoding*. (default: `'strict'`) :param int max_buffer_size: Limits size of data waiting unpacked. 0 means system's INT_MAX (default). Raises `BufferFull` exception when it is insufficient. You shoud set this parameter when unpacking data from untrasted source. :param int max_str_len: Limits max length of str. (default: 2**31-1) :param int max_bin_len: Limits max length of bin. (default: 2**31-1) :param int max_array_len: Limits max length of array. (default: 2**31-1) :param int max_map_len: Limits max length of map. (default: 2**31-1) example of streaming deserialize from file-like object:: unpacker = Unpacker(file_like) for o in unpacker: process(o) example of streaming deserialize from socket:: unpacker = Unpacker() while True: buf = sock.recv(1024**2) if not buf: break unpacker.feed(buf) for o in unpacker: process(o) """ def __init__(self, file_like=None, read_size=0, use_list=True, object_hook=None, object_pairs_hook=None, list_hook=None, encoding=None, unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType, max_str_len=2147483647, # 2**32-1 max_bin_len=2147483647, max_array_len=2147483647, max_map_len=2147483647, max_ext_len=2147483647): if file_like is None: self._fb_feeding = True else: if not callable(file_like.read): raise TypeError("`file_like.read` must be callable") self.file_like = file_like self._fb_feeding = False #: array of bytes feeded. 
self._fb_buffers = [] #: Which buffer we currently reads self._fb_buf_i = 0 #: Which position we currently reads self._fb_buf_o = 0 #: Total size of _fb_bufferes self._fb_buf_n = 0 # When Unpacker is used as an iterable, between the calls to next(), # the buffer is not "consumed" completely, for efficiency sake. # Instead, it is done sloppily. To make sure we raise BufferFull at # the correct moments, we have to keep track of how sloppy we were. # Furthermore, when the buffer is incomplete (that is: in the case # we raise an OutOfData) we need to rollback the buffer to the correct # state, which _fb_slopiness records. self._fb_sloppiness = 0 self._max_buffer_size = max_buffer_size or 2**31-1 if read_size > self._max_buffer_size: raise ValueError("read_size must be smaller than max_buffer_size") self._read_size = read_size or min(self._max_buffer_size, 4096) self._encoding = encoding self._unicode_errors = unicode_errors self._use_list = use_list self._list_hook = list_hook self._object_hook = object_hook self._object_pairs_hook = object_pairs_hook self._ext_hook = ext_hook self._max_str_len = max_str_len self._max_bin_len = max_bin_len self._max_array_len = max_array_len self._max_map_len = max_map_len self._max_ext_len = max_ext_len if list_hook is not None and not callable(list_hook): raise TypeError('`list_hook` is not callable') if object_hook is not None and not callable(object_hook): raise TypeError('`object_hook` is not callable') if object_pairs_hook is not None and not callable(object_pairs_hook): raise TypeError('`object_pairs_hook` is not callable') if object_hook is not None and object_pairs_hook is not None: raise TypeError("object_pairs_hook and object_hook are mutually " "exclusive") if not callable(ext_hook): raise TypeError("`ext_hook` is not callable") def feed(self, next_bytes): if isinstance(next_bytes, array.array): next_bytes = next_bytes.tostring() elif isinstance(next_bytes, bytearray): next_bytes = bytes(next_bytes) assert self._fb_feeding 
if (self._fb_buf_n + len(next_bytes) - self._fb_sloppiness > self._max_buffer_size): raise BufferFull self._fb_buf_n += len(next_bytes) self._fb_buffers.append(next_bytes) def _fb_sloppy_consume(self): """ Gets rid of some of the used parts of the buffer. """ if self._fb_buf_i: for i in xrange(self._fb_buf_i): self._fb_buf_n -= len(self._fb_buffers[i]) self._fb_buffers = self._fb_buffers[self._fb_buf_i:] self._fb_buf_i = 0 if self._fb_buffers: self._fb_sloppiness = self._fb_buf_o else: self._fb_sloppiness = 0 def _fb_consume(self): """ Gets rid of the used parts of the buffer. """ if self._fb_buf_i: for i in xrange(self._fb_buf_i): self._fb_buf_n -= len(self._fb_buffers[i]) self._fb_buffers = self._fb_buffers[self._fb_buf_i:] self._fb_buf_i = 0 if self._fb_buffers: self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:] self._fb_buf_n -= self._fb_buf_o else: self._fb_buf_n = 0 self._fb_buf_o = 0 self._fb_sloppiness = 0 def _fb_got_extradata(self): if self._fb_buf_i != len(self._fb_buffers): return True if self._fb_feeding: return False if not self.file_like: return False if self.file_like.read(1): return True return False def __iter__(self): return self def read_bytes(self, n): return self._fb_read(n) def _fb_rollback(self): self._fb_buf_i = 0 self._fb_buf_o = self._fb_sloppiness def _fb_get_extradata(self): bufs = self._fb_buffers[self._fb_buf_i:] if bufs: bufs[0] = bufs[0][self._fb_buf_o:] return b''.join(bufs) def _fb_read(self, n, write_bytes=None): buffs = self._fb_buffers # We have a redundant codepath for the most common case, such that # pypy optimizes it properly. This is the case that the read fits # in the current buffer. if (write_bytes is None and self._fb_buf_i < len(buffs) and self._fb_buf_o + n < len(buffs[self._fb_buf_i])): self._fb_buf_o += n return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o] # The remaining cases. 
ret = b'' while len(ret) != n: sliced = n - len(ret) if self._fb_buf_i == len(buffs): if self._fb_feeding: break to_read = sliced if self._read_size > to_read: to_read = self._read_size tmp = self.file_like.read(to_read) if not tmp: break buffs.append(tmp) self._fb_buf_n += len(tmp) continue ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced] self._fb_buf_o += sliced if self._fb_buf_o >= len(buffs[self._fb_buf_i]): self._fb_buf_o = 0 self._fb_buf_i += 1 if len(ret) != n: self._fb_rollback() raise OutOfData if write_bytes is not None: write_bytes(ret) return ret def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None): typ = TYPE_IMMEDIATE n = 0 obj = None c = self._fb_read(1, write_bytes) b = ord(c) if b & 0b10000000 == 0: obj = b elif b & 0b11100000 == 0b11100000: obj = struct.unpack("b", c)[0] elif b & 0b11100000 == 0b10100000: n = b & 0b00011111 obj = self._fb_read(n, write_bytes) typ = TYPE_RAW if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) elif b & 0b11110000 == 0b10010000: n = b & 0b00001111 typ = TYPE_ARRAY if n > self._max_array_len: raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b & 0b11110000 == 0b10000000: n = b & 0b00001111 typ = TYPE_MAP if n > self._max_map_len: raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) elif b == 0xc0: obj = None elif b == 0xc2: obj = False elif b == 0xc3: obj = True elif b == 0xc4: typ = TYPE_BIN n = struct.unpack("B", self._fb_read(1, write_bytes))[0] if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._fb_read(n, write_bytes) elif b == 0xc5: typ = TYPE_BIN n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] if n > self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._fb_read(n, write_bytes) elif b == 0xc6: typ = TYPE_BIN n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] if n > 
self._max_bin_len: raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) obj = self._fb_read(n, write_bytes) elif b == 0xc7: # ext 8 typ = TYPE_EXT L, n = struct.unpack('Bb', self._fb_read(2, write_bytes)) if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._fb_read(L, write_bytes) elif b == 0xc8: # ext 16 typ = TYPE_EXT L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes)) if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._fb_read(L, write_bytes) elif b == 0xc9: # ext 32 typ = TYPE_EXT L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes)) if L > self._max_ext_len: raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) obj = self._fb_read(L, write_bytes) elif b == 0xca: obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0] elif b == 0xcb: obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0] elif b == 0xcc: obj = struct.unpack("B", self._fb_read(1, write_bytes))[0] elif b == 0xcd: obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0] elif b == 0xce: obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0] elif b == 0xcf: obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0] elif b == 0xd0: obj = struct.unpack("b", self._fb_read(1, write_bytes))[0] elif b == 0xd1: obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0] elif b == 0xd2: obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0] elif b == 0xd3: obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0] elif b == 0xd4: # fixext 1 typ = TYPE_EXT if self._max_ext_len < 1: raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes)) elif b == 0xd5: # fixext 2 typ = TYPE_EXT if self._max_ext_len < 2: raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes)) elif b == 0xd6: 
# fixext 4 typ = TYPE_EXT if self._max_ext_len < 4: raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes)) elif b == 0xd7: # fixext 8 typ = TYPE_EXT if self._max_ext_len < 8: raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes)) elif b == 0xd8: # fixext 16 typ = TYPE_EXT if self._max_ext_len < 16: raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes)) elif b == 0xd9: typ = TYPE_RAW n = struct.unpack("B", self._fb_read(1, write_bytes))[0] if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._fb_read(n, write_bytes) elif b == 0xda: typ = TYPE_RAW n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._fb_read(n, write_bytes) elif b == 0xdb: typ = TYPE_RAW n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] if n > self._max_str_len: raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) obj = self._fb_read(n, write_bytes) elif b == 0xdc: n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] if n > self._max_array_len: raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) typ = TYPE_ARRAY elif b == 0xdd: n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] if n > self._max_array_len: raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) typ = TYPE_ARRAY elif b == 0xde: n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] if n > self._max_map_len: raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) typ = TYPE_MAP elif b == 0xdf: n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] if n > self._max_map_len: raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) typ = 
TYPE_MAP else: raise UnpackValueError("Unknown header: 0x%x" % b) return typ, n, obj def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None): typ, n, obj = self._read_header(execute, write_bytes) if execute == EX_READ_ARRAY_HEADER: if typ != TYPE_ARRAY: raise UnpackValueError("Expected array") return n if execute == EX_READ_MAP_HEADER: if typ != TYPE_MAP: raise UnpackValueError("Expected map") return n # TODO should we eliminate the recursion? if typ == TYPE_ARRAY: if execute == EX_SKIP: for i in xrange(n): # TODO check whether we need to call `list_hook` self._fb_unpack(EX_SKIP, write_bytes) return ret = newlist_hint(n) for i in xrange(n): ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes)) if self._list_hook is not None: ret = self._list_hook(ret) # TODO is the interaction between `list_hook` and `use_list` ok? return ret if self._use_list else tuple(ret) if typ == TYPE_MAP: if execute == EX_SKIP: for i in xrange(n): # TODO check whether we need to call hooks self._fb_unpack(EX_SKIP, write_bytes) self._fb_unpack(EX_SKIP, write_bytes) return if self._object_pairs_hook is not None: ret = self._object_pairs_hook( (self._fb_unpack(EX_CONSTRUCT, write_bytes), self._fb_unpack(EX_CONSTRUCT, write_bytes)) for _ in xrange(n)) else: ret = {} for _ in xrange(n): key = self._fb_unpack(EX_CONSTRUCT, write_bytes) ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes) if self._object_hook is not None: ret = self._object_hook(ret) return ret if execute == EX_SKIP: return if typ == TYPE_RAW: if self._encoding is not None: obj = obj.decode(self._encoding, self._unicode_errors) return obj if typ == TYPE_EXT: return self._ext_hook(n, obj) if typ == TYPE_BIN: return obj assert typ == TYPE_IMMEDIATE return obj def next(self): try: ret = self._fb_unpack(EX_CONSTRUCT, None) self._fb_sloppy_consume() return ret except OutOfData: self._fb_consume() raise StopIteration __next__ = next def skip(self, write_bytes=None): self._fb_unpack(EX_SKIP, write_bytes) self._fb_consume() def 
unpack(self, write_bytes=None): ret = self._fb_unpack(EX_CONSTRUCT, write_bytes) self._fb_consume() return ret def read_array_header(self, write_bytes=None): ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes) self._fb_consume() return ret def read_map_header(self, write_bytes=None): ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes) self._fb_consume() return ret class Packer(object): """ MessagePack Packer usage: packer = Packer() astream.write(packer.pack(a)) astream.write(packer.pack(b)) Packer's constructor has some keyword arguments: :param callable default: Convert user type to builtin type that Packer supports. See also simplejson's document. :param str encoding: Convert unicode to bytes with this encoding. (default: 'utf-8') :param str unicode_errors: Error handler for encoding unicode. (default: 'strict') :param bool use_single_float: Use single precision float type for float. (default: False) :param bool autoreset: Reset buffer after each pack and return it's content as `bytes`. (default: True). If set this to false, use `bytes()` to get content and `.reset()` to clear buffer. :param bool use_bin_type: Use bin type introduced in msgpack spec 2.0 for bytes. It also enable str8 type for unicode. 
""" def __init__(self, default=None, encoding='utf-8', unicode_errors='strict', use_single_float=False, autoreset=True, use_bin_type=False): self._use_float = use_single_float self._autoreset = autoreset self._use_bin_type = use_bin_type self._encoding = encoding self._unicode_errors = unicode_errors self._buffer = StringIO() if default is not None: if not callable(default): raise TypeError("default must be callable") self._default = default def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance): default_used = False while True: if nest_limit < 0: raise PackValueError("recursion limit exceeded") if obj is None: return self._buffer.write(b"\xc0") if isinstance(obj, bool): if obj: return self._buffer.write(b"\xc3") return self._buffer.write(b"\xc2") if isinstance(obj, int_types): if 0 <= obj < 0x80: return self._buffer.write(struct.pack("B", obj)) if -0x20 <= obj < 0: return self._buffer.write(struct.pack("b", obj)) if 0x80 <= obj <= 0xff: return self._buffer.write(struct.pack("BB", 0xcc, obj)) if -0x80 <= obj < 0: return self._buffer.write(struct.pack(">Bb", 0xd0, obj)) if 0xff < obj <= 0xffff: return self._buffer.write(struct.pack(">BH", 0xcd, obj)) if -0x8000 <= obj < -0x80: return self._buffer.write(struct.pack(">Bh", 0xd1, obj)) if 0xffff < obj <= 0xffffffff: return self._buffer.write(struct.pack(">BI", 0xce, obj)) if -0x80000000 <= obj < -0x8000: return self._buffer.write(struct.pack(">Bi", 0xd2, obj)) if 0xffffffff < obj <= 0xffffffffffffffff: return self._buffer.write(struct.pack(">BQ", 0xcf, obj)) if -0x8000000000000000 <= obj < -0x80000000: return self._buffer.write(struct.pack(">Bq", 0xd3, obj)) raise PackValueError("Integer value out of range") if self._use_bin_type and isinstance(obj, bytes): n = len(obj) if n <= 0xff: self._buffer.write(struct.pack('>BB', 0xc4, n)) elif n <= 0xffff: self._buffer.write(struct.pack(">BH", 0xc5, n)) elif n <= 0xffffffff: self._buffer.write(struct.pack(">BI", 0xc6, n)) else: raise 
PackValueError("Bytes is too large") return self._buffer.write(obj) if isinstance(obj, (Unicode, bytes)): if isinstance(obj, Unicode): if self._encoding is None: raise TypeError( "Can't encode unicode string: " "no encoding is specified") obj = obj.encode(self._encoding, self._unicode_errors) n = len(obj) if n <= 0x1f: self._buffer.write(struct.pack('B', 0xa0 + n)) elif self._use_bin_type and n <= 0xff: self._buffer.write(struct.pack('>BB', 0xd9, n)) elif n <= 0xffff: self._buffer.write(struct.pack(">BH", 0xda, n)) elif n <= 0xffffffff: self._buffer.write(struct.pack(">BI", 0xdb, n)) else: raise PackValueError("String is too large") return self._buffer.write(obj) if isinstance(obj, float): if self._use_float: return self._buffer.write(struct.pack(">Bf", 0xca, obj)) return self._buffer.write(struct.pack(">Bd", 0xcb, obj)) if isinstance(obj, ExtType): code = obj.code data = obj.data assert isinstance(code, int) assert isinstance(data, bytes) L = len(data) if L == 1: self._buffer.write(b'\xd4') elif L == 2: self._buffer.write(b'\xd5') elif L == 4: self._buffer.write(b'\xd6') elif L == 8: self._buffer.write(b'\xd7') elif L == 16: self._buffer.write(b'\xd8') elif L <= 0xff: self._buffer.write(struct.pack(">BB", 0xc7, L)) elif L <= 0xffff: self._buffer.write(struct.pack(">BH", 0xc8, L)) else: self._buffer.write(struct.pack(">BI", 0xc9, L)) self._buffer.write(struct.pack("b", code)) self._buffer.write(data) return if isinstance(obj, (list, tuple)): n = len(obj) self._fb_pack_array_header(n) for i in xrange(n): self._pack(obj[i], nest_limit - 1) return if isinstance(obj, dict): return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj), nest_limit - 1) if not default_used and self._default is not None: obj = self._default(obj) default_used = 1 continue raise TypeError("Cannot serialize %r" % obj) def pack(self, obj): self._pack(obj) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() elif USING_STRINGBUILDER: self._buffer = StringIO(ret) return 
ret def pack_map_pairs(self, pairs): self._fb_pack_map_pairs(len(pairs), pairs) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() elif USING_STRINGBUILDER: self._buffer = StringIO(ret) return ret def pack_array_header(self, n): if n >= 2**32: raise ValueError self._fb_pack_array_header(n) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() elif USING_STRINGBUILDER: self._buffer = StringIO(ret) return ret def pack_map_header(self, n): if n >= 2**32: raise ValueError self._fb_pack_map_header(n) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() elif USING_STRINGBUILDER: self._buffer = StringIO(ret) return ret def pack_ext_type(self, typecode, data): if not isinstance(typecode, int): raise TypeError("typecode must have int type.") if not 0 <= typecode <= 127: raise ValueError("typecode should be 0-127") if not isinstance(data, bytes): raise TypeError("data must have bytes type") L = len(data) if L > 0xffffffff: raise ValueError("Too large data") if L == 1: self._buffer.write(b'\xd4') elif L == 2: self._buffer.write(b'\xd5') elif L == 4: self._buffer.write(b'\xd6') elif L == 8: self._buffer.write(b'\xd7') elif L == 16: self._buffer.write(b'\xd8') elif L <= 0xff: self._buffer.write(b'\xc7' + struct.pack('B', L)) elif L <= 0xffff: self._buffer.write(b'\xc8' + struct.pack('>H', L)) else: self._buffer.write(b'\xc9' + struct.pack('>I', L)) self._buffer.write(struct.pack('B', typecode)) self._buffer.write(data) def _fb_pack_array_header(self, n): if n <= 0x0f: return self._buffer.write(struct.pack('B', 0x90 + n)) if n <= 0xffff: return self._buffer.write(struct.pack(">BH", 0xdc, n)) if n <= 0xffffffff: return self._buffer.write(struct.pack(">BI", 0xdd, n)) raise PackValueError("Array is too large") def _fb_pack_map_header(self, n): if n <= 0x0f: return self._buffer.write(struct.pack('B', 0x80 + n)) if n <= 0xffff: return self._buffer.write(struct.pack(">BH", 0xde, n)) if n <= 0xffffffff: return 
self._buffer.write(struct.pack(">BI", 0xdf, n)) raise PackValueError("Dict is too large") def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): self._fb_pack_map_header(n) for (k, v) in pairs: self._pack(k, nest_limit - 1) self._pack(v, nest_limit - 1) def bytes(self): return self._buffer.getvalue() def reset(self): self._buffer = StringIO() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/unpack_template.h0000644000000000000000000003650513306562377026476 0ustar rootroot/* * MessagePack unpacking routine template * * Copyright (C) 2008-2010 FURUHASHI Sadayuki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef USE_CASE_RANGE #if !defined(_MSC_VER) #define USE_CASE_RANGE #endif #endif typedef struct unpack_stack { PyObject* obj; size_t size; size_t count; unsigned int ct; PyObject* map_key; } unpack_stack; struct unpack_context { unpack_user user; unsigned int cs; unsigned int trail; unsigned int top; /* unpack_stack* stack; unsigned int stack_size; unpack_stack embed_stack[MSGPACK_EMBED_STACK_SIZE]; */ unpack_stack stack[MSGPACK_EMBED_STACK_SIZE]; }; static inline void unpack_init(unpack_context* ctx) { ctx->cs = CS_HEADER; ctx->trail = 0; ctx->top = 0; /* ctx->stack = ctx->embed_stack; ctx->stack_size = MSGPACK_EMBED_STACK_SIZE; */ ctx->stack[0].obj = unpack_callback_root(&ctx->user); } /* static inline void unpack_destroy(unpack_context* ctx) { if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) { free(ctx->stack); } } */ static inline PyObject* unpack_data(unpack_context* ctx) { return (ctx)->stack[0].obj; } template static inline int unpack_execute(unpack_context* ctx, const char* data, size_t len, size_t* off) { assert(len >= *off); const unsigned char* p = (unsigned char*)data + *off; const unsigned char* const pe = (unsigned char*)data + len; const void* n = NULL; unsigned int trail = ctx->trail; unsigned int cs = ctx->cs; unsigned int top = ctx->top; unpack_stack* stack = ctx->stack; /* unsigned int stack_size = ctx->stack_size; */ unpack_user* user = &ctx->user; PyObject* obj; unpack_stack* c = NULL; int ret; #define construct_cb(name) \ construct && unpack_callback ## name #define push_simple_value(func) \ if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \ goto _push #define push_fixed_value(func, arg) \ if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \ goto _push #define push_variable_value(func, base, pos, len) \ if(construct_cb(func)(user, \ (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \ goto _push #define again_fixed_trail(_cs, trail_len) \ trail = trail_len; \ cs = _cs; \ goto _fixed_trail_again 
#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \ trail = trail_len; \ if(trail == 0) { goto ifzero; } \ cs = _cs; \ goto _fixed_trail_again #define start_container(func, count_, ct_) \ if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \ if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \ if((count_) == 0) { obj = stack[top].obj; \ if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \ goto _push; } \ stack[top].ct = ct_; \ stack[top].size = count_; \ stack[top].count = 0; \ ++top; \ /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \ /*printf("stack push %d\n", top);*/ \ /* FIXME \ if(top >= stack_size) { \ if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \ size_t csize = sizeof(unpack_stack) * MSGPACK_EMBED_STACK_SIZE; \ size_t nsize = csize * 2; \ unpack_stack* tmp = (unpack_stack*)malloc(nsize); \ if(tmp == NULL) { goto _failed; } \ memcpy(tmp, ctx->stack, csize); \ ctx->stack = stack = tmp; \ ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \ } else { \ size_t nsize = sizeof(unpack_stack) * ctx->stack_size * 2; \ unpack_stack* tmp = (unpack_stack*)realloc(ctx->stack, nsize); \ if(tmp == NULL) { goto _failed; } \ ctx->stack = stack = tmp; \ ctx->stack_size = stack_size = stack_size * 2; \ } \ } \ */ \ goto _header_again #define NEXT_CS(p) ((unsigned int)*p & 0x1f) #ifdef USE_CASE_RANGE #define SWITCH_RANGE_BEGIN switch(*p) { #define SWITCH_RANGE(FROM, TO) case FROM ... 
TO: #define SWITCH_RANGE_DEFAULT default: #define SWITCH_RANGE_END } #else #define SWITCH_RANGE_BEGIN { if(0) { #define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) { #define SWITCH_RANGE_DEFAULT } else { #define SWITCH_RANGE_END } } #endif if(p == pe) { goto _out; } do { switch(cs) { case CS_HEADER: SWITCH_RANGE_BEGIN SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum push_fixed_value(_uint8, *(uint8_t*)p); SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum push_fixed_value(_int8, *(int8_t*)p); SWITCH_RANGE(0xc0, 0xdf) // Variable switch(*p) { case 0xc0: // nil push_simple_value(_nil); //case 0xc1: // never used case 0xc2: // false push_simple_value(_false); case 0xc3: // true push_simple_value(_true); case 0xc4: // bin 8 again_fixed_trail(NEXT_CS(p), 1); case 0xc5: // bin 16 again_fixed_trail(NEXT_CS(p), 2); case 0xc6: // bin 32 again_fixed_trail(NEXT_CS(p), 4); case 0xc7: // ext 8 again_fixed_trail(NEXT_CS(p), 1); case 0xc8: // ext 16 again_fixed_trail(NEXT_CS(p), 2); case 0xc9: // ext 32 again_fixed_trail(NEXT_CS(p), 4); case 0xca: // float case 0xcb: // double case 0xcc: // unsigned int 8 case 0xcd: // unsigned int 16 case 0xce: // unsigned int 32 case 0xcf: // unsigned int 64 case 0xd0: // signed int 8 case 0xd1: // signed int 16 case 0xd2: // signed int 32 case 0xd3: // signed int 64 again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03)); case 0xd4: // fixext 1 case 0xd5: // fixext 2 case 0xd6: // fixext 4 case 0xd7: // fixext 8 again_fixed_trail_if_zero(ACS_EXT_VALUE, (1 << (((unsigned int)*p) & 0x03))+1, _ext_zero); case 0xd8: // fixext 16 again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero); case 0xd9: // str 8 again_fixed_trail(NEXT_CS(p), 1); case 0xda: // raw 16 case 0xdb: // raw 32 case 0xdc: // array 16 case 0xdd: // array 32 case 0xde: // map 16 case 0xdf: // map 32 again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01)); default: goto _failed; } SWITCH_RANGE(0xa0, 0xbf) // FixRaw again_fixed_trail_if_zero(ACS_RAW_VALUE, 
((unsigned int)*p & 0x1f), _raw_zero); SWITCH_RANGE(0x90, 0x9f) // FixArray start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM); SWITCH_RANGE(0x80, 0x8f) // FixMap start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY); SWITCH_RANGE_DEFAULT goto _failed; SWITCH_RANGE_END // end CS_HEADER _fixed_trail_again: ++p; default: if((size_t)(pe - p) < trail) { goto _out; } n = p; p += trail - 1; switch(cs) { case CS_EXT_8: again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero); case CS_EXT_16: again_fixed_trail_if_zero(ACS_EXT_VALUE, _msgpack_load16(uint16_t,n)+1, _ext_zero); case CS_EXT_32: again_fixed_trail_if_zero(ACS_EXT_VALUE, _msgpack_load32(uint32_t,n)+1, _ext_zero); case CS_FLOAT: { union { uint32_t i; float f; } mem; mem.i = _msgpack_load32(uint32_t,n); push_fixed_value(_float, mem.f); } case CS_DOUBLE: { union { uint64_t i; double f; } mem; mem.i = _msgpack_load64(uint64_t,n); #if defined(__arm__) && !(__ARM_EABI__) // arm-oabi // https://github.com/msgpack/msgpack-perl/pull/1 mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL); #endif push_fixed_value(_double, mem.f); } case CS_UINT_8: push_fixed_value(_uint8, *(uint8_t*)n); case CS_UINT_16: push_fixed_value(_uint16, _msgpack_load16(uint16_t,n)); case CS_UINT_32: push_fixed_value(_uint32, _msgpack_load32(uint32_t,n)); case CS_UINT_64: push_fixed_value(_uint64, _msgpack_load64(uint64_t,n)); case CS_INT_8: push_fixed_value(_int8, *(int8_t*)n); case CS_INT_16: push_fixed_value(_int16, _msgpack_load16(int16_t,n)); case CS_INT_32: push_fixed_value(_int32, _msgpack_load32(int32_t,n)); case CS_INT_64: push_fixed_value(_int64, _msgpack_load64(int64_t,n)); case CS_BIN_8: again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero); case CS_BIN_16: again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero); case CS_BIN_32: again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero); case ACS_BIN_VALUE: _bin_zero: push_variable_value(_bin, 
data, n, trail); case CS_RAW_8: again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero); case CS_RAW_16: again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero); case CS_RAW_32: again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero); case ACS_RAW_VALUE: _raw_zero: push_variable_value(_raw, data, n, trail); case ACS_EXT_VALUE: _ext_zero: push_variable_value(_ext, data, n, trail); case CS_ARRAY_16: start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM); case CS_ARRAY_32: /* FIXME security guard */ start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM); case CS_MAP_16: start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY); case CS_MAP_32: /* FIXME security guard */ start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY); default: goto _failed; } } _push: if(top == 0) { goto _finish; } c = &stack[top-1]; switch(c->ct) { case CT_ARRAY_ITEM: if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; } if(++c->count == c->size) { obj = c->obj; if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; } --top; /*printf("stack pop %d\n", top);*/ goto _push; } goto _header_again; case CT_MAP_KEY: c->map_key = obj; c->ct = CT_MAP_VALUE; goto _header_again; case CT_MAP_VALUE: if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; } if(++c->count == c->size) { obj = c->obj; if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; } --top; /*printf("stack pop %d\n", top);*/ goto _push; } c->ct = CT_MAP_KEY; goto _header_again; default: goto _failed; } _header_again: cs = CS_HEADER; ++p; } while(p != pe); goto _out; _finish: if (!construct) unpack_callback_nil(user, &obj); stack[0].obj = obj; ++p; ret = 1; /*printf("-- finish --\n"); */ goto _end; _failed: /*printf("** FAILED **\n"); */ ret = -1; goto _end; _out: ret = 0; goto _end; _end: ctx->cs = cs; ctx->trail = trail; ctx->top = top; *off = p - (const unsigned 
char*)data; return ret; #undef construct_cb } #undef SWITCH_RANGE_BEGIN #undef SWITCH_RANGE #undef SWITCH_RANGE_DEFAULT #undef SWITCH_RANGE_END #undef push_simple_value #undef push_fixed_value #undef push_variable_value #undef again_fixed_trail #undef again_fixed_trail_if_zero #undef start_container template static inline int unpack_container_header(unpack_context* ctx, const char* data, size_t len, size_t* off) { assert(len >= *off); uint32_t size; const unsigned char *const p = (unsigned char*)data + *off; #define inc_offset(inc) \ if (len - *off < inc) \ return 0; \ *off += inc; switch (*p) { case var_offset: inc_offset(3); size = _msgpack_load16(uint16_t, p + 1); break; case var_offset + 1: inc_offset(5); size = _msgpack_load32(uint32_t, p + 1); break; #ifdef USE_CASE_RANGE case fixed_offset + 0x0 ... fixed_offset + 0xf: #else case fixed_offset + 0x0: case fixed_offset + 0x1: case fixed_offset + 0x2: case fixed_offset + 0x3: case fixed_offset + 0x4: case fixed_offset + 0x5: case fixed_offset + 0x6: case fixed_offset + 0x7: case fixed_offset + 0x8: case fixed_offset + 0x9: case fixed_offset + 0xa: case fixed_offset + 0xb: case fixed_offset + 0xc: case fixed_offset + 0xd: case fixed_offset + 0xe: case fixed_offset + 0xf: #endif ++*off; size = ((unsigned int)*p) & 0x0f; break; default: PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream"); return -1; } unpack_callback_uint32(&ctx->user, size, &ctx->stack[0].obj); return 1; } #undef SWITCH_RANGE_BEGIN #undef SWITCH_RANGE #undef SWITCH_RANGE_DEFAULT #undef SWITCH_RANGE_END static const execute_fn unpack_construct = &unpack_execute; static const execute_fn unpack_skip = &unpack_execute; static const execute_fn read_array_header = &unpack_container_header<0x90, 0xdc>; static const execute_fn read_map_header = &unpack_container_header<0x80, 0xde>; #undef NEXT_CS /* vim: set ts=4 sw=4 sts=4 expandtab */ 
tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/unpack.h0000644000000000000000000001703413306562377024577 0ustar rootroot/* * MessagePack for Python unpacking routine * * Copyright (C) 2009 Naoki INADA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define MSGPACK_EMBED_STACK_SIZE (1024) #include "unpack_define.h" typedef struct unpack_user { int use_list; PyObject *object_hook; bool has_pairs_hook; PyObject *list_hook; PyObject *ext_hook; const char *encoding; const char *unicode_errors; Py_ssize_t max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len; } unpack_user; typedef PyObject* msgpack_unpack_object; struct unpack_context; typedef struct unpack_context unpack_context; typedef int (*execute_fn)(unpack_context *ctx, const char* data, size_t len, size_t* off); static inline msgpack_unpack_object unpack_callback_root(unpack_user* u) { return NULL; } static inline int unpack_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o) { PyObject *p = PyInt_FromLong((long)d); if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o) { return unpack_callback_uint16(u, d, o); } static inline int unpack_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o) { PyObject *p = PyInt_FromSize_t((size_t)d); if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o) { PyObject *p; if 
(d > LONG_MAX) { p = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)d); } else { p = PyInt_FromSize_t((size_t)d); } if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o) { PyObject *p = PyInt_FromLong(d); if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o) { return unpack_callback_int32(u, d, o); } static inline int unpack_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o) { return unpack_callback_int32(u, d, o); } static inline int unpack_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o) { PyObject *p; if (d > LONG_MAX || d < LONG_MIN) { p = PyLong_FromLongLong((unsigned PY_LONG_LONG)d); } else { p = PyInt_FromLong((long)d); } *o = p; return 0; } static inline int unpack_callback_double(unpack_user* u, double d, msgpack_unpack_object* o) { PyObject *p = PyFloat_FromDouble(d); if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_float(unpack_user* u, float d, msgpack_unpack_object* o) { return unpack_callback_double(u, d, o); } static inline int unpack_callback_nil(unpack_user* u, msgpack_unpack_object* o) { Py_INCREF(Py_None); *o = Py_None; return 0; } static inline int unpack_callback_true(unpack_user* u, msgpack_unpack_object* o) { Py_INCREF(Py_True); *o = Py_True; return 0; } static inline int unpack_callback_false(unpack_user* u, msgpack_unpack_object* o) { Py_INCREF(Py_False); *o = Py_False; return 0; } static inline int unpack_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o) { if (n > u->max_array_len) { PyErr_Format(PyExc_ValueError, "%u exceeds max_array_len(%zd)", n, u->max_array_len); return -1; } PyObject *p = u->use_list ? 
PyList_New(n) : PyTuple_New(n); if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o) { if (u->use_list) PyList_SET_ITEM(*c, current, o); else PyTuple_SET_ITEM(*c, current, o); return 0; } static inline int unpack_callback_array_end(unpack_user* u, msgpack_unpack_object* c) { if (u->list_hook) { PyObject *new_c = PyObject_CallFunctionObjArgs(u->list_hook, *c, NULL); if (!new_c) return -1; Py_DECREF(*c); *c = new_c; } return 0; } static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o) { if (n > u->max_map_len) { PyErr_Format(PyExc_ValueError, "%u exceeds max_map_len(%zd)", n, u->max_map_len); return -1; } PyObject *p; if (u->has_pairs_hook) { p = PyList_New(n); // Or use tuple? } else { p = PyDict_New(); } if (!p) return -1; *o = p; return 0; } static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v) { if (u->has_pairs_hook) { msgpack_unpack_object item = PyTuple_Pack(2, k, v); if (!item) return -1; Py_DECREF(k); Py_DECREF(v); PyList_SET_ITEM(*c, current, item); return 0; } else if (PyDict_SetItem(*c, k, v) == 0) { Py_DECREF(k); Py_DECREF(v); return 0; } return -1; } static inline int unpack_callback_map_end(unpack_user* u, msgpack_unpack_object* c) { if (u->object_hook) { PyObject *new_c = PyObject_CallFunctionObjArgs(u->object_hook, *c, NULL); if (!new_c) return -1; Py_DECREF(*c); *c = new_c; } return 0; } static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o) { if (l > u->max_str_len) { PyErr_Format(PyExc_ValueError, "%u exceeds max_str_len(%zd)", l, u->max_str_len); return -1; } PyObject *py; if(u->encoding) { py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors); } else { py = PyBytes_FromStringAndSize(p, l); } if (!py) return -1; 
*o = py; return 0; } static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o) { if (l > u->max_bin_len) { PyErr_Format(PyExc_ValueError, "%u exceeds max_bin_len(%zd)", l, u->max_bin_len); return -1; } PyObject *py = PyBytes_FromStringAndSize(p, l); if (!py) return -1; *o = py; return 0; } static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos, unsigned int length, msgpack_unpack_object* o) { PyObject *py; int8_t typecode = (int8_t)*pos++; if (!u->ext_hook) { PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL"); return -1; } if (length-1 > u->max_ext_len) { PyErr_Format(PyExc_ValueError, "%u exceeds max_ext_len(%zd)", length, u->max_ext_len); return -1; } // length also includes the typecode, so the actual data is length-1 #if PY_MAJOR_VERSION == 2 py = PyObject_CallFunction(u->ext_hook, "(is#)", typecode, pos, length-1); #else py = PyObject_CallFunction(u->ext_hook, "(iy#)", typecode, pos, length-1); #endif if (!py) return -1; *o = py; return 0; } #include "unpack_template.h" tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/_packer.pyx0000644000000000000000000002477313306562377025323 0ustar rootroot# coding: utf-8 #cython: embedsignature=True from cpython cimport * from libc.stdlib cimport * from libc.string cimport * from libc.limits cimport * from msgpack.exceptions import PackValueError from msgpack import ExtType cdef extern from "pack.h": struct msgpack_packer: char* buf size_t length size_t buf_size bint use_bin_type int msgpack_pack_int(msgpack_packer* pk, int d) int msgpack_pack_nil(msgpack_packer* pk) int msgpack_pack_true(msgpack_packer* pk) int msgpack_pack_false(msgpack_packer* pk) int msgpack_pack_long(msgpack_packer* pk, long d) int msgpack_pack_long_long(msgpack_packer* pk, long long d) int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d) int msgpack_pack_float(msgpack_packer* pk, float d) int 
msgpack_pack_double(msgpack_packer* pk, double d) int msgpack_pack_array(msgpack_packer* pk, size_t l) int msgpack_pack_map(msgpack_packer* pk, size_t l) int msgpack_pack_raw(msgpack_packer* pk, size_t l) int msgpack_pack_bin(msgpack_packer* pk, size_t l) int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l) int msgpack_pack_ext(msgpack_packer* pk, char typecode, size_t l) cdef int DEFAULT_RECURSE_LIMIT=511 cdef class Packer(object): """ MessagePack Packer usage:: packer = Packer() astream.write(packer.pack(a)) astream.write(packer.pack(b)) Packer's constructor has some keyword arguments: :param callable default: Convert user type to builtin type that Packer supports. See also simplejson's document. :param str encoding: Convert unicode to bytes with this encoding. (default: 'utf-8') :param str unicode_errors: Error handler for encoding unicode. (default: 'strict') :param bool use_single_float: Use single precision float type for float. (default: False) :param bool autoreset: Reset buffer after each pack and return it's content as `bytes`. (default: True). If set this to false, use `bytes()` to get content and `.reset()` to clear buffer. :param bool use_bin_type: Use bin type introduced in msgpack spec 2.0 for bytes. It also enable str8 type for unicode. 
""" cdef msgpack_packer pk cdef object _default cdef object _bencoding cdef object _berrors cdef char *encoding cdef char *unicode_errors cdef bool use_float cdef bint autoreset def __cinit__(self): cdef int buf_size = 1024*1024 self.pk.buf = malloc(buf_size); if self.pk.buf == NULL: raise MemoryError("Unable to allocate internal buffer.") self.pk.buf_size = buf_size self.pk.length = 0 def __init__(self, default=None, encoding='utf-8', unicode_errors='strict', use_single_float=False, bint autoreset=1, bint use_bin_type=0): """ """ self.use_float = use_single_float self.autoreset = autoreset self.pk.use_bin_type = use_bin_type if default is not None: if not PyCallable_Check(default): raise TypeError("default must be a callable.") self._default = default if encoding is None: self.encoding = NULL self.unicode_errors = NULL else: if isinstance(encoding, unicode): self._bencoding = encoding.encode('ascii') else: self._bencoding = encoding self.encoding = PyBytes_AsString(self._bencoding) if isinstance(unicode_errors, unicode): self._berrors = unicode_errors.encode('ascii') else: self._berrors = unicode_errors self.unicode_errors = PyBytes_AsString(self._berrors) def __dealloc__(self): free(self.pk.buf); cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: cdef long long llval cdef unsigned long long ullval cdef long longval cdef float fval cdef double dval cdef char* rawval cdef int ret cdef dict d cdef size_t L cdef int default_used = 0 if nest_limit < 0: raise PackValueError("recursion limit exceeded.") while True: if o is None: ret = msgpack_pack_nil(&self.pk) elif isinstance(o, bool): if o: ret = msgpack_pack_true(&self.pk) else: ret = msgpack_pack_false(&self.pk) elif PyLong_Check(o): # PyInt_Check(long) is True for Python 3. # Sow we should test long before int. 
if o > 0: ullval = o ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) else: llval = o ret = msgpack_pack_long_long(&self.pk, llval) elif PyInt_Check(o): longval = o ret = msgpack_pack_long(&self.pk, longval) elif PyFloat_Check(o): if self.use_float: fval = o ret = msgpack_pack_float(&self.pk, fval) else: dval = o ret = msgpack_pack_double(&self.pk, dval) elif PyBytes_Check(o): L = len(o) if L > (2**32)-1: raise ValueError("bytes is too large") rawval = o ret = msgpack_pack_bin(&self.pk, L) if ret == 0: ret = msgpack_pack_raw_body(&self.pk, rawval, L) elif PyUnicode_Check(o): if not self.encoding: raise TypeError("Can't encode unicode string: no encoding is specified") o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) L = len(o) if L > (2**32)-1: raise ValueError("dict is too large") rawval = o ret = msgpack_pack_raw(&self.pk, len(o)) if ret == 0: ret = msgpack_pack_raw_body(&self.pk, rawval, len(o)) elif PyDict_CheckExact(o): d = o L = len(d) if L > (2**32)-1: raise ValueError("dict is too large") ret = msgpack_pack_map(&self.pk, L) if ret == 0: for k, v in d.iteritems(): ret = self._pack(k, nest_limit-1) if ret != 0: break ret = self._pack(v, nest_limit-1) if ret != 0: break elif PyDict_Check(o): L = len(o) if L > (2**32)-1: raise ValueError("dict is too large") ret = msgpack_pack_map(&self.pk, L) if ret == 0: for k, v in o.items(): ret = self._pack(k, nest_limit-1) if ret != 0: break ret = self._pack(v, nest_limit-1) if ret != 0: break elif isinstance(o, ExtType): # This should be before Tuple because ExtType is namedtuple. 
longval = o.code rawval = o.data L = len(o.data) if L > (2**32)-1: raise ValueError("EXT data is too large") ret = msgpack_pack_ext(&self.pk, longval, L) ret = msgpack_pack_raw_body(&self.pk, rawval, L) elif PyTuple_Check(o) or PyList_Check(o): L = len(o) if L > (2**32)-1: raise ValueError("list is too large") ret = msgpack_pack_array(&self.pk, L) if ret == 0: for v in o: ret = self._pack(v, nest_limit-1) if ret != 0: break elif not default_used and self._default: o = self._default(o) default_used = 1 continue else: raise TypeError("can't serialize %r" % (o,)) return ret cpdef pack(self, object obj): cdef int ret ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) if ret == -1: raise MemoryError elif ret: # should not happen. raise TypeError if self.autoreset: buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) self.pk.length = 0 return buf def pack_ext_type(self, typecode, data): msgpack_pack_ext(&self.pk, typecode, len(data)) msgpack_pack_raw_body(&self.pk, data, len(data)) def pack_array_header(self, size_t size): if size > (2**32-1): raise ValueError cdef int ret = msgpack_pack_array(&self.pk, size) if ret == -1: raise MemoryError elif ret: # should not happen raise TypeError if self.autoreset: buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) self.pk.length = 0 return buf def pack_map_header(self, size_t size): if size > (2**32-1): raise ValueError cdef int ret = msgpack_pack_map(&self.pk, size) if ret == -1: raise MemoryError elif ret: # should not happen raise TypeError if self.autoreset: buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) self.pk.length = 0 return buf def pack_map_pairs(self, object pairs): """ Pack *pairs* as msgpack map type. *pairs* should sequence of pair. (`len(pairs)` and `for k, v in pairs:` should be supported.) 
""" cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) if ret == 0: for k, v in pairs: ret = self._pack(k) if ret != 0: break ret = self._pack(v) if ret != 0: break if ret == -1: raise MemoryError elif ret: # should not happen raise TypeError if self.autoreset: buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) self.pk.length = 0 return buf def reset(self): """Clear internal buffer.""" self.pk.length = 0 def bytes(self): """Return buffer content.""" return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/pack.h0000644000000000000000000000663313306562377024237 0ustar rootroot/* * MessagePack for Python packing routine * * Copyright (C) 2009 Naoki INADA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include "sysdep.h" #include #include #ifdef __cplusplus extern "C" { #endif #ifdef _MSC_VER #define inline __inline #endif typedef struct msgpack_packer { char *buf; size_t length; size_t buf_size; bool use_bin_type; } msgpack_packer; typedef struct Packer Packer; static inline int msgpack_pack_int(msgpack_packer* pk, int d); static inline int msgpack_pack_long(msgpack_packer* pk, long d); static inline int msgpack_pack_long_long(msgpack_packer* pk, long long d); static inline int msgpack_pack_unsigned_short(msgpack_packer* pk, unsigned short d); static inline int msgpack_pack_unsigned_int(msgpack_packer* pk, unsigned int d); static inline int msgpack_pack_unsigned_long(msgpack_packer* pk, unsigned long d); //static inline int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d); static inline int msgpack_pack_uint8(msgpack_packer* pk, uint8_t d); static inline int msgpack_pack_uint16(msgpack_packer* pk, uint16_t d); static inline int msgpack_pack_uint32(msgpack_packer* pk, uint32_t d); static inline int msgpack_pack_uint64(msgpack_packer* pk, uint64_t d); static inline int msgpack_pack_int8(msgpack_packer* pk, int8_t d); static inline int msgpack_pack_int16(msgpack_packer* pk, int16_t d); static inline int msgpack_pack_int32(msgpack_packer* pk, int32_t d); static inline int msgpack_pack_int64(msgpack_packer* pk, int64_t d); static inline int msgpack_pack_float(msgpack_packer* pk, float d); static inline int msgpack_pack_double(msgpack_packer* pk, double d); static inline int msgpack_pack_nil(msgpack_packer* pk); static inline int msgpack_pack_true(msgpack_packer* pk); static inline int msgpack_pack_false(msgpack_packer* pk); static inline int msgpack_pack_array(msgpack_packer* pk, unsigned int n); static inline int msgpack_pack_map(msgpack_packer* pk, unsigned int n); static inline int msgpack_pack_raw(msgpack_packer* pk, size_t l); static inline int msgpack_pack_bin(msgpack_packer* pk, size_t l); static inline int 
msgpack_pack_raw_body(msgpack_packer* pk, const void* b, size_t l); static inline int msgpack_pack_ext(msgpack_packer* pk, char typecode, size_t l); static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l) { char* buf = pk->buf; size_t bs = pk->buf_size; size_t len = pk->length; if (len + l > bs) { bs = (len + l) * 2; buf = (char*)realloc(buf, bs); if (!buf) return -1; } memcpy(buf + len, data, l); len += l; pk->buf = buf; pk->buf_size = bs; pk->length = len; return 0; } #define msgpack_pack_append_buffer(user, buf, len) \ return msgpack_pack_write(user, (const char*)buf, len) #include "pack_template.h" #ifdef __cplusplus } #endif tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/__init__.py0000644000000000000000000000255113306562377025254 0ustar rootroot# coding: utf-8 from msgpack._version import version from msgpack.exceptions import * from collections import namedtuple class ExtType(namedtuple('ExtType', 'code data')): """ExtType represents ext type in msgpack.""" def __new__(cls, code, data): if not isinstance(code, int): raise TypeError("code must be int") if not isinstance(data, bytes): raise TypeError("data must be bytes") if not 0 <= code <= 127: raise ValueError("code must be 0~127") return super(ExtType, cls).__new__(cls, code, data) import os if os.environ.get('MSGPACK_PUREPYTHON'): from msgpack.fallback import Packer, unpack, unpackb, Unpacker else: try: from msgpack._packer import Packer from msgpack._unpacker import unpack, unpackb, Unpacker except ImportError: from msgpack.fallback import Packer, unpack, unpackb, Unpacker def pack(o, stream, **kwargs): """ Pack object `o` and write it to `stream` See :class:`Packer` for options. """ packer = Packer(**kwargs) stream.write(packer.pack(o)) def packb(o, **kwargs): """ Pack object `o` and return packed bytes See :class:`Packer` for options. """ return Packer(**kwargs).pack(o) # alias for compatibility to simplejson/marshal/pickle. 
load = unpack loads = unpackb dump = pack dumps = packb tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/sysdep.h0000644000000000000000000001450013306562377024620 0ustar rootroot/* * MessagePack system dependencies * * Copyright (C) 2008-2010 FURUHASHI Sadayuki * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MSGPACK_SYSDEP_H__ #define MSGPACK_SYSDEP_H__ #include #include #if defined(_MSC_VER) && _MSC_VER < 1600 typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; #elif defined(_MSC_VER) // && _MSC_VER >= 1600 #include #else #include #include #endif #ifdef _WIN32 #define _msgpack_atomic_counter_header typedef long _msgpack_atomic_counter_t; #define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr) #define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr) #elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41) #define _msgpack_atomic_counter_header "gcc_atomic.h" #else typedef unsigned int _msgpack_atomic_counter_t; #define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1) #define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1) #endif #ifdef _WIN32 #ifdef __cplusplus /* numeric_limits::min,max */ #ifdef max #undef max #endif #ifdef min #undef min #endif #endif #else #include /* __BYTE_ORDER */ #endif #if 
!defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) #if __BYTE_ORDER == __LITTLE_ENDIAN #define __LITTLE_ENDIAN__ #elif __BYTE_ORDER == __BIG_ENDIAN #define __BIG_ENDIAN__ #elif _WIN32 #define __LITTLE_ENDIAN__ #endif #endif #ifdef __LITTLE_ENDIAN__ #ifdef _WIN32 # if defined(ntohs) # define _msgpack_be16(x) ntohs(x) # elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400) # define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x)) # else # define _msgpack_be16(x) ( \ ((((uint16_t)x) << 8) ) | \ ((((uint16_t)x) >> 8) ) ) # endif #else # define _msgpack_be16(x) ntohs(x) #endif #ifdef _WIN32 # if defined(ntohl) # define _msgpack_be32(x) ntohl(x) # elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400) # define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x)) # else # define _msgpack_be32(x) \ ( ((((uint32_t)x) << 24) ) | \ ((((uint32_t)x) << 8) & 0x00ff0000U ) | \ ((((uint32_t)x) >> 8) & 0x0000ff00U ) | \ ((((uint32_t)x) >> 24) ) ) # endif #else # define _msgpack_be32(x) ntohl(x) #endif #if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400) # define _msgpack_be64(x) (_byteswap_uint64(x)) #elif defined(bswap_64) # define _msgpack_be64(x) bswap_64(x) #elif defined(__DARWIN_OSSwapInt64) # define _msgpack_be64(x) __DARWIN_OSSwapInt64(x) #else #define _msgpack_be64(x) \ ( ((((uint64_t)x) << 56) ) | \ ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \ ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \ ((((uint64_t)x) << 8) & 0x000000ff00000000ULL ) | \ ((((uint64_t)x) >> 8) & 0x00000000ff000000ULL ) | \ ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \ ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \ ((((uint64_t)x) >> 56) ) ) #endif #define _msgpack_load16(cast, from) ((cast)( \ (((uint16_t)((uint8_t*)(from))[0]) << 8) | \ (((uint16_t)((uint8_t*)(from))[1]) ) )) #define _msgpack_load32(cast, from) ((cast)( \ (((uint32_t)((uint8_t*)(from))[0]) << 24) | \ 
(((uint32_t)((uint8_t*)(from))[1]) << 16) | \ (((uint32_t)((uint8_t*)(from))[2]) << 8) | \ (((uint32_t)((uint8_t*)(from))[3]) ) )) #define _msgpack_load64(cast, from) ((cast)( \ (((uint64_t)((uint8_t*)(from))[0]) << 56) | \ (((uint64_t)((uint8_t*)(from))[1]) << 48) | \ (((uint64_t)((uint8_t*)(from))[2]) << 40) | \ (((uint64_t)((uint8_t*)(from))[3]) << 32) | \ (((uint64_t)((uint8_t*)(from))[4]) << 24) | \ (((uint64_t)((uint8_t*)(from))[5]) << 16) | \ (((uint64_t)((uint8_t*)(from))[6]) << 8) | \ (((uint64_t)((uint8_t*)(from))[7]) ) )) #else #define _msgpack_be16(x) (x) #define _msgpack_be32(x) (x) #define _msgpack_be64(x) (x) #define _msgpack_load16(cast, from) ((cast)( \ (((uint16_t)((uint8_t*)from)[0]) << 8) | \ (((uint16_t)((uint8_t*)from)[1]) ) )) #define _msgpack_load32(cast, from) ((cast)( \ (((uint32_t)((uint8_t*)from)[0]) << 24) | \ (((uint32_t)((uint8_t*)from)[1]) << 16) | \ (((uint32_t)((uint8_t*)from)[2]) << 8) | \ (((uint32_t)((uint8_t*)from)[3]) ) )) #define _msgpack_load64(cast, from) ((cast)( \ (((uint64_t)((uint8_t*)from)[0]) << 56) | \ (((uint64_t)((uint8_t*)from)[1]) << 48) | \ (((uint64_t)((uint8_t*)from)[2]) << 40) | \ (((uint64_t)((uint8_t*)from)[3]) << 32) | \ (((uint64_t)((uint8_t*)from)[4]) << 24) | \ (((uint64_t)((uint8_t*)from)[5]) << 16) | \ (((uint64_t)((uint8_t*)from)[6]) << 8) | \ (((uint64_t)((uint8_t*)from)[7]) ) )) #endif #define _msgpack_store16(to, num) \ do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0) #define _msgpack_store32(to, num) \ do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0) #define _msgpack_store64(to, num) \ do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0) /* #define _msgpack_load16(cast, from) \ ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); }) #define _msgpack_load32(cast, from) \ ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); }) #define _msgpack_load64(cast, from) \ ({ cast val; memcpy(&val, (char*)from, 8); 
_msgpack_be64(val); }) */ #endif /* msgpack/sysdep.h */ tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/msgpack/exceptions.py0000644000000000000000000000076713306562377025705 0ustar rootrootclass UnpackException(Exception): pass class BufferFull(UnpackException): pass class OutOfData(UnpackException): pass class UnpackValueError(UnpackException, ValueError): pass class ExtraData(ValueError): def __init__(self, unpacked, extra): self.unpacked = unpacked self.extra = extra def __str__(self): return "unpack(b) received extra data." class PackException(Exception): pass class PackValueError(PackException, ValueError): pass tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/ChangeLog.rst0000644000000000000000000001436313306562377024103 0ustar rootroot0.4.6 ===== :release date: 2015-03-13 Bugs fixed ---------- * fallback.Unpacker: Fix Data corruption when OutOfData. This bug only affects "Streaming unpacking." 0.4.5 ===== :release date: 2015-01-25 Incompatible Changes -------------------- Changes ------- Bugs fixed ---------- * Fix test failure on pytest 2.3. (by @ktdreyer) * Fix typos in ChangeLog. (Thanks to @dmick) * Improve README.rst (by @msabramo) 0.4.4 ===== :release date: 2015-01-09 Incompatible Changes -------------------- Changes ------- Bugs fixed ---------- * Fix compile error. 0.4.3 ===== :release date: 2015-01-07 Incompatible Changes -------------------- Changes ------- Bugs fixed ---------- * Unpacker may unpack wrong uint32 value on 32bit or LLP64 environment. (#101) * Build failed on Windows Python 2.7. 0.4.2 ===== :release date: 2014-03-26 Incompatible Changes -------------------- Changes ------- Bugs fixed ---------- * Unpacker doesn't increment refcount of ExtType hook. * Packer raises no exception for inputs doesn't fit to msgpack format. 0.4.1 ===== :release date: 2014-02-17 Incompatible Changes -------------------- Changes ------- * fallback.Unpacker.feed() supports bytearray. 
Bugs fixed ---------- * Unpacker doesn't increment refcount of hooks. Hooks may be GCed while unpacking. * Unpacker may read unfilled internal buffer. 0.4.0 ===== :release date: 2013-10-21 Incompatible Changes -------------------- * Raises TypeError instead of ValueError when packer receives unsupported type. Changes ------- * Support New msgpack spec. 0.3.0 ===== Incompatible Changes -------------------- * Default value of ``use_list`` is ``True`` for now. (It was ``False`` for 0.2.x) You should pass it explicitly for compatibility to 0.2.x. * `Unpacker.unpack()` and some unpack methods now raise `OutOfData` instead of `StopIteration`. `StopIteration` is used for iterator protocol only. Changes ------- * Pure Python fallback module is added. (thanks to bwesterb) * Add ``.skip()`` method to ``Unpacker`` (thanks to jnothman) * Add capturing feature. You can pass the writable object to ``Unpacker.unpack()`` as a second parameter. * Add ``Packer.pack_array_header`` and ``Packer.pack_map_header``. These methods only pack header of each type. * Add ``autoreset`` option to ``Packer`` (default: True). Packer doesn't return packed bytes and clear internal buffer. * Add ``Packer.pack_map_pairs``. It packs sequence of pair to map type. 0.2.4 ======= :release date: 2012-12-22 Bugs fixed ---------- * Fix SEGV when object_hook or object_pairs_hook raise Exception. (#39) 0.2.3 ======= :release date: 2012-12-11 Changes ------- * Warn when use_list is not specified. It's default value will be changed in 0.3. Bugs fixed ----------- * Can't pack subclass of dict. 0.2.2 ======= :release date: 2012-09-21 Changes ------- * Add ``use_single_float`` option to ``Packer``. When it is true, packs float object in single precision format. Bugs fixed ----------- * ``unpack()`` didn't restores gc state when it called with gc disabled. ``unpack()`` doesn't control gc now instead of restoring gc state collectly. User can control gc state when gc cause performance issue. 
* ``Unpacker``'s ``read_size`` option didn't used. 0.2.1 ======= :release date: 2012-08-20 Changes ------- * Add ``max_buffer_size`` parameter to Unpacker. It limits internal buffer size and allows unpack data from untrusted source safely. * Unpacker's buffer reallocation algorithm is less greedy now. It cause perforamce derease in rare case but memory efficient and don't allocate than ``max_buffer_size``. Bugs fixed ---------- * Fix msgpack didn't work on SPARC Solaris. It was because choosing wrong byteorder on compilation time. Use ``sys.byteorder`` to get correct byte order. Very thanks to Chris Casey for giving test environment to me. 0.2.0 ======= :release date: 2012-06-27 Changes ------- * Drop supporting Python 2.5 and unify tests for Py2 and Py3. * Use new version of msgpack-c. It packs correctly on big endian platforms. * Remove deprecated packs and unpacks API. Bugs fixed ---------- * #8 Packing subclass of dict raises TypeError. (Thanks to Steeve Morin.) 0.1.13 ======= :release date: 2012-04-21 New ---- * Don't accept subtype of list and tuple as msgpack list. (Steeve Morin) It allows customize how it serialized with ``default`` argument. Bugs fixed ----------- * Fix wrong error message. (David Wolever) * Fix memory leak while unpacking when ``object_hook`` or ``list_hook`` is used. (Steeve Morin) Other changes ------------- * setup.py works on Python 2.5 (Steffen Siering) * Optimization for serializing dict. 0.1.12 ======= :release date: 2011-12-27 Bugs fixed ------------- * Re-enable packs/unpacks removed at 0.1.11. It will be removed when 0.2 is released. 0.1.11 ======= :release date: 2011-12-26 Bugs fixed ------------- * Include test code for Python3 to sdist. (Johan Bergström) * Fix compilation error on MSVC. (davidgaleano) 0.1.10 ====== :release date: 2011-08-22 New feature ----------- * Add ``encoding`` and ``unicode_errors`` option to packer and unpacker. When this option is specified, (un)packs unicode object instead of bytes. 
This enables using msgpack as a replacement of json. (tailhook) 0.1.9 ====== :release date: 2011-01-29 New feature ----------- * ``use_list`` option is added to unpack(b) like Unpacker. (Use keyword argument because order of parameters are different) Bugs fixed ---------- * Fix typo. * Add MemoryError check. 0.1.8 ====== :release date: 2011-01-10 New feature ------------ * Support ``loads`` and ``dumps`` aliases for API compatibility with simplejson and pickle. * Add *object_hook* and *list_hook* option to unpacker. It allows you to hook unpacing mapping type and array type. * Add *default* option to packer. It allows you to pack unsupported types. * unpacker accepts (old) buffer types. Bugs fixed ---------- * Fix segv around ``Unpacker.feed`` or ``Unpacker(file)``. 0.1.7 ====== :release date: 2010-11-02 New feature ------------ * Add *object_hook* and *list_hook* option to unpacker. It allows you to hook unpacing mapping type and array type. * Add *default* option to packer. It allows you to pack unsupported types. * unpacker accepts (old) buffer types. Bugs fixed ---------- * Compilation error on win32. 
tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/0000755000000000000000000000000013306562377022472 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_extension.py0000644000000000000000000000426513306562377026126 0ustar rootrootfrom __future__ import print_function import array import msgpack from msgpack import ExtType def test_pack_ext_type(): def p(s): packer = msgpack.Packer() packer.pack_ext_type(0x42, s) return packer.bytes() assert p(b'A') == b'\xd4\x42A' # fixext 1 assert p(b'AB') == b'\xd5\x42AB' # fixext 2 assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4 assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8 assert p(b'A'*16) == b'\xd8\x42' + b'A'*16 # fixext 16 assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8 assert p(b'A'*0x0123) == b'\xc8\x01\x23\x42' + b'A'*0x0123 # ext 16 assert p(b'A'*0x00012345) == b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345 # ext 32 def test_unpack_ext_type(): def check(b, expected): assert msgpack.unpackb(b) == expected check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1 check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2 check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4 check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8 check(b'\xd8\x42' + b'A'*16, ExtType(0x42, b'A'*16)) # fixext 16 check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8 check(b'\xc8\x01\x23\x42' + b'A'*0x0123, ExtType(0x42, b'A'*0x0123)) # ext 16 check(b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345, ExtType(0x42, b'A'*0x00012345)) # ext 32 def test_extension_type(): def default(obj): print('default called', obj) if isinstance(obj, array.array): typecode = 123 # application specific typecode data = obj.tostring() return ExtType(typecode, data) raise TypeError("Unknwon type object %r" % (obj,)) def ext_hook(code, data): print('ext_hook called', code, data) assert code == 123 obj = array.array('d') obj.fromstring(data) return obj obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])] s = 
msgpack.packb(obj, default=default) obj2 = msgpack.unpackb(s, ext_hook=ext_hook) assert obj == obj2 tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_sequnpack.py0000644000000000000000000000555013306562377026102 0ustar rootroot#!/usr/bin/env python # coding: utf-8 import io from msgpack import Unpacker, BufferFull from msgpack.exceptions import OutOfData from pytest import raises def test_partialdata(): unpacker = Unpacker() unpacker.feed(b'\xa5') with raises(StopIteration): next(iter(unpacker)) unpacker.feed(b'h') with raises(StopIteration): next(iter(unpacker)) unpacker.feed(b'a') with raises(StopIteration): next(iter(unpacker)) unpacker.feed(b'l') with raises(StopIteration): next(iter(unpacker)) unpacker.feed(b'l') with raises(StopIteration): next(iter(unpacker)) unpacker.feed(b'o') assert next(iter(unpacker)) == b'hallo' def test_foobar(): unpacker = Unpacker(read_size=3, use_list=1) unpacker.feed(b'foobar') assert unpacker.unpack() == ord(b'f') assert unpacker.unpack() == ord(b'o') assert unpacker.unpack() == ord(b'o') assert unpacker.unpack() == ord(b'b') assert unpacker.unpack() == ord(b'a') assert unpacker.unpack() == ord(b'r') with raises(OutOfData): unpacker.unpack() unpacker.feed(b'foo') unpacker.feed(b'bar') k = 0 for o, e in zip(unpacker, 'foobarbaz'): assert o == ord(e) k += 1 assert k == len(b'foobar') def test_foobar_skip(): unpacker = Unpacker(read_size=3, use_list=1) unpacker.feed(b'foobar') assert unpacker.unpack() == ord(b'f') unpacker.skip() assert unpacker.unpack() == ord(b'o') unpacker.skip() assert unpacker.unpack() == ord(b'a') unpacker.skip() with raises(OutOfData): unpacker.unpack() def test_maxbuffersize(): with raises(ValueError): Unpacker(read_size=5, max_buffer_size=3) unpacker = Unpacker(read_size=3, max_buffer_size=3, use_list=1) unpacker.feed(b'fo') with raises(BufferFull): unpacker.feed(b'ob') unpacker.feed(b'o') assert ord('f') == next(unpacker) unpacker.feed(b'b') assert ord('o') == next(unpacker) assert 
ord('o') == next(unpacker) assert ord('b') == next(unpacker) def test_readbytes(): unpacker = Unpacker(read_size=3) unpacker.feed(b'foobar') assert unpacker.unpack() == ord(b'f') assert unpacker.read_bytes(3) == b'oob' assert unpacker.unpack() == ord(b'a') assert unpacker.unpack() == ord(b'r') # Test buffer refill unpacker = Unpacker(io.BytesIO(b'foobar'), read_size=3) assert unpacker.unpack() == ord(b'f') assert unpacker.read_bytes(3) == b'oob' assert unpacker.unpack() == ord(b'a') assert unpacker.unpack() == ord(b'r') def test_issue124(): unpacker = Unpacker() unpacker.feed(b'\xa1?\xa1!') assert tuple(unpacker) == (b'?', b'!') assert tuple(unpacker) == () unpacker.feed(b"\xa1?\xa1") assert tuple(unpacker) == (b'?',) assert tuple(unpacker) == () unpacker.feed(b"!") assert tuple(unpacker) == (b'!',) assert tuple(unpacker) == () tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_unpack_raw.py0000644000000000000000000000142313306562377026235 0ustar rootroot"""Tests for cases where the user seeks to obtain packed msgpack objects""" import io from msgpack import Unpacker, packb def test_write_bytes(): unpacker = Unpacker() unpacker.feed(b'abc') f = io.BytesIO() assert unpacker.unpack(f.write) == ord('a') assert f.getvalue() == b'a' f = io.BytesIO() assert unpacker.skip(f.write) is None assert f.getvalue() == b'b' f = io.BytesIO() assert unpacker.skip() is None assert f.getvalue() == b'' def test_write_bytes_multi_buffer(): long_val = (5) * 100 expected = packb(long_val) unpacker = Unpacker(io.BytesIO(expected), read_size=3, max_buffer_size=3) f = io.BytesIO() unpacked = unpacker.unpack(f.write) assert unpacked == long_val assert f.getvalue() == expected tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_obj.py0000644000000000000000000000372213306562377024661 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from pytest import raises from msgpack import packb, unpackb def _decode_complex(obj): if b'__complex__' in obj: return 
complex(obj[b'real'], obj[b'imag']) return obj def _encode_complex(obj): if isinstance(obj, complex): return {b'__complex__': True, b'real': 1, b'imag': 2} return obj def test_encode_hook(): packed = packb([3, 1+2j], default=_encode_complex) unpacked = unpackb(packed, use_list=1) assert unpacked[1] == {b'__complex__': True, b'real': 1, b'imag': 2} def test_decode_hook(): packed = packb([3, {b'__complex__': True, b'real': 1, b'imag': 2}]) unpacked = unpackb(packed, object_hook=_decode_complex, use_list=1) assert unpacked[1] == 1+2j def test_decode_pairs_hook(): packed = packb([3, {1: 2, 3: 4}]) prod_sum = 1 * 2 + 3 * 4 unpacked = unpackb(packed, object_pairs_hook=lambda l: sum(k * v for k, v in l), use_list=1) assert unpacked[1] == prod_sum def test_only_one_obj_hook(): with raises(TypeError): unpackb(b'', object_hook=lambda x: x, object_pairs_hook=lambda x: x) def test_bad_hook(): with raises(TypeError): packed = packb([3, 1+2j], default=lambda o: o) unpacked = unpackb(packed, use_list=1) def _arr_to_str(arr): return ''.join(str(c) for c in arr) def test_array_hook(): packed = packb([1,2,3]) unpacked = unpackb(packed, list_hook=_arr_to_str, use_list=1) assert unpacked == '123' class DecodeError(Exception): pass def bad_complex_decoder(o): raise DecodeError("Ooops!") def test_an_exception_in_objecthook1(): with raises(DecodeError): packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}}) unpackb(packed, object_hook=bad_complex_decoder) def test_an_exception_in_objecthook2(): with raises(DecodeError): packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]}) unpackb(packed, list_hook=bad_complex_decoder, use_list=1) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_newspec.py0000644000000000000000000000502313306562377025547 0ustar rootroot# coding: utf-8 from msgpack import packb, unpackb, ExtType def test_str8(): header = b'\xd9' data = b'x' * 32 b = packb(data.decode(), use_bin_type=True) assert len(b) == len(data) + 2 assert 
b[0:2] == header + b'\x20' assert b[2:] == data assert unpackb(b) == data data = b'x' * 255 b = packb(data.decode(), use_bin_type=True) assert len(b) == len(data) + 2 assert b[0:2] == header + b'\xff' assert b[2:] == data assert unpackb(b) == data def test_bin8(): header = b'\xc4' data = b'' b = packb(data, use_bin_type=True) assert len(b) == len(data) + 2 assert b[0:2] == header + b'\x00' assert b[2:] == data assert unpackb(b) == data data = b'x' * 255 b = packb(data, use_bin_type=True) assert len(b) == len(data) + 2 assert b[0:2] == header + b'\xff' assert b[2:] == data assert unpackb(b) == data def test_bin16(): header = b'\xc5' data = b'x' * 256 b = packb(data, use_bin_type=True) assert len(b) == len(data) + 3 assert b[0:1] == header assert b[1:3] == b'\x01\x00' assert b[3:] == data assert unpackb(b) == data data = b'x' * 65535 b = packb(data, use_bin_type=True) assert len(b) == len(data) + 3 assert b[0:1] == header assert b[1:3] == b'\xff\xff' assert b[3:] == data assert unpackb(b) == data def test_bin32(): header = b'\xc6' data = b'x' * 65536 b = packb(data, use_bin_type=True) assert len(b) == len(data) + 5 assert b[0:1] == header assert b[1:5] == b'\x00\x01\x00\x00' assert b[5:] == data assert unpackb(b) == data def test_ext(): def check(ext, packed): assert packb(ext) == packed assert unpackb(packed) == ext check(ExtType(0x42, b'Z'), b'\xd4\x42Z') # fixext 1 check(ExtType(0x42, b'ZZ'), b'\xd5\x42ZZ') # fixext 2 check(ExtType(0x42, b'Z'*4), b'\xd6\x42' + b'Z'*4) # fixext 4 check(ExtType(0x42, b'Z'*8), b'\xd7\x42' + b'Z'*8) # fixext 8 check(ExtType(0x42, b'Z'*16), b'\xd8\x42' + b'Z'*16) # fixext 16 # ext 8 check(ExtType(0x42, b''), b'\xc7\x00\x42') check(ExtType(0x42, b'Z'*255), b'\xc7\xff\x42' + b'Z'*255) # ext 16 check(ExtType(0x42, b'Z'*256), b'\xc8\x01\x00\x42' + b'Z'*256) check(ExtType(0x42, b'Z'*0xffff), b'\xc8\xff\xff\x42' + b'Z'*0xffff) # ext 32 check(ExtType(0x42, b'Z'*0x10000), b'\xc9\x00\x01\x00\x00\x42' + b'Z'*0x10000) # needs large memory 
#check(ExtType(0x42, b'Z'*0xffffffff), # b'\xc9\xff\xff\xff\xff\x42' + b'Z'*0xffffffff) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_seq.py0000644000000000000000000000235713306562377024702 0ustar rootroot#!/usr/bin/env python # coding: utf-8 import io import msgpack binarydata = bytes(bytearray(range(256))) def gen_binary_data(idx): return binarydata[:idx % 300] def test_exceeding_unpacker_read_size(): dumpf = io.BytesIO() packer = msgpack.Packer() NUMBER_OF_STRINGS = 6 read_size = 16 # 5 ok for read_size=16, while 6 glibc detected *** python: double free or corruption (fasttop): # 20 ok for read_size=256, while 25 segfaults / glibc detected *** python: double free or corruption (!prev) # 40 ok for read_size=1024, while 50 introduces errors # 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** python: double free or corruption (!prev): for idx in range(NUMBER_OF_STRINGS): data = gen_binary_data(idx) dumpf.write(packer.pack(data)) f = io.BytesIO(dumpf.getvalue()) dumpf.close() unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1) read_count = 0 for idx, o in enumerate(unpacker): assert type(o) == bytes assert o == gen_binary_data(idx) read_count += 1 assert read_count == NUMBER_OF_STRINGS tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_subtype.py0000644000000000000000000000063413306562377025601 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from msgpack import packb, unpackb from collections import namedtuple class MyList(list): pass class MyDict(dict): pass class MyTuple(tuple): pass MyNamedTuple = namedtuple('MyNamedTuple', 'x y') def test_types(): assert packb(MyDict()) == packb(dict()) assert packb(MyList()) == packb(list()) assert packb(MyNamedTuple(1, 2)) == packb((1, 2)) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_read_size.py0000644000000000000000000000350013306562377026046 0ustar rootroot"""Test Unpacker's read_array_header and read_map_header methods""" 
from msgpack import packb, Unpacker, OutOfData UnexpectedTypeException = ValueError def test_read_array_header(): unpacker = Unpacker() unpacker.feed(packb(['a', 'b', 'c'])) assert unpacker.read_array_header() == 3 assert unpacker.unpack() == b'a' assert unpacker.unpack() == b'b' assert unpacker.unpack() == b'c' try: unpacker.unpack() assert 0, 'should raise exception' except OutOfData: assert 1, 'okay' def test_read_map_header(): unpacker = Unpacker() unpacker.feed(packb({'a': 'A'})) assert unpacker.read_map_header() == 1 assert unpacker.unpack() == B'a' assert unpacker.unpack() == B'A' try: unpacker.unpack() assert 0, 'should raise exception' except OutOfData: assert 1, 'okay' def test_incorrect_type_array(): unpacker = Unpacker() unpacker.feed(packb(1)) try: unpacker.read_array_header() assert 0, 'should raise exception' except UnexpectedTypeException: assert 1, 'okay' def test_incorrect_type_map(): unpacker = Unpacker() unpacker.feed(packb(1)) try: unpacker.read_map_header() assert 0, 'should raise exception' except UnexpectedTypeException: assert 1, 'okay' def test_correct_type_nested_array(): unpacker = Unpacker() unpacker.feed(packb({'a': ['b', 'c', 'd']})) try: unpacker.read_array_header() assert 0, 'should raise exception' except UnexpectedTypeException: assert 1, 'okay' def test_incorrect_type_nested_map(): unpacker = Unpacker() unpacker.feed(packb([{'a': 'b'}])) try: unpacker.read_map_header() assert 0, 'should raise exception' except UnexpectedTypeException: assert 1, 'okay' tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_unpack.py0000644000000000000000000000360513306562377025370 0ustar rootrootfrom io import BytesIO import sys from msgpack import Unpacker, packb, OutOfData, ExtType from pytest import raises, mark def test_unpack_array_header_from_file(): f = BytesIO(packb([1,2,3,4])) unpacker = Unpacker(f) assert unpacker.read_array_header() == 4 assert unpacker.unpack() == 1 assert unpacker.unpack() == 2 assert unpacker.unpack() == 
3 assert unpacker.unpack() == 4 with raises(OutOfData): unpacker.unpack() @mark.skipif("not hasattr(sys, 'getrefcount') == True", reason='sys.getrefcount() is needed to pass this test') def test_unpacker_hook_refcnt(): result = [] def hook(x): result.append(x) return x basecnt = sys.getrefcount(hook) up = Unpacker(object_hook=hook, list_hook=hook) assert sys.getrefcount(hook) >= basecnt + 2 up.feed(packb([{}])) up.feed(packb([{}])) assert up.unpack() == [{}] assert up.unpack() == [{}] assert result == [{}, [{}], {}, [{}]] del up assert sys.getrefcount(hook) == basecnt def test_unpacker_ext_hook(): class MyUnpacker(Unpacker): def __init__(self): super(MyUnpacker, self).__init__(ext_hook=self._hook, encoding='utf-8') def _hook(self, code, data): if code == 1: return int(data) else: return ExtType(code, data) unpacker = MyUnpacker() unpacker.feed(packb({'a': 1}, encoding='utf-8')) assert unpacker.unpack() == {'a': 1} unpacker.feed(packb({'a': ExtType(1, b'123')}, encoding='utf-8')) assert unpacker.unpack() == {'a': 123} unpacker.feed(packb({'a': ExtType(2, b'321')}, encoding='utf-8')) assert unpacker.unpack() == {'a': ExtType(2, b'321')} if __name__ == '__main__': test_unpack_array_header_from_file() test_unpacker_hook_refcnt() test_unpacker_ext_hook() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_format.py0000644000000000000000000000406513306562377025400 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from msgpack import unpackb def check(src, should, use_list=0): assert unpackb(src, use_list=use_list) == should def testSimpleValue(): check(b"\x93\xc0\xc2\xc3", (None, False, True,)) def testFixnum(): check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0,64,127,), (-32,-16,-1,),) ) def testFixArray(): check(b"\x92\x90\x91\x91\xc0", ((),((None,),),), ) def testFixRaw(): check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def",), ) def testFixMap(): check( b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80", {False: {None: None}, True:{None:{}}}, ) def 
testUnsignedInt(): check( b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00" b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00" b"\xce\xff\xff\xff\xff", (0, 128, 255, 0, 32768, 65535, 0, 2147483648, 4294967295,), ) def testSignedInt(): check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00" b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00" b"\xd2\xff\xff\xff\xff", (0, -128, -1, 0, -32768, -1, 0, -2147483648, -1,)) def testRaw(): check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00" b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab", (b"", b"a", b"ab", b"", b"a", b"ab")) def testArray(): check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00" b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02" b"\xc2\xc3", ((), (None,), (False,True), (), (None,), (False,True)) ) def testMap(): check( b"\x96" b"\xde\x00\x00" b"\xde\x00\x01\xc0\xc2" b"\xde\x00\x02\xc0\xc2\xc3\xc2" b"\xdf\x00\x00\x00\x00" b"\xdf\x00\x00\x00\x01\xc0\xc2" b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2", ({}, {None: False}, {True: False, None: False}, {}, {None: False}, {True: False, None: False})) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_except.py0000644000000000000000000000153513306562377025377 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from pytest import raises from msgpack import packb, unpackb import datetime class DummyException(Exception): pass def test_raise_on_find_unsupported_value(): with raises(TypeError): packb(datetime.datetime.now()) def test_raise_from_object_hook(): def hook(obj): raise DummyException raises(DummyException, unpackb, packb({}), object_hook=hook) raises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook) raises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook) raises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook) raises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook) def 
test_invalidvalue(): with raises(ValueError): unpackb(b'\xd9\x97#DL_') tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_buffer.py0000644000000000000000000000075113306562377025357 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from msgpack import packb, unpackb def test_unpack_buffer(): from array import array buf = array('b') buf.fromstring(packb((b'foo', b'bar'))) obj = unpackb(buf, use_list=1) assert [b'foo', b'bar'] == obj def test_unpack_bytearray(): buf = bytearray(packb(('foo', 'bar'))) obj = unpackb(buf, use_list=1) assert [b'foo', b'bar'] == obj expected_type = bytes assert all(type(s) == expected_type for s in obj) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_limits.py0000644000000000000000000000614013306562377025405 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, division, print_function, unicode_literals import pytest from msgpack import packb, unpackb, Packer, Unpacker, ExtType def test_integer(): x = -(2 ** 63) assert unpackb(packb(x)) == x with pytest.raises((OverflowError, ValueError)): packb(x-1) x = 2 ** 64 - 1 assert unpackb(packb(x)) == x with pytest.raises((OverflowError, ValueError)): packb(x+1) def test_array_header(): packer = Packer() packer.pack_array_header(2**32-1) with pytest.raises((OverflowError, ValueError)): packer.pack_array_header(2**32) def test_map_header(): packer = Packer() packer.pack_map_header(2**32-1) with pytest.raises((OverflowError, ValueError)): packer.pack_array_header(2**32) def test_max_str_len(): d = 'x' * 3 packed = packb(d) unpacker = Unpacker(max_str_len=3, encoding='utf-8') unpacker.feed(packed) assert unpacker.unpack() == d unpacker = Unpacker(max_str_len=2, encoding='utf-8') with pytest.raises(ValueError): unpacker.feed(packed) unpacker.unpack() def test_max_bin_len(): d = b'x' * 3 packed = packb(d, use_bin_type=True) unpacker = Unpacker(max_bin_len=3) unpacker.feed(packed) assert unpacker.unpack() == d unpacker = 
Unpacker(max_bin_len=2) with pytest.raises(ValueError): unpacker.feed(packed) unpacker.unpack() def test_max_array_len(): d = [1,2,3] packed = packb(d) unpacker = Unpacker(max_array_len=3) unpacker.feed(packed) assert unpacker.unpack() == d unpacker = Unpacker(max_array_len=2) with pytest.raises(ValueError): unpacker.feed(packed) unpacker.unpack() def test_max_map_len(): d = {1: 2, 3: 4, 5: 6} packed = packb(d) unpacker = Unpacker(max_map_len=3) unpacker.feed(packed) assert unpacker.unpack() == d unpacker = Unpacker(max_map_len=2) with pytest.raises(ValueError): unpacker.feed(packed) unpacker.unpack() def test_max_ext_len(): d = ExtType(42, b"abc") packed = packb(d) unpacker = Unpacker(max_ext_len=3) unpacker.feed(packed) assert unpacker.unpack() == d unpacker = Unpacker(max_ext_len=2) with pytest.raises(ValueError): unpacker.feed(packed) unpacker.unpack() # PyPy fails following tests because of constant folding? # https://bugs.pypy.org/issue1721 #@pytest.mark.skipif(True, reason="Requires very large memory.") #def test_binary(): # x = b'x' * (2**32 - 1) # assert unpackb(packb(x)) == x # del x # x = b'x' * (2**32) # with pytest.raises(ValueError): # packb(x) # # #@pytest.mark.skipif(True, reason="Requires very large memory.") #def test_string(): # x = 'x' * (2**32 - 1) # assert unpackb(packb(x)) == x # x += 'y' # with pytest.raises(ValueError): # packb(x) # # #@pytest.mark.skipif(True, reason="Requires very large memory.") #def test_array(): # x = [0] * (2**32 - 1) # assert unpackb(packb(x)) == x # x.append(0) # with pytest.raises(ValueError): # packb(x) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_case.py0000644000000000000000000000511013306562377025013 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from msgpack import packb, unpackb def check(length, obj): v = packb(obj) assert len(v) == length, \ "%r length should be %r but get %r" % (obj, length, len(v)) assert unpackb(v, use_list=0) == obj def test_1(): for o in [None, True, False, 
0, 1, (1 << 6), (1 << 7) - 1, -1, -((1<<5)-1), -(1<<5)]: check(1, o) def test_2(): for o in [1 << 7, (1 << 8) - 1, -((1<<5)+1), -(1<<7) ]: check(2, o) def test_3(): for o in [1 << 8, (1 << 16) - 1, -((1<<7)+1), -(1<<15)]: check(3, o) def test_5(): for o in [1 << 16, (1 << 32) - 1, -((1<<15)+1), -(1<<31)]: check(5, o) def test_9(): for o in [1 << 32, (1 << 64) - 1, -((1<<31)+1), -(1<<63), 1.0, 0.1, -0.1, -1.0]: check(9, o) def check_raw(overhead, num): check(num + overhead, b" " * num) def test_fixraw(): check_raw(1, 0) check_raw(1, (1<<5) - 1) def test_raw16(): check_raw(3, 1<<5) check_raw(3, (1<<16) - 1) def test_raw32(): check_raw(5, 1<<16) def check_array(overhead, num): check(num + overhead, (None,) * num) def test_fixarray(): check_array(1, 0) check_array(1, (1 << 4) - 1) def test_array16(): check_array(3, 1 << 4) check_array(3, (1<<16)-1) def test_array32(): check_array(5, (1<<16)) def match(obj, buf): assert packb(obj) == buf assert unpackb(buf, use_list=0) == obj def test_match(): cases = [ (None, b'\xc0'), (False, b'\xc2'), (True, b'\xc3'), (0, b'\x00'), (127, b'\x7f'), (128, b'\xcc\x80'), (256, b'\xcd\x01\x00'), (-1, b'\xff'), (-33, b'\xd0\xdf'), (-129, b'\xd1\xff\x7f'), ({1:1}, b'\x81\x01\x01'), (1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"), ((), b'\x90'), (tuple(range(15)),b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e"), (tuple(range(16)),b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"), ({}, b'\x80'), (dict([(x,x) for x in range(15)]), b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'), (dict([(x,x) for x in range(16)]), b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e\x0f\x0f'), ] for v, p in cases: match(v, p) def test_unicode(): assert unpackb(packb('foobar'), use_list=1) == b'foobar' 
tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/test/test_pack.py0000644000000000000000000001107013306562377025020 0ustar rootroot#!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, division, print_function, unicode_literals import struct from pytest import raises, xfail from msgpack import packb, unpackb, Unpacker, Packer from io import BytesIO def check(data, use_list=False): re = unpackb(packb(data), use_list=use_list) assert re == data def testPack(): test_data = [ 0, 1, 127, 128, 255, 256, 65535, 65536, 4294967295, 4294967296, -1, -32, -33, -128, -129, -32768, -32769, -4294967296, -4294967297, 1.0, b"", b"a", b"a"*31, b"a"*32, None, True, False, (), ((),), ((), None,), {None: 0}, (1<<23), ] for td in test_data: check(td) def testPackUnicode(): test_data = ["", "abcd", ["defgh"], "Русский текст"] for td in test_data: re = unpackb(packb(td, encoding='utf-8'), use_list=1, encoding='utf-8') assert re == td packer = Packer(encoding='utf-8') data = packer.pack(td) re = Unpacker(BytesIO(data), encoding=str('utf-8'), use_list=1).unpack() assert re == td def testPackUTF32(): try: test_data = [ "", "abcd", ["defgh"], "Русский текст", ] for td in test_data: re = unpackb(packb(td, encoding='utf-32'), use_list=1, encoding='utf-32') assert re == td except LookupError as e: xfail(e) def testPackBytes(): test_data = [ b"", b"abcd", (b"defgh",), ] for td in test_data: check(td) def testIgnoreUnicodeErrors(): re = unpackb(packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore', use_list=1) assert re == "abcdef" def testStrictUnicodeUnpack(): with raises(UnicodeDecodeError): unpackb(packb(b'abc\xeddef'), encoding='utf-8', use_list=1) def testStrictUnicodePack(): with raises(UnicodeEncodeError): packb("abc\xeddef", encoding='ascii', unicode_errors='strict') def testIgnoreErrorsPack(): re = unpackb(packb("abcФФФdef", encoding='ascii', unicode_errors='ignore'), encoding='utf-8', use_list=1) assert re == "abcdef" def testNoEncoding(): with 
raises(TypeError): packb("abc", encoding=None) def testDecodeBinary(): re = unpackb(packb(b"abc"), encoding=None, use_list=1) assert re == b"abc" def testPackFloat(): assert packb(1.0, use_single_float=True) == b'\xca' + struct.pack(str('>f'), 1.0) assert packb(1.0, use_single_float=False) == b'\xcb' + struct.pack(str('>d'), 1.0) def testArraySize(sizes=[0, 5, 50, 1000]): bio = BytesIO() packer = Packer() for size in sizes: bio.write(packer.pack_array_header(size)) for i in range(size): bio.write(packer.pack(i)) bio.seek(0) unpacker = Unpacker(bio, use_list=1) for size in sizes: assert unpacker.unpack() == list(range(size)) def test_manualreset(sizes=[0, 5, 50, 1000]): packer = Packer(autoreset=False) for size in sizes: packer.pack_array_header(size) for i in range(size): packer.pack(i) bio = BytesIO(packer.bytes()) unpacker = Unpacker(bio, use_list=1) for size in sizes: assert unpacker.unpack() == list(range(size)) packer.reset() assert packer.bytes() == b'' def testMapSize(sizes=[0, 5, 50, 1000]): bio = BytesIO() packer = Packer() for size in sizes: bio.write(packer.pack_map_header(size)) for i in range(size): bio.write(packer.pack(i)) # key bio.write(packer.pack(i * 2)) # value bio.seek(0) unpacker = Unpacker(bio) for size in sizes: assert unpacker.unpack() == dict((i, i * 2) for i in range(size)) class odict(dict): '''Reimplement OrderedDict to run test on Python 2.6''' def __init__(self, seq): self._seq = seq dict.__init__(self, seq) def items(self): return self._seq[:] def iteritems(self): return iter(self._seq) def keys(self): return [x[0] for x in self._seq] def test_odict(): seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)] od = odict(seq) assert unpackb(packb(od), use_list=1) == dict(seq) def pair_hook(seq): return list(seq) assert unpackb(packb(od), object_pairs_hook=pair_hook, use_list=1) == seq def test_pairlist(): pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')] packer = Packer() packed = packer.pack_map_pairs(pairlist) unpacked = 
unpackb(packed, object_pairs_hook=list) assert pairlist == unpacked tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/Makefile0000644000000000000000000000050013306562377023146 0ustar rootroot.PHONY: test all python3 all: cython python setup.py build_ext -i -f doc-serve: all cd docs && make serve doc: cd docs && make zip upload-doc: python setup.py upload_docs --upload-dir docs/_build/html cython: cython --cplus msgpack/*.pyx python3: cython python3 setup.py build_ext -i -f test: py.test test tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/MANIFEST.in0000644000000000000000000000017613306562377023255 0ustar rootrootinclude setup.py include COPYING include README.rst recursive-include msgpack *.h *.c *.pyx *.cpp recursive-include test *.py tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/COPYING0000644000000000000000000000114613306562377022550 0ustar rootrootCopyright (C) 2008-2011 INADA Naoki Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/setup.py0000644000000000000000000000706113306562377023231 0ustar rootroot#!/usr/bin/env python # coding: utf-8 import os import sys from glob import glob from distutils.command.sdist import sdist from setuptools import setup, Extension from distutils.command.build_ext import build_ext class NoCython(Exception): pass try: import Cython.Compiler.Main as cython_compiler have_cython = True except ImportError: have_cython = False def cythonize(src): sys.stderr.write("cythonize: %r\n" % (src,)) cython_compiler.compile([src], cplus=True, emit_linenums=True) def ensure_source(src): pyx = os.path.splitext(src)[0] + '.pyx' if not os.path.exists(src): if not have_cython: raise NoCython cythonize(pyx) elif (os.path.exists(pyx) and os.stat(src).st_mtime < os.stat(pyx).st_mtime and have_cython): cythonize(pyx) return src class BuildExt(build_ext): def build_extension(self, ext): try: ext.sources = list(map(ensure_source, ext.sources)) except NoCython: print("WARNING") print("Cython is required for building extension from checkout.") print("Install Cython >= 0.16 or install msgpack from PyPI.") print("Falling back to pure Python implementation.") return try: return build_ext.build_extension(self, ext) except Exception as e: print("WARNING: Failed to compile extensiom modules.") print("msgpack uses fallback pure python implementation.") print(e) exec(open('msgpack/_version.py').read()) version_str = '.'.join(str(x) for x in version[:3]) if len(version) > 3 and version[3] != 'final': version_str += version[3] # take care of extension modules. 
if have_cython: class Sdist(sdist): def __init__(self, *args, **kwargs): for src in glob('msgpack/*.pyx'): cythonize(src) sdist.__init__(self, *args, **kwargs) else: Sdist = sdist libraries = [] if sys.platform == 'win32': libraries.append('ws2_32') if sys.byteorder == 'big': macros = [('__BIG_ENDIAN__', '1')] else: macros = [('__LITTLE_ENDIAN__', '1')] ext_modules = [] if not hasattr(sys, 'pypy_version_info'): ext_modules.append(Extension('msgpack._packer', sources=['msgpack/_packer.cpp'], libraries=libraries, include_dirs=['.'], define_macros=macros, )) ext_modules.append(Extension('msgpack._unpacker', sources=['msgpack/_unpacker.cpp'], libraries=libraries, include_dirs=['.'], define_macros=macros, )) del libraries, macros desc = 'MessagePack (de)serializer.' f = open('README.rst') long_desc = f.read() f.close() del f setup(name='msgpack-python', author='INADA Naoki', author_email='songofacandy@gmail.com', version=version_str, cmdclass={'build_ext': BuildExt, 'sdist': Sdist}, ext_modules=ext_modules, packages=['msgpack'], description=desc, long_description=long_desc, url='http://msgpack.org/', download_url='http://pypi.python.org/pypi/msgpack/', classifiers=[ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', ] ) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/upload_windows.bat0000644000000000000000000000040413306562377025237 0ustar rootrootc:\Python27\python setup.py bdist_egg bdist_wininst upload c:\Python33\python setup.py bdist_egg bdist_wininst upload c:\Python27_amd64\python setup.py bdist_egg bdist_wininst upload c:\Python33_amd64\python setup.py bdist_egg bdist_wininst upload tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/README.rst0000644000000000000000000001623213306562377023206 0ustar rootroot======================= MessagePack for Python ======================= :author: INADA Naoki :version: 0.4.6 :date: 2015-03-13 
.. image:: https://secure.travis-ci.org/msgpack/msgpack-python.png :target: https://travis-ci.org/#!/msgpack/msgpack-python .. image:: https://pypip.in/version/msgpack-python/badge.svg :target: https://pypi.python.org/pypi/msgpack-python/ :alt: Latest Version What's this ------------ `MessagePack `_ is a fast, compact binary serialization format, suitable for similar data to JSON. This package provides CPython bindings for reading and writing MessagePack data. Install --------- :: $ pip install msgpack-python PyPy ^^^^^ msgpack-python provides pure python implementation. PyPy can use this. Windows ^^^^^^^ When you can't use binary distribution, you need to install Visual Studio or Windows SDK on Windows. (NOTE: Visual C++ Express 2010 doesn't support amd64. Windows SDK is recommended way to build amd64 msgpack without any fee.) Without extension, using pure python implementation on CPython runs slowly. Notes ----- Note for msgpack 2.0 support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ msgpack 2.0 adds two types: *bin* and *ext*. *raw* was bytes or string type like Python 2's ``str``. To distinguish string and bytes, msgpack 2.0 adds *bin*. It is non-string binary like Python 3's ``bytes``. To use *bin* type for packing ``bytes``, pass ``use_bin_type=True`` to packer argument. .. code-block:: pycon >>> import msgpack >>> packed = msgpack.packb([b'spam', u'egg'], use_bin_type=True) >>> msgpack.unpackb(packed, encoding='utf-8') ['spam', u'egg'] You shoud use it carefully. When you use ``use_bin_type=True``, packed binary can be unpacked by unpackers supporting msgpack-2.0. To use *ext* type, pass ``msgpack.ExtType`` object to packer. .. code-block:: pycon >>> import msgpack >>> packed = msgpack.packb(msgpack.ExtType(42, b'xyzzy')) >>> msgpack.unpackb(packed) ExtType(code=42, data='xyzzy') You can use it with ``default`` and ``ext_hook``. See below. Note for msgpack 0.2.x users ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The msgpack 0.3 have some incompatible changes. 
The default value of ``use_list`` keyword argument is ``True`` from 0.3. You should pass the argument explicitly for backward compatibility. `Unpacker.unpack()` and some unpack methods now raises `OutOfData` instead of `StopIteration`. `StopIteration` is used for iterator protocol only. How to use ----------- One-shot pack & unpack ^^^^^^^^^^^^^^^^^^^^^^ Use ``packb`` for packing and ``unpackb`` for unpacking. msgpack provides ``dumps`` and ``loads`` as alias for compatibility with ``json`` and ``pickle``. ``pack`` and ``dump`` packs to file-like object. ``unpack`` and ``load`` unpacks from file-like object. .. code-block:: pycon >>> import msgpack >>> msgpack.packb([1, 2, 3]) '\x93\x01\x02\x03' >>> msgpack.unpackb(_) [1, 2, 3] ``unpack`` unpacks msgpack's array to Python's list, but can unpack to tuple: .. code-block:: pycon >>> msgpack.unpackb(b'\x93\x01\x02\x03', use_list=False) (1, 2, 3) You should always pass the ``use_list`` keyword argument. See performance issues relating to `use_list option`_ below. Read the docstring for other options. Streaming unpacking ^^^^^^^^^^^^^^^^^^^ ``Unpacker`` is a "streaming unpacker". It unpacks multiple objects from one stream (or from bytes provided through its ``feed`` method). .. code-block:: python import msgpack from io import BytesIO buf = BytesIO() for i in range(100): buf.write(msgpack.packb(range(i))) buf.seek(0) unpacker = msgpack.Unpacker(buf) for unpacked in unpacker: print unpacked Packing/unpacking of custom data type ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It is also possible to pack/unpack custom data types. Here is an example for ``datetime.datetime``. .. 
code-block:: python import datetime import msgpack useful_dict = { "id": 1, "created": datetime.datetime.now(), } def decode_datetime(obj): if b'__datetime__' in obj: obj = datetime.datetime.strptime(obj["as_str"], "%Y%m%dT%H:%M:%S.%f") return obj def encode_datetime(obj): if isinstance(obj, datetime.datetime): return {'__datetime__': True, 'as_str': obj.strftime("%Y%m%dT%H:%M:%S.%f")} return obj packed_dict = msgpack.packb(useful_dict, default=encode_datetime) this_dict_again = msgpack.unpackb(packed_dict, object_hook=decode_datetime) ``Unpacker``'s ``object_hook`` callback receives a dict; the ``object_pairs_hook`` callback may instead be used to receive a list of key-value pairs. Extended types ^^^^^^^^^^^^^^^ It is also possible to pack/unpack custom data types using the msgpack 2.0 feature. .. code-block:: pycon >>> import msgpack >>> import array >>> def default(obj): ... if isinstance(obj, array.array) and obj.typecode == 'd': ... return msgpack.ExtType(42, obj.tostring()) ... raise TypeError("Unknown type: %r" % (obj,)) ... >>> def ext_hook(code, data): ... if code == 42: ... a = array.array('d') ... a.fromstring(data) ... return a ... return ExtType(code, data) ... >>> data = array.array('d', [1.2, 3.4]) >>> packed = msgpack.packb(data, default=default) >>> unpacked = msgpack.unpackb(packed, ext_hook=ext_hook) >>> data == unpacked True Advanced unpacking control ^^^^^^^^^^^^^^^^^^^^^^^^^^ As an alternative to iteration, ``Unpacker`` objects provide ``unpack``, ``skip``, ``read_array_header`` and ``read_map_header`` methods. The former two read an entire message from the stream, respectively deserialising and returning the result, or ignoring it. The latter two methods return the number of elements in the upcoming container, so that each element in an array, or key-value pair in a map, can be unpacked or skipped individually. Each of these methods may optionally write the packed data it reads to a callback function: .. 
code-block:: python from io import BytesIO def distribute(unpacker, get_worker): nelems = unpacker.read_map_header() for i in range(nelems): # Select a worker for the given key key = unpacker.unpack() worker = get_worker(key) # Send the value as a packed message to worker bytestream = BytesIO() unpacker.skip(bytestream.write) worker.send(bytestream.getvalue()) Note about performance ------------------------ GC ^^ CPython's GC starts when growing allocated object. This means unpacking may cause useless GC. You can use ``gc.disable()`` when unpacking large message. use_list option ^^^^^^^^^^^^^^^^ List is the default sequence type of Python. But tuple is lighter than list. You can use ``use_list=False`` while unpacking when performance is important. Python's dict can't use list as key and MessagePack allows array for key of mapping. ``use_list=False`` allows unpacking such message. Another way to unpacking such object is using ``object_pairs_hook``. Test ---- MessagePack uses `pytest` for testing. Run test with following command: $ py.test .. 
vim: filetype=rst tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/benchmark/0000755000000000000000000000000013306562377023445 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/benchmark/benchmark.py0000644000000000000000000000166013306562377025754 0ustar rootrootfrom msgpack import fallback try: from msgpack import _unpacker, _packer has_ext = True except ImportError: has_ext = False import timeit def profile(name, func): times = timeit.repeat(func, number=1000, repeat=4) times = ', '.join(["%8f" % t for t in times]) print("%-30s %40s" % (name, times)) def simple(name, data): if has_ext: packer = _packer.Packer() profile("packing %s (ext)" % name, lambda: packer.pack(data)) packer = fallback.Packer() profile('packing %s (fallback)' % name, lambda: packer.pack(data)) data = packer.pack(data) if has_ext: profile('unpacking %s (ext)' % name, lambda: _unpacker.unpackb(data)) profile('unpacking %s (fallback)' % name, lambda: fallback.unpackb(data)) def main(): simple("integers", [7]*10000) simple("bytes", [b'x'*n for n in range(100)]*10) simple("lists", [[]]*10000) simple("dicts", [{}]*10000) main() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/docs/0000755000000000000000000000000013306562377022443 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/docs/Makefile0000644000000000000000000001307413306562377024110 0ustar rootroot# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -E -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/msgpack.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/msgpack.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/msgpack" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/msgpack" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." 
info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." serve: html cd _build/html && python3.3 -m http.server zip: html cd _build/html && zip -r ../../../msgpack-doc.zip . tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/docs/conf.py0000644000000000000000000002143413306562377023746 0ustar rootroot# -*- coding: utf-8 -*- # # msgpack documentation build configuration file, created by # sphinx-quickstart on Sun Feb 24 14:20:50 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
#sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'msgpack' copyright = u'2013, INADA Naoki' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # The full version, including alpha/beta/rc tags. version = release = '0.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' today_fmt = "%Y-%m-%d" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'msgpackdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'msgpack.tex', u'msgpack Documentation', u'Author', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'msgpack', u'msgpack Documentation', [u'Author'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'msgpack', u'msgpack Documentation', u'Author', 'msgpack', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'msgpack' epub_author = u'Author' epub_publisher = u'Author' epub_copyright = u'2013, Author' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. 
#epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/docs/api.rst0000644000000000000000000000124213306562377023745 0ustar rootrootAPI reference ============= .. module:: msgpack .. autofunction:: pack :func:`dump` is alias for :func:`pack` .. autofunction:: packb :func:`dumps` is alias for :func:`packb` .. autofunction:: unpack :func:`load` is alias for :func:`unpack` .. autofunction:: unpackb :func:`loads` is alias for :func:`unpackb` .. autoclass:: Packer :members: .. autoclass:: Unpacker :members: .. autoclass:: ExtType exceptions ----------- These exceptions are accessible via `msgpack` package. (For example, `msgpack.OutOfData` is shortcut for `msgpack.exceptions.OutOfData`) .. automodule:: msgpack.exceptions :members: :undoc-members: :show-inheritance: tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/docs/index.rst0000644000000000000000000000024713306562377024307 0ustar rootrootmsgpack document ================== `MessagePack `_ is a efficient format for inter language data exchange. .. toctree:: :maxdepth: 1 api tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/.travis.yml0000644000000000000000000000115113306562377023622 0ustar rootrootsudo: false cache: directories: - wheelhouse language: python python: - 2.7 branches: only: - master env: - TOXENV=py26-c,py27-c - TOXENV=py32-c,py33-c,py34-c - TOXENV=py26-pure,py27-pure - TOXENV=py32-pure,py33-pure,py34-pure - TOXENV=pypy-pure,pypy3-pure install: - pip install wheel tox - ls -la wheelhouse - if [ ! 
-f wheelhouse/Cython-0.22-cp27-none-linux_x86_64.whl ] ; then pip wheel cython==0.22 ; fi - pip install wheelhouse/Cython-0.22-cp27-none-linux_x86_64.whl - cython --cplus msgpack/_packer.pyx msgpack/_unpacker.pyx script: tox tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/build_windows.bat0000644000000000000000000000155013306562377025055 0ustar rootrootset MSSdk=1 set DISTUTILS_USE_SDK=1 rem Python27 x86 rem call "C:\Program Files\Microsoft SDKs\Windows\v6.1\Bin\SetEnv.cmd" /Release /x86 /xp call "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars32.bat" c:\Python27\python setup.py build_ext -f build install pause rem Python27 amd64 rem call "C:\Program Files\Microsoft SDKs\Windows\v6.1\Bin\SetEnv.cmd" /Release /x64 /xp call "C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars64.bat" c:\Python27_amd64\python setup.py build_ext -f build install pause rem Python33 x86 call "C:\Program Files\Microsoft SDKs\Windows\v7.1\bin\SetEnv.cmd" /Release /x86 /xp c:\Python33\python setup.py build_ext -f build install pause rem Python33 amd64 call "C:\Program Files\Microsoft SDKs\Windows\v7.1\bin\SetEnv.cmd" /Release /x64 /xp c:\Python33_amd64\python setup.py build_ext -f build install pause tarantool_1.9.1.26.g63eb81e3c/test-run/lib/msgpack-python/.gitignore0000644000000000000000000000014113306562377023477 0ustar rootrootMANIFEST build/* dist/* .tox *.pyc *.pyo *.so *~ msgpack/__version__.py msgpack/*.cpp *.egg-info tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/0000755000000000000000000000000013306562377022071 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/Makefile0000644000000000000000000000066113306562377023534 0ustar rootroottest: python setup.py test coverage: python -m coverage run -p --source=. 
setup.py test cov-html: python -m coverage html -i cov-report: python -m coverage report dist: python setup.py sdist --format=gztar,bztar,zip dist-upload: python setup.py sdist --format=gztar,bztar,zip upload dist-upload-2: python setup.py sdist --format=ztar upload docs: python setup.py build_sphinx docs-upload: docs python setup.py upload_sphinx tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/test.sh0000644000000000000000000000047313306562377023410 0ustar rootrootcurl http://tarantool.org/dist/public.key | sudo apt-key add - echo "deb http://tarantool.org/dist/master/ubuntu/ `lsb_release -c -s` main" | sudo tee -a /etc/apt/sources.list.d/tarantool.list sudo apt-get update > /dev/null sudo apt-get -q -y install tarantool pip install -r requirements.txt python setup.py test tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/0000755000000000000000000000000013306562377022636 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/index.ru.rst0000644000000000000000000000253513306562377025131 0ustar rootroot.. encoding: utf-8 Клиентская библиотека для базы данных Tarantool =============================================== :Версия: |version| .. sidebar:: Загрузить * `PyPI`_ * `GitHub`_ **Установить** .. code-block:: none $ pip install tarantool `Tarantool`_ – это очень быстрая in-memory база данных "ключ-значение". Изначально разработана в `Mail.Ru`_ и выпущена под лицензией `BSD`_. Документация ------------ .. toctree:: :maxdepth: 1 quick-start.ru guide.ru .. seealso:: `Tarantool/Box User Guide`_ Справочник по API ----------------- .. toctree:: :maxdepth: 2 api/module-tarantool.rst api/class-connection.rst api/class-space.rst api/class-response.rst .. Indices and tables .. ================== .. .. * :ref:`genindex` .. * :ref:`modindex` .. * :ref:`search` .. _`Tarantool`: .. _`Tarantool homepage`: http://tarantool.org .. _`Tarantool/Box User Guide`: http://tarantool.org/tarantool_user_guide.html .. 
_`Mail.Ru`: http://mail.ru .. _`BSD`: .. _`BSD license`: http://www.gnu.org/licenses/license-list.html#ModifiedBSD .. _`PyPI`: http://pypi.python.org/pypi/tarantool .. _`GitHub`: https://github.com/coxx/tarantool-python tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/api/0000755000000000000000000000000013306562377023407 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/api/class-response.rst0000644000000000000000000000022313306562377027077 0ustar rootroot .. currentmodule:: tarantool.response class :class:`Response` ------------------------- .. autoclass:: Response :members: :undoc-members: tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/api/module-tarantool.rst0000644000000000000000000000016113306562377027425 0ustar rootrootmodule :py:mod:`tarantool` ========================== .. automodule:: tarantool :members: :undoc-members: tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/api/class-space.rst0000644000000000000000000000022513306562377026336 0ustar rootroot .. currentmodule:: tarantool.space class :class:`Space` -------------------- .. autoclass:: tarantool.space.Space :members: :undoc-members: tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/api/class-connection.rst0000644000000000000000000000030713306562377027403 0ustar rootroot .. currentmodule:: tarantool.connection class :class:`Connection` ------------------------- .. autoclass:: Connection :members: close, ping, space .. automethod:: call(func_name, *args) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/guide.en.rst0000644000000000000000000002300213306562377025063 0ustar rootroot.. encoding: utf-8 Developer's guide ================= Basic concepts -------------- Spaces ^^^^^^ Spaces is a collections of tuples. Usually, tuples in one space represent objects of the same type, although this is not necessary. .. note:: The analogue of spaces is tables in traditional (SQL) databases. 
Spaces have integer identifiers defined in the server configuration. To access the space as a named object it is possible to use the method :meth:`Connection.space() ` and an instance of :class:`~tarantool.space.Space`. Example:: >>> customer = connection.space(0) >>> customer.insert(('FFFF', 'Foxtrot')) Field types ^^^^^^^^^^^ Three field types are supported in Tarantool: ``STR``, ``NUM`` and ``NUM64``. These types are used only for index configuration but not saved in tuple's data and not transferred between the client and server. Thus, from the client point of view, fields are raw byte arrays without explicitly definde types. It is much easier to use native types for python developer: ``int``, ``long``, ``unicode`` (``int`` and ``str`` for Python 3.x). For raw binary data ``bytes`` should be used (in this case the type casting is not performed). Tarantool data types corresponds to the following Python types: • ``RAW`` - ``bytes`` • ``STR`` - ``unicode`` (``str`` for Python 3.x) • ``NUM`` - ``int`` • ``NUM64`` - ``int`` or ``long`` (``int`` for Python 3.x) Please define spaces schema to enable automatic type casting: >>> import tarantool >>> schema = { 0: { # Space description 'name': 'users', # Space name 'default_type': tarantool.STR, # Type that used to decode fields that are not listed below 'fields': { 0: ('numfield', tarantool.NUM), # (field name, field type) 1: ('num64field', tarantool.NUM64), 2: ('strfield', tarantool.STR), #2: { 'name': 'strfield', 'type': tarantool.STR }, # Alternative syntax #2: tarantool.STR # Alternative syntax }, 'indexes': { 0: ('pk', [0]), # (name, [field_no]) #0: { 'name': 'pk', 'fields': [0]}, # Alternative syntax #0: [0], # Alternative syntax } } } >>> connection = tarantool.connect(host = 'localhost', port=33013, schema = schema) >>> demo = connection.space('users') >>> demo.insert((0, 12, u'this is unicode string')) >>> demo.select(0) [(0, 12, u'this is unicode string')] As you can see, original "raw" fields were casted to 
native types as defined in the schema. Tarantool's tuple can contain any number of fields. If some fields are not defined then ``default_type`` will be used. To prevent implicit type casting for strings use ``RAW`` type. Raw byte fields should be used if the application uses binary data (eg, images or python objects packed with ``picke``). You can also specify schema for CALL results: >>> ... # Copy schema decription from 'users' space >>> connection.call("box.select", '0', '0', 0L, space_name='users'); [(0, 12, u'this is unicode string')] # Provide schema description explicitly >>> field_defs = [('numfield', tarantool.NUM), ('num64field', tarantool.NUM)] >>> connection.call("box.select", '0', '1', 184L, field_defs = field_defs, default_type = tarantool.STR); [(0, 12, u'this is unicode string')] .. note:: Python 2.6 adds :class:`bytes` as a synonym for the :class:`str` type, and it also supports the ``b''`` notation. .. note:: **utf-8** allways used for type conversion between ``unicode`` and ``bytes`` Request response ^^^^^^^^^^^^^^^^ Requests (:meth:`insert() `, :meth:`delete() `, :meth:`update() `, :meth:`select() `) return a :class:`~tarantool.response.Response` instance. Class :class:`~tarantool.response.Response` inherited from `list`, so in fact response can be used as a list of a tuples. In addition :class:`~tarantool.response.Response` instance has the ``rowcount`` attribute. The value of ``rowcount`` equals to the number of records affected by the request. For example for :meth:`delete() ` request ``rowcount`` is equals to ``1`` if record was deleted. Connect to the server --------------------- To connect to the server it is required to use :meth:`tarantool.connect` method. It returns an :class:`~tarantool.connection.Connection` instance. 
Example:: >>> import tarantool >>> connection = tarantool.connect("localhost", 33013) >>> type(connection) Data manipulation ----------------- There are four basic operations supported by Tarantool: **insert**, **delete**, **update** and **select**. .. Note:: НЕОБХОДИМО ОБЪЯСНИТЬ КАКИЕ ДАННЫЕ ИСПОЛЬЗУЮТСЯ ДЛЯ ПРИМЕРА Inserting and replacing records ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To insert or replace records :meth:`Space.insert() ` method should be used:: >>> user.insert((user_id, email, int(time.time()))) The first element of the tuple is always its unique primary key. If an entry with the same key already exists, it will be replaced without any warning or error message. .. note:: In case of ``insert`` request ``Response.rowcount`` is always equals to ``1`` Deleting Records ^^^^^^^^^^^^^^^^ To delete records :meth:`Space.delete() ` method should be used:: >>> user.delete(primary_key) .. note:: If the record was deleted ``Response.rowcount`` equals to ``1``. If the record was not found ``Response.rowcount`` equals to ``0``. Updating Records ^^^^^^^^^^^^^^^^ *Update* request in Tarantool allows to simultaneous and atomic update multiple fields of a tuple. To update records :meth:`Space.update() ` method should be used. Example:: >>> user.update(1001, [('=', 1, 'John'), ('=', 2, 'Smith')]) In this example new values for fields ``1`` and ``2`` are assigned. :meth:`Space.update() ` method allows to change multiple fields of the tuple at a time. The following update operations are supported by Tarantool: • ``'='`` – assign new value to the field • ``'+'`` – add argument to the field (*both arguments are treated as signed 32-bit ints*) • ``'^'`` – bitwise AND (*only for 32-bit integers*) • ``'|'`` – bitwise XOR (*only for 32-bit integers*) • ``'&'`` – bitwise OR (*only for 32-bit integers*) • ``'splice'`` – implementation of `Perl splice `_ function .. note:: The zero (i.e. [0]) field of the tuple can not be updated, because it is the primary key .. 
seealso:: See :meth:`Space.update() ` documentation for details .. warning:: ``'splice'`` operation is not implemented yet Selecting Records ^^^^^^^^^^^^^^^^^ To select records :meth:`Space.select() ` method should be used. *SELECT* query can return one or many records. .. rubric:: Select by primary key Select a record using its primary key ``3800``:: >>> world.select(3800) [(3800, u'USA', u'Texas', u'Dallas', 1188580)] .. rubric:: Select using secondary index :: >>> world.select('USA', index=1) [(3796, u'USA', u'Texas', u'Houston', 1953631), (3801, u'USA', u'Texas', u'Huston', 10000), (3802, u'USA', u'California', u'Los Angeles', 10000), (3805, u'USA', u'California', u'San Francisco', 776733), (3800, u'USA', u'Texas', u'Dallas', 1188580), (3794, u'USA', u'California', u'Los Angeles', 3694820)] Argument ``index = 1`` indicates that secondary index (``1``) should be used. The primary key (``index=0``) is used by default. .. note:: Secondary indexes must be explicitly declared in the server configuration .. rubric:: Select records using several keys .. note:: This conforms to ``where key in (k1, k2, k3...)`` Select records with primary key values ``3800``, ``3805`` and ``3796``:: >>> world.select([3800, 3805, 3796]) [(3800, u'USA', u'Texas', u'Dallas', 1188580), (3805, u'USA', u'California', u'San Francisco', 776733), (3796, u'USA', u'Texas', u'Houston', 1953631)] .. rubric:: Retrieve a record by using a composite index Select data on cities in Texas:: >>> world.select([('USA', 'Texas')], index=1) [(3800, u'USA', u'Texas', u'Dallas', 1188580), (3796, u'USA', u'Texas', u'Houston', 1953631)] .. rubric:: Select records explicitly specifying field types Tarantool has no strict schema so all fields are raw binary byte arrays. You can specify field types in the ``schema`` parameter to a connection. Call server-side functions -------------------------- A server-side function written in Lua can select and modify data, access configuration and perform administrative tasks. 
To call stored function :meth:`Connection.call() ` method should be used. Also, this method has an alias :meth:`Space.call() `. Example:: >>> server.call("box.select_range", (1, 3, 2, 'AAAA')) [(3800, u'USA', u'Texas', u'Dallas', 1188580), (3794, u'USA', u'California', u'Los Angeles', 3694820)] .. seealso:: Tarantool/Box User Guide » `Writing stored procedures in Lua `_ tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/_static/0000755000000000000000000000000013306562377024264 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/_static/tarantool.css0000644000000000000000000000045713306562377027007 0ustar rootroot@import url("default.css"); cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; } dl.attribute, dl.class, dl.method { margin-top: 2em; margin-bottom: 2em; } tt { font-size: 100%; } th { background-color: #fff; } tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/conf.py0000644000000000000000000002055713306562377024146 0ustar rootroot# -*- coding: utf-8 -*- # # Tarantool python client library documentation build configuration file, created by # sphinx-quickstart on Tue Nov 29 06:29:57 2011. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath('.')), "src")) # Read package version without importing it for line in open(os.path.join(os.path.dirname(os.path.abspath('.')), "tarantool", "__init__.py")): if line.startswith("__version__"): exec line break # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Tarantool python client library' copyright = u'2011, Konstantin Cherkasoff' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. 
#default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- html_style = 'tarantool.css' # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. 
#html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Tarantoolpythonclientlibrarydoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ ('index', 'Tarantoolpythonclientlibrary.tex', u'Tarantool python client library Documentation', u'Konstantin Cherkasoff', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'tarantoolpythonclientlibrary', u'Tarantool python client library Documentation', [u'Konstantin Cherkasoff'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Tarantoolpythonclientlibrary', u'Tarantool python client library Documentation', u'Konstantin Cherkasoff', 'Tarantoolpythonclientlibrary', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = {'python':('http://docs.python.org/', None)} autoclass_content = "both" tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/quick-start.ru.rst0000644000000000000000000000616113306562377026270 0ustar rootrootКраткое руководство =================== Подключение к серверу --------------------- Создаем подключение к серверу:: >>> import tarantool >>> server = tarantool.connect("localhost", 33013) Создаем объект доступа к пространству ------------------------------------- Экземпляр :class:`~tarantool.space.Space` - это именованный объект для доступа к пространству ключей. Создаем объект ``demo``, который будет использоваться для доступа к пространству ``0``:: >>> demo = server.space(0) Все последующие операции с пространством ``0`` выполняются при помощи методов объекта ``demo``. Работа с данными ---------------- Select ^^^^^^ Извлечь одну запись с id ``'AAAA'`` из пространства ``demo`` по первичному ключу (нулевой индекс):: >>> demo.select('AAAA') Извлечь несколько записей используя первичный индекс:: >>> demo.select(['AAAA', 'BBBB', 'CCCC']) [('AAAA', 'Alpha'), ('BBBB', 'Bravo'), ('CCCC', 'Charlie')] Insert ^^^^^^ Вставить кортеж ``('DDDD', 'Delta')`` в пространство ``demo``:: >>> demo.insert(('DDDD', 'Delta')) Первый элемент является первичным ключом для данного кортежа. Update ^^^^^^ Обновить запись с id ``'DDDD'``, поместив значение ``'Denver'`` в поле ``1``:: >>> demo.update('DDDD', [(1, '=', 'Denver')]) [('DDDD', 'Denver')] Для поиска записи :meth:`~tarantool.space.Space.update` всгеда использует первичный индекс. Номера полей начинаются с нуля. Таким образом, поле ``0`` - это первый элемент кортежа. Delete ^^^^^^ Удалить одиночную запись с идентификатором ``'DDDD'``:: >>> demo.delete('DDDD') [('DDDD', 'Denver')] Для поиска записи :meth:`~tarantool.space.Space.delete` всгеда использует первичный индекс. 
Вызов хранимых функций ---------------------- Для вызова хранимых функций можно использовать метод :meth:`Connection.call() `:: >>> server.call("box.select_range", (0, 0, 2, 'AAAA')) [('AAAA', 'Alpha'), ('BBBB', 'Bravo')] Тоже самое можно получить при помощи метода :meth:`Space.call() `:: >>> demo.call("box.select_range", (0, 0, 2, 'AAAA')) [('AAAA', 'Alpha'), ('BBBB', 'Bravo')] Метод :meth:`Space.call() ` - это просто псевдоним для :meth:`Connection.call() ` tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/quick-start.en.rst0000644000000000000000000000432413306562377026243 0ustar rootrootQuick start =========== Connecting to the server ------------------------ Create connection to the server:: >>> import tarantool >>> server = tarantool.connect("localhost", 33013) Creating a space instance ------------------------- Instance of :class:`~tarantool.space.Space` is a named object to access the key space. Create `` demo `` object which will be used to access the space `` 0 `` :: >>> demo = server.space(0) All subsequent operations with space ``0`` performed using methods of the ``demo``. Data Manipulation ----------------- Select ^^^^^^ Select one single record with id ``'AAAA'`` from the space ``demo`` using primary key (index zero):: >>> demo.select('AAAA') Select several records using primary index:: >>> demo.select(['AAAA', 'BBBB', 'CCCC']) [('AAAA', 'Alpha'), ('BBBB', 'Bravo'), ('CCCC', 'Charlie')] Insert ^^^^^^ Insert tuple ``('DDDD', 'Delta')`` into the space ``demo``:: >>> demo.insert(('DDDD', 'Delta')) The first element is the primary key for the tuple. Update ^^^^^^ Update the record with id ``'DDDD'`` placing the value ``'Denver'`` into the field ``1``:: >>> demo.update('DDDD', [(1, '=', 'Denver')]) [('DDDD', 'Denver')] To find the record :meth:`~tarantool.space.Space.update` always uses the primary index. Fields numbers are starting from zero. So field ``0`` is the first element in the tuple. 
Delete ^^^^^^ Delete single record identified by id ``'DDDD'``:: >>> demo.delete('DDDD') [('DDDD', 'Denver')] To find the record :meth:`~tarantool.space.Space.delete` always uses the primary index. Call server-side functions -------------------------- To call stored function method :meth:`Connection.call() ` can be used:: >>> server.call("box.select_range", (0, 0, 2, 'AAAA')) [('AAAA', 'Alpha'), ('BBBB', 'Bravo')] The same can be done using :meth:`Space.call() ` method:: >>> demo = server.space(0) >>> demo.call("box.select_range", (0, 0, 2, 'AAAA')) [('AAAA', 'Alpha'), ('BBBB', 'Bravo')] Method :meth:`Space.call() ` is just an alias for :meth:`Connection.call() ` tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/index.rst0000644000000000000000000000222413306562377024477 0ustar rootroot.. encoding: utf-8 Python client library for Tarantool Database ============================================ :Version: |version| .. sidebar:: Download * `PyPI`_ * `GitHub`_ **Install** .. code-block:: none $ pip install tarantool `Tarantool`_ is a damn fast key/value data store originally designed by `Mail.Ru`_ and released under the terms of `BSD license`_. Documentation ------------- .. toctree:: :maxdepth: 1 quick-start.en guide.en .. seealso:: `Tarantool/Box User Guide`_ API Reference ------------- .. toctree:: :maxdepth: 2 api/module-tarantool.rst api/class-connection.rst api/class-space.rst api/class-response.rst .. Indices and tables .. ================== .. .. * :ref:`genindex` .. * :ref:`modindex` .. * :ref:`search` .. _`Tarantool`: .. _`Tarantool homepage`: http://tarantool.org .. _`Tarantool/Box User Guide`: http://tarantool.org/doc/book/index.html .. _`Mail.Ru`: http://mail.ru .. _`BSD`: .. _`BSD license`: http://www.gnu.org/licenses/license-list.html#ModifiedBSD .. _`PyPI`: http://pypi.python.org/pypi/tarantool .. 
_`GitHub`: https://github.com/tarantool/tarantool-python tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/doc/guide.ru.rst0000644000000000000000000003232713306562377025121 0ustar rootroot.. encoding: utf-8 Руководство разработчика ======================== Базовые понятия --------------- Пространства ^^^^^^^^^^^^ Пространства в Tarantool — это коллекции кортежей. Как правило, кортежи в пространстве представляют собой объекты одного типа, хотя это и не обязательно. .. note:: Аналог пространства — это таблица в традиционных (SQL) базах данных. Пространства имеют целочисленные идентификаторы, которые задаются в конфигурации сервера. Чтобы обращаться к пространству, как к именованному объекту, можно использовать метод :meth:`Connection.space() ` и экземпляр класса :class:`~tarantool.space.Space`. Пример:: >>> customer = connection.space(0) >>> customer.insert(('FFFF', 'Foxtrot')) Типы полей ^^^^^^^^^^ Tarantool поддерживает три типа полей: ``STR``, ``NUM`` и ``NUM64``. Эти типы используются только при конфигурации индексов, но не сохраняются с данными кортежа и не передаются между сервером и клиентом. Таким образом, с точки зрения клиента, поля кортежей являются просто байтовыми массивами без явно заданных типов. Для разработчика на Python намного удобнее использовать родные типы: ``int``, ``long``, ``unicode`` (для Python 3.x - ``int`` и ``str``). Для бинарных данных следует использовать тип ``bytes`` (в этом случае приведение типов не производится). 
Типы данных Tarantool соответствуют следующим типам Python: • ``RAW`` - ``bytes`` • ``STR`` - ``unicode`` (``str`` for Python 3.x) • ``NUM`` - ``int`` • ``NUM64`` - ``int`` or ``long`` (``int`` for Python 3.x) Для автоматического приведения типов необходимо объявить схему: >>> import tarantool >>> schema = { 0: { # Space description 'name': 'users', # Space name 'default_type': tarantool.STR, # Type that used to decode fields that are not listed below 'fields': { 0: ('user_id', tarantool.NUM), # (field name, field type) 1: ('num64field', tarantool.NUM64), 2: ('strfield', tarantool.STR), #2: { 'name': 'strfield', 'type': tarantool.STR }, # Alternative syntax #2: tarantool.STR # Alternative syntax }, 'indexes': { 0: ('pk', [0]), # (name, [field_no]) #0: { 'name': 'pk', 'fields': [0]}, # Alternative syntax #0: [0], # Alternative syntax } } } >>> connection = tarantool.connect(host = 'localhost', port=33013, schema = schema) >>> demo = connection.space('users') >>> demo.insert((0, 12, u'this is unicode string')) >>> demo.select(0) [(0, 12, u'this is unicode string')] Как видно из примера, все значения были преобразованы в Python-типы в соответствии со схемой. Кортеж Tarantool может содержать произвольное количество полей. Если какие-то поля не объявлены в схеме, то ``default_type`` будет использован для конвертации. Поля с "сырыми" байтами следует использовать, если приложение работает с двоичными данными (например, изображения или python-объекты, сохраненные с помощью ``picke``). Возможно также указать тип для CALL запросов: >>> ... # Copy schema decription from 'users' space >>> connection.call("box.select", '0', '0', 0L, space_name='users'); [(0, 12, u'this is unicode string')] # Provide schema description explicitly >>> field_defs = [('numfield', tarantool.NUM), ('num64field', tarantool.NUM)] >>> connection.call("box.select", '0', '1', 184L, field_defs = field_defs, default_type = tarantool.STR); [(0, 12, u'this is unicode string')] .. 
note:: Python 2.6 добавляет синоним :class:`bytes` к типу :class:`str` (также поддерживается синтаксис ``b''``). .. note:: Для преобразования между ``bytes`` и ``unicode`` всегда используется **utf-8** Результат запроса ^^^^^^^^^^^^^^^^^ Запросы (:meth:`insert() `, :meth:`delete() `, :meth:`update() `, :meth:`select() `) возвращают экземпляр класса :class:`~tarantool.response.Response`. Класс :class:`~tarantool.response.Response` унаследован от стандартного типа `list`, поэтому, по сути, результат всегда представляет собой список кортежей. Кроме того, у экземпляра :class:`~tarantool.response.Response` есть атрибут ``rowcount``. Этот атрибут содержит число записей, которые затронул запроc. Например, для запроса :meth:`delete() ` ``rowcount`` равен ``1``, если запись была удалена. Подключение к серверу --------------------- Для подключения к серверу следует использовать метод :meth:`tarantool.connect`. Он возвращает экземпляр класса :class:`~tarantool.connection.Connection`. Пример:: >>> import tarantool >>> connection = tarantool.connect("localhost", 33013) >>> type(connection) Работа с данными ---------------- Tarantool поддерживает четыре базовых операции: **insert**, **delete**, **update** и **select**. .. Note:: НЕОБХОДИМО ОБЪЯСНИТЬ КАКИЕ ДАННЫЕ ИСПОЛЬЗУЮТСЯ ДЛЯ ПРИМЕРА Добавление и замещение записей ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Для добавления и замещения записей следует использовать метод :meth:`Space.insert() `:: >>> user.insert((user_id, email, int(time.time()))) Первый элемент кортежа всегда является его уникальным первичным ключом. Если запись с таким ключом уже существует, она будет замещена без какого либо предупреждения или сообщения об ошибке. .. note:: Для :meth:`Space.insert() ` ``Response.rowcount`` всегда равен ``1``. Удаление записей ^^^^^^^^^^^^^^^^ Для удаления записей следует использовать метод :meth:`Space.delete() `:: >>> user.delete(primary_key) .. note:: ``Response.rowcount`` равен ``1``, если запись была удалена. 
Если запись не найдена, то ``Response.rowcount`` равен ``0``. Обновление записей ^^^^^^^^^^^^^^^^^^ Запрос *update* в Tarantool позволяет одновременно и атомарно обновить несколько полей одного кортежа. Для обновления записей следует использовать метод :meth:`Space.update() `. Пример:: >>> user.update(1001, [(1, '=', 'John'), (2, '=', 'Smith')]) В этом примере для полей ``1`` и ``2`` устанавливаются новые значения. Метод :meth:`Space.update() ` позволяет обновлять сразу несколько полей кортежа. Tarantool поддерживает следующие операции обновления: • ``'='`` – установить новое значение поля • ``'+'`` – прибавить аргумент к значению поля (*оба аргумента рассматриваются как знаковые 32-битные целые числа*) • ``'^'`` – битовый AND (*только для 32-битных полей*) • ``'|'`` – битовый XOR (*только для 32-битных полей*) • ``'&'`` – битовый OR (*только для 32-битных полей*) • ``'splice'`` – аналог функции `splice в Perl `_ .. note:: Нулевое (т.е. [0]) поле кортежа нельзя обновить, поскольку оно является первичным ключом .. seealso:: Подробности в документации по методу :meth:`Space.update() ` .. warning:: Операция ``'splice'`` пока не реализована Выборка записей ^^^^^^^^^^^^^^^ Для выборки записей следует использовать метод :meth:`Space.select() `. Запрос *SELECT* может возвращать одну или множество записей. .. rubric:: Запрос по первичному ключу Извлечь запись по её первичному ключу ``3800``:: >>> world.select(3800) [(3800, u'USA', u'Texas', u'Dallas', 1188580)] .. rubric:: Запрос по вторичному индексу :: >>> world.select('USA', index=1) [(3796, u'USA', u'Texas', u'Houston', 1953631), (3801, u'USA', u'Texas', u'Huston', 10000), (3802, u'USA', u'California', u'Los Angeles', 10000), (3805, u'USA', u'California', u'San Francisco', 776733), (3800, u'USA', u'Texas', u'Dallas', 1188580), (3794, u'USA', u'California', u'Los Angeles', 3694820)] Аргумент ``index=1`` указывает, что при запросе следует использовать индекс ``1``. По умолчанию используется первыичный ключ (``index=0``). 
.. note:: Вторичные индексы должны быть явно объявлены в конфигурации севера .. rubric:: Запрос записей по нескольким ключам .. note:: Это аналог ``where key in (k1, k2, k3...)`` Извлечь записи со значениями первичного ключа ``3800``, ``3805`` and ``3796``:: >>>> world.select([3800, 3805, 3796]) [(3800, u'USA', u'Texas', u'Dallas', 1188580), (3805, u'USA', u'California', u'San Francisco', 776733), (3796, u'USA', u'Texas', u'Houston', 1953631)] .. rubric:: Запрос по составному индексу Извлечь данные о городах в Техасе:: >>> world.select([('USA', 'Texas')], index=1) [(3800, u'USA', u'Texas', u'Dallas', 1188580), (3796, u'USA', u'Texas', u'Houston', 1953631)] .. rubric:: Запрос с явным указанием типов полей Tarantool не имеет строгой схемы и поля кортежей являются просто байтовыми массивами. Можно указать типа полей непосредственно в параметре ``schema`` для ```Connection`` Вызов хранимых функций ---------------------- Хранимые процедуры на Lua могут делать выборки и изменять данные, имеют доcтуп к конфигурации и могут выполнять административные функции. Для вызова хранимых функций следует использовать метод :meth:`Connection.call() `. Кроме того, у этого метода есть псевдоним: :meth:`Space.call() `. Пример:: >>> server.call("box.select_range", (1, 3, 2, 'AAAA')) [(3800, u'USA', u'Texas', u'Dallas', 1188580), (3794, u'USA', u'California', u'Los Angeles', 3694820)] .. 
seealso:: Tarantool/Box User Guide » `Writing stored procedures in Lua `_tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/MANIFEST.in0000644000000000000000000000013513306562377023626 0ustar rootrootinclude README.rst include README.txt include setup.py recursive-include src/tarantool/ *.py tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/setup.py0000755000000000000000000000450613306562377023613 0ustar rootroot#!/usr/bin/env python # -*- coding: utf-8 -*- import codecs import os import re try: from setuptools import setup except ImportError: from distutils.core import setup # Extra commands for documentation management cmdclass = {} command_options = {} # Build Sphinx documentation (html) # python setup.py build_sphinx # generates files into build/sphinx/html try: from sphinx.setup_command import BuildDoc cmdclass["build_sphinx"] = BuildDoc except ImportError: pass # Upload Sphinx documentation to PyPI (using Sphinx-PyPI-upload) # python setup.py build_sphinx # updates documentation at http://packages.python.org/tarantool/ try: from sphinx_pypi_upload import UploadDoc cmdclass["upload_sphinx"] = UploadDoc command_options["upload_sphinx"] = { 'upload_dir': ('setup.py', os.path.join("build", "sphinx", "html")) } except ImportError: pass # Test runner # python setup.py test try: from tests.setup_command import test cmdclass["test"] = test except ImportError: pass def read(*parts): filename = os.path.join(os.path.dirname(__file__), *parts) with codecs.open(filename, encoding='utf-8') as fp: return fp.read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search(r"""^__version__\s*=\s*(['"])(.+)\1""", version_file, re.M) if version_match: return version_match.group(2) raise RuntimeError("Unable to find version string.") setup( name="tarantool", packages=["tarantool"], package_dir={"tarantool": os.path.join("tarantool")}, version=find_version('tarantool', '__init__.py'), platforms=["all"], author="Konstantin 
Cherkasoff", author_email="k.cherkasoff@gmail.com", url="https://github.com/tarantool/tarantool-python", license="BSD", description="Python client library for Tarantool 1.6 Database", long_description=read('README.rst'), classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Database :: Front-Ends" ], cmdclass=cmdclass, command_options=command_options, install_requires=[ 'msgpack-python>=0.4', 'six', 'PyYAML>=3.10', ] ) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/rpm/0000755000000000000000000000000013306562377022667 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/rpm/tarantool-python.spec0000644000000000000000000000723313306562377027072 0ustar rootroot%define name tarantool-python Summary: Python client library for Tarantool Database Name: %{name} Version: 0.5.4 Release: 1%{?dist} Source0: tarantool-python-%{version}.tar.gz License: BSD Group: Development/Libraries BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot Prefix: %{_prefix} BuildArch: noarch Vendor: Konstantin Cherkasoff Requires: python-yaml python-msgpack Url: https://github.com/tarantool/tarantool-python %description Python driver for Tarantool 1.6 =============================== This package is a pure-python client library for `Tarantool`_. `Documentation`_ | `Downloads`_ | `PyPI`_ | `GitHub`_ | `Issue tracker`_ .. _`Documentation`: http://tarantool-python.readthedocs.org/en/latest/ .. _`Downloads`: http://pypi.python.org/pypi/tarantool#downloads .. _`PyPI`: http://pypi.python.org/pypi/tarantool .. _`GitHub`: https://github.com/tarantool/tarantool-python .. _`Issue tracker`: https://github.com/tarantool/tarantool-python/issues .. 
image:: https://travis-ci.org/tarantool/tarantool-python.svg?branch=master :target: https://travis-ci.org/tarantool/tarantool-python Download and Install -------------------- The recommended way to install ``tarantool`` package is using PIP ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For Tarantool version < 1.6.0 you must get ``0.3.*`` connector version:: $ pip install tarantool\<0.4 For later Tarantool use version ``0.5.*`` connector version:: $ pip install tarantool\>0.4 You can also download zip archive, unpack it and run ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :: $ python setup.py install To install development version of the package using pip ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For Tarantool version < 1.6.0 you must get ``stable`` branch:: $ pip install git+https://github.com/tarantool/tarantool-python.git@stable For later Tarantool use ``master`` branch:: $ pip install git+https://github.com/tarantool/tarantool-python.git@master -------------------------------------------------------------------------------- What is Tarantool? ------------------ `Tarantool`_ is a NoSQL database running inside a Lua program. It combines the network programming power of Node.JS with data persistency capabilities of Redis. It's open source, `BSD licensed`_. Features -------- * Lua packages for non-blocking I/O, fibers and HTTP * MsgPack data format and MsgPack based client-server protocol * Two data engines: * 100% in-memory with optional persistence * 2-level disk-based B-tree, to use with large data sets (powered by `Sophia`_) * secondary key and index iterators support (can be non-unique and composite) * multiple index types: HASH, BITSET, TREE * asynchronous master-master replication * authentication and access control See More ^^^^^^^^ * `Tarantool Homepage`_ * `Tarantool at Github`_ * `Tarantool User Guide`_ * `Client-server Protocol Specification`_ .. _`Tarantool`: .. _`Tarantool Database`: .. 
_`Tarantool Homepage`: http://tarantool.org .. _`Tarantool at Github`: https://github.com/tarantool/tarantool .. _`Tarantool User Guide`: http://tarantool.org/doc/user_guide.html .. _`Client-server protocol specification`: http://tarantool.org/doc/dev_guide/box-protocol.html .. _`Sophia`: http://sphia.org .. _`BSD licensed`: http://www.gnu.org/licenses/license-list.html#ModifiedBSD %prep %setup -q -n %{name}-%{version} %build python setup.py build %install python setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES %clean rm -rf $RPM_BUILD_ROOT %files -f INSTALLED_FILES %defattr(-,root,root) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/INSTALL0000644000000000000000000000052013306562377023117 0ustar rootrootInstalling tarantool-python =========================== The simplest (and recommended) way to install tarantool-python using `pip`:: $ pip install tarantool or `easy_install`:: $ easy_install tarantool You can also download a source tarball and install the package using distutils script:: # python setup.py install tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/README.rst0000644000000000000000000000570313306562377023565 0ustar rootrootPython driver for Tarantool 1.6 =============================== This package is a pure-python client library for `Tarantool`_. `Documentation`_ | `Downloads`_ | `PyPI`_ | `GitHub`_ | `Issue tracker`_ .. _`Documentation`: http://tarantool-python.readthedocs.org/en/latest/ .. _`Downloads`: http://pypi.python.org/pypi/tarantool#downloads .. _`PyPI`: http://pypi.python.org/pypi/tarantool .. _`GitHub`: https://github.com/tarantool/tarantool-python .. _`Issue tracker`: https://github.com/tarantool/tarantool-python/issues .. 
image:: https://travis-ci.org/tarantool/tarantool-python.svg?branch=master :target: https://travis-ci.org/tarantool/tarantool-python Download and Install -------------------- The recommended way to install ``tarantool`` package is using PIP ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For Tarantool version < 1.6.0 you must get ``0.3.*`` connector version:: $ pip install tarantool\<0.4 For later Tarantool use version ``0.5.*`` connector version:: $ pip install tarantool\>0.4 You can also download zip archive, unpack it and run ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. code-block:: console $ python setup.py install To install development version of the package using pip ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For Tarantool version < 1.6.0 you must get ``stable`` branch:: $ pip install git+https://github.com/tarantool/tarantool-python.git@stable For later Tarantool use ``master`` branch:: $ pip install git+https://github.com/tarantool/tarantool-python.git@master -------------------------------------------------------------------------------- What is Tarantool? ------------------ `Tarantool`_ is a NoSQL database running inside a Lua program. It combines the network programming power of Node.JS with data persistency capabilities of Redis. It's open source, `BSD licensed`_. Features -------- * Lua packages for non-blocking I/O, fibers and HTTP * MsgPack data format and MsgPack based client-server protocol * Two data engines: * 100% in-memory with optional persistence * 2-level disk-based B-tree, to use with large data sets (powered by `Sophia`_) * secondary key and index iterators support (can be non-unique and composite) * multiple index types: HASH, BITSET, TREE * asynchronous master-master replication * authentication and access control See More ^^^^^^^^ * `Tarantool Homepage`_ * `Tarantool at Github`_ * `Tarantool User Guide`_ * `Client-server Protocol Specification`_ .. _`Tarantool`: .. _`Tarantool Database`: .. 
_`Tarantool Homepage`: http://tarantool.org .. _`Tarantool at Github`: https://github.com/tarantool/tarantool .. _`Tarantool User Guide`: http://tarantool.org/doc/book/index.html .. _`Client-server protocol specification`: http://tarantool.org/doc/dev_guide/box-protocol.html .. _`Sophia`: http://sphia.org .. _`BSD licensed`: http://www.gnu.org/licenses/license-list.html#ModifiedBSD tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/requirements.txt0000644000000000000000000000004713306562377025356 0ustar rootrootmsgpack-python>=0.4.0 pyyaml>=3.10 six tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/README.txt0000644000000000000000000000001713306562377023565 0ustar rootrootSee README.rst tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/.travis.yml0000644000000000000000000000131213306562377024177 0ustar rootrootsudo: required services: - docker language: python cache: false env: matrix: - OS=el DIST=6 PACK=rpm - OS=el DIST=7 PACK=rpm - OS=fedora DIST=22 PACK=rpm - OS=fedora DIST=23 PACK=rpm - OS=fedora DIST=rawhide PACK=rpm - OS=ubuntu DIST=precise PACK=deb - OS=ubuntu DIST=trusty PACK=deb - OS=ubuntu DIST=wily PACK=deb - OS=ubuntu DIST=xenial PACK=deb - OS=debian DIST=wheezy PACK=deb - OS=debian DIST=jessie PACK=deb - OS=debian DIST=stretch PACK=deb - OS=debian DIST=sid PACK=deb - PACK=none script: - git clone https://github.com/tarantool/build.git - ./build/pack/travis.sh notifications: email: true irc: false tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/0000755000000000000000000000000013306562377023233 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/setup_command.py0000755000000000000000000000123513306562377026447 0ustar rootroot#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import unittest import setuptools from glob import glob class test(setuptools.Command): user_options = [] description = 'Run unit tests' def initialize_options(self): pass def 
finalize_options(self): pass def run(self): ''' Find all tests in test/tarantool/ and run them ''' #root = os.path.dirname(os.path.dirname(__file__)) #sys.path.insert(0, root) tests = unittest.defaultTestLoader.discover('tests') test_runner = unittest.TextTestRunner(verbosity = 2) test_runner.run(tests) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/__init__.py0000644000000000000000000000000013306562377025332 0ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/0000755000000000000000000000000013306562377024547 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/box.lua0000644000000000000000000000036513306562377026046 0ustar rootroot#!/usr/bin/env tarantool os = require('os') require('console').listen(os.getenv("ADMIN_PORT")) box.cfg{ listen = os.getenv("PRIMARY_PORT"), slab_alloc_arena = 0.1, pid_file = "box.pid", rows_per_wal = 50 } tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/lib/0000755000000000000000000000000013306562377025315 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/lib/__init__.py0000644000000000000000000000000013306562377027414 0ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/lib/tarantool_server.py0000644000000000000000000002012213306562377031255 0ustar rootroot# -*- coding: utf-8 -*- import os import os.path import errno import shlex import random import socket import tempfile import yaml import time import shutil import subprocess def check_port(port, rais=True): try: sock = socket.create_connection(("localhost", port)) except socket.error: return True if rais: raise RuntimeError("The server is already running on port {0}".format(port)) return False def find_port(port = None): if port is None: port = random.randrange(3300, 9999) while port < 9999: if check_port(port, False): return port port += 1 return find_port(3300) class RunnerException(object): pass 
class TarantoolAdmin(object): def __init__(self, host, port): self.host = host self.port = port self.is_connected = False self.socket = None def connect(self): self.socket = socket.create_connection((self.host, self.port)) self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) self.is_connected = True self.socket.recv(256) # skip greating def disconnect(self): if self.is_connected: self.socket.close() self.socket = None self.is_connected = False def reconnect(self): self.disconnect() self.connect() def opt_reconnect(self): """ On a socket which was disconnected, recv of 0 bytes immediately returns with no data. On a socket which is alive, it returns EAGAIN. Make use of this property and detect whether or not the socket is dead. Reconnect a dead socket, do nothing if the socket is good.""" try: if self.socket is None or self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK) == '': self.reconnect() except socket.error as e: if e.errno == errno.EAGAIN: pass else: self.reconnect() def execute(self, command): self.opt_reconnect() return self.execute_no_reconnect(command) def __enter__(self): self.connect() return self def __exit__(self, type, value, tb): self.disconnect() def __call__(self, command): return self.execute(command) def execute_no_reconnect(self, command): if not command: return cmd = command.replace('\n', ' ') + '\n' self.socket.sendall(cmd.encode()) bufsiz = 4096 res = "" while True: buf = self.socket.recv(bufsiz) if not buf: break res = res + buf.decode() if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0): break return yaml.load(res) class TarantoolServer(object): default_tarantool = { "bin": "tarantool", "logfile": "tarantool.log", "init": "init.lua"} default_cfg = { "custom_proc_title": "\"tarantool-python testing\"", "slab_alloc_arena": 0.5, "pid_file": "\"box.pid\"", "rows_per_wal": 200} @property def logfile_path(self): return os.path.join(self.vardir, self.default_tarantool['logfile']) @property def cfgfile_path(self): return 
os.path.join(self.vardir, self.default_tarantool['config']) @property def script_path(self): return os.path.join(self.vardir, self.default_tarantool['init']) @property def script_dst(self): return os.path.join(self.vardir, os.path.basename(self.script)) @property def script(self): if not hasattr(self, '_script'): self._script = None return self._script @script.setter def script(self, val): if val is None: if hasattr(self, '_script'): delattr(self, '_script') return self._script = os.path.abspath(val) @property def binary(self): if not hasattr(self, '_binary'): self._binary = self.find_exe() return self._binary @property def _admin(self): if not hasattr(self, 'admin'): self.admin = None return self.admin @_admin.setter def _admin(self, port): try: int(port) except ValueError: raise ValueError("Bad port number: '%s'" % port) if hasattr(self, 'admin'): del self.admin self.admin = TarantoolAdmin('localhost', port) @property def log_des(self): if not hasattr(self, '_log_des'): self._log_des = open(self.logfile_path, 'a') return self._log_des @log_des.deleter def log_des(self): if not hasattr(self, '_log_des'): return if not self._log_des.closed: self._log_des.close() delattr(self, '_log_des') def __init__(self): os.popen('ulimit -c unlimited') self.args = {} self.args['primary'] = find_port() self.args['admin'] = find_port(self.args['primary'] + 1) self._admin = self.args['admin'] self.vardir = tempfile.mkdtemp(prefix='var_', dir=os.getcwd()) self.find_exe() def find_exe(self): if 'TARANTOOL_BOX_PATH' in os.environ: os.environ["PATH"] = os.environ["TARANTOOL_BOX_PATH"] + os.pathsep + os.environ["PATH"] for _dir in os.environ["PATH"].split(os.pathsep): exe = os.path.join(_dir, self.default_tarantool["bin"]) if os.access(exe, os.X_OK): return os.path.abspath(exe) raise RuntimeError("Can't find server executable in " + os.environ["PATH"]) def generate_configuration(self): os.putenv("PRIMARY_PORT", str(self.args['primary'])) os.putenv("ADMIN_PORT", str(self.args['admin'])) 
def prepare_args(self): return shlex.split(self.binary if not self.script else self.script_dst) def wait_until_started(self): """ Wait until server is started. Server consists of two parts: 1) wait until server is listening on sockets 2) wait until server tells us his status """ while True: try: temp = TarantoolAdmin('localhost', self.args['admin']) while True: ans = temp('box.info.status')[0] if ans in ('running', 'hot_standby', 'orphan') or ans.startswith('replica'): return True elif ans in ('loading',): continue else: raise Exception("Strange output for `box.info.status`: %s" % (ans)) except socket.error as e: if e.errno == errno.ECONNREFUSED: time.sleep(0.1) continue raise def start(self): # Main steps for running Tarantool\Box # * Find binary file --DONE(find_exe -> binary) # * Create vardir --DONE(__init__) # * Generate cfgfile --DONE(generate_configuration) # * (MAYBE) Copy init.lua --INSIDE # * Concatenate arguments and # start Tarantool\Box --DONE(prepare_args) # * Wait unitl Tarantool\Box # started --DONE(wait_until_started) self.generate_configuration() if self.script: shutil.copy(self.script, self.script_dst) os.chmod(self.script_dst, 0o777) args = self.prepare_args() self.process = subprocess.Popen(args, cwd = self.vardir, stdout=self.log_des, stderr=self.log_des) self.wait_until_started() def stop(self): if self.process.poll() is None: self.process.terminate() self.process.wait() def restart(self): self.stop() self.start() def clean(self): if os.path.isdir(self.vardir): shutil.rmtree(self.vardir) def __del__(self): self.stop() self.clean() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/test_schema.py0000755000000000000000000002142713306562377027431 0ustar rootroot#!/usr/bin/env ipython import unittest import tarantool from .lib.tarantool_server import TarantoolServer class TestSuite_Schema(unittest.TestCase): @classmethod def setUpClass(self): print(' SCHEMA '.center(70, '=')) print('-' * 70) self.srv = TarantoolServer() 
self.srv.script = 'tests/suites/box.lua' self.srv.start() self.con = tarantool.Connection('localhost', self.srv.args['primary']) self.sch = self.con.schema def test_00_authenticate(self): self.assertIsNone(self.srv.admin("box.schema.user.create('test', { password = 'test' })")) self.assertIsNone(self.srv.admin("box.schema.user.grant('test', 'read,write', 'space', '_space')")) self.assertIsNone(self.srv.admin("box.schema.user.grant('test', 'read,write', 'space', '_index')")) self.assertEqual(self.con.authenticate('test', 'test')._data, None) def test_01_space_bad(self): with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no space.*'): self.sch.get_space(0) with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no space.*'): self.sch.get_space(0) with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no space.*'): self.sch.get_space('bad_name') def test_02_index_bad(self): with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no space.*'): self.sch.get_index(0, 'primary') with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no space.*'): self.sch.get_index('bad_space', 'primary') with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no index.*'): self.sch.get_index(280, 'bad_index') with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no index.*'): self.sch.get_index(280, 'bad_index') with self.assertRaisesRegexp(tarantool.SchemaError, 'There\'s no index.*'): self.sch.get_index(280, 3) def test_03_01_space_name__(self): self.con.flush_schema() space = self.sch.get_space('_schema') self.assertEqual(space.sid, 272) self.assertEqual(space.name, '_schema') self.assertEqual(space.arity, 1) space = self.sch.get_space('_space') self.assertEqual(space.sid, 280) self.assertEqual(space.name, '_space') self.assertEqual(space.arity, 1) space = self.sch.get_space('_index') self.assertEqual(space.sid, 288) self.assertEqual(space.name, '_index') self.assertEqual(space.arity, 1) def test_03_02_space_number(self): 
self.con.flush_schema() space = self.sch.get_space(272) self.assertEqual(space.sid, 272) self.assertEqual(space.name, '_schema') self.assertEqual(space.arity, 1) space = self.sch.get_space(280) self.assertEqual(space.sid, 280) self.assertEqual(space.name, '_space') self.assertEqual(space.arity, 1) space = self.sch.get_space(288) self.assertEqual(space.sid, 288) self.assertEqual(space.name, '_index') self.assertEqual(space.arity, 1) def test_04_space_cached(self): space = self.sch.get_space('_schema') self.assertEqual(space.sid, 272) self.assertEqual(space.name, '_schema') self.assertEqual(space.arity, 1) space = self.sch.get_space('_space') self.assertEqual(space.sid, 280) self.assertEqual(space.name, '_space') self.assertEqual(space.arity, 1) space = self.sch.get_space('_index') self.assertEqual(space.sid, 288) self.assertEqual(space.name, '_index') self.assertEqual(space.arity, 1) def test_05_01_index_name___name__(self): self.con.flush_schema() index = self.sch.get_index('_index', 'primary') self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 2) index = self.sch.get_index('_index', 'name') self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 2) index = self.sch.get_index('_space', 'primary') self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 1) index = self.sch.get_index('_space', 'name') self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 1) def test_05_02_index_name___number(self): self.con.flush_schema() index = self.sch.get_index('_index', 0) self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') 
self.assertEqual(len(index.parts), 2) index = self.sch.get_index('_index', 2) self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 2) index = self.sch.get_index('_space', 0) self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 1) index = self.sch.get_index('_space', 2) self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 1) def test_05_03_index_number_name__(self): self.con.flush_schema() index = self.sch.get_index(288, 'primary') self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 2) index = self.sch.get_index(288, 'name') self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 2) index = self.sch.get_index(280, 'primary') self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 1) index = self.sch.get_index(280, 'name') self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 1) def test_05_04_index_number_number(self): self.con.flush_schema() index = self.sch.get_index(288, 0) self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 2) index = self.sch.get_index(288, 2) self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 2) index = self.sch.get_index(280, 0) self.assertEqual(index.space.name, '_space') 
self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 1) index = self.sch.get_index(280, 2) self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 1) def test_06_index_cached(self): index = self.sch.get_index('_index', 'primary') self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 2) index = self.sch.get_index('_index', 2) self.assertEqual(index.space.name, '_index') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 2) index = self.sch.get_index(280, 'primary') self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 0) self.assertEqual(index.name, 'primary') self.assertEqual(len(index.parts), 1) index = self.sch.get_index(280, 2) self.assertEqual(index.space.name, '_space') self.assertEqual(index.iid, 2) self.assertEqual(index.name, 'name') self.assertEqual(len(index.parts), 1) @classmethod def tearDownClass(self): self.srv.stop() self.srv.clean() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/test_protocol.py0000755000000000000000000000423513306562377030030 0ustar rootroot#!/usr/bin/env ipython import unittest from tarantool.utils import greeting_decode, version_id import uuid class TestSuite_Protocol(unittest.TestCase): @classmethod def setUpClass(self): print(' PROTOCOL '.center(70, '=')) print('-' * 70) def test_00_greeting_1_6(self): buf = "Tarantool 1.6.6 \n" + \ "AtQnb9SAIaKazZZy9lJKvK3urtbjCEJndhRVbslSPGc= \n"; greeting = greeting_decode(buf.encode()) self.assertEqual(greeting.version_id, version_id(1, 6, 6)) self.assertEqual(greeting.protocol, "Binary") self.assertIsNone(greeting.uuid) self.assertIsNotNone(greeting.salt) def test_01_greeting_1_6_with_tag(self): buf = "Tarantool 1.6.6-232-gcf47324 \n" + \ 
"AtQnb9SAIaKazZZy9lJKvK3urtbjCEJndhRVbslSPGc= \n"; greeting = greeting_decode(buf.encode()) self.assertEqual(greeting.version_id, version_id(1, 6, 6)) self.assertEqual(greeting.protocol, "Binary") self.assertIsNone(greeting.uuid) self.assertIsNotNone(greeting.salt) def test_02_greeting_1_6_console(self): buf = "Tarantool 1.6.6-132-g82f5424 (Lua console) \n" + \ "type 'help' for interactive help \n"; greeting = greeting_decode(buf.encode()) self.assertEqual(greeting.version_id, version_id(1, 6, 6)) self.assertEqual(greeting.protocol, "Lua console") self.assertIsNone(greeting.uuid) self.assertIsNone(greeting.salt) def test_03_greeting_1_6_7(self): buf = "Tarantool 1.6.7 (Binary) 52dc2837-8001-48fe-bdce-c493c04599ce \n" + \ "Z+2F+VRlyK1nKT82xQtxqEggMtkTK5RtPYf27JryRas= \n"; greeting = greeting_decode(buf.encode()) self.assertEqual(greeting.version_id, version_id(1, 6, 7)) self.assertEqual(greeting.protocol, "Binary") self.assertEqual(greeting.uuid, uuid.UUID('52dc2837-8001-48fe-bdce-c493c04599ce')) self.assertIsNotNone(greeting.salt) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/__init__.py0000644000000000000000000000076513306562377026670 0ustar rootrootimport os import unittest __tmp = os.getcwd() os.chdir(os.path.abspath(os.path.dirname(__file__))) from .test_schema import TestSuite_Schema from .test_dml import TestSuite_Request from .test_protocol import TestSuite_Protocol test_cases = (TestSuite_Schema, TestSuite_Request, TestSuite_Protocol) def load_tests(loader, tests, pattern): suite = unittest.TestSuite() for testc in test_cases: suite.addTests(loader.loadTestsFromTestCase(testc)) return suite os.chdir(__tmp) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tests/suites/test_dml.py0000644000000000000000000002541613306562377026744 0ustar rootroot# -*- coding: utf-8 -*- import six import yaml import unittest import tarantool from .lib.tarantool_server import TarantoolServer class TestSuite_Request(unittest.TestCase): 
@classmethod def setUpClass(self): print(' DML '.center(70, '=')) print('-' * 70) self.srv = TarantoolServer() self.srv.script = 'tests/suites/box.lua' self.srv.start() self.con = tarantool.Connection('localhost', self.srv.args['primary']) self.adm = self.srv.admin self.space_created = self.adm("box.schema.create_space('space_1')") self.adm("box.space['space_1']:create_index('primary', {type = 'tree', parts = {1, 'num'}, unique = true})") self.adm("box.space['space_1']:create_index('secondary', {type = 'tree', parts = {2, 'num', 3, 'str'}, unique = false})") self.adm("json = require('json')") self.adm("fiber = require('fiber')") self.adm("uuid = require('uuid')") def test_00_00_authenticate(self): self.assertIsNone(self.srv.admin("box.schema.user.create('test', { password = 'test' })")) self.assertIsNone(self.srv.admin("box.schema.user.grant('test', 'execute,read,write', 'universe')")) self.assertEqual(self.con.authenticate('test', 'test')._data, None) def test_00_01_space_created(self): # Check that space is created in setUpClass self.assertEqual(self.space_created[1], 'created') def test_00_02_fill_space(self): # Fill space with values for i in range(1, 500): self.assertEqual( self.con.insert('space_1', [i, i%5, 'tuple_'+str(i)])[0], [i, i%5, 'tuple_'+str(i)] ) def test_00_03_answer_repr(self): repr_str = \ '''- [1, 1, tuple_1] ''' self.assertEqual(repr(self.con.select('space_1', 1)), repr_str) def test_02_select(self): # Check that select with different keys are Ok. (With and without index names) self.assertEqual(self.con.select('space_1', 20), [[20, 0, 'tuple_20']]) self.assertEqual(self.con.select('space_1', [21]), [[21, 1, 'tuple_21']]) self.assertEqual(self.con.select('space_1', [22], index='primary'), [[22, 2, 'tuple_22']]) self.assertEqual(self.con.select('space_1', [23], index='primary'), [[23, 3, 'tuple_23']]) # Check that Offset and Limit args are working fine. 
self.assertEqual(self.con.select('space_1', [20], index='primary', limit=1), [[20, 0, 'tuple_20']]) # With other indexes too self.assertEqual( sorted( self.con.select('space_1', [0], index='secondary', offset=3, limit=0), key = lambda x: x[0]), [] ) self.assertEqual( sorted( self.con.select('space_1', [0], index='secondary', offset=3, limit=1), key = lambda x: x[0]), [[110, 0, 'tuple_110']] ) self.assertEqual( sorted( self.con.select('space_1', [0], index='secondary', offset=3, limit=2), key = lambda x: x[0]), [[110, 0, 'tuple_110'],\ [115, 0, 'tuple_115']] ) select_req = self.con.select('space_1', [0], index='secondary') self.assertEqual(len(select_req), 99) for i in select_req: self.assertTrue(not (i[0] % 5)) self.assertTrue(not i[1]) self.assertTrue(i[2] == 'tuple_' + str(i[0])) # Check limit again. self.assertEqual(len(self.con.select('space_1', [0, 'tuple_20'], index='secondary', limit=0)), 0) self.assertEqual(len(self.con.select('space_1', [0], index='secondary', limit=0)), 0) self.assertEqual(len(self.con.select('space_1', [0], index='secondary', limit=100)), 99) self.assertEqual(len(self.con.select('space_1', [0], index='secondary', limit=50)), 50) # TODO: Check iterator_types def test_03_delete(self): # Check that delete works fine self.assertEqual(self.con.delete('space_1', 20), [[20, 0, 'tuple_20']]) self.assertEqual(self.con.delete('space_1', [20]), []) self.assertEqual(self.con.select('space_1', [20], index='primary'), []) # Check that field has no meaning, yet. with self.assertRaisesRegexp(tarantool.DatabaseError, '(19, .*)'): self.con.delete('space_1', [1, 'tuple_21']) self.assertEqual(self.con.select('space_1', [21], index='primary'), [[21, 1, 'tuple_21']]) def test_04_replace(self): # Check replace that is Ok. self.assertEqual(self.con.replace('space_1', [2, 2, 'tuple_3']), [[2, 2, 'tuple_3']]) self.assertEqual(self.con.select('space_1', 2), [[2, 2, 'tuple_3']]) # Check replace that isn't Ok. 
with self.assertRaisesRegexp(tarantool.DatabaseError, '(39, .*)'): self.assertEqual(self.con.replace('space_1', [2, 2]), [[2, 2, 'tuple_2']]) def test_05_ping(self): # Simple ping test # * No exceptions are raised # * Ping time > 0 self.assertTrue(self.con.ping() > 0) self.assertEqual(self.con.ping(notime=True), "Success") def test_06_update(self): self.assertEqual(self.con.update('space_1', (2,), [('+', 1, 3)]), [[2, 5, 'tuple_3']]) self.assertEqual(self.con.update('space_1', (2,), [('-', 1, 3)]), [[2, 2, 'tuple_3']]) self.assertEqual(self.con.update('space_1', (2,), [(':', 2, 3, 2, 'lalal')]), [[2, 2, 'tuplalal_3']]) self.assertEqual(self.con.update('space_1', (2,), [('!', 2, '1')]), [[2, 2, '1', 'tuplalal_3']]) self.assertEqual(self.con.update('space_1', (2,), [('!', 2, 'oingo, boingo')]), [[2, 2, 'oingo, boingo', '1', 'tuplalal_3']]) self.assertEqual(self.con.update('space_1', (2,), [('#', 2, 2)]), [[2, 2, 'tuplalal_3']]) def test_07_call(self): self.assertEqual(self.con.call('json.decode', '[123, 234, 345]'), [[123, 234, 345]]) self.assertEqual(self.con.call('json.decode', ['[123, 234, 345]']), [[123, 234, 345]]) self.assertEqual(self.con.call('json.decode', ('[123, 234, 345]',)), [[123, 234, 345]]) with self.assertRaisesRegexp(tarantool.DatabaseError, '(32, .*)'): self.con.call('json.decode') with self.assertRaisesRegexp(tarantool.DatabaseError, '(32, .*)'): self.con.call('json.decode', '{[1, 2]: "world"}') ans = self.con.call('fiber.time') self.assertEqual(len(ans), 1) self.assertEqual(len(ans[0]), 1) self.assertIsInstance(ans[0][0], float) ans = self.con.call('fiber.time64') self.assertEqual(len(ans), 1) self.assertEqual(len(ans[0]), 1) self.assertIsInstance(ans[0][0], six.integer_types) ans = self.con.call('uuid.str') self.assertEqual(len(ans), 1) self.assertEqual(len(ans[0]), 1) self.assertIsInstance(ans[0][0], str) # ans = self.con.call('uuid.hex') # self.assertEqual(len(ans), 1) # self.assertEqual(len(ans[0]), 1) # self.assertIsInstance(ans[0][0], str) 
# with self.assertRaisesRegexp(tarantool.DatabaseError, # '(12345, \'lol, error\')'): # self.con.call('box.error', [12345, 'lol, error']) self.assertEqual(self.con.call('box.tuple.new', [1, 2, 3, 'fld_1']), [[1, 2, 3, 'fld_1']]) self.assertEqual(self.con.call('box.tuple.new', 'fld_1'), [['fld_1']]) def test_08_eval(self): self.assertEqual(self.con.eval('return json.decode(...)', '[123, 234, 345]'), [[123, 234, 345]]) self.assertEqual(self.con.eval('return json.decode(...)', ['[123, 234, 345]']), [[123, 234, 345]]) self.assertEqual(self.con.eval('return json.decode(...)', ('[123, 234, 345]',)), [[123, 234, 345]]) self.assertEqual(self.con.eval('return json.decode("[123, 234, 345]")'), [[123, 234, 345]]) self.assertEqual(self.con.eval('return json.decode("[123, 234, 345]"), '+ 'json.decode("[123, 234, 345]")'), [[123, 234, 345], [123, 234, 345]]) self.assertEqual(self.con.eval('json.decode("[123, 234, 345]")'), []) def test_09_upsert(self): self.assertEqual(self.con.select('space_1', [22], index='primary'), [[22, 2, 'tuple_22']]) self.assertEqual(self.con.select('space_1', [23], index='primary'), [[23, 3, 'tuple_23']]) self.assertEqual(self.con.select('space_1', [499], index='primary'), [[499, 4, 'tuple_499']]) self.assertEqual(self.con.select('space_1', [500], index='primary'), []) self.assertEqual(self.con.upsert('space_1', [500, 123, 'hello, world'], [(':', 2, 2, 3, "---")]), []) self.assertEqual(self.con.select('space_1', [500], index='primary'), [[500, 123, 'hello, world']]) self.assertEqual(self.con.upsert('space_1', [500, 123, 'hello, world'], [(':', 2, 2, 3, "---")]), []) self.assertEqual(self.con.select('space_1', [500], index='primary'), [[500, 123, 'he---, world']]) def test_10_space(self): space = self.con.space('space_1') self.assertEqual(space.select([22], index='primary'), [[22, 2, 'tuple_22']]) self.assertEqual(space.replace([22, 10, 'lol']), [[22, 10, 'lol']]) self.assertEqual(space.insert([900, 10, 'foo']), [[900, 10, 'foo']]) 
self.assertEqual(space.select([10], index='secondary'), [ [900, 10, 'foo'], [22, 10, 'lol'] ]) self.assertEqual(space.select([10], index='secondary', limit=1), [ [900, 10, 'foo'] ]) self.assertEqual(space.select([10], index='secondary', limit=1, offset=1), [ [22, 10, 'lol'] ]) self.assertEqual(space.select([501], index='primary'), []) self.assertEqual(space.upsert([501, 123, 'hello, world'], [(':', 2, 2, 3, "---")]), []) self.assertEqual(space.select([501], index='primary'), [[501, 123, 'hello, world']]) self.assertEqual(space.upsert([501, 123, 'hello, world'], [(':', 2, 2, 3, "---")]), []) self.assertEqual(space.update([400], [('!', 2, 'oingo, boingo')]), [[400, 0, 'oingo, boingo', 'tuple_400']]) self.assertEqual(space.update([400], [('#', 2, 1)]), [[400, 0, 'tuple_400']]) self.assertEqual(space.delete([900]), [[900, 10, 'foo']]) @classmethod def tearDownClass(self): self.srv.stop() self.srv.clean() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/.gitignore0000644000000000000000000000010713306562377024057 0ustar rootroot*.pyc *.pyo *.wpr *~ /build/ /dist/ /MANIFEST *.snap sophia *.egg-info tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/0000755000000000000000000000000013306562377024074 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/space.py0000644000000000000000000000473013306562377025545 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 ''' This module provides :class:`~tarantool.space.Space` class. It is an object-oriented wrapper for request over Tarantool space. ''' class Space(object): ''' Object-oriented wrapper for accessing a particular space. Encapsulates the identifier of the space and provides more convenient syntax for database operations. ''' def __init__(self, connection, space_name): ''' Create Space instance. 
:param connection: Object representing connection to the server :type connection: :class:`~tarantool.connection.Connection` instance :param int space_name: space no or name to insert a record :type space_name: int or str ''' self.connection = connection self.space_no = self.connection.schema.get_space(space_name).sid def insert(self, *args, **kwargs): ''' Execute INSERT request. See `~tarantool.connection.insert` for more information ''' return self.connection.insert(self.space_no, *args, **kwargs) def replace(self, *args, **kwargs): ''' Execute REPLACE request. See `~tarantool.connection.replace` for more information ''' return self.connection.replace(self.space_no, *args, **kwargs) def delete(self, *args, **kwargs): ''' Execute DELETE request. See `~tarantool.connection.delete` for more information ''' return self.connection.delete(self.space_no, *args, **kwargs) def update(self, *args, **kwargs): ''' Execute UPDATE request. See `~tarantool.connection.update` for more information ''' return self.connection.update(self.space_no, *args, **kwargs) def upsert(self, *args, **kwargs): ''' Execute UPDATE request. See `~tarantool.connection.upsert` for more information ''' return self.connection.upsert(self.space_no, *args, **kwargs) def select(self, *args, **kwargs): ''' Execute SELECT request. See `~tarantool.connection.select` for more information ''' return self.connection.select(self.space_no, *args, **kwargs) def call(self, *args, **kwargs): ''' Execute CALL request. Call stored Lua function. 
It's deprecated, use `~tarantool.connection.call` instead ''' return self.connection.call(func_name, *args, **kwargs) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/utils.py0000644000000000000000000000465013306562377025613 0ustar rootroot# -*- coding: utf-8 -*- import six import base64 import uuid def check_key(*args, **kwargs): if 'first' not in kwargs: kwargs['first'] = True if 'select' not in kwargs: kwargs['select'] = False if len(args) == 0 and kwargs['select']: return [] if len(args) == 1: if isinstance(args[0], (list, tuple)) and kwargs['first']: kwargs['first'] = False return check_key(*args[0], **kwargs) elif args[0] is None and kwargs['select']: return [] for key in args: assert isinstance(key, six.integer_types + six.string_types) return list(args) def version_id(major, minor, patch): return (((major << 8) | minor) << 8) | patch def greeting_decode(greeting_buf): class Greeting: version_id = 0 protocol = None uuid = None salt = None # Tarantool 1.6.6 # Tarantool 1.6.6-102-g4e9bde2 # Tarantool 1.6.8 (Binary) 3b151c25-4c4a-4b5d-8042-0f1b3a6f61c3 # Tarantool 1.6.8-132-g82f5424 (Lua console) result = Greeting() try: (product, _, tail) = str(greeting_buf)[0:63].partition(' ') if product.startswith("Tarantool "): raise Exception() # Parse a version string - 1.6.6-83-gc6b2129 or 1.6.7 (version, _, tail) = tail.partition(' ') version = version.split('-')[0].split('.') result.version_id = version_id(int(version[0]), int(version[1]), int(version[2])) if len(tail) > 0 and tail[0] == '(': (protocol, _, tail) = tail[1:].partition(') ') # Extract protocol name - a string between (parentheses) result.protocol = protocol if result.protocol != "Binary": return result # Parse UUID for binary protocol (uuid_buf, _, tail) = tail.partition(' ') if result.version_id >= version_id(1, 6, 7): result.uuid = uuid.UUID(uuid_buf.strip()) elif result.version_id < version_id(1, 6, 7): # Tarantool < 1.6.7 doesn't add "(Binary)" to greeting result.protocol = 
"Binary" elif len(tail.strip()) != 0: raise Exception("x") # Unsuported greeting result.salt = base64.decodestring(greeting_buf[64:])[:20] return result except Exception as e: print('exx', e) raise ValueError("Invalid greeting: " + str(greeting_buf)) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/response.py0000644000000000000000000001200413306562377026301 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 import sys import msgpack import yaml from tarantool.const import ( IPROTO_CODE, IPROTO_DATA, IPROTO_ERROR, IPROTO_SYNC, REQUEST_TYPE_ERROR ) from tarantool.error import DatabaseError, tnt_strerror if sys.version_info < (2, 6): bytes = str # pylint: disable=W0622 class Response(list): ''' Represents a single response from the server in compliance with the Tarantool protocol. Responsible for data encapsulation (i.e. received list of tuples) and parses binary packet received from the server. ''' def __init__(self, conn, response): ''' Create an instance of `Response` using data received from the server. __init__() itself reads data from the socket, parses response body and sets appropriate instance attributes. :param body: body of the response :type body: array of bytes ''' # This is not necessary, because underlying list data structures are # created in the __new__(). But let it be. 
super(Response, self).__init__() if conn.encoding is not None: unpacker = msgpack.Unpacker(use_list=True, encoding=conn.encoding) else: unpacker = msgpack.Unpacker(use_list=True) unpacker.feed(response) header = unpacker.unpack() self._sync = header.get(IPROTO_SYNC, 0) self.conn = conn self._code = header[IPROTO_CODE] self._body = {} try: self._body = unpacker.unpack() except msgpack.OutOfData: pass if self._code < REQUEST_TYPE_ERROR: self._return_code = 0 self._completion_status = 0 self._data = self._body.get(IPROTO_DATA, None) # Backward-compatibility if isinstance(self._data, (list, tuple)): self.extend(self._data) else: self.append(self._data) else: # Separate return_code and completion_code self._return_message = self._body.get(IPROTO_ERROR, "") self._return_code = self._code & (REQUEST_TYPE_ERROR - 1) self._completion_status = 2 self._data = None if self.conn.error: raise DatabaseError(self._return_code, self._return_message) @property def completion_status(self): ''' :type: int Request completion status. There are only three completion status codes in use: * ``0`` -- "success"; the only possible :attr:`return_code` with this status is ``0`` * ``2`` -- "error"; in this case :attr:`return_code` holds the actual error. ''' return self._completion_status @property def rowcount(self): ''' :type: int Number of rows affected or returned by a query. ''' return len(self) @property def body(self): ''' :type: dict Required field in the server response. Contains raw response body. ''' return self._body @property def code(self): ''' :type: int Required field in the server response. Contains response type id. ''' return self._code @property def sync(self): '''\ :type: int Required field in the server response. Contains response header IPROTO_SYNC. ''' return self._sync @property def return_code(self): ''' :type: int Required field in the server response. Value of :attr:`return_code` can be ``0`` if request was sucessfull or contains an error code. 
If :attr:`return_code` is non-zero than :attr:`return_message` contains an error message. ''' return self._return_code @property def data(self): ''' :type: object Required field in the server response. Contains list of tuples of SELECT, REPLACE and DELETE requests and arbitrary data for CALL. ''' return self._data @property def strerror(self): ''' :type: str It may be ER_OK if request was successful, or contain error code string. ''' return tnt_strerror(self._return_code) @property def return_message(self): ''' :type: str The error message returned by the server in case of :attr:`return_code` is non-zero. ''' return self._return_message def __str__(self): ''' Return user friendy string representation of the object. Useful for the interactive sessions and debuging. :rtype: str or None ''' if self.completion_status: return yaml.dump({'error': { 'code': self.strerror[0], 'reason': self.return_message }}) return yaml.dump(self._data) __repr__ = __str__ tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/const.py0000644000000000000000000000327613306562377025604 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 import six IPROTO_CODE = 0x00 IPROTO_SYNC = 0x01 IPROTO_SPACE_ID = 0x10 IPROTO_INDEX_ID = 0x11 IPROTO_LIMIT = 0x12 IPROTO_OFFSET = 0x13 IPROTO_ITERATOR = 0x14 IPROTO_KEY = 0x20 IPROTO_TUPLE = 0x21 IPROTO_FUNCTION_NAME = 0x22 IPROTO_USER_NAME = 0x23 IPROTO_SERVER_UUID = 0x24 IPROTO_CLUSTER_UUID = 0x25 IPROTO_VCLOCK = 0x26 IPROTO_EXPR = 0x27 IPROTO_OPS = 0x28 IPROTO_DATA = 0x30 IPROTO_ERROR = 0x31 IPROTO_GREETING_SIZE = 128 REQUEST_TYPE_OK = 0 REQUEST_TYPE_SELECT = 1 REQUEST_TYPE_INSERT = 2 REQUEST_TYPE_REPLACE = 3 REQUEST_TYPE_UPDATE = 4 REQUEST_TYPE_DELETE = 5 REQUEST_TYPE_CALL = 6 REQUEST_TYPE_AUTHENTICATE = 7 REQUEST_TYPE_EVAL = 8 REQUEST_TYPE_UPSERT = 9 REQUEST_TYPE_PING = 64 REQUEST_TYPE_JOIN = 65 REQUEST_TYPE_SUBSCRIBE = 66 REQUEST_TYPE_ERROR = 1 << 15 SPACE_SCHEMA = 272 SPACE_SPACE = 280 SPACE_INDEX = 288 
SPACE_FUNC = 296 SPACE_VSPACE = 281 SPACE_VINDEX = 289 SPACE_VFUNC = 297 SPACE_USER = 304 SPACE_PRIV = 312 SPACE_CLUSTER = 320 INDEX_SPACE_PRIMARY = 0 INDEX_SPACE_NAME = 2 INDEX_INDEX_PRIMARY = 0 INDEX_INDEX_NAME = 2 ITERATOR_EQ = 0 ITERATOR_REQ = 1 ITERATOR_ALL = 2 ITERATOR_LT = 3 ITERATOR_LE = 4 ITERATOR_GE = 5 ITERATOR_GT = 6 ITERATOR_BITSET_ALL_SET = 7 ITERATOR_BITSET_ANY_SET = 8 ITERATOR_BITSET_ALL_NOT_SET = 9 # Default value for socket timeout (seconds) SOCKET_TIMEOUT = None # Default maximum number of attempts to reconnect RECONNECT_MAX_ATTEMPTS = 10 # Default delay between attempts to reconnect (seconds) RECONNECT_DELAY = 0.1 # Number of reattempts in case of server # return completion_status == 1 (try again) RETRY_MAX_ATTEMPTS = 10 if six.PY2: ENCODING_DEFAULT = None else: ENCODING_DEFAULT = "utf-8" tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/__init__.py0000644000000000000000000000240513306562377026206 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 __version__ = "0.5.4" from tarantool.connection import Connection from tarantool.const import ( SOCKET_TIMEOUT, RECONNECT_MAX_ATTEMPTS, RECONNECT_DELAY, ENCODING_DEFAULT ) from tarantool.error import ( Error, DatabaseError, NetworkError, NetworkWarning, RetryWarning ) from tarantool.schema import ( Schema, SchemaError ) def connect(host="localhost", port=33013, user=None, password=None, encoding=ENCODING_DEFAULT): '''\ Create a connection to the Tarantool server. 
:param str host: Server hostname or IP-address :param int port: Server port :rtype: :class:`~tarantool.connection.Connection` :raise: `NetworkError` ''' return Connection(host, port, user=user, password=password, socket_timeout=SOCKET_TIMEOUT, reconnect_max_attempts=RECONNECT_MAX_ATTEMPTS, reconnect_delay=RECONNECT_DELAY, connect_now=True, encoding=encoding) __all__ = ['connect', 'Connection', 'Schema', 'Error', 'DatabaseError', 'NetworkError', 'NetworkWarning', 'RetryWarning', 'SchemaError'] tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/connection.py0000644000000000000000000005747113306562377026623 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 ''' This module provides low-level API for Tarantool ''' import six import time import errno import ctypes import ctypes.util import socket import msgpack try: from ctypes import c_ssize_t except ImportError: from ctypes import c_longlong as c_ssize_t import tarantool from tarantool.response import Response from tarantool.request import ( Request, RequestOK, RequestCall, RequestDelete, RequestEval, RequestInsert, RequestJoin, RequestReplace, RequestPing, RequestSelect, RequestSubscribe, RequestUpdate, RequestUpsert, RequestAuthenticate) from tarantool.space import Space from tarantool.const import ( SOCKET_TIMEOUT, RECONNECT_MAX_ATTEMPTS, RECONNECT_DELAY, RETRY_MAX_ATTEMPTS, REQUEST_TYPE_OK, REQUEST_TYPE_ERROR, IPROTO_GREETING_SIZE, ENCODING_DEFAULT) from tarantool.error import ( NetworkError, DatabaseError, warn, RetryWarning, NetworkWarning) from .schema import Schema from .utils import check_key, greeting_decode, version_id class Connection(object): ''' Represents connection to the Tarantool server. This class is responsible for connection and network exchange with the server. Also this class provides low-level interface to data manipulation (insert/delete/update/select). 
''' Error = tarantool.error DatabaseError = tarantool.error.DatabaseError InterfaceError = tarantool.error.InterfaceError SchemaError = tarantool.error.SchemaError NetworkError = tarantool.error.NetworkError def __init__(self, host, port, user=None, password=None, socket_timeout=SOCKET_TIMEOUT, reconnect_max_attempts=RECONNECT_MAX_ATTEMPTS, reconnect_delay=RECONNECT_DELAY, connect_now=True, encoding=ENCODING_DEFAULT): ''' Initialize a connection to the server. :param str host: Server hostname or IP-address :param int port: Server port :param bool connect_now: if True (default) than __init__() actually creates network connection. if False than you have to call connect() manualy. ''' libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) recv = self._sys_recv = libc.recv recv.argtypes = [ ctypes.c_int, ctypes.c_void_p, c_ssize_t, ctypes.c_int] recv.restype = c_ssize_t self.host = host self.port = port self.user = user self.password = password self.socket_timeout = socket_timeout self.reconnect_delay = reconnect_delay self.reconnect_max_attempts = reconnect_max_attempts self.schema = Schema(self) self._socket = None self.connected = False self.error = True self.encoding = encoding if connect_now: self.connect() def close(self): ''' Close connection to the server ''' self._socket.close() self._socket = None def connect_basic(self): ''' Create connection to the host and port specified in __init__(). 
:raise: `NetworkError` ''' try: # If old socket already exists - close it and re-create self.connected = True if self._socket: self._socket.close() self._socket = socket.create_connection((self.host, self.port)) self._socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) except socket.error as e: self.connected = False raise NetworkError(e) def handshake(self): greeting_buf = self._recv(IPROTO_GREETING_SIZE) greeting = greeting_decode(greeting_buf) if greeting.protocol != "Binary": raise NetworkError("Unsupported protocol: " + greeting.protocol) self.version_id = greeting.version_id self.uuid = greeting.uuid self._salt = greeting.salt if self.user: self.authenticate(self.user, self.password) def connect(self): ''' Create connection to the host and port specified in __init__(). Usually there is no need to call this method directly, since it is called when you create an `Connection` instance. :raise: `NetworkError` ''' try: self.connect_basic() self.handshake() # It is important to set socket timeout *after* connection. 
# Otherwise the timeout exception will be raised, even when # the connection fails because the server is simply # not bound to port self._socket.settimeout(self.socket_timeout) except socket.error as e: self.connected = False raise NetworkError(e) def _recv(self, to_read): buf = b"" while to_read > 0: try: tmp = self._socket.recv(to_read) except socket.error: raise NetworkError(socket.error(errno.ECONNRESET, "Lost connection to server during query")) else: if len(tmp) == 0: raise NetworkError(socket.error(errno.ECONNRESET, "Lost connection to server during query")) to_read -= len(tmp) buf += tmp return buf def _read_response(self): ''' Read response from the transport (socket) :return: tuple of the form (header, body) :rtype: tuple of two byte arrays ''' # Read packet length length = msgpack.unpackb(self._recv(5)) # Read the packet return self._recv(length) def _send_request_wo_reconnect(self, request): ''' :rtype: `Response` instance :raise: NetworkError ''' assert isinstance(request, Request) # Repeat request in a loop if the server returns completion_status == 1 # (try again) for attempt in range(RETRY_MAX_ATTEMPTS): # pylint: disable=W0612 self._socket.sendall(bytes(request)) response = Response(self, self._read_response()) if response.completion_status != 1: return response warn(response.return_message, RetryWarning) # Raise an error if the maximum number of attempts have been made raise DatabaseError(response.return_code, response.return_message) def _opt_reconnect(self): ''' Check that connection is alive using low-level recv from libc(ctypes) **Due to bug in python - timeout is internal python construction. 
''' if not self._socket: return self.connect() def check(): # Check that connection is alive buf = ctypes.create_string_buffer(2) try: sock_fd = self._socket.fileno() except socket.error as e: if e.errno == errno.EBADF: return errno.ECONNRESET else: self._sys_recv(sock_fd, buf, 1, socket.MSG_DONTWAIT | socket.MSG_PEEK) if ctypes.get_errno() == errno.EAGAIN: ctypes.set_errno(0) return errno.EAGAIN return (ctypes.get_errno() if ctypes.get_errno() else errno.ECONNRESET) last_errno = check() if self.connected and last_errno == errno.EAGAIN: return attempt = 0 last_errno = errno.ECONNRESET while True: time.sleep(self.reconnect_delay) try: self.connect_basic() except NetworkError as e: pass else: if self.connected: break warn("Reconnect attempt %d of %d" % (attempt, self.reconnect_max_attempts), NetworkWarning) if attempt == self.reconnect_max_attempts: raise NetworkError( socket.error(last_errno, errno.errorcode[last_errno])) attempt += 1 self.handshake() # It is important to set socket timeout *after* connection. # Otherwise the timeout exception will be raised, even when # the connection fails because the server is simply # not bound to port self._socket.settimeout(self.socket_timeout) def _send_request(self, request): ''' Send the request to the server through the socket. Return an instance of `Response` class. :param request: object representing a request :type request: `Request` instance :rtype: `Response` instance ''' assert isinstance(request, Request) self._opt_reconnect() response = self._send_request_wo_reconnect( request) return response def flush_schema(self): self.schema.flush() def call(self, func_name, *args): ''' Execute CALL request. Call stored Lua function. 
:param func_name: stored Lua function name :type func_name: str :param args: list of function arguments :type args: list or tuple :rtype: `Response` instance ''' assert isinstance(func_name, str) # This allows to use a tuple or list as an argument if len(args) == 1 and isinstance(args[0], (list, tuple)): args = args[0] request = RequestCall(self, func_name, args) response = self._send_request(request) return response def eval(self, expr, *args): ''' Execute EVAL request. Eval Lua expression. :param expr: Lua expression :type expr: str :param args: list of function arguments :type args: list or tuple :rtype: `Response` instance ''' assert isinstance(expr, str) # This allows to use a tuple or list as an argument if len(args) == 1 and isinstance(args[0], (list, tuple)): args = args[0] request = RequestEval(self, expr, args) response = self._send_request(request) return response def replace(self, space_name, values): ''' Execute REPLACE request. It won't throw error if there's no tuple with this PK exists :param int space_name: space id to insert a record :type space_name: int or str :param values: record to be inserted. The tuple must contain only scalar (integer or strings) values :type values: tuple :rtype: `Response` instance ''' if isinstance(space_name, six.string_types): space_name = self.schema.get_space(space_name).sid request = RequestReplace(self, space_name, values) return self._send_request(request) def authenticate(self, user, password): ''' Execute AUTHENTICATE request. 
:param string user: user to authenticate with :param string password: password for the user :rtype: `Response` instance ''' self.user = user self.password = password if not self._socket: return self._opt_reconnect() request = RequestAuthenticate(self, self._salt, self.user, self.password) return self._send_request_wo_reconnect(request) def _join_v16(self, server_uuid): request = RequestJoin(self, server_uuid) self._socket.sendall(bytes(request)) while True: resp = Response(self, self._read_response()); yield resp if resp.code == REQUEST_TYPE_OK or resp.code >= REQUEST_TYPE_ERROR: return self.close() # close connection after JOIN def _join_v17(self, server_uuid): class JoinState: Handshake, Initial, Final, Done = range(4) request = RequestJoin(self, server_uuid) self._socket.sendall(bytes(request)) state = JoinState.Handshake while True: resp = Response(self, self._read_response()) yield resp if resp.code >= REQUEST_TYPE_ERROR: return elif resp.code == REQUEST_TYPE_OK: state = state + 1 if state == JoinState.Done: return def join(self, server_uuid): self._opt_reconnect() if self.version_id < version_id(1, 7, 0): return self._join_v16(server_uuid) return self._join_v17(server_uuid) def subscribe(self, cluster_uuid, server_uuid, vclock=None): vclock = vclock or {} request = RequestSubscribe(self, cluster_uuid, server_uuid, vclock) self._socket.sendall(bytes(request)) while True: resp = Response(self, self._read_response()) yield resp if resp.code >= REQUEST_TYPE_ERROR: return self.close() # close connection after SUBSCRIBE def insert(self, space_name, values): ''' Execute INSERT request. It will throw error if there's tuple with same PK exists. :param int space_name: space id to insert a record :type space_name: int or str :param values: record to be inserted. 
The tuple must contain only scalar (integer or strings) values :type values: tuple :rtype: `Response` instance ''' if isinstance(space_name, six.string_types): space_name = self.schema.get_space(space_name).sid request = RequestInsert(self, space_name, values) return self._send_request(request) def delete(self, space_name, key, **kwargs): ''' Execute DELETE request. Delete single record identified by `key`. If you're using secondary index, it must be unique. :param space_name: space number or name to delete a record :type space_name: int or name :param key: key that identifies a record :type key: int or str :rtype: `Response` instance ''' index_name = kwargs.get("index", 0) key = check_key(key) if isinstance(space_name, six.string_types): space_name = self.schema.get_space(space_name).sid if isinstance(index_name, six.string_types): index_name = self.schema.get_index(space_name, index_name).iid request = RequestDelete(self, space_name, index_name, key) return self._send_request(request) def upsert(self, space_name, tuple_value, op_list, **kwargs): ''' Execute UPSERT request. If there is an existing tuple which matches the key fields of `tuple_value`, then the request has the same effect as UPDATE and the [(field_1, symbol_1, arg_1), ...] parameter is used. If there is no existing tuple which matches the key fields of `tuple_value`, then the request has the same effect as INSERT and the `tuple_value` parameter is used. However, unlike insert or update, upsert will not read a tuple and perform error checks before returning -- this is a design feature which enhances throughput but requires more caution on the part of the user. If you're using secondary index, it must be unique. List of operations allows to update individual fields. 
*Allowed operations:* (For every operation you must provide field number, to apply this operation to) * `+` for addition (values must be numeric) * `-` for subtraction (values must be numeric) * `&` for bitwise AND (values must be unsigned numeric) * `|` for bitwise OR (values must be unsigned numeric) * `^` for bitwise XOR (values must be unsigned numeric) * `:` for string splice (you must provide `offset`, `count` and `value` for this operation) * `!` for insertion (provide any element to insert) * `=` for assignment (provide any element to assign) * `#` for deletion (provide count of fields to delete) :param space_name: space number or name to update a record :type space_name: int or str :param index: index number or name to update a record :type index: int or str :param tuple_value: tuple, that :type tuple_value: :param op_list: list of operations. Each operation is tuple of three (or more) values :type op_list: a list of the form [(symbol_1, field_1, arg_1), (symbol_2, field_2, arg_2_1, arg_2_2, arg_2_3),...] :rtype: `Response` instance Operation examples: .. code-block:: python # 'ADD' 55 to second field # Assign 'x' to third field [('+', 2, 55), ('=', 3, 'x')] # 'OR' third field with '1' # Cut three symbols starting from second and replace them with '!!' # Insert 'hello, world' field before fifth element of tuple [('|', 3, 1), (':', 2, 2, 3, '!!'), ('!', 5, 'hello, world')] # Delete two fields starting with second field [('#', 2, 2)] ''' index_name = kwargs.get("index", 0) if isinstance(space_name, six.string_types): space_name = self.schema.get_space(space_name).sid if isinstance(index_name, six.string_types): index_name = self.schema.get_index(space_name, index_name).iid request = RequestUpsert(self, space_name, index_name, tuple_value, op_list) return self._send_request(request) def update(self, space_name, key, op_list, **kwargs): ''' Execute UPDATE request. 
The `update` function supports operations on fields — assignment, arithmetic (if the field is unsigned numeric), cutting and pasting fragments of a field, deleting or inserting a field. Multiple operations can be combined in a single update request, and in this case they are performed atomically and sequentially. Each operation requires specification of a field number. When multiple operations are present, the field number for each operation is assumed to be relative to the most recent state of the tuple, that is, as if all previous operations in a multi-operation update have already been applied. In other words, it is always safe to merge multiple update invocations into a single invocation, with no change in semantics. Update single record identified by `key`. List of operations allows to update individual fields. *Allowed operations:* (For every operation you must provide field number, to apply this operation to) * `+` for addition (values must be numeric) * `-` for subtraction (values must be numeric) * `&` for bitwise AND (values must be unsigned numeric) * `|` for bitwise OR (values must be unsigned numeric) * `^` for bitwise XOR (values must be unsigned numeric) * `:` for string splice (you must provide `offset`, `count` and `value` for this operation) * `!` for insertion (before) (provide any element to insert) * `=` for assignment (provide any element to assign) * `#` for deletion (provide count of fields to delete) :param space_name: space number or name to update a record :type space_name: int or str :param index: index number or name to update a record :type index: int or str :param key: key that identifies a record :type key: int or str :param op_list: list of operations. Each operation is tuple of three (or more) values :type op_list: a list of the form [(symbol_1, field_1, arg_1), (symbol_2, field_2, arg_2_1, arg_2_2, arg_2_3), ...] :rtype: ``Response`` instance Operation examples: .. 
code-block:: python # 'ADD' 55 to second field # Assign 'x' to third field [('+', 2, 55), ('=', 3, 'x')] # 'OR' third field with '1' # Cut three symbols starting from second and replace them with '!!' # Insert 'hello, world' field before fifth element of tuple [('|', 3, 1), (':', 2, 2, 3, '!!'), ('!', 5, 'hello, world')] # Delete two fields starting with second field [('#', 2, 2)] ''' index_name = kwargs.get("index", 0) key = check_key(key) if isinstance(space_name, six.string_types): space_name = self.schema.get_space(space_name).sid if isinstance(index_name, six.string_types): index_name = self.schema.get_index(space_name, index_name).iid request = RequestUpdate(self, space_name, index_name, key, op_list) return self._send_request(request) def ping(self, notime=False): ''' Execute PING request. Send empty request and receive empty response from server. :return: response time in seconds :rtype: float ''' request = RequestPing(self) t0 = time.time() self._send_request(request) t1 = time.time() if notime: return "Success" return t1 - t0 def select(self, space_name, key=None, **kwargs): ''' Execute SELECT request. Select and retrieve data from the database. :param space_name: specifies which space to query :type space_name: int or str :param values: values to search over the index :type values: list, tuple, set, frozenset of tuples :param index: specifies which index to use (default is **0** which means that the **primary index** will be used) :type index: int or str :param offset: offset in the resulting tuple set :type offset: int :param limit: limits the total number of returned tuples :type limit: int :rtype: `Response` instance You may use names for index/space. Matching id's -> names connector will get from server. Select one single record (from space=0 and using index=0) >>> select(0, 1) Select single record from space=0 (with name='space') using composite index=1 (with name '_name'). 
>>> select(0, [1,'2'], index=1) # OR >>> select(0, [1,'2'], index='_name') # OR >>> select('space', [1,'2'], index='_name') # OR >>> select('space', [1,'2'], index=1) Select all records >>> select(0) # OR >>> select(0, []) ''' # Initialize arguments and its defaults from **kwargs offset = kwargs.get("offset", 0) limit = kwargs.get("limit", 0xffffffff) index_name = kwargs.get("index", 0) iterator_type = kwargs.get("iterator", 0) # Perform smart type checking (scalar / list of scalars / list of # tuples) key = check_key(key, select=True) if isinstance(space_name, six.string_types): space_name = self.schema.get_space(space_name).sid if isinstance(index_name, six.string_types): index_name = self.schema.get_index(space_name, index_name).iid request = RequestSelect(self, space_name, index_name, key, offset, limit, iterator_type) response = self._send_request(request) return response def space(self, space_name): ''' Create `Space` instance for particular space `Space` instance encapsulates the identifier of the space and provides more convenient syntax for accessing the database space. :param space_name: identifier of the space :type space_name: int or str :rtype: `Space` instance ''' return Space(self, space_name) def generate_sync(self): """\ Need override for async io connection """ return 0 tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/schema.py0000644000000000000000000000773113306562377025716 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=R0903 ''' This module provides :class:`~tarantool.schema.Schema` class. It is a Tarantool schema description. 
''' import six from tarantool.error import SchemaError, DatabaseError import tarantool.const as const class SchemaIndex(object): def __init__(self, index_row, space): self.iid = index_row[1] self.name = index_row[2] if isinstance(self.name, bytes): self.name = self.name.decode() self.index = index_row[3] self.unique = index_row[4] self.parts = [] if isinstance(index_row[5], (list, tuple)): for k, v in index_row[5]: self.parts.append((k, v)) else: for i in range(index_row[5]): self.parts.append((index_row[5 + 1 + i * 2], index_row[5 + 2 + i * 2])) self.space = space self.space.indexes[self.iid] = self if self.name: self.space.indexes[self.name] = self def flush(self): del self.space.indexes[self.iid] if self.name: del self.space.indexes[self.name] class SchemaSpace(object): def __init__(self, space_row, schema): self.sid = space_row[0] self.arity = space_row[1] self.name = space_row[2] if isinstance(self.name, bytes): self.name = self.name.decode() self.indexes = {} self.schema = schema self.schema[self.sid] = self if self.name: self.schema[self.name] = self def flush(self): del self.schema[self.sid] if self.name: del self.schema[self.name] class Schema(object): def __init__(self, con): self.schema = {} self.con = con def get_space(self, space): try: return self.schema[space] except KeyError: pass _index = (const.INDEX_SPACE_NAME if isinstance(space, six.string_types) else const.INDEX_SPACE_PRIMARY) space_row = None try: space_row = self.con.select(const.SPACE_VSPACE, space, index=_index) except DatabaseError as e: if e.args[0] != 36: raise if space_row is None: space_row = self.con.select(const.SPACE_SPACE, space, index=_index) if len(space_row) > 1: raise SchemaError('Some strange output from server: \n' + space_row) elif len(space_row) == 0 or not len(space_row[0]): temp_name = ('name' if isinstance(space, six.string_types) else 'id') raise SchemaError( "There's no space with {1} '{0}'".format(space, temp_name)) space_row = space_row[0] return 
SchemaSpace(space_row, self.schema) def get_index(self, space, index): _space = self.get_space(space) try: return _space.indexes[index] except KeyError: pass _index = (const.INDEX_INDEX_NAME if isinstance(index, six.string_types) else const.INDEX_INDEX_PRIMARY) index_row = None try: index_row = self.con.select(const.SPACE_VINDEX, [_space.sid, index], index=_index) except DatabaseError as e: if e.args[0] != 36: raise if index_row is None: index_row = self.con.select(const.SPACE_INDEX, [_space.sid, index], index=_index) if len(index_row) > 1: raise SchemaError('Some strange output from server: \n' + index_row) elif len(index_row) == 0 or not len(index_row[0]): temp_name = ('name' if isinstance(index, six.string_types) else 'id') raise SchemaError( "There's no index with {2} '{0}' in space '{1}'".format( index, _space.name, temp_name)) index_row = index_row[0] return SchemaIndex(index_row, _space) def flush(self): self.schema.clear() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/error.py0000644000000000000000000002044113306562377025600 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 ''' Python DB API compatible exceptions http://www.python.org/dev/peps/pep-0249/ The PEP-249 says that database related exceptions must be inherited as follows: StandardError |__Warning |__Error |__InterfaceError |__DatabaseError |__DataError |__OperationalError |__IntegrityError |__InternalError |__ProgrammingError |__NotSupportedError ''' import os import socket import sys import warnings try: class Error(StandardError): '''Base class for error exceptions''' except NameError: class Error(Exception): '''Base class for error exceptions''' class DatabaseError(Error): '''Error related to the database engine''' class InterfaceError(Error): ''' Error related to the database interface rather than the database itself ''' # Monkey patch os.strerror for win32 if sys.platform == "win32": # Windows Sockets Error Codes (not all, but related on 
network errors) # http://msdn.microsoft.com/en-us/library/windows/desktop/ms740668(v=vs.85).aspx _code2str = { 10004: "Interrupted system call", 10009: "Bad file descriptor", 10013: "Permission denied", 10014: "Bad address", 10022: "Invalid argument", 10024: "Too many open files", 10035: "Resource temporarily unavailable", 10036: "Operation now in progress", 10037: "Operation already in progress", 10038: "Socket operation on nonsocket", 10039: "Destination address required", 10040: "Message too long", 10041: "Protocol wrong type for socket", 10042: "Bad protocol option", 10043: "Protocol not supported", 10044: "Socket type not supported", 10045: "Operation not supported", 10046: "Protocol family not supported", 10047: "Address family not supported by protocol family", 10048: "Address already in use", 10049: "Cannot assign requested address", 10050: "Network is down", 10051: "Network is unreachable", 10052: "Network dropped connection on reset", 10053: "Software caused connection abort", 10054: "Connection reset by peer", 10055: "No buffer space available", 10056: "Socket is already connected", 10057: "Socket is not connected", 10058: "Cannot send after transport endpoint shutdown", 10060: "Connection timed out", 10061: "Connection refused", 10062: "Cannot translate name", 10063: "File name too long", 10064: "Host is down", 10065: "No route to host", 11001: "Host not found", 11004: "Name or service not known" } os_strerror_orig = os.strerror def os_strerror_patched(code): ''' Return cross-platform message about socket-related errors This function exists because under Windows os.strerror returns 'Unknown error' on all socket-related errors. And socket-related exception contain broken non-ascii encoded messages. 
''' message = os_strerror_orig(code) if not message.startswith("Unknown"): return message else: return _code2str.get(code, "Unknown error %s" % code) os.strerror = os_strerror_patched del os_strerror_patched class SchemaError(DatabaseError): def __init__(self, value): super(SchemaError, self).__init__(0, value) self.value = value def __str__(self): return str(self.value) class NetworkError(DatabaseError): '''Error related to network''' def __init__(self, orig_exception=None, *args): self.errno = 0 if hasattr(orig_exception, 'errno'): self.errno = orig_exception.errno if orig_exception: if isinstance(orig_exception, socket.timeout): self.message = "Socket timeout" super(NetworkError, self).__init__(0, self.message) elif isinstance(orig_exception, socket.error): self.message = os.strerror(orig_exception.errno) super(NetworkError, self).__init__( orig_exception.errno, self.message) else: super(NetworkError, self).__init__(orig_exception, *args) class NetworkWarning(UserWarning): '''Warning related to network''' pass class RetryWarning(UserWarning): ''' Warning is emited in case of server return completion_status == 1 (try again) ''' pass # always print this warnings warnings.filterwarnings("always", category=NetworkWarning) warnings.filterwarnings("always", category=RetryWarning) def warn(message, warning_class): ''' Emit warinig message. Just like standard warnings.warn() but don't output full filename. 
''' frame = sys._getframe(2) # pylint: disable=W0212 module_name = frame.f_globals.get("__name__") line_no = frame.f_lineno warnings.warn_explicit(message, warning_class, module_name, line_no) _strerror = { 0: ("ER_OK", "OK"), 1: ("ER_ILLEGAL_PARAMS", "Illegal parameters, %s"), 2: ("ER_MEMORY_ISSUE", "Failed to allocate %u bytes in %s for %s"), 3: ("ER_TUPLE_FOUND", "Duplicate key exists in unique index %u"), 4: ("ER_TUPLE_NOT_FOUND", "Tuple doesn't exist in index %u"), 5: ("ER_UNSUPPORTED", "%s does not support %s"), 6: ("ER_NONMASTER", "Can't modify data on a replication slave. My master is: %s"), 7: ("ER_SECONDARY", "Can't modify data upon a request on the secondary port."), 8: ("ER_INJECTION", "Error injection '%s'"), 9: ("ER_CREATE_SPACE", "Failed to create space %u: %s"), 10: ("ER_SPACE_EXISTS", "Space %u already exists"), 11: ("ER_DROP_SPACE", "Can't drop space %u: %s"), 12: ("ER_ALTER_SPACE", "Can't modify space %u: %s"), 13: ("ER_INDEX_TYPE", "Unsupported index type supplied for index %u in space %u"), 14: ("ER_MODIFY_INDEX", "Can't create or modify index %u in space %u: %s"), 15: ("ER_LAST_DROP", "Can't drop the primary key in a system space, space id %u"), 16: ("ER_TUPLE_FORMAT_LIMIT", "Tuple format limit reached: %u"), 17: ("ER_DROP_PRIMARY_KEY", "Can't drop primary key in space %u while secondary keys exist"), 18: ("ER_KEY_FIELD_TYPE", ("Supplied key type of part %u does not match index part type:" " expected %s")), 19: ("ER_EXACT_MATCH", "Invalid key part count in an exact match (expected %u, got %u)"), 20: ("ER_INVALID_MSGPACK", "Invalid MsgPack - %s"), 21: ("ER_PROC_RET", "msgpack.encode: can not encode Lua type '%s'"), 22: ("ER_TUPLE_NOT_ARRAY", "Tuple/Key must be MsgPack array"), 23: ("ER_FIELD_TYPE", ("Tuple field %u type does not match one required by operation:" " expected %s")), 24: ("ER_FIELD_TYPE_MISMATCH", ("Ambiguous field type in index %u, key part %u. 
Requested type" " is %s but the field has previously been defined as %s")), 25: ("ER_SPLICE", "Field SPLICE error: %s"), 26: ("ER_ARG_TYPE", ("Argument type in operation on field %u does not match field type:" " expected a %s")), 27: ("ER_TUPLE_IS_TOO_LONG", "Tuple is too long %u"), 28: ("ER_UNKNOWN_UPDATE_OP", "Unknown UPDATE operation"), 29: ("ER_UPDATE_FIELD", "Field %u UPDATE error: %s"), 30: ("ER_FIBER_STACK", "Can not create a new fiber: recursion limit reached"), 31: ("ER_KEY_PART_COUNT", "Invalid key part count (expected [0..%u], got %u)"), 32: ("ER_PROC_LUA", "%s"), 33: ("ER_NO_SUCH_PROC", "Procedure '%.*s' is not defined"), 34: ("ER_NO_SUCH_TRIGGER", "Trigger is not found"), 35: ("ER_NO_SUCH_INDEX", "No index #%u is defined in space %u"), 36: ("ER_NO_SUCH_SPACE", "Space %u does not exist"), 37: ("ER_NO_SUCH_FIELD", "Field %u was not found in the tuple"), 38: ("ER_SPACE_ARITY", "Tuple field count %u does not match space %u arity %u"), 39: ("ER_INDEX_ARITY", ("Tuple field count %u is less than required by a defined index" " (expected %u)")), 40: ("ER_WAL_IO", "Failed to write to disk"), 41: ("ER_MORE_THAN_ONE_TUPLE", "More than one tuple found"), } def tnt_strerror(num): if num in _strerror: return _strerror[num] return "UNDEFINED" tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool-python/tarantool/request.py0000644000000000000000000002116613306562377026144 0ustar rootroot# -*- coding: utf-8 -*- # pylint: disable=C0301,W0105,W0401,W0614 ''' Request types definitions ''' import six import msgpack import hashlib from tarantool.const import ( IPROTO_CODE, IPROTO_SYNC, IPROTO_SPACE_ID, IPROTO_INDEX_ID, IPROTO_LIMIT, IPROTO_OFFSET, IPROTO_KEY, IPROTO_USER_NAME, IPROTO_TUPLE, IPROTO_FUNCTION_NAME, IPROTO_ITERATOR, IPROTO_SERVER_UUID, IPROTO_CLUSTER_UUID, IPROTO_VCLOCK, IPROTO_EXPR, IPROTO_OPS, REQUEST_TYPE_OK, REQUEST_TYPE_PING, REQUEST_TYPE_SELECT, REQUEST_TYPE_INSERT, REQUEST_TYPE_REPLACE, REQUEST_TYPE_DELETE, REQUEST_TYPE_UPDATE, REQUEST_TYPE_UPSERT, 
REQUEST_TYPE_CALL, REQUEST_TYPE_EVAL, REQUEST_TYPE_AUTHENTICATE, REQUEST_TYPE_JOIN, REQUEST_TYPE_SUBSCRIBE ) class Request(object): ''' Represents a single request to the server in compliance with the Tarantool protocol. Responsible for data encapsulation and builds binary packet to be sent to the server. This is the abstract base class. Specific request types are implemented by the inherited classes. ''' request_type = None def __init__(self, conn): self._bytes = None self.conn = conn self._sync = None def __bytes__(self): return self._bytes __str__ = __bytes__ @property def sync(self): '''\ :type: int Required field in the server request. Contains request header IPROTO_SYNC. ''' return self._sync def header(self, length): self._sync = self.conn.generate_sync() header = msgpack.dumps({IPROTO_CODE: self.request_type, IPROTO_SYNC: self._sync}) return msgpack.dumps(length + len(header)) + header class RequestInsert(Request): ''' Represents INSERT request ''' request_type = REQUEST_TYPE_INSERT # pylint: disable=W0231 def __init__(self, conn, space_no, values): ''' ''' super(RequestInsert, self).__init__(conn) assert isinstance(values, (tuple, list)) request_body = msgpack.dumps({IPROTO_SPACE_ID: space_no, IPROTO_TUPLE: values}) self._bytes = self.header(len(request_body)) + request_body class RequestAuthenticate(Request): ''' Represents AUTHENTICATE request ''' request_type = REQUEST_TYPE_AUTHENTICATE def __init__(self, conn, salt, user, password): super(RequestAuthenticate, self).__init__(conn) def sha1(values): sha = hashlib.sha1() for i in values: if i is not None: sha.update(i if isinstance(i, six.binary_type) else i.encode()) return sha.digest() def strxor(rhs, lhs): if six.PY2: return "".join(chr(ord(x) ^ ord(y)) for x, y in zip(rhs, lhs)) return bytes([x ^ y for x, y in zip(rhs, lhs)]) hash1 = sha1((password,)) hash2 = sha1((hash1,)) scramble = sha1((salt, hash2)) scramble = strxor(hash1, scramble) request_body = msgpack.dumps({IPROTO_USER_NAME: user, 
IPROTO_TUPLE: ("chap-sha1", scramble)}) self._bytes = self.header(len(request_body)) + request_body class RequestReplace(Request): ''' Represents REPLACE request ''' request_type = REQUEST_TYPE_REPLACE # pylint: disable=W0231 def __init__(self, conn, space_no, values): ''' ''' super(RequestReplace, self).__init__(conn) assert isinstance(values, (tuple, list)) request_body = msgpack.dumps({IPROTO_SPACE_ID: space_no, IPROTO_TUPLE: values}) self._bytes = self.header(len(request_body)) + request_body class RequestDelete(Request): ''' Represents DELETE request ''' request_type = REQUEST_TYPE_DELETE # pylint: disable=W0231 def __init__(self, conn, space_no, index_no, key): ''' ''' super(RequestDelete, self).__init__(conn) request_body = msgpack.dumps({IPROTO_SPACE_ID: space_no, IPROTO_INDEX_ID: index_no, IPROTO_KEY: key}) self._bytes = self.header(len(request_body)) + request_body class RequestSelect(Request): ''' Represents SELECT request ''' request_type = REQUEST_TYPE_SELECT # pylint: disable=W0231 def __init__(self, conn, space_no, index_no, key, offset, limit, iterator): super(RequestSelect, self).__init__(conn) request_body = msgpack.dumps({IPROTO_SPACE_ID: space_no, IPROTO_INDEX_ID: index_no, IPROTO_OFFSET: offset, IPROTO_LIMIT: limit, IPROTO_ITERATOR: iterator, IPROTO_KEY: key}) self._bytes = self.header(len(request_body)) + request_body class RequestUpdate(Request): ''' Represents UPDATE request ''' request_type = REQUEST_TYPE_UPDATE # pylint: disable=W0231 def __init__(self, conn, space_no, index_no, key, op_list): super(RequestUpdate, self).__init__(conn) request_body = msgpack.dumps({IPROTO_SPACE_ID: space_no, IPROTO_INDEX_ID: index_no, IPROTO_KEY: key, IPROTO_TUPLE: op_list}) self._bytes = self.header(len(request_body)) + request_body class RequestCall(Request): ''' Represents CALL request ''' request_type = REQUEST_TYPE_CALL # pylint: disable=W0231 def __init__(self, conn, name, args): super(RequestCall, self).__init__(conn) assert isinstance(args, (list, 
tuple)) request_body = msgpack.dumps({IPROTO_FUNCTION_NAME: name, IPROTO_TUPLE: args}) self._bytes = self.header(len(request_body)) + request_body class RequestEval(Request): ''' Represents EVAL request ''' request_type = REQUEST_TYPE_EVAL # pylint: disable=W0231 def __init__(self, conn, name, args): super(RequestEval, self).__init__(conn) assert isinstance(args, (list, tuple)) request_body = msgpack.dumps({IPROTO_EXPR: name, IPROTO_TUPLE: args}) self._bytes = self.header(len(request_body)) + request_body class RequestPing(Request): ''' Ping body is empty, so body_length == 0 and there's no body ''' request_type = REQUEST_TYPE_PING def __init__(self, conn): super(RequestPing, self).__init__(conn) self._bytes = self.header(0) class RequestUpsert(Request): ''' Represents UPSERT request ''' request_type = REQUEST_TYPE_UPSERT # pylint: disable=W0231 def __init__(self, conn, space_no, index_no, tuple_value, op_list): super(RequestUpsert, self).__init__(conn) request_body = msgpack.dumps({IPROTO_SPACE_ID: space_no, IPROTO_INDEX_ID: index_no, IPROTO_TUPLE: tuple_value, IPROTO_OPS: op_list}) self._bytes = self.header(len(request_body)) + request_body class RequestJoin(Request): ''' Represents JOIN request ''' request_type = REQUEST_TYPE_JOIN # pylint: disable=W0231 def __init__(self, conn, server_uuid): super(RequestJoin, self).__init__(conn) request_body = msgpack.dumps({IPROTO_SERVER_UUID: server_uuid}) self._bytes = self.header(len(request_body)) + request_body class RequestSubscribe(Request): ''' Represents SUBSCRIBE request ''' request_type = REQUEST_TYPE_SUBSCRIBE # pylint: disable=W0231 def __init__(self, conn, cluster_uuid, server_uuid, vclock): super(RequestSubscribe, self).__init__(conn) assert isinstance(vclock, dict) request_body = msgpack.dumps({ IPROTO_CLUSTER_UUID: cluster_uuid, IPROTO_SERVER_UUID: server_uuid, IPROTO_VCLOCK: vclock }) self._bytes = self.header(len(request_body)) + request_body class RequestOK(Request): ''' Represents OK acknowledgement ''' 
request_type = REQUEST_TYPE_OK # pylint: disable=W0231 def __init__(self, conn, sync): super(RequestOK, self).__init__(conn) header = msgpack.dumps({IPROTO_CODE: self.request_type, IPROTO_SYNC: sync}) self._bytes = msgpack.dumps(len(header)) + header tarantool_1.9.1.26.g63eb81e3c/test-run/lib/unittest_server.py0000644000000000000000000000461113306562360022360 0ustar rootrootimport os import sys import glob from subprocess import Popen, PIPE, STDOUT from lib.server import Server from lib.tarantool_server import Test class UnitTest(Test): def __init__(self, *args, **kwargs): Test.__init__(self, *args, **kwargs) self.valgrind = kwargs.get('valgrind', False) def execute(self, server): server.current_test = self execs = server.prepare_args() proc = Popen(execs, stdout=PIPE, stderr=STDOUT) sys.stdout.write(proc.communicate()[0]) class UnittestServer(Server): """A dummy server implementation for unit test suite""" def __new__(cls, ini=None, *args, **kwargs): cls = Server.get_mixed_class(cls, ini) return object.__new__(cls) def __init__(self, _ini=None, test_suite=None): if _ini is None: _ini = {} ini = { 'vardir': None, }; ini.update(_ini) Server.__init__(self, ini, test_suite) self.testdir = os.path.abspath(os.curdir) self.vardir = ini['vardir'] self.builddir = ini['builddir'] self.debug = False self.name = 'unittest_server' @property def logfile(self): return self.current_test.tmp_result @property def binary(self): return UnittestServer.prepare_args(self)[0] def prepare_args(self, args=[]): return [os.path.join(self.builddir, "test", self.current_test.name)] + args def deploy(self, vardir=None, silent=True, wait=True): self.vardir = vardir if not os.access(self.vardir, os.F_OK): os.makedirs(self.vardir) @classmethod def find_exe(cls, builddir): cls.builddir = builddir @staticmethod def find_tests(test_suite, suite_path): def patterned(test, patterns): answer = [] for i in patterns: if test.name.find(i) != -1: answer.append(test) return answer test_suite.ini['suite'] = 
suite_path tests = glob.glob(os.path.join(suite_path, "*.test" )) if not tests: tests = glob.glob(os.path.join(test_suite.args.builddir, 'test', suite_path, '*.test')) test_suite.tests = [UnitTest(k, test_suite.args, test_suite.ini) for k in sorted(tests)] test_suite.tests = sum([patterned(x, test_suite.args.tests) for x in test_suite.tests], []) def print_log(self, lines): pass tarantool_1.9.1.26.g63eb81e3c/test-run/lib/test.py0000644000000000000000000003071313306562360020074 0ustar rootrootimport os import re import sys import time import filecmp import difflib import traceback import gevent import pytap13 import pprint import shutil try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import lib from lib.utils import non_empty_valgrind_logs, print_tail_n from lib.colorer import color_stdout class TestExecutionError(OSError): """To be raised when a test execution fails""" pass class TestRunGreenlet(gevent.Greenlet): def __init__(self, green_callable, *args, **kwargs): self.callable = green_callable self.callable_args = args self.callable_kwargs = kwargs super(TestRunGreenlet, self).__init__() def _run(self, *args, **kwargs): self.callable(*self.callable_args, **self.callable_kwargs) def __repr__(self): return "" % (hex(id(self)), getattr(self, "info", None)) class FilteredStream: """Helper class to filter .result file output""" def __init__(self, filename): # # always open the output stream in line-buffered mode, # to see partial results of a failed test # self.stream = open(filename, "w+", 1) self.filters = [] self.inspector = None def write(self, fragment): """Apply all filters, then write result to the undelrying stream. 
Do line-oriented filtering: the fragment doesn't have to represent just one line.""" fragment_stream = StringIO(fragment) skipped = False for line in fragment_stream: original_len = len(line.strip()) for pattern, replacement in self.filters: line = re.sub(pattern, replacement, line) # don't write lines that are completely filtered out: skipped = original_len and not line.strip() if skipped: break if not skipped: self.stream.write(line) def push_filter(self, pattern, replacement): self.filters.append([pattern, replacement]) def pop_filter(self): self.filters.pop() def clear_all_filters(self): self.filters = [] def close(self): self.clear_all_filters() self.stream.close() def flush(self): self.stream.flush() class Test: """An individual test file. A test object can run itself and remembers completion state of the run. If file .skipcond is exists it will be executed before test and if it sets self.skip to True value the test will be skipped. """ rg = re.compile('\.test.*') def __init__(self, name, args, suite_ini, params={}, conf_name=None): """Initialize test properties: path to test file, path to temporary result file, path to the client program, test status.""" self.name = name self.args = args self.suite_ini = suite_ini self.result = os.path.join(suite_ini['suite'], os.path.basename(self.rg.sub('.result', name))) self.skip_cond = os.path.join(suite_ini['suite'], os.path.basename(self.rg.sub('.skipcond', name))) self.tmp_result = os.path.join(self.suite_ini['vardir'], os.path.basename(self.result)) self.reject = self.rg.sub('.reject', name) self.is_executed = False self.is_executed_ok = None self.is_equal_result = None self.is_valgrind_clean = True self.is_terminated = False self.run_params = params self.conf_name = conf_name # filled in execute() when a greenlet runs self.current_test_greenlet = None # prevent double/triple reporting self.is_crash_reported = False @property def id(self): return self.name, self.conf_name def passed(self): """Return true if this 
test was run successfully.""" return self.is_executed and self.is_executed_ok and self.is_equal_result def execute(self, server): # Note: don't forget to set 'server.current_test = self' in # inherited classes. Crash reporting relying on that. server.current_test = self def run(self, server): """ Execute the test assuming it's a python program. If the test aborts, print its output to stdout, and raise an exception. Else, comprare result and reject files. If there is a difference, print it to stdout. Returns short status of the test as a string: 'skip', 'pass', 'new', or 'fail'. There is also one possible value for short_status, 'disabled', but it returned in the caller, TestSuite.run_test(). """ # Note: test was created before certain worker become known, so we need # to update temporary result directory here as it depends on 'vardir'. self.tmp_result = os.path.join(self.suite_ini['vardir'], os.path.basename(self.result)) diagnostics = "unknown" save_stdout = sys.stdout try: self.skip = False if os.path.exists(self.skip_cond): sys.stdout = FilteredStream(self.tmp_result) stdout_fileno = sys.stdout.stream.fileno() execfile(self.skip_cond, dict(locals(), **server.__dict__)) sys.stdout.close() sys.stdout = save_stdout if not self.skip: sys.stdout = FilteredStream(self.tmp_result) stdout_fileno = sys.stdout.stream.fileno() self.execute(server) sys.stdout.flush() self.is_executed_ok = True except TestExecutionError: self.is_executed_ok = False except Exception as e: if e.__class__.__name__ == 'TarantoolStartError': # worker should stop raise color_stdout('\nTest.run() received the following error:\n' + traceback.format_exc() + '\n', schema='error') diagnostics = str(e) finally: if sys.stdout and sys.stdout != save_stdout: sys.stdout.close() sys.stdout = save_stdout self.is_executed = True sys.stdout.flush() is_tap = False if not self.skip: if self.is_executed_ok and os.path.isfile(self.result): self.is_equal_result = filecmp.cmp(self.result, self.tmp_result) elif 
self.is_executed_ok: if lib.Options().args.is_verbose: color_stdout('\n') with open(self.tmp_result, 'r') as f: color_stdout(f.read(), schema='log') is_tap, is_ok = self.check_tap_output() self.is_equal_result = is_ok else: self.is_equal_result = 1 if self.args.valgrind: non_empty_logs = non_empty_valgrind_logs( server.current_valgrind_logs(for_test=True)) self.is_valgrind_clean = not bool(non_empty_logs) short_status = None if self.skip: short_status = 'skip' color_stdout("[ skip ]\n", schema='test_skip') if os.path.exists(self.tmp_result): os.remove(self.tmp_result) elif self.is_executed_ok and self.is_equal_result and self.is_valgrind_clean: short_status = 'pass' color_stdout("[ pass ]\n", schema='test_pass') if os.path.exists(self.tmp_result): os.remove(self.tmp_result) elif (self.is_executed_ok and not self.is_equal_result and not os.path.isfile(self.result)) and not is_tap: shutil.copy(self.tmp_result, self.result) short_status = 'new' color_stdout("[ new ]\n", schema='test_new') else: shutil.copy(self.tmp_result, self.reject) short_status = 'fail' color_stdout("[ fail ]\n", schema='test_fail') where = "" if not self.is_crash_reported and not self.is_executed_ok: self.print_diagnostics(self.reject, "Test failed! Last 15 lines of the result file:\n") server.print_log(15) where = ": test execution aborted, reason '{0}'".format(diagnostics) elif not self.is_crash_reported and not self.is_equal_result: self.print_unidiff() server.print_log(15) where = ": wrong test output" elif not self.is_crash_reported and not self.is_valgrind_clean: os.remove(self.reject) for log_file in non_empty_logs: self.print_diagnostics(log_file, "Test failed! Last 10 lines of {}:\n".format( log_file)) where = ": there were warnings in the valgrind log file(s)" return short_status def print_diagnostics(self, log_file, message): """Print 10 lines of client program output leading to test failure. 
Used to diagnose a failure of the client program""" color_stdout(message, schema='error') print_tail_n(log_file, 10) def print_unidiff(self): """Print a unified diff between .test and .result files. Used to establish the cause of a failure when .test differs from .result.""" color_stdout("\nTest failed! Result content mismatch:\n", schema='error') with open(self.result, "r") as result: with open(self.reject, "r") as reject: result_time = time.ctime(os.stat(self.result).st_mtime) reject_time = time.ctime(os.stat(self.reject).st_mtime) diff = difflib.unified_diff(result.readlines(), reject.readlines(), self.result, self.reject, result_time, reject_time) color_stdout.writeout_unidiff(diff) def tap_parse_print_yaml(self, yml): if 'expected' in yml and 'got' in yml: color_stdout('Expected: %s\n' % yml['expected'], schema='error') color_stdout('Got: %s\n' % yml['got'], schema='error') del yml['expected'] del yml['got'] if 'trace' in yml: color_stdout('Traceback:\n', schema='error') for fr in yml['trace']: fname = fr.get('name', '') if fname: fname = " function '%s'" % fname line = '[%-4s]%s at <%s:%d>\n' % ( fr['what'], fname, fr['filename'], fr['line'] ) color_stdout(line, schema='error') del yml['trace'] if 'filename' in yml: del yml['filename'] if 'line' in yml: del yml['line'] yaml_str = pprint.pformat(yml) color_stdout('\n', schema='error') if len(yml): for line in yaml_str.splitlines(): color_stdout(line + '\n', schema='error') color_stdout('\n', schema='error') def check_tap_output(self): """ Returns is_tap, is_ok """ if not os.path.isfile(self.tmp_result): color_strout('\nCannot find %s\n' % self.tmp_result, schema='error') self.is_crash_reported = True return False with open(self.tmp_result, 'r') as f: content = f.read() tap = pytap13.TAP13() try: tap.parse(content) except ValueError as e: color_stdout('\nTAP13 parse failed: %s\n' % str(e), schema='error') self.is_crash_reported = True return False, False is_ok = True for test_case in tap.tests: if 
test_case.result == 'ok': continue if is_ok: color_stdout('\n') color_stdout('%s %s %s # %s %s\n' % ( test_case.result, test_case.id or '', test_case.description or '-', test_case.directive or '', test_case.comment or ''), schema='error') if test_case.yaml: self.tap_parse_print_yaml(test_case.yaml) is_ok = False if not is_ok: color_stdout('Rejected result file: %s\n' % self.reject, schema='test_var') self.is_crash_reported = True return True, is_ok tarantool_1.9.1.26.g63eb81e3c/test-run/lib/pytap13.py0000644000000000000000000001642313306562360020420 0ustar rootroot# Copyright 2013, Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
# # Author: Josef Skladanka import re try: from CStringIO import StringIO except ImportError: from StringIO import StringIO import yaml RE_VERSION = re.compile(r"^\s*TAP version 13\s*$") RE_PLAN = re.compile(r"^\s*(?P\d+)\.\.(?P\d+)\s*(#\s*(?P.*))?\s*$") RE_TEST_LINE = re.compile(r"^\s*(?P(not\s+)?ok)\s*(?P\d+)?\s*(?P[^#]+)?\s*(#\s*(?PTODO|SKIP)?\s*(?P.+)?)?\s*$", re.IGNORECASE) RE_DIAGNOSTIC = re.compile(r"^\s*#\s*(?P.+)?\s*$") RE_YAMLISH_START = re.compile(r"^\s*---.*$") RE_YAMLISH_END = re.compile(r"^\s*\.\.\.\s*$") class Test(object): def __init__(self, result, id, description = None, directive = None, comment = None): self.result = result self.id = id self.description = description try: self.directive = directive.upper() except AttributeError: self.directive = directive self.comment = comment self.yaml = None self._yaml_buffer = StringIO() self.diagnostics = [] class TAP13(object): def __init__(self, strict = False): self.tests = [] self.__tests_counter = 0 self.tests_planned = None self.strict = strict def _parse(self, source): seek_version = True seek_plan = False seek_test = False in_test = False in_yaml = False for line in source: if not seek_version and RE_VERSION.match(line): raise ValueError("Bad TAP format, multiple TAP headers") if in_yaml: if RE_YAMLISH_END.match(line): test = self.tests[-1] try: test.yaml = yaml.load(test._yaml_buffer.getvalue()) except Exception as e: if not self.strict: continue test_num = len(self.tests) + 1 self.tests.append(Test( 'not ok', test_num, comment = 'DIAG: Test %s has wrong YAML: %s' % ( test_num, str(e)))) in_yaml = False else: self.tests[-1]._yaml_buffer.write(line) continue if in_test: if RE_DIAGNOSTIC.match(line): self.tests[-1].diagnostics.append(line.strip()) continue if RE_YAMLISH_START.match(line): in_yaml = True continue on_top_level = not line.startswith(' ') raw_line = line.rstrip('\n') line = line.strip() if RE_DIAGNOSTIC.match(line): continue # this is "beginning" of the parsing, skip all lines until # 
version is found (in non-strict mode) if seek_version: m = RE_VERSION.match(line) if m: seek_version = False seek_plan = True seek_test = True continue elif not self.strict: continue m = RE_PLAN.match(line) if m: if seek_plan and on_top_level: d = m.groupdict() self.tests_planned = int(d.get('end', 0)) seek_plan = False # Stop processing if tests were found before the plan # if plan is at the end, it must be the last line -> stop processing if self.__tests_counter > 0: break continue elif not on_top_level: continue if seek_test: m = RE_TEST_LINE.match(line) if m and on_top_level: self.__tests_counter += 1 t_attrs = m.groupdict() if t_attrs['id'] is None: t_attrs['id'] = self.__tests_counter t_attrs['id'] = int(t_attrs['id']) if t_attrs['id'] < self.__tests_counter: raise ValueError("Descending test id on line: %r" % line) # according to TAP13 specs, missing tests must be handled as 'not ok' # here we add the missing tests in sequence while t_attrs['id'] > self.__tests_counter: comment = 'DIAG: Test %s not present' % self.__tests_counter self.tests.append(Test('not ok', self.__tests_counter, comment = comment)) self.__tests_counter += 1 t = Test(**t_attrs) self.tests.append(t) in_test = True continue elif not on_top_level: continue if self.strict: raise ValueError('Wrong TAP line: [' + raw_line + ']') if self.tests_planned is None: # TODO: raise better error than ValueError raise ValueError("Missing plan in the TAP source") if len(self.tests) != self.tests_planned: self.tests.append(Test('not ok', len(self.tests), comment = 'DIAG: Expected %s tests, got %s' % \ (self.tests_planned, len(self.tests)))) def parse(self, source): if isinstance(source, (str, unicode)): self._parse(StringIO(source)) elif hasattr(source, "__iter__"): self._parse(source) if __name__ == "__main__": input = """ TAP version 13 ok 1 - Input file opened not ok 2 - First line of the input valid --- message: 'First line invalid' severity: fail data: got: 'Flirble' expect: 'Fnible' ... 
ok - Read the rest of the file not ok 5 - Summarized correctly # TODO Not written yet --- message: "Can't make summary yet" severity: todo ... ok Description # Diagnostic --- message: 'Failure message' severity: fail data: got: - 1 - 3 - 2 expect: - 1 - 2 - 3 ... 1..6 """ t = TAP13() t.parse(input) import pprint for test in t.tests: print test.result, test.id, test.description, "#", test.directive, test.comment pprint.pprint(test._yaml_buffer) pprint.pprint(test.yaml) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/singleton.py0000644000000000000000000000037713306562360021122 0ustar rootrootclass Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__( *args, **kwargs) return cls._instances[cls] tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool_connection.py0000644000000000000000000001367313306562360023345 0ustar rootroot__author__ = "Konstantin Osipov " # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. import ctypes import errno import re import socket from contextlib import contextmanager import gevent from gevent import socket as gsocket from connpool import ConnectionPool from test import TestRunGreenlet from utils import warn_unix_socket class TarantoolPool(ConnectionPool): def __init__(self, host, port, *args, **kwargs): self.host = host self.port = port super(TarantoolPool, self).__init__(*args, **kwargs) def _new_connection(self): result = None if self.host == 'unix/' or re.search(r'^/', str(self.port)): warn_unix_socket(self.port) result = gsocket.socket(gsocket.AF_UNIX, gsocket.SOCK_STREAM) result.connect(self.port) else: result = gsocket.create_connection((self.host, self.port)) result.setsockopt(gsocket.SOL_TCP, gsocket.TCP_NODELAY, 1) return result def _addOne(self): stime = 0.1 while True: try: c = self._new_connection() except gsocket.error: c = None if c: break gevent.sleep(stime) if stime < 400: stime *= 2 self.conn.append(c) self.lock.release() @contextmanager def get(self): self.lock.acquire() try: c = self.conn.pop() yield c except self.exc_classes: greenlet = TestRunGreenlet(self._addOne) greenlet.start_later(1) raise except: self.conn.append(c) self.lock.release() raise else: self.conn.append(c) self.lock.release() def close_all(self): self.conn.clear() class TarantoolConnection(object): @property def uri(self): if self.host == 'unix/' or re.search(r'^/', str(self.port)): return self.port else: return self.host+':'+str(self.port) def 
__init__(self, host, port): self.host = host self.port = port self.is_connected = False if self.host == 'unix/' or re.search(r'^/', str(self.port)): warn_unix_socket(self.port) def connect(self): if self.host == 'unix/' or re.search(r'^/', str(self.port)): self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.socket.connect(self.port) else: self.socket = socket.create_connection((self.host, self.port)) self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) self.is_connected = True def disconnect(self): if self.is_connected: self.socket.close() self.is_connected = False def reconnect(self): self.disconnect() self.connect() def opt_reconnect(self): """ On a socket which was disconnected, recv of 0 bytes immediately returns with no data. On a socket which is alive, it returns EAGAIN. Make use of this property and detect whether or not the socket is dead. Reconnect a dead socket, do nothing if the socket is good.""" try: if not self.is_connected or self.socket.recv( 1, socket.MSG_DONTWAIT | socket.MSG_PEEK) == '': self.reconnect() except socket.error as e: if e.errno == errno.EAGAIN: pass else: self.reconnect() def clone(self): return type(self)(self.host, self.port) def execute(self, command, silent=True): self.opt_reconnect() return self.execute_no_reconnect(command, silent) def __enter__(self): self.connect() return self def __exit__(self, type, value, tb): self.disconnect() def __call__(self, command, silent=False, simple=False): return self.execute(command, silent) class TarantoolAsyncConnection(TarantoolConnection): pool = TarantoolPool def __init__(self, host, port): super(TarantoolAsyncConnection, self).__init__(host, port) self.connections = None libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) self._sys_recv = libc.recv @property def socket(self): with self.connections.get() as c: result = c return result def connect(self): self.connections = self.pool(self.host, self.port, 3) self.is_connected = True def 
disconnect(self): if self.is_connected: self.connections.close_all() self.is_connected = False def execute(self, command, silent=True): return self.execute_no_reconnect(command, silent) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/admin_connection.py0000644000000000000000000001007513306562360022423 0ustar rootroot__author__ = "Konstantin Osipov " # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
import re import sys import yaml from tarantool_connection import TarantoolConnection, TarantoolPool, TarantoolAsyncConnection ADMIN_SEPARATOR = '\n' def get_handshake(sock, length=128, max_try=100): """ Correct way to get tarantool handshake """ result = "" i = 0 while len(result) != length and i < max_try: result = "%s%s" % (result, sock.recv(length-len(result))) # max_try counter for tarantool/gh-1362 i += 1 return result class AdminPool(TarantoolPool): def _new_connection(self): s = super(AdminPool, self)._new_connection() handshake = get_handshake(s) if handshake and not re.search(r'^Tarantool.*console.*', str(handshake)): # tarantool/gh-1163 # 1. raise only if handshake is not full # 2. be silent on crashes or if it's server.stop() operation print 'Handshake error {\n', handshake, '\n}' raise RuntimeError('Broken tarantool console handshake') return s class ExecMixIn(object): def cmd(self, socket, cmd, silent): socket.sendall(cmd) bufsiz = 4096 res = "" while True: buf = socket.recv(bufsiz) if not buf: break res = res + buf if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0): break try: yaml.load(res) finally: if not silent: sys.stdout.write(res.replace("\r\n", "\n")) return res class AdminConnection(TarantoolConnection, ExecMixIn): def execute_no_reconnect(self, command, silent): if not command: return if not silent: sys.stdout.write(command + ADMIN_SEPARATOR) cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR return self.cmd(self.socket, cmd, silent) def connect(self): super(AdminConnection, self).connect() handshake = get_handshake(self.socket) if not re.search(r'^Tarantool.*console.*', str(handshake)): raise RuntimeError('Broken tarantool console handshake') class AdminAsyncConnection(TarantoolAsyncConnection, ExecMixIn): pool = AdminPool def execute_no_reconnect(self, command, silent): if not command: return if not silent: sys.stdout.write(command + ADMIN_SEPARATOR) cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR result = None with 
self.connections.get() as sock: result = self.cmd(sock, cmd, silent) return result def execute(self, command, silent=True): if not self.is_connected: self.connect() try: return self.execute_no_reconnect(command, silent) except Exception: return None tarantool_1.9.1.26.g63eb81e3c/test-run/lib/__init__.py0000644000000000000000000000325113306562360020651 0ustar rootrootimport os import sys import shutil from lib.options import Options from lib.tarantool_server import TarantoolServer from lib.unittest_server import UnittestServer from utils import warn_unix_sockets_at_start from lib.colorer import color_stdout __all__ = ['Options'] def setenv(): """Find where is tarantool dir by check_file""" check_file = 'src/fiber.h' path = os.path.abspath('../') while path != '/': if os.path.isfile('%s/%s' % (path, check_file)): os.putenv('TARANTOOL_SRC_DIR', path) break path = os.path.abspath(os.path.join(path, '../')) def module_init(): """ Called at import """ args = Options().args # Change the current working directory to where all test # collections are supposed to reside # If script executed with (python test-run.py) dirname is '' # so we need to make it . path = os.path.dirname(sys.argv[0]) if not path: path = '.' 
os.chdir(path) setenv() warn_unix_sockets_at_start(args.vardir) # always run with clean (non-existent) 'var' directory try: shutil.rmtree(args.vardir) except OSError: pass args.builddir = os.path.abspath(os.path.expanduser(args.builddir)) SOURCEDIR = os.path.dirname(os.getcwd()) BUILDDIR = args.builddir os.environ["SOURCEDIR"] = SOURCEDIR os.environ["BUILDDIR"] = BUILDDIR soext = sys.platform == 'darwin' and 'dylib' or 'so' os.environ["LUA_PATH"] = SOURCEDIR+"/?.lua;"+SOURCEDIR+"/?/init.lua;;" os.environ["LUA_CPATH"] = BUILDDIR+"/?."+soext+";;" TarantoolServer.find_exe(args.builddir) UnittestServer.find_exe(args.builddir) # Init ###### module_init() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/inspector.py0000644000000000000000000000624713306562360021130 0ustar rootrootimport os import yaml import traceback import gevent from gevent.lock import Semaphore from gevent.server import StreamServer from lib.utils import find_port from lib.colorer import color_stdout from lib.tarantool_server import TarantoolStartError # Module initialization ####################### def gevent_propagate_exc(): """Don't print backtraces and propagate the exception to the parent greenlet when Ctrl+C or startup fail hit the process when the active greenlet is one of the StreamServer owned. """ ghub = gevent.get_hub() for exc_t in [KeyboardInterrupt, TarantoolStartError]: if exc_t not in ghub.NOT_ERROR: ghub.NOT_ERROR = ghub.NOT_ERROR + (exc_t,) if exc_t not in ghub.SYSTEM_ERROR: ghub.SYSTEM_ERROR = ghub.SYSTEM_ERROR + (exc_t,) gevent_propagate_exc() # TarantoolInspector #################### class TarantoolInspector(StreamServer): """ Tarantool inspector daemon. Usage: inspector = TarantoolInspector('localhost', 8080) inspector.start() # run some tests inspector.stop() """ def __init__(self, host, port): # When specific port range was acquired for current worker, don't allow # OS set port for us that isn't from specified range. 
if port == 0: port = find_port() super(TarantoolInspector, self).__init__((host, port)) self.parser = None def start(self): super(TarantoolInspector, self).start() os.environ['INSPECTOR'] = str(self.server_port) def stop(self): del os.environ['INSPECTOR'] def set_parser(self, parser): self.parser = parser self.sem = Semaphore() @staticmethod def readline(socket, delimiter='\n', size=4096): result = '' data = True while data: try: data = socket.recv(size) except IOError: # catch instance halt connection refused errors data = '' result += data while result.find(delimiter) != -1: line, result = result.split(delimiter, 1) yield line return def handle(self, socket, addr): if self.parser is None: raise AttributeError('Parser is not defined') self.sem.acquire() for line in self.readline(socket): try: result = self.parser.parse_preprocessor(line) except (KeyboardInterrupt, TarantoolStartError): # propagate to the main greenlet raise except Exception as e: self.parser.kill_current_test() color_stdout('\nTarantoolInpector.handle() received the following error:\n' + traceback.format_exc() + '\n', schema='error') result = { "error": repr(e) } if result == None: result = True result = yaml.dump(result) if not result.endswith('...\n'): result = result + '...\n' socket.sendall(result) self.sem.release() def cleanup_nondefault(self): if self.parser: self.parser.cleanup_nondefault() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/options.py0000644000000000000000000001614113306562360020607 0ustar rootrootimport os import sys import argparse from itertools import product from lib.singleton import Singleton from lib.colorer import color_stdout def env_int(name, default): try: value = os.environ.get(name) return default if value is None else int(value) except ValueError: return default def env_list(name, default): value_str = os.environ.get(name) if value_str is None: return default value_list = value_str.split() return value_list or default class Options: """Handle options of 
test-runner""" __metaclass__ = Singleton def __init__(self): """Add all program options, with their defaults.""" parser = argparse.ArgumentParser( description = "Tarantool regression test suite front-end.") parser.epilog = "For a complete description, use 'pydoc ./" +\ os.path.basename(sys.argv[0]) + "'" parser.add_argument( "tests", metavar="test", nargs="*", default = env_list('TEST_RUN_TESTS', ['']), help="""Can be empty. List of test names, to look for in suites. Each name is used as a substring to look for in the path to test file, e.g. "show" will run all tests that have "show" in their name in all suites, "box/show" will only enable tests starting with "show" in "box" suite. Default: run all tests in all specified suites.""") parser.add_argument( "--suite", dest = 'suites', metavar = "suite", nargs="*", default = [], help = """List of test suites to look for tests in. Default: "" - means find all available.""") parser.add_argument( "--verbose", dest = 'is_verbose', action = "store_true", default = False, help = """Print TAP13 test output to log. Default: false.""") parser.add_argument( "--force", dest = "is_force", action = "store_true", default = False, help = """Go on with other tests in case of an individual test failure. Default: false.""") parser.add_argument( "--gdb", dest = "gdb", action = "store_true", default = False, help = """Start the server under 'gdb' debugger in detached Screen. This option is mutually exclusive with --valgrind, --gdbserver, --lldb and --strace. Default: false.""") parser.add_argument( "--gdbserver", dest = "gdbserver", action = "store_true", default = False, help = """Start the server under 'gdbserver'. This option is mutually exclusive with --valgrind, --gdb, --lldb and --strace. Default: false.""") parser.add_argument( "--lldb", dest = "lldb", action = "store_true", default = False, help = """Start the server under 'lldb' debugger in detached Screen. 
This option is mutually exclusive with --valgrind, --gdb, --gdbserver and --strace. Default: false.""") parser.add_argument( "--valgrind", dest = "valgrind", action = "store_true", default = False, help = """Run the server under 'valgrind'. This option is mutually exclusive with --gdb, --gdbserver, --lldb and --strace. Default: false.""") parser.add_argument( "--strace", dest = "strace", action = "store_true", default = False, help = """Run the server under 'strace'. This option is mutually exclusive with --valgrind, --gdb, --gdbserver, --lldb and --strace. Default: false.""") parser.add_argument( "--builddir", dest = "builddir", default = "..", help = """Path to project build directory. Default: ".." """) parser.add_argument( "--tarantool-port", dest = "tarantool_port", default = None, help = """Listen port number to run tests against. Admin port number must be listen + 1""") parser.add_argument( "--vardir", dest = "vardir", default = "var", help = """Path to data directory. Default: var.""") parser.add_argument( "--long", dest="long", default=False, action='store_true', help="""Enable long run tests""") parser.add_argument( "--conf", dest="conf", default=None, help="""Force set test configuration mode""") parser.add_argument( "-j", "--jobs", dest="jobs", const=0, nargs='?', default=env_int('TEST_RUN_JOBS', 0), type=int, help="""Workers count. Default: ${TEST_RUN_JOBS} or 0 (0 means 2 x CPU count). -1 means everything running consistently (single process). """) parser.add_argument( "--reproduce", dest="reproduce", default=None, help="""Run tests in the order given by the file. Such files created by workers in the "var/reproduce" directory. Note: The option works now only with parallel testing.""") parser.add_argument( "--no-output-timeout", dest="no_output_timeout", default=0, type=int, help="""Exit if there was no output from workers during this amount of seconds. Set it to -1 to disable hang detection. 
Default: 120 [seconds] (but disabled when one of --gdb, --llgb, --valgrind, --long options is passed). Note: The option works now only with parallel testing.""") self.args = parser.parse_args() self.check() def check(self): """Check the arguments for correctness.""" check_error = False conflict_options = ('valgrind', 'gdb', 'lldb', 'strace') for op1, op2 in product(conflict_options, repeat=2): if op1 != op2 and getattr(self, op1, '') and \ getattr(self, op2, ''): format_str = "Error: option --{} is not compatible \ with option --{}" color_stdout(format_str.format(op1, op2), schema='error') check_error = True if check_error: exit(-1) tarantool_1.9.1.26.g63eb81e3c/test-run/lib/tarantool_server.py0000644000000000000000000007121213306562360022505 0ustar rootrootimport errno import gc import glob import os import os.path import random import re import shlex import shutil import signal import subprocess import sys import time import gevent import yaml from gevent import socket try: from cStringIO import StringIO except ImportError: from StringIO import StringIO import inspect # for caller_globals from lib.test import Test from lib.server import Server from lib.preprocessor import TestState from lib.box_connection import BoxConnection from lib.admin_connection import AdminConnection, AdminAsyncConnection from lib.utils import find_port from lib.utils import signame from lib.utils import warn_unix_socket from lib.utils import format_process from greenlet import greenlet, GreenletExit from test import TestRunGreenlet, TestExecutionError from lib.colorer import color_stdout, color_log def save_join(green_obj, timeout=None): """ Gevent join wrapper for test-run stop-on-crash feature :return True in case of crash and False otherwise """ try: green_obj.join(timeout=timeout) except GreenletExit: return True except TarantoolStartError: return True return False class FuncTest(Test): def execute(self, server): server.current_test = self execfile(self.name, dict(locals(), 
**server.__dict__)) class LuaTest(FuncTest): TIMEOUT = 60 * 10 def exec_loop(self, ts): cmd = None def send_command(command): result = ts.curcon[0](command, silent=True) for conn in ts.curcon[1:]: conn(command, silent=True) # gh-24 fix if result is None: result = '[Lost current connection]\n' return result for line in open(self.name, 'r'): if not line.endswith('\n'): line += '\n' # context switch for inspector after each line if not cmd: cmd = StringIO() if line.find('--') == 0: sys.stdout.write(line) else: if line.strip() or cmd.getvalue(): cmd.write(line) delim_len = -len(ts.delimiter) if len(ts.delimiter) else None if line.endswith(ts.delimiter + '\n') and cmd.getvalue().strip()[:delim_len].strip(): sys.stdout.write(cmd.getvalue()) rescom = cmd.getvalue()[:delim_len].replace('\n\n', '\n') result = send_command(rescom) sys.stdout.write(result.replace("\r\n", "\n")) cmd.close() cmd = None # join inspector handler self.inspector.sem.wait() # stop any servers created by the test, except the default one ts.stop_nondefault() def killall_servers(self, server, ts, crash_occured): """ kill all servers and crash detectors before stream swap """ check_list = ts.servers.values() + [server, ] # check that all servers stopped correctly for server in check_list: crash_occured = crash_occured or server.process.returncode not in (None, 0, -signal.SIGKILL, -signal.SIGTERM) for server in check_list: server.process.poll() if crash_occured: # kill all servers and crash detectors on crash if server.process.returncode is None: server.process.kill() gevent.kill(server.crash_detector) elif server.process.returncode is not None: # join crash detectors of stopped servers save_join(server.crash_detector) def execute(self, server): server.current_test = self cls_name = server.__class__.__name__.lower() if 'gdb' in cls_name or 'lldb' in cls_name or 'strace' in cls_name: # don't propagate gdb/lldb/strace mixin to non-default servers, it doesn't # work properly for now # TODO: strace isn't 
interactive, so it's easy to make it works for # non-default server create_server = TarantoolServer else: # propagate valgrind mixin to non-default servers create_server = server.__class__ ts = TestState( self.suite_ini, server, create_server, self.run_params ) self.inspector.set_parser(ts) lua = TestRunGreenlet(self.exec_loop, ts) self.current_test_greenlet = lua lua.start() crash_occured = True try: crash_occured = save_join(lua, timeout=self.TIMEOUT) self.killall_servers(server, ts, crash_occured) except KeyboardInterrupt: # prevent tests greenlet from writing to the real stdout lua.kill() ts.stop_nondefault() raise class PythonTest(FuncTest): def execute(self, server): server.current_test = self execfile(self.name, dict(locals(), test_run_current_test=self, **server.__dict__)) # crash was detected (possibly on non-default server) if server.current_test.is_crash_reported: raise TestExecutionError CON_SWITCH = { 'lua': AdminAsyncConnection, 'python': AdminConnection } class TarantoolStartError(OSError): pass class TarantoolLog(object): def __init__(self, path): self.path = path self.log_begin = 0 def positioning(self): if os.path.exists(self.path): with open(self.path, 'r') as f: f.seek(0, os.SEEK_END) self.log_begin = f.tell() return self def seek_once(self, msg): if not os.path.exists(self.path): return -1 with open(self.path, 'r') as f: f.seek(self.log_begin, os.SEEK_SET) while True: log_str = f.readline() if not log_str: return -1 pos = log_str.find(msg) if pos != -1: return pos def seek_wait(self, msg, proc=None): while True: if os.path.exists(self.path): break time.sleep(0.001) with open(self.path, 'r') as f: f.seek(self.log_begin, os.SEEK_SET) cur_pos = self.log_begin while True: if not (proc is None): if not (proc.poll() is None): raise TarantoolStartError log_str = f.readline() if not log_str: time.sleep(0.001) f.seek(cur_pos, os.SEEK_SET) continue if re.findall(msg, log_str): return cur_pos = f.tell() class TarantoolServer(Server): default_tarantool = { 
"bin": "tarantool", "logfile": "tarantool.log", "pidfile": "tarantool.pid", "name": "default", "ctl": "tarantoolctl", } # ----------------------------------PROPERTIES----------------------------------# @property def debug(self): return self.test_debug() @property def name(self): if not hasattr(self, '_name') or not self._name: return self.default_tarantool["name"] return self._name @name.setter def name(self, val): self._name = val @property def logfile(self): if not hasattr(self, '_logfile') or not self._logfile: return os.path.join(self.vardir, self.default_tarantool["logfile"]) return self._logfile @logfile.setter def logfile(self, val): self._logfile = os.path.join(self.vardir, val) @property def pidfile(self): if not hasattr(self, '_pidfile') or not self._pidfile: return os.path.join(self.vardir, self.default_tarantool["pidfile"]) return self._pidfile @pidfile.setter def pidfile(self, val): self._pidfile = os.path.join(self.vardir, val) @property def builddir(self): if not hasattr(self, '_builddir'): raise ValueError("No build-dir is specified") return self._builddir @builddir.setter def builddir(self, val): if val is None: return self._builddir = os.path.abspath(val) @property def script_dst(self): return os.path.join(self.vardir, os.path.basename(self.script)) @property def logfile_pos(self): if not hasattr(self, '_logfile_pos'): self._logfile_pos = None return self._logfile_pos @logfile_pos.setter def logfile_pos(self, val): self._logfile_pos = TarantoolLog(val).positioning() @property def script(self): if not hasattr(self, '_script'): self._script = None return self._script @script.setter def script(self, val): if val is None: if hasattr(self, '_script'): delattr(self, '_script') return self._script = os.path.abspath(val) self.name = os.path.basename(self._script).split('.')[0] @property def _admin(self): if not hasattr(self, 'admin'): self.admin = None return self.admin @_admin.setter def _admin(self, port): if hasattr(self, 'admin'): del self.admin if 
not hasattr(self, 'tests_type'): self.tests_type = 'lua' self.admin = CON_SWITCH[self.tests_type]('localhost', port) @property def _iproto(self): if not hasattr(self, 'iproto'): self.iproto = None return self.iproto @_iproto.setter def _iproto(self, port): try: port = int(port) except ValueError as e: raise ValueError("Bad port number: '%s'" % port) if hasattr(self, 'iproto'): del self.iproto self.iproto = BoxConnection('localhost', port) @property def log_des(self): if not hasattr(self, '_log_des'): self._log_des = open(self.logfile, 'a') return self._log_des @log_des.deleter def log_des(self): if not hasattr(self, '_log_des'): return if not self._log_des.closed: self._log_des.closed() delattr(self, _log_des) @property def rpl_master(self): if not hasattr(self, '_rpl_master'): self._rpl_master = None return self._rpl_master @rpl_master.setter def rpl_master(self, val): if not isinstance(self, (TarantoolServer, None)): raise ValueError('Replication master must be Tarantool' ' Server class, his derivation or None') self._rpl_master = val # ------------------------------------------------------------------------------# def __new__(cls, ini=None, *args, **kwargs): cls = Server.get_mixed_class(cls, ini) return object.__new__(cls) def __init__(self, _ini=None, test_suite=None): if _ini is None: _ini = {} ini = { 'core': 'tarantool', 'gdb': False, 'lldb': False, 'script': None, 'lua_libs': [], 'valgrind': False, 'vardir': None, 'use_unix_sockets': False, 'tarantool_port': None, 'strace': False } ini.update(_ini) Server.__init__(self, ini, test_suite) self.testdir = os.path.abspath(os.curdir) self.sourcedir = os.path.abspath(os.path.join(os.path.basename( sys.argv[0]), "..", "..")) self.re_vardir_cleanup += [ "*.snap", "*.xlog", "*.vylog", "*.inprogress", "*.sup", "*.lua", "*.pid", "[0-9]*/"] self.name = "default" self.conf = {} self.status = None # -----InitBasicVars-----# self.core = ini['core'] self.gdb = ini['gdb'] self.lldb = ini['lldb'] self.script = ini['script'] 
self.lua_libs = ini['lua_libs'] self.valgrind = ini['valgrind'] self.strace = ini['strace'] self.use_unix_sockets = ini['use_unix_sockets'] self._start_against_running = ini['tarantool_port'] self.crash_detector = None # use this option with inspector # to enable crashes in test self.crash_enabled = False # set in from a test let test-run ignore server's crashes self.crash_expected = False # filled in {Test,FuncTest,LuaTest,PythonTest}.execute() # or passed through execfile() for PythonTest self.current_test = None caller_globals = inspect.stack()[1][0].f_globals if 'test_run_current_test' in caller_globals.keys(): self.current_test = caller_globals['test_run_current_test'] def __del__(self): self.stop() @classmethod def version(cls): p = subprocess.Popen([cls.binary, "--version"], stdout=subprocess.PIPE) version = p.stdout.read().rstrip() p.wait() return version @classmethod def find_exe(cls, builddir, silent=True): cls.builddir = os.path.abspath(builddir) builddir = os.path.join(builddir, "src") path = builddir + os.pathsep + os.environ["PATH"] color_log("Looking for server binary in ", schema='serv_text') color_log(path + ' ...\n', schema='path') for _dir in path.split(os.pathsep): exe = os.path.join(_dir, cls.default_tarantool["bin"]) ctl_dir = _dir # check local tarantoolctl source if _dir == builddir: ctl_dir = os.path.join(_dir, '../extra/dist') ctl = os.path.join(ctl_dir, cls.default_tarantool['ctl']) need_lua_path = False if os.path.isdir(ctl) or not os.access(ctl, os.X_OK): ctl_dir = os.path.join(_dir, '../extra/dist') ctl = os.path.join(ctl_dir, cls.default_tarantool['ctl']) need_lua_path = True if os.access(exe, os.X_OK) and os.access(ctl, os.X_OK): cls.binary = os.path.abspath(exe) cls.ctl_path = os.path.abspath(ctl) cls.ctl_plugins = os.path.abspath( os.path.join(ctl_dir, '..') ) os.environ["PATH"] = os.pathsep.join([ os.path.abspath(ctl_dir), os.path.abspath(_dir), os.environ["PATH"] ]) os.environ["TARANTOOLCTL"] = ctl if need_lua_path: 
os.environ["LUA_PATH"] = ctl_dir + '/?.lua;' + \ ctl_dir + '/?/init.lua;' + \ os.environ.get("LUA_PATH", ";;") return exe raise RuntimeError("Can't find server executable in " + path) @classmethod def print_exe(cls): color_stdout('Installing the server ...\n', schema='serv_text') if cls.binary: color_stdout(' Found executable at ', schema='serv_text') color_stdout(cls.binary + '\n', schema='path') if cls.ctl_path: color_stdout(' Found tarantoolctl at ', schema='serv_text') color_stdout(cls.ctl_path + '\n', schema='path') color_stdout("\n", cls.version(), "\n", schema='version') def install(self, silent=True): if self._start_against_running: self._iproto = self._start_against_running self._admin = int(self._start_against_running) + 1 return color_log('Installing the server ...\n', schema='serv_text') color_log(' Found executable at ', schema='serv_text') color_log(self.binary + '\n', schema='path') color_log(' Found tarantoolctl at ', schema='serv_text') color_log(self.ctl_path + '\n', schema='path') color_log(' Creating and populating working directory in ', schema='serv_text') color_log(self.vardir + ' ...\n', schema='path') if not os.path.exists(self.vardir): os.makedirs(self.vardir) else: color_log(' Found old vardir, deleting ...\n', schema='serv_text') self.kill_old_server() self.cleanup() self.copy_files() if self.use_unix_sockets: self._admin = os.path.join(self.vardir, "socket-admin") else: self._admin = find_port() self._iproto = find_port() # these sockets will be created by tarantool itself path = os.path.join(self.vardir, self.name + '.control') warn_unix_socket(path) def deploy(self, silent=True, **kwargs): self.install(silent) self.start(silent=silent, **kwargs) def copy_files(self): if self.script: shutil.copy(self.script, self.script_dst) os.chmod(self.script_dst, 0777) if self.lua_libs: for i in self.lua_libs: source = os.path.join(self.testdir, i) try: if os.path.isdir(source): shutil.copytree(source, os.path.join(self.vardir, 
os.path.basename(source))) else: shutil.copy(source, self.vardir) except IOError as e: if (e.errno == errno.ENOENT): continue raise shutil.copy('.tarantoolctl', self.vardir) shutil.copy(os.path.join(self.TEST_RUN_DIR, 'test_run.lua'), self.vardir) def prepare_args(self, args=[]): return [self.ctl_path, 'start', os.path.basename(self.script)] + args def start(self, silent=True, wait=True, wait_load=True, rais=True, args=[], **kwargs): if self._start_against_running: return if self.status == 'started': if not silent: color_stdout('The server is already started.\n', schema='lerror') return args = self.prepare_args(args) self.pidfile = '%s.pid' % self.name self.logfile = '%s.log' % self.name path = self.script_dst if self.script else \ os.path.basename(self.binary) color_log("Starting the server ...\n", schema='serv_text') color_log("Starting ", schema='serv_text') color_log(path + " \n", schema='path') color_log(self.version() + "\n", schema='version') os.putenv("LISTEN", self.iproto.uri) os.putenv("ADMIN", self.admin.uri) if self.rpl_master: os.putenv("MASTER", self.rpl_master.iproto.uri) self.logfile_pos = self.logfile # redirect stdout from tarantoolctl and tarantool os.putenv("TEST_WORKDIR", self.vardir) self.process = subprocess.Popen(args, cwd=self.vardir, stdout=self.log_des, stderr=self.log_des) # gh-19 crash detection self.crash_detector = TestRunGreenlet(self.crash_detect) self.crash_detector.info = "Crash detector: %s" % self.process self.crash_detector.start() wait = wait wait_load = wait_load if wait: try: self.wait_until_started(wait_load) except TarantoolStartError: # Raise exception when caller ask for it (e.g. 
in case of # non-default servers) if rais: raise # Python tests expect we raise an exception when non-default # server fails if self.crash_expected: raise if not self.current_test or not self.current_test.is_crash_reported: if self.current_test: self.current_test.is_crash_reported = True color_stdout('\n[Instance "{}"] Tarantool server failed to start\n'.format( self.name), schema='error') self.print_log(15) # if the server fails before any test started, we should inform # a caller by the exception if not self.current_test: raise self.kill_current_test() port = self.admin.port self.admin.disconnect() self.admin = CON_SWITCH[self.tests_type]('localhost', port) self.status = 'started' def crash_detect(self): if self.crash_expected: return while self.process.returncode is None: self.process.poll() if self.process.returncode is None: gevent.sleep(0.1) if self.process.returncode in [0, -signal.SIGKILL, -signal.SIGTERM]: return self.kill_current_test() if not os.path.exists(self.logfile): return if not self.current_test.is_crash_reported: self.current_test.is_crash_reported = True self.crash_grep() def crash_grep(self): print_log_lines = 15 assert_fail_re = re.compile(r'^.*: Assertion .* failed\.$') # find and save backtrace or assertion fail assert_lines = list() bt = list() with open(self.logfile, 'r') as log: lines = log.readlines() for rpos, line in enumerate(reversed(lines)): if line.startswith('Segmentation fault'): bt = lines[-rpos - 1:] break if assert_fail_re.match(line): pos = len(lines) - rpos assert_lines = lines[max(0, pos - print_log_lines):pos] break else: bt = list() # print insident meat if self.process.returncode < 0: color_stdout('\n\n[Instance "%s" killed by signal: %d (%s)]\n' % ( self.name, -self.process.returncode, signame(-self.process.returncode)), schema='error') else: color_stdout('\n\n[Instance "%s" returns with non-zero exit code: %d]\n' % ( self.name, self.process.returncode), schema='error') # print assert line if any and return if 
assert_lines: color_stdout('Found assertion fail in the results file [%s]:\n' % self.logfile, schema='error') sys.stderr.flush() for line in assert_lines: sys.stderr.write(line) sys.stderr.flush() return # print backtrace if any sys.stderr.flush() for trace in bt: sys.stderr.write(trace) # print log otherwise (if backtrace was not found) if not bt: self.print_log(print_log_lines) sys.stderr.flush() def kill_current_test(self): """ Unblock save_join() call inside LuaTest.execute(), which doing necessary servers/greenlets clean up. """ # current_test_greenlet is None for PythonTest if self.current_test.current_test_greenlet: gevent.kill(self.current_test.current_test_greenlet) def wait_stop(self): self.process.wait() def cleanup(self, full=False): try: shutil.rmtree(os.path.join(self.vardir, self.name)) except OSError: pass def stop(self, silent=True): if self._start_against_running: return if self.status != 'started': if not silent: raise Exception('Server is not started') return if not silent: color_stdout('Stopping the server ...\n', schema='serv_text') # kill only if process is alive if self.process is not None and self.process.returncode is None: color_log('TarantoolServer.stop(): stopping the %s\n' % format_process(self.process.pid), schema='test_var') try: self.process.terminate() except OSError: pass if self.crash_detector is not None: save_join(self.crash_detector) self.wait_stop() self.status = None if re.search(r'^/', str(self._admin.port)): if os.path.exists(self._admin.port): os.unlink(self._admin.port) def restart(self): self.stop() self.start() def kill_old_server(self, silent=True): pid = self.read_pidfile() if pid == -1: return False if not silent: color_stdout(' Found old server, pid {0}, killing ...'.format(pid), schema='info') try: os.kill(pid, signal.SIGTERM) except OSError: pass self.wait_until_stopped(pid) return True def wait_until_started(self, wait_load=True): """ Wait until server is started. 
Server consists of two parts: 1) wait until server is listening on sockets 2) wait until server tells us his status """ if wait_load: msg = 'entering the event loop|will retry binding|hot standby mode' self.logfile_pos.seek_wait( msg, self.process if not self.gdb and not self.lldb else None) while True: try: temp = AdminConnection('localhost', self.admin.port) if not wait_load: ans = yaml.load(temp.execute("2 + 2")) return True ans = yaml.load(temp.execute('box.info.status'))[0] if ans in ('running', 'hot_standby', 'orphan'): return True elif ans in ('loading'): continue else: raise Exception("Strange output for `box.info.status`: %s" % (ans)) except socket.error as e: if e.errno == errno.ECONNREFUSED: time.sleep(0.1) continue raise def wait_until_stopped(self, pid): while True: try: time.sleep(0.01) os.kill(pid, 0) continue except OSError as err: break def read_pidfile(self): pid = -1 if os.path.exists(self.pidfile): try: with open(self.pidfile) as f: pid = int(f.read()) except: pass return pid def test_option_get(self, option_list_str, silent=False): args = [self.binary] + shlex.split(option_list_str) if not silent: print " ".join([os.path.basename(self.binary)] + args[1:]) output = subprocess.Popen(args, cwd=self.vardir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read() return output def test_option(self, option_list_str): print self.test_option_get(option_list_str) def test_debug(self): if re.findall(r"-Debug", self.test_option_get("-V", True), re.I): return True return False @staticmethod def find_tests(test_suite, suite_path): test_suite.ini['suite'] = suite_path get_tests = lambda x: sorted(glob.glob(os.path.join(suite_path, x))) tests = [PythonTest(k, test_suite.args, test_suite.ini) for k in get_tests("*.test.py") ] for k in get_tests("*.test.lua"): runs = test_suite.get_multirun_params(k) is_correct = lambda x: test_suite.args.conf is None or \ test_suite.args.conf == x if runs: tests.extend([LuaTest( k, test_suite.args, test_suite.ini, 
runs[r], r ) for r in runs.keys() if is_correct(r)]) else: tests.append(LuaTest(k, test_suite.args, test_suite.ini)) test_suite.tests = [] # don't sort, command line arguments must be run in # the specified order for name in test_suite.args.tests: for test in tests: if test.name.find(name) != -1: test_suite.tests.append(test) def get_param(self, param=None): if not param is None: return yaml.load(self.admin("box.info." + param, silent=True))[0] return yaml.load(self.admin("box.info", silent=True)) def get_lsn(self, node_id): nodes = self.get_param("vclock") if type(nodes) == dict and node_id in nodes: return int(nodes[node_id]) elif type(nodes) == list and node_id <= len(nodes): return int(nodes[node_id - 1]) else: return -1 def wait_lsn(self, node_id, lsn): while (self.get_lsn(node_id) < lsn): # print("wait_lsn", node_id, lsn, self.get_param("vclock")) time.sleep(0.01) def get_log(self): return TarantoolLog(self.logfile).positioning() tarantool_1.9.1.26.g63eb81e3c/test-run/lib/server_mixins.py0000644000000000000000000002126613306562360022015 0ustar rootrootimport os import glob import shlex from lib.utils import find_in_path from lib.utils import print_tail_n from lib.utils import non_empty_valgrind_logs from lib.colorer import color_stdout, color_log from six.moves import shlex_quote def shlex_join(strings): return ' '.join(shlex_quote(s) for s in strings) class Mixin(object): pass class ValgrindMixin(Mixin): default_valgr = { "suppress_path": "share/", "suppress_name": "tarantool.sup" } def format_valgrind_log_path(self, suite_name, test_name, conf, server_name, num): basename = '{}.{}.{}.{}.{}.valgrind.log'.format( suite_name, test_name, conf, server_name, str(num)) return os.path.join(self.vardir, basename) @property def valgrind_log(self): # suite.{test/default}.{conf/none}.instance.num.valgrind.log # Why 'TarantoolServer' is special case here? 
Consider: # * TarantoolServer runs once, then execute tests in the one process # (we run the server itself under valgrind). # * AppServer / UnittestServer just create separate processes for each # tests (we run these processes under valgrind). if 'TarantoolServer' in self.__class__.__name__ and self.test_suite: suite_name = os.path.basename(self.test_suite.suite_path) path = self.format_valgrind_log_path( suite_name, 'default', 'none', self.name, 1) else: suite_name = os.path.basename(self.current_test.suite_ini['suite']) test_name = os.path.basename(self.current_test.name) conf_name = self.current_test.conf_name or 'none' num = 1 while True: path = self.format_valgrind_log_path( suite_name, test_name, conf_name, self.name, num) if not os.path.isfile(path): break num += 1 return path def current_valgrind_logs(self, for_suite=False, for_test=False): if not self.test_suite or not self.current_test: raise ValueError( "The method should be called on a default suite's server.") if for_suite == for_test: raise ValueError('Set for_suite OR for_test to True') suite_name = os.path.basename(self.test_suite.suite_path) if for_test: test_name = os.path.basename(self.current_test.name) default_tmpl = self.format_valgrind_log_path(suite_name, 'default', '*', '*', '*') non_default_tmpl = self.format_valgrind_log_path(suite_name, test_name, '*', '*', '*') return sorted(glob.glob(default_tmpl) + glob.glob(non_default_tmpl)) else: suite_tmpl = self.format_valgrind_log_path(suite_name, '*', '*', '*', '*') return sorted(glob.glob(suite_tmpl)) @property def valgrind_sup(self): if not hasattr(self, '_valgrind_sup') or not self._valgrind_sup: return os.path.join(self.testdir, self.default_valgr['suppress_path'], self.default_valgr['suppress_name']) return self._valgrind_sup @valgrind_sup.setter def valgrind_sup(self, val): self._valgrind_sup = os.path.abspath(val) @property def valgrind_sup_output(self): return os.path.join(self.vardir, self.default_valgr['suppress_name']) @property def 
valgrind_cmd_args(self): return shlex.split("valgrind --log-file={log} --suppressions={sup} \ --gen-suppressions=all --trace-children=yes --leak-check=full \ --read-var-info=yes --quiet".format( log=self.valgrind_log, sup=self.valgrind_sup)) def prepare_args(self, args=[]): if not find_in_path('valgrind'): raise OSError('`valgrind` executables not found in PATH') orig_args = super(ValgrindMixin, self).prepare_args(args) res_args = self.valgrind_cmd_args + orig_args color_log('\nRUN: ' + shlex_join(res_args) + '\n', schema='test_var') return res_args def wait_stop(self): return self.process.wait() def crash_grep(self): if self.process.returncode < 0 or \ not non_empty_valgrind_logs([self.valgrind_log]): super(ValgrindMixin, self).crash_grep() return lines_cnt = 50 color_stdout('\n\nValgrind for [Instance "%s"] returns non-zero exit code: %d\n' % ( self.name, self.process.returncode), schema='error') color_stdout("It's known that it can be valgrind's \"the 'impossible' happened\" error\n", schema='error') color_stdout('Last %d lines of valgring log file [%s]:\n' % ( lines_cnt, self.valgrind_log), schema='error') print_tail_n(self.valgrind_log, 50) class StraceMixin(Mixin): @property def strace_log(self): # TODO: don't overwrite log, like in the 'valgrind_log' property above return os.path.join(self.vardir, 'strace.log') def prepare_args(self, args=[]): if not find_in_path('strace'): raise OSError('`strace` executables not found in PATH') orig_args = super(StraceMixin, self).prepare_args(args) res_args = shlex.split("strace -o {log} -f -tt -T -x -I1 {bin}".format( bin=' '.join(orig_args), log=self.strace_log )) color_log('\nRUN: ' + shlex_join(res_args) + '\n', schema='test_var') return res_args def wait_stop(self): self.kill_old_server() return self.process.wait() class DebugMixin(Mixin): debugger_args = { "screen_name": None, "debugger": None, "sh_string": None } def prepare_args(self, args=[]): screen_name = self.debugger_args['screen_name'] debugger = 
self.debugger_args['debugger'] gdbserver_port = self.debugger_args['gdbserver_port'] gdbserver_opts = self.debugger_args['gdbserver_opts'] sh_string = self.debugger_args['sh_string'] is_under_gdbserver = 'GdbServer' in self.__class__.__name__ if not is_under_gdbserver and not find_in_path('screen'): raise OSError('`screen` executables not found in PATH') if not find_in_path(debugger): raise OSError('`%s` executables not found in PATH' % debugger) is_tarantoolserver = 'TarantoolServer' in self.__class__.__name__ if is_tarantoolserver or is_under_gdbserver: color_stdout('\nYou started the server in %s mode.\n' % debugger, schema='info') if is_under_gdbserver: color_stdout("To attach, use `gdb -ex 'target remote :%s'`\n" % gdbserver_port, schema='info') else: color_stdout('To attach, use `screen -r %s`\n' % screen_name, schema='info') # detach only for TarantoolServer screen_opts = '-d' if is_tarantoolserver else '' orig_args = super(DebugMixin, self).prepare_args(args) res_args = shlex.split(sh_string.format( screen_name=screen_name, screen_opts=screen_opts, binary=self.binary, args=' '.join(orig_args), logfile=self.logfile, debugger=debugger, gdbserver_port=gdbserver_port, gdbserver_opts=gdbserver_opts)) color_log('\nRUN: ' + shlex_join(res_args) + '\n', schema='test_var') return res_args def wait_stop(self): self.kill_old_server() self.process.wait() class GdbMixin(DebugMixin): debugger_args = { "screen_name": "tarantool", "debugger": "gdb", "gdbserver_port": None, "gdbserver_opts": None, "sh_string": """screen {screen_opts} -mS {screen_name} {debugger} {binary} -ex 'b main' -ex 'run {args} >> {logfile} 2>> {logfile}' """ } # this would be good for running unit tests: # https://cygwin.com/ml/gdb-patches/2015-03/msg01051.html class GdbServerMixin(DebugMixin): debugger_args = { "screen_name": None, "debugger": "gdbserver", "gdbserver_port": "8888", "gdbserver_opts": "", "sh_string": """gdbserver :{gdbserver_port} {binary} {args} -- {gdbserver_opts} """ } class 
LLdbMixin(DebugMixin): debugger_args = { "screen_name": "tarantool", "debugger": "lldb", "gdbserver_port": None, "gdbserver_opts": None, "sh_string": """screen {screen_opts} -mS {screen_name} {debugger} -f {binary} -o 'b main' -o 'settings set target.run-args {args}' -o 'process launch -o {logfile} -e {logfile}' """ } tarantool_1.9.1.26.g63eb81e3c/test-run/lib/test_suite.py0000644000000000000000000001675413306562360021316 0ustar rootrootimport ConfigParser import json import os import lib from lib.colorer import color_stdout from lib.inspector import TarantoolInspector from lib.server import Server from lib.tarantool_server import TarantoolServer from lib.app_server import AppServer from lib.unittest_server import UnittestServer from lib.utils import non_empty_valgrind_logs, print_tail_n try: from cStringIO import StringIO except ImportError: from StringIO import StringIO class ConfigurationError(RuntimeError): def __init__(self, name, value, expected): self.name = name self.value = value self.expected = expected def __str__(self): return "Bad value for %s: expected %s, got %s" % ( repr(self.name), self.expected, repr(self.value) ) class TestSuite: """Each test suite contains a number of related tests files, located in the same directory on disk. Each test file has extention .test and contains a listing of server commands, followed by their output. The commands are executed, and obtained results are compared with pre-recorded output. In case of a comparision difference, an exception is raised. A test suite must also contain suite.ini, which describes how to start the server for this suite, the client program to execute individual tests and other suite properties. 
The server is started once per suite.""" def get_multirun_conf(self, suite_path): conf_name = self.ini.get('config', None) if conf_name is None: return None path = os.path.join(suite_path, conf_name) result = None with open(path) as cfg: try: result = json.load(cfg) except ValueError: raise RuntimeError('Ivalid multirun json') return result def get_multirun_params(self, test_path): test = test_path.split('/')[-1] if self.multi_run is None: return result = self.multi_run.get(test, None) if result is not None: return result result = self.multi_run.get('*', None) return result def __init__(self, suite_path, args): """Initialize a test suite: check that it exists and contains a syntactically correct configuration file. Then create a test instance for each found test.""" self.args = args self.tests = [] self.ini = {} self.suite_path = suite_path self.ini["core"] = "tarantool" if os.access(suite_path, os.F_OK) == False: raise RuntimeError("Suite %s doesn't exist" % repr(suite_path)) # read the suite config config = ConfigParser.ConfigParser() config.read(os.path.join(suite_path, "suite.ini")) self.ini.update(dict(config.items("default"))) self.ini.update(self.args.__dict__) self.multi_run = self.get_multirun_conf(suite_path) # list of long running tests if 'long_run' not in self.ini: self.ini['long_run'] = [] for i in ["script"]: self.ini[i] = os.path.join(suite_path, self.ini[i]) if i in self.ini else None for i in ["disabled", "valgrind_disabled", "release_disabled"]: self.ini[i] = dict.fromkeys(self.ini[i].split()) if i in self.ini else dict() for i in ["lua_libs"]: self.ini[i] = map(lambda x: os.path.join(suite_path, x), dict.fromkeys(self.ini[i].split()) if i in self.ini else dict()) def find_tests(self): if self.ini['core'] == 'tarantool': TarantoolServer.find_tests(self, self.suite_path) elif self.ini['core'] == 'app': AppServer.find_tests(self, self.suite_path) elif self.ini['core'] == 'unittest': UnittestServer.find_tests(self, self.suite_path) elif 
self.ini['core'] == 'stress': # parallel tests are not supported and disabled for now return [] else: raise ValueError('Cannot collect tests of unknown type') if not lib.Options().args.reproduce: color_stdout("Collecting tests in ", schema='ts_text') color_stdout( '%s (Found %s tests)' % ( repr(self.suite_path).ljust(16), str(len(self.tests)).ljust(3) ), schema='path' ) color_stdout(": ", self.ini["description"], ".\n", schema='ts_text') return self.tests def gen_server(self): try: return Server(self.ini, test_suite=self) except Exception as e: print e raise RuntimeError("Unknown server: core = {0}".format( self.ini["core"])) def is_test_enabled(self, test, conf, server): test_name = os.path.basename(test.name) tconf = '%s:%s' % (test_name, conf) checks = [ (True, self.ini["disabled"]), (not server.debug, self.ini["release_disabled"]), (self.args.valgrind, self.ini["valgrind_disabled"]), (not self.args.long, self.ini["long_run"]) ] for check in checks: check_enabled, disabled_tests = check if check_enabled and (test_name in disabled_tests or tconf in disabled_tests): return False return True def start_server(self, server): # create inspector daemon for cluster tests inspector = TarantoolInspector( 'localhost', server.inspector_port ) inspector.start() # fixme: remove this string if we fix all legacy tests suite_name = os.path.basename(self.suite_path) server.tests_type = 'python' if suite_name.endswith('-py') else 'lua' server.deploy(silent=False) return inspector def stop_server(self, server, inspector, silent=False, cleanup=True): server.stop(silent=silent) # don't delete core files or state of the data dir # in case of exception, which is raised when the # server crashes if inspector: inspector.stop() if cleanup: inspector.cleanup_nondefault() server.cleanup() def run_test(self, test, server, inspector): """ Returns short status of the test as a string: 'skip', 'pass', 'new', 'fail', or 'disabled'. 
""" test.inspector = inspector color_stdout( os.path.join( self.ini['suite'], os.path.basename(test.name) ).ljust(48), schema='t_name' ) # for better diagnostics in case of a long-running test conf = '' if test.run_params: conf = test.conf_name color_stdout(conf.ljust(16), schema='test_var') test_name = os.path.basename(test.name) if self.is_test_enabled(test, conf, server): short_status = test.run(server) else: color_stdout("[ disabled ]\n", schema='t_name') short_status = 'disabled' # cleanup only if test passed or if --force mode enabled if lib.Options().args.is_force or short_status == 'pass': inspector.cleanup_nondefault() return short_status def is_parallel(self): val = self.ini.get('is_parallel', 'False').lower() if val == 'true': val = True elif val == 'false': val = False else: raise ConfigurationError() pass return val tarantool_1.9.1.26.g63eb81e3c/test-run/lib/app_server.py0000644000000000000000000001244513306562360021265 0ustar rootrootimport os import sys import glob import errno import shutil from gevent.subprocess import Popen, PIPE from lib.server import Server from lib.tarantool_server import Test, TarantoolServer from lib.preprocessor import TestState from lib.utils import find_port from test import TestRunGreenlet, TestExecutionError from lib.colorer import color_log def run_server(execs, cwd, server, logfile, retval): server.process = Popen(execs, stdout=PIPE, stderr=PIPE, cwd=cwd) stdout, stderr = server.process.communicate() sys.stdout.write(stdout) with open(logfile, 'a') as f: f.write(stderr) retval['returncode'] = server.process.wait() server.process = None class AppTest(Test): def execute(self, server): server.current_test = self ts = TestState(self.suite_ini, None, TarantoolServer, self.run_params, default_server_no_connect=server) self.inspector.set_parser(ts) execs = server.prepare_args() retval = dict() tarantool = TestRunGreenlet(run_server, execs, server.vardir, server, server.logfile, retval) self.current_test_greenlet = tarantool 
tarantool.start() tarantool.join() if retval['returncode'] != 0: raise TestExecutionError class AppServer(Server): """A dummy server implementation for application server tests""" def __new__(cls, ini=None, *args, **kwargs): cls = Server.get_mixed_class(cls, ini) return object.__new__(cls) def __init__(self, _ini=None, test_suite=None): if _ini is None: _ini = {} ini = { 'vardir': None }; ini.update(_ini) Server.__init__(self, ini, test_suite) self.testdir = os.path.abspath(os.curdir) self.vardir = ini['vardir'] self.re_vardir_cleanup += [ "*.snap", "*.xlog", "*.vylog", "*.inprogress", "*.sup", "*.lua", "*.pid" ] self.cleanup() self.builddir = ini['builddir'] self.debug = False self.lua_libs = ini['lua_libs'] self.name = 'app_server' self.process = None self.binary = TarantoolServer.binary @property def logfile(self): # remove suite name using basename test_name = os.path.basename(self.current_test.name) # add :conf_name if any if self.current_test.conf_name is not None: test_name += ':' + self.current_test.conf_name # add '.tarantool.log' file_name = test_name + '.tarantool.log' # put into vardir return os.path.join(self.vardir, file_name) def prepare_args(self, args=[]): return [os.path.join(os.getcwd(), self.current_test.name)] + args def deploy(self, vardir=None, silent=True, need_init=True): self.vardir = vardir if not os.access(self.vardir, os.F_OK): os.makedirs(self.vardir) if self.lua_libs: for i in self.lua_libs: source = os.path.join(self.testdir, i) try: if os.path.isdir(source): shutil.copytree(source, os.path.join(self.vardir, os.path.basename(source))) else: shutil.copy(source, self.vardir) except IOError as e: if (e.errno == errno.ENOENT): continue raise os.putenv("LISTEN", str(find_port())) shutil.copy(os.path.join(self.TEST_RUN_DIR, 'test_run.lua'), self.vardir) # Note: we don't know the instance name of the tarantool server, so # cannot check length of path of *.control unix socket created by it. 
# So for 'app' tests type we don't check *.control unix sockets paths. def stop(self, silent): if not self.process: return color_log('AppServer.stop(): stopping the %s\n' % format_process(self.process.pid), schema='test_var') try: self.process.terminate() except OSError: pass @classmethod def find_exe(cls, builddir): cls.builddir = builddir @staticmethod def find_tests(test_suite, suite_path): def patterned(test_name, patterns): answer = [] for i in patterns: if test_name.find(i) != -1: answer.append(test_name) return answer test_suite.ini['suite'] = suite_path test_names = sorted(glob.glob(os.path.join(suite_path, "*.test.lua"))) test_names = sum(map((lambda x: patterned(x, test_suite.args.tests)), test_names), []) tests = [] for k in test_names: runs = test_suite.get_multirun_params(k) is_correct = lambda x: test_suite.args.conf is None or \ test_suite.args.conf == x if runs: tests.extend([AppTest( k, test_suite.args, test_suite.ini, runs[r], r ) for r in runs.keys() if is_correct(r)]) else: tests.append(AppTest(k, test_suite.args, test_suite.ini)) test_suite.tests = tests tarantool_1.9.1.26.g63eb81e3c/test-run/requirements.txt0000644000000000000000000000011213306562360021247 0ustar rootrootPyYAML==3.10 argparse==1.1 msgpack-python==0.4.6 gevent==1.1b5 six>=1.8.0 tarantool_1.9.1.26.g63eb81e3c/test-run/dispatcher.py0000644000000000000000000003416313306562360020500 0ustar rootrootimport os import signal import time import select import random import functools import yaml import multiprocessing from multiprocessing.queues import SimpleQueue import listeners import lib from lib.worker import WorkerTaskResult, WorkerDone from lib.colorer import color_stdout class TcpPortDispatcher: """ Helper class holds available and occupied TCP port ranges. This ranges intended to distributes between workers. 
""" def __init__(self): self.range_size = 1000 self.ranges_cnt = 60 self.lowest_port = 3000 self.available_ranges = set() for i in range(self.ranges_cnt): start_port = self.lowest_port + i * self.range_size end_port = start_port + self.range_size - 1 tcp_port_range = (start_port, end_port) self.available_ranges.add(tcp_port_range) self.acquired_ranges = dict() def acquire_range(self, _id): tcp_port_range = self.available_ranges.pop() self.acquired_ranges[_id] = tcp_port_range return tcp_port_range def release_range(self, _id): tcp_port_range = self.acquired_ranges.pop(_id) self.available_ranges.add(tcp_port_range) class Dispatcher: """Run specified count of worker processes ('max_workers_cnt' arg), pass task IDs (via 'task_queue'), receive results and output (via 'result_queue') and pass it to listeners. Workers as well as tasks have types and certain task can be run only on worker of that type. To being abstract we get 'task_groups' argument contains worker generators (the callable working as factory of workers) and IDs of task that can be executed on such workers. The structure of this argument is the following: ``` task_groups = { 'some_key_1': { 'gen_worker': gen_worker_1, 'task_ids': task_ids_1, } ... } ``` Usage (simplified and w/o exception catching): ``` task_groups = ... 
dispatcher = Dispatcher(task_groups, max_workers_count=8, randomize=True) dispatcher.start() dispatcher.wait() dispatcher.statistics.print_statistics() dispatcher.wait_processes() ``` """ def __init__(self, task_groups, max_workers_cnt, randomize): self.pids = [] self.processes = [] self.result_queues = [] self.task_queues = [] self.workers_cnt = 0 self.worker_next_id = 1 tasks_cnt = 0 self.task_queue_disps = dict() for key, task_group in task_groups.items(): tasks_cnt += len(task_group['task_ids']) task_queue_disp = TaskQueueDispatcher(key, task_group, randomize) self.task_queue_disps[key] = task_queue_disp self.result_queues.append(task_queue_disp.result_queue) self.task_queues.append(task_queue_disp.task_queue) self.report_timeout = 1.0 self.statistics = None self.fail_watcher = None self.listeners = None self.init_listeners() self.max_workers_cnt = min(max_workers_cnt, tasks_cnt) self.pid_to_worker_id = dict() self.worker_id_to_pid = dict() self.randomize = randomize self.tcp_port_dispatcher = TcpPortDispatcher() def terminate_all_workers(self): for process in self.processes: if process.is_alive(): process.terminate() def kill_all_workers(self): for pid in self.pids: try: os.kill(pid, signal.SIGKILL) except OSError: pass def init_listeners(self): args = lib.Options().args watch_hang = args.no_output_timeout >= 0 and \ not args.gdb and \ not args.gdbserver and \ not args.lldb and \ not args.valgrind and \ not args.long watch_fail = not lib.Options().args.is_force log_output_watcher = listeners.LogOutputWatcher() self.statistics = listeners.StatisticsWatcher( log_output_watcher.get_logfile) output_watcher = listeners.OutputWatcher() self.listeners = [self.statistics, log_output_watcher, output_watcher] if watch_fail: self.fail_watcher = listeners.FailWatcher( self.terminate_all_workers) self.listeners.append(self.fail_watcher) if watch_hang: warn_timeout = 10.0 no_output_timeout = float(args.no_output_timeout or 120) hang_watcher = listeners.HangWatcher( 
output_watcher.not_done_worker_ids, self.kill_all_workers, warn_timeout, no_output_timeout) self.listeners.append(hang_watcher) def run_max_workers(self): ok = True new_workers_cnt = self.max_workers_cnt - self.workers_cnt while ok and new_workers_cnt > 0: ok = self.add_worker() new_workers_cnt = self.max_workers_cnt - self.workers_cnt def start(self): self.run_max_workers() def find_nonempty_task_queue_disp(self): """Find TaskQueueDispatcher that doesn't reported it's 'done' (don't want more workers created for working on its task queue). """ task_queue_disps_rnd = list( self.task_queue_disps.values()) if self.randomize: random.shuffle(task_queue_disps_rnd) # run all parallel groups first for task_queue_disp in task_queue_disps_rnd: if not task_queue_disp.is_parallel: continue if task_queue_disp.done: continue return task_queue_disp # then run all rest groups in a sequence self.max_workers_cnt = 1 for task_queue_disp in task_queue_disps_rnd: if len(task_queue_disp.worker_ids) > 0: continue if task_queue_disp.done: continue return task_queue_disp return None def get_task_queue_disp(self, worker_id): """Get TaskQueueDispatcher instance which contains certain worker by worker_id. 
""" for task_queue_disp in self.task_queue_disps.values(): if worker_id in task_queue_disp.worker_ids: return task_queue_disp return None def add_worker(self): # don't add new workers if fail occured and --force not passed if self.fail_watcher and self.fail_watcher.got_fail: return False task_queue_disp = self.find_nonempty_task_queue_disp() if not task_queue_disp: return False tcp_port_range = self.tcp_port_dispatcher.acquire_range( self.worker_next_id) process = task_queue_disp.add_worker(self.worker_next_id, tcp_port_range) self.processes.append(process) self.pids.append(process.pid) self.pid_to_worker_id[process.pid] = self.worker_next_id self.worker_id_to_pid[self.worker_next_id] = process.pid self.workers_cnt += 1 self.worker_next_id += 1 return True def del_worker(self, worker_id): pid = self.worker_id_to_pid[worker_id] task_queue_disp = self.get_task_queue_disp(worker_id) task_queue_disp.del_worker(worker_id) self.workers_cnt -= 1 self.tcp_port_dispatcher.release_range(worker_id) self.pids.remove(pid) del self.worker_id_to_pid[worker_id] del self.pid_to_worker_id[pid] for process in self.processes: if process.pid == pid: self.processes.remove(process) break def mark_task_done(self, worker_id, task_id): task_queue_disp = self.get_task_queue_disp(worker_id) task_queue_disp.mark_task_done(task_id) def undone_tasks(self): res = [] for task_queue_disp in self.task_queue_disps.values(): res.extend(task_queue_disp.undone_tasks()) return res def report_undone(self, verbose): undone = self.undone_tasks() if not bool(undone): return False if verbose: color_stdout( 'The following tasks were dispatched on some worker task ' 'queue, but were not reported as done (does not matters ' 'success or fail):\n', schema='test_var') for task_id in undone: color_stdout('- %s' % yaml.safe_dump(task_id)) else: color_stdout("Count of didn't processed tasks: %d\n" % len(undone), schema='test_var') return True def wait(self): """Wait all workers reported its done via result_queues. 
But in the case when some worker process terminated prematurely 'invoke_listeners' can add fake WorkerDone markers (see also 'check_for_dead_processes'). """ while self.workers_cnt > 0: try: inputs = [q._reader for q in self.result_queues] ready_inputs, _, _ = select.select( inputs, [], [], self.report_timeout) except KeyboardInterrupt: self.flush_ready(inputs) raise objs = self.invoke_listeners(inputs, ready_inputs) for obj in objs: if isinstance(obj, WorkerTaskResult): self.mark_task_done(obj.worker_id, obj.task_id) elif isinstance(obj, WorkerDone): self.del_worker(obj.worker_id) if not objs: self.check_for_dead_processes() self.run_max_workers() def invoke_listeners(self, inputs, ready_inputs): """Returns received objects from result queue to allow Dispatcher update its structures. """ # process timeout if not ready_inputs: for listener in self.listeners: listener.process_timeout(self.report_timeout) return [] # collect received objects objs = [] for ready_input in ready_inputs: result_queue = self.result_queues[inputs.index(ready_input)] while not result_queue.empty(): objs.append(result_queue.get()) # process received objects for obj in objs: for listener in self.listeners: listener.process_result(obj) return objs def flush_ready(self, inputs): """Write output from workers to stdout.""" # leave only output listeners in self.listeners save_listeners = self.listeners new_listeners = [] for listener in self.listeners: if isinstance(listener, (listeners.LogOutputWatcher, listeners.OutputWatcher)): listener.report_at_timeout = False new_listeners.append(listener) self.listeners = new_listeners # wait some time until processes in our group get its SIGINTs and give # us some last output time.sleep(0.1) # collect and process ready inputs ready_inputs, _, _ = select.select(inputs, [], [], 0) self.invoke_listeners(inputs, ready_inputs) def check_for_dead_processes(self): for pid in self.pids: exited = False try: os.waitpid(pid, os.WNOHANG) except OSError: exited = True 
if exited: worker_id = self.pid_to_worker_id[pid] color_stdout( "[Main process] Worker %d don't reported work " "done using results queue, but the corresponding " "process seems dead. Removing it from Dispatcher.\n" % worker_id, schema='test_var') self.del_worker(worker_id) def wait_processes(self): for process in self.processes: process.join() self.processes = [] class TaskQueueDispatcher: """Incapsulate data structures necessary for dispatching workers working on the one task queue. """ def __init__(self, key, task_group, randomize): self.key = key self.gen_worker = task_group['gen_worker'] self.task_ids = task_group['task_ids'] self.is_parallel = task_group['is_parallel'] if self.is_parallel: self.randomize = randomize if self.randomize: random.shuffle(self.task_ids) else: self.randomize = False self.result_queue = SimpleQueue() self.task_queue = SimpleQueue() for task_id in self.task_ids: self.task_queue.put(task_id) self.worker_ids = set() self.done = False self.done_task_ids = set() def _run_worker(self, worker_id, tcp_port_range): """Entry function for worker processes.""" os.environ['TEST_RUN_WORKER_ID'] = str(worker_id) os.environ['TEST_RUN_TCP_PORT_START'] = str(tcp_port_range[0]) os.environ['TEST_RUN_TCP_PORT_END'] = str(tcp_port_range[1]) color_stdout.queue = self.result_queue worker = self.gen_worker(worker_id) worker.run_all(self.task_queue, self.result_queue) def add_worker(self, worker_id, tcp_port_range): # Note: each of our workers should consume only one None, but for the # case of abnormal circumstances we listen for processes termination # (method 'check_for_dead_processes') and for time w/o output from # workers (class 'HangWatcher'). 
self.task_queue.put(None) # 'stop worker' marker entry = functools.partial(self._run_worker, worker_id, tcp_port_range) self.worker_ids.add(worker_id) process = multiprocessing.Process(target=entry) process.start() return process def del_worker(self, worker_id): self.worker_ids.remove(worker_id) # mark task queue as done when the first worker done to prevent cycling # with add-del workers self.done = True def mark_task_done(self, task_id): self.done_task_ids.add(task_id) def undone_tasks(self): # keeps an original order res = [] for task_id in self.task_ids: if task_id not in self.done_task_ids: res.append(task_id) return res tarantool_1.9.1.26.g63eb81e3c/test-run/listeners.py0000644000000000000000000001471413306562360020362 0ustar rootrootimport os import re import sys import yaml import lib from lib.worker import get_reproduce_file from lib.worker import WorkerOutput, WorkerDone, WorkerTaskResult from lib.colorer import color_stdout class BaseWatcher(object): """Base class for all listeners intended to be called when some message arrive to a result queue from some worker. 
""" def process_result(self, obj): raise ValueError('override me') def process_timeout(self, delta_seconds): """Called after delta_seconds time of inactivity.""" # optionally override pass class StatisticsWatcher(BaseWatcher): def __init__(self, get_logfile): self.stats = dict() self.failed_tasks = [] self.get_logfile = get_logfile def process_result(self, obj): if not isinstance(obj, WorkerTaskResult): return if obj.short_status not in self.stats: self.stats[obj.short_status] = 0 self.stats[obj.short_status] += 1 if obj.short_status == 'fail': self.failed_tasks.append((obj.task_id, obj.worker_name)) def print_statistics(self): """Returns are there failed tasks.""" if self.stats: color_stdout('Statistics:\n', schema='test_var') for short_status, cnt in self.stats.items(): color_stdout('* %s: %d\n' % (short_status, cnt), schema='test_var') if not self.failed_tasks: return False color_stdout('Failed tasks:\n', schema='test_var') for task_id, worker_name in self.failed_tasks: logfile = self.get_logfile(worker_name) reproduce_file = get_reproduce_file(worker_name) color_stdout('- %s' % yaml.safe_dump(task_id), schema='test_var') color_stdout('# logfile: %s\n' % logfile) color_stdout('# reproduce file: %s\n' % reproduce_file) return True class LogOutputWatcher(BaseWatcher): def __init__(self): self.fds = dict() self.logdir = os.path.join(lib.Options().args.vardir, 'log') try: os.makedirs(self.logdir) except OSError: pass def get_logfile(self, worker_name): filename = '%s.log' % worker_name filepath = os.path.join(self.logdir, filename) return os.path.realpath(filepath) def process_result(self, obj): if isinstance(obj, WorkerDone): self.fds[obj.worker_id].close() del self.fds[obj.worker_id] if not isinstance(obj, WorkerOutput): return if obj.worker_id not in self.fds.keys(): filepath = self.get_logfile(obj.worker_name) self.fds[obj.worker_id] = open(filepath, 'w') fd = self.fds[obj.worker_id] fd.write(obj.output) fd.flush() def __del__(self): for fd in self.fds.values(): 
try: fd.close() except IOError: pass class OutputWatcher(BaseWatcher): color_re = re.compile('\033' + r'\[\d(?:;\d\d)?m') def __init__(self): self.buffer = dict() @staticmethod def add_prefix(output, worker_id): prefix_max_len = len('[xxx] ') prefix = ('[%03d] ' % worker_id).ljust(prefix_max_len) output = output.rstrip('\n') lines = [(line + '\n') for line in output.split('\n')] output = prefix + prefix.join(lines) return output @staticmethod def _write(output, worker_id): output = OutputWatcher.add_prefix(output, worker_id) sys.stdout.write(output) @staticmethod def _decolor(obj): return OutputWatcher.color_re.sub('', obj) def process_result(self, obj): if isinstance(obj, WorkerDone): bufferized = self.buffer.get(obj.worker_id, '') if bufferized: OutputWatcher._write(bufferized, obj.worker_id) if obj.worker_id in self.buffer.keys(): del self.buffer[obj.worker_id] return if not isinstance(obj, WorkerOutput) or obj.log_only: return bufferized = self.buffer.get(obj.worker_id, '') if OutputWatcher._decolor(obj.output).endswith('\n'): OutputWatcher._write(bufferized + obj.output, obj.worker_id) self.buffer[obj.worker_id] = '' else: self.buffer[obj.worker_id] = bufferized + obj.output def not_done_worker_ids(self): return self.buffer.keys() class FailWatcher(BaseWatcher): def __init__(self, terminate_all_workers): self.terminate_all_workers = terminate_all_workers self.got_fail = False def process_result(self, obj): if not isinstance(obj, WorkerTaskResult): return if obj.short_status == 'fail': color_stdout('[Main process] Got failed test; ' 'gently terminate all workers...\n', schema='test_var') self.got_fail = True self.terminate_all_workers() class HangError(Exception): pass class HangWatcher(BaseWatcher): """Terminate all workers if no output received 'no_output_times' time.""" def __init__(self, get_not_done_worker_ids, kill_all_workers, warn_timeout, kill_timeout): self.get_not_done_worker_ids = get_not_done_worker_ids self.kill_all_workers = kill_all_workers 
self.warn_timeout = warn_timeout self.kill_timeout = kill_timeout self.warned_seconds_ago = 0.0 self.inactivity = 0.0 def process_result(self, obj): self.warned_seconds_ago = 0.0 self.inactivity = 0.0 def process_timeout(self, delta_seconds): self.warned_seconds_ago += delta_seconds self.inactivity += delta_seconds worker_ids = self.get_not_done_worker_ids() if self.warned_seconds_ago < self.warn_timeout: return color_stdout("No output during %d seconds. " "List of workers not reporting the status: %s; " "Will abort after %d seconds without output.\n" % ( self.inactivity, worker_ids, self.kill_timeout), schema='test_var') self.warned_seconds_ago = 0.0 if self.inactivity < self.kill_timeout: return color_stdout('\n[Main process] No output from workers. ' 'It seems that we hang. Send SIGKILL to workers; ' 'exiting...\n', schema='test_var') self.kill_all_workers() raise HangError() tarantool_1.9.1.26.g63eb81e3c/test-run/test-run.py0000755000000000000000000001701413306562360020132 0ustar rootroot#!/usr/bin/env python2 """Tarantool regression test suite front-end.""" # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # How it works (briefly, simplified) # ################################## # # * Get task groups; each task group correspond to a test suite; each task # group contains workers generator (factory) and task IDs (test_name + # conf_name). # * Put task groups to Dispatcher, which: # * Create task (input) and result (output) queues for each task group. # * Create and run specified count of workers on these queues. # * Wait for results on the result queues and calls registered listeners. # * If some worker done its work, the Dispatcher will run the new one if # there are tasks. # * Listeners received messages from workers and timeouts when no messages # received. Its: # * Count results statistics. # * Multiplex screen's output. # * Log output to per worker log files. # * Exit us when some test failed. # * Exit us when no output received from workers during some time. # * When all workers reported it's done (or exceptional situation occured) the # main process kill all processes in the same process group as its own to # prevent 'orphan' worker or tarantool servers from flooding an OS. # * Exit status is zero (success) when no errors detected and all requested # tests passed. Otherwise non-zero. 
import os import signal import sys import time import subprocess import collections import multiprocessing import lib import lib.worker from lib.colorer import color_stdout from lib.utils import signame, format_process from lib.tarantool_server import TarantoolServer from listeners import HangError from dispatcher import Dispatcher EXIT_SUCCESS = 0 EXIT_HANG = 1 EXIT_INTERRUPTED = 2 EXIT_FAILED_TEST = 3 EXIT_NOTDONE_TEST = 4 EXIT_UNKNOWN_ERROR = 50 def main_loop_parallel(): color_stdout("Started {0}\n".format(" ".join(sys.argv)), schema='tr_text') jobs = lib.Options().args.jobs if jobs < 1: # faster result I got was with 2 * cpu_count jobs = 2 * multiprocessing.cpu_count() if jobs > 0: color_stdout("Running in parallel with %d workers\n\n" % jobs, schema='tr_text') randomize = True task_groups = lib.worker.get_task_groups() if lib.Options().args.reproduce: task_groups = lib.worker.reproduce_task_groups(task_groups) jobs = 1 randomize = False dispatcher = Dispatcher(task_groups, jobs, randomize) dispatcher.start() lib.worker.print_greetings() color_stdout("\n", '=' * 86, "\n", schema='separator') color_stdout("WORKR".ljust(6), schema='t_name') color_stdout("TEST".ljust(48), schema='t_name') color_stdout("PARAMS".ljust(16), schema='test_var') color_stdout("RESULT\n", schema='test_pass') color_stdout('-' * 81, "\n", schema='separator') try: is_force = lib.Options().args.is_force dispatcher.wait() dispatcher.wait_processes() color_stdout('-' * 81, "\n", schema='separator') has_failed = dispatcher.statistics.print_statistics() has_undone = dispatcher.report_undone(verbose=is_force) if has_failed: return EXIT_FAILED_TEST if is_force and has_undone: return EXIT_NOTDONE_TEST except KeyboardInterrupt: color_stdout('-' * 81, "\n", schema='separator') dispatcher.statistics.print_statistics() dispatcher.report_undone(verbose=is_force) raise except HangError: color_stdout('-' * 81, "\n", schema='separator') dispatcher.statistics.print_statistics() 
dispatcher.report_undone(verbose=is_force) return EXIT_HANG return EXIT_SUCCESS def main_parallel(): res = EXIT_UNKNOWN_ERROR try: res = main_loop_parallel() except KeyboardInterrupt: color_stdout('\n[Main process] Caught keyboard interrupt\n', schema='test_var') res = EXIT_INTERRUPTED return res def main_loop_consistent(failed_test_ids): # find and prepare all tasks/groups, print information task_groups = lib.worker.get_task_groups().items() lib.worker.print_greetings() for name, task_group in task_groups: # print information about current test suite color_stdout("\n", '=' * 80, "\n", schema='separator') color_stdout("TEST".ljust(48), schema='t_name') color_stdout("PARAMS".ljust(16), schema='test_var') color_stdout("RESULT\n", schema='test_pass') color_stdout('-' * 75, "\n", schema='separator') task_ids = task_group['task_ids'] if not task_ids: continue worker_id = 1 worker = task_group['gen_worker'](worker_id) for task_id in task_ids: short_status = worker.run_task(task_id) if short_status == 'fail': failed_test_ids.append(task_id) if not lib.Options().args.is_force: worker.stop_server(cleanup=False) return color_stdout('-' * 75, "\n", schema='separator') worker.stop_server(silent=False) color_stdout() def main_consistent(): color_stdout("Started {0}\n".format(" ".join(sys.argv)), schema='tr_text') failed_test_ids = [] try: main_loop_consistent(failed_test_ids) except KeyboardInterrupt: color_stdout('[Main loop] Caught keyboard interrupt\n', schema='test_var') except RuntimeError as e: color_stdout("\nFatal error: %s. 
Execution aborted.\n" % e, schema='error') if lib.Options().args.gdb: time.sleep(100) return -1 if failed_test_ids and lib.Options().args.is_force: color_stdout("\n===== %d tests failed:\n" % len(failed_test_ids), schema='error') for test_id in failed_test_ids: color_stdout("----- %s\n" % str(test_id), schema='info') return (-1 if failed_test_ids else 0) if __name__ == "__main__": # don't sure why, but it values 1 or 2 gives 1.5x speedup for parallel # test-run (and almost doesn't affect consistent test-run) os.environ['OMP_NUM_THREADS'] = '2' status = 0 force_parallel = bool(lib.Options().args.reproduce) if not force_parallel and lib.Options().args.jobs == -1: status = main_consistent() else: status = main_parallel() exit(status) tarantool_1.9.1.26.g63eb81e3c/test-run/README.md0000644000000000000000000001774113306562360017262 0ustar rootroot# Tarantool Functional testing framework ### Test Suite Bunch of tests, that lay down in the subfolder (recursively) with `suite.ini` file. `suite.ini` is basic ini-file, that consists of one section `default`, and a number of fields: * `core` * `description` - Test Suite description * `script` - shebang file to start tarantool with * disables: * `disabled` - tests that must be skipped * `release_disabled` - tests that must be skipped when Tarantool has been builded with `Release` * `valgrind_disabled` - tests that must be skipped when Valgrind is enabled * `lua_libs` - paths for lua files, that should be copied into the folder, where server is started (delimited with the space, e.g. `lua_libs=lua/1.lua lua/2.lua`) * `long_run` - mark tests as long, enabled only with `--long` option (delimited with the space, e.g. 
`long_run=t1.test.lua t2.test.lua`) * `config` - test configuration file name Field `core` must be one of: * `tarantool` - Test-Suite for Functional Testing * `app` - Another functional Test-Suite * `unit` - Unit-Testing Test Suite ### Test Each test consists of files `*.test(.lua|.py)?`, `*.result`, and may have skip condition file `*.skipcond`. On first run (without `.result`) `.result` is generated from output. Each run, in the beggining, `.skipcond` file is executed. In the local env there's object `self`, that's `Test` object. If test must be skipped - you must put `self.skip = 1` in this file. Next, `.test(.lua|.py)?` is executed and file `.reject` is created, then `.reject` is compared with `.result`. If something differs, then 15 last string of this diff file are printed and `.reject` file is saving in the folder, where `.result` file is. If not, then `.reject` is deleted. ### Test configuration Test configuration file contains config for multiple run. For each test section system runs separated test and compares result with common `.result` file. For example we need to run one test for different db engines("*" means default configuration): ```json { "my.test.lua": { "first": {"a": 1, "b": 2}, "second": {"a": 1, "b": 3} }, "*": { "memtx": {"engine": "memtx"}, "sophia": {"engine": "sophia"} } } ``` In test case we can get configuration from inspector: ```lua engine = test_run:get_cfg('engine') -- first run engine is 'memtx' -- second run engine is 'sophia' ``` #### Python Files: `.test.py`, `.result` and `.skipcond`(optionaly). Environment: * `sql` - `BoxConnection` class. Convert our subclass of SQL into IProto query and then decode it. Print into `.result` in YAML. 
Examples: * `sql("select * from t where k=[ limit ]")` * `sql("insert into t values ([ [, ]*])")` * `sql("delete from t where k=")` * `sql("call ([string|number]*)")` * `sql("update t set [k= [, k=]*] where k="")` * `sql("ping")` * `admin` - `AdminConnection` - simply send admin query on admin port (LUA), then, receive answer. Examples * `admin('box.info')` **Example:** ```python import os import time from lib.admin_connection import AdminConnection from lib.tarantool_server import TarantoolServer master = server admin("box.info.lsn") # equivalent to master.admin("box.info.lsn") and server.admin(...) sql("select * from t0 where k0=1") replica = TarantoolServer() replica.script = 'replication/replica.lua' replica.vardir = os.path.join(server.vardir, "replica") replica.deploy() master.admin("box.insert(0, 1, 'hello')") print('sleep_1') time.sleep(0.1) print('sleep_finished') print('sleep_2') admin("require('fiber').sleep(0.1)") print('sleep_finished') replica.admin("box.select(0, 0, 1)") con2 = AdminConnection('localhost', server.admin.port) con2("box.info.lsn") replica.stop() replica.cleanup(True) con2.disconnect() ``` **Result:** ```yaml box.info.lsn --- - null ... select * from t0 where k0=1 --- - error: errcode: ER_NO_SUCH_SPACE errmsg: Space '#0' does not exist ... box.insert(0, 1, 'hello') --- - error: '[string "return box.insert(0, 1, ''hello'')"]:1: attempt to call field ''insert'' (a nil value)' ... sleep_1 sleep_finished sleep_2 require('fiber').sleep(0.1) --- ... sleep_finished box.select(0, 0, 1) --- - error: '[string "return box.select(0, 0, 1)"]:1: attempt to call field ''select'' (a nil value)' ... box.info.lsn --- - null ... ``` #### Lua Files: `.test.lua`, `.result` and `.skipcond`(optionaly). Tests interact only with `AdminConnection`. 
Supports some preprocessor functions (eg `delimiter`) **Delimiter example:** ``` env = require('test_run') test_run = env.new() box.schema.space.create('temp') t1 = box.space.temp t1:create_index('primary', { type = 'hash', parts = {1, 'num'}, unique = true}) t1:insert{0, 1, 'hello'} test_run:cmd("setopt delimiter ';'") function test() return {1,2,3} end; test( ); test_run:cmd("setopt delimiter ''"); test( ); test ``` **Delimiter result:** ``` env = require('test_run') test_run = env.new() box.schema.space.create('temp') --- - index: [] on_replace: 'function: 0x40e4fdf0' temporary: false id: 512 engine: memtx enabled: false name: temp field_count: 0 - created ... t1 = box.space.temp --- ... t1:create_index('primary', { type = 'hash', parts = {1, 'num'}, unique = true}) --- ... t1:insert{0, 1, 'hello'} --- - [0, 1, 'hello'] ... test_run:cmd("setopt delimiter ';'") function test() return {1,2,3} end; --- ... test( ); --- - - 1 - 2 - 3 ... test_run:cmd("setopt delimiter ''"); test( --- - error: '[string "test( "]:1: unexpected symbol near ''''' ... ); --- - error: '[string "); "]:1: unexpected symbol near '')''' ... test --- - 'function: 0x40e533b8' ... ``` ##### Interaction with the test environment In lua test you can use `test_run` module to interact with the test environement ```lua env = require('test_run') test_run = env.new() test_run:cmd("") ``` __Base directives:__ * `setopt delimiter ''` - Sets delimiter to \n __Server directives:__ * `create server with ...` - Create server with name , where `...` may be: * `script = ''` - script to start * `rpl_master = ` - replication master server name * `start server ` - Run server * `stop server ` - Stop server * `cleanup server ` - Cleanup (basically after server has been stopped) * `restart server ` - Restart server (you can restart yourself from lua!) 
__Connection switch:__ * `switch ` - Switch connection to server and add test run into global scope __Connection directives(low level):__ * `create connection to ` - create connection named to server * `drop connection ` - Turn connection off and delete it * `set connection ` - Set connection to be main, for next commands __Filter directives:__ * `push filter '' to ''` - e,g, `push filter 'listen: .*' to 'listen: '` __Set variables:__ * `set variables '' to ''` - execute ` = *` where * is value of where. Where must be * `.admin` - admin port of this server * `.master` - listen port of master of this replica * `.listen` - listen port of this server __Dev ops features:__ You can power on any tarantool replicas in a loop ```lua test_run:cmd('setopt delimiter ";"') function join(inspector, n) for i=1,n do local rid = tostring(i) os.execute('mkdir -p tmp') os.execute('cp ../replication/replica.lua ./tmp/replica'..rid..'.lua') os.execute('chmod +x ./tmp/replica'..rid..'.lua') inspector:cmd("create server replica"..rid.." 
with rpl_master=default, script='./var/tmp/replica"..rid..".lua'") inspector:cmd("start server replica"..rid) end end; test_run:cmd('setopt delimiter ""'); -- create 30 replicas for current tarantool join(test_run, 30) ``` tarantool_1.9.1.26.g63eb81e3c/test-run/.gitmodules0000644000000000000000000000035213306562360020146 0ustar rootroot[submodule "lib/msgpack-python"] path = lib/msgpack-python url = https://github.com/msgpack/msgpack-python.git [submodule "lib/tarantool-python"] path = lib/tarantool-python url = https://github.com/tarantool/tarantool-python.git tarantool_1.9.1.26.g63eb81e3c/test-run/.gitignore0000644000000000000000000000126413306562360017764 0ustar rootroot# General *~ .*.sw[a-z] # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *,cover # Translations *.mo *.pot # Sphinx documentation docs/_build/ # PyBuilder target/ tarantool_1.9.1.26.g63eb81e3c/rump/0000775000000000000000000000000013306560010015163 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/rump/Makefile0000664000000000000000000000040213306560010016617 0ustar rootrootCHOST ?= x86_64-rumprun-netbsd CBUILDROOT ?= /usr/rumprun-x86_64 all: cd .. && ${CHOST}-cmake . \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_BUNDLED_LIBYAML=ON \ -DENABLE_DIST=OFF $(MAKE) -C .. 
$(MAKEOPTS) tarantool cp -p ../src/tarantool ../../tarantool tarantool_1.9.1.26.g63eb81e3c/extra/0000775000000000000000000000000013306565107015337 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/extra/dmg/0000775000000000000000000000000013306560010016072 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/extra/dmg/LICENSE.rtf0000664000000000000000000000341313306560010017672 0ustar rootroot{\rtf1\ansi\ansicpg1252\cocoartf1265 {\fonttbl\f0\fswiss\fcharset0 Helvetica;} {\colortbl;\red255\green255\blue255;\red0\green41\blue57;} \margl1440\margr1440\vieww10800\viewh8400\viewkind0 \deftab720 \pard\pardeftab720 \f0\fs24 \cf2 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\ \ 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\ 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\ \ THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\ \ Copyright (C) 2010-2013 Tarantool AUTHORS:\ \ Aleksey Demakov, Aleksey Mashanov, Alexandre Kalendarev,\ Damien Lefortier, Dmitry E. 
Oboukhov, Dmitry Simonenko,\ Konstantin Osipov, Konstantin Shulgin, Mons Anderson,\ Pavel Cherenkov, Roman Antipin, Roman Tokarev, Roman Tsisyk,\ Teodor Sigaev, Timofey Khryukin, Yuriy Nevinitsin, Yuriy Vostrikov\ Veniamin Gvozdikov\ }tarantool_1.9.1.26.g63eb81e3c/extra/dmg/postflight.in0000664000000000000000000000063513306560010020611 0ustar rootroot#!/usr/bin/env sh PLIST_PRODUCT=@LUANCHD_PLIST@ LUANCHD_PLIST=/Library/LaunchDaemons/${PLIST_PRODUCT} if [ -f ${LUANCHD_PLIST} ] then launchctl unload ${LUANCHD_PLIST} rm -f ${LUANCHD_PLIST} fi cp ${3}@CMAKE_INSTALL_PREFIX@/share/tarantool/${PLIST_PRODUCT} ${LUANCHD_PLIST} if [ -f ${LUANCHD_PLIST} ] then launchctl load ${LUANCHD_PLIST} else echo "${LUANCHD_PLIST} could not install" exit 1 fi exit 0 tarantool_1.9.1.26.g63eb81e3c/extra/dmg/README.rtf0000664000000000000000000000715413306560010017553 0ustar rootroot{\rtf1\ansi\ansicpg1252\cocoartf1265 {\fonttbl\f0\froman\fcharset0 Times-Roman;} {\colortbl;\red255\green255\blue255;\red26\green26\blue26;\red48\green48\blue48;} \margl1440\margr1440\vieww12600\viewh7800\viewkind0 \deftab720 \pard\pardeftab720\sa298 \f0\b\fs24 \cf2 What is Tarantool?\ \pard\pardeftab720\sa380 \b0 \cf2 Tarantool is an in-memory NoSQL database. The code is available for free under the terms of {\field{\*\fldinst{HYPERLINK "http://www.gnu.org/licenses/license-list.html#ModifiedBSD"}}{\fldrslt \i \ul \ulc3 BSD license}}. Supported platforms are GNU/Linux, Mac OS and FreeBSD.\ \pard\pardeftab720\sa298 \b \cf2 An overview of the architecture\ \pard\pardeftab720\sa380 \b0 \cf2 The server \b maintains all its data in random-access memory \b0 , and therefore has very low read latency. At the same time, a copy of the data is kept on non-volatile storage (a disk drive), and inserts and updates are performed atomically.\ To ensure atomicity, consistency and crash-safety of the persistent copy, a write-ahead log (WAL) is maintained, and each change is recorded in the WAL before it is considered complete. 
The logging subsystem supports group commit.\ If update and delete rate is high, a constantly growing write-ahead log file (or files) can pose a disk space problem, and significantly increase time necessary to restart from disk. A simple solution is employed: the server \b can be requested to save a concise snapshot \b0 of its current data. The underlying operating system's \'93copy-on-write\'94 feature is employed to take the snapshot in a quick, resource-savvy and non-blocking manner. The \'93copy-on-write\'94 technique guarantees that snapshotting has minimal impact on server performance.\ \pard\pardeftab720\sa380 \b \cf2 Tarantool is lock-free \b0 . Instead of the operating system's concurrency primitives, such as threads and mutexes, Tarantool uses a cooperative multitasking environment to simultaneously operate on thousands of connections. A fixed number of independent execution threads within the server do not share state, but exchange data using low overhead message queues. While this approach limits server scalability to a few CPU cores, it removes competition for the memory bus and sets the scalability limit to the top of memory and network throughput. CPU utilization of a typical highly-loaded Tarantool server is under 10%.\ \pard\pardeftab720\sa298 \b \cf2 Key features\ \pard\pardeftab720\sa380 \b0 \cf2 Unlike most of NoSQL databases, Tarantool supports primary, \b secondary keys, multi-part keys \b0 , HASH, TREE and BITSET index types.\ Tarantool supports \b Lua stored procedures \b0 , which can access and modify data atomically. Procedures can be created, modified and dropped at runtime.\ Use of Lua as an extension language does not end with stored procedures: Lua programs can be used during startup, to define triggers and background tasks, interact with networked peers. 
Unlike popular application development frameworks implemented around "reactor" pattern, networking in server-side Lua is sequential, yet very efficient, as is built on top of the cooperating multitasking environment used by the server itself.\ Extended with Lua, Tarantool typically replaces more not one but a few existing components with a single well-performing system, changing and simplifying complex multi-tier Web application architectures.\ Tarantool supports replication. Replicas may run locally or on a remote host. Tarantool replication is asynchronous and does not block writes to the master. When or if the master becomes unavailable, the replica can be switched to assume the role of the master without server restart.\ }tarantool_1.9.1.26.g63eb81e3c/extra/dmg/DESCRIPTION.rtf0000664000000000000000000000274513306560010020402 0ustar rootroot{\rtf1\ansi\ansicpg1252\cocoartf1265 {\fonttbl\f0\froman\fcharset0 Times-Roman;} {\colortbl;\red255\green255\blue255;\red0\green41\blue57;} {\*\listtable{\list\listtemplateid1\listhybrid{\listlevel\levelnfc23\levelnfcn23\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{disc\}}{\leveltext\leveltemplateid1\'01\uc0\u8226 ;}{\levelnumbers;}\fi-360\li720\lin720 }{\listname ;}\listid1}} {\*\listoverridetable{\listoverride\listid1\listoverridecount0\ls1}} \margl1440\margr1440\vieww10800\viewh8400\viewkind0 \deftab720 \pard\pardeftab720\sl480\sa321 \f0\b\fs30 \cf2 Tarantool \fs24 \ \pard\pardeftab720\sa380 \b0 \cf2 Tarantool is an in-memory database designed to store the most volatile and highly accessible web content. Tarantool has been extensively used in production since 2009. 
It's \b open source \b0 , BSD licensed.\ \pard\pardeftab720\sa241 \b \cf2 Features\ \pard\tx220\tx720\pardeftab720\li720\fi-720 \ls1\ilvl0 \b0 \cf2 {\listtext \'95 }{\field{\*\fldinst{HYPERLINK "http://tarantool.org/benchmark.html"}}{\fldrslt \ul lowest CPU overhead}} to store or serve a piece of content,\ {\listtext \'95 }optional Write Ahead Logging for persistency and reliability,\ {\listtext \'95 }universal data access with {\field{\*\fldinst{HYPERLINK "http://github.com/mailru/tntlua"}}{\fldrslt \ul rich Lua stored procedures}}, which can exchange messages between each other or networked peers,\ {\listtext \'95 }asynchronous master-slave replication and hot backup.\ }tarantool_1.9.1.26.g63eb81e3c/extra/org.tarantool.in0000664000000000000000000000156713306560010020455 0ustar rootroot StandardOutPath /var/log/tarantool.log StandardErrorPath /var/log/tarantool.log WorkingDirectory @CMAKE_INSTALL_PREFIX@ EnvironmentVariables PATH @CMAKE_INSTALL_PREFIX@/bin:/bin:/sbin:/opt/bin KeepAlive SuccessfulExit Label @DARWIN_PACKAGE_ID@.@CPACK_PACKAGE_VENDOR@.tarantool ProgramArguments tarantool RunAtLoad tarantool_1.9.1.26.g63eb81e3c/extra/exports0000664000000000000000000001220213306560010016747 0ustar rootroot# Symbols exported by the main Tarantool executable # FFI password_prepare lbox_socket_local_resolve lbox_socket_nonblock base64_decode base64_encode base64_bufsize SHA1internal guava random_bytes fiber_time fiber_time64 fiber_clock fiber_clock64 tarantool_lua_slab_cache ibuf_create ibuf_reinit ibuf_destroy ibuf_reserve_slow port_destroy csv_create csv_destroy csv_setopt csv_iterator_create csv_next csv_feed csv_escape_field title_update title_get title_set_interpretor_name title_get_interpretor_name title_set_script_name title_get_script_name title_set_custom title_get_custom title_set_status title_get_status exception_get_string exception_get_int tarantool_lua_ibuf uuid_nil tt_uuid_create tt_uuid_str tt_uuid_is_equal tt_uuid_is_nil tt_uuid_bswap tt_uuid_from_string 
log_level log_format uri_parse uri_format PMurHash32 PMurHash32_Process PMurHash32_Result crc32_calc mp_encode_double mp_encode_float mp_decode_double mp_decode_float say_set_log_level say_logrotate say_set_log_format tarantool_uptime log_pid space_by_id space_run_triggers space_bsize box_schema_version tnt_openssl_init tnt_EVP_CIPHER_key_length tnt_EVP_CIPHER_iv_length tnt_EVP_MD_CTX_new tnt_EVP_MD_CTX_free tnt_HMAC_CTX_new tnt_HMAC_CTX_free # Module API _say fiber_attr_new fiber_attr_delete fiber_attr_setstacksize fiber_attr_getstacksize fiber_self fiber_new fiber_new_ex fiber_yield fiber_start fiber_wakeup fiber_cancel fiber_set_cancellable fiber_set_joinable fiber_join fiber_sleep fiber_is_cancelled fiber_time fiber_time64 fiber_reschedule fiber_cond_new fiber_cond_delete fiber_cond_signal fiber_cond_broadcast fiber_cond_wait_timeout fiber_cond_wait cord_slab_cache coio_wait coio_close coio_call coio_getaddrinfo luaL_pushcdata luaL_checkcdata luaL_setcdatagc luaL_ctypeid luaL_cdef luaL_pushuint64 luaL_pushint64 luaL_checkuint64 luaL_checkint64 luaL_touint64 luaL_toint64 luaT_pushtuple luaT_istuple luaT_error luaT_call luaT_cpcall luaT_state box_txn box_txn_begin box_txn_commit box_txn_savepoint box_txn_rollback box_txn_rollback_to_savepoint box_txn_alloc box_txn_id box_key_def_new box_key_def_delete box_tuple_format_default box_tuple_new box_tuple_ref box_tuple_unref box_tuple_field_count box_tuple_bsize box_tuple_to_buf box_tuple_format box_tuple_format_new box_tuple_format_ref box_tuple_format_unref box_tuple_field box_tuple_iterator box_tuple_iterator_free box_tuple_position box_tuple_rewind box_tuple_seek box_tuple_next box_tuple_update box_tuple_upsert box_tuple_extract_key box_tuple_compare box_tuple_compare_with_key box_return_tuple box_space_id_by_name box_index_id_by_name box_select box_insert box_replace box_delete box_update box_upsert box_truncate box_index_iterator box_iterator_next box_iterator_free box_index_len box_index_bsize box_index_random 
box_index_get box_index_min box_index_max box_index_count box_error_type box_error_code box_error_message box_error_last box_error_clear box_error_set box_latch_new box_latch_delete box_latch_lock box_latch_trylock box_latch_unlock clock_realtime clock_monotonic clock_process clock_thread clock_realtime64 clock_monotonic64 clock_process64 clock_thread64 # Lua / LuaJIT lua_newstate lua_close lua_newthread lua_atpanic lua_gettop lua_settop lua_pushvalue lua_remove lua_insert lua_replace lua_checkstack lua_xmove lua_isnumber lua_isstring lua_iscfunction lua_isuserdata lua_type lua_typename lua_equal lua_rawequal lua_lessthan lua_tonumber lua_tointeger lua_toboolean lua_tolstring lua_objlen lua_tocfunction lua_touserdata lua_tothread lua_topointer lua_pushnil lua_pushnumber lua_pushinteger lua_pushlstring lua_pushstring lua_pushvfstring lua_pushfstring lua_pushcclosure lua_pushboolean lua_pushlightuserdata lua_pushthread lua_gettable lua_getfield lua_rawget lua_rawgeti lua_createtable lua_newuserdata lua_getmetatable lua_getfenv lua_settable lua_setfield lua_rawset lua_rawseti lua_setmetatable lua_setfenv lua_call lua_pcall lua_cpcall lua_load lua_dump lua_yield lua_resume lua_status lua_gc lua_error lua_next lua_concat lua_getallocf lua_setallocf lua_getstack lua_getinfo lua_getlocal lua_setlocal lua_getupvalue lua_setupvalue lua_sethook lua_gethook lua_gethookmask lua_gethookcount lua_upvalueid lua_upvaluejoin lua_loadx luaopen_base luaopen_math luaopen_string luaopen_table luaopen_io luaopen_os luaopen_package luaopen_debug luaopen_bit luaopen_jit luaopen_ffi luaL_openlibs luaL_openlib luaL_register luaL_getmetafield luaL_callmeta luaL_typerror luaL_argerror luaL_checklstring luaL_optlstring luaL_checknumber luaL_optnumber luaL_checkinteger luaL_optinteger luaL_checkstack luaL_checktype luaL_checkany luaL_newmetatable luaL_checkudata luaL_where luaL_error luaL_checkoption luaL_ref luaL_unref luaL_loadfile luaL_loadbuffer luaL_loadstring luaL_newstate luaL_gsub 
luaL_findtable luaL_fileresult luaL_execresult luaL_loadfilex luaL_loadbufferx luaL_traceback luaL_setfuncs luaL_pushmodule luaL_testudata luaL_setmetatable luaL_buffinit luaL_prepbuffer luaL_addlstring luaL_addstring luaL_addvalue luaL_pushresult luaJIT_setmode luaJIT_profile_start luaJIT_profile_stop luaJIT_profile_dumpstack # Temporary # Important! This function will be removed from exports soon (in the scope of # 1.9.1). Don't use it. key_def_new_with_parts tarantool_1.9.1.26.g63eb81e3c/extra/apigen0000775000000000000000000000012413306560010016511 0ustar rootroot#!/bin/sh sed -n '/^\/\*\* \\cond public \*\/$/,/^\/\*\* \\endcond public \*\/$/P' tarantool_1.9.1.26.g63eb81e3c/extra/empty.snap0000664000000000000000000000034313306565107017360 0ustar rootrootSNAP 0.12 Server: c14bc71e-d81d-4365-b679-333eb280b38c VClock: {1: 29} պ <8PPPPPPP!cluster$a12be9e2-7b80-4c0a-9920-e186337da2ceպ 5θoMPPPPPPP@!$c14bc71e-d81d-4365-b679-333eb280b38ctarantool_1.9.1.26.g63eb81e3c/extra/mkexports0000775000000000000000000000044113306560010017304 0ustar rootroot#! /bin/sh # $1 - in file # $2 - out file # $3 - os if [ "x$3x" = xDarwinx ]; then # _func1 # _func2 sed -e 's/#.*//; /^[[:space:]]*$/d; s/^/_/;' $1 > $2 else # { # func1; # func2; # }; ( echo "{" && sed -e '/^\s*$/d;s/$/;/;' $1 && echo "};" ) > $2 fi tarantool_1.9.1.26.g63eb81e3c/extra/bin2c.c0000664000000000000000000000236613306560010016473 0ustar rootroot/* * txt2c: Converts text files to C strings * * Compile with: * gcc txt2cs.c -o txt2cs * * Public domain. 
*/ #include #include #include #include int main(int argc, char** argv) { const char *prefix = ""; const char *suffix = "\n"; FILE *in = stdin; FILE *out = stdout; int c; while ((c = getopt(argc, argv, "np:s:h")) != -1) { switch (c) { case 'p': prefix = optarg; break; case 's': suffix = optarg; break; case 'h': printf("Usage: %s [-n] [-p prefix] [-s suffix] [infile] [outfile]\n", argv[0]); exit(0); break; } } if (optind < argc) { if (strcmp(argv[optind], "-") != 0) { if (!(in = fopen(argv[optind], "r"))) { fprintf(stderr, "Can't open %s\n", argv[optind]); perror(argv[0]); exit(1); } } if (optind + 1 < argc) { if (strcmp(argv[optind + 1], "-") != 0) { if (!(out = fopen(argv[optind + 1], "w"))) { fprintf(stderr, "Can't open %s\n", argv[optind + 1]); perror(argv[0]); exit(1); } } } } fputs(prefix, out); int col = 1; while ((c = fgetc(in)) != -1) { if (col >= 78 - 6) { fputs("\n", out); col = 0; } fprintf(out, " 0x%.2x,", c); col += 6; } fputs(suffix, out); return 0; } tarantool_1.9.1.26.g63eb81e3c/extra/txt2c.c0000664000000000000000000000314113306560010016532 0ustar rootroot/* * txt2c: Converts text files to C strings * * Compile with: * gcc txt2cs.c -o txt2cs * * Public domain. 
*/ #include #include #include #include int main(int argc, char** argv) { const char *prefix = ""; const char *suffix = "\n"; int no_quote = 0; /* if 1, do not prepend and append quotation marks (") */ FILE *in = stdin; FILE *out = stdout; int c; while ((c = getopt(argc, argv, "np:s:h")) != -1) { switch (c) { case 'n': no_quote = 1; break; case 'p': prefix = optarg; break; case 's': suffix = optarg; break; case 'h': printf("Usage: %s [-n] [-p prefix] [-s suffix] [infile] [outfile]\n", argv[0]); exit(0); break; } } if (optind < argc) { if (strcmp(argv[optind], "-") != 0) { if (!(in = fopen(argv[optind], "r"))) { fprintf(stderr, "Can't open %s\n", argv[optind]); perror(argv[0]); exit(1); } } if (optind + 1 < argc) { if (strcmp(argv[optind + 1], "-") != 0) { if (!(out = fopen(argv[optind + 1], "w"))) { fprintf(stderr, "Can't open %s\n", argv[optind + 1]); perror(argv[0]); exit(1); } } } } fputs(prefix, out); if (!no_quote) fputs("\"", out); while ((c = fgetc(in)) != -1) { switch (c) { case '\0': fputs("\\0", out); break; case '\t': fputs("\\t", out); break; case '\n': fputs("\\n\"\n\"", out); break; case '\r': fputs("\\r", out); break; case '\\': fputs("\\\\", out); break; case '\"': fputs("\\\"", out); break; default: fputc(c, out); break; } } if (!no_quote) fputs("\"", out); fputs(suffix, out); return 0; } tarantool_1.9.1.26.g63eb81e3c/extra/CMakeLists.txt0000664000000000000000000000262013306565107020077 0ustar rootrootadd_subdirectory(dist) add_subdirectory(luarocks) if (TARGET_OS_DARWIN) # NOTE: need add execution 'plutil -lint org.tarantool.tarantool.plist # to check syntax of plist file. # Also cmake doesn't support changing package id from 'com.' to 'org.' # Need chage to 'org.' 
after update lines this file: # https://github.com/Kitware/CMake/blob/v2.8.11.2/Source/CPack/cmCPackPackageMakerGenerator.cxx#L763 # # ^^^ DO NOT CHANGE DARWIN_PACKAGE_ID BEFORE SEE URL ABOVE ^^^ # set (DARWIN_PACKAGE_ID "com") set (LUANCHD_PLIST "${DARWIN_PACKAGE_ID}.${CPACK_PACKAGE_VENDOR}.tarantool.plist") # Configure scripts for *.pkg and luanchd daemon by templates configure_file(dmg/postflight.in postflight @ONLY) configure_file(org.tarantool.in ${LUANCHD_PLIST} @ONLY) # chmod +x 644 install (FILES ${CMAKE_BINARY_DIR}/extra/${LUANCHD_PLIST} DESTINATION share/tarantool PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ GROUP_READ WORLD_READ WORLD_READ) endif() if (CMAKE_CROSSCOMPILING) add_custom_target(txt2c COMMAND ${CMAKE_HOST_C_COMPILER} txt2c.c -o "${CMAKE_CURRENT_BINARY_DIR}/txt2c" DEPENDS txt2c.c) add_custom_target(bin2c COMMAND ${CMAKE_HOST_C_COMPILER} bin2c.c -o "${CMAKE_CURRENT_BINARY_DIR}/bin2c" DEPENDS bin2c.c) else() add_executable(txt2c txt2c.c) add_executable(bin2c bin2c.c) endif() tarantool_1.9.1.26.g63eb81e3c/extra/dist/0000775000000000000000000000000013306560010016266 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantool@.service.in0000664000000000000000000000347113306560010022365 0ustar rootroot# Please don't modify this file in-place, because it will be overwrriten # during package upgrades. It is recommended to copy this file to # /etc/systemd/system and then modify the chosen settings. Alternatively, # one can create a directory named service.d/ within /etc/systemd/system and # place a drop-in file name.conf there that only changes the specific # settings one is interested in. # # For example, if you want to change the maximum number of open files # for example.lua instance, you need to perform the following steps: # # 1. Create directory /etc/systemd/system/tarantool@example.service.d/ with # file limits.conf containing: # # [Service] # LimitNOFILE=10000 # # 2. Reload systemd daemon # # systemctl daemon-reload # # 3. 
Check new settings # # systemctl show tarantool@example|grep LimitNOFILE # # 4. Restart tarantool@example service # # systemctl restart tarantool@example # # Please see http://tarantool.org/doc/book/administration.html and # systemd.service(5) man page for additional information. # [Unit] Description=Tarantool Database Server After=network.target Documentation=man:tarantool(1) # Instance file ConditionPathExists=@TARANTOOL_INSTANCEDIR@/%i.lua [Service] Type=notify User=@TARANTOOL_USER@ Group=@TARANTOOL_USER@ # Disable OOM killer OOMScoreAdjust=-1000 # Increase fd limit for Vinyl LimitNOFILE=65535 ExecStart=@CMAKE_INSTALL_FULL_BINDIR@/tarantoolctl start %i ExecStop=@CMAKE_INSTALL_FULL_BINDIR@/tarantoolctl stop %i ## NYI: https://github.com/tarantool/tarantool/issues/1229 #ExecReload=@CMAKE_INSTALL_FULL_BINDIR@/tarantoolctl reload %i # Systemd waits until all xlogs are recovered TimeoutStartSec=86400s # Give a reasonable amount of time to close xlogs TimeoutStopSec=10s Restart=on-failure RestartSec=100ms [Install] WantedBy=multi-user.target DefaultInstance=example tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantool.init0000664000000000000000000000427413306560010021165 0ustar rootroot#! /bin/sh # /etc/init.d/tarantool ### BEGIN INIT INFO # Provides: tarantool # Required-Start: $remote_fs # Required-Stop: $remote_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Tarantool init script # Description: This file should be used to construct scripts to be # placed in /etc/init.d. ### END INIT INFO # Author: Dmitry E. Oboukhov PATH=/sbin:/usr/sbin:/bin:/usr/bin:bin SCRIPTNAME=/etc/init.d/tarantool DAEMON=/usr/bin/tarantool DIST_LUA=/usr/bin/tarantoolctl if [ -e "/lib/lsb/init-functions" ]; then . /lib/lsb/init-functions fi # Exit if the package is not installed [ -x "$DAEMON" ] || exit 0 if [ -e "/lib/init/vars.sh" ]; then . /lib/init/vars.sh elif [ -e "/etc/rc.d/init.d/functions" ]; then . 
/etc/rc.d/init.d/functions fi if [ -e "/etc/sysconfig/tarantool" ]; then sysconfig_tarantool="/etc/sysconfig/tarantool" elif [ -e "/etc/default/tarantool" ]; then sysconfig_tarantool="/etc/default/tarantool" fi if [ -n "$sysconfig_tarantool" ]; then CONF_DIR=`echo "dofile('$sysconfig_tarantool') print(instance_dir)" | tarantool` fi if [ -z "$sysconfig_tarantool" -o "$CONF_DIR" = "nil" ]; then CONF_DIR="/etc/tarantool/instances.enabled" fi INSTANCES=`find $CONF_DIR -xtype f -name '*lua'` if test -z "$INSTANCES"; then echo "tarantool: There are no instances (*.lua) in $CONF_DIR" exit 0 fi # # Function that starts the daemon/service # do_start() { echo "tarantool: Starting instances" for inst in $INSTANCES; do $DAEMON $DIST_LUA start `basename $inst .lua` done return 0 } # # Function that stops the daemon/service # do_stop() { echo "tarantool: Stopping instances" for inst in $INSTANCES; do $DAEMON $DIST_LUA stop `basename $inst .lua` done return 0 } # # Function that sends a SIGHUP to the daemon/service # do_reload() { do_stop do_start } case "$1" in start) do_start ;; stop) do_stop ;; status) ;; restart|force-reload) do_stop do_start ;; *) echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2 exit 3 ;; esac : tarantool_1.9.1.26.g63eb81e3c/extra/dist/example.lua0000664000000000000000000001562113306560010020431 0ustar rootroot-- This is default tarantool initialization file -- with easy to use configuration examples including -- replication, sharding and all major features -- Complete documentation available in: http://tarantool.org/doc/ -- -- To start this instance please run `systemctl start tarantool@example` or -- use init scripts provided by binary packages. -- To connect to the instance, use "sudo tarantoolctl enter example" -- Features: -- 1. Database configuration -- 2. Binary logging and automatic checkpoints -- 3. Replication -- 4. Automatinc sharding -- 5. Message queue -- 6. 
Data expiration ----------------- -- Configuration ----------------- box.cfg { ------------------------ -- Network configuration ------------------------ -- The read/write data port number or URI -- Has no default value, so must be specified if -- connections will occur from remote clients -- that do not use “admin address” listen = 'localhost:3301'; -- listen = '*:3301'; -- The server is considered to be a Tarantool replica -- it will try to connect to the master -- which replication_source specifies with a URI -- for example konstantin:secret_password@tarantool.org:3301 -- by default username is "guest" -- replication_source="127.0.0.1:3102"; -- The server will sleep for io_collect_interval seconds -- between iterations of the event loop io_collect_interval = nil; -- The size of the read-ahead buffer associated with a client connection readahead = 16320; ---------------------- -- Memtx configuration ---------------------- -- An absolute path to directory where snapshot (.snap) files are stored. -- If not specified, defaults to /var/lib/tarantool/INSTANCE -- memtx_dir = nil; -- How much memory Memtx engine allocates -- to actually store tuples, in bytes. memtx_memory = 128 * 1024 * 1024; -- 128Mb -- Size of the smallest allocation unit, in bytes. -- It can be tuned up if most of the tuples are not so small memtx_min_tuple_size = 16; -- Size of the largest allocation unit, in bytes. -- It can be tuned up if it is necessary to store large tuples memtx_max_tuple_size = 128 * 1024 * 1024; -- 128Mb -- Reduce the throttling effect of box.snapshot() on -- INSERT/UPDATE/DELETE performance by setting a limit -- on how many megabytes per second it can write to disk -- memtx_snap_io_rate_limit = nil; ---------------------- -- Vinyl configuration ---------------------- -- An absolute path to directory where Vinyl files are stored. 
-- If not specified, defaults to /var/lib/tarantool/INSTANCE -- vinyl_dir = nil; -- How much memory Vinyl engine can use for in-memory level, in bytes. vinyl_memory = 128 * 1024 * 1024; -- 128Mb -- How much memory Vinyl engine can use for caches, in bytes. vinyl_cache = 128 * 1024 * 1024; -- 128Mb -- Size of the largest allocation unit, in bytes. -- It can be tuned up if it is necessary to store large tuples vinyl_max_tuple_size = 128 * 1024 * 1024; -- 128Mb -- The maximum number of background workers for compaction. vinyl_write_threads = 2; ------------------------------ -- Binary logging and recovery ------------------------------ -- An absolute path to directory where write-ahead log (.xlog) files are -- stored. If not specified, defaults to /var/lib/tarantool/INSTANCE -- wal_dir = nil; -- Specify fiber-WAL-disk synchronization mode as: -- "none": write-ahead log is not maintained; -- "write": fibers wait for their data to be written to the write-ahead log; -- "fsync": fibers wait for their data, fsync follows each write; wal_mode = "none"; -- The maximal size of a single write-ahead log file wal_max_size = 256 * 1024 * 1024; -- The interval between actions by the checkpoint daemon, in seconds checkpoint_interval = 60 * 60; -- one hour -- The maximum number of checkpoints that the daemon maintans checkpoint_count = 6; -- Don't abort recovery if there is an error while reading -- files from the disk at server start. force_recovery = true; ---------- -- Logging ---------- -- How verbose the logging is. 
There are six log verbosity classes: -- 1 – SYSERROR -- 2 – ERROR -- 3 – CRITICAL -- 4 – WARNING -- 5 – INFO -- 6 – VERBOSE -- 7 – DEBUG log_level = 5; -- By default, the log is sent to /var/log/tarantool/INSTANCE.log -- If logger is specified, the log is sent to the file named in the string -- logger = "example.log"; -- If true, tarantool does not block on the log file descriptor -- when it’s not ready for write, and drops the message instead log_nonblock = true; -- If processing a request takes longer than -- the given value (in seconds), warn about it in the log too_long_threshold = 0.5; -- Inject the given string into server process title -- custom_proc_title = 'example'; } local function bootstrap() local space = box.schema.create_space('example') space:create_index('primary') -- Comment this if you need fine grained access control (without it, guest -- will have access to everything) box.schema.user.grant('guest', 'read,write,execute', 'universe') -- Keep things safe by default -- box.schema.user.create('example', { password = 'secret' }) -- box.schema.user.grant('example', 'replication') -- box.schema.user.grant('example', 'read,write,execute', 'space', 'example') end -- for first run create a space and add set up grants box.once('example-1.0', bootstrap) ----------------------- -- Automatinc sharding ----------------------- -- N.B. you need install tarantool-shard package to use shadring -- Docs: https://github.com/tarantool/shard/blob/master/README.md -- Example: -- local shard = require('shard') -- local shards = { -- servers = { -- { uri = [[host1.com:4301]]; zone = [[0]]; }; -- { uri = [[host2.com:4302]]; zone = [[1]]; }; -- }; -- login = 'tester'; -- password = 'pass'; -- redundancy = 2; -- binary = '127.0.0.1:3301'; -- monitor = false; -- } -- shard.init(shards) ----------------- -- Message queue ----------------- -- N.B. 
you need to install tarantool-queue package to use queue -- Docs: https://github.com/tarantool/queue/blob/master/README.md -- Example: -- local queue = require('queue') -- queue.create_tube(tube_name, 'fifottl') ------------------- -- Data expiration ------------------- -- N.B. you need to install tarantool-expirationd package to use expirationd -- Docs: https://github.com/tarantool/expirationd/blob/master/README.md -- Example (deletion of all tuples): -- local expirationd = require('expirationd') -- local function is_expired(args, tuple) -- return true -- end -- expirationd.start("clean_all", space.id, is_expired { -- tuple_per_item = 50, -- full_scan_time = 3600 -- }) tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantoolctl.in0000775000000000000000000011227413306560010021336 0ustar rootroot#!/usr/bin/env tarantool local io = require('io') local os = require('os') local ffi = require('ffi') local fio = require('fio') local fun = require('fun') local log = require('log') local uri = require('uri') local json = require('json') local xlog = require('xlog') local yaml = require('yaml') local errno = require('errno') local fiber = require('fiber') local netbox = require('net.box') local socket = require('socket') local console = require('console') local argparse = require('internal.argparse').parse ffi.cdef[[ int kill(int pid, int sig); int isatty(int fd); int getppid(void); int chdir(const char *path); ]] local TIMEOUT_INFINITY = 100 * 365 * 86400 -- name of tarantoolctl binary local self_name = fio.basename(arg[0]) -- command that we're executing local command_name = arg[1] -- true if we're running in user's HOME directory local usermode = false -- true if tarantoolctl is a symlink and name != tarantoolctl local linkmode = false -- a file with system-wide settings local default_file -- current instance settings local instance_name local instance_path local console_sock local group_name -- overrides for defaults files local instance_dir local default_cfg local 
positional_arguments local keyword_arguments local lua_arguments = arg -- function for printing usage reference local usage -- shift argv to remove 'tarantoolctl' from arg[0] local function shift_argv(arg, argno, argcount) for i = argno, 128 do arg[i] = arg[i + argcount] if arg[i] == nil then break end end end local function check_user_level() local uid = os.getenv('UID') local udir = nil if uid == 0 or os.getenv("NOTIFY_SOCKET") then return nil end -- local dir configuration local pwd = os.getenv('PWD') udir = pwd and pwd .. '/.tarantoolctl' udir = udir and fio.stat(udir) and udir or nil -- or home dir configuration local homedir = os.getenv('HOME') udir = udir or homedir and homedir .. '/.config/tarantool/tarantool' udir = udir and fio.stat(udir) and udir or nil -- if one of previous is not nil if udir ~= nil then usermode = true return udir end return nil end -- -- Find if we're running under a user, and this user has a default file in their -- home directory. If present, use it. Otherwise assume a system-wide default. -- If it's missing, it's OK as well. -- local function find_default_file() -- try to find local dir or user config local user_level = check_user_level() if user_level ~= nil then return user_level end -- no user-level defaults, use system-wide ones local cfg = '@CMAKE_INSTALL_FULL_SYSCONFDIR@/@SYSCONFIG_DEFAULT@/tarantool' if fio.stat(cfg) then return cfg end -- It's OK if there is no default file. -- load_default_file() will assume some defaults return nil end local function check_file(path) local rv, err = loadfile(path) if rv == nil then log.error("%s", debug.traceback()) log.error("Failed to check instance file '%s'", err) return err end return nil end -- -- System-wide default file may be missing, this is OK, -- we'll assume built-in defaults. -- It uses sandboxing for isolation. -- It's not completely safe, but it won't -- allow pollution of global variables. 
-- local function load_default_file(default_file) if default_file then local env = setmetatable({}, { __index = _G }) local ufunc, msg = loadfile(default_file) -- if load fails - show the last 10 lines of the log file if not ufunc then log.error("Failed to load defaults file: %s", msg) end debug.setfenv(ufunc, env) local state, msg = pcall(ufunc) if not state then log.error('Failed to execute defaults file: %s', msg) end default_cfg = env.default_cfg instance_dir = env.instance_dir end local d = default_cfg or {} d.pid_file = d.pid_file or "/var/run/tarantool" d.wal_dir = d.wal_dir or "/var/lib/tarantool" d.memtx_dir = d.memtx_dir or d.snap_dir or "/var/lib/tarantool" d.snap_dir = nil d.log = d.log or d.logger or "/var/log/tarantool" d.logger = nil d.vinyl_dir = d.vinyl_dir or "/var/lib/tarantool" d.pid_file = fio.pathjoin(d.pid_file, instance_name .. '.pid') d.wal_dir = fio.pathjoin(d.wal_dir, instance_name) d.memtx_dir = fio.pathjoin(d.memtx_dir, instance_name) d.vinyl_dir = fio.pathjoin(d.vinyl_dir, instance_name) d.log = fio.pathjoin(d.log, instance_name .. 
'.log') default_cfg = d if not usermode then -- change user name only if not running locally d.username = d.username or "tarantool" -- instance_dir must be set in the defaults file, but don't try to set -- it to the global instance dir if the user-local defaults file is in -- use instance_dir = instance_dir or '/etc/tarantool/instances.enabled' -- get user data local user_data = ffi.C.getpwnam(ffi.cast('const char*', d.username)) if user_data == nil then log.error('Unknown user: %s', d.username) os.exit(1) end -- get group data local group = ffi.C.getgrgid(user_data.pw_gid) if group == nil then log.error('Group lookup by gid failed: %d', user_data.pw_gid) os.exit(1) end group_name = ffi.string(group.gr_name) end if instance_dir == nil then log.error('Instance directory (instance_dir) is not set in %s', default_file) os.exit(1) end if not fio.stat(instance_dir) then log.error('Instance directory %s does not exist', instance_dir) os.exit(1) end end -- -- In case there is no explicit instance name, check whether arg[0] is a -- symlink. In that case, the name of the symlink is the instance name. 
-- local function find_instance_name(arg0, arg2) if arg2 ~= nil then return fio.basename(arg2, '.lua') end local istat = fio.lstat(arg0) if istat == nil then log.error("Can't stat %s: %s", arg0, errno.strerror()) os.exit(1) end if not istat:is_link() then usage(command_name) end arg[2] = arg0 linkmode = true return fio.basename(arg0, '.lua') end local function mkdir(dirname) log.info("mkdir %s", dirname) if not fio.mkdir(dirname, tonumber('0750', 8)) then log.error("Can't mkdir %s: %s", dirname, errno.strerror()) os.exit(1) end if not usermode and not fio.chown(dirname, default_cfg.username, group_name) then log.error("Can't chown(%s, %s, %s): %s", default_cfg.username, group_name, dirname, errno.strerror()) end end local function read_file(filename) local file = fio.open(filename, {'O_RDONLY'}) if file == nil then return nil, errno.strerror() end local buf = {} local i = 1 while true do buf[i] = file:read(1024) if buf[i] == nil then return nil, errno.strerror() elseif buf[i] == '' then break end i = i + 1 end return table.concat(buf) end -- Removes leading and trailing whitespaces local function string_trim(str) return str:gsub("^%s*(.-)%s*$", "%1") end local function logger_parse(logger) -- syslog if logger:find("syslog:") then logger = string_trim(logger:sub(8)) local args = {} logger:gsub("([^,]+)", function(keyval) keyval:gsub("([^=]+)=([^=]+)", function(key, val) args[key] = val end) end) return 'syslog', args -- pipes elseif logger:find("pipe:") then logger = string_trim(logger:sub(6)) return 'pipe', logger elseif logger:find("|") then logger = string_trim(logger:sub(2)) return 'pipe', logger -- files elseif logger:find("file:") then logger = string_trim(logger:sub(6)) return 'file', logger else logger = string_trim(logger) return 'file', logger end end local function mk_default_dirs(cfg) local init_dirs = { fio.dirname(cfg.pid_file), cfg.wal_dir, cfg.snap_dir, cfg.vinyl_dir, } local log_type, log_args = logger_parse(cfg.log) if log_type == 'file' then 
table.insert(init_dirs, fio.dirname(log_args)) end for _, dir in ipairs(init_dirs) do if fio.stat(dir) == nil then mkdir(dir) end end end -- systemd detection based on http://unix.stackexchange.com/a/164092 local function under_systemd() if not usermode then local rv = os.execute("systemctl 2>/dev/null | grep '\\-\\.mount' " .. "1>/dev/null 2>/dev/null") if rv == 0 then return true end end return false end local function forward_to_systemd() return under_systemd() and ffi.C.getppid() >= 2 end -- -------------------------------------------------------------------------- -- -- CAT command helpers -- -- -------------------------------------------------------------------------- -- local function find_in_list(id, list) if type(list) == 'number' then return id == list end for _, v in ipairs(list) do if v == id then return true end end return false end local write_lua_table = nil -- escaped string will be written local function write_lua_string(string) io.stdout:write("'") local pos, byte = 1, string:byte(1) while byte ~= nil do io.stdout:write(("\\x%02x"):format(byte)) pos = pos + 1 byte = string:byte(pos) end io.stdout:write("'") end local function write_lua_value(value) if type(value) == 'string' then write_lua_string(value) elseif type(value) == 'table' then write_lua_table(value) else io.stdout:write(tostring(value)) end end local function write_lua_fieldpair(key, val) io.stdout:write("[") write_lua_value(key) io.stdout:write("] = ") write_lua_value(val) end write_lua_table = function(tuple) io.stdout:write('{') local is_begin = true for key, val in pairs(tuple) do if is_begin == false then io.stdout:write(', ') else is_begin = false end write_lua_fieldpair(key, val) end io.stdout:write('}') end local function cat_lua_cb(record) io.stdout:write(('box.space[%d]'):format(record.BODY.space_id)) local op = record.HEADER.type:lower() io.stdout:write((':%s('):format(op)) if op == 'insert' or op == 'replace' then write_lua_table(record.BODY.tuple) elseif op == 'delete' then 
write_lua_table(record.BODY.key) elseif op == 'update' then write_lua_table(record.BODY.key) io.stdout:write(', ') write_lua_table(record.BODY.tuple) elseif op == 'upsert' then write_lua_table(record.BODY.tuple) io.stdout:write(', ') write_lua_table(record.BODY.operations) end io.stdout:write(')\n') end local function cat_yaml_cb(record) print(yaml.encode(record):sub(1, -6)) end local function cat_json_cb(record) print(json.encode(record)) end local cat_formats = setmetatable({ yaml = cat_yaml_cb, json = cat_json_cb, lua = cat_lua_cb, }, { __index = function(self, cmd) error(("Unknown formatter '%s'"):format(cmd)) end }) -- -------------------------------------------------------------------------- -- -- Commands -- -- -------------------------------------------------------------------------- -- local orig_cfg = box.cfg local function wrapper_cfg(cfg) fiber.name(instance_name, {truncate=true}) log.info('Run console at %s', console_sock) console.listen(console_sock) if not usermode then -- gh-2782: socket can be owned by root local console_sock = uri.parse(console_sock).service if not fio.chown(console_sock, default_cfg.username, group_name) then log.error("Can't chown(%s, %s, %s) [%d]: %s", console_sock, default_cfg.username, group_name, errno(), errno.strerror()) end -- gh-1293: members of `tarantool` group should be able to do `enter` local mode = '0664' if not fio.chmod(console_sock, tonumber(mode, 8)) then log.error("Can't chmod(%s, %s) [%d]: %s", console_sock, mode, errno(), errno.strerror()) end end cfg = cfg or {} for i, v in pairs(default_cfg) do if cfg[i] == nil then cfg[i] = v end end -- force these startup options cfg.pid_file = default_cfg.pid_file if os.getenv('USER') ~= default_cfg.username then cfg.username = default_cfg.username else cfg.username = nil end if os.getenv("NOTIFY_SOCKET") then cfg.background = false elseif cfg.background == nil then cfg.background = true end mk_default_dirs(cfg) local success, data = pcall(orig_cfg, cfg) if not success 
then log.error("Configuration failed: %s", data) if type(cfg) ~= 'function' then local log_type, log_args = logger_parse(cfg.log) if log_type == 'file' and fio.stat(log_args) then os.execute('tail -n 10 ' .. log_args) end end os.exit(1) end return data end -- It's not 100% result guaranteed function, but it's ok for most cases -- Won't help in multiple race-conditions -- Returns nil if Tarantool already started, PID otherwise local function start_check() local pid_file = default_cfg.pid_file local fh = fio.open(pid_file, 'O_RDONLY') if fh == nil then return nil end local pid = tonumber(fh:read(64)) fh:close() if pid == nil or (ffi.C.kill(pid, 0) < 0 and errno() == errno.ESRCH) then return nil end return pid end local function start() log.info("Starting instance %s...", instance_name) if forward_to_systemd() then local cmd = "systemctl start tarantool@" .. instance_name log.info("Forwarding to '" .. cmd .. "'") os.execute(cmd) return end local stat = check_file(instance_path) if stat ~= nil then log.error("Error while checking syntax: halting") os.exit(1) end local pid = start_check() if pid then log.error("The daemon is already running: PID %s", pid) os.exit(1) end box.cfg = wrapper_cfg require('title').update{ script_name = instance_path, __defer_update = true } shift_argv(arg, 0, 2) local success, data = pcall(dofile, instance_path) -- if load fails - show last 10 lines of the log file and exit if not success then log.error("Start failed: %s", data) if type(box.cfg) ~= 'function' then local log_type, log_args = logger_parse(box.cfg.log) if log_type == 'file' and fio.stat(log_args) then os.execute('tail -n 10 ' .. log_args) end end os.exit(1) end return 0 end local function stop() log.info("Stopping instance %s...", instance_name) if forward_to_systemd() then local cmd = "systemctl stop tarantool@" .. instance_name log.info("Forwarding to '" .. cmd .. 
"'") os.execute(cmd) return end -- remove console socket local console_sock = uri.parse(console_sock).service if fio.stat(console_sock) then fio.unlink(console_sock) end -- kill process and remove pid file local pid_file = default_cfg.pid_file if fio.stat(pid_file) == nil then log.error("Process is not running (pid: %s)", pid_file) return 0 end local f = fio.open(pid_file, 'O_RDONLY') if f == nil then log.error("Can't read pid file %s: %s", pid_file, errno.strerror()) return 1 end local pid = tonumber(f:read(64)) f:close() if pid == nil or pid <= 0 then log.error("Broken pid file %s", pid_file) fio.unlink(pid_file) return 1 end if ffi.C.kill(pid, 15) < 0 then log.error("Can't kill process %d: %s", pid, errno.strerror()) fio.unlink(pid_file) return 1 end return 0 end local function check() local rv = check_file(instance_path) if rv ~= nil then return 1 end log.info("File '%s' is OK", instance_path) return 0 end local function restart() local stat = check_file(instance_path) if stat ~= nil then log.error("Error while checking syntax: halting") return 1 end stop() fiber.sleep(1) start() return 0 end local function logrotate() local console_sock = uri.parse(console_sock).service if fio.stat(console_sock) == nil then -- process is not running, do nothing return 0 end local s = socket.tcp_connect('unix/', console_sock) if s == nil then -- socket is not opened, do nothing return 0 end s:write[[ require('log'):rotate() require('log').info("Rotate log file") ]] s:read({ '[.][.][.]' }, 2) return 0 end local function enter() local console_sock_path = uri.parse(console_sock).service if fio.stat(console_sock_path) == nil then log.error("Can't connect to %s (%s)", console_sock_path, errno.strerror()) if not usermode and errno() == errno.EACCES then log.error("Please add $USER to group '%s': usermod -a -G %s $USER", group_name, group_name) end return 1 end local cmd = string.format( "require('console').connect('%s', { connect_timeout = %s })", console_sock, TIMEOUT_INFINITY ) 
console.on_start(function(self) self:eval(cmd) end) console.on_client_disconnect(function(self) self.running = false end) console.start() return 0 end local function stdin_isatty() return ffi.C.isatty(0) == 1 end local function execute_remote(uri, code) local remote = netbox.connect(uri, { console = true, connect_timeout = TIMEOUT_INFINITY }) if remote == nil then return nil end return true, remote:eval(code) end local function connect() if not stdin_isatty() then local code = io.stdin:read("*a") if code == nil then usage(command_name) return 1 end local status, full_response = execute_remote(arg[2], code) if not status then log.error("Failed to connect to Tarantool") return 2 end local error_response = yaml.decode(full_response)[1] if type(error_response) == 'table' and error_response.error then log.error("Error while executing remote command:") log.error(error_response.error) return 3 end print(full_response) return 0 end -- Otherwise we're starting console console.on_start(function(self) local status, reason status, reason = pcall(function() require('console').connect(arg[2], { connect_timeout = TIMEOUT_INFINITY }) end) if not status then self:print(reason) self.running = false end end) console.on_client_disconnect(function(self) self.running = false end) console.start() return 0 end local function status() if forward_to_systemd() then local cmd = "systemctl status tarantool@" .. instance_name log.info("Forwarding to '" .. cmd .. 
"'") os.execute(cmd) return end local pid_file = default_cfg.pid_file local console_sock = uri.parse(console_sock).service if fio.stat(pid_file) == nil then if errno() == errno.ENOENT then log.info('%s is stopped (pid file does not exist)', instance_name) return 1 end log.error("Can't access pidfile %s: %s", pid_file, errno.strerror()) end if fio.stat(console_sock) == nil and errno() == errno.ENOENT then log.error("Pid file exists, but the control socket (%s) doesn't", console_sock) return 2 end local s = socket.tcp_connect('unix/', console_sock) if s == nil then if errno() ~= errno.EACCES then log.warn("Can't access control socket '%s' [%d]: %s", console_sock, errno(), errno.strerror()) return 2 end return 0 end s:close() log.info('%s is running (pid: %s)', instance_name, default_cfg.pid_file) return 0 end local function eval() local console_sock_path = uri.parse(console_sock).service local filename = arg[3] local code = nil if filename == nil then if stdin_isatty() then log.error("Usage:") log.error(" - tarantoolctl eval instance_name file.lua") log.error(" - | tarantoolctl eval instance_name") return 1 end code = io.stdin:read("*a") else local err code, err = read_file(filename) if code == nil then log.error("%s: %s", filename, err) return 2 end end assert(code ~= nil, "Check that we've successfully loaded file") if fio.stat(console_sock_path) == nil then log.warn("Pid file exists, but the control socket (%s) doesn't", console_sock_path) return 2 end local status, full_response = execute_remote(console_sock, code) if status == false then log.error("Control socket exists, but Tarantool doesn't listen on it") return 2 end local error_response = yaml.decode(full_response)[1] if type(error_response) == 'table' and error_response.error then log.error("Error while reloading config:") log.error(error_response.error) return 3 end print(full_response) return 0 end local function cat() local options = keyword_arguments local from, to, spaces = options.from, options.to, 
options.space local show_system, cat_format = options['show-system'], options.format local replicas = options.replica local format_cb = cat_formats[cat_format] local is_printed = false for id, file in ipairs(positional_arguments) do log.error("Processing file '%s'", file) for lsn, record in xlog.pairs(file) do local sid = record.BODY.space_id local rid = record.HEADER.replica_id if (lsn < from) or (not spaces and sid and sid < 512 and not show_system) or (spaces and (sid == nil or not find_in_list(sid, spaces))) or (replicas and not find_in_list(rid, replicas)) then -- pass this tuple elseif lsn >= to then -- stop, as we've finished reading tuple with lsn == to -- and the next lsn's will be bigger break else is_printed = true format_cb(record) io.stdout:flush() end end if options.format == 'yaml' and is_printed then is_printed = false print('...\n') end end end local function play() local options = keyword_arguments local from, to, spaces = options.from, options.to, options.space local show_system = options['show-system'] local uri = table.remove(positional_arguments, 1) local replicas = options.replica if uri == nil then error("Empty URI is provided") end local remote = netbox.new(uri) if not remote:wait_connected() then error(("Error while connecting to host '%s'"):format(uri)) end for id, file in ipairs(positional_arguments) do log.info(("Processing file '%s'"):format(file)) for lsn, record in xlog.pairs(file) do local sid = record.BODY.space_id local rid = record.HEADER.replica_id if (lsn < from) or (not spaces and sid and sid < 512 and not show_system) or (spaces and (sid == nil or not find_in_list(sid, spaces))) or (replicas and not find_in_list(rid, replicas)) then -- pass this tuple elseif lsn >= to then -- stop, as we've finished reading tuple with lsn == to -- and the next lsn's will be bigger break else local args, so = {}, remote.space[sid] if so == nil then error(("No space #%s, stopping"):format(sid)) end table.insert(args, so) table.insert(args, 
record.BODY.key) table.insert(args, record.BODY.tuple) table.insert(args, record.BODY.operations) so[record.HEADER.type:lower()](unpack(args)) end end end remote:close() end local function rocks() local cfg = require("luarocks.cfg") local util = require("luarocks.util") local loader = require("luarocks.loader") local command_line = require("luarocks.command_line") -- Tweak help messages util.see_help = function(command, program) -- TODO: print extended help message here return "See Tarantool documentation for help." end -- Enable only useful commands local commands = { install = "luarocks.install", search = "luarocks.search", list = "luarocks.list", remove = "luarocks.remove", show = "luarocks.show", make = "luarocks.make", } rawset(_G, 'commands', commands) if keyword_arguments.chdir then ffi.C.chdir(keyword_arguments.chdir) end -- Call LuaRocks command_line.run_command(unpack(positional_arguments)) end local function exit_wrapper(func) return function() os.exit(func()) end end local function process_remote(cmd_function) cmd_function() end local function process_local(cmd_function) instance_name = find_instance_name(arg[0], arg[2]) default_file = find_default_file() load_default_file(default_file) if #arg < 2 then log.error("Not enough arguments for '%s' command", command_name) usage(command_name) end instance_path = fio.pathjoin(instance_dir, instance_name .. '.lua') if not fio.stat(instance_path) then log.error('Instance %s is not found in %s', instance_name, instance_dir) os.exit(1) end -- create a path to the control socket (admin console) console_sock = instance_name .. '.control' console_sock = fio.pathjoin(fio.dirname(default_cfg.pid_file), console_sock) console_sock = 'unix/:' .. console_sock cmd_function() end local commands = setmetatable({ start = { func = start, process = process_local, help = { header = "%s start INSTANCE", linkmode = "%s start", description = [=[ Start a Tarantool instance (if not started; fail otherwise). 
]=], weight = 10, deprecated = false, } }, stop = { func = exit_wrapper(stop), process = process_local, help = { header = "%s stop INSTANCE", linkmode = "%s stop", description = [=[ Stop a Tarantool instance (if not stopped; fail otherwise). ]=], weight = 20, deprecated = false, } }, logrotate = { func = exit_wrapper(logrotate), process = process_local, help = { header = "%s logrotate INSTANCE", linkmode = "%s logrotate", description = [=[ Rotate logs of a started Tarantool instance. Works only if logging-into-file is enabled in the instance file. Pipe/syslog make no effect. ]=], weight = 50, deprecated = false, } }, status = { func = exit_wrapper(status), process = process_local, help = { header = "%s status INSTANCE", linkmode = "%s status", description = [=[ Show an instance's status (started/stopped). If pid file exists and an alive control socket exists, the return code is C<0>. Otherwise, the return code is not C<0>. Reports typical problems to stderr (e.g. pid file exists and control socket doesn't). ]=], weight = 30, deprecated = false, } }, enter = { func = exit_wrapper(enter), process = process_local, help = { header = "%s enter INSTANCE", linkmode = "%s enter", description = [=[ Enter an instance's interactive Lua console. ]=], weight = 65, deprecated = false, } }, restart = { func = restart, process = process_local, help = { header = "%s restart INSTANCE", linkmode = "%s restart", description = [=[ Stop and start a Tarantool instance (if started; fail otherwise). 
]=], weight = 40, deprecated = false, } }, reload = { func = exit_wrapper(eval), process = process_local, help = { header = "%s reload INSTANCE FILE", linkmode = "%s reload FILE", description = [=[ DEPRECATED in favor of "eval" ]=], weight = 0, deprecated = true, } }, eval = { func = exit_wrapper(eval), process = process_local, help = { header = { "%s eval INSTANCE FILE", "COMMAND | %s eval INSTANCE" }, linkmode = { "%s eval FILE", "COMMAND | %s eval" }, description = [=[ Evaluate a local Lua file on a Tarantool instance (if started; fail otherwise). ]=], weight = 70, deprecated = false, } }, check = { func = exit_wrapper(check), process = process_local, help = { header = "%s check INSTANCE", linkmode = "%s check", description = [=[ Check an instance file for syntax errors. ]=], weight = 60, deprecated = false, } }, connect = { func = exit_wrapper(connect), process = process_remote, help = { header = { "%s connect URI", "COMMAND | %s connect URI" }, description = [=[ Connect to a Tarantool instance on an admin-console port. Supports both TCP/Unix sockets. ]=], weight = 80, deprecated = false, } }, cat = { func = exit_wrapper(cat), process = process_remote, help = { header = "%s cat FILE.. [--space=space_no ..] [--show-system]" .. " [--from=from_lsn] [--to=to_lsn] [--replica=replica_id ..]", description = [=[ Print into stdout the contents of .snap/.xlog files. Supported options: * --space=space_no to filter the output by space number. May be passed more than once. * --show-system to show the contents of system spaces. * --from=from_lsn to show operations starting from the given lsn. * --to=to_lsn to show operations ending with the given lsn. * --replica=replica_id to filter the output by replica id. May be passed more than once. ]=], weight = 90, deprecated = false, } }, play = { func = exit_wrapper(play), process = process_remote, help = { header = "%s play URI FILE.. [--space=space_no ..]" .. " [--show-system] [--from=from_lsn] [--to=to_lsn]" .. 
" [--replica=replica_id ..]", description = [=[ Play the contents of .snap/.xlog files to another Tarantool instance. Supported options: * --space=space_no to filter the output by space number. May be passed more than once. * --show-system to show the contents of system spaces. * --from=from_lsn to show operations starting from the given lsn. * --to=to_lsn to show operations ending with the given lsn. * --replica=replica_id to filter the output by replica id. May be passed more than once. ]=], weight = 100, deprecated = false, } }, rocks = { func = exit_wrapper(rocks), process = process_remote, help = { header = "%s rocks [install|remove|show|search|list]", description = [=[ Package management. ]=], weight = 100, deprecated = false, }, subcommands = { install = { weight = 100, help = { header = "%s rocks install ROCKNAME", description = [=[ Install a rock. ]=], } }, remove = { weight = 101, help = { header = "%s rocks remove ROCKNAME", description = [=[ Uninstall a rock. ]=], } }, show = { weight = 110, help = { header = "%s rocks list ", description = [=[ Show information about an installed rock ]=], } }, search = { weight = 120, help = { header = "%s rocks search ", description = [=[ Search the rocks list for a pattern. ]=], } }, list = { weight = 121, help = { header = "%s rocks list ", description = [=[ List all installed rocks. ]=], } }, } } }, { __index = function() log.error("Unknown command '%s'", command_name) usage() end }) local function usage_command(name, cmd) local header = cmd.help.header if linkmode then header = cmd.help.linkmode end if type(header) == 'string' then header = { header } end for no, line in ipairs(header) do log.error(" " .. 
line, name) end end local function usage_header() log.error("Tarantool client utility (%s)", _TARANTOOL) end local function usage_commands(commands, verbose) local names = fun.iter(commands):map( function(self_name, cmd) return {self_name, cmd.help.weight or 0} end ):totable() table.sort(names, function(left, right) return left[2] < right[2] end) for _, cmd_name in ipairs(names) do local cmd = commands[cmd_name[1]] if cmd.help.deprecated ~= true then usage_command(self_name, cmd, false) if verbose then log.error("") log.error(cmd.help.description) end if cmd.subcommands then usage_commands(cmd.subcommands, verbose) end end end end usage = function(command, verbose) do -- in case a command is passed and is a valid command local command_struct = rawget(commands, command) if command ~= nil and command_struct then log.error("Usage:\n") usage_command(self_name, command_struct, true) log.error("") log.error(command_struct.help.description) os.exit(1) end end -- do this otherwise usage_header() if default_file ~= nil then log.error("Config file: %s", default_file) end log.error("") log.error("Usage:") usage_commands(commands, verbose) os.exit(1) end -- parse parameters and put the result into positional/keyword_arguments do local function keyword_arguments_populate(ka) ka = ka or {} ka.from = ka.from or 0 ka.to = ka.to or -1ULL ka['show-system'] = ka['show-system'] or false ka.format = ka.format or 'yaml' return ka end -- returns the command name, file list and named parameters local function parameters_parse(parameters) local command_name = table.remove(parameters, 1) local positional_arguments, keyword_arguments = {}, {} for k, v in pairs(parameters) do if type(k) == 'number' then positional_arguments[k] = v else keyword_arguments[k] = v end end return command_name, positional_arguments, keyword_arguments end local parameters = argparse(arg, { { 'space', 'number+' }, { 'show-system', 'boolean' }, { 'from', 'number' }, { 'to', 'number' }, { 'help', 'boolean' }, { 
'format', 'string' }, { 'replica', 'number+' }, { 'chdir', 'string' }, }) local cmd_name cmd_name, positional_arguments, keyword_arguments = parameters_parse(parameters) if cmd_name == 'help' or parameters.help == true or #arg < 1 then usage(cmd_name, true) end keyword_arguments = keyword_arguments_populate(parameters) end local cmd_pair = commands[command_name] if #arg < 2 then log.error("Not enough arguments for '%s' command\n", command_name) usage(command_name) end cmd_pair.process(cmd_pair.func) -- vim: syntax=lua tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantool.service0000664000000000000000000000053213306560010021653 0ustar rootroot# systemd service for managing all Tarantool instances on the system. This # service is actually a systemd target, but we are using a service since # targets cannot be reloaded. [Unit] Description=Tarantool Database Server [Service] Type=oneshot ExecStart=/bin/true ExecReload=/bin/true RemainAfterExit=on [Install] WantedBy=multi-user.target tarantool_1.9.1.26.g63eb81e3c/extra/dist/CMakeLists.txt0000664000000000000000000001320313306560010021025 0ustar rootroot# config file for tarantoolctl if (TARGET_OS_FREEBSD) set(SYSCONFIG_DEFAULT "tarantool/default") elseif (NOT IS_DIRECTORY "${CMAKE_INSTALL_SYSCONFDIR}/sysconfig") # Debian/Ubuntu/etc. set(SYSCONFIG_DEFAULT "default") else() # RedHat/Fedora/etc. 
set(SYSCONFIG_DEFAULT "sysconfig") endif() configure_file(tarantoolctl.in tarantoolctl @ONLY) option(ENABLE_DIST "Enable install of init scripts" OFF) if (NOT ENABLE_DIST) return () endif() include(systemd) # # tarantoolctl # # Default path to data in default/tarantool if (TARGET_OS_FREEBSD) set(TARANTOOL_DATADIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/db/tarantool") else() set(TARANTOOL_DATADIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/tarantool") endif() message (STATUS "tarantoolctl datadir: ${TARANTOOL_DATADIR}") set(TARANTOOL_LOGDIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/tarantool") message (STATUS "tarantoolctl logdir: ${TARANTOOL_LOGDIR}") set(TARANTOOL_RUNDIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/run/tarantool") message (STATUS "tarantoolctl rundir: ${TARANTOOL_RUNDIR}") set(TARANTOOL_USER "tarantool") set(SYSCONFIG_AVAILABLEDIR "tarantool/instances.available") set(SYSCONFIG_ENABLEDDIR "tarantool/instances.enabled") set(TARANTOOL_AVAILABLEDIR "${CMAKE_INSTALL_FULL_SYSCONFDIR}/${SYSCONFIG_AVAILABLEDIR}") set(TARANTOOL_ENABLEDDIR "${CMAKE_INSTALL_FULL_SYSCONFDIR}/${SYSCONFIG_ENABLEDDIR}") if(NOT WITH_SYSVINIT) # Don't use instances.enabled on systemd distros set(TARANTOOL_INSTANCEDIR "${TARANTOOL_AVAILABLEDIR}") else() set(TARANTOOL_INSTANCEDIR "${TARANTOOL_ENABLEDDIR}") endif() configure_file(default/tarantool.in default/tarantool @ONLY) install (FILES ${CMAKE_CURRENT_BINARY_DIR}/default/tarantool DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/${SYSCONFIG_DEFAULT}/ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ) # tarantoolctl itself install (FILES ${CMAKE_CURRENT_BINARY_DIR}/tarantoolctl DESTINATION ${CMAKE_INSTALL_BINDIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) # directories in /etc/ for tarantoolctl install(DIRECTORY DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/${SYSCONFIG_AVAILABLEDIR} ) if (WITH_SYSVINIT) install(DIRECTORY DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/${SYSCONFIG_ENABLEDDIR} ) endif() # 
an example instance script for tarantoolctl install (FILES example.lua DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/${SYSCONFIG_AVAILABLEDIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ ) # directories for data, logs and pid files # Sic: chmod and chown are performed by rpm/deb install(DIRECTORY DESTINATION ${TARANTOOL_DATADIR}) install(DIRECTORY DESTINATION ${TARANTOOL_LOGDIR}) # /var/run/tarantool is only needed with sysvinit # systemd creates this directory automatically using systemd-tmpfiles if (WITH_SYSVINIT) install(DIRECTORY DESTINATION ${TARANTOOL_RUNDIR}) endif() if (NOT TARGET_OS_FREEBSD) set (WITH_LOGROTATE_DEFAULT ON) endif() option(WITH_LOGROTATE "Enable logrotate configuration" ${WITH_LOGROTATE_DEFAULT}) file(APPEND "${_OptionalPackagesFile}" "-- WITH_LOGROTATE=${WITH_LOGROTATE}\n") if (WITH_LOGROTATE) # logrotate files configure_file(tarantool.logrotate.in tarantool.logrotate @ONLY) install (FILES ${CMAKE_CURRENT_BINARY_DIR}/tarantool.logrotate DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/logrotate.d/ RENAME "tarantool" PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ) endif() # man page for tarantoolctl pod2man ( tarantoolctl.pod "tarantoolctl" 1 "Tarantool instances control" ) # # Init scripts # if (WITH_SYSTEMD) message (STATUS "Using scripts for systemd") # NOTE: always install tarantool.init to ${CMAKE_INSTALL_PREFIX}/lib # instead of ${CMAKE_INSTALL_LIBDIR} because LIBDIR depends on the target # architecture, but tarantool-common is noarch package. 
set(SYSV_INITD_DIR ${CMAKE_INSTALL_PREFIX}/lib/tarantool) configure_file("tarantool@.service.in" "tarantool@.service" @ONLY) install (FILES ${CMAKE_CURRENT_BINARY_DIR}/tarantool@.service DESTINATION ${SYSTEMD_UNIT_DIR} PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ GROUP_READ WORLD_READ WORLD_READ) if(WITH_SYSVINIT) # Install generator and tarantool.service file to allow # sysvinit alongside systemd (Debian/Ubuntu) # configure_file("tarantool-generator.in" "tarantool-generator" @ONLY) install (FILES ${CMAKE_CURRENT_BINARY_DIR}/tarantool-generator DESTINATION ${SYSTEMD_GENERATOR_DIR} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_READ WORLD_EXECUTE) install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/tarantool.service DESTINATION ${SYSTEMD_UNIT_DIR} PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ GROUP_READ WORLD_READ WORLD_READ) endif() configure_file(tarantool.tmpfiles.conf.in tarantool.tmpfiles.conf @ONLY) install (FILES "${CMAKE_CURRENT_BINARY_DIR}/tarantool.tmpfiles.conf" DESTINATION "${SYSTEMD_TMPFILES_DIR}" RENAME "tarantool.conf" PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ GROUP_READ WORLD_READ WORLD_READ) endif() if (WITH_SYSVINIT) message (STATUS "Using scripts for sysvinit") install (FILES tarantool.init DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/init.d/ RENAME tarantool PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) endif() tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantoolctl.pod0000664000000000000000000001205013306560010021476 0ustar rootroot=head1 NAME tarantoolctl - a utility to control Tarantool instances =head1 SYNOPSIS tarantoolctl COMMAND [INSTANCE] [FILE] [URI] [OPTIONS...] =head1 DESCRIPTION tarantoolctl may be used to introspect and control the state of Tarantool instances. The C represents the name of an instance file. 
=head1 COMMANDS The following commands are understood: =over =item start INSTANCE Start the Tarantool instance specified on the command line if the instance is not running. This does nothing if an instance is running. =item stop INSTANCE Stop the Tarantool instance specified on the command line if the instance is running. This does nothing if an instance is not running. =item status INSTANCE Show status of the Tarantool instance specified on the command line (started/stopped). If pid file exists and an alive control socket exists, the return code is C<0>. Otherwise, the return code is not C<0>. Reports typical problems to stderr (e.g. pid file exists and control socket does not). =item restart INSTANCE Stop and start the Tarantool instance specified on the command line if the instance is running. This does nothing if an instance is not running. =item logrotate INSTANCE Rotate logs of the Tarantool instance specified on the command line if the instance is running. This works only if logging-into-file is enabled in the instance file (C parameter). Pipe/syslog make no effect. =item check INSTANCE Check if there are syntax errors in the instance script of the Tarantool instance specified on the command line. =item enter INSTANCE Enter the interactive console of the Tarantool instance specified on the command line. =item eval INSTANCE FILE =item COMMAND | tarantoolctl eval INSTANCE Evaluate a local file on the Tarantool instance specified on the command line if the instance is running. This does nothing if an instance is not running. =item connect URI =item COMMAND | tarantoolctl connect URI Connect on an admin-console port to the Tarantool instance with the URI specified on the command line. This supports both TCP/Unix sockets. =item cat FILE... [--space=space_no...] [--show-system] [--from=from_lsn] [--to=to_lsn] [--replica=replica_id] Print into stdout the contents of .snap/.xlog files specified on the command line. =item play URI FILE... [--space=space_no...] 
[--show-system] [--from=from_lsn] [--to=to_lsn] [--replica=replica_id] Play the contents of .snap/.xlog files to another Tarantool instance with URI specified on the command line. =back =head1 OPTIONS The following options are understood: =over =item --space=space_no Filter the output by space number. May be passed more than once. =item --show-system Show/play the contents of system spaces. =item --from=from_lsn Show/play operations starting from the given lsn. =item --to=to_lsn Show/play operations ending with the given lsn. =item --replica=replica_id Filter the output by replica ID. May be passed more than once. =back =head1 CONFIGURATION The file with system-wide defaults for tarantoolctl is installed in C. This file is used when tarantoolctl is invoked by root. When invoked by a local user, tarantoolctl first looks for its defaults file in the current directory (C<$PWD/.tarantoolctl>), and then in the current user's home directory (C<$HOME/.config/tarantool/tarantool>). If not found, tarantoolctl falls back to built-in defaults: default_cfg = { pid_file = "/var/run/tarantool", wal_dir = "/var/lib/tarantool", memtx_dir = "/var/lib/tarantool", vinyl_dir = "/var/lib/tarantool", log = "/var/log/tarantool", username = "tarantool", } instance_dir = "/etc/tarantool/instances.enabled" Most of these parameters are similar to those in C: =over =item pid_file Directory for the pid file and control-socket file; tarantoolctl will add "/instance_name" to the directory name. =item wal_dir Directory for write-ahead *.xlog files; tarantoolctl will add "/instance_name" to the directory name. =item memtx_dir Directory for snapshot *.snap files; tarantoolctl will add "/instance_name" to the directory name. =item vinyl_dir Directory for vinyl files; tarantoolctl will add "/instance_name" to the directory name. =item log The place where the application log will go; tarantoolctl will add "/instance_name.log" to the name. =item username The user that runs the Tarantool instance. 
This is the operating-system user name rather than the Tarantool-client user name. Tarantool will change its effective user to this user after becoming a daemon. =item instance_dir The directory where all instance files for this host are stored. Put instance files in this directory, or create symbolic links. As a full-featured example, you can take C script that ships with Tarantool and defines all configuration options. =back =head1 EXIT STATUS On success, C<0> is returned, a non-zero failure code otherwise. =head1 SEE ALSO tarantool(1), Tarantool manual at http://tarantool.org/doc/ =head1 COPYRIGHT Copyright (C) 2010-2017 Tarantool AUTHORS: please see AUTHORS file. =cut tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantool.tmpfiles.conf.in0000664000000000000000000000007613306560010023372 0ustar rootrootd @TARANTOOL_RUNDIR@ 0750 @TARANTOOL_USER@ @TARANTOOL_USER@ - tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantool-generator.in0000775000000000000000000000073013306560010022610 0ustar rootroot#!/bin/sh # This systemd generator creates dependency symlinks that make all Tarantool # instances in @TARANTOOL_ENABLEDDIR@ be started/stopped/reloaded # when tarantool.service is started/stopped/reloaded. 
set -eu wantdir="$1/tarantool.service.wants" service="@SYSTEMD_UNIT_DIR@/tarantool@.service" mkdir -p "$wantdir" for file in @TARANTOOL_ENABLEDDIR@/*.lua; do instance=`basename $file .lua` ln -s "$service" "$wantdir/tarantool@$instance.service" done exit 0 tarantool_1.9.1.26.g63eb81e3c/extra/dist/tarantool.logrotate.in0000664000000000000000000000040113306560010022613 0ustar rootroot@TARANTOOL_LOGDIR@/*.log { daily size 512k missingok rotate 10 compress delaycompress create 0640 @TARANTOOL_USER@ adm postrotate @CMAKE_INSTALL_FULL_BINDIR@/tarantoolctl logrotate `basename ${1%%.*}` endscript } tarantool_1.9.1.26.g63eb81e3c/extra/dist/default/0000775000000000000000000000000013306560010017712 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/extra/dist/default/tarantool.in0000664000000000000000000000207213306560010022246 0ustar rootroot-- -- System-wide settings for tarantoolctl and init scripts -- -- This file was configured by the package maintainers and you probably -- don't want to change it. Please complain about your custom configuration -- directly to upstream's bug tracker rather than to your distro. 
-- -- Settings below should be kept in sync with: -- -- * logrotate configuration -- * tarantool.service unit -- * systemd-tmpfiles configuration -- * directory structure and permissions -- default_cfg = { pid_file = "@TARANTOOL_RUNDIR@", -- @TARANTOOL_RUNDIR@/${INSTANCE}.pid wal_dir = "@TARANTOOL_DATADIR@", -- @TARANTOOL_DATADIR@/${INSTANCE}/ memtx_dir = "@TARANTOOL_DATADIR@", -- @TARANTOOL_DATADIR@/${INSTANCE} vinyl_dir = "@TARANTOOL_DATADIR@", -- @TARANTOOL_DATADIR@/${INSTANCE} log = "@TARANTOOL_LOGDIR@", -- @TARANTOOL_LOGDIR@/${INSTANCE}.log username = "@TARANTOOL_USER@", } -- instances.available - all available instances -- instances.enabled - instances to autostart by sysvinit instance_dir = "@TARANTOOL_INSTANCEDIR@" -- vim: set ft=lua : tarantool_1.9.1.26.g63eb81e3c/extra/luarocks/0000775000000000000000000000000013306560010017146 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/extra/luarocks/site_config.lua.cmake0000664000000000000000000000235313306560010023224 0ustar rootrootlocal site_config = {} site_config.LUAROCKS_PREFIX=[[@CMAKE_INSTALL_PREFIX@]] site_config.LUA_INCDIR=[[@MODULE_FULL_INCLUDEDIR@]] site_config.LUA_BINDIR=[[@CMAKE_INSTALL_FULL_BINDIR@]] site_config.LUA_INTERPRETER=[[tarantool]] site_config.LUA_MODULES_LIB_SUBDIR=[[/lib/tarantool]] site_config.LUA_MODULES_LUA_SUBDIR=[[/share/tarantool]] site_config.LUAROCKS_SYSCONFDIR=[[@CMAKE_INSTALL_FULL_SYSCONFDIR@/tarantool/rocks]] site_config.LUAROCKS_FORCE_CONFIG=true site_config.LUAROCKS_ROCKS_TREE=[[/usr/local/]] site_config.LUAROCKS_ROCKS_SUBDIR=[[/share/tarantool/rocks]] site_config.LUAROCKS_ROCKS_SERVERS={ [[http://rocks.tarantool.org/]] }; site_config.LUAROCKS_LOCALDIR = require('fio').cwd() site_config.LUAROCKS_HOME_TREE_SUBDIR=[[/.rocks]] site_config.LUA_DIR_SET=true site_config.LUAROCKS_UNAME_S=[[@CMAKE_SYSTEM_NAME@]] site_config.LUAROCKS_UNAME_M=[[@CMAKE_SYSTEM_PROCESSOR@]] site_config.LUAROCKS_DOWNLOADER=[[curl]] site_config.LUAROCKS_MD5CHECKER=[[openssl]] 
site_config.LUAROCKS_EXTERNAL_DEPS_SUBDIRS={ bin="bin", lib={ "lib", [[@MULTILIB@]] }, include="include" } site_config.LUAROCKS_RUNTIME_EXTERNAL_DEPS_SUBDIRS={ bin="bin", lib={ "lib", [[@MULTILIB@]] }, include="include" } site_config.LUAROCKS_LOCAL_BY_DEFAULT = true return site_config tarantool_1.9.1.26.g63eb81e3c/extra/luarocks/CMakeLists.txt0000664000000000000000000000053613306560010021712 0ustar rootrootconfigure_file(site_config.lua.cmake site_config.lua @ONLY) install(DIRECTORY ${PROJECT_SOURCE_DIR}/third_party/luarocks/src/luarocks DESTINATION ${MODULE_LUADIR}) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/site_config.lua DESTINATION ${MODULE_LUADIR}/luarocks/ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_READ GROUP_READ WORLD_READ) tarantool_1.9.1.26.g63eb81e3c/doc/0000775000000000000000000000000013306565107014761 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/doc/man/0000775000000000000000000000000013306560010015520 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/doc/man/tarantool.pod0000664000000000000000000000300213306560010020222 0ustar rootroot=head1 NAME tarantool - a Lua application server and a database management system =head1 SYNOPSIS tarantool [OPTIONS] [SCRIPT [ARGS]] =head1 DESCRIPTION Tarantool is a Lua application server fully compatible with Lua 5.1. It includes a mature database to persist, replicate and recover application state and a curated set of Lua modules for networking, I/O, messaging, data formats and more. It is designed to let developers quickly create efficient, reliable and powerful micro-services and backend applications for the Internet. Key features: =over =item * Fully compatible with Lua 5.1. =item * Coroutines and asynchronous I/O to implement high-performance lock-free access to data. =item * ACID transactions. =item * ANSI SQL, Lua stored procedures and triggers. =item * Authentication and access control. =item * Master-master replication and automatic sharding. 
=back =head1 OPTIONS You can use these options before (or instead of) the interpreted 'SCRIPT' name: =over =item -h, --help Display the help screen and exit. =item -V, -v, --version Print the program version and exit. =item -e EXRP Execute the string 'EXPR'. =item -l NAME Require the library 'NAME'. =item -i Enter the interactive mode after executing 'SCRIPT'. =item -- Stop handling options. =item - Execute stdin and stop handling options. =back =head1 SEE ALSO tarantoolctl(1), Tarantool documentation at http://tarantool.org =head1 COPYRIGHT Copyright (C) 2010-2017 Tarantool AUTHORS: please see AUTHORS file. =cut tarantool_1.9.1.26.g63eb81e3c/doc/man/CMakeLists.txt0000664000000000000000000000015013306560010020254 0ustar rootrootpod2man ( tarantool.pod "tarantool" 1 "Lua application server and database management system" ) tarantool_1.9.1.26.g63eb81e3c/doc/README.md0000664000000000000000000000024113306560010016221 0ustar rootrootPlease check out http://github.com/tarantool/doc for the main repository with Tarantool documentation, or read documentation online at http://tarantool.org/doc/ tarantool_1.9.1.26.g63eb81e3c/doc/CMakeLists.txt0000664000000000000000000000002613306560010017503 0ustar rootrootadd_subdirectory(man) tarantool_1.9.1.26.g63eb81e3c/AUTHORS0000664000000000000000000000144513306560010015254 0ustar rootrootTarantool is a collective effort, and incorporates many contributions from the community. Below follows a list of people, who contributed their code. Aleksandr Lyapunov, Aleksey Demakov, Aleksey Mashanov, Alexandre Kalendarev, Andrey Drozdov, Anton Barabanov, Damien Lefortier, Dmitry E. 
Oboukhov, Dmitry Simonenko, Elena Shebunyaeva, Eugene Blikh, Eugene Shadrin, Georgy Kirichenko, Konstantin Knizhnik, Konstantin Nazarov, Konstantin Osipov, Konstantin Shulgin, Mons Anderson, Marko Kevac, Nick Zavaritsky, Oleg Tsarev, Pavel Cherenkov, Roman Antipin, Roman Tokarev, Roman Tsisyk, Teodor Sigaev, Timofey Khryukin, Veniamin Gvozdikov, Vassiliy Soshnikov, Vladimir Rudnyh, Yuriy Nevinitsin, Yuriy Vostrikov NOTE: If you can commit a change to this list, please do not hesitate to add your name to it. tarantool_1.9.1.26.g63eb81e3c/src/0000775000000000000000000000000013306565107015003 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/trigger.cc0000664000000000000000000000316413306560010016745 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trigger.h" #include "exception.h" int trigger_run(struct rlist *list, void *event) { try { struct trigger *trigger, *tmp; rlist_foreach_entry_safe(trigger, list, link, tmp) trigger->run(trigger, event); } catch (Exception *e) { return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/trivia/0000775000000000000000000000000013306565107016301 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/trivia/config.h.cmake0000664000000000000000000001475613306560010020777 0ustar rootroot#ifndef TARANTOOL_CONFIG_H_INCLUDED #define TARANTOOL_CONFIG_H_INCLUDED /* * This file is generated by CMake. The original file is called * config.h.cmake. Please do not modify. */ /** \cond public */ /** * Package major version - 1 for 1.6.7 */ #define PACKAGE_VERSION_MAJOR @CPACK_PACKAGE_VERSION_MAJOR@ /** * Package minor version - 6 for 1.6.7 */ #define PACKAGE_VERSION_MINOR @CPACK_PACKAGE_VERSION_MINOR@ /** * Package patch version - 7 for 1.6.7 */ #define PACKAGE_VERSION_PATCH @CPACK_PACKAGE_VERSION_PATCH@ /** * A string with major-minor-patch-commit-id identifier of the * release, e.g. 1.6.6-113-g8399d0e. 
*/ #define PACKAGE_VERSION "@TARANTOOL_VERSION@" /** \endcond public */ #define PACKAGE "@PACKAGE@" /* Defined if building for Linux */ #cmakedefine TARGET_OS_LINUX 1 /* Defined if building for FreeBSD */ #cmakedefine TARGET_OS_FREEBSD 1 /* Defined if building for NetBSD */ #cmakedefine TARGET_OS_NETBSD 1 /* Defined if building for Darwin */ #cmakedefine TARGET_OS_DARWIN 1 #ifdef TARGET_OS_DARWIN #define TARANTOOL_LIBEXT "dylib" #else #define TARANTOOL_LIBEXT "so" #endif /** * Defined if cpuid() instruction is available. */ #cmakedefine HAVE_CPUID 1 /* * Defined if gcov instrumentation should be enabled. */ #cmakedefine ENABLE_GCOV 1 /* * Defined if configured with ENABLE_BACKTRACE ('show fiber' * showing fiber call stack. */ #cmakedefine ENABLE_BACKTRACE 1 /* * Set if the system has bfd.h header and GNU bfd library. */ #cmakedefine HAVE_BFD 1 #cmakedefine HAVE_MAP_ANON 1 #cmakedefine HAVE_MAP_ANONYMOUS 1 #if !defined(HAVE_MAP_ANONYMOUS) && defined(HAVE_MAP_ANON) /* * MAP_ANON is deprecated, MAP_ANONYMOUS should be used instead. * Unfortunately, it's not universally present (e.g. not present * on FreeBSD. */ #define MAP_ANONYMOUS MAP_ANON #endif /* * Defined if O_DSYNC mode exists for open(2). */ #cmakedefine HAVE_O_DSYNC 1 #if defined(HAVE_O_DSYNC) #define WAL_SYNC_FLAG O_DSYNC #else #define WAL_SYNC_FLAG O_SYNC #endif /* * Defined if fdatasync(2) call is present. */ #cmakedefine HAVE_FDATASYNC 1 #ifndef HAVE_FDATASYNC #if defined(__APPLE__) #include #define fdatasync(fd) fcntl(fd, F_FULLFSYNC) #else #define fdatasync fsync #endif #endif /* * Defined if this platform has GNU specific memmem(). */ #cmakedefine HAVE_MEMMEM 1 /* * Defined if this platform has GNU specific memrchr(). */ #cmakedefine HAVE_MEMRCHR 1 /* * Defined if this platform has sendfile(..). */ #cmakedefine HAVE_SENDFILE 1 /* * Defined if this platform has Linux specific sendfile(..). */ #cmakedefine HAVE_SENDFILE_LINUX 1 /* * Defined if this platform has BSD specific sendfile(..). 
*/ #cmakedefine HAVE_SENDFILE_BSD 1 /* * Set if this is a GNU system and libc has __libc_stack_end. */ #cmakedefine HAVE_LIBC_STACK_END 1 /* * Defined if this is a big-endian system. */ #cmakedefine HAVE_BYTE_ORDER_BIG_ENDIAN 1 /* * Defined if this platform supports openmp and it is enabled */ #cmakedefine HAVE_OPENMP 1 /* * Defined if compatible with GNU readline installed. */ #cmakedefine HAVE_GNU_READLINE 1 /* * Defined if `st_mtim' is a member of `struct stat'. */ #cmakedefine HAVE_STRUCT_STAT_ST_MTIM 1 /* * Defined if `st_mtimensec' is a member of `struct stat'. */ #cmakedefine HAVE_STRUCT_STAT_ST_MTIMENSEC 1 /* * Set if compiler has __builtin_XXX methods. */ #cmakedefine HAVE_BUILTIN_CTZ 1 #cmakedefine HAVE_BUILTIN_CTZLL 1 #cmakedefine HAVE_BUILTIN_CLZ 1 #cmakedefine HAVE_BUILTIN_CLZLL 1 #cmakedefine HAVE_BUILTIN_POPCOUNT 1 #cmakedefine HAVE_BUILTIN_POPCOUNTLL 1 #cmakedefine HAVE_BUILTIN_BSWAP32 1 #cmakedefine HAVE_BUILTIN_BSWAP64 1 #cmakedefine HAVE_FFSL 1 #cmakedefine HAVE_FFSLL 1 /* * pthread have problems with -std=c99 */ #cmakedefine HAVE_NON_C99_PTHREAD_H 1 #cmakedefine ENABLE_BUNDLED_LIBEV 1 #cmakedefine ENABLE_BUNDLED_LIBEIO 1 #cmakedefine ENABLE_BUNDLED_LIBCORO 1 #cmakedefine HAVE_PTHREAD_YIELD 1 #cmakedefine HAVE_SCHED_YIELD 1 #cmakedefine HAVE_POSIX_FADVISE 1 #cmakedefine HAVE_MREMAP 1 #cmakedefine HAVE_PRCTL_H 1 #cmakedefine HAVE_UUIDGEN 1 #cmakedefine HAVE_CLOCK_GETTIME 1 #cmakedefine HAVE_CLOCK_GETTIME_DECL 1 /** pthread_np.h - non-portable stuff */ #cmakedefine HAVE_PTHREAD_NP_H 1 /** pthread_setname_np(pthread_self(), "") - Linux */ #cmakedefine HAVE_PTHREAD_SETNAME_NP 1 /** pthread_setname_np("") - OSX */ #cmakedefine HAVE_PTHREAD_SETNAME_NP_1 1 /** pthread_set_name_np(pthread_self(), "") - *BSD */ #cmakedefine HAVE_PTHREAD_SET_NAME_NP 1 #cmakedefine HAVE_PTHREAD_GETATTR_NP 1 #cmakedefine HAVE_PTHREAD_ATTR_GET_NP 1 #cmakedefine HAVE_PTHREAD_GET_STACKSIZE_NP 1 #cmakedefine HAVE_PTHREAD_GET_STACKADDR_NP 1 #cmakedefine HAVE_SETPROCTITLE 1 
#cmakedefine HAVE_SETPROGNAME 1 #cmakedefine HAVE_GETPROGNAME 1 /* * Defined if ICU library has ucol_strcollUTF8 method. */ #cmakedefine HAVE_ICU_STRCOLLUTF8 1 /* * Defined if systemd is enabled */ #cmakedefine WITH_SYSTEMD 1 /** \cond public */ /** System configuration dir (e.g /etc) */ #define SYSCONF_DIR "@CMAKE_INSTALL_SYSCONFDIR@" /** Install prefix (e.g. /usr) */ #define INSTALL_PREFIX "@CMAKE_INSTALL_PREFIX@" /** Build type, e.g. Debug or Release */ #define BUILD_TYPE "@CMAKE_BUILD_TYPE@" /** CMake build type signature, e.g. Linux-x86_64-Debug */ #define BUILD_INFO "@TARANTOOL_BUILD@" /** Command line used to run CMake */ #define BUILD_OPTIONS "cmake . @TARANTOOL_OPTIONS@" /** Pathes to C and CXX compilers */ #define COMPILER_INFO "@CMAKE_C_COMPILER@ @CMAKE_CXX_COMPILER@" /** C compile flags used to build Tarantool */ #define TARANTOOL_C_FLAGS "@TARANTOOL_C_FLAGS@" /** CXX compile flags used to build Tarantool */ #define TARANTOOL_CXX_FLAGS "@TARANTOOL_CXX_FLAGS@" /** A path to install *.lua module files */ #define MODULE_LIBDIR "@MODULE_FULL_LIBDIR@" /** A path to install *.so / *.dylib module files */ #define MODULE_LUADIR "@MODULE_FULL_LUADIR@" /** A path to Lua includes (the same directory where this file is contained) */ #define MODULE_INCLUDEDIR "@MODULE_FULL_INCLUDEDIR@" /** A constant added to package.path in Lua to find *.lua module files */ #define MODULE_LUAPATH "@MODULE_LUAPATH@" /** A constant added to package.cpath in Lua to find *.so module files */ #define MODULE_LIBPATH "@MODULE_LIBPATH@" /** Shared library suffix - ".so" on Linux, ".dylib" on Mac */ #define MODULE_LIBSUFFIX "@MODULE_LIBSUFFIX@" /** \endcond public */ #define DEFAULT_CFG_FILENAME "tarantool.cfg" #define DEFAULT_CFG SYSCONF_DIR "/" DEFAULT_CFG_FILENAME #cmakedefine ENABLE_ASAN 1 /* Cacheline size to calculate alignments */ #define CACHELINE_SIZE 64 /* * vim: syntax=c */ #endif /* TARANTOOL_CONFIG_H_INCLUDED */ 
tarantool_1.9.1.26.g63eb81e3c/src/trivia/util.h0000664000000000000000000004111013306565107017424 0ustar rootroot#ifndef TARANTOOL_UTIL_H_INCLUDED #define TARANTOOL_UTIL_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/config.h" #include #include #include #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #define restrict __restrict__ #ifndef NDEBUG #define TRASH(ptr) memset(ptr, '#', sizeof(*ptr)) #else #define TRASH(ptr) #endif #ifndef MAX # define MAX(a, b) ((a) > (b) ? (a) : (b)) # define MIN(a, b) ((a) < (b) ? 
(a) : (b)) #endif #define SWAP(a, b) do { \ typeof(a) tmp = (a); \ (a) = (b); \ (b) = tmp; \ } while (0) #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) /* Macros to define enum and corresponding strings. */ #define ENUM0_MEMBER(s, ...) s, #define ENUM_MEMBER(s, v, ...) s = v, #define ENUM0(enum_name, enum_members) enum enum_name { enum_members(ENUM0_MEMBER) enum_name##_MAX } #define ENUM(enum_name, enum_members) enum enum_name { enum_members(ENUM_MEMBER) enum_name##_MAX } #if defined(__cplusplus) #define ENUM_STRS_MEMBER(s, v, ...) names[s] = #s; /* A special hack to emulate C99 designated initializers */ #define STRS(enum_name, enum_members) \ const char *enum_name##_strs[enum_name##_MAX]; \ namespace { \ const struct enum_name##_strs_init { \ enum_name##_strs_init(const char **names) { \ memset(names, 0, sizeof(*names) * \ enum_name##_MAX); \ enum_members(ENUM_STRS_MEMBER) \ } \ } enum_name##_strs_init(enum_name##_strs); \ } #else /* !defined(__cplusplus) */ #define ENUM_STRS_MEMBER(s, v, ...) 
[s] = #s, #define STRS(enum_name, enum_members) \ const char *enum_name##_strs[(unsigned) enum_name##_MAX + 1] = {enum_members(ENUM_STRS_MEMBER) 0} #endif #define STR2ENUM(enum_name, str) ((enum enum_name) strindex(enum_name##_strs, str, enum_name##_MAX)) #define STRN2ENUM(enum_name, str, len) ((enum enum_name) strnindex(enum_name##_strs, str, len, enum_name##_MAX)) uint32_t strindex(const char **haystack, const char *needle, uint32_t hmax); uint32_t strnindex(const char **haystack, const char *needle, uint32_t len, uint32_t hmax); #define nelem(x) (sizeof((x))/sizeof((x)[0])) #define field_sizeof(compound_type, field) sizeof(((compound_type *)NULL)->field) #ifndef lengthof #define lengthof(array) (sizeof (array) / sizeof ((array)[0])) #endif /** \cond public */ /** * Feature test macroses for -std=c11 / -std=c++11 * * Sic: clang aims to be gcc-compatible and thus defines __GNUC__ */ #ifndef __has_feature # define __has_feature(x) 0 #endif #ifndef __has_builtin # define __has_builtin(x) 0 #endif #ifndef __has_attribute # define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute # define __has_cpp_attribute(x) 0 #endif /** * Compiler-independent built-ins. * * \see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html * * {{{ Built-ins */ /** * You may use likely()/unlikely() to provide the compiler with branch * prediction information. */ #if __has_builtin(__builtin_expect) || defined(__GNUC__) # define likely(x) __builtin_expect(!! (x),1) # define unlikely(x) __builtin_expect(!! (x),0) #else # define likely(x) (x) # define unlikely(x) (x) #endif /** * This macro is used to minimize cache-miss latency by moving data into * a cache before it is accessed. You can insert calls to prefetch() into * code for which you know addresses of data in memory that is likely to be * accessed soon. If the target supports them, data prefetch instructions * will be generated. 
If the prefetch is done early enough before the access * then the data will be in the cache by the time it is accessed. * * The value of addr is the address of the memory to prefetch. There are two * optional arguments, rw and locality. The value of rw is a compile-time * constant one or zero; one means that the prefetch is preparing for a write * to the memory address and zero, the default, means that the prefetch is * preparing for a read. The value locality must be a compile-time constant * integer between zero and three. A value of zero means that the data has * no temporal locality, so it need not be left in the cache after the access. * A value of three means that the data has a high degree of temporal locality * and should be left in all levels of cache possible. Values of one and two * mean, respectively, a low or moderate degree of temporal locality. * The default is three. */ #if __has_builtin(__builtin_prefetch) || defined(__GNUC__) # define prefetch(addr, ...) (__builtin_prefetch(addr, __VA_ARGS__)) #else # define prefetch(addr, ...) ((void) addr) #endif /** * If control flow reaches the point of the unreachable(), the program is * undefined. It is useful in situations where the compiler cannot deduce * the unreachability of the code. */ #if __has_builtin(__builtin_unreachable) || defined(__GNUC__) # define unreachable() (assert(0), __builtin_unreachable()) #else # define unreachable() (assert(0)) #endif /** * The macro offsetof expands to an integral constant expression of * type size_t, the value of which is the offset, in bytes, from * the beginning of an object of specified type to its specified member, * including padding if any. */ #ifndef offsetof #define offsetof(type, member) ((size_t) &((type *)0)->member) #endif /** * This macro is used to retrieve an enclosing structure from a pointer to * a nested element. 
*/ #ifndef container_of #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) #endif /** * C11/C++11 keyword. Appears in the declaration syntax as one of the type * specifiers to modify the alignment requirement of the object being * declared. * * Sic: alignas() doesn't work on anonymous strucrs on gcc < 4.9 * * \example struct obuf { int a; int b; alignas(16) int c; }; */ #if !defined(alignas) && !defined(__alignas_is_defined) # if __has_feature(c_alignas) || (defined(__GNUC__) && __GNUC__ >= 5) # include # elif __has_attribute(aligned) || defined(__GNUC__) # define alignas(_n) __attribute__((aligned(_n))) # define __alignas_is_defined 1 # else # define alignas(_n) # endif #endif /** * C11/C++11 operator. Returns the alignment, in bytes, required for any * instance of the type indicated by type-id, which is either complete type, * an array type, or a reference type. */ #if !defined(alignof) && !defined(__alignof_is_defined) # if __has_feature(c_alignof) || (defined(__GNUC__) && __GNUC__ >= 5) # include # elif defined(__GNUC__) # define alignof(_T) __alignof(_T) # define __alignof_is_defined 1 # else # define alignof(_T) offsetof(struct { char c; _T member; }, member) # define __alignof_is_defined 1 # endif #endif /** Built-ins }}} */ /** * Compiler-indepedent function attributes. * * \see https://gcc.gnu.org/onlinedocs/gcc/Type-Attributes.html * \see http://clang.llvm.org/docs/AttributeReference.html#function-attributes * \see http://en.cppreference.com/w/cpp/language/attributes * * {{{ Function Attributes */ /** * The MAYBE_UNUSED function attribute can be used to silence -Wunused * diagnostics when the entity cannot be removed. For instance, a local * variable may exist solely for use in an assert() statement, which makes * the local variable unused when NDEBUG is defined. 
* * \example int fun(MAYBE_UNUSED int unused_arg); */ #if defined(__cplusplus) && __has_cpp_attribute(maybe_unused) # define MAYBE_UNUSED [[maybe_unused]] #elif __has_attribute(unused) || defined(__GNUC__) # define MAYBE_UNUSED __attribute__((unused)) #else # define MAYBE_UNUSED #endif /** * A diagnostic is generated when a function is marked with NODISCARD and * the function call appears as a potentially-evaluated discarded-value * expression that is not explicitly cast to void. * * \example NODISCARD int function() { return -1 }; */ #if defined(__cplusplus) && __has_cpp_attribute(nodiscard) # define NODISCARD [[nodiscard]] #elif __has_attribute(warn_unused_result) || defined(__GNUC__) # define NODISCARD __attribute__((warn_unused_result)) #else # define NODISCARD #endif /** * This function attribute prevents a function from being considered for * inlining. * * \example NOINLINE int function() { return 0; }; */ #if __has_attribute(noinline) || defined(__GNUC__) # define NOINLINE __attribute__((noinline)) #else # define NOINLINE #endif /** * A function declared as NORETURN shall not return to its caller. * The compiler will generate a diagnostic for a function declared as * NORETURN that appears to be capable of returning to its caller. * * \example NORETURN void abort(); */ #if defined(__cplusplus) && __has_cpp_attribute(noreturn) # define NORETURN [[noreturn]] #elif __has_attribute(noreturn) || defined(__GNUC__) # define NORETURN __attribute__((noreturn)) #else # define NORETURN #endif /** * The DEPRECATED attribute can be applied to a function, a variable, or * a type. This is useful when identifying functions, variables, or types * that are expected to be removed in a future version of a program. 
*/ #if defined(__cplusplus) && __has_cpp_attribute(deprecated) # define DEPRECATED(_msg) [[deprecated(_msg)]] #elif __has_attribute(deprecated) || defined(__GNUC__) # define DEPREACTED __attribute__((deprecated(_msg))) #else # define DEPRECATED(_msg) #endif /** * The API_EXPORT attribute declares public C API function. */ #if defined(__cplusplus) && defined(__GNUC__) # define API_EXPORT extern "C" __attribute__ ((nothrow, visibility ("default"))) #elif defined(__cplusplus) # define API_EXPORT extern "C" #elif defined(__GNUC__) # define API_EXPORT extern __attribute__ ((nothrow, visibility ("default"))) #else # define API_EXPORT extern #endif /** * The CFORMAT attribute specifies that a function takes printf, scanf, * strftime or strfmon style arguments that should be type-checked against * a format string. * * \see https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html */ #if __has_attribute(format) || defined(__GNUC__) # define CFORMAT(_archetype, _stringindex, _firsttocheck) \ __attribute__((format(_archetype, _stringindex, _firsttocheck))) #else # define CFORMAT(archetype, stringindex, firsttocheck) #endif /** * The PACKED qualifier is useful to map a structure to an external data * structure, or for accessing unaligned data, but it is generally not * useful to save data size because of the relatively high cost of * unaligned access on some architectures. * * \example struct PACKED name { char a; int b; }; */ #if __has_attribute(packed) || defined(__GNUC__) # define PACKED __attribute__((packed)) #elif defined(__CC_ARM) # define PACKED __packed #else # define PACKED #endif /** Function Attributes }}} */ /** {{{ Statement Attributes */ /** * The fallthrough attribute with a null statement serves as a fallthrough * statement. It hints to the compiler that a statement that falls through * to another case label, or user-defined label in a switch statement is * intentional and thus the -Wimplicit-fallthrough warning must not trigger. 
* The fallthrough attribute may appear at most once in each attribute list, * and may not be mixed with other attributes. It can only be used in a switch * statement (the compiler will issue an error otherwise), after a preceding * statement and before a logically succeeding case label, or user-defined * label. */ #if defined(__cplusplus) && __has_cpp_attribute(fallthrough) # define FALLTHROUGH [[fallthrough]] #elif __has_attribute(fallthrough) || (defined(__GNUC__) && __GNUC__ >= 7) # define FALLTHROUGH __attribute__((fallthrough)) #else # define FALLTHROUGH #endif /** Statement Attributes }}} */ /** \endcond public */ void close_all_xcpt(int fdc, ...); void __gcov_flush(); /** * Async-signal-safe implementation of printf(), to * be able to write messages into the error log * inside a signal handler. */ ssize_t fdprintf(int fd, const char *format, ...) __attribute__((format(printf, 2, 3))); char * find_path(const char *argv0); char * abspath(const char *filename); char * int2str(long long int val); void fpconv_check(void); enum { FPCONV_G_FMT_BUFSIZE = 32, FPCONV_G_FMT_MAX_PRECISION = 14 }; extern const char *precision_fmts[]; /** * @brief Locale-independent printf("%.(precision)lg") * @sa snprintf() */ static inline int fpconv_g_fmt(char *str, double num, int precision) { if (precision <= 0 || precision > FPCONV_G_FMT_MAX_PRECISION) precision = FPCONV_G_FMT_MAX_PRECISION; const char *fmt = precision_fmts[precision]; return snprintf(str, FPCONV_G_FMT_BUFSIZE, fmt, num); } /** * @brief Locale-independent strtod. * @sa strtod() */ static inline double fpconv_strtod(const char *nptr, char **endptr) { return strtod(nptr, endptr); } /** * Check that @a str is valid utf-8 sequence and can be printed * unescaped. * @param str string * @param length string length */ int utf8_check_printable(const char *str, size_t length); #ifndef HAVE_MEMMEM /* Declare memmem(). 
*/ void * memmem(const void *block, size_t blen, const void *pat, size_t plen); #endif /* HAVE_MEMMEM */ #ifndef HAVE_MEMRCHR /* Declare memrchr(). */ void * memrchr(const void *s, int c, size_t n); #endif /* HAVE_MEMRCHR */ #include #include #ifndef HAVE_CLOCK_GETTIME_DECL /* Declare clock_gettime(). */ int clock_gettime(uint32_t clock_id, struct timespec *tp); #define CLOCK_REALTIME 0 #define CLOCK_MONOTONIC 1 #define CLOCK_PROCESS_CPUTIME_ID 2 #define CLOCK_THREAD_CPUTIME_ID 3 #endif #define TT_STATIC_BUF_LEN 1024 /** * Return a thread-local statically allocated temporary buffer of size * \a TT_STATIC_BUF_LEN */ static inline char * tt_static_buf(void) { enum { TT_STATIC_BUFS = 4 }; static __thread char bufs[TT_STATIC_BUFS][TT_STATIC_BUF_LEN + 1]; static __thread int bufno = TT_STATIC_BUFS - 1; bufno = (bufno + 1) % TT_STATIC_BUFS; return bufs[bufno]; } /** * Return a null-terminated string for \a str of length \a len */ static inline const char * tt_cstr(const char *str, size_t len) { char *buf = tt_static_buf(); len = MIN(len, TT_STATIC_BUF_LEN - 1); memcpy(buf, str, len); buf[len] = '\0'; return buf; } /** * Wrapper around sprintf() that prints the result to * the static buffer returned by tt_static_buf(). */ static inline const char * tt_vsprintf(const char *format, va_list ap) { char *buf = tt_static_buf(); vsnprintf(buf, TT_STATIC_BUF_LEN, format, ap); return buf; } /** @copydoc tt_vsprintf() */ static inline const char * tt_sprintf(const char *format, ...) { va_list ap; va_start(ap, format); const char *result = tt_vsprintf(format, ap); va_end(ap); return result; } /** * Escape special characters in @a data to @a buf */ int json_escape(char *buf, int size, const char *data); /** * Helper macro to handle easily snprintf() result */ #define SNPRINT(_total, _fun, _buf, _size, ...) 
do { \ int written =_fun(_buf, _size, ##__VA_ARGS__); \ if (written < 0) \ return -1; \ _total += written; \ if (written < _size) { \ _buf += written, _size -= written; \ } else { \ _buf = NULL, _size = 0; \ } \ } while(0) #if !defined(__cplusplus) && !defined(static_assert) # define static_assert _Static_assert #endif #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_UTIL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/backtrace.cc0000664000000000000000000002647713306560010017235 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "backtrace.h" #include "trivia/util.h" #include #include #include #include "say.h" #include "fiber.h" #include "assoc.h" #define CRLF "\n" #ifdef ENABLE_BACKTRACE #include #include "small/region.h" /* * We use a global static buffer because it is too late to do any * allocation when we are printing backtrace and fiber stack is * small. */ #define BACKTRACE_NAME_MAX 200 static char backtrace_buf[4096 * 4]; static __thread struct region cache_region; static __thread struct mh_i64ptr_t *proc_cache = NULL; struct proc_cache_entry { char name[BACKTRACE_NAME_MAX]; unw_word_t offset; }; void backtrace_proc_cache_clear() { if (proc_cache == NULL) return; region_destroy(&cache_region); mh_i64ptr_delete(proc_cache); proc_cache = NULL; } const char * get_proc_name(unw_cursor_t *unw_cur, unw_word_t *offset, bool skip_cache) { static __thread char proc_name[BACKTRACE_NAME_MAX]; unw_word_t ip; unw_get_reg(unw_cur, UNW_REG_IP, &ip); if (skip_cache) { unw_get_proc_name(unw_cur, proc_name, sizeof(proc_name), offset); return proc_name; } struct proc_cache_entry *entry; struct mh_i64ptr_node_t node; mh_int_t k; if (proc_cache == NULL) { region_create(&cache_region, &cord()->slabc); proc_cache = mh_i64ptr_new(); if (proc_cache == NULL) { unw_get_proc_name(unw_cur, proc_name, sizeof(proc_name), offset); goto error; } } k = mh_i64ptr_find(proc_cache, ip, NULL); if (k != mh_end(proc_cache)) { entry = (struct proc_cache_entry *) mh_i64ptr_node(proc_cache, k)->val; snprintf(proc_name, BACKTRACE_NAME_MAX, "%s", entry->name); *offset = entry->offset; } else { unw_get_proc_name(unw_cur, proc_name, sizeof(proc_name), offset); entry = (struct proc_cache_entry *) region_alloc(&cache_region, sizeof(struct proc_cache_entry)); if (entry == NULL) goto error; node.key = ip; node.val = entry; snprintf(entry->name, BACKTRACE_NAME_MAX, "%s", proc_name); entry->offset = *offset; k = mh_i64ptr_put(proc_cache, &node, NULL, NULL); if (k == mh_end(proc_cache)) { free(entry); goto error; } } 
error: return proc_name; } char * backtrace() { int frame_no = 0; unw_word_t sp = 0, old_sp = 0, ip, offset; unw_context_t unw_context; unw_getcontext(&unw_context); unw_cursor_t unw_cur; unw_init_local(&unw_cur, &unw_context); char *p = backtrace_buf; char *end = p + sizeof(backtrace_buf) - 1; int unw_status; while ((unw_status = unw_step(&unw_cur)) > 0) { const char *proc; old_sp = sp; unw_get_reg(&unw_cur, UNW_REG_IP, &ip); unw_get_reg(&unw_cur, UNW_REG_SP, &sp); if (sp == old_sp) { say_debug("unwinding error: previous frame " "identical to this frame (corrupt stack?)"); goto out; } proc = get_proc_name(&unw_cur, &offset, true); p += snprintf(p, end - p, "#%-2d %p in ", frame_no, (void *)ip); if (p >= end) goto out; p += snprintf(p, end - p, "%s+%lx", proc, (long)offset); if (p >= end) goto out; p += snprintf(p, end - p, CRLF); if (p >= end) goto out; ++frame_no; } #ifndef TARGET_OS_DARWIN if (unw_status != 0) say_debug("unwinding error: %s", unw_strerror(unw_status)); #else if (unw_status != 0) say_debug("unwinding error: %i", unw_status); #endif out: *p = '\0'; return backtrace_buf; } /* * Libunwind unw_getcontext wrapper. * unw_getcontext can be a macros on some platform and can not be called * directly from asm code. Stack variable pass through the wrapper to * preserve a old stack pointer during the wrapper call. * * @param unw_context unwind context to store execution state * @param stack pointer to preserve. * @retval preserved stack pointer. */ static void * unw_getcontext_f(unw_context_t *unw_context, void *stack) { unw_getcontext(unw_context); return stack; } /* * Restore target coro context and call unw_getcontext over it. * Work is done in four parts: * 1. Save current fiber context to a stack and save a stack pointer * 2. Restore target fiber context, stack pointer is not incremented because * all target stack context should be preserved across a call. No stack * changes are allowed until unwinding is done. * 3. 
Setup new stack frame and call unw_getcontext wrapper. All callee-safe * registers are used by target fiber context, so old stack pointer is * passed as second arg to wrapper func. * 4. Restore old stack pointer from wrapper return and restore old fiber * contex. * * @param @unw_context unwind context to store execution state. * @param @coro_ctx fiber context to unwind. */ static void coro_unwcontext(unw_context_t *unw_context, struct coro_context *coro_ctx) { #if __amd64 __asm__ volatile( /* Preserve current context */ "\tpushq %%rbp\n" "\tpushq %%rbx\n" "\tpushq %%r12\n" "\tpushq %%r13\n" "\tpushq %%r14\n" "\tpushq %%r15\n" /* Setup second arg as old sp */ "\tmovq %%rsp, %%rsi\n" /* Restore target context, but not increment sp to preserve it */ "\tmovq 0(%1), %%rsp\n" "\tmovq 0(%%rsp), %%r15\n" "\tmovq 8(%%rsp), %%r14\n" "\tmovq 16(%%rsp), %%r13\n" "\tmovq 24(%%rsp), %%r12\n" "\tmovq 32(%%rsp), %%rbx\n" "\tmovq 40(%%rsp), %%rbp\n" /* Set first arg and call */ "\tmovq %0, %%rdi\n" #ifdef TARGET_OS_DARWIN "\tandq $0xfffffffffffffff0, %%rsp\n" #endif "\tleaq %P2(%%rip), %%rax\n" "\tcall *%%rax\n" /* Restore old sp and context */ "\tmov %%rax, %%rsp\n" "\tpopq %%r15\n" "\tpopq %%r14\n" "\tpopq %%r13\n" "\tpopq %%r12\n" "\tpopq %%rbx\n" "\tpopq %%rbp\n" : : "r" (unw_context), "r" (coro_ctx), "i" (unw_getcontext_f) : "rdi", "rsi", "rax"//, "r8"//"rsp", "r11", "r10", "r9", "r8" ); #elif __i386 __asm__ volatile( /* Save current context */ "\tpushl %%ebp\n" "\tpushl %%ebx\n" "\tpushl %%esi\n" "\tpushl %%edi\n" /* Setup second arg as old sp */ "\tmovl %%esp, %%ecx\n" /* Restore target context ,but not increment sp to preserve it */ "\tmovl (%1), %%esp\n" "\tmovl 0(%%esp), %%edi\n" "\tmovl 4(%%esp), %%esi\n" "\tmovl 8(%%esp), %%ebx\n" "\tmovl 12(%%esp), %%ebp\n" /* Setup first arg and call */ "\tpushl %%ecx\n" "\tpushl %0\n" "\tmovl %2, %%ecx\n" "\tcall *%%ecx\n" /* Restore old sp and context */ "\tmovl %%eax, %%esp\n" "\tpopl %%edi\n" "\tpopl %%esi\n" "\tpopl %%ebx\n" 
"\tpopl %%ebp\n" : : "r" (unw_context), "r" (coro_ctx), "i" (unw_getcontext_f) : "ecx", "eax" ); #elif __ARM_ARCH==7 __asm__ volatile( /* Save current context */ ".syntax unified\n" "\tvpush {d8-d15}\n" "\tpush {r4-r11,lr}\n" /* Save sp */ "\tmov r1, sp\n" /* Restore target context, but not increment sp to preserve it */ "\tldr sp, [%1]\n" "\tldmia sp, {r4-r11,lr}\n" "\tvldmia sp, {d8-d15}\n" /* Setup first arg */ "\tmov r0, %0\n" /* Setup stack frame */ "\tpush {r7, lr}\n" "\tsub sp, #8\n" "\tstr r0, [sp, #4]\n" "\tstr r1, [sp, #0]\n" "\tmov r7, sp\n" "\tbl %2\n" /* Old sp is returned via r0 */ "\tmov sp, r0\n" "\tpop {r4-r11,lr}\n" "\tvpop {d8-d15}\n" : : "r" (unw_context), "r" (coro_ctx), "i" (unw_getcontext_f) : "lr", "r0", "r1", "ip" ); #elif __aarch64__ __asm__ volatile( /* Save current context */ "\tsub x1, sp, #8 * 20\n" "\tstp x19, x20, [x1, #16 * 0]\n" "\tstp x21, x22, [x1, #16 * 1]\n" "\tstp x23, x24, [x1, #16 * 2]\n" "\tstp x25, x26, [x1, #16 * 3]\n" "\tstp x27, x28, [x1, #16 * 4]\n" "\tstp x29, x30, [x1, #16 * 5]\n" "\tstp d8, d9, [x1, #16 * 6]\n" "\tstp d10, d11, [x1, #16 * 7]\n" "\tstp d12, d13, [x1, #16 * 8]\n" "\tstp d14, d15, [x1, #16 * 9]\n" /* Restore target context */ "\tldr x2, [%1]\n" "\tldp x19, x20, [x2, #16 * 0]\n" "\tldp x21, x22, [x2, #16 * 1]\n" "\tldp x23, x24, [x2, #16 * 2]\n" "\tldp x25, x26, [x2, #16 * 3]\n" "\tldp x27, x28, [x2, #16 * 4]\n" "\tldp x29, x30, [x2, #16 * 5]\n" "\tldp d8, d9, [x2, #16 * 6]\n" "\tldp d10, d11, [x2, #16 * 7]\n" "\tldp d12, d13, [x2, #16 * 8]\n" "\tldp d14, d15, [x2, #16 * 9]\n" "\tmov sp, x2\n" /* Setup fisrst arg */ "\tmov x0, %0\n" "\tbl %2\n" /* Restore context (old sp in x0) */ "\tldp x19, x20, [x0, #16 * 0]\n" "\tldp x21, x22, [x0, #16 * 1]\n" "\tldp x23, x24, [x0, #16 * 2]\n" "\tldp x25, x26, [x0, #16 * 3]\n" "\tldp x27, x28, [x0, #16 * 4]\n" "\tldp x29, x30, [x0, #16 * 5]\n" "\tldp d8, d9, [x0, #16 * 6]\n" "\tldp d10, d11, [x0, #16 * 7]\n" "\tldp d12, d13, [x0, #16 * 8]\n" "\tldp d14, d15, [x0, 
#16 * 9]\n" "\tadd sp, x0, #8 * 20\n" : : "r" (unw_context), "r" (coro_ctx), "i" (unw_getcontext_f) : /*"lr", "r0", "r1", "ip" */ "x0", "x1", "x2", "x30" ); #endif } void backtrace_foreach(backtrace_cb cb, coro_context *coro_ctx, void *cb_ctx) { unw_cursor_t unw_cur; unw_context_t unw_ctx; coro_unwcontext(&unw_ctx, coro_ctx); unw_init_local(&unw_cur, &unw_ctx); int frame_no = 0; unw_word_t sp = 0, old_sp = 0, ip, offset; int unw_status, demangle_status; char *demangle_buf = NULL; size_t demangle_buf_len = 0; while ((unw_status = unw_step(&unw_cur)) > 0) { const char *proc; old_sp = sp; unw_get_reg(&unw_cur, UNW_REG_IP, &ip); unw_get_reg(&unw_cur, UNW_REG_SP, &sp); if (sp == old_sp) { say_debug("unwinding error: previous frame " "identical to this frame (corrupt stack?)"); goto out; } proc = get_proc_name(&unw_cur, &offset, false); char *cxxname = abi::__cxa_demangle(proc, demangle_buf, &demangle_buf_len, &demangle_status); if (cxxname != NULL) demangle_buf = cxxname; if (frame_no > 0 && (cb(frame_no - 1, (void *)ip, cxxname != NULL ? cxxname : proc, offset, cb_ctx) != 0)) goto out; ++frame_no; } #ifndef TARGET_OS_DARWIN if (unw_status != 0) say_debug("unwinding error: %s", unw_strerror(unw_status)); #else if (unw_status != 0) say_debug("unwinding error: %i", unw_status); #endif out: free(demangle_buf); } void print_backtrace() { fdprintf(STDERR_FILENO, "%s", backtrace()); } #endif /* ENABLE_BACKTRACE */ NORETURN void assert_fail(const char *assertion, const char *file, unsigned int line, const char *function) { fprintf(stderr, "%s:%i: %s: assertion %s failed.\n", file, line, function, assertion); #ifdef ENABLE_BACKTRACE print_backtrace(); #endif /* ENABLE_BACKTRACE */ close_all_xcpt(0); abort(); } tarantool_1.9.1.26.g63eb81e3c/src/exception.cc0000664000000000000000000001706313306565107017317 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "exception.h" #include #include #include #include "fiber.h" #include "reflection.h" extern "C" { static void exception_destroy(struct error *e) { delete (Exception *) e; } static void exception_raise(struct error *error) { Exception *e = (Exception *) error; e->raise(); } static void exception_log(struct error *error) { Exception *e = (Exception *) error; e->log(); } const char * exception_get_string(struct error *e, const struct method_info *method) { /* A workaround for for vtable */ Exception *ex = (Exception *) e; if (!method_invokable(method, ex)) return NULL; return method_invoke(method, ex); } int exception_get_int(struct error *e, const struct method_info *method) { /* A workaround for vtable */ Exception *ex = (Exception *) e; if (!method_invokable(method, ex)) return 0; return method_invoke(method, ex); } } /* extern "C" */ /** out_of_memory::size is zero-initialized by the linker. */ static OutOfMemory out_of_memory(__FILE__, __LINE__, sizeof(OutOfMemory), "malloc", "exception"); static const struct method_info exception_methods[] = { make_method(&type_Exception, "message", &Exception::get_errmsg), make_method(&type_Exception, "log", &Exception::log), METHODS_SENTINEL }; const struct type_info type_Exception = make_type("Exception", NULL, exception_methods); void * Exception::operator new(size_t size) { void *buf = malloc(size); if (buf != NULL) return buf; diag_add_error(diag_get(), &out_of_memory); throw &out_of_memory; } void Exception::operator delete(void *ptr) { free(ptr); } Exception::~Exception() { if (this != &out_of_memory) { assert(refs == 0); } } Exception::Exception(const struct type_info *type_arg, const char *file, unsigned line) { error_create(this, exception_destroy, exception_raise, exception_log, type_arg, file, line); } void Exception::log() const { say_file_line(S_ERROR, file, line, errmsg, "%s", type->name); } static const struct method_info systemerror_methods[] = { make_method(&type_SystemError, "errno", 
&SystemError::get_errno), METHODS_SENTINEL }; const struct type_info type_SystemError = make_type("SystemError", &type_Exception, systemerror_methods); SystemError::SystemError(const struct type_info *type, const char *file, unsigned line) :Exception(type, file, line), m_errno(errno) { /* nothing */ } SystemError::SystemError(const char *file, unsigned line, const char *format, ...) : Exception(&type_SystemError, file, line), m_errno(errno) { va_list ap; va_start(ap, format); error_vformat_msg(this, format, ap); va_end(ap); } void SystemError::log() const { say_file_line(S_SYSERROR, file, line, strerror(m_errno), "SystemError %s", errmsg); } const struct type_info type_OutOfMemory = make_type("OutOfMemory", &type_SystemError); OutOfMemory::OutOfMemory(const char *file, unsigned line, size_t amount, const char *allocator, const char *object) : SystemError(&type_OutOfMemory, file, line) { m_errno = ENOMEM; error_format_msg(this, "Failed to allocate %u bytes in %s for %s", (unsigned) amount, allocator, object); } const struct type_info type_TimedOut = make_type("TimedOut", &type_SystemError); TimedOut::TimedOut(const char *file, unsigned line) : SystemError(&type_TimedOut, file, line) { m_errno = ETIMEDOUT; error_format_msg(this, "timed out"); } const struct type_info type_ChannelIsClosed = make_type("ChannelIsClosed", &type_Exception); ChannelIsClosed::ChannelIsClosed(const char *file, unsigned line) : Exception(&type_ChannelIsClosed, file, line) { error_format_msg(this, "channel is closed"); } const struct type_info type_FiberIsCancelled = make_type("FiberIsCancelled", &type_Exception); FiberIsCancelled::FiberIsCancelled(const char *file, unsigned line) : Exception(&type_FiberIsCancelled, file, line) { error_format_msg(this, "fiber is cancelled"); } void FiberIsCancelled::log() const { say_info("fiber `%s' has been cancelled", fiber_name(fiber())); say_info("fiber `%s': exiting", fiber_name(fiber())); } const struct type_info type_LuajitError = 
make_type("LuajitError", &type_Exception); LuajitError::LuajitError(const char *file, unsigned line, const char *msg) : Exception(&type_LuajitError, file, line) { snprintf(errmsg, sizeof(errmsg), "%s", msg ? msg : ""); } const struct type_info type_IllegalParams = make_type("IllegalParams", &type_Exception); IllegalParams::IllegalParams(const char *file, unsigned line, const char *format, ...) : Exception(&type_IllegalParams, file, line) { va_list ap; va_start(ap, format); error_vformat_msg(this, format, ap); va_end(ap); } #define BuildAlloc(type) \ void *p = malloc(sizeof(type)); \ if (p == NULL) \ return &out_of_memory; struct error * BuildOutOfMemory(const char *file, unsigned line, size_t amount, const char *allocator, const char *object) { BuildAlloc(OutOfMemory); return new (p) OutOfMemory(file, line, amount, allocator, object); } struct error * BuildTimedOut(const char *file, unsigned line) { BuildAlloc(TimedOut); return new (p) TimedOut(file, line); } struct error * BuildChannelIsClosed(const char *file, unsigned line) { BuildAlloc(ChannelIsClosed); return new (p) ChannelIsClosed(file, line); } struct error * BuildFiberIsCancelled(const char *file, unsigned line) { BuildAlloc(FiberIsCancelled); return new (p) FiberIsCancelled(file, line); } struct error * BuildLuajitError(const char *file, unsigned line, const char *msg) { BuildAlloc(LuajitError); return new (p) LuajitError(file, line, msg); } struct error * BuildIllegalParams(const char *file, unsigned line, const char *format, ...) { BuildAlloc(IllegalParams); IllegalParams *e = new (p) IllegalParams(file, line, ""); va_list ap; va_start(ap, format); error_vformat_msg(e, format, ap); va_end(ap); return e; } struct error * BuildSystemError(const char *file, unsigned line, const char *format, ...) 
{ BuildAlloc(SystemError); SystemError *e = new (p) SystemError(file, line, ""); va_list ap; va_start(ap, format); error_vformat_msg(e, format, ap); va_end(ap); return e; } void exception_init() { /* A special workaround for out_of_memory static init */ out_of_memory.refs = 1; } #undef BuildAlloc tarantool_1.9.1.26.g63eb81e3c/src/cbus.c0000664000000000000000000003732413306560010016100 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "cbus.h" #include #include "fiber.h" #include "trigger.h" /** * Cord interconnect. */ struct cbus { /** cbus statistics */ struct rmean *stats; /** A mutex to protect bus join. */ pthread_mutex_t mutex; /** Condition for synchronized start of the bus. 
*/ pthread_cond_t cond; /** Connected endpoints */ struct rlist endpoints; }; /** A singleton for all cords. */ static struct cbus cbus; const char *cbus_stat_strings[CBUS_STAT_LAST] = { "EVENTS", "LOCKS", }; /** * Find a joined cbus endpoint by name. * This is an internal helper method which should be called * under cbus::mutex. * * @return endpoint or NULL if not found */ static struct cbus_endpoint * cbus_find_endpoint_locked(struct cbus *bus, const char *name) { struct cbus_endpoint *endpoint; rlist_foreach_entry(endpoint, &bus->endpoints, in_cbus) { if (strcmp(endpoint->name, name) == 0) return endpoint; } return NULL; } static struct cbus_endpoint * cbus_find_endpoint(struct cbus *bus, const char *name) { tt_pthread_mutex_lock(&bus->mutex); struct cbus_endpoint *endpoint = cbus_find_endpoint_locked(bus, name); tt_pthread_mutex_unlock(&bus->mutex); return endpoint; } static void cpipe_flush_cb(ev_loop * /* loop */, struct ev_async *watcher, int /* events */); void cpipe_create(struct cpipe *pipe, const char *consumer) { stailq_create(&pipe->input); pipe->n_input = 0; pipe->max_input = INT_MAX; pipe->producer = cord()->loop; ev_async_init(&pipe->flush_input, cpipe_flush_cb); pipe->flush_input.data = pipe; rlist_create(&pipe->on_flush); tt_pthread_mutex_lock(&cbus.mutex); struct cbus_endpoint *endpoint = cbus_find_endpoint_locked(&cbus, consumer); while (endpoint == NULL) { tt_pthread_cond_wait(&cbus.cond, &cbus.mutex); endpoint = cbus_find_endpoint_locked(&cbus, consumer); } pipe->endpoint = endpoint; ++pipe->endpoint->n_pipes; tt_pthread_mutex_unlock(&cbus.mutex); } struct cmsg_poison { struct cmsg msg; struct cbus_endpoint *endpoint; }; static void cbus_endpoint_poison_f(struct cmsg *msg) { struct cbus_endpoint *endpoint = ((struct cmsg_poison *)msg)->endpoint; tt_pthread_mutex_lock(&cbus.mutex); assert(endpoint->n_pipes > 0); --endpoint->n_pipes; tt_pthread_mutex_unlock(&cbus.mutex); fiber_cond_signal(&endpoint->cond); free(msg); } void cpipe_destroy(struct 
cpipe *pipe) { ev_async_stop(pipe->producer, &pipe->flush_input); static const struct cmsg_hop route[1] = { {cbus_endpoint_poison_f, NULL} }; trigger_destroy(&pipe->on_flush); struct cbus_endpoint *endpoint = pipe->endpoint; struct cmsg_poison *poison = malloc(sizeof(struct cmsg_poison)); cmsg_init(&poison->msg, route); poison->endpoint = pipe->endpoint; /* * Avoid the general purpose cpipe_push_input() since * we want to control the way the poison message is * delivered. */ tt_pthread_mutex_lock(&endpoint->mutex); /* Flush input */ stailq_concat(&endpoint->output, &pipe->input); pipe->n_input = 0; /* Add the pipe shutdown message as the last one. */ stailq_add_tail_entry(&endpoint->output, poison, msg.fifo); /* Count statistics */ rmean_collect(cbus.stats, CBUS_STAT_EVENTS, 1); /* * Keep the lock for the duration of ev_async_send(): * this will avoid a race condition between * ev_async_send() and execution of the poison * message, after which the endpoint may disappear. */ ev_async_send(endpoint->consumer, &endpoint->async); tt_pthread_mutex_unlock(&endpoint->mutex); TRASH(pipe); } static void cbus_create(struct cbus *bus) { bus->stats = rmean_new(cbus_stat_strings, CBUS_STAT_LAST); if (bus->stats == NULL) panic_syserror("cbus_create"); /* Initialize queue lock mutex. */ (void) tt_pthread_mutex_init(&bus->mutex, NULL); (void) tt_pthread_cond_init(&bus->cond, NULL); rlist_create(&bus->endpoints); } static void cbus_destroy(struct cbus *bus) { /* * Lock the mutex to ensure we do not destroy a mutex * while it is locked, happens in at_exit() handler. */ (void) tt_pthread_mutex_lock(&bus->mutex); (void) tt_pthread_mutex_unlock(&bus->mutex); (void) tt_pthread_mutex_destroy(&bus->mutex); (void) tt_pthread_cond_destroy(&bus->cond); rmean_delete(bus->stats); } /** * Join a new endpoint (message consumer) to the bus. The endpoint * must have a unique name. Wakes up all producers (@sa cpipe_create()) * who are blocked waiting for this endpoint to become available. 
*/ int cbus_endpoint_create(struct cbus_endpoint *endpoint, const char *name, void (*fetch_cb)(ev_loop *, struct ev_watcher *, int), void *fetch_data) { tt_pthread_mutex_lock(&cbus.mutex); if (cbus_find_endpoint_locked(&cbus, name) != NULL) { tt_pthread_mutex_unlock(&cbus.mutex); return 1; } snprintf(endpoint->name, sizeof(endpoint->name), "%s", name); endpoint->consumer = loop(); endpoint->n_pipes = 0; fiber_cond_create(&endpoint->cond); tt_pthread_mutex_init(&endpoint->mutex, NULL); stailq_create(&endpoint->output); ev_async_init(&endpoint->async, (void (*)(ev_loop *, struct ev_async *, int)) fetch_cb); endpoint->async.data = fetch_data; ev_async_start(endpoint->consumer, &endpoint->async); rlist_add_tail(&cbus.endpoints, &endpoint->in_cbus); tt_pthread_mutex_unlock(&cbus.mutex); /* * Alert all waiting producers. * * POSIX: pthread_cond_broadcast() function shall * have no effect if there are no threads currently * blocked on cond. */ tt_pthread_cond_broadcast(&cbus.cond); return 0; } int cbus_endpoint_destroy(struct cbus_endpoint *endpoint, void (*process_cb)(struct cbus_endpoint *endpoint)) { tt_pthread_mutex_lock(&cbus.mutex); /* * Remove endpoint from cbus registry, so no new pipe can * be created for this endpoint. */ rlist_del(&endpoint->in_cbus); tt_pthread_mutex_unlock(&cbus.mutex); while (true) { if (process_cb) process_cb(endpoint); if (endpoint->n_pipes == 0 && stailq_empty(&endpoint->output)) break; fiber_cond_wait(&endpoint->cond); } /* * Pipe flush func can still lock mutex, so just lock and unlock * it. 
*/ tt_pthread_mutex_lock(&endpoint->mutex); tt_pthread_mutex_unlock(&endpoint->mutex); tt_pthread_mutex_destroy(&endpoint->mutex); ev_async_stop(endpoint->consumer, &endpoint->async); fiber_cond_destroy(&endpoint->cond); TRASH(endpoint); return 0; } static void cpipe_flush_cb(ev_loop *loop, struct ev_async *watcher, int events) { (void) loop; (void) events; struct cpipe *pipe = (struct cpipe *) watcher->data; struct cbus_endpoint *endpoint = pipe->endpoint; if (pipe->n_input == 0) return; trigger_run(&pipe->on_flush, pipe); /* Trigger task processing when the queue becomes non-empty. */ bool output_was_empty; tt_pthread_mutex_lock(&endpoint->mutex); output_was_empty = stailq_empty(&endpoint->output); /** Flush input */ stailq_concat(&endpoint->output, &pipe->input); tt_pthread_mutex_unlock(&endpoint->mutex); pipe->n_input = 0; if (output_was_empty) { /* Count statistics */ rmean_collect(cbus.stats, CBUS_STAT_EVENTS, 1); ev_async_send(endpoint->consumer, &endpoint->async); } } void cbus_init() { cbus_create(&cbus); } void cbus_free() { cbus_destroy(&cbus); } /* {{{ cmsg */ /** * Dispatch the message to the next hop. */ static inline void cmsg_dispatch(struct cpipe *pipe, struct cmsg *msg) { /** * 'pipe' pointer saved in class constructor works as * a guard that the message is alive. If a message route * has the next pipe, then the message mustn't have been * destroyed on this hop. Otherwise msg->hop->pipe could * be already pointing to garbage. */ if (pipe) { /* * Once we pushed the message to the bus, * we relinquished all write access to it, * so we must increase the current hop *before* * push. */ msg->hop++; cpipe_push(pipe, msg); } } /** * Deliver the message and dispatch it to the next hop. */ void cmsg_deliver(struct cmsg *msg) { /* * Save the pointer to the last pipe, * the memory where it is stored may be destroyed * on the last hop. 
*/ struct cpipe *pipe = msg->hop->pipe; msg->hop->f(msg); cmsg_dispatch(pipe, msg); } /* }}} cmsg */ /** * Call the target function and store the results (diag, rc) in * struct cbus_call_msg. */ void cbus_call_perform(struct cmsg *m) { struct cbus_call_msg *msg = (struct cbus_call_msg *)m; msg->rc = msg->func(msg); if (msg->rc) diag_move(&fiber()->diag, &msg->diag); } /** * Wake up the caller fiber to reap call results. * If the fiber is gone, e.g. in case of call timeout * or cancellation, invoke free_cb to free message state. */ void cbus_call_done(struct cmsg *m) { struct cbus_call_msg *msg = (struct cbus_call_msg *)m; if (msg->caller == NULL) { if (msg->free_cb) msg->free_cb(msg); return; } msg->complete = true; fiber_wakeup(msg->caller); } /** * Execute a synchronous call over cbus. */ int cbus_call(struct cpipe *callee, struct cpipe *caller, struct cbus_call_msg *msg, cbus_call_f func, cbus_call_f free_cb, double timeout) { int rc; diag_create(&msg->diag); msg->caller = fiber(); msg->complete = false; msg->route[0].f = cbus_call_perform; msg->route[0].pipe = caller; msg->route[1].f = cbus_call_done; msg->route[1].pipe = NULL; cmsg_init(cmsg(msg), msg->route); msg->func = func; msg->free_cb = free_cb; msg->rc = 0; cpipe_push(callee, cmsg(msg)); fiber_yield_timeout(timeout); if (msg->complete == false) { /* timed out or cancelled */ msg->caller = NULL; if (fiber_is_cancelled()) diag_set(FiberIsCancelled); else diag_set(TimedOut); return -1; } if ((rc = msg->rc)) diag_move(&msg->diag, &fiber()->diag); return rc; } struct cbus_flush_msg { struct cmsg cmsg; bool complete; struct fiber_cond cond; }; static void cbus_flush_perform(struct cmsg *cmsg) { (void)cmsg; } static void cbus_flush_complete(struct cmsg *cmsg) { struct cbus_flush_msg *msg = container_of(cmsg, struct cbus_flush_msg, cmsg); msg->complete = true; fiber_cond_signal(&msg->cond); } void cbus_flush(struct cpipe *callee, struct cpipe *caller, void (*process_cb)(struct cbus_endpoint *endpoint)) { struct 
cmsg_hop route[] = { {cbus_flush_perform, caller}, {cbus_flush_complete, NULL}, }; struct cbus_flush_msg msg; cmsg_init(&msg.cmsg, route); msg.complete = false; fiber_cond_create(&msg.cond); cpipe_push(callee, &msg.cmsg); while (true) { if (process_cb != NULL) process_cb(caller->endpoint); if (msg.complete) break; fiber_cond_wait(&msg.cond); } } struct cbus_pair_msg { struct cmsg cmsg; void (*pair_cb)(void *); void *pair_arg; const char *src_name; struct cpipe *src_pipe; bool complete; struct fiber_cond cond; }; static void cbus_pair_complete(struct cmsg *cmsg); static void cbus_pair_perform(struct cmsg *cmsg) { struct cbus_pair_msg *msg = container_of(cmsg, struct cbus_pair_msg, cmsg); static struct cmsg_hop route[] = { {cbus_pair_complete, NULL}, }; cmsg_init(cmsg, route); cpipe_create(msg->src_pipe, msg->src_name); if (msg->pair_cb != NULL) msg->pair_cb(msg->pair_arg); cpipe_push(msg->src_pipe, cmsg); } static void cbus_pair_complete(struct cmsg *cmsg) { struct cbus_pair_msg *msg = container_of(cmsg, struct cbus_pair_msg, cmsg); msg->complete = true; fiber_cond_signal(&msg->cond); } void cbus_pair(const char *dest_name, const char *src_name, struct cpipe *dest_pipe, struct cpipe *src_pipe, void (*pair_cb)(void *), void *pair_arg, void (*process_cb)(struct cbus_endpoint *)) { static struct cmsg_hop route[] = { {cbus_pair_perform, NULL}, }; struct cbus_pair_msg msg; cmsg_init(&msg.cmsg, route); msg.pair_cb = pair_cb; msg.pair_arg = pair_arg; msg.complete = false; msg.src_name = src_name; msg.src_pipe = src_pipe; fiber_cond_create(&msg.cond); struct cbus_endpoint *endpoint = cbus_find_endpoint(&cbus, src_name); assert(endpoint != NULL); cpipe_create(dest_pipe, dest_name); cpipe_push(dest_pipe, &msg.cmsg); while (true) { if (process_cb != NULL) process_cb(endpoint); if (msg.complete) break; fiber_cond_wait(&msg.cond); } } struct cbus_unpair_msg { struct cmsg cmsg; void (*unpair_cb)(void *); void *unpair_arg; struct cpipe *src_pipe; bool complete; struct fiber_cond 
cond; }; static void cbus_unpair_prepare(struct cmsg *cmsg) { struct cbus_unpair_msg *msg = container_of(cmsg, struct cbus_unpair_msg, cmsg); if (msg->unpair_cb != NULL) msg->unpair_cb(msg->unpair_arg); } static void cbus_unpair_flush(struct cmsg *cmsg) { (void)cmsg; } static void cbus_unpair_complete(struct cmsg *cmsg); static void cbus_unpair_perform(struct cmsg *cmsg) { struct cbus_unpair_msg *msg = container_of(cmsg, struct cbus_unpair_msg, cmsg); static struct cmsg_hop route[] = { {cbus_unpair_complete, NULL}, }; cmsg_init(cmsg, route); cpipe_push(msg->src_pipe, cmsg); cpipe_destroy(msg->src_pipe); } static void cbus_unpair_complete(struct cmsg *cmsg) { struct cbus_unpair_msg *msg = container_of(cmsg, struct cbus_unpair_msg, cmsg); msg->complete = true; fiber_cond_signal(&msg->cond); } void cbus_unpair(struct cpipe *dest_pipe, struct cpipe *src_pipe, void (*unpair_cb)(void *), void *unpair_arg, void (*process_cb)(struct cbus_endpoint *)) { struct cmsg_hop route[] = { {cbus_unpair_prepare, src_pipe}, {cbus_unpair_flush, dest_pipe}, {cbus_unpair_perform, NULL}, }; struct cbus_unpair_msg msg; cmsg_init(&msg.cmsg, route); msg.unpair_cb = unpair_cb; msg.unpair_arg = unpair_arg; msg.src_pipe = src_pipe; msg.complete = false; fiber_cond_create(&msg.cond); cpipe_push(dest_pipe, &msg.cmsg); struct cbus_endpoint *endpoint = src_pipe->endpoint; while (true) { if (process_cb != NULL) process_cb(endpoint); if (msg.complete) break; fiber_cond_wait(&msg.cond); } cpipe_destroy(dest_pipe); } void cbus_process(struct cbus_endpoint *endpoint) { struct stailq output; stailq_create(&output); cbus_endpoint_fetch(endpoint, &output); struct cmsg *msg, *msg_next; stailq_foreach_entry_safe(msg, msg_next, &output, fifo) cmsg_deliver(msg); } void cbus_loop(struct cbus_endpoint *endpoint) { while (true) { cbus_process(endpoint); if (fiber_is_cancelled()) break; fiber_yield(); } } static void cbus_stop_loop_f(struct cmsg *msg) { fiber_cancel(fiber()); free(msg); } void 
cbus_stop_loop(struct cpipe *pipe) { /* * Hack: static message only works because cmsg_deliver() * is a no-op on the second hop. */ static const struct cmsg_hop route[1] = { {cbus_stop_loop_f, NULL} }; struct cmsg *cancel = malloc(sizeof(struct cmsg)); cmsg_init(cancel, route); cpipe_push(pipe, cancel); ev_invoke(pipe->producer, &pipe->flush_input, EV_CUSTOM); } tarantool_1.9.1.26.g63eb81e3c/src/cbus.h0000664000000000000000000003231613306560010016101 0ustar rootroot#ifndef TARANTOOL_CBUS_H_INCLUDED #define TARANTOOL_CBUS_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "fiber.h" #include "fiber_cond.h" #include "rmean.h" #include "small/rlist.h" #include "salad/stailq.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** cbus, cmsg - inter-cord bus and messaging */ struct cmsg; struct cpipe; typedef void (*cmsg_f)(struct cmsg *); enum cbus_stat_name { CBUS_STAT_EVENTS, CBUS_STAT_LOCKS, CBUS_STAT_LAST, }; extern const char *cbus_stat_strings[CBUS_STAT_LAST]; /** * One hop in a message travel route. A message may need to be * delivered to many destinations before it can be dispensed with. * For example, it may be necessary to return a message to the * sender just to destroy it. * * Message travel route is an array of cmsg_hop entries. The first * entry contains a delivery function at the first destination, * and the next destination. Subsequent entries are alike. The * last entry has a delivery function (most often a message * destructor) and NULL for the next destination. */ struct cmsg_hop { /** The message delivery function. */ cmsg_f f; /** * The next destination to which the message * should be routed after its delivered locally. */ struct cpipe *pipe; }; /** A message traveling between cords. */ struct cmsg { /** * A member of the linked list - fifo of the pipe the * message is stuck in currently, waiting to get * delivered. */ struct stailq_entry fifo; /** The message routing path. */ const struct cmsg_hop *route; /** The current hop the message is at. */ const struct cmsg_hop *hop; }; static inline struct cmsg *cmsg(void *ptr) { return (struct cmsg *) ptr; } /** Initialize the message and set its route. */ static inline void cmsg_init(struct cmsg *msg, const struct cmsg_hop *route) { /** * The first hop can be done explicitly with cbus_push(), * msg->hop thus points to the second hop. */ msg->hop = msg->route = route; } /** * Deliver the message and dispatch it to the next hop. */ void cmsg_deliver(struct cmsg *msg); /** A uni-directional FIFO queue from one cord to another. 
*/ struct cpipe { /** Staging area for pushed messages */ struct stailq input; /** Counters are useful for finer-grained scheduling. */ int n_input; /** * When pushing messages, keep the staged input size under * this limit (speeds up message delivery and reduces * latency, while still keeping the bus mutex cold enough). */ int max_input; /** * Rather than flushing input into the pipe * whenever a single message or a batch is * complete, do it once per event loop iteration * or when max_input is reached. */ struct ev_async flush_input; /** The event loop of the producer cord. */ struct ev_loop *producer; /** * The cbus endpoint at the destination cord to handle * flushed messages. */ struct cbus_endpoint *endpoint; /** * Triggers to call on flush event, if the input queue * is not empty. */ struct rlist on_flush; }; /** * Initialize a pipe and connect it to the consumer. * Must be called by the producer. The call returns * only when the consumer, identified by consumer name, * has joined the bus. */ void cpipe_create(struct cpipe *pipe, const char *consumer); /** * Deinitialize a pipe and disconnect it from the consumer. * Must be called by producer. Will flash queued messages. */ void cpipe_destroy(struct cpipe *pipe); /** * Set pipe max size of staged push area. The default is infinity. * If staged push cap is set, the pushed messages are flushed * whenever the area has more messages than the cap, and also once * per event loop. * Otherwise, the messages flushed once per event loop iteration. * * @todo: collect bus stats per second and adjust max_input once * a second to keep the mutex cold regardless of the message load, * while still keeping the latency low if there are few * long-to-process messages. */ static inline void cpipe_set_max_input(struct cpipe *pipe, int max_input) { pipe->max_input = max_input; } /** * Flush all staged messages into the pipe and eventually to the * consumer. 
*/ static inline void cpipe_flush_input(struct cpipe *pipe) { assert(loop() == pipe->producer); /** Flush may be called with no input. */ if (pipe->n_input > 0) { if (pipe->n_input < pipe->max_input) { /* * Not much input, can deliver all * messages at the end of the event loop * iteration. */ ev_feed_event(pipe->producer, &pipe->flush_input, EV_CUSTOM); } else { /* * Wow, it's a lot of stuff piled up, * deliver immediately. */ ev_invoke(pipe->producer, &pipe->flush_input, EV_CUSTOM); } } } /** * Push a single message to the pipe input. The message is pushed * to a staging area. To be delivered, the input needs to be * flushed with cpipe_flush_input(). */ static inline void cpipe_push_input(struct cpipe *pipe, struct cmsg *msg) { assert(loop() == pipe->producer); stailq_add_tail_entry(&pipe->input, msg, fifo); pipe->n_input++; if (pipe->n_input >= pipe->max_input) ev_invoke(pipe->producer, &pipe->flush_input, EV_CUSTOM); } /** * Push a single message and ensure it's delivered. * A combo of push_input + flush_input for cases when * it's not known at all whether there'll be other * messages coming up. */ static inline void cpipe_push(struct cpipe *pipe, struct cmsg *msg) { cpipe_push_input(pipe, msg); assert(pipe->n_input < pipe->max_input); if (pipe->n_input == 1) ev_feed_event(pipe->producer, &pipe->flush_input, EV_CUSTOM); } void cbus_init(); /** * cbus endpoint */ struct cbus_endpoint { /** * Endpoint name, used to identify the endpoint when * establishing a route. */ char name[FIBER_NAME_MAX]; /** Member of cbus->endpoints */ struct rlist in_cbus; /** The lock around the pipe. */ pthread_mutex_t mutex; /** A queue with incoming messages. 
*/ struct stailq output; /** Consumer cord loop */ ev_loop *consumer; /** Async to notify the consumer */ ev_async async; /** Count of connected pipes */ uint32_t n_pipes; /** Condition for endpoint destroy */ struct fiber_cond cond; }; /** * Fetch incomming messages to output */ static inline void cbus_endpoint_fetch(struct cbus_endpoint *endpoint, struct stailq *output) { tt_pthread_mutex_lock(&endpoint->mutex); stailq_concat(output, &endpoint->output); tt_pthread_mutex_unlock(&endpoint->mutex); } /** Initialize the global singleton bus. */ void cbus_init(); /** Destroy the global singleton bus. */ void cbus_free(); /** * Connect the cord to cbus as a named reciever. * @param name a destination name * @param fetch_cb callback to fetch new messages * @retval 0 for success * @retval 1 if endpoint with given name already registered */ int cbus_endpoint_create(struct cbus_endpoint *endpoint, const char *name, void (*fetch_cb)(ev_loop *, struct ev_watcher *, int), void *fetch_data); /** * One round for message fetch and deliver */ void cbus_process(struct cbus_endpoint *endpoint); /** * Run the message delivery loop until the current fiber is * cancelled. */ void cbus_loop(struct cbus_endpoint *endpoint); /** * Stop the message delivery loop at the destination the pipe * is pointing at. */ void cbus_stop_loop(struct cpipe *pipe); /** * Disconnect the cord from cbus. * @retval 0 for success * @retval 1 if there is connected pipe or unhandled message */ int cbus_endpoint_destroy(struct cbus_endpoint *endpoint, void (*process_cb)(struct cbus_endpoint *)); /** * A helper method to invoke a function on the other side of the * bus. * * Creates the relevant messages, pushes them to the callee pipe and * blocks the caller until func is executed in the correspondent * thread. * Detects which cord to invoke a function in based on the current * cord value (i.e. finds the respective pipes automatically). * Parameter 'data' is passed to the invoked function as context. 
* * @return This function itself never fails. It returns 0 if the call * was * finished, or -1 if there is a timeout or the caller fiber * is canceled. * If called function times out or the caller fiber is canceled, * then free_cb is invoked to free 'data' or other caller state. * * If the argument function sets an error in the called cord, this * error is safely transferred to the caller cord's diagnostics * area. */ struct cbus_call_msg; typedef int (*cbus_call_f)(struct cbus_call_msg *); struct fiber; /** * The state of a synchronous cross-thread call. Only func and free_cb * (if needed) are significant to the caller, other fields are * initialized during the call preparation internally. */ struct cbus_call_msg { struct cmsg msg; struct diag diag; struct fiber *caller; struct cmsg_hop route[2]; bool complete; int rc; /** The callback to invoke in the peer thread. */ cbus_call_f func; /** * A callback to free affiliated resources if the call * times out or the caller is canceled. */ cbus_call_f free_cb; }; int cbus_call(struct cpipe *callee, struct cpipe *caller, struct cbus_call_msg *msg, cbus_call_f func, cbus_call_f free_cb, double timeout); /** * Block until all messages queued in a pipe have been processed. * Done by submitting a dummy message to the pipe and waiting * until it is complete. */ void cbus_flush(struct cpipe *callee, struct cpipe *caller, void (*process_cb)(struct cbus_endpoint *)); /** * Create a two-way channel between existing cbus endpoints. * Blocks until both pipes are created. * * @param dest_name Name of the destination endpoint, i.e. * the endpoint at a remote cord we are * connecting to. * @param src_name Name of the source endpoint, i.e. * the endpoint at the caller's cord. * @param[out] dest_pipe Pipe from the source to the destination * endpoint. * @param[out] src_pipe Pipe from the destination to the source * endpoint. * @param pair_cb Callback invoked at the destination right * after creating the channel to the source. 
* May be NULL. * @param pair_arg Argument passed to @pair_cb. * @param process_cb Function invoked to process cbus messages * at the source endpoint. Pass NULL if there * is a fiber processing messages at the source. */ void cbus_pair(const char *dest_name, const char *src_name, struct cpipe *dest_pipe, struct cpipe *src_pipe, void (*pair_cb)(void *), void *pair_arg, void (*process_cb)(struct cbus_endpoint *)); /** * Destroy a two-way channel between cbus endpoints. * Blocks until both pipes are destroyed. * * Before proceeding to pipe destruction, this function flushes * all cbus messages queued at the destination endpoint by sending * a message from the source to the destination and back. The * caller may specify a callback to invoke when the message is * at the destination endpoint. This can be used to notify the * destination that the channel is about to be destroyed and so * it must stop generating new messages for it. * * @param dest_pipe Pipe from the source to the destination * endpoint. * @param src_pipe Pipe from the destination to the source * endpoint. * @param unpair_cb Callback invoked at the destination before * proceeding to pipe destruction (see above). * May be NULL. * @param unpair_arg Argument passed to @unpair_cb. * @param process_cb Function invoked to process cbus messages * at the source endpoint. Pass NULL if there * is a fiber processing messages at the source. */ void cbus_unpair(struct cpipe *dest_pipe, struct cpipe *src_pipe, void (*unpair_cb)(void *), void *unpair_arg, void (*process_cb)(struct cbus_endpoint *)); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_CBUS_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/coio_task.c0000664000000000000000000002427513306560010017120 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coio_task.h" #include #include #include #include #include #include #include #include "fiber.h" #include "third_party/tarantool_ev.h" /* * Asynchronous IO Tasks (libeio wrapper). * --------------------------------------- * * libeio request processing is designed in edge-trigger * manner, when libeio is ready to process some requests it * calls coio_poller callback. * * Due to libeio design, want_poll callback is called while * locks are being held, so it's not possible to call any libeio * function inside this callback. Thus coio_want_poll raises an * async event which will be dealt with normally as part of the * main Tarantool event loop. 
* * The async event handler, in turn, performs eio_poll(), which * will run on_complete callback for all ready eio tasks. * In case if some of the requests are not complete by the time * eio_poll() has been called, coio_idle watcher is started, which * would periodically invoke eio_poll() until all requests are * complete. * * See for details: * http://pod.tst.eu/http://cvs.schmorp.de/libeio/eio.pod */ struct coio_manager { ev_loop *loop; ev_idle coio_idle; ev_async coio_async; }; static __thread struct coio_manager coio_manager; static void coio_idle_cb(ev_loop *loop, struct ev_idle *w, int events) { (void) events; if (eio_poll() != -1) { /* nothing to do */ ev_idle_stop(loop, w); } } static void coio_async_cb(ev_loop *loop, MAYBE_UNUSED struct ev_async *w, MAYBE_UNUSED int events) { if (eio_poll() == -1) { /* not all tasks are complete. */ ev_idle_start(loop, &coio_manager.coio_idle); } } static void coio_want_poll_cb(void *ptr) { struct coio_manager *manager = ptr; ev_async_send(manager->loop, &manager->coio_async); } static void coio_done_poll_cb(void *ptr) { (void)ptr; } static int coio_on_start(void *data) { (void) data; struct cord *cord = (struct cord *)calloc(sizeof(struct cord), 1); if (!cord) return -1; cord_create(cord, "coio"); return 0; } static int coio_on_stop(void *data) { (void) data; cord_destroy(cord()); return 0; } void coio_init(void) { eio_set_thread_on_start(coio_on_start, NULL); eio_set_thread_on_stop(coio_on_stop, NULL); } /** * Init coio subsystem. * * Create idle and async watchers, init eio. 
*/ void coio_enable(void) { eio_init(&coio_manager, coio_want_poll_cb, coio_done_poll_cb); coio_manager.loop = loop(); ev_idle_init(&coio_manager.coio_idle, coio_idle_cb); ev_async_init(&coio_manager.coio_async, coio_async_cb); ev_async_start(loop(), &coio_manager.coio_async); } void coio_shutdown(void) { eio_set_max_parallel(0); } static void coio_on_feed(eio_req *req) { struct coio_task *task = (struct coio_task *) req; req->result = task->task_cb(task); if (req->result) diag_move(diag_get(), &task->diag); } /** * A callback invoked by eio_poll when associated * eio_request is complete. */ static int coio_on_finish(eio_req *req) { struct coio_task *task = (struct coio_task *) req; if (task->fiber == NULL) { /* * Timed out. Resources will be freed by coio_on_destroy. * NOTE: it is not safe to run timeout_cb handler here. */ return 0; } task->complete = 1; /* Reset on_timeout hook - resources will be freed by coio_task user */ task->base.destroy = NULL; fiber_wakeup(task->fiber); return 0; } /* * Free resources on timeout. 
*/ static void coio_on_destroy(eio_req *req) { struct coio_task *task = (struct coio_task *) req; assert(task->fiber == NULL && task->complete == 0); if (task->timeout_cb != NULL) task->timeout_cb(task); } void coio_task_create(struct coio_task *task, coio_task_cb func, coio_task_cb on_timeout) { assert(func != NULL && on_timeout != NULL); /* from eio.c: REQ() definition */ memset(&task->base, 0, sizeof(task->base)); task->base.type = EIO_CUSTOM; task->base.feed = coio_on_feed; task->base.finish = coio_on_finish; task->base.destroy = coio_on_destroy; /* task->base.pri = 0; */ task->fiber = fiber(); task->task_cb = func; task->timeout_cb = on_timeout; task->complete = 0; diag_create(&task->diag); } void coio_task_destroy(struct coio_task *task) { diag_destroy(&task->diag); } int coio_task_post(struct coio_task *task, double timeout) { assert(task->base.type == EIO_CUSTOM); assert(task->fiber == fiber()); eio_submit(&task->base); if (timeout == 0) { /* * This is a special case: * we don't wait any response from the task * and just perform just asynchronous post. */ task->fiber = NULL; return 0; } fiber_yield_timeout(timeout); if (!task->complete) { /* timed out or cancelled. */ task->fiber = NULL; if (fiber_is_cancelled()) diag_set(FiberIsCancelled); else diag_set(TimedOut); return -1; } return 0; } static void coio_on_call(eio_req *req) { struct coio_task *task = (struct coio_task *) req; req->result = task->call_cb(task->ap); if (req->result) diag_move(diag_get(), &task->diag); } ssize_t coio_call(ssize_t (*func)(va_list ap), ...) 
{ struct coio_task *task = (struct coio_task *) calloc(1, sizeof(*task)); if (task == NULL) return -1; /* errno = ENOMEM */ /* from eio.c: REQ() definition */ task->base.type = EIO_CUSTOM; task->base.feed = coio_on_call; task->base.finish = coio_on_finish; /* task->base.destroy = NULL; */ /* task->base.pri = 0; */ task->fiber = fiber(); task->call_cb = func; task->complete = 0; diag_create(&task->diag); va_start(task->ap, func); eio_submit(&task->base); do { fiber_yield(); } while (task->complete == 0); va_end(task->ap); ssize_t result = task->base.result; int save_errno = errno; if (result) diag_move(&task->diag, diag_get()); free(task); errno = save_errno; return result; } struct async_getaddrinfo_task { struct coio_task base; struct addrinfo *result; int rc; char *host; char *port; struct addrinfo hints; }; #ifndef EAI_ADDRFAMILY #define EAI_ADDRFAMILY EAI_BADFLAGS /* EAI_ADDRFAMILY is deprecated on BSD */ #endif /* * Resolver function, run in separate thread by * coio (libeio). */ static int getaddrinfo_cb(struct coio_task *ptr) { struct async_getaddrinfo_task *task = (struct async_getaddrinfo_task *) ptr; task->rc = getaddrinfo(task->host, task->port, &task->hints, &task->result); /* getaddrinfo can return EAI_ADDRFAMILY on attempt * to resolve ::1, if machine has no public ipv6 addresses * configured. Retry without AI_ADDRCONFIG flag set. 
* * See for details: https://bugs.launchpad.net/tarantool/+bug/1160877 */ if ((task->rc == EAI_BADFLAGS || task->rc == EAI_ADDRFAMILY) && (task->hints.ai_flags & AI_ADDRCONFIG)) { task->hints.ai_flags &= ~AI_ADDRCONFIG; task->rc = getaddrinfo(task->host, task->port, &task->hints, &task->result); } return 0; } static int getaddrinfo_free_cb(struct coio_task *ptr) { struct async_getaddrinfo_task *task = (struct async_getaddrinfo_task *) ptr; if (task->host != NULL) free(task->host); if (task->port != NULL) free(task->port); if (task->result != NULL) freeaddrinfo(task->result); coio_task_destroy(&task->base); TRASH(task); free(task); return 0; } int coio_getaddrinfo(const char *host, const char *port, const struct addrinfo *hints, struct addrinfo **res, double timeout) { struct async_getaddrinfo_task *task = (struct async_getaddrinfo_task *) calloc(1, sizeof(*task)); if (task == NULL) { diag_set(OutOfMemory, sizeof(*task), "malloc", "getaddrinfo"); return -1; } coio_task_create(&task->base, getaddrinfo_cb, getaddrinfo_free_cb); /* * getaddrinfo() on osx upto osx 10.8 crashes when AI_NUMERICSERV is * set and servername is either NULL or "0" ("00" works fine) * * Based on the workaround in https://bugs.python.org/issue17269 */ #if defined(__APPLE__) && defined(AI_NUMERICSERV) if (hints && (hints->ai_flags & AI_NUMERICSERV) && (port == NULL || (port[0]=='0' && port[1]=='\0'))) port = "00"; #endif /* Fill hinting information for use by connect(2) or bind(2). 
*/ memcpy(&task->hints, hints, sizeof(task->hints)); /* make no difference between empty string and NULL for host */ if (host != NULL && *host) { task->host = strdup(host); if (task->host == NULL) { diag_set(OutOfMemory, strlen(host), "malloc", "getaddrinfo"); getaddrinfo_free_cb(&task->base); return -1; } } if (port != NULL) { task->port = strdup(port); if (task->port == NULL) { diag_set(OutOfMemory, strlen(port), "malloc", "getaddrinfo"); getaddrinfo_free_cb(&task->base); return -1; } } /* Post coio task */ if (coio_task_post(&task->base, timeout) != 0) return -1; /* timed out or cancelled */ /* Task finished */ if (task->rc < 0) { /* getaddrinfo() failed */ errno = EIO; diag_set(SystemError, "getaddrinfo: %s", gai_strerror(task->rc)); getaddrinfo_free_cb(&task->base); return -1; } /* getaddrinfo() succeed */ *res = task->result; task->result = NULL; getaddrinfo_free_cb(&task->base); return 0; } tarantool_1.9.1.26.g63eb81e3c/src/version.h0000664000000000000000000000510613306560010016627 0ustar rootroot#ifndef INCLUDES_TARANTOOL_VERSION_H #define INCLUDES_TARANTOOL_VERSION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Pack version into uint32_t. * The highest byte or result means major version, next - minor, * middle - patch, last - revision. */ static inline uint32_t version_id(unsigned major, unsigned minor, unsigned patch) { return (((major << 8) | minor) << 8) | patch; } static inline unsigned version_id_major(uint32_t version_id) { return (version_id >> 16) & 0xff; } static inline unsigned version_id_minor(uint32_t version_id) { return (version_id >> 8) & 0xff; } static inline unsigned version_id_patch(uint32_t version_id) { return version_id & 0xff; } /** * Return Tarantool package name as string */ const char * tarantool_package(void); /** * Return Tarantool version as string */ const char * tarantool_version(void); /** * Get version (defined in PACKAGE_VERSION), packed into uint32_t * The highest byte or result means major version, next - minor, * middle - patch, last - revision. */ uint32_t tarantool_version_id(void); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_VERSION_H */ tarantool_1.9.1.26.g63eb81e3c/src/fiber_cond.h0000664000000000000000000001012413306560010017230 0ustar rootroot#ifndef TARANTOOL_FIBER_COND_H_INCLUDED #define TARANTOOL_FIBER_COND_H_INCLUDED 1 /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** \cond public */ /** * Conditional variable for cooperative multitasking (fibers). * * A cond (short for "condition variable") is a synchronization primitive * that allow fibers to yield until some predicate is satisfied. Fiber * conditions have two basic operations - wait() and signal(). wait() * suspends execution of fiber (i.e. yields) until signal() is called. * Unlike pthread_cond, fiber_cond doesn't require mutex/latch wrapping. * */ struct fiber_cond; /** \endcond public */ struct fiber_cond { /** Waiting fibers */ struct rlist waiters; }; /** * Initialize the fiber condition variable. 
* * @param cond condition */ void fiber_cond_create(struct fiber_cond *cond); /** * Finalize the cond. * Behaviour is undefined if there are fiber waiting for the cond. * @param cond condition */ void fiber_cond_destroy(struct fiber_cond *cond); /** \cond public */ /** * Instantiate a new fiber cond object. */ struct fiber_cond * fiber_cond_new(void); /** * Delete the fiber cond object. * Behaviour is undefined if there are fiber waiting for the cond. */ void fiber_cond_delete(struct fiber_cond *cond); /** * Wake one fiber waiting for the cond. * Does nothing if no one is waiting. * @param cond condition */ void fiber_cond_signal(struct fiber_cond *cond); /** * Wake up all fibers waiting for the cond. * @param cond condition */ void fiber_cond_broadcast(struct fiber_cond *cond); /** * Suspend the execution of the current fiber (i.e. yield) until * fiber_cond_signal() is called. Like pthread_cond, fiber_cond can issue * spurious wake ups caused by explicit fiber_wakeup() or fiber_cancel() * calls. It is highly recommended to wrap calls to this function into a loop * and check an actual predicate and fiber_testcancel() on every iteration. * * @param cond condition * @param timeout timeout in seconds * @retval 0 on fiber_cond_signal() call or a spurious wake up * @retval -1 on timeout, diag is set to TimedOut */ int fiber_cond_wait_timeout(struct fiber_cond *cond, double timeout); /** * Shortcut for fiber_cond_wait_timeout(). * @see fiber_cond_wait_timeout() */ int fiber_cond_wait(struct fiber_cond *cond); /** \endcond public */ /** * Wait until the given condition variable is signaled or the * deadline passed. The deadline is specified as absolute time * in seconds since system start (i.e. monotonic clock). 
* @see fiber_cond_wait_timeout() */ int fiber_cond_wait_deadline(struct fiber_cond *cond, double deadline); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_FIBER_COND_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/fiber_channel.c0000664000000000000000000003043213306560010017714 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "fiber_channel.h" #include #include "fiber.h" enum fiber_channel_wait_status { FIBER_CHANNEL_WAIT_READER, /* A reader is waiting for writer */ FIBER_CHANNEL_WAIT_WRITER, /* A writer waiting for reader. */ FIBER_CHANNEL_WAIT_DONE, /* Wait is done, message sent/received. */ FIBER_CHANNEL_WAIT_CLOSED /* Wait is aborted, the channel is closed. 
*/ }; /** * Wait pad is a helper data structure for waiting for * an incoming message or a reader. */ struct ipc_wait_pad { struct ipc_msg *msg; enum fiber_channel_wait_status status; }; void fiber_channel_create(struct fiber_channel *ch, uint32_t size) { ch->size = size; ch->count = 0; ch->is_closed = false; rlist_create(&ch->waiters); if (ch->size) { ch->buf = (struct ipc_msg **) (char *) &ch[1]; ch->beg = 0; } } struct fiber_channel * fiber_channel_new(uint32_t size) { struct fiber_channel *res = (struct fiber_channel *) malloc(fiber_channel_memsize(size)); if (res == NULL) { diag_set(OutOfMemory, size, "malloc", "struct fiber_channel"); return NULL; } fiber_channel_create(res, size); return res; } bool fiber_channel_has_waiter(struct fiber_channel *ch, enum fiber_channel_wait_status status) { if (rlist_empty(&ch->waiters)) return false; struct fiber *f = rlist_first_entry(&ch->waiters, struct fiber, state); struct ipc_wait_pad *pad = (struct ipc_wait_pad *) fiber_get_key(f, FIBER_KEY_MSG); return pad->status == status; } bool fiber_channel_has_readers(struct fiber_channel *ch) { return fiber_channel_has_waiter(ch, FIBER_CHANNEL_WAIT_READER); } bool fiber_channel_has_writers(struct fiber_channel *ch) { return fiber_channel_has_waiter(ch, FIBER_CHANNEL_WAIT_WRITER); } /** * Push a message into the channel buffer. * * @pre The buffer has space for a message. */ static inline void fiber_channel_buffer_push(struct fiber_channel *ch, struct ipc_msg *msg) { assert(ch->count < ch->size); /* Find an empty slot in the ring buffer. 
*/ uint32_t i = ch->beg + ch->count; if (i >= ch->size) i -= ch->size; ch->buf[i] = msg; ch->count++; } static inline struct ipc_msg * fiber_channel_buffer_pop(struct fiber_channel *ch) { assert(ch->count > 0); struct ipc_msg *msg = ch->buf[ch->beg]; if (++ch->beg == ch->size) ch->beg = 0; ch->count--; return msg; } static inline void fiber_channel_waiter_wakeup(struct fiber *f, enum fiber_channel_wait_status status) { struct ipc_wait_pad *pad = (struct ipc_wait_pad *) fiber_get_key(f, FIBER_KEY_MSG); /* * Safe to overwrite the status without looking at it: * whoever is touching the status, removes the fiber * from the wait list. */ pad->status = status; /* * fiber_channel allows an asynchronous cancel. If a fiber * is cancelled while waiting on a timeout, it is done via * fiber_wakeup(), which modifies fiber->state link. * This ensures that a fiber is never on two "state" * lists: it's either waiting on a channel, or is * cancelled, ready for execution. This is why * we use fiber->state, and not (imagine) pad->link as * a list link, and store the pad in the fiber key. * * It's important that the sender removes the receiver * from the wait list, not the receiver, after it's woken * up, to ensure the callee doesn't get two messages * delivered to it. Since 'fiber->state' is used, this * works correctly with fiber_cancel(). */ fiber_wakeup(f); } int fiber_channel_check_wait(struct fiber_channel *ch, ev_tstamp start_time, ev_tstamp timeout) { /* * Preconditions of waiting are: * - the channel is not closed, * - the current fiber has not been * cancelled, * - the timeout has not expired. * If timeout is non-zero, yield at least once, otherwise * rounding errors can lead to an infinite loop in the * caller, since ev_now() does not get updated without * a yield. 
*/ if (ch->is_closed) { diag_set(ChannelIsClosed); return -1; } if (fiber_is_cancelled()) { diag_set(FiberIsCancelled); return -1; } if (timeout == 0 || ev_monotonic_now(loop()) > start_time + timeout) { diag_set(TimedOut); return -1; } return 0; } void fiber_channel_close(struct fiber_channel *ch) { if (ch->is_closed) return; while (ch->count) { struct ipc_msg *msg = fiber_channel_buffer_pop(ch); msg->destroy(msg); } struct fiber *f; while (! rlist_empty(&ch->waiters)) { f = rlist_first_entry(&ch->waiters, struct fiber, state); fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_CLOSED); } ch->is_closed = true; } void fiber_channel_destroy(struct fiber_channel *ch) { fiber_channel_close(ch); } void fiber_channel_delete(struct fiber_channel *ch) { fiber_channel_destroy(ch); free(ch); } static __thread struct mempool ipc_value_pool; struct ipc_value * ipc_value_new() { if (! mempool_is_initialized(&ipc_value_pool)) { /* * We don't need to bother with * destruction since the entire slab cache * is freed when the thread ends. 
*/ mempool_create(&ipc_value_pool, &cord()->slabc, sizeof(struct ipc_value)); } struct ipc_value *value = (struct ipc_value *) mempool_alloc(&ipc_value_pool); if (value == NULL) { diag_set(OutOfMemory, sizeof(struct ipc_value), "ipc_msg_pool", "struct ipc_value"); return NULL; } value->base.destroy = ipc_value_delete; return value; } void ipc_value_delete(struct ipc_msg *msg) { mempool_free(&ipc_value_pool, msg); } int fiber_channel_put_timeout(struct fiber_channel *ch, void *data, ev_tstamp timeout) { struct ipc_value *value = ipc_value_new(); if (value == NULL) return -1; value->data = data; int rc = fiber_channel_put_msg_timeout(ch, &value->base, timeout); if (rc < 0) ipc_value_delete(&value->base); return rc; } int fiber_channel_get_timeout(struct fiber_channel *ch, void **data, ev_tstamp timeout) { struct ipc_value *value; int rc = fiber_channel_get_msg_timeout(ch, (struct ipc_msg **) &value, timeout); if (rc < 0) return rc; *data = value->data; ipc_value_delete(&value->base); return rc; } int fiber_channel_put_msg_timeout(struct fiber_channel *ch, struct ipc_msg *msg, ev_tstamp timeout) { /** Ensure delivery fairness in case of prolonged wait. */ bool first_try = true; ev_tstamp start_time = ev_monotonic_now(loop()); while (true) { /* * Check if there is a ready reader first, and * only if there is no reader try to put a message * into the channel buffer. */ if (fiber_channel_has_readers(ch)) { /** * There is a reader, push the message * immediately. */ /* * There can be no reader if there is * a buffered message or the channel is * closed. */ assert(ch->count == 0); assert(ch->is_closed == false); struct fiber *f = rlist_first_entry(&ch->waiters, struct fiber, state); /* Place the message on the pad. */ struct ipc_wait_pad *pad = (struct ipc_wait_pad *) fiber_get_key(f, FIBER_KEY_MSG); pad->msg = msg; fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE); return 0; } if (ch->count < ch->size) { /* * No reader, but the channel is buffered. 
* Nice, drop the message in the buffer. */ /* * Closed channels, are, well, closed, * even if there is space in the buffer. */ if (ch->is_closed) { diag_set(ChannelIsClosed); return -1; } fiber_channel_buffer_push(ch, msg); return 0; } /** * No reader and no space in the buffer. * Have to wait. */ struct fiber *f = fiber(); if (fiber_channel_check_wait(ch, start_time, timeout)) return -1; /* Prepare a wait pad. */ struct ipc_wait_pad pad; pad.status = FIBER_CHANNEL_WAIT_WRITER; pad.msg = msg; fiber_set_key(f, FIBER_KEY_MSG, &pad); if (first_try) { rlist_add_tail_entry(&ch->waiters, f, state); first_try = false; } else { rlist_add_entry(&ch->waiters, f, state); } fiber_yield_timeout(timeout); /* * In case of yield timeout, fiber->state * is in the ch->waiters list, remove. * rlist_del_entry() is a no-op if already done. */ rlist_del_entry(f, state); fiber_set_key(f, FIBER_KEY_MSG, NULL); if (pad.status == FIBER_CHANNEL_WAIT_CLOSED) { /* * The channel is closed. Do not touch * the channel object. It might be gone * already. */ diag_set(ChannelIsClosed); return -1; } if (pad.status == FIBER_CHANNEL_WAIT_DONE) return 0; /* OK, someone took the message. */ timeout -= ev_monotonic_now(loop()) - start_time; } } int fiber_channel_get_msg_timeout(struct fiber_channel *ch, struct ipc_msg **msg, ev_tstamp timeout) { /** Ensure delivery fairness in case of prolonged wait. */ bool first_try = true; ev_tstamp start_time = ev_monotonic_now(loop()); while (true) { struct fiber *f; /* * Buffered messages take priority over waiting * fibers, if any, since they arrived earlier. * Try to take a message from the buffer first. */ if (ch->count > 0) { /** * There can't be any buffered stuff in * a closed channel - everything is * destroyed at close. */ assert(ch->is_closed == false); *msg = fiber_channel_buffer_pop(ch); if (fiber_channel_has_writers(ch)) { /* * Move a waiting writer, if any, * from the wait list to the tail * the buffer, to preserve fairness * in message delivery order. 
*/ f = rlist_first_entry(&ch->waiters, struct fiber, state); struct ipc_wait_pad *pad = (struct ipc_wait_pad *) fiber_get_key(f, FIBER_KEY_MSG); fiber_channel_buffer_push(ch, pad->msg); fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE); } return 0; } if (fiber_channel_has_writers(ch)) { /** * There is no buffered messages, *but* * there is a writer. This is only * possible when the channel is * unbuffered. * Take the message directly from the * writer and be done with it. */ assert(ch->size == 0); f = rlist_first_entry(&ch->waiters, struct fiber, state); struct ipc_wait_pad *pad = (struct ipc_wait_pad *) fiber_get_key(f, FIBER_KEY_MSG); *msg = pad->msg; fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE); return 0; } if (fiber_channel_check_wait(ch, start_time, timeout)) return -1; f = fiber(); /** * No reader and no space in the buffer. * Have to wait. */ struct ipc_wait_pad pad; pad.status = FIBER_CHANNEL_WAIT_READER; fiber_set_key(f, FIBER_KEY_MSG, &pad); if (first_try) { rlist_add_tail_entry(&ch->waiters, f, state); first_try = false; } else { rlist_add_entry(&ch->waiters, f, state); } fiber_yield_timeout(timeout); /* * In case of yield timeout, fiber->state * is in the ch->waiters list, remove. * rlist_del_entry() is a no-op if already done. */ rlist_del_entry(f, state); fiber_set_key(f, FIBER_KEY_MSG, NULL); if (pad.status == FIBER_CHANNEL_WAIT_CLOSED) { diag_set(ChannelIsClosed); return -1; } if (pad.status == FIBER_CHANNEL_WAIT_DONE) { *msg = pad.msg; return 0; } timeout -= ev_monotonic_now(loop()) - start_time; } } tarantool_1.9.1.26.g63eb81e3c/src/histogram.h0000664000000000000000000000514013306565107017151 0ustar rootroot#ifndef TARANTOOL_HISTOGRAM_H_INCLUDED #define TARANTOOL_HISTOGRAM_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif #include #include #include struct histogram_bucket { int64_t max; size_t count; }; struct histogram { int64_t max; size_t total; size_t n_buckets; struct histogram_bucket buckets[0]; }; /** * Create a new histogram given an array of bucket boundaries. * buckets[i] defines the upper bound for bucket i. */ struct histogram * histogram_new(const int64_t *buckets, size_t n_buckets); /** * Destroy a histogram. */ void histogram_delete(struct histogram *hist); /** * Update a histogram with a new observation. */ void histogram_collect(struct histogram *hist, int64_t val); /** * Remove a previously collected observation from a historam. */ void histogram_discard(struct histogram *hist, int64_t val); /** * Calculate a percentile, i.e. the value below which a given * percentage of observations fall. 
*/ int64_t histogram_percentile(struct histogram *hist, int pct); /** * Print string representation of a histogram. */ int histogram_snprint(char *buf, int size, struct histogram *hist); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* TARANTOOL_HISTOGRAM_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/trigger.h0000664000000000000000000000712213306560010016605 0ustar rootroot#ifndef INCLUDES_TARANTOOL_TRIGGER_H #define INCLUDES_TARANTOOL_TRIGGER_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "small/rlist.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Type of the callback which may be invoked * on an event. 
*/ struct trigger; typedef void (*trigger_f)(struct trigger *trigger, void *event); typedef void (*trigger_f0)(struct trigger *trigger); struct trigger { struct rlist link; trigger_f run; /** * Lua ref in case the trigger is used in Lua, * or other trigger context. */ void *data; /** * Cleanup function, called when the trigger is removed * or the object containing the trigger is destroyed. */ trigger_f0 destroy; }; static inline void trigger_create(struct trigger *trigger, trigger_f run, void *data, trigger_f0 destroy) { rlist_create(&trigger->link); trigger->run = run; trigger->data = data; trigger->destroy = destroy; } static inline void trigger_add(struct rlist *list, struct trigger *trigger) { /* * New triggers are pushed to the beginning of the list. * This ensures that they are not fired right away if * pushed from within a trigger. This also ensures that * the trigger which was set first is fired last. * Alter space code depends on this order. * @todo in future, allow triggers to be pushed * to an arbitrary position on the list. 
*/ rlist_add_entry(list, trigger, link); } static inline void trigger_add_unique(struct rlist *list, struct trigger *trigger) { struct trigger *trg; rlist_foreach_entry(trg, list, link) { if (trg->data == trigger->data && trg->run == trigger->run) return; } trigger_add(list, trigger); } static inline void trigger_clear(struct trigger *trigger) { rlist_del_entry(trigger, link); } static inline void trigger_destroy(struct rlist *list) { struct trigger *trigger, *tmp; rlist_foreach_entry_safe(trigger, list, link, tmp) { trigger_clear(trigger); if (trigger->destroy) trigger->destroy(trigger); } } int trigger_run(struct rlist *list, void *event); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline void trigger_run_xc(struct rlist *list, void *event) { if (trigger_run(list, event) != 0) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_TRIGGER_H */ tarantool_1.9.1.26.g63eb81e3c/src/fiber_channel.h0000664000000000000000000002331713306560010017725 0ustar rootroot#ifndef TARANTOOL_FIBER_CHANNEL_H_INCLUDED #define TARANTOOL_FIBER_CHANNEL_H_INCLUDED 1 /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "small/rlist.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * @brief CHANNELS */ /** * A base structure for an IPC message. * * A message at any moment can be either: * - new * - in a channel, waiting to get delivered * - delivered * * When a channel is destroyed, all messages buffered by the * channel must be destroyed as well. The destroy callback is * therefore necessary to free any message-specific resources in * case of delivery failure. */ struct ipc_msg { void (*destroy)(struct ipc_msg *msg); }; /** * A message implementation to pass simple value across * a channel. */ struct ipc_value { struct ipc_msg base; union { void *data; int i; }; }; void ipc_value_delete(struct ipc_msg *msg); struct ipc_value * ipc_value_new(); /** * Channel - fiber communication media. * * A channel is a media to deliver messages between fibers. * Any fiber can read or write to/from a channel. Many * readers and writers can work with a channel concurrently. * A message sent to a channel is ready by the first fiber * reading from it. If a channel is empty, the reader blocks * and waits for a message. If a channel has no reader, the * writer waits for the reader to arrive. If a channel is * buffered, i.e. has an associated buffer for messages, it * is possible for a writer to "drop" the message in a channel * until a writer arrives. In case of multiple readers, * messages are delivered in FIFO order. 
In case of multiple * writers, the first writer to come is released of its message * first. * * If a channel has a buffer of size N, and the buffer * is full (contains N messages), and there is a queue of writers, * the moment the first reader arrives and reads the first message * from a buffer, the first fiber from the wait queue is awoken, * and puts its message to the end of the buffer. * * A channel, once created is "open". I.e. anyone can read or * write to/from a channel. A channel can be closed at any time, * in which case, all messages currently buffered in a channel * are destroyed, waiting readers or writers awoken with an error. * * Waiting for a message, a reader, or space in a buffer can also * return error in case of a wait timeout or cancellation (when the * waiting fiber is cancelled). * * Sending a message to a closed channel, as well as reading * a message from such channel, always fails. * * Channel memory layout * --------------------- * Channel structure has a fixed size. If a channel is created * with a buffer, the buffer must be allocated in a continuous * memory chunk, directly after the channel itself. * fiber_channel_memsize() can be used to find out the amount * of memory necessary to store a channel, given the desired * buffer size. */ struct fiber_channel { /** Channel buffer size, if the channel is buffered. */ uint32_t size; /** The number of messages in the buffer. */ uint32_t count; /** * Readers blocked waiting for messages while the channel * buffers is empty and/or there are no writers, or * Writers blocked waiting for empty space while the * channel buffer is full and/or there are no readers. */ struct rlist waiters; /** Ring buffer read position. */ uint32_t beg; /* True if the channel is closed. */ bool is_closed; /** Channel buffer, if any. */ struct ipc_msg **buf; }; /** * The amount of memory necessary to store a channel, given * buffer size. 
*/ static inline size_t fiber_channel_memsize(uint32_t size) { return sizeof(struct fiber_channel) + sizeof(struct ipc_msg *) * size; } /** * Initialize a channel (the memory should have * been correctly allocated for the channel). */ void fiber_channel_create(struct fiber_channel *ch, uint32_t size); /** Destroy a channel. Does not free allocated memory. */ void fiber_channel_destroy(struct fiber_channel *ch); /** * Allocate and construct a channel. * * Uses malloc(). * * @param size of the channel buffer * @return new channel * @code * struct fiber_channel *ch = fiber_channel_new(10); * @endcode */ struct fiber_channel * fiber_channel_new(uint32_t size); /** * Destroy and free an IPC channel. * * @param ch channel */ void fiber_channel_delete(struct fiber_channel *ch); /** * Check if the channel buffer is empty. * * @param channel * * @retval true channel buffer is empty * (always true for unbuffered * channels) * @retval false otherwise * * @code * if (!fiber_channel_is_empty(ch)) * fiber_channel_get(ch, ...); * @endcode */ static inline bool fiber_channel_is_empty(struct fiber_channel *ch) { return ch->count == 0; } /** * Check if the channel buffer is full. * * @param channel * * @return true if the channel buffer is full * (always true for unbuffered channels) * * @return false otherwise * @code * if (!fiber_channel_is_full(ch)) * fiber_channel_put(ch, "message"); * @endcode */ static inline bool fiber_channel_is_full(struct fiber_channel *ch) { return ch->count >= ch->size; } /** * Put a message into a channel. * This is for cases when messages need to have * a custom destructor. */ int fiber_channel_put_msg_timeout(struct fiber_channel *ch, struct ipc_msg *msg, ev_tstamp timeout); /** * Send a message over a channel within given time. * * @param channel * @param msg * @param timeout * @return 0 success * @return -1, errno=ETIMEDOUT if timeout exceeded, * errno=ECANCEL if the fiber is cancelled * errno=EBADF if the channel is closed * while waiting on it. 
* */ int fiber_channel_put_timeout(struct fiber_channel *ch, void *data, ev_tstamp timeout); /** * Send a message over a channel. * * Yields current fiber if the channel is full. * The message does not require a custom * destructor. * * @param channel * @param data * * @code * fiber_channel_put(ch, "message"); * @endcode * @return -1 if the channel is closed */ static inline int fiber_channel_put(struct fiber_channel *ch, void *data) { return fiber_channel_put_timeout(ch, data, TIMEOUT_INFINITY); } /** * Get a message from the channel, or time out. * The caller is responsible for message destruction. */ int fiber_channel_get_msg_timeout(struct fiber_channel *ch, struct ipc_msg **msg, ev_tstamp timeout); /** * Get data from a channel within given time. * * @param channel * @param timeout * * @return 0 on success, -1 on error (timeout, channel is * closed) * @code * do { * struct ipc_msg *msg; * int rc = fiber_channel_get_timeout(ch, 0.5, ); * printf("message: %p\n", msg); * } while (msg); * @endcode */ int fiber_channel_get_timeout(struct fiber_channel *ch, void **data, ev_tstamp timeout); /** * Fetch a message from the channel. Yields current fiber if the * channel is empty. * * @param channel * @return 0 on success, -1 on error */ static inline int fiber_channel_get(struct fiber_channel *ch, void **data) { return fiber_channel_get_timeout(ch, data, TIMEOUT_INFINITY); } /** * Check if the channel has reader fibers that wait * for new messages. */ bool fiber_channel_has_readers(struct fiber_channel *ch); /** * Check if the channel has writer fibers that wait * for readers. */ bool fiber_channel_has_writers(struct fiber_channel *ch); /** Channel buffer size. */ static inline uint32_t fiber_channel_size(struct fiber_channel *ch) { return ch->size; } /** * The number of messages in the buffer. * There may be more messages outstanding * if the buffer is full. 
*/ static inline uint32_t fiber_channel_count(struct fiber_channel *ch) { return ch->count; } /** * Close the channel. Discards all messages * and wakes up all readers and writers. */ void fiber_channel_close(struct fiber_channel *ch); /** * True if the channel is closed for both for reading * and writing. */ static inline bool fiber_channel_is_closed(struct fiber_channel *ch) { return ch->is_closed; } #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline void fiber_channel_get_xc(struct fiber_channel *ch, void **data) { if (fiber_channel_get(ch, data) != 0) diag_raise(); } static inline void fiber_channel_put_xc(struct fiber_channel *ch, void *data) { if (fiber_channel_put(ch, data) != 0) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_FIBER_CHANNEL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/http_parser.h0000664000000000000000000000402013306560010017467 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef TARANTOOL_HTTP_PARSER_H #define TARANTOOL_HTTP_PARSER_H #define HEADER_LEN 32 enum { HTTP_PARSE_OK, HTTP_PARSE_DONE, HTTP_PARSE_INVALID }; struct http_parser { char *header_value_start; char *header_value_end; int http_major; int http_minor; char header_name[HEADER_LEN]; int header_name_idx; }; /* * @brief Parse line containing http header info * @param parser object * @param bufp pointer to buffer with data * @param end_buf * @return HTTP_DONE - line was parsed * HTTP_OK - header was read * HTTP_PARSE_INVALID - error during parsing */ int http_parse_header_line(struct http_parser *parser, char **bufp, const char *end_buf); #endif //TARANTOOL_HTTP_PARSER_H tarantool_1.9.1.26.g63eb81e3c/src/fiber_cond.c0000664000000000000000000000611213306560010017225 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "fiber_cond.h" #include #include #include "fiber.h" void fiber_cond_create(struct fiber_cond *c) { rlist_create(&c->waiters); } void fiber_cond_destroy(struct fiber_cond *c) { (void)c; assert(rlist_empty(&c->waiters)); } static __thread struct mempool cond_pool; struct fiber_cond * fiber_cond_new() { struct fiber_cond *cond; if (! mempool_is_initialized(&cond_pool)) { /* * We don't need to bother with * destruction since the entire slab cache * is freed when the thread ends. */ mempool_create(&cond_pool, &cord()->slabc, sizeof(*cond)); } cond = mempool_alloc(&cond_pool); if (cond == NULL) { diag_set(OutOfMemory, sizeof(*cond), "fiber_cond_pool", "struct fiber_cond"); return NULL; } fiber_cond_create(cond); return cond; } void fiber_cond_delete(struct fiber_cond *cond) { mempool_free(&cond_pool, cond); } void fiber_cond_signal(struct fiber_cond *e) { if (! rlist_empty(&e->waiters)) { struct fiber *f; f = rlist_shift_entry(&e->waiters, struct fiber, state); fiber_wakeup(f); } } void fiber_cond_broadcast(struct fiber_cond *e) { while (! 
rlist_empty(&e->waiters)) { struct fiber *f; f = rlist_shift_entry(&e->waiters, struct fiber, state); fiber_wakeup(f); } } int fiber_cond_wait_timeout(struct fiber_cond *c, double timeout) { struct fiber *f = fiber(); rlist_add_tail_entry(&c->waiters, f, state); if (fiber_yield_timeout(timeout)) { diag_set(TimedOut); return -1; } return 0; } int fiber_cond_wait(struct fiber_cond *c) { return fiber_cond_wait_timeout(c, TIMEOUT_INFINITY); } int fiber_cond_wait_deadline(struct fiber_cond *c, double deadline) { double timeout = deadline - ev_monotonic_now(loop()); return fiber_cond_wait_timeout(c, timeout); } tarantool_1.9.1.26.g63eb81e3c/src/proc_title.c0000664000000000000000000002550013306560010017301 0ustar rootroot/* * Copyright (C) 2000-2010 PostgreSQL Global Development Group * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "proc_title.h" #include "trivia/config.h" #include #include #include #include #include #include #include #ifdef HAVE_SYS_PSTAT_H #include /* for HP-UX */ #endif #ifdef HAVE_PS_STRINGS #include /* for old BSD */ #include #endif #if defined(__APPLE__) #include #endif extern char **environ; /* * Alternative ways of updating ps display: * * PS_USE_SETPROCTITLE * use the function setproctitle(const char *, ...) * (newer BSD systems) * PS_USE_PSTAT * use the pstat(PSTAT_SETCMD, ) * (HPUX) * PS_USE_PS_STRINGS * assign PS_STRINGS->ps_argvstr = "string" * (some BSD systems) * PS_USE_CHANGE_ARGV * assign argv[0] = "string" * (some other BSD systems) * PS_USE_CLOBBER_ARGV * write over the argv and environment area * (most SysV-like systems) * PS_USE_WIN32 * push the string out as the name of a Windows event * PS_USE_NONE * don't update ps display * (This is the default, as it is safest.) 
*/ #if defined(HAVE_SETPROCTITLE) #define PS_USE_SETPROCTITLE #elif defined(HAVE_PSTAT) && defined(PSTAT_SETCMD) #define PS_USE_PSTAT #elif defined(HAVE_PS_STRINGS) #define PS_USE_PS_STRINGS #elif (defined(BSD) || defined(__bsdi__) || defined(__hurd__)) && !defined(__APPLE__) #define PS_USE_CHANGE_ARGV #elif defined(__linux__) || defined(_AIX) || defined(__sgi) || (defined(sun) && !defined(BSD)) || defined(ultrix) || defined(__ksr__) || defined(__osf__) || defined(__svr4__) || defined(__svr5__) || defined(__APPLE__) #define PS_USE_CLOBBER_ARGV #elif defined(WIN32) #define PS_USE_WIN32 #else #define PS_USE_NONE #endif /* Different systems want the buffer padded differently */ #if defined(_AIX) || defined(__linux__) || defined(__svr4__) || defined(__APPLE__) #define PS_PADDING '\0' #else #define PS_PADDING ' ' #endif #ifndef PS_USE_CLOBBER_ARGV /* all but one options need a buffer to write their ps line in */ #define PS_BUFFER_SIZE 256 static char ps_buffer[PS_BUFFER_SIZE]; static const size_t ps_buffer_size = PS_BUFFER_SIZE; #else /* PS_USE_CLOBBER_ARGV */ static char *ps_buffer; /* will point to argv area */ static size_t ps_buffer_size; /* space determined at run time */ static size_t ps_last_status_len; /* use to minimize length of clobber */ #endif /* PS_USE_CLOBBER_ARGV */ static size_t ps_sentinel_size; /* that many trailing bytes in ps_buffer * are reserved and must be filled with * PS_PADDING */ #if defined(PS_USE_CHANGE_ARGV) || defined(PS_USE_CLOBBER_ARGV) static volatile void *ps_leaks[2]; /* we leak memory, hello valgrind */ #endif #if defined(PS_USE_CHANGE_ARGV) || defined(PS_USE_CLOBBER_ARGV) /* * A copy of the memory block within clobber_begin, clobber_end * was created to preserve its content. */ struct ps_relocation { char *clobber_begin; char *clobber_end; char *copy_begin; }; /* * If an entity is in a clobber area, hand back a pointer to * an entity in the copy area (the entity and its copy have the same * offset). 
*/ static inline void *ps_relocate( const struct ps_relocation *rel, void *p) { if (rel && (char *)p >= rel->clobber_begin && (char *)p < rel->clobber_end) return rel->copy_begin + ((char *)p - rel->clobber_begin); return p; } static void ps_argv_changed(const struct ps_relocation *rel, char **new_argv) { (void)rel; (void)new_argv; #if defined(__GLIBC__) program_invocation_name = ps_relocate(rel, program_invocation_name); program_invocation_short_name = ps_relocate(rel, program_invocation_short_name); #endif #if defined(HAVE_SETPROGNAME) && defined(HAVE_GETPROGNAME) setprogname(ps_relocate(rel, (void *)getprogname())); #endif #if defined(__APPLE__) /* * Darwin (and perhaps other NeXT-derived platforms?) has a static * copy of the argv pointer, which we may fix like so: */ *_NSGetArgv() = new_argv; #endif } #endif #if defined(PS_USE_CLOBBER_ARGV) static void ps_expand_clobber_area(struct ps_relocation *rel, int argc, char **argv) { int i; for (i = 0; i < argc; i++) { if (rel->clobber_begin == NULL) { rel->clobber_begin = rel->clobber_end = argv[i]; } if (argv[i] != NULL && rel->clobber_end == argv[i]) { rel->clobber_end += strlen(argv[i]) + 1; } } } static void ps_relocate_argv(struct ps_relocation *rel, int argc, char **argv, char **argv_copy) { int i; for (i = 0; i < argc; i++) { argv_copy[i] = ps_relocate(rel, argv[i]); } argv_copy[argc] = NULL; } #endif /* * Call this early in startup to save the original argc/argv values. * If needed, we make a copy of the original argv[] array to preserve it * from being clobbered by subsequent ps_display actions. * * (The original argv[] will not be overwritten by this routine, but may be * overwritten during init_ps_display. Also, the physical location of the * environment strings may be moved, so this should be called before any code * that might try to hang onto a getenv() result.) 
*/ char ** proc_title_init(int argc, char **argv) { (void)argc; #if defined(PS_USE_CLOBBER_ARGV) struct ps_relocation rel = {NULL, NULL, NULL}; char **argv_copy, **environ_copy; char *mem; size_t argv_copy_size, clobber_size; int envc = 0; while (environ[envc]) { envc++; } argv_copy_size = sizeof(argv[0]) * (argc + 1); /* * will be overwriting the memory occupied by argv/environ strings * (clobber area), determine clobber area dimensions */ ps_expand_clobber_area(&rel, argc, argv); ps_expand_clobber_area(&rel, envc, environ); clobber_size = rel.clobber_end - rel.clobber_begin; /* * one memory block to store both argv_copy and the copy of the * clobber area */ mem = malloc(argv_copy_size + clobber_size); if (mem == NULL) { return NULL; } rel.copy_begin = mem + argv_copy_size; memcpy(rel.copy_begin, rel.clobber_begin, clobber_size); argv_copy = (void *)mem; ps_relocate_argv(&rel, argc, argv, argv_copy); /* * environ_copy is allocated separately, this is due to libc calling * realloc on the environ in setenv; * note: do NOT overwrite environ inplace, changing environ pointer * is mandatory to flush internal libc caches on getenv/setenv */ environ_copy = malloc(sizeof(environ[0]) * (envc + 1)); if (environ_copy == NULL) { free(mem); return NULL; } ps_relocate_argv(&rel, envc, environ, environ_copy); ps_argv_changed(&rel, argv_copy); ps_buffer = rel.clobber_begin; ps_buffer_size = ps_last_status_len = clobber_size; ps_leaks[0] = argv = argv_copy; ps_leaks[1] = environ = environ_copy; #ifdef __APPLE__ /* * http://opensource.apple.com/source/adv_cmds/adv_cmds-158/ps/print.c * * ps on osx fetches command line from a process with {CTL_KERN, * KERN_PROCARGS2, } sysctl. The call returns cached argc + a * copy of the memory area where argv/environ strings live. 
* * If initially there were 10 arguments, ps is expecting to find 10 * \0 separated strings but we've written the process title on top * of that, so ps will try to find more strings; this can result in * a garbage from environment area showing (which we often fail to * overwrite completely). To fix it we write additional \0 * terminators at the end of the title (a 'sentinel'). */ ps_sentinel_size = argc - 1; #endif #endif #if defined(PS_USE_CHANGE_ARGV) size_t size = sizeof(argv[0] * (argc + 1)); char **argv_copy = malloc(size); if (argv_copy == NULL) { return NULL; } memcpy(argv_copy, argv, size); ps_argv_changed(NULL, argv_copy); ps_leaks[0] = argv = argv_copy; #endif return argv; } void proc_title_free(int argc, char **argv) { (void)argc; (void)argv; /* * Intentionally a noop. Undoing proc_title_init is hard and * unsafe because all sorts of code could have grabbed pointers from * argv/environ by now. */ } void proc_title_set(const char *format, ...) { #ifndef PS_USE_NONE va_list ap; int buflen; #ifdef PS_USE_CLOBBER_ARGV /* If ps_buffer is a pointer, it might still be null */ if (!ps_buffer) return; #endif /* Update ps_buffer to contain both fixed part and activity */ va_start(ap, format); buflen = vsnprintf(ps_buffer, ps_buffer_size - ps_sentinel_size, format, ap); va_end(ap); if (buflen < 0) return; /* Transmit new setting to kernel, if necessary */ #ifdef PS_USE_SETPROCTITLE setproctitle("-%s", ps_buffer); #endif #ifdef PS_USE_PSTAT { union pstun pst; pst.pst_command = ps_buffer; pstat(PSTAT_SETCMD, pst, strlen(ps_buffer), 0, 0); } #endif /* PS_USE_PSTAT */ #ifdef PS_USE_PS_STRINGS static char *argvstr[2]; argvstr[0] = ps_buffer; PS_STRINGS->ps_nargvstr = 1; PS_STRINGS->ps_argvstr = argvstr; #endif /* PS_USE_PS_STRINGS */ #ifdef PS_USE_CLOBBER_ARGV { /* clobber remainder of old status string */ if (ps_last_status_len > (size_t)buflen) memset(ps_buffer + buflen, PS_PADDING, ps_last_status_len - buflen); ps_last_status_len = buflen; } #endif /* 
PS_USE_CLOBBER_ARGV */ #ifdef PS_USE_WIN32 { /* * Win32 does not support showing any changed arguments. To make it at * all possible to track which backend is doing what, we create a * named object that can be viewed with for example Process Explorer. */ static HANDLE ident_handle = INVALID_HANDLE_VALUE; char name[PS_BUFFER_SIZE + 32]; if (ident_handle != INVALID_HANDLE_VALUE) CloseHandle(ident_handle); sprintf(name, "pgident(%d): %s", MyProcPid, ps_buffer); ident_handle = CreateEvent(NULL, TRUE, FALSE, name); } #endif /* PS_USE_WIN32 */ #endif /* not PS_USE_NONE */ } size_t proc_title_max_length() { return ps_buffer_size - ps_sentinel_size; } tarantool_1.9.1.26.g63eb81e3c/src/coio_task.h0000664000000000000000000001157013306560010017117 0ustar rootroot#ifndef TARANTOOL_COIO_TASK_H_INCLUDED #define TARANTOOL_COIO_TASK_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/config.h" #include /* ssize_t */ #include #include "third_party/tarantool_eio.h" #include "diag.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Asynchronous IO Tasks (libeio wrapper) * * Yield the current fiber until a created task is complete. */ void coio_init(void); void coio_enable(void); void coio_shutdown(void); struct coio_task; typedef ssize_t (*coio_call_cb)(va_list ap); typedef int (*coio_task_cb)(struct coio_task *task); /* like eio_req */ /** * A single task context. */ struct coio_task { struct eio_req base; /* eio_task - must be first */ /** The calling fiber. */ struct fiber *fiber; /** Callbacks. */ union { struct { /* coio_task() */ coio_task_cb task_cb; coio_task_cb timeout_cb; }; struct { /* coio_call() */ coio_call_cb call_cb; va_list ap; }; }; /** Callback results. */ int complete; /** Task diag **/ struct diag diag; }; /** * Create coio_task. * * @param task coio task * @param func a callback to execute in EIO thread pool. * @param on_timeout a callback to execute on timeout */ void coio_task_create(struct coio_task *task, coio_task_cb func, coio_task_cb on_timeout); /** * Destroy coio task. * * @param task coio task. */ void coio_task_destroy(struct coio_task *task); /** * Post coio task to EIO thread pool. * * @param task coio task. * @param timeout timeout in seconds. * @retval 0 the task completed successfully. Check the result * code in task->base.result and free the task. 
* @retval -1 timeout or the waiting fiber was cancelled (check diag); * the caller should not free the task, it * will be freed when it's finished in the timeout * callback. */ int coio_task_post(struct coio_task *task, double timeout); /** \cond public */ /** * Create new eio task with specified function and * arguments. Yield and wait until the task is complete * or a timeout occurs. * * This function doesn't throw exceptions to avoid double error * checking: in most cases it's also necessary to check the return * value of the called function and perform necessary actions. If * func sets errno, the errno is preserved across the call. * * @retval -1 and errno = ENOMEM if failed to create a task * @retval the function return (errno is preserved). * * @code * static ssize_t openfile_cb(va_list ap) * { * const char *filename = va_arg(ap); * int flags = va_arg(ap); * return open(filename, flags); * } * * if (coio_call(openfile_cb, 0.10, "/tmp/file", 0) == -1) * // handle errors. * ... * @endcode */ ssize_t coio_call(ssize_t (*func)(va_list), ...); struct addrinfo; /** * Fiber-friendly version of getaddrinfo(3). * * @param host host name, i.e. "tarantool.org" * @param port service name, i.e. "80" or "http" * @param hints hints, see getaddrinfo(3) * @param res[out] result, see getaddrinfo(3) * @param timeout timeout * @retval 0 on success, please free @a res using freeaddrinfo(3). * @retval -1 on error, check diag. * Please note that the return value is not compatible with * getaddrinfo(3). 
* @sa getaddrinfo() */ int coio_getaddrinfo(const char *host, const char *port, const struct addrinfo *hints, struct addrinfo **res, double timeout); /** \endcond public */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_COIO_TASK_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/memory.h0000664000000000000000000000357213306560010016457 0ustar rootroot#ifndef TARANTOOL_MEMORY_H_INCLUDED #define TARANTOOL_MEMORY_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "small/region.h" #include "small/small.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Define the global components of Tarantool memory subsystem: * slab caches, allocators, arenas. 
*/ /* Global runtime memory. */ extern struct slab_arena runtime; void memory_init(); void memory_free(); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_MEMORY_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/say.c0000664000000000000000000007123413306565107015752 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "say.h" #include "fiber.h" #include "errinj.h" #include #include #include #include #include #include #include #include #include #include #include #include pid_t log_pid = 0; int log_level = S_INFO; enum say_format log_format = SF_PLAIN; /** List of logs to rotate */ static RLIST_HEAD(log_rotate_list); static const char logger_syntax_reminder[] = "expecting a file name or a prefix, such as '|', 'pipe:', 'syslog:'"; /** * True if Tarantool process runs in background mode, i.e. has no * controlling terminal. */ static bool log_background = true; static void say_default(int level, const char *filename, int line, const char *error, const char *format, ...); static int say_format_boot(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap); static int say_format_syslog(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap); /** A utility function to handle va_list from different varargs functions. */ static inline int log_vsay(struct log *log, int level, const char *filename, int line, const char *error, const char *format, va_list ap); /** Default logger used before logging subsystem is initialized. */ static struct log log_boot = { .fd = STDERR_FILENO, .level = S_INFO, .type = SAY_LOGGER_BOOT, .path = NULL, /* iff type == SAY_LOGGER_FILE */ .nonblock = false, .format_func = say_format_boot, .pid = 0, .syslog_ident = NULL, }; /** Default logger used after bootstrap. 
*/ static struct log log_std; static struct log *log_default = &log_boot; sayfunc_t _say = say_default; static const char level_chars[] = { [S_FATAL] = 'F', [S_SYSERROR] = '!', [S_ERROR] = 'E', [S_CRIT] = 'C', [S_WARN] = 'W', [S_INFO] = 'I', [S_VERBOSE] = 'V', [S_DEBUG] = 'D', }; static char level_to_char(int level) { assert(level >= S_FATAL && level <= S_DEBUG); return level_chars[level]; } static const char *level_strs[] = { [S_FATAL] = "FATAL", [S_SYSERROR] = "SYSERROR", [S_ERROR] = "ERROR", [S_CRIT] = "CRIT", [S_WARN] = "WARN", [S_INFO] = "INFO", [S_VERBOSE] = "VERBOSE", [S_DEBUG] = "DEBUG", }; static const char *say_logger_type_strs[] = { [SAY_LOGGER_BOOT] = "stdout", [SAY_LOGGER_STDERR] = "stderr", [SAY_LOGGER_FILE] = "file", [SAY_LOGGER_PIPE] = "pipe", [SAY_LOGGER_SYSLOG] = "syslog", }; static const char* level_to_string(int level) { assert(level >= S_FATAL && level <= S_DEBUG); return level_strs[level]; } static int level_to_syslog_priority(int level) { switch (level) { case S_FATAL: return LOG_ERR; case S_SYSERROR: return LOG_ERR; case S_ERROR: return LOG_ERR; case S_CRIT: return LOG_ERR; case S_WARN: return LOG_WARNING; case S_INFO: return LOG_INFO; case S_VERBOSE: return LOG_INFO; case S_DEBUG: return LOG_DEBUG; default: return LOG_ERR; } } void log_set_level(struct log *log, enum say_level level) { log->level = level; } void log_set_format(struct log *log, log_format_func_t format_func) { assert(format_func == say_format_plain || log->type == SAY_LOGGER_STDERR || log->type == SAY_LOGGER_PIPE || log->type == SAY_LOGGER_FILE); log->format_func = format_func; } void say_set_log_level(int new_level) { log_level = new_level; log_set_level(log_default, (enum say_level) new_level); } void say_set_log_format(enum say_format format) { /* * For syslog, default or boot log type the log format can * not be changed. 
*/ bool allowed_to_change = log_default->type == SAY_LOGGER_STDERR || log_default->type == SAY_LOGGER_PIPE || log_default->type == SAY_LOGGER_FILE; switch (format) { case SF_JSON: if (!allowed_to_change) { say_error("json log format is not supported when output is '%s'", say_logger_type_strs[log_default->type]); return; } log_set_format(log_default, say_format_json); break; case SF_PLAIN: if (!allowed_to_change) { return; } log_set_format(log_default, say_format_plain); break; default: unreachable(); } log_format = format; } static const char *say_format_strs[] = { [SF_PLAIN] = "plain", [SF_JSON] = "json", [say_format_MAX] = "unknown" }; enum say_format say_format_by_name(const char *format) { return STR2ENUM(say_format, format); } static void write_to_file(struct log *log, int total); static void write_to_syslog(struct log *log, int total); /** * Rotate logs on SIGHUP */ static int log_rotate(struct log *log) { if (pm_atomic_load(&log->type) != SAY_LOGGER_FILE) return 0; ERROR_INJECT(ERRINJ_LOG_ROTATE, { usleep(10); }); int fd = open(log->path, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP); if (fd < 0) { diag_set(SystemError, "logrotate can't open %s", log->path); return -1; } /* * The whole charade's purpose is to avoid log->fd changing. * Remember, we are a signal handler. 
*/ dup2(fd, log->fd); close(fd); if (log->nonblock) { int flags; if ( (flags = fcntl(log->fd, F_GETFL, 0)) < 0 || fcntl(log->fd, F_SETFL, flags | O_NONBLOCK) < 0) { say_syserror("fcntl, fd=%i", log->fd); } } /* We are in ev signal handler * so we don't have to be worry about async signal safety */ log_say(log, S_INFO, __FILE__, __LINE__, NULL, "log file has been reopened"); /* * log_background applies only to log_default logger */ if (log == log_default && log_background && log->type == SAY_LOGGER_FILE) { dup2(log_default->fd, STDOUT_FILENO); dup2(log_default->fd, STDERR_FILENO); } return 0; } struct rotate_task { struct coio_task base; struct log *log; struct ev_loop *loop; }; static int logrotate_cb(struct coio_task *ptr) { struct rotate_task *task = (struct rotate_task *) ptr; if (log_rotate(task->log) < 0) { diag_log(); } ev_async_send(task->loop, &task->log->log_async); return 0; } static int logrotate_cleanup_cb(struct coio_task *ptr) { struct rotate_task *task = (struct rotate_task *) ptr; coio_task_destroy(&task->base); free(task); return 0; } static void log_rotate_async_cb(struct ev_loop *loop, struct ev_async *watcher, int events) { (void)loop; (void)events; struct log *log = container_of(watcher, struct log, log_async); log->rotating_threads--; fiber_cond_signal(&log->rotate_cond); } void say_logrotate(struct ev_loop *loop, struct ev_signal *w, int revents) { (void) loop; (void) w; (void) revents; int saved_errno = errno; struct log *log; rlist_foreach_entry(log, &log_rotate_list, in_log_list) { struct rotate_task *task = (struct rotate_task *) calloc(1, sizeof(*task)); if (task == NULL) { diag_set(OutOfMemory, sizeof(*task), "malloc", "say_logrotate"); diag_log(); continue; } ev_async_start(loop(), &log->log_async); log->rotating_threads++; coio_task_create(&task->base, logrotate_cb, logrotate_cleanup_cb); task->log = log; task->loop = loop(); coio_task_post(&task->base, 0); } errno = saved_errno; } /** * Initialize the logger pipe: a standalone * 
process which is fed all log messages. */ static int log_pipe_init(struct log *log, const char *init_str) { int pipefd[2]; char cmd[] = { "/bin/sh" }; char args[] = { "-c" }; char *argv[] = { cmd, args, (char *) init_str, NULL }; log->type = SAY_LOGGER_PIPE; log->format_func = say_format_plain; sigset_t mask; sigemptyset(&mask); sigaddset(&mask, SIGCHLD); if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1) say_syserror("sigprocmask"); if (pipe(pipefd) == -1) { diag_set(SystemError, "failed to create pipe"); return -1; } /* flush buffers to avoid multiple output */ /* https://github.com/tarantool/tarantool/issues/366 */ fflush(stdout); fflush(stderr); log->pid = fork(); if (log->pid == -1) { diag_set(SystemError, "failed to create process"); return -1; } if (log->pid == 0) { sigprocmask(SIG_UNBLOCK, &mask, NULL); close(pipefd[1]); dup2(pipefd[0], STDIN_FILENO); /* * Move to an own process group, to not * receive signals from the controlling * tty. This keeps the log open as long as * the parent is around. When the parent * dies, we get SIGPIPE and terminate. */ setpgid(0, 0); execv(argv[0], argv); /* does not return */ diag_set(SystemError, "can't start logger: %s", init_str); return -1; } #ifndef TARGET_OS_DARWIN /* * A courtesy to a DBA who might have * misconfigured the logger option: check whether * or not the logger process has started, and if * it didn't, abort. Notice, that if the logger * makes a slow start this is futile. */ struct timespec timeout; timeout.tv_sec = 0; timeout.tv_nsec = 1; /* Mostly to trigger preemption. */ if (sigtimedwait(&mask, NULL, &timeout) == SIGCHLD) { diag_set(IllegalParams, "logger process died"); return -1; } #endif /* OK, let's hope for the best. */ sigprocmask(SIG_UNBLOCK, &mask, NULL); close(pipefd[0]); log->fd = pipefd[1]; return 0; } /** * Initialize logging to a file and set up a log * rotation signal. 
*/ static int log_file_init(struct log *log, const char *init_str) { int fd; log->path = abspath(init_str); log->type = SAY_LOGGER_FILE; log->format_func = say_format_plain; if (log->path == NULL) { diag_set(OutOfMemory, strlen(init_str), "malloc", "abspath"); return -1; } fd = open(log->path, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP); if (fd < 0) { diag_set(SystemError, "can't open log file: %s", log->path); return -1; } log->fd = fd; return 0; } /** * Connect to syslogd using UNIX socket. * @param path UNIX socket path. * @retval not 0 Socket descriptor. * @retval -1 Socket error. */ static inline int syslog_connect_unix(const char *path) { int fd = socket(PF_UNIX, SOCK_DGRAM, 0); if (fd < 0) return -1; struct sockaddr_un un; memset(&un, 0, sizeof(un)); snprintf(un.sun_path, sizeof(un.sun_path), "%s", path); un.sun_family = AF_UNIX; if (connect(fd, (struct sockaddr *) &un, sizeof(un)) != 0) { close(fd); return -1; } return fd; } static inline int log_syslog_connect(struct log *log) { /* * Try two locations: '/dev/log' for Linux and * '/var/run/syslog' for Mac. */ log->fd = syslog_connect_unix("/dev/log"); if (log->fd < 0) log->fd = syslog_connect_unix("/var/run/syslog"); return log->fd; } /** Initialize logging to syslog */ static int log_syslog_init(struct log *log, const char *init_str) { struct say_syslog_opts opts; log->type = SAY_LOGGER_SYSLOG; /* syslog supports only one formatting function */ log->format_func = say_format_syslog; if (say_parse_syslog_opts(init_str, &opts) < 0) return -1; if (opts.identity == NULL) log->syslog_ident = strdup("tarantool"); else log->syslog_ident = strdup(opts.identity); if (opts.facility == syslog_facility_MAX) log->syslog_facility = SYSLOG_LOCAL7; else log->syslog_facility = opts.facility; say_free_syslog_opts(&opts); log->fd = log_syslog_connect(log); if (log->fd < 0) { /* syslog indent is freed in atexit(). 
*/ diag_set(SystemError, "syslog logger: %s", strerror(errno)); return -1; } return 0; } /** * Initialize logging subsystem to use in daemon mode. */ int log_create(struct log *log, const char *init_str, bool nonblock) { log->pid = 0; log->syslog_ident = NULL; log->path = NULL; log->format_func = NULL; log->level = S_INFO; log->nonblock = nonblock; log->rotating_threads = 0; fiber_cond_create(&log->rotate_cond); ev_async_init(&log->log_async, log_rotate_async_cb); setvbuf(stderr, NULL, _IONBF, 0); if (init_str != NULL) { enum say_logger_type type; if (say_parse_logger_type(&init_str, &type)) { diag_set(IllegalParams, logger_syntax_reminder); return -1; } int rc; switch (type) { case SAY_LOGGER_PIPE: rc = log_pipe_init(log, init_str); break; case SAY_LOGGER_SYSLOG: rc = log_syslog_init(log, init_str); break; case SAY_LOGGER_FILE: default: rc = log_file_init(log, init_str); break; } if (rc < 0) { return -1; } /* * Set non-blocking mode if a non-default log * output is set. Avoid setting stdout to * non-blocking: this will garble interactive * console output. 
*/ if (log->nonblock) { int flags; if ( (flags = fcntl(log->fd, F_GETFL, 0)) < 0 || fcntl(log->fd, F_SETFL, flags | O_NONBLOCK) < 0) say_syserror("fcntl, fd=%i", log->fd); } } else { log->type = SAY_LOGGER_STDERR; log->fd = STDERR_FILENO; } if (log->type == SAY_LOGGER_FILE) rlist_add_entry(&log_rotate_list, log, in_log_list); else rlist_create(&log->in_log_list); return 0; } void say_logger_init(const char *init_str, int level, int nonblock, const char *format, int background) { if (log_create(&log_std, init_str, nonblock) < 0) goto fail; log_default = &log_std; switch (log_default->type) { case SAY_LOGGER_PIPE: fprintf(stderr, "started logging into a pipe," " SIGHUP log rotation disabled\n"); break; case SAY_LOGGER_SYSLOG: fprintf(stderr, "started logging into a syslog," " SIGHUP log rotation disabled\n"); default: break; } _say = say_default; say_set_log_level(level); log_background = background; log_pid = log_default->pid; say_set_log_format(say_format_by_name(format)); if (background) { fflush(stderr); fflush(stdout); if (log_default->fd == STDERR_FILENO) { int fd = open("/dev/null", O_WRONLY); if (fd < 0) { diag_set(SystemError, "open /dev/null"); goto fail; } dup2(fd, STDERR_FILENO); dup2(fd, STDOUT_FILENO); close(fd); } else { dup2(log_default->fd, STDERR_FILENO); dup2(log_default->fd, STDOUT_FILENO); } } return; fail: diag_log(); panic("failed to initialize logging subsystem"); } void say_logger_free() { if (log_default == &log_std) log_destroy(&log_std); } /** {{{ Formatters */ /** * Format the log message in compact form: * MESSAGE: ERROR * * Used during boot time, e.g. without box.cfg(). 
*/ static int say_format_boot(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { (void) log; (void) filename; (void) line; (void) level; int total = 0; SNPRINT(total, vsnprintf, buf, len, format, ap); if (error != NULL) SNPRINT(total, snprintf, buf, len, ": %s", error); SNPRINT(total, snprintf, buf, len, "\n"); return total; } /** * The common helper for say_format_plain() and say_format_syslog() */ static int say_format_plain_tail(char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { int total = 0; struct cord *cord = cord(); if (cord) { SNPRINT(total, snprintf, buf, len, " %s", cord->name); if (fiber() && fiber()->fid != 1) { SNPRINT(total, snprintf, buf, len, "/%i/%s", fiber()->fid, fiber_name(fiber())); } } if (level == S_WARN || level == S_ERROR || level == S_SYSERROR) { /* Primitive basename(filename) */ for (const char *f = filename; *f; f++) if (*f == '/' && *(f + 1) != '\0') filename = f + 1; if (filename) { SNPRINT(total, snprintf, buf, len, " %s:%i", filename, line); } } SNPRINT(total, snprintf, buf, len, " %c> ", level_to_char(level)); SNPRINT(total, vsnprintf, buf, len, format, ap); if (error != NULL) SNPRINT(total, snprintf, buf, len, ": %s", error); SNPRINT(total, snprintf, buf, len, "\n"); return total; } /** * Format the log message in Tarantool format: * YYYY-MM-DD hh:mm:ss.ms [PID]: CORD/FID/FIBERNAME LEVEL> MSG */ int say_format_plain(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { (void) log; /* Don't use ev_now() since it requires a working event loop. 
*/ ev_tstamp now = ev_time(); time_t now_seconds = (time_t) now; struct tm tm; localtime_r(&now_seconds, &tm); /* Print time in format 2012-08-07 18:30:00.634 */ int total = strftime(buf, len, "%F %H:%M", &tm); buf += total, len -= total; SNPRINT(total, snprintf, buf, len, ":%06.3f", now - now_seconds + tm.tm_sec); /* Print pid */ SNPRINT(total, snprintf, buf, len, " [%i]", getpid()); /* Print remaining parts */ SNPRINT(total, say_format_plain_tail, buf, len, level, filename, line, error, format, ap); return total; } /** * Format log message in json format: * {"time": 1507026445.23232, "level": "WARN", "message": , * "pid": , "cord_name": , "fiber_id": , * "fiber_name": , filename": , "line": } */ int say_format_json(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { (void) log; int total = 0; SNPRINT(total, snprintf, buf, len, "{\"time\": \""); /* Don't use ev_now() since it requires a working event loop. */ ev_tstamp now = ev_time(); time_t now_seconds = (time_t) now; struct tm tm; localtime_r(&now_seconds, &tm); int written = strftime(buf, len, "%FT%H:%M", &tm); buf += written, len -= written, total += written; SNPRINT(total, snprintf, buf, len, ":%06.3f", now - now_seconds + tm.tm_sec); written = strftime(buf, len, "%z", &tm); buf += written, len -= written, total += written; SNPRINT(total, snprintf, buf, len, "\", "); SNPRINT(total, snprintf, buf, len, "\"level\": \"%s\", ", level_to_string(level)); if (strncmp(format, "json", sizeof("json")) == 0) { /* * Message is already JSON-formatted. * Get rid of {} brackets and append to the output buffer. 
*/ const char *str = va_arg(ap, const char *); assert(str != NULL); int str_len = strlen(str); assert(str_len > 2 && str[0] == '{' && str[str_len - 1] == '}'); SNPRINT(total, snprintf, buf, len, "%.*s, ", str_len - 2, str + 1); } else { /* Format message */ char *tmp = tt_static_buf(); if (vsnprintf(tmp, TT_STATIC_BUF_LEN, format, ap) < 0) return -1; SNPRINT(total, snprintf, buf, len, "\"message\": \""); /* Escape and print message */ SNPRINT(total, json_escape, buf, len, tmp); SNPRINT(total, snprintf, buf, len, "\", "); } /* in case of system errors */ if (error) { SNPRINT(total, snprintf, buf, len, "\"error\": \""); SNPRINT(total, json_escape, buf, len, error); SNPRINT(total, snprintf, buf, len, "\", "); } SNPRINT(total, snprintf, buf, len, "\"pid\": %i ", getpid()); struct cord *cord = cord(); if (cord) { SNPRINT(total, snprintf, buf, len, ", \"cord_name\": \""); SNPRINT(total, json_escape, buf, len, cord->name); SNPRINT(total, snprintf, buf, len, "\""); if (fiber() && fiber()->fid != 1) { SNPRINT(total, snprintf, buf, len, ", \"fiber_id\": %i, ", fiber()->fid); SNPRINT(total, snprintf, buf, len, "\"fiber_name\": \""); SNPRINT(total, json_escape, buf, len, fiber()->name); SNPRINT(total, snprintf, buf, len, "\""); } } if (filename) { SNPRINT(total, snprintf, buf, len, ", \"file\": \""); SNPRINT(total, json_escape, buf, len, filename); SNPRINT(total, snprintf, buf, len, "\", \"line\": %i", line); } SNPRINT(total, snprintf, buf, len, "}\n"); return total; } /** * Format the log message in syslog format. * * See RFC 5424 and RFC 3164. RFC 3164 is compatible with RFC 5424, * so it is implemented. * Protocol: * TIMESTAMP IDENTATION[PID]: CORD/FID/FIBERNAME LEVEL> MSG * - Priority value is encoded as message subject * 8 and bitwise * OR with message level; * - Timestamp must be encoded in the format: Mmm dd hh:mm:ss; * Mmm - moth abbreviation; * - Identation is application name. 
By default it is "tarantool"; */ static int say_format_syslog(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { /* Don't use ev_now() since it requires a working event loop. */ ev_tstamp now = ev_time(); time_t now_seconds = (time_t) now; struct tm tm; localtime_r(&now_seconds, &tm); int total = 0; /* Format syslog header according to RFC */ int prio = level_to_syslog_priority(level); SNPRINT(total, snprintf, buf, len, "<%d>", LOG_MAKEPRI(8 * log->syslog_facility, prio)); SNPRINT(total, strftime, buf, len, "%h %e %T ", &tm); SNPRINT(total, snprintf, buf, len, "%s[%d]:", log->syslog_ident, getpid()); /* Format message */ SNPRINT(total, say_format_plain_tail, buf, len, level, filename, line, error, format, ap); return total; } /** Formatters }}} */ /** {{{ Loggers */ /* * From pipe(7): * POSIX.1 says that write(2)s of less than PIPE_BUF bytes must be atomic: * the output data is written to the pipe as a contiguous sequence. Writes * of more than PIPE_BUF bytes may be nonatomic: the kernel may interleave * the data with data written by other processes. PIPE_BUF is 4k on Linux. * * Nevertheless, let's ignore the fact that messages can be interleaved in * some situations and set SAY_BUF_LEN_MAX to 16k for now. */ enum { SAY_BUF_LEN_MAX = 16 * 1024 }; static __thread char buf[SAY_BUF_LEN_MAX]; /** * Wrapper over write which ensures, that writes not more than buffer size. */ static ssize_t safe_write(int fd, const char *buf, int size) { /* Writes at most SAY_BUF_LEN_MAX - 1 * (1 byte was taken for 0 byte in vsnprintf). */ return write(fd, buf, MIN(size, SAY_BUF_LEN_MAX - 1)); } static void say_default(int level, const char *filename, int line, const char *error, const char *format, ...) 
{ int errsv = errno; va_list ap; va_start(ap, format); int total = log_vsay(log_default, level, filename, line, error, format, ap); if (level == S_FATAL && log_default->fd != STDERR_FILENO) { ssize_t r = safe_write(STDERR_FILENO, buf, total); (void) r; /* silence gcc warning */ } va_end(ap); errno = errsv; /* Preserve the errno. */ } /** * File and pipe logger */ static void write_to_file(struct log *log, int total) { assert(log->type == SAY_LOGGER_FILE || log->type == SAY_LOGGER_PIPE || log->type == SAY_LOGGER_STDERR); assert(total >= 0); ssize_t r = safe_write(log->fd, buf, total); (void) r; /* silence gcc warning */ } /** * Syslog logger */ static void write_to_syslog(struct log *log, int total) { assert(log->type == SAY_LOGGER_SYSLOG); assert(total >= 0); if (log->fd < 0 || safe_write(log->fd, buf, total) <= 0) { /* * Try to reconnect, if write to syslog has * failed. Syslog write can fail, if, for example, * syslogd is restarted. In such a case write to * UNIX socket starts return -1 even for UDP. */ if (log->fd >= 0) close(log->fd); log->fd = log_syslog_connect(log); if (log->fd >= 0) { /* * In a case or error the log message is * lost. We can not wait for connection - * it would block thread. Try to reconnect * on next vsay(). 
*/ ssize_t r = safe_write(log->fd, buf, total); (void) r; /* silence gcc warning */ } } } /** Loggers }}} */ /* * Init string parser(s) */ int say_check_init_str(const char *str) { enum say_logger_type type; if (say_parse_logger_type(&str, &type)) { diag_set(IllegalParams, logger_syntax_reminder); return -1; } if (type == SAY_LOGGER_SYSLOG) { struct say_syslog_opts opts; if (say_parse_syslog_opts(str, &opts) < 0) return -1; say_free_syslog_opts(&opts); } return 0; } /** * @retval string after prefix if a prefix is found, * *str also is advanced to the prefix * NULL a prefix is not found, str is left intact */ static const char * say_parse_prefix(const char **str, const char *prefix) { size_t len = strlen(prefix); if (strncmp(*str, prefix, len) == 0) { *str = *str + len; return *str; } return NULL; } int say_parse_logger_type(const char **str, enum say_logger_type *type) { if (say_parse_prefix(str, "|")) *type = SAY_LOGGER_PIPE; else if (say_parse_prefix(str, "file:")) *type = SAY_LOGGER_FILE; else if (say_parse_prefix(str, "pipe:")) *type = SAY_LOGGER_PIPE; else if (say_parse_prefix(str, "syslog:")) *type = SAY_LOGGER_SYSLOG; else if (strchr(*str, ':') == NULL) *type = SAY_LOGGER_FILE; else return -1; return 0; } static const char *syslog_facility_strs[] = { [SYSLOG_KERN] = "kern", [SYSLOG_USER] = "user", [SYSLOG_MAIL] = "mail", [SYSLOG_DAEMON] = "daemon", [SYSLOG_AUTH] = "auth", [SYSLOG_INTERN] = "intern", [SYSLOG_LPR] = "lpr", [SYSLOG_NEWS] = "news", [SYSLOG_UUCP] = "uucp", [SYSLOG_CLOCK] = "clock", [SYSLOG_AUTHPRIV] = "authpriv", [SYSLOG_FTP] = "ftp", [SYSLOG_NTP] = "ntp", [SYSLOG_AUDIT] = "audit", [SYSLOG_ALERT] = "alert", [SYSLOG_CRON] = "cron", [SYSLOG_LOCAL0] = "local0", [SYSLOG_LOCAL1] = "local1", [SYSLOG_LOCAL2] = "local2", [SYSLOG_LOCAL3] = "local3", [SYSLOG_LOCAL4] = "local4", [SYSLOG_LOCAL5] = "local5", [SYSLOG_LOCAL6] = "local6", [SYSLOG_LOCAL7] = "local7", [syslog_facility_MAX] = "unknown", }; enum syslog_facility say_syslog_facility_by_name(const 
char *facility) { return STR2ENUM(syslog_facility, facility); } int say_parse_syslog_opts(const char *init_str, struct say_syslog_opts *opts) { opts->identity = NULL; opts->facility = syslog_facility_MAX; opts->copy = strdup(init_str); if (opts->copy == NULL) { diag_set(OutOfMemory, strlen(init_str), "malloc", "opts->copy"); return -1; } char *ptr = opts->copy; const char *option, *value; /* strsep() overwrites the separator with '\0' */ while ((option = strsep(&ptr, ","))) { if (*option == '\0') break; value = option; if (say_parse_prefix(&value, "identity=")) { if (opts->identity != NULL) goto duplicate; opts->identity = value; } else if (say_parse_prefix(&value, "facility=")) { if (opts->facility != syslog_facility_MAX) goto duplicate; opts->facility = say_syslog_facility_by_name(value); if (opts->facility == syslog_facility_MAX) { diag_set(IllegalParams, "bad syslog facility option '%s'", value); goto error; } } else { diag_set(IllegalParams, "bad option '%s'", option); goto error; } } return 0; duplicate: /* Terminate the "bad" option, by overwriting '=' sign */ ((char *)value)[-1] = '\0'; diag_set(IllegalParams, "duplicate option '%s'", option); error: free(opts->copy); opts->copy = NULL; return -1; } void say_free_syslog_opts(struct say_syslog_opts *opts) { free(opts->copy); opts->copy = NULL; } void log_destroy(struct log *log) { assert(log != NULL); while(log->rotating_threads > 0) fiber_cond_wait(&log->rotate_cond); pm_atomic_store(&log->type, SAY_LOGGER_BOOT); if (log->fd != -1) close(log->fd); free(log->syslog_ident); free(log->path); rlist_del_entry(log, in_log_list); ev_async_stop(loop(), &log->log_async); fiber_cond_destroy(&log->rotate_cond); } static inline int log_vsay(struct log *log, int level, const char *filename, int line, const char *error, const char *format, va_list ap) { int errsv = errno; if (level > log->level) { return 0; } int total = log->format_func(log, buf, sizeof(buf), level, filename, line, error, format, ap); switch (log->type) 
{ case SAY_LOGGER_FILE: case SAY_LOGGER_PIPE: case SAY_LOGGER_STDERR: write_to_file(log, total); break; case SAY_LOGGER_SYSLOG: write_to_syslog(log, total); if (level == S_FATAL && log->fd != STDERR_FILENO) (void) safe_write(STDERR_FILENO, buf, total); break; case SAY_LOGGER_BOOT: { ssize_t r = safe_write(STDERR_FILENO, buf, total); (void) r; /* silence gcc warning */ break; } default: unreachable(); } errno = errsv; /* Preserve the errno. */ return total; } int log_say(struct log *log, int level, const char *filename, int line, const char *error, const char *format, ...) { va_list ap; va_start(ap, format); int total = log_vsay(log, level, filename, line, error, format, ap); va_end(ap); return total; } tarantool_1.9.1.26.g63eb81e3c/src/errinj.h0000664000000000000000000001245513306565107016454 0ustar rootroot#ifndef TARANTOOL_ERRINJ_H_INCLUDED #define TARANTOOL_ERRINJ_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "trivia/util.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Injection type */ enum errinj_type { /** boolean */ ERRINJ_BOOL = 0, /** uint64_t */ ERRINJ_INT = 1, /** double */ ERRINJ_DOUBLE = 2 }; /** * Injection state */ struct errinj { /** Name, e.g "ERRINJ_WAL_WRITE" */ const char *name; /** Type, e.g. BOOL, U64, DOUBLE */ enum errinj_type type; union { /** bool parameter */ bool bparam; /** integer parameter */ int64_t iparam; /** double parameter */ double dparam; }; }; /** * list of error injection handles. 
*/ #define ERRINJ_LIST(_) \ _(ERRINJ_TESTING, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_WAL_IO, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_WAL_ROTATE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_WAL_WRITE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_WAL_WRITE_PARTIAL, ERRINJ_INT, {.iparam = -1}) \ _(ERRINJ_WAL_WRITE_DISK, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_WAL_WRITE_EOF, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_WAL_DELAY, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_INDEX_ALLOC, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_TUPLE_ALLOC, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_TUPLE_FIELD, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_RUN_WRITE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_RUN_WRITE_TIMEOUT, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_VY_RUN_DISCARD, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_INDEX_DUMP, ERRINJ_INT, {.iparam = -1}) \ _(ERRINJ_VY_TASK_COMPLETE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_READ_PAGE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_READ_PAGE_TIMEOUT, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_VY_SQUASH_TIMEOUT, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_VY_SCHED_TIMEOUT, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_VY_GC, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_LOG_FLUSH, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_LOG_FLUSH_DELAY, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_RELAY_TIMEOUT, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_RELAY_REPORT_INTERVAL, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_RELAY_FINAL_SLEEP, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_PORT_DUMP, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_XLOG_GARBAGE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_XLOG_META, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_XLOG_READ, ERRINJ_INT, {.iparam = -1}) \ _(ERRINJ_VYRUN_INDEX_GARBAGE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VYRUN_DATA_READ, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_BUILD_SECONDARY, ERRINJ_INT, {.iparam = -1}) \ _(ERRINJ_VY_POINT_ITER_WAIT, ERRINJ_BOOL, {.bparam = false}) \ 
_(ERRINJ_RELAY_EXIT_DELAY, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_VY_DELAY_PK_LOOKUP, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT, ERRINJ_DOUBLE, {.dparam = 0}) \ _(ERRINJ_IPROTO_TX_DELAY, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_LOG_ROTATE, ERRINJ_BOOL, {.bparam = false}) \ _(ERRINJ_SNAP_COMMIT_DELAY, ERRINJ_BOOL, {.bparam = 0}) \ ENUM0(errinj_id, ERRINJ_LIST); extern struct errinj errinjs[]; /** * Returns the error injection by name * @param name injection name, e.g ERRINJ_WAL_WRITE */ struct errinj * errinj_by_name(char *name); typedef int (*errinj_cb)(struct errinj *e, void *cb_ctx); /** * Iterate over all error injections */ int errinj_foreach(errinj_cb cb, void *cb_ctx); #ifdef NDEBUG # define ERROR_INJECT(ID, CODE) # define errinj(ID, TYPE) ((struct errinj *) NULL) #else # /* Returns the error injection by id */ # define errinj(ID, TYPE) \ ({ \ assert(ID >= 0 && ID < errinj_id_MAX); \ assert(errinjs[ID].type == TYPE); \ &errinjs[ID]; \ }) # define ERROR_INJECT(ID, CODE) \ do { \ if (errinj(ID, ERRINJ_BOOL)->bparam) \ CODE; \ } while (0) #endif #define ERROR_INJECT_RETURN(ID) ERROR_INJECT(ID, return -1) #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TATRANTOOL_ERRINJ_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/version.c0000664000000000000000000000320313306560010016616 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "version.h" #include "trivia/config.h" const char * tarantool_package(void) { return PACKAGE; } const char * tarantool_version(void) { return PACKAGE_VERSION; } uint32_t tarantool_version_id() { return version_id(PACKAGE_VERSION_MAJOR, PACKAGE_VERSION_MINOR, PACKAGE_VERSION_PATCH); } tarantool_1.9.1.26.g63eb81e3c/src/tt_uuid.c0000664000000000000000000000566013306565107016633 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tt_uuid.h" #include #include #include #include /* Zeroed by the linker. */ const struct tt_uuid uuid_nil; #define CT_ASSERT(e) typedef char __ct_assert_##__LINE__[(e) ? 1 : -1] CT_ASSERT(sizeof(struct tt_uuid) == UUID_LEN); #if defined(HAVE_UUIDGEN) #include CT_ASSERT(sizeof(struct tt_uuid) == sizeof(struct uuid)); void tt_uuid_create(struct tt_uuid *uu) { uuidgen((struct uuid *) uu, 1); /* syscall */ } #else void tt_uuid_create(struct tt_uuid *uu) { random_bytes((char *) uu, sizeof(*uu)); uu->clock_seq_hi_and_reserved &= 0x3f; uu->clock_seq_hi_and_reserved |= 0x80; /* variant 1 = RFC4122 */ uu->time_hi_and_version &= 0x0FFF; uu->time_hi_and_version |= (4 << 12); /* version 4 = random */ } #endif extern inline int tt_uuid_from_string(const char *in, struct tt_uuid *uu); extern inline int tt_uuid_compare(const struct tt_uuid *a, const struct tt_uuid *b); extern inline void tt_uuid_to_string(const struct tt_uuid *uu, char *out); extern inline void tt_uuid_bswap(struct tt_uuid *uu); extern inline bool tt_uuid_is_nil(const struct tt_uuid *uu); extern inline bool tt_uuid_is_equal(const struct tt_uuid *lhs, const struct tt_uuid *rhs); char * tt_uuid_str(const struct tt_uuid *uu) { assert(TT_STATIC_BUF_LEN >= UUID_STR_LEN); char *buf = tt_static_buf(); tt_uuid_to_string(uu, buf); return buf; } int tt_uuid_from_strl(const char *in, size_t len, struct tt_uuid *uu) { char buf[UUID_STR_LEN + 1]; snprintf(buf, sizeof(buf), "%.*s", (int) len, in); return 
tt_uuid_from_string(buf, uu); } tarantool_1.9.1.26.g63eb81e3c/src/fio.h0000664000000000000000000001562113306560010015722 0ustar rootroot#ifndef TARANTOOL_FIO_H_INCLUDED #define TARANTOOL_FIO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * POSIX file I/O: take into account EINTR (read and write exactly * the requested number of bytes), log errors nicely, provide batch * writes. */ #include #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ const char * fio_filename(int fd); struct iovec; /** * Read up to N bytes from file into the buffer, * re-trying for interrupted reads. In case of a non-transient * error, writes a message to the error log. 
* * @param fd file descriptor. * @param buf pointer to the buffer. * @param count how many bytes to read. * * @return When count is 0, returns 0. When count > SSIZE_MAX, * the result is unspecified. Otherwise, returns the total * number of bytes read, or -1 if error. In case EOF is * reached and less than count bytes are read, the actual * number of bytes read is returned (can be 0 or more). * * If an error occurs after a few bytes were read, -1 is * returned and current read offset is unspecified. */ ssize_t fio_read(int fd, void *buf, size_t count); /** * Read up to N bytes from file into the buffer, * re-trying for interrupted reads. In case of a non-transient * error, writes a message to the error log. * * * @param fd file descriptor. * @param buf pointer to the buffer. * @param count how many bytes to read. * @param offset file offset * * @return When count is 0, returns 0. When count > SSIZE_MAX, * the result is unspecified. Otherwise, returns the total * number of bytes read, or -1 if error. In case EOF is * reached and less than count bytes are read, the actual * number of bytes read is returned (can be 0 or more). */ ssize_t fio_pread(int fd, void *buf, size_t count, off_t offset); /** * Write the given buffer, re-trying for partial writes * (when interrupted by a signal, for instance). In case * of a non-transient error, writes a message to the error * log. * * @param fd file descriptor. * @param buf pointer to a buffer. * @param count buffer size. * * @retval 0 on success * @retval -1 on error. If an error occurs after a few bytes were written * then current write offset of \a fd is unspecified. */ int fio_writen(int fd, const void *buf, size_t count); /** * A simple wrapper around writev(). * Re-tries write in case of EINTR. * In case of a serious error, writes a message to the error log. 
* * This function does not retry for partial writes because: * - it requires tedious byte counting, even when there is no * partial write, just to find out what happened * - on most file systems, a partial write happens * only in case of ENOSPC, which won't go away * if we retry. * - there is a remote chance of partial write of a large iov, * (> 4MB) due to a signal interrupt, but this is so rare that * it's not worth slowing down the main case for the sake of it. * - to finish a partial write one has to allocate a copy of iov * * @param fd file descriptor. * @param iov a vector of buffer descriptors (@sa man * writev). * @param count vector size * * @return When count is 0, returns 0. When count is positive, * returns the total number of bytes written, or -1 if error. */ ssize_t fio_writev(int fd, struct iovec *iov, int iovcnt); /** * A wrapper around writev, but retries for partial writes * * @param fd file descriptor. * @param iov a vector of buffer descriptors (@sa man * writev). * @param count vector size * * @return When count is 0, returns 0. When count is positive, * returns the total number of bytes written, or -1 if error. */ ssize_t fio_writevn(int fd, struct iovec *iov, int iovcnt); /** * An error-reporting aware wrapper around lseek(). * * @return file offset value or -1 if error */ off_t fio_lseek(int fd, off_t offset, int whence); /** Truncate a file and log a message in case of error. */ int fio_truncate(int fd, off_t offset); /** * A helper wrapper around writev() to do batched * writes. */ struct fio_batch { /** Total number of bytes in batched rows. */ size_t bytes; /** Total number of batched rows.*/ int iovcnt; /** A cap on how many rows can be batched. Can be set to INT_MAX. */ int max_iov; /* Batched rows. 
*/ struct iovec iov[]; }; struct fio_batch * fio_batch_new(void); void fio_batch_delete(struct fio_batch *batch); static inline void fio_batch_reset(struct fio_batch *batch) { batch->bytes = 0; batch->iovcnt = 0; } static inline size_t fio_batch_size(struct fio_batch *batch) { return batch->bytes; } static inline int fio_batch_unused(struct fio_batch *batch) { return batch->max_iov - batch->iovcnt; } /** * Add a row to a batch. * @pre iovcnt is the number of iov elements previously * booked with fio_batch_book() and filled with data */ size_t fio_batch_add(struct fio_batch *batch, int count); /** * Ensure the iov has at least 'count' elements. */ static inline struct iovec * fio_batch_book(struct fio_batch *batch, int count) { if (batch->iovcnt + count <= batch->max_iov) return batch->iov + batch->iovcnt; return NULL; } /** * Write batch to fd using writev(2) and rotate batch. * In case of partial write batch will contain remaining data. * \sa fio_writev() */ ssize_t fio_batch_write(struct fio_batch *batch, int fd); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_FIO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/uri.c0000664000000000000000000037000313306560010015735 0ustar rootroot #line 1 "src/uri.rl" /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "uri.h" #include /* SNPRINT */ #include #include /* snprintf */ int uri_parse(struct uri *uri, const char *p) { const char *pe = p + strlen(p); const char *eof = pe; int cs; memset(uri, 0, sizeof(*uri)); if (p == pe) return -1; const char *s = NULL, *login = NULL, *scheme = NULL; size_t login_len = 0, scheme_len = 0; #line 53 "src/uri.c" static const int uri_start = 134; static const int uri_first_final = 134; static const int uri_error = 0; static const int uri_en_main = 134; #line 61 "src/uri.c" { cs = uri_start; } #line 66 "src/uri.c" { if ( p == pe ) goto _test_eof; switch ( cs ) { case 134: switch( (*p) ) { case 33: goto tr140; case 35: goto tr141; case 37: goto tr142; case 47: goto tr143; case 59: goto tr140; case 61: goto tr140; case 63: goto tr145; case 64: goto st194; case 91: goto st38; case 95: goto tr140; case 117: goto tr148; case 126: goto tr140; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr140; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr147; } else if ( (*p) >= 65 ) goto tr147; } else goto tr144; goto st0; st0: cs = 0; goto _out; tr140: #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st135; st135: if ( ++p == pe ) goto _test_eof135; case 135: 
#line 112 "src/uri.c" switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 47: goto tr151; case 58: goto tr152; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st135; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else goto st135; goto st0; tr141: #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr149: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr160: #line 71 "src/uri.rl" { s = p; } #line 72 "src/uri.rl" { uri->query = s; uri->query_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr162: #line 72 "src/uri.rl" { uri->query = s; uri->query_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr165: #line 138 "src/uri.rl" { s = p; } #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr176: #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr191: #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr200: #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr213: #line 119 "src/uri.rl" { /* * This action 
is also called for path_* terms. * I absolutely have no idea why. */ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } #line 168 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st136; tr307: #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. */ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } #line 193 "src/uri.rl" { s = p; } goto st136; st136: if ( ++p == pe ) goto _test_eof136; case 136: #line 257 "src/uri.c" switch( (*p) ) { case 33: goto tr155; case 37: goto tr156; case 61: goto tr155; case 95: goto tr155; case 124: goto tr155; case 126: goto tr155; } if ( (*p) < 63 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto tr155; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr155; } else goto tr155; goto st0; tr155: #line 75 "src/uri.rl" { s = p; } goto st137; st137: if ( ++p == pe ) goto _test_eof137; case 137: #line 283 "src/uri.c" switch( (*p) ) { case 33: goto st137; case 37: goto st1; case 61: goto st137; case 95: goto st137; case 124: goto st137; case 126: goto st137; } if ( (*p) < 63 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st137; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st137; } else goto st137; goto st0; tr156: #line 75 "src/uri.rl" { s = p; } goto st1; st1: if ( ++p == pe ) goto _test_eof1; case 1: #line 309 "src/uri.c" switch( (*p) ) { case 37: goto st137; case 117: goto st2; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st137; } else if 
( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st137; } else goto st137; goto st0; st2: if ( ++p == pe ) goto _test_eof2; case 2: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st3; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st3; } else goto st3; goto st0; st3: if ( ++p == pe ) goto _test_eof3; case 3: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st4; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st4; } else goto st4; goto st0; st4: if ( ++p == pe ) goto _test_eof4; case 4: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st5; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st5; } else goto st5; goto st0; st5: if ( ++p == pe ) goto _test_eof5; case 5: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st137; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st137; } else goto st137; goto st0; tr142: #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st6; st6: if ( ++p == pe ) goto _test_eof6; case 6: #line 385 "src/uri.c" switch( (*p) ) { case 37: goto st135; case 117: goto st7; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st135; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st135; } else goto st135; goto st0; st7: if ( ++p == pe ) goto _test_eof7; case 7: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st8; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st8; } else goto st8; goto st0; st8: if ( ++p == pe ) goto _test_eof8; case 8: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st9; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st9; } else goto st9; goto st0; st9: if ( ++p == pe ) goto _test_eof9; case 9: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st10; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st10; } else goto st10; goto st0; st10: if ( ++p == pe ) goto _test_eof10; case 10: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 
57 ) goto st135; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st135; } else goto st135; goto st0; tr151: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } goto st138; tr167: #line 138 "src/uri.rl" { s = p; } #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } goto st138; tr177: #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } goto st138; tr192: #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } goto st138; tr201: #line 169 "src/uri.rl" { s = p; } goto st138; st138: if ( ++p == pe ) goto _test_eof138; case 138: #line 488 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; st11: if ( ++p == pe ) goto _test_eof11; case 11: switch( (*p) ) { case 37: goto st138; case 117: goto st12; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st138; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st138; } else goto st138; goto st0; st12: if ( ++p == pe ) goto _test_eof12; case 12: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st13; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st13; } else goto st13; goto st0; st13: if ( ++p == pe ) goto _test_eof13; case 13: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st14; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st14; } else goto st14; goto st0; st14: if ( ++p == pe ) goto _test_eof14; case 14: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto 
st15; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st15; } else goto st15; goto st0; st15: if ( ++p == pe ) goto _test_eof15; case 15: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st138; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st138; } else goto st138; goto st0; tr145: #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr153: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr169: #line 138 "src/uri.rl" { s = p; } #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr179: #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr195: #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr203: #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr215: #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. 
*/ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } #line 168 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 193 "src/uri.rl" { s = p; } goto st139; tr308: #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. */ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } #line 193 "src/uri.rl" { s = p; } goto st139; st139: if ( ++p == pe ) goto _test_eof139; case 139: #line 686 "src/uri.c" switch( (*p) ) { case 33: goto tr159; case 35: goto tr160; case 37: goto tr161; case 61: goto tr159; case 95: goto tr159; case 124: goto tr159; case 126: goto tr159; } if ( (*p) < 63 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto tr159; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr159; } else goto tr159; goto st0; tr159: #line 71 "src/uri.rl" { s = p; } goto st140; st140: if ( ++p == pe ) goto _test_eof140; case 140: #line 713 "src/uri.c" switch( (*p) ) { case 33: goto st140; case 35: goto tr162; case 37: goto st16; case 61: goto st140; case 95: goto st140; case 124: goto st140; case 126: goto st140; } if ( (*p) < 63 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st140; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st140; } else goto st140; goto st0; tr161: #line 71 "src/uri.rl" { s = p; } goto st16; st16: if ( ++p == pe ) goto _test_eof16; case 16: #line 740 "src/uri.c" switch( (*p) ) { case 37: goto st140; case 117: goto st17; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st140; } else if ( (*p) > 70 ) { if 
( 97 <= (*p) && (*p) <= 102 ) goto st140; } else goto st140; goto st0; st17: if ( ++p == pe ) goto _test_eof17; case 17: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st18; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st18; } else goto st18; goto st0; st18: if ( ++p == pe ) goto _test_eof18; case 18: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st19; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st19; } else goto st19; goto st0; st19: if ( ++p == pe ) goto _test_eof19; case 19: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st20; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st20; } else goto st20; goto st0; st20: if ( ++p == pe ) goto _test_eof20; case 20: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st140; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st140; } else goto st140; goto st0; tr152: #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st141; tr229: #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st141; st141: if ( ++p == pe ) goto _test_eof141; case 141: #line 825 "src/uri.c" switch( (*p) ) { case 33: goto tr164; case 35: goto tr165; case 37: goto tr166; case 47: goto tr167; case 59: goto tr164; case 61: goto tr164; case 63: goto tr169; case 64: goto tr170; case 95: goto tr164; case 126: goto tr164; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr164; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr171; } else if ( (*p) >= 65 ) goto tr171; } else goto tr168; goto st0; tr164: #line 148 "src/uri.rl" { s = p; } goto st21; st21: if ( ++p == pe ) goto _test_eof21; case 21: #line 858 "src/uri.c" switch( (*p) ) { case 33: goto st21; case 37: goto st22; case 59: goto 
st21; case 61: goto st21; case 64: goto tr23; case 95: goto st21; case 126: goto st21; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st21; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st21; } else if ( (*p) >= 65 ) goto st21; } else goto st21; goto st0; tr166: #line 148 "src/uri.rl" { s = p; } goto st22; st22: if ( ++p == pe ) goto _test_eof22; case 22: #line 888 "src/uri.c" switch( (*p) ) { case 37: goto st21; case 117: goto st23; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st21; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st21; } else goto st21; goto st0; st23: if ( ++p == pe ) goto _test_eof23; case 23: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st24; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st24; } else goto st24; goto st0; st24: if ( ++p == pe ) goto _test_eof24; case 24: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st25; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st25; } else goto st25; goto st0; st25: if ( ++p == pe ) goto _test_eof25; case 25: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st26; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st26; } else goto st26; goto st0; st26: if ( ++p == pe ) goto _test_eof26; case 26: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st21; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st21; } else goto st21; goto st0; tr23: #line 149 "src/uri.rl" { uri->password = s; uri->password_len = p - s; } #line 153 "src/uri.rl" { uri->login = login; uri->login_len = login_len; } goto st27; tr154: #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 153 "src/uri.rl" { uri->login = login; uri->login_len = login_len; } goto st27; tr170: #line 148 "src/uri.rl" { s = p; } #line 149 "src/uri.rl" { uri->password = s; uri->password_len = p - s; } #line 153 "src/uri.rl" { uri->login = login; uri->login_len = login_len; } goto 
st27; st27: if ( ++p == pe ) goto _test_eof27; case 27: #line 978 "src/uri.c" switch( (*p) ) { case 33: goto tr28; case 37: goto tr29; case 47: goto tr30; case 59: goto tr28; case 61: goto tr28; case 91: goto st38; case 95: goto tr28; case 117: goto tr33; case 126: goto tr28; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr28; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr28; } else if ( (*p) >= 65 ) goto tr28; } else goto tr31; goto st0; tr28: #line 100 "src/uri.rl" { s = p; } goto st142; st142: if ( ++p == pe ) goto _test_eof142; case 142: #line 1010 "src/uri.c" switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; tr29: #line 100 "src/uri.rl" { s = p; } goto st28; st28: if ( ++p == pe ) goto _test_eof28; case 28: #line 1039 "src/uri.c" switch( (*p) ) { case 37: goto st142; case 117: goto st29; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st142; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st142; } else goto st142; goto st0; st29: if ( ++p == pe ) goto _test_eof29; case 29: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st30; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st30; } else goto st30; goto st0; st30: if ( ++p == pe ) goto _test_eof30; case 30: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st31; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st31; } else goto st31; goto st0; st31: if ( ++p == pe ) goto _test_eof31; case 31: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st32; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st32; } else goto st32; goto st0; st32: if ( ++p == pe ) goto 
_test_eof32; case 32: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st142; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st142; } else goto st142; goto st0; tr173: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st143; tr194: #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st143; st143: if ( ++p == pe ) goto _test_eof143; case 143: #line 1120 "src/uri.c" switch( (*p) ) { case 35: goto tr165; case 47: goto tr167; case 63: goto tr169; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto tr174; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr175; } else goto tr175; goto st0; tr174: #line 138 "src/uri.rl" { s = p; } goto st144; st144: if ( ++p == pe ) goto _test_eof144; case 144: #line 1143 "src/uri.c" switch( (*p) ) { case 35: goto tr176; case 47: goto tr177; case 63: goto tr179; } if ( 48 <= (*p) && (*p) <= 57 ) goto st144; goto st0; tr175: #line 138 "src/uri.rl" { s = p; } goto st145; st145: if ( ++p == pe ) goto _test_eof145; case 145: #line 1160 "src/uri.c" switch( (*p) ) { case 35: goto tr176; case 47: goto tr177; case 63: goto tr179; } if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st145; } else if ( (*p) >= 65 ) goto st145; goto st0; tr30: #line 190 "src/uri.rl" { s = p; } goto st146; st146: if ( ++p == pe ) goto _test_eof146; case 146: #line 1180 "src/uri.c" switch( (*p) ) { case 33: goto st147; case 37: goto st33; case 61: goto st147; case 95: goto st147; case 124: goto st147; case 126: goto st147; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st147; } else if ( (*p) > 59 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st147; } else if ( (*p) >= 64 ) goto st147; } else goto st147; goto st0; st147: if ( ++p == pe ) goto _test_eof147; case 147: switch( (*p) ) { case 33: goto st147; case 37: goto st33; case 61: goto st147; case 95: goto st147; case 124: 
goto st147; case 126: goto st147; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st147; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st147; } else goto st147; goto st0; st33: if ( ++p == pe ) goto _test_eof33; case 33: switch( (*p) ) { case 37: goto st147; case 117: goto st34; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st147; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st147; } else goto st147; goto st0; st34: if ( ++p == pe ) goto _test_eof34; case 34: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st35; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st35; } else goto st35; goto st0; st35: if ( ++p == pe ) goto _test_eof35; case 35: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st36; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st36; } else goto st36; goto st0; st36: if ( ++p == pe ) goto _test_eof36; case 36: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st37; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st37; } else goto st37; goto st0; st37: if ( ++p == pe ) goto _test_eof37; case 37: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st147; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st147; } else goto st147; goto st0; tr31: #line 107 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st148; st148: if ( ++p == pe ) goto _test_eof148; case 148: #line 1301 "src/uri.c" switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st149; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st161; goto st0; st149: if ( ++p == pe ) goto _test_eof149; case 149: 
switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st150; goto st0; st150: if ( ++p == pe ) goto _test_eof150; case 150: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st151; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st159; goto st0; st151: if ( ++p == pe ) goto _test_eof151; case 151: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st152; goto st0; st152: if ( ++p == pe ) goto _test_eof152; case 152: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st153; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st157; goto st0; st153: 
if ( ++p == pe ) goto _test_eof153; case 153: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st154; goto st0; st154: if ( ++p == pe ) goto _test_eof154; case 154: switch( (*p) ) { case 33: goto st142; case 35: goto tr191; case 37: goto st28; case 47: goto tr192; case 58: goto tr194; case 59: goto st142; case 61: goto st142; case 63: goto tr195; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st155; goto st0; st155: if ( ++p == pe ) goto _test_eof155; case 155: switch( (*p) ) { case 33: goto st142; case 35: goto tr191; case 37: goto st28; case 47: goto tr192; case 58: goto tr194; case 59: goto st142; case 61: goto st142; case 63: goto tr195; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st156; goto st0; st156: if ( ++p == pe ) goto _test_eof156; case 156: switch( (*p) ) { case 33: goto st142; case 35: goto tr191; case 37: goto st28; case 47: goto tr192; case 58: goto tr194; case 61: goto st142; case 63: goto tr195; case 95: goto st142; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st157: if ( ++p == pe ) goto _test_eof157; case 157: switch( (*p) ) { case 33: 
goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st153; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st158; goto st0; st158: if ( ++p == pe ) goto _test_eof158; case 158: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st153; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st159: if ( ++p == pe ) goto _test_eof159; case 159: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st151; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st160; goto st0; st160: if ( ++p == pe ) goto _test_eof160; case 160: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st151; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st161: if ( ++p == pe ) goto _test_eof161; case 161: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: 
goto st149; case 47: goto tr151; case 58: goto tr173; case 59: goto st142; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st142; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else if ( (*p) >= 65 ) goto st142; } else goto st162; goto st0; st162: if ( ++p == pe ) goto _test_eof162; case 162: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 46: goto st149; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st38: if ( ++p == pe ) goto _test_eof38; case 38: if ( (*p) == 58 ) goto tr45; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto tr44; } else if ( (*p) >= 48 ) goto tr44; goto st0; tr44: #line 114 "src/uri.rl" { s = p; } goto st39; st39: if ( ++p == pe ) goto _test_eof39; case 39: #line 1731 "src/uri.c" if ( (*p) == 58 ) goto st43; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st40; } else if ( (*p) >= 48 ) goto st40; goto st0; st40: if ( ++p == pe ) goto _test_eof40; case 40: if ( (*p) == 58 ) goto st43; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st41; } else if ( (*p) >= 48 ) goto st41; goto st0; st41: if ( ++p == pe ) goto _test_eof41; case 41: if ( (*p) == 58 ) goto st43; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st42; } else if ( (*p) >= 48 ) goto st42; goto st0; st42: if ( ++p == pe ) goto _test_eof42; case 42: if ( (*p) == 58 ) goto st43; goto st0; st43: if ( ++p == pe ) goto _test_eof43; case 43: switch( (*p) ) { case 58: goto st48; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st44; } else if ( (*p) >= 48 ) goto st44; goto st0; st44: if ( ++p == pe ) goto _test_eof44; 
case 44: switch( (*p) ) { case 58: goto st48; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st45; } else if ( (*p) >= 48 ) goto st45; goto st0; st45: if ( ++p == pe ) goto _test_eof45; case 45: switch( (*p) ) { case 58: goto st48; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st46; } else if ( (*p) >= 48 ) goto st46; goto st0; st46: if ( ++p == pe ) goto _test_eof46; case 46: switch( (*p) ) { case 58: goto st48; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st47; } else if ( (*p) >= 48 ) goto st47; goto st0; st47: if ( ++p == pe ) goto _test_eof47; case 47: switch( (*p) ) { case 58: goto st48; case 93: goto tr52; } goto st0; st48: if ( ++p == pe ) goto _test_eof48; case 48: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st49; } else if ( (*p) >= 48 ) goto st49; goto st0; st49: if ( ++p == pe ) goto _test_eof49; case 49: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st50; } else if ( (*p) >= 48 ) goto st50; goto st0; st50: if ( ++p == pe ) goto _test_eof50; case 50: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st51; } else if ( (*p) >= 48 ) goto st51; goto st0; st51: if ( ++p == pe ) goto _test_eof51; case 51: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st52; } else if ( (*p) >= 48 ) goto st52; goto st0; st52: if ( ++p == pe ) goto _test_eof52; case 52: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; } goto st0; st53: if ( ++p == pe ) goto _test_eof53; case 53: switch( (*p) ) { case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st54; } else if ( (*p) >= 48 ) goto st54; goto st0; st54: if ( ++p == pe ) goto _test_eof54; case 54: switch( 
(*p) ) { case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st55; } else if ( (*p) >= 48 ) goto st55; goto st0; st55: if ( ++p == pe ) goto _test_eof55; case 55: switch( (*p) ) { case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st56; } else if ( (*p) >= 48 ) goto st56; goto st0; st56: if ( ++p == pe ) goto _test_eof56; case 56: switch( (*p) ) { case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st57; } else if ( (*p) >= 48 ) goto st57; goto st0; st57: if ( ++p == pe ) goto _test_eof57; case 57: switch( (*p) ) { case 58: goto st58; case 93: goto tr52; } goto st0; st58: if ( ++p == pe ) goto _test_eof58; case 58: switch( (*p) ) { case 58: goto st63; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st59; } else if ( (*p) >= 48 ) goto st59; goto st0; st59: if ( ++p == pe ) goto _test_eof59; case 59: switch( (*p) ) { case 58: goto st63; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st60; } else if ( (*p) >= 48 ) goto st60; goto st0; st60: if ( ++p == pe ) goto _test_eof60; case 60: switch( (*p) ) { case 58: goto st63; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st61; } else if ( (*p) >= 48 ) goto st61; goto st0; st61: if ( ++p == pe ) goto _test_eof61; case 61: switch( (*p) ) { case 58: goto st63; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st62; } else if ( (*p) >= 48 ) goto st62; goto st0; st62: if ( ++p == pe ) goto _test_eof62; case 62: switch( (*p) ) { case 58: goto st63; case 93: goto tr52; } goto st0; st63: if ( ++p == pe ) goto _test_eof63; case 63: switch( (*p) ) { case 58: goto st68; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st64; } else if ( (*p) >= 48 ) goto st64; goto st0; st64: if ( ++p == pe ) goto _test_eof64; case 64: switch( (*p) ) { case 58: 
goto st68; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st65; } else if ( (*p) >= 48 ) goto st65; goto st0; st65: if ( ++p == pe ) goto _test_eof65; case 65: switch( (*p) ) { case 58: goto st68; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st66; } else if ( (*p) >= 48 ) goto st66; goto st0; st66: if ( ++p == pe ) goto _test_eof66; case 66: switch( (*p) ) { case 58: goto st68; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st67; } else if ( (*p) >= 48 ) goto st67; goto st0; st67: if ( ++p == pe ) goto _test_eof67; case 67: switch( (*p) ) { case 58: goto st68; case 93: goto tr52; } goto st0; st68: if ( ++p == pe ) goto _test_eof68; case 68: switch( (*p) ) { case 58: goto st73; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st69; } else if ( (*p) >= 48 ) goto st69; goto st0; st69: if ( ++p == pe ) goto _test_eof69; case 69: switch( (*p) ) { case 58: goto st73; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st70; } else if ( (*p) >= 48 ) goto st70; goto st0; st70: if ( ++p == pe ) goto _test_eof70; case 70: switch( (*p) ) { case 58: goto st73; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st71; } else if ( (*p) >= 48 ) goto st71; goto st0; st71: if ( ++p == pe ) goto _test_eof71; case 71: switch( (*p) ) { case 58: goto st73; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st72; } else if ( (*p) >= 48 ) goto st72; goto st0; st72: if ( ++p == pe ) goto _test_eof72; case 72: switch( (*p) ) { case 58: goto st73; case 93: goto tr52; } goto st0; st73: if ( ++p == pe ) goto _test_eof73; case 73: switch( (*p) ) { case 58: goto st78; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st74; } else if ( (*p) >= 48 ) goto st74; goto st0; st74: if ( ++p == pe ) goto _test_eof74; case 74: switch( (*p) ) { case 58: goto st78; case 
93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st75; } else if ( (*p) >= 48 ) goto st75; goto st0; st75: if ( ++p == pe ) goto _test_eof75; case 75: switch( (*p) ) { case 58: goto st78; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st76; } else if ( (*p) >= 48 ) goto st76; goto st0; st76: if ( ++p == pe ) goto _test_eof76; case 76: switch( (*p) ) { case 58: goto st78; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st77; } else if ( (*p) >= 48 ) goto st77; goto st0; st77: if ( ++p == pe ) goto _test_eof77; case 77: switch( (*p) ) { case 58: goto st78; case 93: goto tr52; } goto st0; st78: if ( ++p == pe ) goto _test_eof78; case 78: if ( (*p) == 93 ) goto tr52; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st79; } else if ( (*p) >= 48 ) goto st79; goto st0; st79: if ( ++p == pe ) goto _test_eof79; case 79: if ( (*p) == 93 ) goto tr52; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st80; } else if ( (*p) >= 48 ) goto st80; goto st0; st80: if ( ++p == pe ) goto _test_eof80; case 80: if ( (*p) == 93 ) goto tr52; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st81; } else if ( (*p) >= 48 ) goto st81; goto st0; st81: if ( ++p == pe ) goto _test_eof81; case 81: if ( (*p) == 93 ) goto tr52; if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st82; } else if ( (*p) >= 48 ) goto st82; goto st0; st82: if ( ++p == pe ) goto _test_eof82; case 82: if ( (*p) == 93 ) goto tr52; goto st0; tr52: #line 115 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 2; } goto st163; st163: if ( ++p == pe ) goto _test_eof163; case 163: #line 2290 "src/uri.c" switch( (*p) ) { case 35: goto tr200; case 47: goto tr201; case 58: goto st143; case 63: goto tr203; } goto st0; tr45: #line 114 "src/uri.rl" { s = p; } goto st83; st83: if ( ++p == pe ) goto _test_eof83; case 83: #line 2306 "src/uri.c" switch( (*p) ) { case 58: goto st84; case 93: goto tr52; } if 
( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st44; } else if ( (*p) >= 48 ) goto st44; goto st0; st84: if ( ++p == pe ) goto _test_eof84; case 84: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; case 102: goto st85; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 101 ) goto st49; } else if ( (*p) >= 48 ) goto st49; goto st0; st85: if ( ++p == pe ) goto _test_eof85; case 85: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; case 102: goto st86; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 101 ) goto st50; } else if ( (*p) >= 48 ) goto st50; goto st0; st86: if ( ++p == pe ) goto _test_eof86; case 86: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; case 102: goto st87; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 101 ) goto st51; } else if ( (*p) >= 48 ) goto st51; goto st0; st87: if ( ++p == pe ) goto _test_eof87; case 87: switch( (*p) ) { case 58: goto st53; case 93: goto tr52; case 102: goto st88; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 101 ) goto st52; } else if ( (*p) >= 48 ) goto st52; goto st0; st88: if ( ++p == pe ) goto _test_eof88; case 88: switch( (*p) ) { case 58: goto st89; case 93: goto tr52; } goto st0; st89: if ( ++p == pe ) goto _test_eof89; case 89: switch( (*p) ) { case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st54; } else if ( (*p) >= 48 ) goto st90; goto st0; st90: if ( ++p == pe ) goto _test_eof90; case 90: switch( (*p) ) { case 46: goto st91; case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st55; } else if ( (*p) >= 48 ) goto st102; goto st0; st91: if ( ++p == pe ) goto _test_eof91; case 91: if ( 48 <= (*p) && (*p) <= 57 ) goto st92; goto st0; st92: if ( ++p == pe ) goto _test_eof92; case 92: if ( (*p) == 46 ) goto st93; if ( 48 <= (*p) && (*p) <= 57 ) goto st100; goto st0; st93: if ( ++p == pe ) goto _test_eof93; case 93: if ( 48 <= (*p) && (*p) <= 57 ) goto st94; goto st0; st94: if ( ++p == pe ) 
goto _test_eof94; case 94: if ( (*p) == 46 ) goto st95; if ( 48 <= (*p) && (*p) <= 57 ) goto st98; goto st0; st95: if ( ++p == pe ) goto _test_eof95; case 95: if ( 48 <= (*p) && (*p) <= 57 ) goto st96; goto st0; st96: if ( ++p == pe ) goto _test_eof96; case 96: if ( (*p) == 93 ) goto tr52; if ( 48 <= (*p) && (*p) <= 57 ) goto st97; goto st0; st97: if ( ++p == pe ) goto _test_eof97; case 97: if ( (*p) == 93 ) goto tr52; if ( 48 <= (*p) && (*p) <= 57 ) goto st82; goto st0; st98: if ( ++p == pe ) goto _test_eof98; case 98: if ( (*p) == 46 ) goto st95; if ( 48 <= (*p) && (*p) <= 57 ) goto st99; goto st0; st99: if ( ++p == pe ) goto _test_eof99; case 99: if ( (*p) == 46 ) goto st95; goto st0; st100: if ( ++p == pe ) goto _test_eof100; case 100: if ( (*p) == 46 ) goto st93; if ( 48 <= (*p) && (*p) <= 57 ) goto st101; goto st0; st101: if ( ++p == pe ) goto _test_eof101; case 101: if ( (*p) == 46 ) goto st93; goto st0; st102: if ( ++p == pe ) goto _test_eof102; case 102: switch( (*p) ) { case 46: goto st91; case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st56; } else if ( (*p) >= 48 ) goto st103; goto st0; st103: if ( ++p == pe ) goto _test_eof103; case 103: switch( (*p) ) { case 46: goto st91; case 58: goto st58; case 93: goto tr52; } if ( (*p) > 57 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st57; } else if ( (*p) >= 48 ) goto st57; goto st0; tr33: #line 100 "src/uri.rl" { s = p; } goto st164; st164: if ( ++p == pe ) goto _test_eof164; case 164: #line 2542 "src/uri.c" switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 110: goto st165; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st165: if ( ++p == pe ) goto _test_eof165; case 165: switch( (*p) ) { 
case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 105: goto st166; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st166: if ( ++p == pe ) goto _test_eof166; case 166: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr151; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 120: goto st167; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; st167: if ( ++p == pe ) goto _test_eof167; case 167: switch( (*p) ) { case 33: goto st142; case 35: goto tr149; case 37: goto st28; case 47: goto tr207; case 58: goto tr173; case 61: goto st142; case 63: goto tr153; case 95: goto st142; case 126: goto st142; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st142; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st142; } else goto st142; goto st0; tr207: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } goto st168; st168: if ( ++p == pe ) goto _test_eof168; case 168: #line 2648 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 58: goto st169; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; st169: if ( ++p == pe ) goto _test_eof169; case 169: switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 46: goto tr209; case 47: goto tr210; case 61: goto 
st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; tr209: #line 135 "src/uri.rl" { s = p;} goto st170; st170: if ( ++p == pe ) goto _test_eof170; case 170: #line 2702 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 47: goto st171; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; tr210: #line 135 "src/uri.rl" { s = p;} goto st171; st171: if ( ++p == pe ) goto _test_eof171; case 171: #line 2731 "src/uri.c" switch( (*p) ) { case 33: goto st172; case 35: goto tr141; case 37: goto st104; case 47: goto st138; case 58: goto st138; case 61: goto st172; case 63: goto tr145; case 95: goto st172; case 124: goto st138; case 126: goto st172; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st172; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st172; } else goto st172; goto st0; st172: if ( ++p == pe ) goto _test_eof172; case 172: switch( (*p) ) { case 33: goto st172; case 35: goto tr213; case 37: goto st104; case 47: goto st171; case 58: goto tr214; case 61: goto st172; case 63: goto tr215; case 95: goto st172; case 124: goto st138; case 126: goto st172; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st172; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st172; } else goto st172; goto st0; st104: if ( ++p == pe ) goto _test_eof104; case 104: switch( (*p) ) { case 37: goto st172; case 117: goto st105; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st172; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st172; } else goto st172; goto 
st0; st105: if ( ++p == pe ) goto _test_eof105; case 105: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st106; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st106; } else goto st106; goto st0; st106: if ( ++p == pe ) goto _test_eof106; case 106: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st107; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st107; } else goto st107; goto st0; st107: if ( ++p == pe ) goto _test_eof107; case 107: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st108; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st108; } else goto st108; goto st0; st108: if ( ++p == pe ) goto _test_eof108; case 108: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st172; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st172; } else goto st172; goto st0; tr214: #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. */ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } goto st173; st173: if ( ++p == pe ) goto _test_eof173; case 173: #line 2869 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr200; case 37: goto st11; case 47: goto tr201; case 61: goto st138; case 63: goto tr203; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; tr168: #line 148 "src/uri.rl" { s = p; } #line 138 "src/uri.rl" { s = p; } goto st174; st174: if ( ++p == pe ) goto _test_eof174; case 174: #line 2900 "src/uri.c" switch( (*p) ) { case 33: goto st21; case 35: goto tr176; case 37: goto st22; case 47: goto tr177; case 59: goto st21; case 61: goto st21; case 63: 
goto tr179; case 64: goto tr23; case 95: goto st21; case 126: goto st21; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st21; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st21; } else if ( (*p) >= 65 ) goto st21; } else goto st174; goto st0; tr171: #line 148 "src/uri.rl" { s = p; } #line 138 "src/uri.rl" { s = p; } goto st175; st175: if ( ++p == pe ) goto _test_eof175; case 175: #line 2935 "src/uri.c" switch( (*p) ) { case 33: goto st21; case 35: goto tr176; case 37: goto st22; case 47: goto tr177; case 59: goto st21; case 61: goto st21; case 63: goto tr179; case 64: goto tr23; case 95: goto st21; case 126: goto st21; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 57 ) goto st21; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st175; } else goto st175; goto st0; tr143: #line 190 "src/uri.rl" { s = p; } goto st176; st176: if ( ++p == pe ) goto _test_eof176; case 176: #line 2965 "src/uri.c" switch( (*p) ) { case 33: goto st177; case 35: goto tr141; case 37: goto st109; case 61: goto st177; case 63: goto tr145; case 95: goto st177; case 124: goto st177; case 126: goto st177; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st177; } else if ( (*p) > 59 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st177; } else if ( (*p) >= 64 ) goto st177; } else goto st177; goto st0; st177: if ( ++p == pe ) goto _test_eof177; case 177: switch( (*p) ) { case 33: goto st177; case 35: goto tr141; case 37: goto st109; case 61: goto st177; case 63: goto tr145; case 95: goto st177; case 124: goto st177; case 126: goto st177; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st177; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st177; } else goto st177; goto st0; st109: if ( ++p == pe ) goto _test_eof109; case 109: switch( (*p) ) { case 37: goto st177; case 117: goto st110; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st177; } else if ( (*p) > 70 ) { if ( 97 <= 
(*p) && (*p) <= 102 ) goto st177; } else goto st177; goto st0; st110: if ( ++p == pe ) goto _test_eof110; case 110: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st111; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st111; } else goto st111; goto st0; st111: if ( ++p == pe ) goto _test_eof111; case 111: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st112; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st112; } else goto st112; goto st0; st112: if ( ++p == pe ) goto _test_eof112; case 112: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st113; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st113; } else goto st113; goto st0; st113: if ( ++p == pe ) goto _test_eof113; case 113: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st177; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st177; } else goto st177; goto st0; tr144: #line 144 "src/uri.rl" { s = p; } #line 107 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } #line 186 "src/uri.rl" { uri->service = p; } goto st178; st178: if ( ++p == pe ) goto _test_eof178; case 178: #line 3094 "src/uri.c" switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st179; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st191; goto st0; st179: if ( ++p == pe ) goto _test_eof179; case 179: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && 
(*p) <= 46 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st180; goto st0; st180: if ( ++p == pe ) goto _test_eof180; case 180: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st181; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st189; goto st0; st181: if ( ++p == pe ) goto _test_eof181; case 181: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st182; goto st0; st182: if ( ++p == pe ) goto _test_eof182; case 182: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st183; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st187; goto st0; st183: if ( ++p == pe ) goto _test_eof183; case 183: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto 
st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st184; goto st0; st184: if ( ++p == pe ) goto _test_eof184; case 184: switch( (*p) ) { case 33: goto st135; case 35: goto tr191; case 37: goto st6; case 47: goto tr192; case 58: goto tr229; case 59: goto st135; case 61: goto st135; case 63: goto tr195; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st185; goto st0; st185: if ( ++p == pe ) goto _test_eof185; case 185: switch( (*p) ) { case 33: goto st135; case 35: goto tr191; case 37: goto st6; case 47: goto tr192; case 58: goto tr229; case 59: goto st135; case 61: goto st135; case 63: goto tr195; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st186; goto st0; st186: if ( ++p == pe ) goto _test_eof186; case 186: switch( (*p) ) { case 33: goto st135; case 35: goto tr191; case 37: goto st6; case 47: goto tr192; case 58: goto tr229; case 61: goto st135; case 63: goto tr195; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st135; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else goto st135; goto st0; st187: if ( ++p == pe ) goto _test_eof187; case 187: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st183; case 47: goto tr151; case 58: goto tr152; case 
59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st188; goto st0; st188: if ( ++p == pe ) goto _test_eof188; case 188: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st183; case 47: goto tr151; case 58: goto tr152; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st135; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else goto st135; goto st0; st189: if ( ++p == pe ) goto _test_eof189; case 189: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st181; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st190; goto st0; st190: if ( ++p == pe ) goto _test_eof190; case 190: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st181; case 47: goto tr151; case 58: goto tr152; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st135; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else goto st135; goto st0; st191: if ( ++p == pe ) goto _test_eof191; case 191: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st179; case 47: goto tr151; case 58: 
goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st192; goto st0; st192: if ( ++p == pe ) goto _test_eof192; case 192: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 46: goto st179; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st193; goto st0; st193: if ( ++p == pe ) goto _test_eof193; case 193: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 47: goto tr151; case 58: goto tr152; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st135; } else if ( (*p) >= 65 ) goto st135; } else goto st193; goto st0; st194: if ( ++p == pe ) goto _test_eof194; case 194: switch( (*p) ) { case 35: goto tr141; case 47: goto st138; case 63: goto tr145; } goto st0; tr147: #line 158 "src/uri.rl" { s = p; } #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st195; st195: if ( ++p == pe ) goto _test_eof195; case 195: #line 3574 "src/uri.c" switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 43: goto st195; case 47: goto tr151; case 58: goto tr236; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: 
goto st135; case 126: goto st135; } if ( (*p) < 45 ) { if ( 36 <= (*p) && (*p) <= 44 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st195; } else if ( (*p) >= 65 ) goto st195; } else goto st195; goto st0; tr236: #line 160 "src/uri.rl" {scheme = s; scheme_len = p - s; } #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st196; st196: if ( ++p == pe ) goto _test_eof196; case 196: #line 3613 "src/uri.c" switch( (*p) ) { case 33: goto tr164; case 35: goto tr165; case 37: goto tr166; case 47: goto tr237; case 59: goto tr164; case 61: goto tr164; case 63: goto tr169; case 64: goto tr170; case 95: goto tr164; case 126: goto tr164; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr164; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr171; } else if ( (*p) >= 65 ) goto tr171; } else goto tr168; goto st0; tr237: #line 177 "src/uri.rl" { uri->scheme = scheme; uri->scheme_len = scheme_len;} #line 138 "src/uri.rl" { s = p; } #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } goto st197; st197: if ( ++p == pe ) goto _test_eof197; case 197: #line 3652 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 47: goto st198; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; st198: if ( ++p == pe ) goto _test_eof198; case 198: switch( (*p) ) { case 33: goto tr239; case 35: goto tr141; case 37: goto tr240; case 47: goto st138; case 58: goto st138; case 59: goto tr239; case 61: goto tr239; case 63: goto tr145; case 64: goto st138; case 91: goto st38; case 95: goto tr239; case 117: goto tr242; case 124: goto 
st138; case 126: goto tr239; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr239; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr239; } else if ( (*p) >= 65 ) goto tr239; } else goto tr241; goto st0; tr239: #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st199; st199: if ( ++p == pe ) goto _test_eof199; case 199: #line 3715 "src/uri.c" switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; tr240: #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st114; st114: if ( ++p == pe ) goto _test_eof114; case 114: #line 3748 "src/uri.c" switch( (*p) ) { case 37: goto st199; case 117: goto st115; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st199; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st199; } else goto st199; goto st0; st115: if ( ++p == pe ) goto _test_eof115; case 115: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st116; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st116; } else goto st116; goto st0; st116: if ( ++p == pe ) goto _test_eof116; case 116: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st117; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st117; } else goto st117; goto st0; st117: if ( ++p == pe ) goto _test_eof117; case 117: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st118; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st118; } else goto st118; goto st0; st118: if ( ++p == pe ) goto _test_eof118; case 118: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st199; } else if ( (*p) > 
70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st199; } else goto st199; goto st0; tr244: #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st200; tr293: #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st200; st200: if ( ++p == pe ) goto _test_eof200; case 200: #line 3833 "src/uri.c" switch( (*p) ) { case 33: goto tr246; case 35: goto tr165; case 37: goto tr247; case 47: goto tr167; case 58: goto st138; case 59: goto tr246; case 61: goto tr246; case 63: goto tr169; case 64: goto tr249; case 95: goto tr246; case 124: goto st138; case 126: goto tr246; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr246; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr250; } else if ( (*p) >= 65 ) goto tr250; } else goto tr248; goto st0; tr246: #line 148 "src/uri.rl" { s = p; } goto st201; st201: if ( ++p == pe ) goto _test_eof201; case 201: #line 3868 "src/uri.c" switch( (*p) ) { case 33: goto st201; case 35: goto tr141; case 37: goto st119; case 47: goto st138; case 58: goto st138; case 61: goto st201; case 63: goto tr145; case 64: goto tr252; case 95: goto st201; case 124: goto st138; case 126: goto st201; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st201; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st201; } else goto st201; goto st0; tr247: #line 148 "src/uri.rl" { s = p; } goto st119; st119: if ( ++p == pe ) goto _test_eof119; case 119: #line 3899 "src/uri.c" switch( (*p) ) { case 37: goto st201; case 117: goto st120; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st201; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st201; } else goto st201; goto st0; st120: if ( ++p == pe ) goto _test_eof120; case 120: if ( (*p) < 65 ) { if ( 48 <= (*p) 
&& (*p) <= 57 ) goto st121; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st121; } else goto st121; goto st0; st121: if ( ++p == pe ) goto _test_eof121; case 121: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st122; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st122; } else goto st122; goto st0; st122: if ( ++p == pe ) goto _test_eof122; case 122: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st123; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st123; } else goto st123; goto st0; st123: if ( ++p == pe ) goto _test_eof123; case 123: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st201; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st201; } else goto st201; goto st0; tr252: #line 149 "src/uri.rl" { uri->password = s; uri->password_len = p - s; } #line 153 "src/uri.rl" { uri->login = login; uri->login_len = login_len; } goto st202; tr245: #line 145 "src/uri.rl" { login = s; login_len = p - s; } #line 153 "src/uri.rl" { uri->login = login; uri->login_len = login_len; } goto st202; tr249: #line 148 "src/uri.rl" { s = p; } #line 149 "src/uri.rl" { uri->password = s; uri->password_len = p - s; } #line 153 "src/uri.rl" { uri->login = login; uri->login_len = login_len; } goto st202; st202: if ( ++p == pe ) goto _test_eof202; case 202: #line 3989 "src/uri.c" switch( (*p) ) { case 33: goto tr253; case 35: goto tr141; case 37: goto tr254; case 47: goto st138; case 58: goto st138; case 59: goto tr253; case 61: goto tr253; case 63: goto tr145; case 64: goto st138; case 91: goto st38; case 95: goto tr253; case 117: goto tr256; case 124: goto st138; case 126: goto tr253; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto tr253; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr253; } else if ( (*p) >= 65 ) goto tr253; } else goto tr255; goto st0; tr253: #line 100 "src/uri.rl" { s = p; } goto st203; st203: if ( ++p == pe ) goto 
_test_eof203; case 203: #line 4026 "src/uri.c" switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; tr254: #line 100 "src/uri.rl" { s = p; } goto st124; st124: if ( ++p == pe ) goto _test_eof124; case 124: #line 4057 "src/uri.c" switch( (*p) ) { case 37: goto st203; case 117: goto st125; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st203; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st203; } else goto st203; goto st0; st125: if ( ++p == pe ) goto _test_eof125; case 125: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st126; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st126; } else goto st126; goto st0; st126: if ( ++p == pe ) goto _test_eof126; case 126: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st127; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st127; } else goto st127; goto st0; st127: if ( ++p == pe ) goto _test_eof127; case 127: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st128; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st128; } else goto st128; goto st0; st128: if ( ++p == pe ) goto _test_eof128; case 128: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st203; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st203; } else goto st203; goto st0; tr258: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st204; tr273: #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} goto st204; st204: if ( ++p == pe ) goto _test_eof204; case 204: #line 4138 
"src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr165; case 37: goto st11; case 47: goto tr167; case 61: goto st138; case 63: goto tr169; case 64: goto st138; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 58 ) { if ( (*p) > 46 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto tr259; } else if ( (*p) >= 36 ) goto st138; } else if ( (*p) > 59 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto tr260; } else if ( (*p) >= 65 ) goto tr260; } else goto st138; goto st0; tr259: #line 138 "src/uri.rl" { s = p; } goto st205; st205: if ( ++p == pe ) goto _test_eof205; case 205: #line 4174 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr176; case 37: goto st11; case 47: goto tr177; case 61: goto st138; case 63: goto tr179; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 58 ) { if ( (*p) > 46 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st205; } else if ( (*p) >= 36 ) goto st138; } else if ( (*p) > 59 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else if ( (*p) >= 64 ) goto st138; } else goto st138; goto st0; tr260: #line 138 "src/uri.rl" { s = p; } goto st206; st206: if ( ++p == pe ) goto _test_eof206; case 206: #line 4209 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr176; case 37: goto st11; case 47: goto tr177; case 61: goto st138; case 63: goto tr179; case 64: goto st138; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st206; } else goto st206; goto st0; tr255: #line 107 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st207; st207: if ( ++p == pe ) goto _test_eof207; case 207: #line 4241 "src/uri.c" switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st208; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; 
case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st220; goto st0; st208: if ( ++p == pe ) goto _test_eof208; case 208: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st209; goto st0; st209: if ( ++p == pe ) goto _test_eof209; case 209: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st210; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st218; goto st0; st210: if ( ++p == pe ) goto _test_eof210; case 210: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st211; goto st0; st211: if ( ++p 
== pe ) goto _test_eof211; case 211: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st212; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st216; goto st0; st212: if ( ++p == pe ) goto _test_eof212; case 212: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st213; goto st0; st213: if ( ++p == pe ) goto _test_eof213; case 213: switch( (*p) ) { case 33: goto st203; case 35: goto tr191; case 37: goto st124; case 47: goto tr192; case 58: goto tr273; case 59: goto st203; case 61: goto st203; case 63: goto tr195; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st214; goto st0; st214: if ( ++p == pe ) goto _test_eof214; case 214: switch( (*p) ) { case 33: goto st203; case 35: goto tr191; case 37: goto st124; case 47: goto tr192; case 58: goto tr273; case 59: goto st203; case 61: goto st203; case 63: goto tr195; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= 
(*p) && (*p) <= 46 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st215; goto st0; st215: if ( ++p == pe ) goto _test_eof215; case 215: switch( (*p) ) { case 33: goto st203; case 35: goto tr191; case 37: goto st124; case 47: goto tr192; case 58: goto tr273; case 61: goto st203; case 63: goto tr195; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; st216: if ( ++p == pe ) goto _test_eof216; case 216: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st212; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st217; goto st0; st217: if ( ++p == pe ) goto _test_eof217; case 217: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st212; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; st218: if ( ++p == pe ) goto _test_eof218; case 218: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st210; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 
95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st219; goto st0; st219: if ( ++p == pe ) goto _test_eof219; case 219: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st210; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; st220: if ( ++p == pe ) goto _test_eof220; case 220: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st208; case 47: goto tr151; case 58: goto tr258; case 59: goto st203; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st203; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else if ( (*p) >= 65 ) goto st203; } else goto st221; goto st0; st221: if ( ++p == pe ) goto _test_eof221; case 221: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 46: goto st208; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; tr256: #line 100 "src/uri.rl" { s = p; } goto st222; st222: if ( ++p == pe ) goto _test_eof222; case 222: #line 4689 "src/uri.c" switch( (*p) ) { case 33: goto st203; case 
35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 110: goto st223; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; st223: if ( ++p == pe ) goto _test_eof223; case 223: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 105: goto st224; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; st224: if ( ++p == pe ) goto _test_eof224; case 224: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr151; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 120: goto st225; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; st225: if ( ++p == pe ) goto _test_eof225; case 225: switch( (*p) ) { case 33: goto st203; case 35: goto tr149; case 37: goto st124; case 47: goto tr207; case 58: goto tr258; case 61: goto st203; case 63: goto tr153; case 64: goto st138; case 95: goto st203; case 124: goto st138; case 126: goto st203; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st203; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st203; } else goto st203; goto st0; tr248: #line 148 "src/uri.rl" { s = p; } #line 138 "src/uri.rl" { s = p; } goto st226; st226: if ( ++p == pe ) goto _test_eof226; case 226: #line 4803 
"src/uri.c" switch( (*p) ) { case 33: goto st201; case 35: goto tr176; case 37: goto st119; case 47: goto tr177; case 58: goto st138; case 59: goto st201; case 61: goto st201; case 63: goto tr179; case 64: goto tr252; case 95: goto st201; case 124: goto st138; case 126: goto st201; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st201; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st201; } else if ( (*p) >= 65 ) goto st201; } else goto st226; goto st0; tr250: #line 148 "src/uri.rl" { s = p; } #line 138 "src/uri.rl" { s = p; } goto st227; st227: if ( ++p == pe ) goto _test_eof227; case 227: #line 4840 "src/uri.c" switch( (*p) ) { case 33: goto st201; case 35: goto tr176; case 37: goto st119; case 47: goto tr177; case 58: goto st138; case 61: goto st201; case 63: goto tr179; case 64: goto tr252; case 95: goto st201; case 124: goto st138; case 126: goto st201; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st201; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st227; } else goto st227; goto st0; tr241: #line 144 "src/uri.rl" { s = p; } #line 107 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st228; st228: if ( ++p == pe ) goto _test_eof228; case 228: #line 4875 "src/uri.c" switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st229; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st241; goto st0; st229: if ( ++p == pe ) goto _test_eof229; case 229: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: 
goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st230; goto st0; st230: if ( ++p == pe ) goto _test_eof230; case 230: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st231; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st239; goto st0; st231: if ( ++p == pe ) goto _test_eof231; case 231: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st232; goto st0; st232: if ( ++p == pe ) goto _test_eof232; case 232: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st233; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto 
st237; goto st0; st233: if ( ++p == pe ) goto _test_eof233; case 233: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st234; goto st0; st234: if ( ++p == pe ) goto _test_eof234; case 234: switch( (*p) ) { case 33: goto st199; case 35: goto tr191; case 37: goto st114; case 47: goto tr192; case 58: goto tr293; case 59: goto st199; case 61: goto st199; case 63: goto tr195; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st235; goto st0; st235: if ( ++p == pe ) goto _test_eof235; case 235: switch( (*p) ) { case 33: goto st199; case 35: goto tr191; case 37: goto st114; case 47: goto tr192; case 58: goto tr293; case 59: goto st199; case 61: goto st199; case 63: goto tr195; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 46 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st236; goto st0; st236: if ( ++p == pe ) goto _test_eof236; case 236: switch( (*p) ) { case 33: goto st199; case 35: goto tr191; case 37: goto st114; case 47: goto tr192; case 58: goto tr293; case 61: goto st199; case 63: goto tr195; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) 
<= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; st237: if ( ++p == pe ) goto _test_eof237; case 237: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st233; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st238; goto st0; st238: if ( ++p == pe ) goto _test_eof238; case 238: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st233; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; st239: if ( ++p == pe ) goto _test_eof239; case 239: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st231; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st240; goto st0; st240: if ( ++p == pe ) goto _test_eof240; case 240: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st231; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; 
case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; st241: if ( ++p == pe ) goto _test_eof241; case 241: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st229; case 47: goto tr151; case 58: goto tr244; case 59: goto st199; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 48 ) { if ( 36 <= (*p) && (*p) <= 45 ) goto st199; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else if ( (*p) >= 65 ) goto st199; } else goto st242; goto st0; st242: if ( ++p == pe ) goto _test_eof242; case 242: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 46: goto st229; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; tr242: #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st243; st243: if ( ++p == pe ) goto _test_eof243; case 243: #line 5325 "src/uri.c" switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 110: goto st244; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; st244: if ( ++p == pe ) goto _test_eof244; case 244: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; 
case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 105: goto st245; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; st245: if ( ++p == pe ) goto _test_eof245; case 245: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr151; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 120: goto st246; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; st246: if ( ++p == pe ) goto _test_eof246; case 246: switch( (*p) ) { case 33: goto st199; case 35: goto tr149; case 37: goto st114; case 47: goto tr301; case 58: goto tr244; case 61: goto st199; case 63: goto tr153; case 64: goto tr245; case 95: goto st199; case 124: goto st138; case 126: goto st199; } if ( (*p) < 65 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st199; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st199; } else goto st199; goto st0; tr301: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } goto st247; st247: if ( ++p == pe ) goto _test_eof247; case 247: #line 5439 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 58: goto st248; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; st248: if ( ++p == pe ) goto _test_eof248; case 248: switch( (*p) ) { case 33: goto st138; case 
35: goto tr141; case 37: goto st11; case 46: goto tr303; case 47: goto tr304; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; tr303: #line 135 "src/uri.rl" { s = p;} goto st249; st249: if ( ++p == pe ) goto _test_eof249; case 249: #line 5493 "src/uri.c" switch( (*p) ) { case 33: goto st138; case 35: goto tr141; case 37: goto st11; case 47: goto st250; case 61: goto st138; case 63: goto tr145; case 95: goto st138; case 124: goto st138; case 126: goto st138; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st138; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st138; } else goto st138; goto st0; tr304: #line 135 "src/uri.rl" { s = p;} goto st250; st250: if ( ++p == pe ) goto _test_eof250; case 250: #line 5522 "src/uri.c" switch( (*p) ) { case 33: goto st251; case 35: goto tr141; case 37: goto st129; case 47: goto st138; case 58: goto st138; case 61: goto st251; case 63: goto tr145; case 95: goto st251; case 124: goto st138; case 126: goto st251; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st251; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st251; } else goto st251; goto st0; st251: if ( ++p == pe ) goto _test_eof251; case 251: switch( (*p) ) { case 33: goto st251; case 35: goto tr307; case 37: goto st129; case 47: goto st250; case 58: goto tr214; case 61: goto st251; case 63: goto tr308; case 95: goto st251; case 124: goto st138; case 126: goto st251; } if ( (*p) < 64 ) { if ( 36 <= (*p) && (*p) <= 59 ) goto st251; } else if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st251; } else goto st251; goto st0; st129: if ( ++p == pe ) goto _test_eof129; case 129: switch( (*p) ) { case 37: goto st251; case 117: goto st130; } if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st251; } 
else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st251; } else goto st251; goto st0; st130: if ( ++p == pe ) goto _test_eof130; case 130: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st131; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st131; } else goto st131; goto st0; st131: if ( ++p == pe ) goto _test_eof131; case 131: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st132; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st132; } else goto st132; goto st0; st132: if ( ++p == pe ) goto _test_eof132; case 132: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st133; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st133; } else goto st133; goto st0; st133: if ( ++p == pe ) goto _test_eof133; case 133: if ( (*p) < 65 ) { if ( 48 <= (*p) && (*p) <= 57 ) goto st251; } else if ( (*p) > 70 ) { if ( 97 <= (*p) && (*p) <= 102 ) goto st251; } else goto st251; goto st0; tr148: #line 158 "src/uri.rl" { s = p; } #line 144 "src/uri.rl" { s = p; } #line 100 "src/uri.rl" { s = p; } goto st252; st252: if ( ++p == pe ) goto _test_eof252; case 252: #line 5650 "src/uri.c" switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 43: goto st195; case 47: goto tr151; case 58: goto tr236; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 110: goto st253; case 126: goto st135; } if ( (*p) < 45 ) { if ( 36 <= (*p) && (*p) <= 44 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st195; } else if ( (*p) >= 65 ) goto st195; } else goto st195; goto st0; st253: if ( ++p == pe ) goto _test_eof253; case 253: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 43: goto st195; case 47: goto tr151; case 58: goto tr236; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 105: goto st254; case 
126: goto st135; } if ( (*p) < 45 ) { if ( 36 <= (*p) && (*p) <= 44 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st195; } else if ( (*p) >= 65 ) goto st195; } else goto st195; goto st0; st254: if ( ++p == pe ) goto _test_eof254; case 254: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 43: goto st195; case 47: goto tr151; case 58: goto tr236; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 120: goto st255; case 126: goto st135; } if ( (*p) < 45 ) { if ( 36 <= (*p) && (*p) <= 44 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st195; } else if ( (*p) >= 65 ) goto st195; } else goto st195; goto st0; st255: if ( ++p == pe ) goto _test_eof255; case 255: switch( (*p) ) { case 33: goto st135; case 35: goto tr149; case 37: goto st6; case 43: goto st195; case 47: goto tr301; case 58: goto tr236; case 59: goto st135; case 61: goto st135; case 63: goto tr153; case 64: goto tr154; case 95: goto st135; case 126: goto st135; } if ( (*p) < 45 ) { if ( 36 <= (*p) && (*p) <= 44 ) goto st135; } else if ( (*p) > 57 ) { if ( (*p) > 90 ) { if ( 97 <= (*p) && (*p) <= 122 ) goto st195; } else if ( (*p) >= 65 ) goto st195; } else goto st195; goto st0; } _test_eof135: cs = 135; goto _test_eof; _test_eof136: cs = 136; goto _test_eof; _test_eof137: cs = 137; goto _test_eof; _test_eof1: cs = 1; goto _test_eof; _test_eof2: cs = 2; goto _test_eof; _test_eof3: cs = 3; goto _test_eof; _test_eof4: cs = 4; goto _test_eof; _test_eof5: cs = 5; goto _test_eof; _test_eof6: cs = 6; goto _test_eof; _test_eof7: cs = 7; goto _test_eof; _test_eof8: cs = 8; goto _test_eof; _test_eof9: cs = 9; goto _test_eof; _test_eof10: cs = 10; goto _test_eof; _test_eof138: cs = 138; goto _test_eof; _test_eof11: cs = 11; goto _test_eof; _test_eof12: cs = 12; goto _test_eof; _test_eof13: cs = 13; goto _test_eof; _test_eof14: cs = 
14; goto _test_eof; _test_eof15: cs = 15; goto _test_eof; _test_eof139: cs = 139; goto _test_eof; _test_eof140: cs = 140; goto _test_eof; _test_eof16: cs = 16; goto _test_eof; _test_eof17: cs = 17; goto _test_eof; _test_eof18: cs = 18; goto _test_eof; _test_eof19: cs = 19; goto _test_eof; _test_eof20: cs = 20; goto _test_eof; _test_eof141: cs = 141; goto _test_eof; _test_eof21: cs = 21; goto _test_eof; _test_eof22: cs = 22; goto _test_eof; _test_eof23: cs = 23; goto _test_eof; _test_eof24: cs = 24; goto _test_eof; _test_eof25: cs = 25; goto _test_eof; _test_eof26: cs = 26; goto _test_eof; _test_eof27: cs = 27; goto _test_eof; _test_eof142: cs = 142; goto _test_eof; _test_eof28: cs = 28; goto _test_eof; _test_eof29: cs = 29; goto _test_eof; _test_eof30: cs = 30; goto _test_eof; _test_eof31: cs = 31; goto _test_eof; _test_eof32: cs = 32; goto _test_eof; _test_eof143: cs = 143; goto _test_eof; _test_eof144: cs = 144; goto _test_eof; _test_eof145: cs = 145; goto _test_eof; _test_eof146: cs = 146; goto _test_eof; _test_eof147: cs = 147; goto _test_eof; _test_eof33: cs = 33; goto _test_eof; _test_eof34: cs = 34; goto _test_eof; _test_eof35: cs = 35; goto _test_eof; _test_eof36: cs = 36; goto _test_eof; _test_eof37: cs = 37; goto _test_eof; _test_eof148: cs = 148; goto _test_eof; _test_eof149: cs = 149; goto _test_eof; _test_eof150: cs = 150; goto _test_eof; _test_eof151: cs = 151; goto _test_eof; _test_eof152: cs = 152; goto _test_eof; _test_eof153: cs = 153; goto _test_eof; _test_eof154: cs = 154; goto _test_eof; _test_eof155: cs = 155; goto _test_eof; _test_eof156: cs = 156; goto _test_eof; _test_eof157: cs = 157; goto _test_eof; _test_eof158: cs = 158; goto _test_eof; _test_eof159: cs = 159; goto _test_eof; _test_eof160: cs = 160; goto _test_eof; _test_eof161: cs = 161; goto _test_eof; _test_eof162: cs = 162; goto _test_eof; _test_eof38: cs = 38; goto _test_eof; _test_eof39: cs = 39; goto _test_eof; _test_eof40: cs = 40; goto _test_eof; _test_eof41: cs = 41; goto 
_test_eof; _test_eof42: cs = 42; goto _test_eof; _test_eof43: cs = 43; goto _test_eof; _test_eof44: cs = 44; goto _test_eof; _test_eof45: cs = 45; goto _test_eof; _test_eof46: cs = 46; goto _test_eof; _test_eof47: cs = 47; goto _test_eof; _test_eof48: cs = 48; goto _test_eof; _test_eof49: cs = 49; goto _test_eof; _test_eof50: cs = 50; goto _test_eof; _test_eof51: cs = 51; goto _test_eof; _test_eof52: cs = 52; goto _test_eof; _test_eof53: cs = 53; goto _test_eof; _test_eof54: cs = 54; goto _test_eof; _test_eof55: cs = 55; goto _test_eof; _test_eof56: cs = 56; goto _test_eof; _test_eof57: cs = 57; goto _test_eof; _test_eof58: cs = 58; goto _test_eof; _test_eof59: cs = 59; goto _test_eof; _test_eof60: cs = 60; goto _test_eof; _test_eof61: cs = 61; goto _test_eof; _test_eof62: cs = 62; goto _test_eof; _test_eof63: cs = 63; goto _test_eof; _test_eof64: cs = 64; goto _test_eof; _test_eof65: cs = 65; goto _test_eof; _test_eof66: cs = 66; goto _test_eof; _test_eof67: cs = 67; goto _test_eof; _test_eof68: cs = 68; goto _test_eof; _test_eof69: cs = 69; goto _test_eof; _test_eof70: cs = 70; goto _test_eof; _test_eof71: cs = 71; goto _test_eof; _test_eof72: cs = 72; goto _test_eof; _test_eof73: cs = 73; goto _test_eof; _test_eof74: cs = 74; goto _test_eof; _test_eof75: cs = 75; goto _test_eof; _test_eof76: cs = 76; goto _test_eof; _test_eof77: cs = 77; goto _test_eof; _test_eof78: cs = 78; goto _test_eof; _test_eof79: cs = 79; goto _test_eof; _test_eof80: cs = 80; goto _test_eof; _test_eof81: cs = 81; goto _test_eof; _test_eof82: cs = 82; goto _test_eof; _test_eof163: cs = 163; goto _test_eof; _test_eof83: cs = 83; goto _test_eof; _test_eof84: cs = 84; goto _test_eof; _test_eof85: cs = 85; goto _test_eof; _test_eof86: cs = 86; goto _test_eof; _test_eof87: cs = 87; goto _test_eof; _test_eof88: cs = 88; goto _test_eof; _test_eof89: cs = 89; goto _test_eof; _test_eof90: cs = 90; goto _test_eof; _test_eof91: cs = 91; goto _test_eof; _test_eof92: cs = 92; goto _test_eof; 
_test_eof93: cs = 93; goto _test_eof; _test_eof94: cs = 94; goto _test_eof; _test_eof95: cs = 95; goto _test_eof; _test_eof96: cs = 96; goto _test_eof; _test_eof97: cs = 97; goto _test_eof; _test_eof98: cs = 98; goto _test_eof; _test_eof99: cs = 99; goto _test_eof; _test_eof100: cs = 100; goto _test_eof; _test_eof101: cs = 101; goto _test_eof; _test_eof102: cs = 102; goto _test_eof; _test_eof103: cs = 103; goto _test_eof; _test_eof164: cs = 164; goto _test_eof; _test_eof165: cs = 165; goto _test_eof; _test_eof166: cs = 166; goto _test_eof; _test_eof167: cs = 167; goto _test_eof; _test_eof168: cs = 168; goto _test_eof; _test_eof169: cs = 169; goto _test_eof; _test_eof170: cs = 170; goto _test_eof; _test_eof171: cs = 171; goto _test_eof; _test_eof172: cs = 172; goto _test_eof; _test_eof104: cs = 104; goto _test_eof; _test_eof105: cs = 105; goto _test_eof; _test_eof106: cs = 106; goto _test_eof; _test_eof107: cs = 107; goto _test_eof; _test_eof108: cs = 108; goto _test_eof; _test_eof173: cs = 173; goto _test_eof; _test_eof174: cs = 174; goto _test_eof; _test_eof175: cs = 175; goto _test_eof; _test_eof176: cs = 176; goto _test_eof; _test_eof177: cs = 177; goto _test_eof; _test_eof109: cs = 109; goto _test_eof; _test_eof110: cs = 110; goto _test_eof; _test_eof111: cs = 111; goto _test_eof; _test_eof112: cs = 112; goto _test_eof; _test_eof113: cs = 113; goto _test_eof; _test_eof178: cs = 178; goto _test_eof; _test_eof179: cs = 179; goto _test_eof; _test_eof180: cs = 180; goto _test_eof; _test_eof181: cs = 181; goto _test_eof; _test_eof182: cs = 182; goto _test_eof; _test_eof183: cs = 183; goto _test_eof; _test_eof184: cs = 184; goto _test_eof; _test_eof185: cs = 185; goto _test_eof; _test_eof186: cs = 186; goto _test_eof; _test_eof187: cs = 187; goto _test_eof; _test_eof188: cs = 188; goto _test_eof; _test_eof189: cs = 189; goto _test_eof; _test_eof190: cs = 190; goto _test_eof; _test_eof191: cs = 191; goto _test_eof; _test_eof192: cs = 192; goto _test_eof; _test_eof193: 
cs = 193; goto _test_eof; _test_eof194: cs = 194; goto _test_eof; _test_eof195: cs = 195; goto _test_eof; _test_eof196: cs = 196; goto _test_eof; _test_eof197: cs = 197; goto _test_eof; _test_eof198: cs = 198; goto _test_eof; _test_eof199: cs = 199; goto _test_eof; _test_eof114: cs = 114; goto _test_eof; _test_eof115: cs = 115; goto _test_eof; _test_eof116: cs = 116; goto _test_eof; _test_eof117: cs = 117; goto _test_eof; _test_eof118: cs = 118; goto _test_eof; _test_eof200: cs = 200; goto _test_eof; _test_eof201: cs = 201; goto _test_eof; _test_eof119: cs = 119; goto _test_eof; _test_eof120: cs = 120; goto _test_eof; _test_eof121: cs = 121; goto _test_eof; _test_eof122: cs = 122; goto _test_eof; _test_eof123: cs = 123; goto _test_eof; _test_eof202: cs = 202; goto _test_eof; _test_eof203: cs = 203; goto _test_eof; _test_eof124: cs = 124; goto _test_eof; _test_eof125: cs = 125; goto _test_eof; _test_eof126: cs = 126; goto _test_eof; _test_eof127: cs = 127; goto _test_eof; _test_eof128: cs = 128; goto _test_eof; _test_eof204: cs = 204; goto _test_eof; _test_eof205: cs = 205; goto _test_eof; _test_eof206: cs = 206; goto _test_eof; _test_eof207: cs = 207; goto _test_eof; _test_eof208: cs = 208; goto _test_eof; _test_eof209: cs = 209; goto _test_eof; _test_eof210: cs = 210; goto _test_eof; _test_eof211: cs = 211; goto _test_eof; _test_eof212: cs = 212; goto _test_eof; _test_eof213: cs = 213; goto _test_eof; _test_eof214: cs = 214; goto _test_eof; _test_eof215: cs = 215; goto _test_eof; _test_eof216: cs = 216; goto _test_eof; _test_eof217: cs = 217; goto _test_eof; _test_eof218: cs = 218; goto _test_eof; _test_eof219: cs = 219; goto _test_eof; _test_eof220: cs = 220; goto _test_eof; _test_eof221: cs = 221; goto _test_eof; _test_eof222: cs = 222; goto _test_eof; _test_eof223: cs = 223; goto _test_eof; _test_eof224: cs = 224; goto _test_eof; _test_eof225: cs = 225; goto _test_eof; _test_eof226: cs = 226; goto _test_eof; _test_eof227: cs = 227; goto _test_eof; _test_eof228: 
cs = 228; goto _test_eof; _test_eof229: cs = 229; goto _test_eof; _test_eof230: cs = 230; goto _test_eof; _test_eof231: cs = 231; goto _test_eof; _test_eof232: cs = 232; goto _test_eof; _test_eof233: cs = 233; goto _test_eof; _test_eof234: cs = 234; goto _test_eof; _test_eof235: cs = 235; goto _test_eof; _test_eof236: cs = 236; goto _test_eof; _test_eof237: cs = 237; goto _test_eof; _test_eof238: cs = 238; goto _test_eof; _test_eof239: cs = 239; goto _test_eof; _test_eof240: cs = 240; goto _test_eof; _test_eof241: cs = 241; goto _test_eof; _test_eof242: cs = 242; goto _test_eof; _test_eof243: cs = 243; goto _test_eof; _test_eof244: cs = 244; goto _test_eof; _test_eof245: cs = 245; goto _test_eof; _test_eof246: cs = 246; goto _test_eof; _test_eof247: cs = 247; goto _test_eof; _test_eof248: cs = 248; goto _test_eof; _test_eof249: cs = 249; goto _test_eof; _test_eof250: cs = 250; goto _test_eof; _test_eof251: cs = 251; goto _test_eof; _test_eof129: cs = 129; goto _test_eof; _test_eof130: cs = 130; goto _test_eof; _test_eof131: cs = 131; goto _test_eof; _test_eof132: cs = 132; goto _test_eof; _test_eof133: cs = 133; goto _test_eof; _test_eof252: cs = 252; goto _test_eof; _test_eof253: cs = 253; goto _test_eof; _test_eof254: cs = 254; goto _test_eof; _test_eof255: cs = 255; goto _test_eof; _test_eof: {} if ( p == eof ) { switch ( cs ) { case 140: #line 72 "src/uri.rl" { uri->query = s; uri->query_len = p - s; } break; case 137: #line 76 "src/uri.rl" { uri->fragment = s; uri->fragment_len = p - s; } break; case 146: case 147: #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. 
*/ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } break; case 134: case 138: case 168: case 169: case 170: case 171: case 194: case 197: case 198: case 201: case 202: case 247: case 248: case 249: case 250: #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; case 139: #line 71 "src/uri.rl" { s = p; } #line 72 "src/uri.rl" { uri->query = s; uri->query_len = p - s; } break; case 136: #line 75 "src/uri.rl" { s = p; } #line 76 "src/uri.rl" { uri->fragment = s; uri->fragment_len = p - s; } break; case 163: case 173: #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; case 176: case 177: case 251: #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. 
*/ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } break; case 135: case 142: case 148: case 149: case 150: case 151: case 152: case 153: case 157: case 158: case 159: case 160: case 161: case 162: case 164: case 165: case 166: case 167: case 179: case 180: case 181: case 182: case 183: case 187: case 188: case 189: case 190: case 195: case 199: case 203: case 207: case 208: case 209: case 210: case 211: case 212: case 216: case 217: case 218: case 219: case 220: case 221: case 222: case 223: case 224: case 225: case 228: case 229: case 230: case 231: case 232: case 233: case 237: case 238: case 239: case 240: case 241: case 242: case 243: case 244: case 245: case 246: case 252: case 253: case 254: case 255: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; case 172: #line 119 "src/uri.rl" { /* * This action is also called for path_* terms. * I absolutely have no idea why. 
*/ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } #line 168 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; case 144: case 145: case 174: case 175: case 205: case 206: case 226: case 227: #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; case 178: case 191: case 192: case 193: #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } #line 187 "src/uri.rl" { uri->service_len = p - uri->service; uri->host = NULL; uri->host_len = 0; } break; case 154: case 155: case 156: case 184: case 185: case 186: case 213: case 214: case 215: case 234: case 235: case 236: #line 108 "src/uri.rl" { uri->host = s; uri->host_len = p - s; uri->host_hint = 1; } #line 101 "src/uri.rl" { uri->host = s; uri->host_len = p - s;} #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; case 141: case 143: case 196: case 200: case 204: #line 138 "src/uri.rl" { s = p; } #line 139 "src/uri.rl" { uri->service = s; uri->service_len = p - s; } #line 169 "src/uri.rl" { s = p; } #line 173 "src/uri.rl" { uri->path = s; uri->path_len = p - s; } break; #line 6276 "src/uri.c" } } _out: {} } #line 200 "src/uri.rl" if (uri->path_len == 0) uri->path = NULL; if (uri->service_len == 0) uri->service = NULL; if (uri->service_len >= URI_MAXSERVICE) return -1; if (uri->host_len >= URI_MAXHOST) return -1; (void)uri_first_final; (void)uri_error; (void)uri_en_main; (void)eof; return cs >= uri_first_final ? 
0 : -1; } int uri_format(char *str, int len, const struct uri *uri, bool write_password) { int total = 0; if (uri->scheme_len > 0) { SNPRINT(total, snprintf, str, len, "%.*s://", (int)uri->scheme_len, uri->scheme); } if (uri->host_len > 0) { if (uri->login_len > 0) { SNPRINT(total, snprintf, str, len, "%.*s", (int)uri->login_len, uri->login); if (uri->password_len > 0 && write_password) { SNPRINT(total, snprintf, str, len, ":%.*s", (int)uri->password_len, uri->password); } SNPRINT(total, snprintf, str, len, "@"); } SNPRINT(total, snprintf, str, len, "%.*s", (int)uri->host_len, uri->host); if (uri->service_len > 0) { SNPRINT(total, snprintf, str, len, ":%.*s", (int)uri->service_len, uri->service); } } if (uri->path_len > 0) { SNPRINT(total, snprintf, str, len, "%.*s", (int)uri->path_len, uri->path); } if (uri->query_len > 0) { SNPRINT(total, snprintf, str, len, "?%.*s", (int)uri->query_len, uri->query); } if (uri->fragment_len > 0) { SNPRINT(total, snprintf, str, len, "#%.*s", (int)uri->fragment_len, uri->fragment); } return total; } /* vim: set ft=ragel: */ tarantool_1.9.1.26.g63eb81e3c/src/main.cc0000664000000000000000000005033213306565107016241 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "main.h" #include "trivia/config.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(TARGET_OS_LINUX) && defined(HAVE_PRCTL_H) # include #endif #include "fiber.h" #include "cbus.h" #include "coio_task.h" #include #include "memory.h" #include #include #include #include "trivia/util.h" #include "backtrace.h" #include "tt_pthread.h" #include "lua/init.h" #include "box/box.h" #include "box/error.h" #include "scoped_guard.h" #include "random.h" #include "tt_uuid.h" #include "cfg.h" #include "version.h" #include #include "title.h" #include #include "box/lua/init.h" /* box_lua_init() */ #include "box/session.h" #include "systemd.h" static pid_t master_pid = getpid(); static struct pidfh *pid_file_handle; static char *script = NULL; static char *pid_file = NULL; static char **main_argv; static int main_argc; /** Signals handled after start as part of the event loop. 
*/ static ev_signal ev_sigs[6]; static const int ev_sig_count = sizeof(ev_sigs)/sizeof(*ev_sigs); static double start_time; double tarantool_uptime(void) { return ev_monotonic_now(loop()) - start_time; } /** * Create a checkpoint from signal handler (SIGUSR1) */ static void sig_checkpoint(ev_loop * /* loop */, struct ev_signal * /* w */, int /* revents */) { if (box_checkpoint_is_in_progress) { say_warn("Checkpoint is already in progress," " the signal is ignored"); return; } fiber_start(fiber_new_xc("checkpoint", (fiber_func)box_checkpoint)); } static void signal_cb(ev_loop *loop, struct ev_signal *w, int revents) { (void) w; (void) revents; /** * If running in daemon mode, complain about possibly * sudden and unexpected death. * Real case: an ops A kills the server and ops B files * a bug that the server suddenly died. Make such case * explicit in the log. */ if (pid_file) say_crit("got signal %d - %s", w->signum, strsignal(w->signum)); start_loop = false; /* Terminate the main event loop */ ev_break(loop, EVBREAK_ALL); } static void signal_sigwinch_cb(ev_loop *loop, struct ev_signal *w, int revents) { (void) loop; (void) w; (void) revents; if (rl_instream) rl_resize_terminal(); } #if defined(__linux__) && defined(__amd64) inline void dump_x86_64_register(const char *reg_name, unsigned long long val) { fprintf(stderr, " %-9s0x%-17llx%lld\n", reg_name, val, val); } void dump_x86_64_registers(ucontext_t *uc) { dump_x86_64_register("rax", uc->uc_mcontext.gregs[REG_RAX]); dump_x86_64_register("rbx", uc->uc_mcontext.gregs[REG_RBX]); dump_x86_64_register("rcx", uc->uc_mcontext.gregs[REG_RCX]); dump_x86_64_register("rdx", uc->uc_mcontext.gregs[REG_RDX]); dump_x86_64_register("rsi", uc->uc_mcontext.gregs[REG_RSI]); dump_x86_64_register("rdi", uc->uc_mcontext.gregs[REG_RDI]); dump_x86_64_register("rsp", uc->uc_mcontext.gregs[REG_RSP]); dump_x86_64_register("rbp", uc->uc_mcontext.gregs[REG_RBP]); dump_x86_64_register("r8", uc->uc_mcontext.gregs[REG_R8]); 
dump_x86_64_register("r9", uc->uc_mcontext.gregs[REG_R9]); dump_x86_64_register("r10", uc->uc_mcontext.gregs[REG_R10]); dump_x86_64_register("r11", uc->uc_mcontext.gregs[REG_R11]); dump_x86_64_register("r12", uc->uc_mcontext.gregs[REG_R12]); dump_x86_64_register("r13", uc->uc_mcontext.gregs[REG_R13]); dump_x86_64_register("r14", uc->uc_mcontext.gregs[REG_R14]); dump_x86_64_register("r15", uc->uc_mcontext.gregs[REG_R15]); dump_x86_64_register("rip", uc->uc_mcontext.gregs[REG_RIP]); dump_x86_64_register("eflags", uc->uc_mcontext.gregs[REG_EFL]); dump_x86_64_register("cs", (uc->uc_mcontext.gregs[REG_CSGSFS] >> 0) & 0xffff); dump_x86_64_register("gs", (uc->uc_mcontext.gregs[REG_CSGSFS] >> 16) & 0xffff); dump_x86_64_register("fs", (uc->uc_mcontext.gregs[REG_CSGSFS] >> 32) & 0xffff); dump_x86_64_register("cr2", uc->uc_mcontext.gregs[REG_CR2]); dump_x86_64_register("err", uc->uc_mcontext.gregs[REG_ERR]); dump_x86_64_register("oldmask", uc->uc_mcontext.gregs[REG_OLDMASK]); dump_x86_64_register("trapno", uc->uc_mcontext.gregs[REG_TRAPNO]); } #endif /* defined(__linux__) && defined(__amd64) */ /** Try to log as much as possible before dumping a core. * * Core files are not aways allowed and it takes an effort to * extract useful information from them. * * *Recursive invocation* * * Unless SIGSEGV is sent by kill(), Linux * resets the signal a default value before invoking * the handler. * * Despite that, as an extra precaution to avoid infinite * recursion, we count invocations of the handler, and * quietly _exit() when called for a second time. */ static void sig_fatal_cb(int signo, siginfo_t *siginfo, void *context) { static volatile sig_atomic_t in_cb = 0; int fd = STDERR_FILENO; struct sigaction sa; /* Got a signal while running the handler. 
*/ if (in_cb) { fdprintf(fd, "Fatal %d while backtracing", signo); goto end; } in_cb = 1; if (signo == SIGSEGV) { fdprintf(fd, "Segmentation fault\n"); const char *signal_code_repr = 0; switch (siginfo->si_code) { case SEGV_MAPERR: signal_code_repr = "SEGV_MAPERR"; break; case SEGV_ACCERR: signal_code_repr = "SEGV_ACCERR"; break; } if (signal_code_repr) fdprintf(fd, " code: %s\n", signal_code_repr); else fdprintf(fd, " code: %d\n", siginfo->si_code); /* * fprintf is used insted of fdprintf, because * fdprintf does not understand %p */ fprintf(stderr, " addr: %p\n", siginfo->si_addr); } else fdprintf(fd, "Got a fatal signal %d\n", signo); fprintf(stderr, " context: %p\n", context); fprintf(stderr, " siginfo: %p\n", siginfo); #if defined(__linux__) && defined(__amd64) dump_x86_64_registers((ucontext_t *)context); #endif fdprintf(fd, "Current time: %u\n", (unsigned) time(0)); fdprintf(fd, "Please file a bug at http://github.com/tarantool/tarantool/issues\n"); #ifdef ENABLE_BACKTRACE fdprintf(fd, "Attempting backtrace... Note: since the server has " "already crashed, \nthis may fail as well\n"); print_backtrace(); #endif end: /* Try to dump core. */ memset(&sa, 0, sizeof(sa)); sigemptyset(&sa.sa_mask); sa.sa_handler = SIG_DFL; sigaction(SIGABRT, &sa, NULL); abort(); } static void signal_free(void) { int i; for (i = 0; i < ev_sig_count; i++) ev_signal_stop(loop(), &ev_sigs[i]); } /** Make sure the child has a default signal disposition. */ static void signal_reset() { for (int i = 0; i < ev_sig_count; i++) ev_signal_stop(loop(), &ev_sigs[i]); struct sigaction sa; /* Reset all signals to their defaults. 
*/ memset(&sa, 0, sizeof(sa)); sigemptyset(&sa.sa_mask); sa.sa_handler = SIG_DFL; if (sigaction(SIGUSR1, &sa, NULL) == -1 || sigaction(SIGINT, &sa, NULL) == -1 || sigaction(SIGTERM, &sa, NULL) == -1 || sigaction(SIGHUP, &sa, NULL) == -1 || sigaction(SIGWINCH, &sa, NULL) == -1 || sigaction(SIGSEGV, &sa, NULL) == -1 || sigaction(SIGFPE, &sa, NULL) == -1) say_syserror("sigaction"); /* Unblock any signals blocked by libev. */ sigset_t sigset; sigfillset(&sigset); if (sigprocmask(SIG_UNBLOCK, &sigset, NULL) == -1) say_syserror("sigprocmask"); } static void tarantool_atfork() { signal_reset(); box_atfork(); } /** * Adjust the process signal mask and add handlers for signals. */ static void signal_init(void) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_IGN; sigemptyset(&sa.sa_mask); if (sigaction(SIGPIPE, &sa, 0) == -1) panic_syserror("sigaction"); /* * SA_RESETHAND resets handler action to the default * one when entering handler. * SA_NODEFER allows receiving the same signal during handler. */ sa.sa_flags = SA_RESETHAND | SA_NODEFER | SA_SIGINFO; sa.sa_sigaction = sig_fatal_cb; if (sigaction(SIGSEGV, &sa, 0) == -1 || sigaction(SIGFPE, &sa, 0) == -1) { panic_syserror("sigaction"); } ev_signal_init(&ev_sigs[0], sig_checkpoint, SIGUSR1); ev_signal_init(&ev_sigs[1], signal_cb, SIGINT); ev_signal_init(&ev_sigs[2], signal_cb, SIGTERM); ev_signal_init(&ev_sigs[3], signal_cb, SIGHUP); ev_signal_init(&ev_sigs[4], signal_sigwinch_cb, SIGWINCH); ev_signal_init(&ev_sigs[5], say_logrotate, SIGHUP); for (int i = 0; i < ev_sig_count; i++) ev_signal_start(loop(), &ev_sigs[i]); (void) tt_pthread_atfork(NULL, NULL, tarantool_atfork); } /** Run in the background. 
*/ static void daemonize() { pid_t pid; int fd; /* flush buffers to avoid multiple output */ /* https://github.com/tarantool/tarantool/issues/366 */ fflush(stdin); fflush(stdout); fflush(stderr); pid = fork(); switch (pid) { case -1: goto error; case 0: /* child */ master_pid = getpid(); break; default: /* parent */ /* Tell systemd about new main program using */ errno = 0; master_pid = pid; exit(EXIT_SUCCESS); } if (setsid() == -1) goto error; /* * tell libev we've just forked, this is necessary to re-initialize * kqueue on FreeBSD. */ ev_loop_fork(cord()->loop); /* * reinit signals after fork, because fork() implicitly calls * signal_reset() via pthread_atfork() hook installed by signal_init(). */ signal_init(); /* redirect stdin; stdout and stderr handled in say_logger_init */ fd = open("/dev/null", O_RDONLY); if (fd < 0) goto error; dup2(fd, STDIN_FILENO); close(fd); return; error: exit(EXIT_FAILURE); } extern "C" void load_cfg() { const char *work_dir = cfg_gets("work_dir"); if (work_dir != NULL && chdir(work_dir) == -1) panic_syserror("can't chdir to `%s'", work_dir); const char *username = cfg_gets("username"); if (username != NULL) { if (getuid() == 0 || geteuid() == 0) { struct passwd *pw; errno = 0; if ((pw = getpwnam(username)) == 0) { if (errno) { say_syserror("getpwnam: %s", username); } else { say_error("User not found: %s", username); } exit(EX_NOUSER); } if (setgid(pw->pw_gid) < 0 || setgroups(0, NULL) < 0 || setuid(pw->pw_uid) < 0 || seteuid(pw->pw_uid)) { say_syserror("setgid/setuid"); exit(EX_OSERR); } } else { say_error("can't switch to %s: i'm not root", username); } } if (cfg_geti("coredump")) { struct rlimit c = { 0, 0 }; if (getrlimit(RLIMIT_CORE, &c) < 0) { say_syserror("getrlimit"); exit(EX_OSERR); } c.rlim_cur = c.rlim_max; if (setrlimit(RLIMIT_CORE, &c) < 0) { say_syserror("setrlimit"); exit(EX_OSERR); } #if defined(TARGET_OS_LINUX) && defined(HAVE_PRCTL_H) if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) < 0) { say_syserror("prctl"); 
exit(EX_OSERR); } #endif } int background = cfg_geti("background"); const char *log = cfg_gets("log"); const char *log_format = cfg_gets("log_format"); pid_file = (char *)cfg_gets("pid_file"); if (pid_file != NULL) { pid_file = abspath(pid_file); if (pid_file == NULL) panic("out of memory"); } if (background) { if (log == NULL) { say_crit( "'background' requires " "'log' configuration option to be set"); exit(EXIT_FAILURE); } if (pid_file == NULL) { say_crit( "'background' requires " "'pid_file' configuration option to be set"); exit(EXIT_FAILURE); } } /* * pid file check must happen before logger init in order for the * error message to show in stderr */ if (pid_file != NULL) { pid_t other_pid = -1; pid_file_handle = pidfile_open(pid_file, 0644, &other_pid); if (pid_file_handle == NULL) { if (errno == EEXIST) { say_crit( "the daemon is already running: PID %d", (int)other_pid); } else { say_syserror( "failed to create pid file '%s'", pid_file); } exit(EXIT_FAILURE); } } /* * logger init must happen before daemonising in order for the error * to show and for the process to exit with a failure status */ say_logger_init(log, cfg_geti("log_level"), cfg_geti("log_nonblock"), log_format, background); systemd_init(); if (background) daemonize(); /* * after (optional) daemonising to avoid confusing messages with * different pids */ say_crit("%s %s", tarantool_package(), tarantool_version()); say_crit("log level %i", cfg_geti("log_level")); if (pid_file_handle != NULL) { if (pidfile_write(pid_file_handle) == -1) say_syserror("failed to update pid file '%s'", pid_file); } title_set_custom(cfg_gets("custom_proc_title")); title_update(); box_cfg(); } void tarantool_free(void) { /* * Do nothing in a fork. 
* Note: technically we should do pidfile_close(), however since our * forks do exec immediately we can get away without it, thanks to * the magic O_CLOEXEC */ if (getpid() != master_pid) return; /* * It's better to do nothing and keep xlogs opened when * we are called by exit() from a non-main thread. */ if (!cord_is_main()) return; /* Shutdown worker pool. Waits until threads terminate. */ coio_shutdown(); box_free(); title_free(main_argc, main_argv); /* unlink pidfile. */ if (pid_file_handle != NULL && pidfile_remove(pid_file_handle) == -1) say_syserror("failed to remove pid file '%s'", pid_file); free(pid_file); signal_free(); #ifdef ENABLE_GCOV __gcov_flush(); #endif /* tarantool_lua_free() was formerly reponsible for terminal reset, * but it is no longer called */ if (isatty(STDIN_FILENO)) { /* * Restore terminal state. Doesn't hurt if exiting not * due to a signal. */ rl_cleanup_after_signal(); } cbus_free(); #if 0 /* * This doesn't work reliably since things * are too interconnected. 
*/ tarantool_lua_free(); session_free(); user_cache_free(); fiber_free(); memory_free(); random_free(); #endif systemd_free(); say_logger_free(); } static void print_version(void) { printf("%s %s\n", tarantool_package(), tarantool_version()); printf("Target: %s\n", BUILD_INFO); printf("Build options: %s\n", BUILD_OPTIONS); printf("Compiler: %s\n", COMPILER_INFO); printf("C_FLAGS:%s\n", TARANTOOL_C_FLAGS); printf("CXX_FLAGS:%s\n", TARANTOOL_CXX_FLAGS); } static void print_help(const char *program) { puts("Tarantool - a Lua application server"); puts(""); printf("Usage: %s script.lua [OPTIONS] [SCRIPT [ARGS]]\n", program); puts(""); puts("All command line options are passed to the interpreted script."); puts("When no script name is provided, the server responds to:"); puts(" -h, --help\t\t\tdisplay this help and exit"); puts(" -v, --version\t\t\tprint program version and exit"); puts(" -e EXPR\t\t\texecute string 'EXPR'"); puts(" -l NAME\t\t\trequire library 'NAME'"); puts(" -i\t\t\t\tenter interactive mode after executing 'SCRIPT'"); puts(" --\t\t\t\tstop handling options"); puts(" -\t\t\t\texecute stdin and stop handling options"); puts(""); puts("Please visit project home page at http://tarantool.org"); puts("to see online documentation, submit bugs or contribute a patch."); } int main(int argc, char **argv) { /* set locale to make iswXXXX function work */ if (setlocale(LC_CTYPE, "C.UTF-8") == NULL && setlocale(LC_CTYPE, "en_US.UTF-8") == NULL && setlocale(LC_CTYPE, "en_US.utf8") == NULL) fprintf(stderr, "Failed to set locale to C.UTF-8\n"); fpconv_check(); /* Enter interactive mode after executing 'script' */ bool interactive = false; /* Lua interpeter options, e.g. 
-e and -l */ int optc = 0; char **optv = NULL; auto guard = make_scoped_guard([=]{ if (optc) free(optv); }); static struct option longopts[] = { {"help", no_argument, 0, 'h'}, {"version", no_argument, 0, 'v'}, {NULL, 0, 0, 0}, }; static const char *opts = "+hVvie:l:"; int ch; while ((ch = getopt_long(argc, argv, opts, longopts, NULL)) != -1) { switch (ch) { case 'V': case 'v': print_version(); return 0; case 'h': print_help(basename(argv[0])); return 0; case 'i': /* Force interactive mode */ interactive = true; break; case 'l': case 'e': /* Save Lua interepter options to optv as is */ if (optc == 0) { optv = (char **) calloc(argc, sizeof(char *)); if (optv == NULL) panic_syserror("No enough memory for arguments"); } /* * The variable optind is the index of the next * element to be processed in argv. */ optv[optc++] = argv[optind - 2]; optv[optc++] = argv[optind - 1]; break; default: /* "invalid option" is printed by getopt */ return EX_USAGE; } } /* Shift arguments */ argc = 1 + (argc - optind); for (int i = 1; i < argc; i++) argv[i] = argv[optind + i - 1]; if (argc > 1 && strcmp(argv[1], "-") && access(argv[1], R_OK) != 0) { /* * Somebody made a mistake in the file * name. Be nice: open the file to set * errno. */ int fd = open(argv[1], O_RDONLY); int save_errno = errno; if (fd >= 0) close(fd); printf("Can't open script %s: %s\n", argv[1], strerror(save_errno)); return save_errno; } argv = title_init(argc, argv); /* * Support only #!/usr/bin/tarantol but not * #!/usr/bin/tarantool -a -b because: * - not all shells support it, * - those shells that do support it, do not * split multiple options, so "-a -b" comes as * a single value in argv[1]. 
* - in case one uses #!/usr/bin/env tarantool * such options (in script line) don't work */ char *tarantool_bin = find_path(argv[0]); if (!tarantool_bin) tarantool_bin = argv[0]; if (argc > 1) { argv++; argc--; script = argv[0]; title_set_script_name(argv[0]); } random_init(); crc32_init(); memory_init(); main_argc = argc; main_argv = argv; exception_init(); fiber_init(fiber_cxx_invoke); coio_init(); coio_enable(); signal_init(); cbus_init(); tarantool_lua_init(tarantool_bin, main_argc, main_argv); start_time = ev_monotonic_time(); try { box_init(); box_lua_init(tarantool_L); /* main core cleanup routine */ atexit(tarantool_free); if (!loop()) panic("%s", "can't init event loop"); int events = ev_activecnt(loop()); /* * Load user init script. The script should have access * to Tarantool Lua API (box.cfg, box.fiber, etc...) that * is why script must run only after the server was fully * initialized. */ tarantool_lua_run_script(script, interactive, optc, optv, main_argc, main_argv); /* * Start event loop after executing Lua script if signal_cb() * wasn't triggered and there is some new events. Initial value * of start_loop can be set to false by signal_cb(). */ start_loop = start_loop && ev_activecnt(loop()) > events; region_free(&fiber()->gc); if (start_loop) { say_crit("entering the event loop"); systemd_snotify("READY=1"); ev_now_update(loop()); ev_run(loop(), 0); } } catch (struct error *e) { error_log(e); systemd_snotify("STATUS=Failed to startup: %s", box_error_message(e)); panic("%s", "fatal error, exiting the event loop"); } catch (...) { /* This can only happen in case of a server bug. */ panic("unknown exception"); } if (start_loop) say_crit("exiting the event loop"); /* freeing resources */ return 0; } tarantool_1.9.1.26.g63eb81e3c/src/latch.h0000664000000000000000000001215713306560010016241 0ustar rootroot#ifndef TARANTOOL_LATCH_H_INCLUDED #define TARANTOOL_LATCH_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "small/rlist.h" #include "fiber.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Latch of cooperative multitasking environment. */ struct latch { /** * The fiber that locked the latch, or NULL * if the latch is unlocked. */ struct fiber *owner; /** * The queue of fibers waiting on the latch. */ struct rlist queue; }; /** * latch initializer */ #define LATCH_INITIALIZER(name) { 0, RLIST_HEAD_INITIALIZER(name.queue) } /** * Initialize the given latch. * * @param l - latch to be initialized. */ static inline void latch_create(struct latch *l) { l->owner = NULL; rlist_create(&l->queue); } /** * Destroy the given latch. * * @param l - latch to be destroyed. 
*/
static inline void
latch_destroy(struct latch *l)
{
	/* A latch may only be destroyed when nobody holds or waits on it. */
	assert(l->owner == NULL);
	assert(rlist_empty(&l->queue));
	(void) l;
}

/**
 * Return the fiber that locked the given latch, or NULL
 * if the latch is unlocked.
 *
 * @param l - latch to be checked.
 */
static inline struct fiber *
latch_owner(struct latch *l)
{
	return l->owner;
}

/**
 * Lock a latch. If the latch is already locked by another fiber,
 * waits for timeout.
 *
 * @param l - latch to be locked.
 * @param timeout - maximal time to wait
 *
 * @retval 0 - success
 * @retval 1 - timeout
 */
static inline int
latch_lock_timeout(struct latch *l, ev_tstamp timeout)
{
	/* Recursive locking by the same fiber is not supported. */
	assert(l->owner != fiber());
	/* Fast path: latch is free and nobody is queued ahead of us. */
	if (l->owner == NULL && rlist_empty(&l->queue)) {
		l->owner = fiber();
		return 0;
	}
	if (timeout <= 0)
		return 1;
	/* Join the wait queue at the tail. */
	rlist_add_tail_entry(&l->queue, fiber(), state);
	/*
	 * Forbid cancellation while sleeping on the latch;
	 * the previous cancellability state is restored below.
	 */
	bool was_cancellable = fiber_set_cancellable(false);
	ev_tstamp start = ev_monotonic_now(loop());
	int result = 0;
	while (true) {
		fiber_yield_timeout(timeout);
		if (l->owner == fiber()) {
			/* Current fiber was woken by previous latch owner. */
			break;
		}
		/*
		 * NOTE(review): `start` is never advanced, so on a second
		 * spurious wakeup the TOTAL elapsed time is subtracted from
		 * an already-reduced `timeout`, over-counting the wait and
		 * possibly timing out early — verify against the fiber
		 * scheduler's wakeup semantics.
		 */
		timeout -= ev_monotonic_now(loop()) - start;
		if (timeout <= 0) {
			result = 1;
			break;
		}
		/* Re-queue (at the list head this time) and keep waiting. */
		rlist_add_entry(&l->queue, fiber(), state);
	}
	fiber_set_cancellable(was_cancellable);
	return result;
}

/**
 * \copydoc box_latch_lock
 */
static inline void
latch_lock(struct latch *l)
{
	/* With an infinite timeout the result can only be success. */
	(void) latch_lock_timeout(l, TIMEOUT_INFINITY);
}

/**
 * \copydoc box_latch_trylock
 */
static inline int
latch_trylock(struct latch *l)
{
	/* Zero timeout: grab the latch now or fail immediately. */
	return latch_lock_timeout(l, 0);
}

/**
 * \copydoc box_latch_unlock
 */
static inline void
latch_unlock(struct latch *l)
{
	/* Only the owning fiber may release the latch. */
	assert(l->owner == fiber());
	l->owner = NULL;
	if (!rlist_empty(&l->queue)) {
		/* Hand the latch to the first queued waiter. */
		struct fiber *f = rlist_first_entry(&l->queue,
						    struct fiber, state);
		/*
		 * Set this fiber as latch owner because fiber_wakeup
		 * removes it from the waiting queue, and any other
		 * already scheduled fiber could otherwise intercept
		 * this latch.
*/ l->owner = f; fiber_wakeup(f); } } /** \cond public */ /** * A lock for cooperative multitasking environment */ typedef struct box_latch box_latch_t; /** * Allocate and initialize the new latch. * \returns latch */ box_latch_t* box_latch_new(void); /** * Destroy and free the latch. * \param latch latch */ void box_latch_delete(box_latch_t *latch); /** * Lock a latch. Waits indefinitely until the current fiber can gain access to * the latch. * * \param latch a latch */ void box_latch_lock(box_latch_t *latch); /** * Try to lock a latch. Return immediately if the latch is locked. * \param latch a latch * \retval 0 - success * \retval 1 - the latch is locked. */ int box_latch_trylock(box_latch_t *latch); /** * Unlock a latch. The fiber calling this function must * own the latch. * * \param latch a latch */ void box_latch_unlock(box_latch_t *latch); /** \endcond public */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LATCH_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/proc_title.h0000664000000000000000000000334113306560010017305 0ustar rootroot#ifndef TARANTOOL_PROCTITLE_H_INCLUDED #define TARANTOOL_PROCTITLE_H_INCLUDED /* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #ifdef __cplusplus extern "C" { #endif char **proc_title_init(int argc, char **argv); void proc_title_free(int argc, char **argv); CFORMAT(printf, 1, 2) void proc_title_set(const char *format, ...); size_t proc_title_max_length(); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* TARANTOOL_PROCTITLE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/uri.rl0000664000000000000000000001655113306560010016135 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "uri.h" #include /* SNPRINT */ #include #include /* snprintf */ int uri_parse(struct uri *uri, const char *p) { const char *pe = p + strlen(p); const char *eof = pe; int cs; memset(uri, 0, sizeof(*uri)); if (p == pe) return -1; const char *s = NULL, *login = NULL, *scheme = NULL; size_t login_len = 0, scheme_len = 0; %%{ machine uri; write data; # # Line by line translation of RFC3986 # http://tools.ietf.org/html/rfc3986#appendix-A # gen_delims = (":" | "/" | "?" | "#" | "[" | "]" | "@"); sub_delims = ("!" | "$" | "&" | "'" | "(" | ")" | "*" | "+" | "," | ";" | "="); reserved = (gen_delims | sub_delims); unreserved = alpha | digit | "-" | "_" | "~" | "."; pct_encoded = ("%%" | ("%" xdigit xdigit?) 
| ("%u" xdigit xdigit xdigit xdigit)); pchar_nc = unreserved | pct_encoded | sub_delims | "@"; pchar = pchar_nc | ":" | "|"; query = (pchar | "/" | "?")* >{ s = p; } %{ uri->query = s; uri->query_len = p - s; }; fragment = (pchar | "/" | "?")* >{ s = p; } %{ uri->fragment = s; uri->fragment_len = p - s; }; segment = pchar*; segment_nz = pchar+; segment_nz_nc = pchar_nc+; path_abempty = ( "/" segment )*; path_absolute = ("/" ( segment_nz ( "/" segment )* )?); path_noscheme = (segment_nz_nc ( "/" segment )*); path_rootless = (pchar_nc ( "/" segment )*); path_empty = ""; path = path_abempty # begins with "/" or is empty | path_absolute # begins with "/" but not "//" | path_noscheme # begins with a non-colon segment | path_rootless # begins with a segment | path_empty; # zero characters socket_path_absolute = ("/" segment_nz_nc)+; socket_path_relative = ("." socket_path_absolute); socket_path = socket_path_absolute | socket_path_relative; reg_name = (unreserved | pct_encoded | sub_delims)+ >{ s = p; } %{ uri->host = s; uri->host_len = p - s;}; hex1_4 = ([0-9a-fa-f]{1,4}); ip4addr = ((digit{1,3}) (("." digit{1,3}){3})); ip4 = ip4addr >{ s = p; } %{ uri->host = s; uri->host_len = p - s; uri->host_hint = 1; }; ip6 = ("[" ( ((hex1_4?) ((":" (hex1_4?)){1,8})) | ("::" [ff][ff][ff][ff] ":" ip4addr)) >{ s = p; } %{ uri->host = s; uri->host_len = p - s; uri->host_hint = 2; } "]"); action unix{ /* * This action is also called for path_* terms. * I absolutely have no idea why. 
*/ if (uri->host_hint != 3) { uri->host_hint = 3; uri->host = URI_HOST_UNIX; uri->host_len = strlen(URI_HOST_UNIX); uri->service = s; uri->service_len = p - s; /* a workaround for grammar limitations */ uri->path = NULL; uri->path_len = 0; }; } # Non-standard: "unix/" support unix = ("unix/:" %{ s = p;} socket_path) %unix; service = (digit+ | alpha*) >{ s = p; } %{ uri->service = s; uri->service_len = p - s; }; host = (ip4 | ip6 | reg_name); login = (unreserved | pct_encoded | sub_delims )+ >{ s = p; } %{ login = s; login_len = p - s; }; password = (unreserved | pct_encoded | sub_delims )* >{ s = p; } %{ uri->password = s; uri->password_len = p - s; }; # Non-standard: split userinfo to login and password userinfo = login (":" password)? %{ uri->login = login; uri->login_len = login_len; }; # Non-standard: use service instead of port here + support unix authority = (userinfo "@")? ((host (":" service)?) | (unix ":")); scheme = alpha > { s = p; } (alpha | digit | "+" | "-" | ".")* %{scheme = s; scheme_len = p - s; }; # relative_part = "//" authority > { s = p } path_abempty | # path_absolute | # path_noscheme | # path_empty; # Non-standard: allow URI without scheme hier_part_noscheme = (((userinfo "@" unix) %{ s = p; }) | ((authority %{ s = p; } path_abempty? | path_absolute? | path_rootless? | path_empty? ) %{ uri->path = s; uri->path_len = p - s; }) | unix); hier_part = "//" >{ uri->scheme = scheme; uri->scheme_len = scheme_len;} hier_part_noscheme; # relative_ref = relative_part ("?" >{ s = p; } query)? # ("#" >{ s = p; } fragment)?; # absolute_URI = scheme ":" hier_part ("?" >{ s = p; } query); PORT = digit+ >{ uri->service = p; } %{ uri->service_len = p - uri->service; uri->host = NULL; uri->host_len = 0; }; PATH = ((userinfo "@")? %{ s = p; } path_absolute %unix); URI = ((scheme ":" hier_part) | hier_part_noscheme) ("?" >{ s = p; } query)? 
("#" >{ s = p; } fragment)?; # Non-RFC: support port and absolute path main := URI | PORT | PATH; write init; write exec; }%% if (uri->path_len == 0) uri->path = NULL; if (uri->service_len == 0) uri->service = NULL; if (uri->service_len >= URI_MAXSERVICE) return -1; if (uri->host_len >= URI_MAXHOST) return -1; (void)uri_first_final; (void)uri_error; (void)uri_en_main; (void)eof; return cs >= uri_first_final ? 0 : -1; } int uri_format(char *str, int len, const struct uri *uri, bool write_password) { int total = 0; if (uri->scheme_len > 0) { SNPRINT(total, snprintf, str, len, "%.*s://", (int)uri->scheme_len, uri->scheme); } if (uri->host_len > 0) { if (uri->login_len > 0) { SNPRINT(total, snprintf, str, len, "%.*s", (int)uri->login_len, uri->login); if (uri->password_len > 0 && write_password) { SNPRINT(total, snprintf, str, len, ":%.*s", (int)uri->password_len, uri->password); } SNPRINT(total, snprintf, str, len, "@"); } SNPRINT(total, snprintf, str, len, "%.*s", (int)uri->host_len, uri->host); if (uri->service_len > 0) { SNPRINT(total, snprintf, str, len, ":%.*s", (int)uri->service_len, uri->service); } } if (uri->path_len > 0) { SNPRINT(total, snprintf, str, len, "%.*s", (int)uri->path_len, uri->path); } if (uri->query_len > 0) { SNPRINT(total, snprintf, str, len, "?%.*s", (int)uri->query_len, uri->query); } if (uri->fragment_len > 0) { SNPRINT(total, snprintf, str, len, "#%.*s", (int)uri->fragment_len, uri->fragment); } return total; } /* vim: set ft=ragel: */ tarantool_1.9.1.26.g63eb81e3c/src/httpc.c0000664000000000000000000002552213306565107016277 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "httpc.h" #include #include #include "fiber.h" /** * libcurl callback for CURLOPT_WRITEFUNCTION * @see https://curl.haxx.se/libcurl/c/CURLOPT_WRITEFUNCTION.html */ static size_t curl_easy_write_cb(char *ptr, size_t size, size_t nmemb, void *ctx) { struct httpc_request *req = (struct httpc_request *) ctx; const size_t bytes = size * nmemb; char *p = region_alloc(&req->resp_body, bytes); if (p == NULL) { diag_set(OutOfMemory, bytes, "ibuf", "httpc body"); return 0; } memcpy(p, ptr, bytes); return bytes; } /** * libcurl callback for CURLOPT_HEADERFUNCTION * @see https://curl.haxx.se/libcurl/c/CURLOPT_HEADERFUNCTION.html */ static size_t curl_easy_header_cb(char *buffer, size_t size, size_t nitems, void *ctx) { struct httpc_request *req = (struct httpc_request *) ctx; const size_t bytes = size * nitems; char *p = region_alloc(&req->resp_headers, bytes); if (p == NULL) { diag_set(OutOfMemory, bytes, "ibuf", "httpc header"); return 0; } memcpy(p, buffer, bytes); return bytes; } int httpc_env_create(struct httpc_env *env, int 
max_conns) { memset(env, 0, sizeof(*env)); mempool_create(&env->req_pool, &cord()->slabc, sizeof(struct httpc_request)); return curl_env_create(&env->curl_env, max_conns); } void httpc_env_destroy(struct httpc_env *ctx) { assert(ctx); curl_env_destroy(&ctx->curl_env); mempool_destroy(&ctx->req_pool); } struct httpc_request * httpc_request_new(struct httpc_env *env, const char *method, const char *url) { struct httpc_request *req = mempool_alloc(&env->req_pool); if (req == NULL) { diag_set(OutOfMemory, sizeof(struct httpc_request), "mempool", "httpc_request"); return NULL; } memset(req, 0, sizeof(*req)); req->env = env; region_create(&req->resp_headers, &cord()->slabc); region_create(&req->resp_body, &cord()->slabc); if (curl_request_create(&req->curl_request) != 0) return NULL; if (strcmp(method, "GET") == 0) { curl_easy_setopt(req->curl_request.easy, CURLOPT_HTTPGET, 1L); } else if (strcmp(method, "HEAD") == 0) { curl_easy_setopt(req->curl_request.easy, CURLOPT_NOBODY, 1L); } else if (strcmp(method, "POST") == 0 || strcmp(method, "PUT") == 0 || strcmp(method, "PATCH")) { /* * Set CURLOPT_POSTFIELDS to "" and CURLOPT_POSTFIELDSSIZE 0 * to avoid the read callback in any cases even if user * forgot to call httpc_set_body() for POST request. 
* @see https://curl.haxx.se/libcurl/c/CURLOPT_POSTFIELDS.html */ curl_easy_setopt(req->curl_request.easy, CURLOPT_POST, 1L); curl_easy_setopt(req->curl_request.easy, CURLOPT_POSTFIELDS, ""); curl_easy_setopt(req->curl_request.easy, CURLOPT_POSTFIELDSIZE, 0); curl_easy_setopt(req->curl_request.easy, CURLOPT_CUSTOMREQUEST, method); if (httpc_set_header(req, "Accept: */*") < 0) goto error; } else { curl_easy_setopt(req->curl_request.easy, CURLOPT_CUSTOMREQUEST, method); } curl_easy_setopt(req->curl_request.easy, CURLOPT_URL, url); curl_easy_setopt(req->curl_request.easy, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(req->curl_request.easy, CURLOPT_SSL_VERIFYPEER, 1); curl_easy_setopt(req->curl_request.easy, CURLOPT_WRITEFUNCTION, curl_easy_write_cb); curl_easy_setopt(req->curl_request.easy, CURLOPT_HEADERFUNCTION, curl_easy_header_cb); curl_easy_setopt(req->curl_request.easy, CURLOPT_NOPROGRESS, 1L); curl_easy_setopt(req->curl_request.easy, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); ibuf_create(&req->body, &cord()->slabc, 1); return req; error: mempool_free(&env->req_pool, req); return NULL; } void httpc_request_delete(struct httpc_request *req) { if (req->headers != NULL) curl_slist_free_all(req->headers); curl_request_destroy(&req->curl_request); ibuf_destroy(&req->body); region_destroy(&req->resp_headers); region_destroy(&req->resp_body); mempool_free(&req->env->req_pool, req); } int httpc_set_header(struct httpc_request *req, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); const char *header = tt_vsprintf(fmt, ap); va_end(ap); struct curl_slist *l = curl_slist_append(req->headers, header); if (l == NULL) { diag_set(OutOfMemory, strlen(header), "curl", "http header"); return -1; } req->headers = l; return 0; } int httpc_set_body(struct httpc_request *req, const char *body, size_t size) { ibuf_reset(&req->body); char *chunk = ibuf_alloc(&req->body, size); if (chunk == NULL) { diag_set(OutOfMemory, size, "ibuf", "http request body"); return -1; } memcpy(chunk, body, size); curl_easy_setopt(req->curl_request.easy, CURLOPT_POSTFIELDS, req->body.buf); curl_easy_setopt(req->curl_request.easy, CURLOPT_POSTFIELDSIZE, size); if (httpc_set_header(req, "Content-Length: %zu", size) != 0) return -1; return 0; } int httpc_set_keepalive(struct httpc_request *req, long idle, long interval) { #if (LIBCURL_VERSION_MAJOR >= 7 && LIBCURL_VERSION_MINOR >= 25) if (idle > 0 && interval > 0) { curl_easy_setopt(req->curl_request.easy, CURLOPT_TCP_KEEPALIVE, 1L); curl_easy_setopt(req->curl_request.easy, CURLOPT_TCP_KEEPIDLE, idle); curl_easy_setopt(req->curl_request.easy, CURLOPT_TCP_KEEPINTVL, interval); if (httpc_set_header(req, "Connection: Keep-Alive") < 0 || httpc_set_header(req, "Keep-Alive: timeout=%d", (int) idle) < 0) { return -1; } } else { if (httpc_set_header(req, "Connection: close") < 0) { return -1; } } #else /** < 7.25.0 */ /** Libcurl version < 7.25.0 doesn't support keep-alive feature */ (void) req; (void) idle; (void) interval; #endif return 0; } void httpc_set_low_speed_time(struct httpc_request *req, long low_speed_time) { curl_easy_setopt(req->curl_request.easy, CURLOPT_LOW_SPEED_TIME, low_speed_time); } void httpc_set_low_speed_limit(struct httpc_request *req, long low_speed_limit) { curl_easy_setopt(req->curl_request.easy, CURLOPT_LOW_SPEED_LIMIT, low_speed_limit); } void httpc_set_verbose(struct httpc_request *req, bool curl_verbose) { curl_easy_setopt(req->curl_request.easy, CURLOPT_VERBOSE, 
curl_verbose); } void httpc_set_ca_path(struct httpc_request *req, const char *ca_path) { curl_easy_setopt(req->curl_request.easy, CURLOPT_CAPATH, ca_path); } void httpc_set_ca_file(struct httpc_request *req, const char *ca_file) { curl_easy_setopt(req->curl_request.easy, CURLOPT_CAINFO, ca_file); } int httpc_set_unix_socket(struct httpc_request *req, const char *unix_socket) { #ifdef CURL_VERSION_UNIX_SOCKETS curl_easy_setopt(req->curl_request.easy, CURLOPT_UNIX_SOCKET_PATH, unix_socket); return 0; #else #pragma message "unix sockets not supported, please upgrade libcurl to 7.40.0" (void) req; (void) unix_socket; diag_set(IllegalParams, "tarantool was built without unix socket support," " please upgrade libcurl to 7.40.0 and rebuild"); return -1; #endif } void httpc_set_verify_host(struct httpc_request *req, long verify) { curl_easy_setopt(req->curl_request.easy, CURLOPT_SSL_VERIFYHOST, verify); } void httpc_set_verify_peer(struct httpc_request *req, long verify) { curl_easy_setopt(req->curl_request.easy, CURLOPT_SSL_VERIFYPEER, verify); } void httpc_set_ssl_key(struct httpc_request *req, const char *ssl_key) { curl_easy_setopt(req->curl_request.easy, CURLOPT_SSLKEY, ssl_key); } void httpc_set_ssl_cert(struct httpc_request *req, const char *ssl_cert) { curl_easy_setopt(req->curl_request.easy, CURLOPT_SSLCERT, ssl_cert); } int httpc_execute(struct httpc_request *req, double timeout) { struct httpc_env *env = req->env; curl_easy_setopt(req->curl_request.easy, CURLOPT_WRITEDATA, (void *) req); curl_easy_setopt(req->curl_request.easy, CURLOPT_HEADERDATA, (void *) req); curl_easy_setopt(req->curl_request.easy, CURLOPT_PRIVATE, (void *) &req->curl_request); curl_easy_setopt(req->curl_request.easy, CURLOPT_HTTPHEADER, req->headers); ++env->stat.total_requests; if (curl_execute(&req->curl_request, &env->curl_env, timeout) != CURLM_OK) return -1; long longval = 0; switch (req->curl_request.code) { case CURLE_OK: curl_easy_getinfo(req->curl_request.easy, 
CURLINFO_RESPONSE_CODE, &longval); req->status = (int) longval; /* TODO: get real response string from resp->headers */ req->reason = "Ok"; if (req->status == 200) { ++env->stat.http_200_responses; } else { ++env->stat.http_other_responses; } break; case CURLE_SSL_CACERT: case CURLE_PEER_FAILED_VERIFICATION: /* 495 SSL Certificate Error (nginx non-standard) */ req->status = 495; req->reason = curl_easy_strerror(req->curl_request.code); ++env->stat.failed_requests; break; case CURLE_OPERATION_TIMEDOUT: /* 408 Request Timeout (nginx non-standard) */ req->status = 408; req->reason = curl_easy_strerror(req->curl_request.code); ++env->stat.failed_requests; break; case CURLE_GOT_NOTHING: /* 444 No Response */ req->status = 444; req->reason = curl_easy_strerror(req->curl_request.code); ++env->stat.failed_requests; break; case CURLE_COULDNT_RESOLVE_HOST: case CURLE_COULDNT_CONNECT: /* 595 Connection Problem (AnyEvent non-standard) */ req->status = 595; req->reason = curl_easy_strerror(req->curl_request.code); ++env->stat.failed_requests; break; case CURLE_WRITE_ERROR: /* Diag is already set by curl_write_cb() */ assert(!diag_is_empty(&fiber()->diag)); ++env->stat.failed_requests; return -1; case CURLE_OUT_OF_MEMORY: diag_set(OutOfMemory, 0, "curl", "internal"); ++env->stat.failed_requests; return -1; default: curl_easy_getinfo(req->curl_request.easy, CURLINFO_OS_ERRNO, &longval); errno = longval ? longval : EINVAL; diag_set(SystemError, "curl: %s", curl_easy_strerror(req->curl_request.code)); ++env->stat.failed_requests; return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/main.h0000664000000000000000000000327513306560010016073 0ustar rootroot#ifndef TARANTOOL_H_INCLUDED #define TARANTOOL_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "trivia/util.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ double tarantool_uptime(void); void load_cfg(); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/tt_pthread.h0000664000000000000000000002351013306560010017277 0ustar rootroot#ifndef TARANTOOL_PTHREAD_H_INCLUDED #define TARANTOOL_PTHREAD_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/config.h" #include "trivia/util.h" #include #include #include #if HAVE_PTHREAD_NP_H #include #endif #include "say.h" /** * Assert on any pthread* error in debug mode. In release, * write into the log file where and what has failed. * * Still give the user an opportunity to manually * check for error, by assigning pthread_* function status * to errno and returning -1. */ #define tt_pthread_error(e) \ if (e != 0) { \ say_syserror("%s error %d", __func__, e);\ errno = e; \ } \ assert(e == 0); \ e != 0 ? -1 : 0 /** * Debug/logging friendly wrappers around pthread * functions. 
*/ #ifndef NDEBUG #define tt_pthread_mutex_init(mutex, attr) \ ({ \ pthread_mutexattr_t *p_attr = attr; \ pthread_mutexattr_t errorcheck; \ if (p_attr == NULL) { \ (void) tt_pthread_mutexattr_init(&errorcheck);\ (void) pthread_mutexattr_settype(&errorcheck, \ PTHREAD_MUTEX_ERRORCHECK); \ p_attr = &errorcheck; \ } \ int e__ = pthread_mutex_init(mutex, p_attr);\ (void) tt_pthread_mutexattr_destroy(&errorcheck); \ tt_pthread_error(e__); \ }) #else #define tt_pthread_mutex_init(mutex, attr) \ ({ \ int e__ = pthread_mutex_init(mutex, attr);\ tt_pthread_error(e__); \ }) #endif #define tt_pthread_mutex_destroy(mutex) \ ({ int e__ = pthread_mutex_destroy(mutex); \ tt_pthread_error(e__); \ }) #define tt_pthread_mutex_lock(mutex) \ ({ int e__ = pthread_mutex_lock(mutex); \ say_debug("%s: locking %s", __func__, #mutex);\ tt_pthread_error(e__);\ }) #define tt_pthread_mutex_trylock(mutex) \ ({ int e__ = pthread_mutex_trylock(mutex); \ if (e__ != 0 && e__ != EBUSY) \ say_error("%s error %d at %s:%d", __func__, e__, __FILE__, __LINE__);\ assert(e__ == 0 || e__ == EBUSY); \ e__; \ }) #define tt_pthread_mutex_unlock(mutex) \ ({ int e__ = pthread_mutex_unlock(mutex); \ say_debug("%s: unlocking %s", __func__, #mutex);\ tt_pthread_error(e__); \ }) #define tt_pthread_mutex_destroy(mutex) \ ({ int e__ = pthread_mutex_destroy(mutex); \ tt_pthread_error(e__); \ }) #define tt_pthread_mutexattr_init(attr) \ ({ int e__ = pthread_mutexattr_init(attr); \ tt_pthread_error(e__); \ }) #define tt_pthread_mutexattr_destroy(attr) \ ({ int e__ = pthread_mutexattr_destroy(attr);\ tt_pthread_error(e__); \ }) #define tt_pthread_mutexattr_gettype(attr, type)\ ({ int e__ = pthread_mutexattr_gettype(attr, type);\ tt_pthread_error(e__); \ }) #define tt_pthread_mutexattr_settype(attr, type)\ ({ int e__ = pthread_mutexattr_settype(attr, type);\ tt_pthread_error(e__); \ }) #define tt_pthread_rwlock_init(rwlock, attr) \ ({ \ int e__ = pthread_rwlock_init(rwlock, attr);\ tt_pthread_error(e__); \ }) #define 
tt_pthread_rwlock_destroy(rwlock) \ ({ int e__ = pthread_rwlock_destroy(rwlock); \ tt_pthread_error(e__); \ }) #define tt_pthread_rwlock_rdlock(rwlock) \ ({ int e__ = pthread_rwlock_rdlock(rwlock); \ say_debug("%s: locking %s", __func__, #rwlock);\ tt_pthread_error(e__);\ }) #define tt_pthread_rwlock_tryrdlock(rwlock) \ ({ int e__ = pthread_rwlock_tryrdlock(rwlock); \ if (e__ != 0 && e__ != EBUSY) \ say_error("%s error %d at %s:%d", __func__, e__, __FILE__, __LINE__);\ assert(e__ == 0 || e__ == EBUSY); \ e__; \ }) #define tt_pthread_rwlock_wrlock(rwlock) \ ({ int e__ = pthread_rwlock_wrlock(rwlock); \ say_debug("%s: locking %s", __func__, #rwlock);\ tt_pthread_error(e__);\ }) #define tt_pthread_rwlock_trywrlock(rwlock) \ ({ int e__ = pthread_rwlock_trywrlock(rwlock); \ if (e__ != 0 && e__ != EBUSY) \ say_error("%s error %d at %s:%d", __func__, e__, __FILE__, __LINE__);\ assert(e__ == 0 || e__ == EBUSY); \ e__; \ }) #define tt_pthread_rwlock_unlock(rwlock) \ ({ int e__ = pthread_rwlock_unlock(rwlock); \ say_debug("%s: unlocking %s", __func__, #rwlock);\ tt_pthread_error(e__); \ }) #define tt_pthread_rwlock_destroy(rwlock) \ ({ int e__ = pthread_rwlock_destroy(rwlock); \ tt_pthread_error(e__); \ }) #define tt_pthread_rwlockattr_init(attr) \ ({ int e__ = pthread_rwlockattr_init(attr); \ tt_pthread_error(e__); \ }) #define tt_pthread_rwlockattr_destroy(attr) \ ({ int e__ = pthread_rwlockattr_destroy(attr);\ tt_pthread_error(e__); \ }) #define tt_pthread_rwlockattr_gettype(attr, type)\ ({ int e__ = pthread_rwlockattr_gettype(attr, type);\ tt_pthread_error(e__); \ }) #define tt_pthread_rwlockattr_settype(attr, type)\ ({ int e__ = pthread_rwlockattr_settype(attr, type);\ tt_pthread_error(e__); \ }) #define tt_pthread_condattr_init(attr) \ ({ int e__ = pthread_condattr_init(attr); \ tt_pthread_error(e__); \ }) #define tt_pthread_condattr_destroy(attr) \ ({ int e__ = pthread_condattr_destroy(attr); \ tt_pthread_error(e__); \ }) #define tt_pthread_cond_init(cond, attr) \ ({ 
int e__ = pthread_cond_init(cond, attr);\ tt_pthread_error(e__); \ }) #define tt_pthread_cond_destroy(cond) \ ({ int e__ = pthread_cond_destroy(cond); \ tt_pthread_error(e__); \ }) #define tt_pthread_cond_signal(cond) \ ({ int e__ = pthread_cond_signal(cond); \ tt_pthread_error(e__); \ }) #define tt_pthread_cond_broadcast(cond) \ ({ int e__ = pthread_cond_broadcast(cond); \ tt_pthread_error(e__); \ }) #define tt_pthread_cond_wait(cond, mutex) \ ({ int e__ = pthread_cond_wait(cond, mutex);\ tt_pthread_error(e__); \ }) #define tt_pthread_cond_timedwait(cond, mutex, timeout) \ ({ int e__ = pthread_cond_timedwait(cond, mutex, timeout);\ if (ETIMEDOUT != e__ && e__ != 0) \ say_error("%s error %d", __func__, e__);\ assert(e__ == 0 || e__ == ETIMEDOUT); \ e__; \ }) #define tt_pthread_once(control, function) \ ({ int e__ = pthread_once(control, function);\ tt_pthread_error(e__); \ }) #define tt_pthread_atfork(prepare, parent, child)\ ({ int e__ = pthread_atfork(prepare, parent, child);\ tt_pthread_error(e__); \ }) /** Make sure the created thread blocks all signals, * they are handled in the main thread. 
*/ #define tt_pthread_create(thread, attr, run, arg) \ ({ sigset_t set, oldset; \ sigfillset(&set); \ pthread_sigmask(SIG_BLOCK, &set, &oldset); \ int e__ = pthread_create(thread, attr, run, arg);\ pthread_sigmask(SIG_SETMASK, &oldset, NULL); \ tt_pthread_error(e__); \ }) #define tt_pthread_join(thread, ret) \ ({ int e__ = pthread_join(thread, ret); \ tt_pthread_error(e__); \ }) #define tt_pthread_key_create(key, dtor) \ ({ int e__ = pthread_key_create(key, dtor); \ tt_pthread_error(e__); \ }) #define tt_pthread_key_delete(key) \ ({ int e__ = pthread_key_delete(key); \ tt_pthread_error(e__); \ }) #define tt_pthread_setspecific(key, value) \ ({ int e__ = pthread_setspecific(key, value); \ tt_pthread_error(e__); \ }) #define tt_pthread_getspecific(key) pthread_getspecific(key) /** Set the current thread's name */ static inline void tt_pthread_setname(const char *name) { /* Setting the name fails if the name was too long. Linux limits a * name to 16 bytes (including the trailing NUL), other OS don't * even bother to document the limit. */ char short_name[16]; snprintf(short_name, sizeof(short_name), "%s", name); #if HAVE_PTHREAD_SETNAME_NP pthread_setname_np(pthread_self(), short_name); #elif HAVE_PTHREAD_SETNAME_NP_1 pthread_setname_np(short_name); #elif HAVE_PTHREAD_SET_NAME_NP pthread_set_name_np(pthread_self(), short_name); #endif } static inline void tt_pthread_attr_getstack(pthread_t thread, void **stackaddr, size_t *stacksize) { #if HAVE_PTHREAD_GETATTR_NP /* * GLIBC * * From glib-2.24/sysdeps/nptl/pthread.h pthread_getattr_np(): * It shall be called on uninitialized ATTR and destroyed with * pthread_attr_destroy when no longer needed. 
*/ pthread_attr_t thread_attr; pthread_getattr_np(thread, &thread_attr); pthread_attr_getstack(&thread_attr, stackaddr, stacksize); pthread_attr_destroy(&thread_attr); #elif HAVE_PTHREAD_ATTR_GET_NP /* * xBSD/new macOS * * From pthread_attr_get_np(3): * It is HIGHLY RECOMMENDED to use pthread_attr_init(3) function to * allocate attribute storage. */ pthread_attr_t thread_attr; pthread_attr_init(&thread_attr); pthread_attr_get_np(thread, &thread_attr); pthread_attr_getstack(&thread_attr, stackaddr, stacksize); pthread_attr_destroy(&thread_attr); #elif (HAVE_PTHREAD_GET_STACKSIZE_NP && HAVE_PTHREAD_GET_STACKADDR_NP) /* Old macOS */ *stacksize = pthread_get_stacksize_np(thread); *stackaddr = pthread_get_stackaddr_np(thread); #else #error Unable to get thread stack #endif } #endif /* TARANTOOL_PTHREAD_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/module_header.h0000664000000000000000000000344313306560010017741 0ustar rootroot#ifndef TARANTOOL_MODULE_H_INCLUDED #define TARANTOOL_MODULE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * \file */ #include #include /* va_list */ #include #include /* strerror(3) */ #include #include #include /* ssize_t for Apple */ #include /* ssize_t */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #include "lua.h" /* does not have extern C wrappers */ tarantool_1.9.1.26.g63eb81e3c/src/evio.h0000664000000000000000000001103213306560010016077 0ustar rootroot#ifndef TARANTOOL_EVIO_H_INCLUDED #define TARANTOOL_EVIO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * Asynchronous IO in libev event loop. * Requires a running libev loop. */ #include #include "tarantool_ev.h" #include "sio.h" #include "uri.h" /** * Exception-aware way to add a listening socket to the event * loop. Callbacks are invoked on bind and accept events. * * Coroutines/fibers are not used for port listeners * since listener's job is usually simple and only involves * creating a session for the accepted socket. The session itself * can be built around simple libev callbacks, or around * cooperative multitasking (on_accept callback can create * a fiber and use coio.h (cooperative multi-tasking I/O)) API. * * How to use a service: * struct evio_service *service; * service = malloc(sizeof(struct evio_service)); * evio_service_init(service, ..., on_accept_cb, ...); * evio_service_bind(service); * evio_service_listen(service); * ... * evio_service_stop(service); * free(service); * * If a service is not started, but only initialized, no * dedicated cleanup/destruction is necessary. */ struct evio_service { /** Service name. E.g. 'primary', 'secondary', etc. */ char name[SERVICE_NAME_MAXLEN]; /** Bind host:service, useful for logging */ char host[URI_MAXHOST]; char serv[URI_MAXSERVICE]; /** Interface/port to bind to */ union { struct sockaddr addr; struct sockaddr_storage addrstorage; }; socklen_t addr_len; /** * A callback invoked on every accepted client socket. 
* It's OK to throw an exception in the callback: * when it happens, the exception is logged, and the * accepted socket is closed. */ void (*on_accept)(struct evio_service *, int, struct sockaddr *, socklen_t); void *on_accept_param; /** libev io object for the acceptor socket. */ struct ev_io ev; ev_loop *loop; }; /** Initialize the service. Don't bind to the port yet. */ void evio_service_init(ev_loop *loop, struct evio_service *service, const char *name, void (*on_accept)(struct evio_service *, int, struct sockaddr *, socklen_t), void *on_accept_param); /** Bind service to specified uri */ void evio_service_bind(struct evio_service *service, const char *uri); /** * Listen on bounded socket * * @retval 0 for success */ void evio_service_listen(struct evio_service *service); /** If started, stop event flow and close the acceptor socket. */ void evio_service_stop(struct evio_service *service); void evio_socket(struct ev_io *coio, int domain, int type, int protocol); void evio_close(ev_loop *loop, struct ev_io *evio); static inline bool evio_service_is_active(struct evio_service *service) { return service->ev.fd >= 0; } static inline bool evio_has_fd(struct ev_io *ev) { return ev->fd >= 0; } static inline void evio_timeout_init(ev_loop *loop, ev_tstamp *start, ev_tstamp *delay, ev_tstamp timeout) { *start = ev_monotonic_now(loop); *delay = timeout; } static inline void evio_timeout_update(ev_loop *loop, ev_tstamp start, ev_tstamp *delay) { ev_tstamp elapsed = ev_monotonic_now(loop) - start; *delay = (elapsed >= *delay) ? 0 : *delay - elapsed; } void evio_setsockopt_client(int fd, int family, int type); #endif /* TARANTOOL_EVIO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/errinj.c0000664000000000000000000000414613306560010016431 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "trivia/config.h" #include "trivia/util.h" #include "say.h" #include "errinj.h" #define ERRINJ_MEMBER(n, t, s) { /* .name = */ #n, /* .type = */ t, /* .state = */ s }, struct errinj errinjs[errinj_id_MAX] = { ERRINJ_LIST(ERRINJ_MEMBER) }; struct errinj * errinj_by_name(char *name) { for (enum errinj_id i = 0 ; i < errinj_id_MAX ; i++) { if (strcmp(errinjs[i].name, name) == 0) return &errinjs[i]; } return NULL; } /** * Dump error injection states to the callback function. 
*/ int errinj_foreach(errinj_cb cb, void *cb_ctx) { int i; for (i = 0 ; i < errinj_id_MAX ; i++) { int res = cb(&errinjs[i], cb_ctx); if (res != 0) return res; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/diag.h0000664000000000000000000001414713306565107016067 0ustar rootroot#ifndef TARANTOOL_DIAG_H_INCLUDED #define TARANTOOL_DIAG_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include "say.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { DIAG_ERRMSG_MAX = 512, DIAG_FILENAME_MAX = 256 }; struct type_info; struct error; typedef void (*error_f)(struct error *e); /** * Error diagnostics needs to be equally usable in C and C++ * code. 
This is why there is a common infrastructure for errors. * * Any error or warning or note is represented by an instance of * struct error. * * struct error has the most common members, but more * importantly it has a type descriptor, which makes it possible * to work with C++ exceptions and extra members via reflection, * in pure C. * * (destroy) is there to gracefully delete C++ exceptions from C. */ struct error { error_f destroy; error_f raise; error_f log; const struct type_info *type; int refs; /** Line number. */ unsigned line; /* Source file name. */ char file[DIAG_FILENAME_MAX]; /* Error description. */ char errmsg[DIAG_ERRMSG_MAX]; }; static inline void error_ref(struct error *e) { e->refs++; } static inline void error_unref(struct error *e) { assert(e->refs > 0); --e->refs; if (e->refs == 0) e->destroy(e); } NORETURN static inline void error_raise(struct error *e) { e->raise(e); unreachable(); } static inline void error_log(struct error *e) { e->log(e); } void error_create(struct error *e, error_f create, error_f raise, error_f log, const struct type_info *type, const char *file, unsigned line); void error_format_msg(struct error *e, const char *format, ...); void error_vformat_msg(struct error *e, const char *format, va_list ap); /** * Diagnostics Area - a container for errors */ struct diag { /* \cond private */ struct error *last; /* \endcond private */ }; /** * Create a new diagnostics area * \param diag diagnostics area to initialize */ static inline void diag_create(struct diag *diag) { diag->last = NULL; } /** * Return true if diagnostics area is empty * \param diag diagnostics area to initialize */ static inline bool diag_is_empty(struct diag *diag) { return diag->last == NULL; } /** * Remove all errors from the diagnostics area * \param diag diagnostics area */ static inline void diag_clear(struct diag *diag) { if (diag->last == NULL) return; error_unref(diag->last); diag->last = NULL; } /** * Add a new error to the diagnostics area * \param diag 
diagnostics area * \param e error to add */ static inline void diag_add_error(struct diag *diag, struct error *e) { assert(e != NULL); error_ref(e); diag_clear(diag); diag->last = e; } /** * Move all errors from \a from to \a to. * \param from source * \param to destination * \post diag_is_empty(from) */ static inline void diag_move(struct diag *from, struct diag *to) { diag_clear(to); if (from->last == NULL) return; to->last = from->last; from->last = NULL; } /** * Destroy diagnostics area * \param diag diagnostics area to clean */ static inline void diag_destroy(struct diag *diag) { diag_clear(diag); } /** * Return last error * \return last error * \param diag diagnostics area */ static inline struct error * diag_last_error(struct diag *diag) { return diag->last; } struct diag * diag_get(); NORETURN static inline void diag_raise(void) { struct error *e = diag_last_error(diag_get()); assert(e != NULL); error_raise(e); } static inline void diag_log(void) { struct error *e = diag_last_error(diag_get()); assert(e != NULL); error_log(e); } struct error * BuildOutOfMemory(const char *file, unsigned line, size_t amount, const char *allocator, const char *object); struct error * BuildFiberIsCancelled(const char *file, unsigned line); struct error * BuildTimedOut(const char *file, unsigned line); struct error * BuildChannelIsClosed(const char *file, unsigned line); struct error * BuildLuajitError(const char *file, unsigned line, const char *msg); struct error * BuildIllegalParams(const char *file, unsigned line, const char *format, ...); struct error * BuildSystemError(const char *file, unsigned line, const char *format, ...); struct error * BuildXlogError(const char *file, unsigned line, const char *format, ...); struct index_def; struct error * BuildUnsupportedIndexFeature(const char *file, unsigned line, struct index_def *index_def, const char *what); #define diag_set(class, ...) 
do { \ say_debug("%s at %s:%i", #class, __FILE__, __LINE__); \ struct error *e; \ e = Build##class(__FILE__, __LINE__, ##__VA_ARGS__); \ diag_add_error(diag_get(), e); \ } while (0) #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_DIAG_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/coio_buf.cc0000664000000000000000000000257713306560010017076 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coio_buf.h" tarantool_1.9.1.26.g63eb81e3c/src/cpu_feature.c0000664000000000000000000000526513306560010017445 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trivia/config.h" #include #include #include #include #include "cpu_feature.h" #if defined(HAVE_CPUID) && (defined (__x86_64__) || defined (__i386__)) #include #define SCALE_F sizeof(unsigned long) #if defined (__x86_64__) #define REX_PRE "0x48, " #elif defined (__i386__) #define REX_PRE #endif static uint32_t crc32c_hw_byte(uint32_t crc, unsigned char const *data, unsigned int length) { while (length--) { __asm__ __volatile__( ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1" :"=S"(crc) :"0"(crc), "c"(*data) ); data++; } return crc; } uint32_t crc32c_hw(uint32_t crc, const char *buf, unsigned int len) { unsigned int iquotient = len / SCALE_F; unsigned int iremainder = len % SCALE_F; unsigned long *ptmp = (unsigned long *)buf; while (iquotient--) { __asm__ __volatile__( ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;" :"=S"(crc) :"0"(crc), "c"(*ptmp) ); ptmp++; } if (iremainder) { crc = crc32c_hw_byte(crc, (unsigned char const*)ptmp, iremainder); } return crc; } bool sse42_enabled_cpu() { unsigned int ax, bx, cx, dx; if (__get_cpuid(1, &ax, &bx, &cx, &dx) == 0) return 0; return (cx & (1 << 20)) != 0; } #else /* !(defined (__x86_64__) || defined (__i386__)) */ bool sse42_enabled_cpu() { return false; } #endif tarantool_1.9.1.26.g63eb81e3c/src/httpc.h0000664000000000000000000002053513306560010016267 0ustar rootroot#ifndef TARNATOOL_HTTPC_H_INCLUDED #define TARANTOOL_HTTPC_H_INCLUDED 1 /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "diag.h" #include "curl.h" /** {{{ Environment */ typedef void CURLM; typedef void CURL; struct curl_slist; /** * HTTP Client Statistics */ struct httpc_stat { uint64_t total_requests; uint64_t http_200_responses; uint64_t http_other_responses; uint64_t failed_requests; uint64_t active_requests; }; /** * HTTP Client Environment */ struct httpc_env { /** Curl enviroment. */ struct curl_env curl_env; /** Memory pool for requests */ struct mempool req_pool; /** Statistics */ struct httpc_stat stat; }; /** * @brief Creates new HTTP client environment * @param env pointer to a structure to initialize * @param max_conn The maximum number of entries in connection cache * @retval 0 on success * @retval -1 on error, check diag */ int httpc_env_create(struct httpc_env *ctx, int max_conns); /** * Destroy HTTP client environment * @param env pointer to a structure to destroy */ void httpc_env_destroy(struct httpc_env *env); /** Environment }}} */ /** {{{ Request */ /** * HTTP request */ struct httpc_request { /** Environment */ struct httpc_env *env; /** HTTP headers */ struct curl_slist *headers; /** Buffer for the request body */ struct ibuf body; /** curl resuest. 
*/ struct curl_request curl_request; /** HTTP status code */ int status; /** Error message */ const char *reason; /** buffer of headers */ struct region resp_headers; /** buffer of body */ struct region resp_body; }; /** * @brief Create a new HTTP request * @param ctx - reference to context * @return a new HTTP request object */ struct httpc_request * httpc_request_new(struct httpc_env *env, const char *method, const char *url); /** * @brief Delete HTTP request * @param request - reference to object * @details Should be called even if error in execute appeared */ void httpc_request_delete(struct httpc_request *req); /** * Set HTTP header * @param req request * @param fmt format string * @param ... format arguments */ int httpc_set_header(struct httpc_request *req, const char *fmt, ...); /** * Sets body of request * @param req request * @param body body * @param bytes sizeof body */ int httpc_set_body(struct httpc_request *req, const char *body, size_t size); /** * Set TCP keep-alive probing * @param req request * @param idle delay, in seconds, that the operating system will wait * while the connection is idle before sending keepalive probes * @param interval the interval, in seconds, that the operating system * will wait between sending keepalive probes * @details Does nothing on libcurl < 7.25.0 * @see https://curl.haxx.se/libcurl/c/CURLOPT_TCP_KEEPALIVE.html */ int httpc_set_keepalive(struct httpc_request *req, long idle, long interval); /** * Set the "low speed time" - the time that the transfer speed should be * below the "low speed limit" for the library to consider it too slow and * abort. * @param req request * @param low_speed_time low speed time * @details If the download receives less than "low speed limit" bytes/second * during "low speed time" seconds, the operations is aborted. 
* You could i.e if you have a pretty high speed Connection, * abort if it is less than 2000 bytes/sec during 20 seconds; * @see httpc_set_low_speed_limit() * @see https://curl.haxx.se/libcurl/c/CURLOPT_LOW_SPEED_TIME.html */ void httpc_set_low_speed_time(struct httpc_request *req, long low_speed_time); /** * Set the "low speed limit" - the average transfer speed in bytes per second * that the transfer should be below during "low speed time" seconds for the * library to consider it to be too slow and abort. * @param req request * @param low_speed_limit low speed limit * @details If the download receives less than "low speed limit" bytes/second * during "low speed time" seconds, the operations is aborted. * You could i.e if you have a pretty high speed Connection, * abort if it is less than 2000 bytes/sec during 20 seconds. * @see httpc_set_low_speed_time() * @see https://curl.haxx.se/libcurl/c/CURLOPT_LOW_SPEED_LIMIT.html */ void httpc_set_low_speed_limit(struct httpc_request *req, long low_speed_limit); /** * Enables/Disables libcurl verbose mode * @param req request * @param verbose flag */ void httpc_set_verbose(struct httpc_request *req, bool verbose); /** * Specify directory holding CA certificates * @param req request * @param ca_path path to directory holding one or more certificates * to verify the peer with. The application does not have to keep the string * around after setting this option. */ void httpc_set_ca_path(struct httpc_request *req, const char *ca_path); /** * Specify path to Certificate Authority (CA) bundle * @param req request * @param ca_file - File holding one or more certificates * to verify the peer with. The application does not have to keep the string * around after setting this option. 
* @see https://curl.haxx.se/libcurl/c/CURLOPT_CAINFO.html */ void httpc_set_ca_file(struct httpc_request *req, const char *ca_file); /** * Specify path to Unix domain socket * @param req request * @param unix_socket path to Unix domain socket used as connection * endpoint instead of TCP. The application does not have to keep the string * around after setting this option. * @see https://curl.haxx.se/libcurl/c/CURLOPT_UNIX_SOCKET_PATH.html * @return 0 on success */ int httpc_set_unix_socket(struct httpc_request *req, const char *unix_socket); /** * Enables/disables verification of the certificate's name (CN) against host * @param req request * @param verify flag * @see https://curl.haxx.se/libcurl/c/CURLOPT_SSL_VERIFYHOST.html */ void httpc_set_verify_host(struct httpc_request *req, long verify); /** * Enables/disables verification of the peer's SSL certificate * @param req request * @param verify flag * @see https://curl.haxx.se/libcurl/c/CURLOPT_SSL_VERIFYPEER.html */ void httpc_set_verify_peer(struct httpc_request *req, long verify); /** * Specify path to private key for TLS ans SSL client certificate * @param req request * @param ssl_key - path to the private key. The application does not have to * keep the string around after setting this option. * @see https://curl.haxx.se/libcurl/c/CURLOPT_SSLKEY.html */ void httpc_set_ssl_key(struct httpc_request *req, const char *ssl_key); /** * Specify path to SSL client certificate * @param req request * @param ssl_cert - path to the client certificate. The application does not * have to keep the string around after setting this option. 
* @see https://curl.haxx.se/libcurl/c/CURLOPT_SSLCERT.html */ void httpc_set_ssl_cert(struct httpc_request *req, const char *ssl_cert); /** * This function does async HTTP request * @param request - reference to request object with filled fields * @param timeout - timeout of waiting for libcurl api * @return 0 for success or NULL */ int httpc_execute(struct httpc_request *req, double timeout); /** Request }}} */ #endif /* TARANTOOL_HTTPC_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/sio.cc0000664000000000000000000003163413306565107016113 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "sio.h" #include #include #include #include #include #include #include /* TCP_NODELAY */ #include /* TCP_NODELAY */ #include /* inet_ntoa */ #include #include /* lseek for sending file */ #include /* fstat for sending file */ #ifdef TARGET_OS_LINUX #include /* sendfile system call */ #endif /* #ifdef TARGET_OS_LINUX */ #include "say.h" #include "trivia/util.h" const struct type_info type_SocketError = make_type("SocketError", &type_SystemError); SocketError::SocketError(const char *file, unsigned line, int fd, const char *format, ...) : SystemError(&type_SocketError, file, line) { int save_errno = errno; char buf[DIAG_ERRMSG_MAX]; va_list ap; va_start(ap, format); vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); const char *socketname = sio_socketname(fd); error_format_msg(this, "%s, called on %s", buf, socketname); errno = save_errno; } /** Pretty print socket name and peer (for exceptions) */ const char * sio_socketname(int fd) { static __thread char name[2 * SERVICE_NAME_MAXLEN]; int n = snprintf(name, sizeof(name), "fd %d", fd); if (fd >= 0) { struct sockaddr_storage addr; socklen_t addrlen = sizeof(addr); int rc = getsockname(fd, (struct sockaddr *) &addr, &addrlen); if (rc == 0) { n += snprintf(name + n, sizeof(name) - n, ", aka %s", sio_strfaddr((struct sockaddr *)&addr, addrlen)); } addrlen = sizeof(addr); rc = getpeername(fd, (struct sockaddr *) &addr, &addrlen); if (rc == 0) { n += snprintf(name + n, sizeof(name) - n, ", peer of %s", sio_strfaddr((struct sockaddr *)&addr, addrlen)); } } return name; } /** Get a string representation of a socket option name, * for logging. 
*/ static const char * sio_option_name(int option) { #define CASE_OPTION(opt) case opt: return #opt switch (option) { CASE_OPTION(SO_KEEPALIVE); CASE_OPTION(SO_LINGER); CASE_OPTION(SO_ERROR); CASE_OPTION(SO_REUSEADDR); CASE_OPTION(TCP_NODELAY); #ifdef __linux__ CASE_OPTION(TCP_KEEPCNT); CASE_OPTION(TCP_KEEPINTVL); #endif default: return "undefined"; } #undef CASE_OPTION } /** shut down part of a full-duplex connection */ int sio_shutdown(int fd, int how) { int rc = shutdown(fd, how); if (rc < 0) tnt_raise(SocketError, fd, "shutdown"); return rc; } /** Try to automatically configure a listen backlog. * On Linux, use the system setting, which defaults * to 128. This way a system administrator can tune * the backlog as needed. On other systems, use SOMAXCONN. */ int sio_listen_backlog() { #ifdef TARGET_OS_LINUX FILE *proc = fopen("/proc/sys/net/core/somaxconn", "r"); if (proc) { int backlog; int rc = fscanf(proc, "%d", &backlog); fclose(proc); if (rc == 1) return backlog; } #endif /* TARGET_OS_LINUX */ return SOMAXCONN; } /** Create a TCP socket. */ int sio_socket(int domain, int type, int protocol) { /* AF_UNIX can't use tcp protocol */ if (domain == AF_UNIX) protocol = 0; int fd = socket(domain, type, protocol); if (fd < 0) tnt_raise(SocketError, fd, "socket"); return fd; } /** Get socket flags, raise an exception if error. */ int sio_getfl(int fd) { int flags = fcntl(fd, F_GETFL, 0); if (flags < 0) tnt_raise(SocketError, fd, "fcntl(..., F_GETFL, ...)"); return flags; } /** Set socket flags, raise an exception if error. */ int sio_setfl(int fd, int flag, int on) { int flags = sio_getfl(fd); flags = fcntl(fd, F_SETFL, on ? flags | flag : flags & ~flag); if (flags < 0) tnt_raise(SocketError, fd, "fcntl(..., F_SETFL, ...)"); return flags; } /** Set an option on a socket. 
*/ void sio_setsockopt(int fd, int level, int optname, const void *optval, socklen_t optlen) { int rc = setsockopt(fd, level, optname, optval, optlen); if (rc) { tnt_raise(SocketError, fd, "setsockopt(%s)", sio_option_name(optname)); } } /** Get a socket option value. */ void sio_getsockopt(int fd, int level, int optname, void *optval, socklen_t *optlen) { int rc = getsockopt(fd, level, optname, optval, optlen); if (rc) { tnt_raise(SocketError, fd, "getsockopt(%s)", sio_option_name(optname)); } } /** Connect a client socket to a server. */ int sio_connect(int fd, struct sockaddr *addr, socklen_t addrlen) { /* Establish the connection. */ int rc = connect(fd, (struct sockaddr *) addr, addrlen); if (rc < 0 && errno != EINPROGRESS) { tnt_raise(SocketError, fd, "connect to %s", sio_strfaddr((struct sockaddr *)addr, addrlen)); } return rc; } /** Bind a socket to the given address. */ int sio_bind(int fd, struct sockaddr *addr, socklen_t addrlen) { int rc = bind(fd, addr, addrlen); if (rc < 0 && errno != EADDRINUSE) tnt_raise(SocketError, fd, "bind"); return rc; } /** Mark a socket as accepting connections. */ int sio_listen(int fd) { int rc = listen(fd, sio_listen_backlog()); if (rc < 0 && errno != EADDRINUSE) tnt_raise(SocketError, fd, "listen"); return rc; } /** Accept a client connection on a server socket. */ int sio_accept(int fd, struct sockaddr *addr, socklen_t *addrlen) { /* Accept a connection. */ int newfd = accept(fd, addr, addrlen); if (newfd < 0 && (errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR)) tnt_raise(SocketError, fd, "accept"); return newfd; } /** Read up to 'count' bytes from a socket. */ ssize_t sio_read(int fd, void *buf, size_t count) { ssize_t n = read(fd, buf, count); if (n < 0) { if (errno == EWOULDBLOCK) errno = EINTR; switch (errno) { case EAGAIN: case EINTR: break; /* * Happens typically when the client closes * socket on timeout without reading the previous * query's response completely. Treat the same as * EOF. 
*/ case ECONNRESET: errno = 0; n = 0; break; default: tnt_raise(SocketError, fd, "read(%zd)", count); } } return n; } /** Write up to 'count' bytes to a socket. */ ssize_t sio_write(int fd, const void *buf, size_t count) { ssize_t n = write(fd, buf, count); if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) tnt_raise(SocketError, fd, "write(%zd)", count); return n; } /** Write to a socket with iovec. */ ssize_t sio_writev(int fd, const struct iovec *iov, int iovcnt) { int cnt = iovcnt < IOV_MAX ? iovcnt : IOV_MAX; ssize_t n = writev(fd, iov, cnt); if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) { tnt_raise(SocketError, fd, "writev(%d)", iovcnt); } return n; } /** Blocking I/O writev */ ssize_t sio_writev_all(int fd, struct iovec *iov, int iovcnt) { ssize_t bytes_total = 0; struct iovec *iovend = iov + iovcnt; while (1) { int cnt = iovend - iov; if (cnt > IOV_MAX) cnt = IOV_MAX; ssize_t write_res = writev(fd, iov, cnt); if (write_res < 0) { if (errno == EINTR) continue; tnt_raise(SocketError, fd, "writev(%d)", cnt); } size_t bytes_written = (size_t)write_res; bytes_total += bytes_written; /* * Check for iov < iovend, since otherwise * if iovend->iov_len is 0, iov may go beyond * iovend */ while (bytes_written >= iov->iov_len) { bytes_written -= (iov++)->iov_len; if (iov == iovend) break; } if (iov == iovend) break; iov->iov_base = (char *) iov->iov_base + bytes_written; iov->iov_len -= bytes_written; } return bytes_total; } ssize_t sio_readn_ahead(int fd, void *buf, size_t count, size_t buf_size) { size_t read_count = 0; while (read_count < count) { ssize_t read_res = read(fd, (char *) buf + read_count, buf_size - read_count); if (read_res < 0 && (errno == EWOULDBLOCK || errno == EINTR || errno == EAGAIN)) continue; if (read_res <= 0) tnt_raise(SocketError, fd, "read (%zd)", count); read_count += read_res; } return read_count; } ssize_t sio_writen(int fd, const void *buf, size_t count) { size_t write_count = 0; while 
(write_count < count) { ssize_t write_res = write(fd, (char *) buf + write_count, count - write_count); if (write_res < 0 && (errno == EWOULDBLOCK || errno == EINTR || errno == EAGAIN)) continue; if (write_res <= 0) tnt_raise(SocketError, fd, "write (%zd)", count); write_count += write_res; } return write_count; } static inline off_t sio_lseek(int fd, off_t offset, int whence) { off_t res = lseek(fd, offset, whence); if (res == -1) tnt_raise(SocketError, fd, "lseek"); return res; } #if defined(HAVE_SENDFILE_LINUX) ssize_t sio_sendfile(int sock_fd, int file_fd, off_t *offset, size_t size) { ssize_t send_res = sendfile(sock_fd, file_fd, offset, size); if (send_res < 0 || (size_t)send_res < size) tnt_raise(SocketError, sock_fd, "sendfile"); return send_res; } #else ssize_t sio_sendfile(int sock_fd, int file_fd, off_t *offset, size_t size) { if (offset) sio_lseek(file_fd, *offset, SEEK_SET); const size_t buffer_size = 8192; char buffer[buffer_size]; size_t bytes_sent = 0; while (bytes_sent < size) { size_t to_send_now = MIN(size - bytes_sent, buffer_size); ssize_t n = sio_read(file_fd, buffer, to_send_now); sio_writen(sock_fd, buffer, n); bytes_sent += n; } if (offset) lseek(file_fd, *offset, SEEK_SET); return bytes_sent; } #endif ssize_t sio_recvfile(int sock_fd, int file_fd, off_t *offset, size_t size) { if (offset) sio_lseek(file_fd, *offset, SEEK_SET); const size_t buffer_size = 8192; char buffer[buffer_size]; size_t bytes_read = 0; while (bytes_read < size) { size_t to_read_now = MIN(size - bytes_read, buffer_size); ssize_t n = sio_read(sock_fd, buffer, to_read_now); if (n < 0) return -1; sio_writen(file_fd, buffer, n); bytes_read += n; } if (offset) sio_lseek(file_fd, *offset, SEEK_SET); return bytes_read; } /** Send a message on a socket. 
*/ ssize_t sio_sendto(int fd, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen) { ssize_t n = sendto(fd, buf, len, flags, (struct sockaddr*)dest_addr, addrlen); if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) tnt_raise(SocketError, fd, "sendto(%zd)", len); return n; } /** Receive a message on a socket. */ ssize_t sio_recvfrom(int fd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { ssize_t n = recvfrom(fd, buf, len, flags, (struct sockaddr*)src_addr, addrlen); if (n < 0 && errno != EAGAIN && errno != EWOULDBLOCK && errno != EINTR) tnt_raise(SocketError, fd, "recvfrom(%zd)", len); return n; } /** Get socket peer name. */ int sio_getpeername(int fd, struct sockaddr *addr, socklen_t *addrlen) { if (getpeername(fd, addr, addrlen) < 0) { say_syserror("getpeername"); return -1; } /* XXX: I've no idea where this is copy-pasted from. */ /* if (addr->sin_addr.s_addr == 0) { say_syserror("getpeername: empty peer"); return -1; } */ return 0; } /** Pretty print a peer address. */ const char * sio_strfaddr(struct sockaddr *addr, socklen_t addrlen) { static __thread char name[NI_MAXHOST + _POSIX_PATH_MAX + 2]; switch(addr->sa_family) { case AF_UNIX: if (addrlen >= sizeof(sockaddr_un)) { snprintf(name, sizeof(name), "unix/:%s", ((struct sockaddr_un *)addr)->sun_path); } else { snprintf(name, sizeof(name), "unix/:(socket)"); } break; default: { char host[NI_MAXHOST], serv[NI_MAXSERV]; if (getnameinfo(addr, addrlen, host, sizeof(host), serv, sizeof(serv), NI_NUMERICHOST | NI_NUMERICSERV) == 0) { snprintf(name, sizeof(name), addr->sa_family == AF_INET ? "%s:%s" : "[%s]:%s", host, serv); } else { snprintf(name, sizeof(name), "(host):(port)"); } break; } } return name; } tarantool_1.9.1.26.g63eb81e3c/src/histogram.c0000664000000000000000000000771013306565107017151 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trivia/util.h" #include "memory.h" #include "histogram.h" #include struct histogram * histogram_new(const int64_t *buckets, size_t n_buckets) { struct histogram *hist; assert(n_buckets > 0); for (size_t i = 1; i < n_buckets; i++) assert(buckets[i - 1] < buckets[i]); hist = malloc(sizeof(*hist) + n_buckets * sizeof(*hist->buckets)); if (hist == NULL) return NULL; hist->max = buckets[n_buckets - 1]; hist->total = 0; hist->n_buckets = n_buckets; for (size_t i = 0; i < n_buckets; i++) { hist->buckets[i].count = 0; hist->buckets[i].max = buckets[i]; } return hist; } void histogram_delete(struct histogram *hist) { free(hist); } static struct histogram_bucket * histogram_lookup_bucket(struct histogram *hist, int64_t val) { size_t begin, end, mid; struct histogram_bucket *bucket; begin = 0; end = hist->n_buckets - 1; while (1) { if (begin == end) { bucket = &hist->buckets[begin]; if (val > bucket->max) bucket = &hist->buckets[end]; break; } else { mid = begin / 2 + end / 2; bucket = &hist->buckets[mid]; } if (val > hist->buckets[mid].max) begin = mid + 1; else end = mid; }; if (val <= bucket->max) return bucket; return NULL; } void histogram_collect(struct histogram *hist, int64_t val) { struct histogram_bucket *bucket; bucket = histogram_lookup_bucket(hist, val); if (bucket != NULL) bucket->count++; if (hist->max < val) hist->max = val; hist->total++; } void histogram_discard(struct histogram *hist, int64_t val) { struct histogram_bucket *bucket; bucket = histogram_lookup_bucket(hist, val); if (bucket != NULL) { assert(bucket->count > 0); bucket->count--; } assert(hist->total > 0); hist->total--; } int64_t histogram_percentile(struct histogram *hist, int pct) { size_t count = 0; for (size_t i = 0; i < hist->n_buckets; i++) { struct histogram_bucket *bucket = &hist->buckets[i]; count += bucket->count; if (count * 100 > hist->total * pct) return bucket->max; } return hist->max; } int histogram_snprint(char *buf, int size, struct histogram *hist) { int total = 
0; bool first = true; for (size_t i = 0; i < hist->n_buckets; i++) { int64_t count = hist->buckets[i].count; if (count == 0) continue; int64_t min = (i > 0) ? hist->buckets[i - 1].max + 1 : 0; int64_t max = hist->buckets[i].max; if (!first) SNPRINT(total, snprintf, buf, size, " "); SNPRINT(total, snprintf, buf, size, "[%"PRIi64, min); if (max != min) SNPRINT(total, snprintf, buf, size, "-%"PRIi64, max); SNPRINT(total, snprintf, buf, size, "]:%"PRIi64, count); first = false; } return total; } tarantool_1.9.1.26.g63eb81e3c/src/fiber_pool.h0000664000000000000000000000577113306565107017306 0ustar rootroot#ifndef TARANTOOL_FIBER_POOL_H_INCLUDED #define TARANTOOL_FIBER_POOL_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trivia/config.h" #include "fiber.h" #include "cbus.h" #include "small/rlist.h" #include "salad/stailq.h" #include "tarantool_ev.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { FIBER_POOL_SIZE = 4096, FIBER_POOL_IDLE_TIMEOUT = 1 }; /** * A pool of worker fibers to handle messages, * so that each message is handled in its own fiber. */ struct fiber_pool { struct { /** Cache of fibers which work on incoming messages. */ alignas(CACHELINE_SIZE) struct rlist idle; /** The number of fibers in the pool. */ int size; /** The limit on the number of fibers working on tasks. */ int max_size; /** * Fibers in leave the pool if they have nothing to do * for longer than this. */ float idle_timeout; /** Staged messages (for fibers to work on) */ struct stailq output; /** Timer for idle workers */ struct ev_timer idle_timer; /** Condition for worker exit signaling */ struct fiber_cond worker_cond; }; struct { /** The consumer thread loop. */ alignas(CACHELINE_SIZE) struct ev_loop *consumer; /** cbus endpoint to fetch messages from */ struct cbus_endpoint endpoint; }; }; /** * Initialize a fiber pool and connect it to a pipe. Currently * must be done before the pipe is actively used by a bus. */ void fiber_pool_create(struct fiber_pool *pool, const char *name, int max_pool_size, float idle_timeout); /** * Destroy a fiber pool */ void fiber_pool_destroy(struct fiber_pool *pool); #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_FIBER_POOL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/title.h0000664000000000000000000000565613306560010016275 0ustar rootroot#ifndef TARANTOOL_TITLE_H_INCLUDED #define TARANTOOL_TITLE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * * tarantool script.lua : my lovely pony * * ^^^^^^^^^^ ^^^^^^^^^ ^^^^^^^^^^^^^^ * ^^^^^^^^^ script name status custom title * interpretor name * * * Parts missing: * * 1) no custom title * * tarantool script.lua * * 2) script name missing * * tarantool : my lovely pony * * 3) scriptname.matches(tarantool.*) * * tarantoolctl : my lovely pony * * 4) no status * * tarantool script.lua: my lovely pony */ #include #if defined(__cplusplus) extern "C" { #endif /** * Prepares for customizing process title but doesn't change the * title yet. Creates and returns a copy of argv if necessary, may * relocate environ as well. * * On Linux customized title is writen on top of argv/environ memory block. 
*/ char **title_init(int argc, char **argv); void title_free(int argc, char **argv); /** generate and update process title */ void title_update(); /** query current title */ const char *title_get(); /* parts: invoke title_update() to propagate changes */ /* interpretor name */ void title_set_interpretor_name(const char *name); const char *title_get_interpretor_name(); /* script name */ void title_set_script_name(const char *name); const char *title_get_script_name(); /* custom */ void title_set_custom(const char *); const char *title_get_custom(); /* status */ void title_set_status(const char *); const char *title_get_status(); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* TARANTOOL_TITLE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/memory.c0000664000000000000000000000365113306560010016450 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "memory.h" #include "small/quota.h" struct slab_arena runtime; void memory_init() { static struct quota runtime_quota; const size_t SLAB_SIZE = 4 * 1024 * 1024; /* default quota initialization */ quota_init(&runtime_quota, QUOTA_MAX); /* No limit on the runtime memory. */ slab_arena_create(&runtime, &runtime_quota, 0, SLAB_SIZE, MAP_PRIVATE); } void memory_free() { /* * If this is called from a fiber != sched, then * %rsp is pointing at the memory that we * would be trying to unmap. Don't. */ #if 0 slab_arena_destroy(&runtime); #endif } tarantool_1.9.1.26.g63eb81e3c/src/coio_buf.h0000664000000000000000000000601613306560010016730 0ustar rootroot#ifndef TARANTOOL_COIO_BUF_H_INCLUDED #define TARANTOOL_COIO_BUF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coio.h" #include /** Buffered cooperative IO */ /** * Read at least sz bytes, buffered. * Return the number of bytes read (can be less than n in case * of EOF). */ static inline ssize_t coio_bread(struct ev_io *coio, struct ibuf *buf, size_t sz) { ibuf_reserve_xc(buf, sz); ssize_t n = coio_read_ahead(coio, buf->wpos, sz, ibuf_unused(buf)); buf->wpos += n; return n; } /** * Read at least sz bytes buffered or until a timeout reached. * Return the amount of bytes read (can be less than sz * in case of EOF or timeout). */ static inline ssize_t coio_bread_timeout(struct ev_io *coio, struct ibuf *buf, size_t sz, ev_tstamp timeout) { ibuf_reserve_xc(buf, sz); ssize_t n = coio_read_ahead_timeout(coio, buf->wpos, sz, ibuf_unused(buf), timeout); buf->wpos += n; return n; } /** Read at least sz bytes, buffered. Throw an exception in case of EOF. */ static inline ssize_t coio_breadn(struct ev_io *coio, struct ibuf *buf, size_t sz) { ibuf_reserve_xc(buf, sz); ssize_t n = coio_readn_ahead(coio, buf->wpos, sz, ibuf_unused(buf)); buf->wpos += n; return n; } /** Reat at least sz bytes, buffered. Throw an exception in case * of EOF. * @return the number of bytes read. Can be less than sz in * case of timeout. 
*/ static inline ssize_t coio_breadn_timeout(struct ev_io *coio, struct ibuf *buf, size_t sz, ev_tstamp timeout) { ibuf_reserve_xc(buf, sz); ssize_t n = coio_readn_ahead_timeout(coio, buf->wpos, sz, ibuf_unused(buf), timeout); buf->wpos += n; return n; } #endif /* TARANTOOL_COIO_BUF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/coio.h0000664000000000000000000001362613306565107016115 0ustar rootroot#ifndef TARANTOOL_COIO_H_INCLUDED #define TARANTOOL_COIO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "fiber.h" #include "trivia/util.h" #if defined(__cplusplus) #include "evio.h" /** * Co-operative I/O * Yield the current fiber until IO is ready. 
*/ struct coio_service { struct evio_service evio_service; /* Fiber function. */ fiber_func handler; /** Passed to the created fiber. */ void *handler_param; }; int coio_connect_timeout(struct ev_io *coio, struct uri *uri, struct sockaddr *addr, socklen_t *addr_len, ev_tstamp timeout); static inline int coio_connect(struct ev_io *coio, struct uri *uri, struct sockaddr *addr, socklen_t *addr_len) { return coio_connect_timeout(coio, uri, addr, addr_len, TIMEOUT_INFINITY); } void coio_bind(struct ev_io *coio, struct sockaddr *addr, socklen_t addrlen); int coio_accept(struct ev_io *coio, struct sockaddr *addr, socklen_t addrlen, ev_tstamp timeout); void coio_create(struct ev_io *coio, int fd); static inline void coio_close(ev_loop *loop, struct ev_io *coio) { return evio_close(loop, coio); } ssize_t coio_read_ahead_timeout(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz, ev_tstamp timeout); static inline void coio_timeout_init(ev_tstamp *start, ev_tstamp *delay, ev_tstamp timeout) { return evio_timeout_init(loop(), start, delay, timeout); } static inline void coio_timeout_update(ev_tstamp start, ev_tstamp *delay) { return evio_timeout_update(loop(), start, delay); } /** * Reat at least sz bytes, with readahead. * * Returns 0 in case of EOF. 
*/ static inline ssize_t coio_read_ahead(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz) { return coio_read_ahead_timeout(coio, buf, sz, bufsiz, TIMEOUT_INFINITY); } ssize_t coio_readn_ahead(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz); static inline ssize_t coio_read(struct ev_io *coio, void *buf, size_t sz) { return coio_read_ahead(coio, buf, sz, sz); } static inline ssize_t coio_read_timeout(struct ev_io *coio, void *buf, size_t sz, ev_tstamp timeout) { return coio_read_ahead_timeout(coio, buf, sz, sz, timeout); } static inline ssize_t coio_readn(struct ev_io *coio, void *buf, size_t sz) { return coio_readn_ahead(coio, buf, sz, sz); } ssize_t coio_readn_ahead_timeout(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz, ev_tstamp timeout); ssize_t coio_write_timeout(struct ev_io *coio, const void *buf, size_t sz, ev_tstamp timeout); static inline void coio_write(struct ev_io *coio, const void *buf, size_t sz) { coio_write_timeout(coio, buf, sz, TIMEOUT_INFINITY); } ssize_t coio_writev_timeout(struct ev_io *coio, struct iovec *iov, int iovcnt, size_t size, ev_tstamp timeout); static inline ssize_t coio_writev(struct ev_io *coio, struct iovec *iov, int iovcnt, size_t size) { return coio_writev_timeout(coio, iov, iovcnt, size, TIMEOUT_INFINITY); } ssize_t coio_sendto_timeout(struct ev_io *coio, const void *buf, size_t sz, int flags, const struct sockaddr *dest_addr, socklen_t addrlen, ev_tstamp timeout); ssize_t coio_recvfrom_timeout(struct ev_io *coio, void *buf, size_t sz, int flags, struct sockaddr *src_addr, socklen_t addrlen, ev_tstamp timeout); void coio_service_init(struct coio_service *service, const char *name, fiber_func handler, void *handler_param); /** Wait until the service binds to the port. */ void coio_service_start(struct evio_service *service, const char *uri); void coio_stat_init(ev_stat *stat, const char *path); void coio_stat_stat_timeout(ev_stat *stat, ev_tstamp delay); /** * Wait for a child to end. 
* @note this is a cancellation point (can throw * FiberIsCancelled). * * @retval exit status of the child. * * This call only works in the main thread. */ int coio_waitpid(pid_t pid); extern "C" { #endif /* defined(__cplusplus) */ /** \cond public */ enum { /** READ event */ COIO_READ = 0x1, /** WRITE event */ COIO_WRITE = 0x2, }; /** * Wait until READ or WRITE event on socket (\a fd). Yields. * \param fd - non-blocking socket file description * \param events - requested events to wait. * Combination of TNT_IO_READ | TNT_IO_WRITE bit flags. * \param timeoout - timeout in seconds. * \retval 0 - timeout * \retval >0 - returned events. Combination of TNT_IO_READ | TNT_IO_WRITE * bit flags. */ API_EXPORT int coio_wait(int fd, int event, double timeout); /** * Close the fd and wake any fiber blocked in * coio_wait() call on this fd. */ API_EXPORT int coio_close(int fd); /** \endcond public */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_COIO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/0000775000000000000000000000000013306565107015551 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/csv/0000775000000000000000000000000013306560010016330 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/csv/CMakeLists.txt0000664000000000000000000000016313306560010021070 0ustar rootrootset(lib_sources csv.c ) set_source_files_compile_flags(${lib_sources}) add_library(csv STATIC ${lib_sources}) tarantool_1.9.1.26.g63eb81e3c/src/lib/csv/csv.h0000664000000000000000000001021213306560010017270 0ustar rootroot#ifndef TARANTOOL_CSV_H_INCLUDED #define TARANTOOL_CSV_H_INCLUDED /* * Copyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif typedef void (*csv_emit_row_t)(void *ctx); typedef void (*csv_emit_field_t)(void *ctx, const char *field, const char *end); struct csv { void *emit_ctx; csv_emit_row_t emit_row; csv_emit_field_t emit_field; char delimiter; char quote_char; char prev_symbol; int error_status; int ending_spaces; void *(*realloc)(void*, size_t); int state; char *buf; char *bufp; size_t buf_len; }; enum csv_parser_option { CSV_OPT_DELIMITER, CSV_OPT_QUOTE, CSV_OPT_REALLOC, CSV_OPT_EMIT_FIELD, CSV_OPT_EMIT_ROW, CSV_OPT_EMIT_CTX }; enum csv_iteraion_state { CSV_IT_OK, CSV_IT_EOL, CSV_IT_NEEDMORE, CSV_IT_EOF, CSV_IT_ERROR }; enum csv_parser_state { CSV_LEADING_SPACES, CSV_OUT_OF_QUOTES, CSV_IN_QUOTES, CSV_QUOTE_OPENING, CSV_QUOTE_CLOSING, CSV_LINE_BREAKING, CSV_NEWFIELD, CSV_END_OF_LAST_LINE }; enum csv_error_status { CSV_ER_OK, CSV_ER_INVALID, CSV_ER_MEMORY_ERROR }; void csv_create(struct csv *csv); void csv_destroy(struct csv *csv); /** * Set a parser option. 
*/ void csv_setopt(struct csv *csv, int opt, ...); /** * Parse input and call emit_row/emit_line. * Save tail to inside buffer, * next call will concatenate tail and string from args */ void csv_parse_chunk(struct csv *csv, const char *s, const char *end); /** * emits all remaining symbols from buffer */ void csv_finish_parsing(struct csv *csv); /** * @return 0 is ok */ int csv_get_error_status(struct csv *csv); /** * @brief The csv_iterator struct allows iterate field by field through csv */ struct csv_iterator { struct csv *csv; const char *buf_begin; //input buffer const char *buf_end; const char *field; //output buffer size_t field_len; }; void csv_iterator_create(struct csv_iterator *it, struct csv *csv); /** * Receives next element from csv * element is field or end of line * @return iteration state */ int csv_next(struct csv_iterator *); /** * @brief csv_feed delivers buffer to iterator * empty buffer means end of iteration */ void csv_feed(struct csv_iterator *it, const char *buf, size_t buf_len); /** * @brief csv_escape_field prepares field to out in file. * Adds pair quote and if there is comma or linebreak in field, adds surrounding quotes. * At worst escaped field will 2 times more symbols than input field. * @return length of escaped field or -1 if not enough space in buffer. */ size_t csv_escape_field(struct csv *csv, const char *field, size_t field_len, char *dst, size_t dst_size); static inline const char * csv_iterator_get_field(struct csv_iterator *it) { return it->field; } static inline size_t csv_iterator_get_field_len(struct csv_iterator *it) { return it->field_len; } #if defined(__cplusplus) } #endif /* extern "C" */ #endif tarantool_1.9.1.26.g63eb81e3c/src/lib/csv/csv.c0000664000000000000000000002356313306560010017300 0ustar rootroot/* * Copyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "csv.h" #include #include #include #include #include #include #include static void csv_emit_row_empty(void *ctx) { (void) ctx; } static void csv_emit_field_empty(void *ctx, const char *field, const char *end) { (void) ctx; (void) field; (void) end; } void csv_create(struct csv *csv) { memset(csv, 0, sizeof(struct csv)); csv->delimiter= ','; csv->quote_char = '\"'; csv->realloc = realloc; csv->emit_field = csv_emit_field_empty; csv->emit_row = csv_emit_row_empty; } void csv_destroy(struct csv *csv) { if (csv->buf) { csv->realloc(csv->buf, 0); csv->buf = NULL; } } int csv_isvalid(struct csv *csv) { if (csv->error_status == CSV_ER_OK && csv->state == CSV_IN_QUOTES) csv->error_status = CSV_ER_INVALID; return !csv->error_status; } int csv_get_error_status(struct csv *csv) { return csv->error_status; } void csv_setopt(struct csv *csv, int opt, ...) { va_list args; va_start(args, opt); switch(opt) { case CSV_OPT_DELIMITER: csv->delimiter = va_arg(args, int); break; case CSV_OPT_QUOTE: csv->quote_char = va_arg(args, int); break; case CSV_OPT_REALLOC: csv->realloc = va_arg(args, void* (*)(void*, size_t)); break; case CSV_OPT_EMIT_FIELD: csv->emit_field = va_arg(args, csv_emit_field_t); break; case CSV_OPT_EMIT_ROW: csv->emit_row = va_arg(args, csv_emit_row_t); break; case CSV_OPT_EMIT_CTX: csv->emit_ctx = va_arg(args, void*); break; default: unreachable(); } va_end(args); } /** * both of methods (emitting and iterating) are implementing by one function * firstonly == true means iteration method. 
* @return unprocessed tail **/ const char * csv_parse_impl(struct csv *csv, const char *s, const char *end, bool firstonly) { if (end - s == 0) return NULL; assert(end - s > 0); assert(csv->emit_field); assert(csv->emit_row); for (const char *p = s; p != end; p++) { bool is_line_end = (*p == '\n' || *p == '\r'); /* realloc buffer */ if (csv->buf == NULL || (csv->bufp && csv->buf_len < (size_t)(csv->bufp - csv->buf + 1))) { size_t new_size = csv->buf_len * 2; if (csv->buf_len == 0 || csv->buf == NULL) new_size = 256; char *new_buf = (char *)csv->realloc(csv->buf, new_size); if (new_buf == NULL) { csv->error_status = CSV_ER_MEMORY_ERROR; return NULL; } csv->buf_len = new_size; csv->bufp = csv->bufp - csv->buf + new_buf; csv->buf = new_buf; } /* \r\n (or \n\r) linebreak, not in quotes */ if (is_line_end && csv->state != CSV_IN_QUOTES && *p != csv->prev_symbol && (csv->prev_symbol == '\n' || csv->prev_symbol == '\r')) { csv->prev_symbol = '\0'; continue; } csv->prev_symbol = *p; /* 2 switches to avoid code dublicates */ switch (csv->state) { case CSV_LEADING_SPACES: csv->bufp = csv->buf; if (*p == ' ') /* skip spaces */ continue; csv->state = CSV_OUT_OF_QUOTES; /* symbol not handled, continue to the next switch */ break; case CSV_QUOTE_OPENING: if (*p == csv->quote_char && csv->bufp) { /* double-quote "" */ *csv->bufp++ = csv->quote_char; csv->state = CSV_OUT_OF_QUOTES; continue; } csv->state = CSV_IN_QUOTES; /* symbol not handled, continue to the next switch */ break; case CSV_QUOTE_CLOSING: if (*p == csv->quote_char) { /* double-quote "" */ *csv->bufp++ = csv->quote_char; csv->state = CSV_IN_QUOTES; continue; } csv->state = CSV_OUT_OF_QUOTES; /* symbol not handled, continue to the next switch */ break; } switch (csv->state) { case CSV_OUT_OF_QUOTES: if (is_line_end || *p == csv->delimiter) { /* end of field */ csv->state = CSV_LEADING_SPACES; csv->bufp -= csv->ending_spaces; if (firstonly) { csv->state = CSV_NEWFIELD; return p; } else { csv->emit_field(csv->emit_ctx, 
csv->buf, csv->bufp); } csv->bufp = csv->buf; } else if (*p == csv->quote_char) { csv->state = CSV_QUOTE_OPENING; } else { *csv->bufp++ = *p; } if (*p == ' ') { csv->ending_spaces++; } else { csv->ending_spaces = 0; } if (is_line_end) { /* * bufp == buf means an empty field, * but bufp == 0 means no field at the moment, * it may be an end of the line or file */ csv->bufp = 0; csv->emit_row(csv->emit_ctx); } break; case CSV_IN_QUOTES: /* * Bufp can became NULL in two cases: * CSV_NEWFIELD and CSV_OUT_OF_QUOTES. * * In a case of 'newfield' the csv after * nullifying bufp and returning to the * iteration starts from * CSV_LEADING_SPACES. Here bufp is set * to not NULL (see 'leading_spaces'). * * In a case of 'out_of_quotes' it can be * set to NULL only if * is_line_end == true. So at the * beginning of 'out_of_quotes' * is_line_end was true also * (see "if (is_line_end || ..." above). * In this 'if' the state of the csv is * set to CSV_LEADING_SPACES, so on a * next iteration the bufp are set to not * NULL. 
*/ assert(csv->bufp != NULL); if (*p == csv->quote_char) { csv->state = CSV_QUOTE_CLOSING; } else { *csv->bufp++ = *p; } break; case CSV_NEWFIELD: csv->bufp = csv->buf; csv->state = CSV_LEADING_SPACES; if (is_line_end) { csv->bufp = 0; if (p + 1 == end) return NULL; else return p + 1; } break; } } return end; } void csv_parse_chunk(struct csv *csv, const char *s, const char *end) { csv_parse_impl(csv, s, end, false); } void csv_finish_parsing(struct csv *csv) { if (csv_isvalid(csv)){ if (csv->bufp) { csv->bufp -= csv->ending_spaces; csv->emit_field(csv->emit_ctx, csv->buf, csv->bufp); csv->emit_row(csv->emit_ctx); } if (csv->buf) csv->realloc(csv->buf, 0); csv->bufp = NULL; csv->buf = NULL; csv->buf_len = 0; } } void csv_iterator_create(struct csv_iterator *it, struct csv *csv) { memset(it, 0, sizeof(struct csv_iterator)); it->csv = csv; } /** * next iteration step **/ int csv_next(struct csv_iterator *it) { it->field = NULL; it->field_len = 0; if (it->buf_begin == NULL) /* buffer isn't set */ return CSV_IT_NEEDMORE; /** * length of buffer is zero * it means end of file, but if there is no \n * function must emit last field, EOL and EOF. 
**/ if (it->buf_begin == it->buf_end) { /** bufp == buf means empty field, * but bufp == 0 means no field at the moment, it may be * end of line or end of file **/ if (it->csv->bufp == NULL) { /* nothing to emit, end of file */ return CSV_IT_EOF; } if (!it->csv->error_status && !csv_isvalid(it->csv)) { it->csv->realloc(it->csv->buf, 0); it->csv->buf = NULL; it->csv->bufp = NULL; it->csv->buf_len = 0; return CSV_IT_ERROR; } if (it->csv->state != CSV_END_OF_LAST_LINE) { /* last field */ it->csv->state = CSV_END_OF_LAST_LINE; it->csv->bufp -= it->csv->ending_spaces; it->field = it->csv->buf; it->field_len = it->csv->bufp - it->csv->buf; it->csv->bufp = it->csv->buf; return CSV_IT_OK; } if (it->csv->state == CSV_END_OF_LAST_LINE) { /* last line */ it->csv->realloc(it->csv->buf, 0); it->csv->buf = NULL; it->csv->bufp = NULL; it->csv->buf_len = 0; return CSV_IT_EOL; } } const char *tail = csv_parse_impl(it->csv, it->buf_begin, it->buf_end, true); if (csv_get_error_status(it->csv) == CSV_ER_MEMORY_ERROR) return CSV_IT_ERROR; it->buf_begin = tail; if (tail == it->buf_end) /* buffer is empty */ return CSV_IT_NEEDMORE; /* bufp == NULL means end of line */ if (it->csv->bufp == NULL) return CSV_IT_EOL; /* return field via iterator structure */ it->field = it->csv->buf; it->field_len = it->csv->bufp - it->csv->buf; return CSV_IT_OK; } void csv_feed(struct csv_iterator *it, const char *buf, size_t buf_len) { it->buf_begin = buf; it->buf_end = buf + buf_len; } size_t csv_escape_field(struct csv *csv, const char *field, size_t field_len, char *dst, size_t buf_size) { char *p = dst; (void) buf_size; int inquotes = 0; /* surround quotes, only if there is delimiter \n or \r */ if (memchr(field, csv->delimiter, field_len) || memchr(field, '\n', field_len) || memchr(field, '\r', field_len)) { inquotes = 1; *p++ = csv->quote_char; } while (*field) { /* double-quote "" */ if (*field == csv->quote_char) { assert((size_t)(p - dst) < buf_size); *p++ = csv->quote_char; } assert((size_t)(p - 
dst) < buf_size); *p++ = *field++; } /* adds ending quote */ if (inquotes) { assert((size_t)(p - dst) < buf_size); *p++ = csv->quote_char; } *p = 0; return p - dst; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/0000775000000000000000000000000013306562360016657 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/0000755000000000000000000000000013306562360017634 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/obuf.result0000644000000000000000000000005613306562360022030 0ustar rootroot *** obuf_basic *** *** obuf_basic: done *** tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/quota_lessor.result0000644000000000000000000000223413306562360023615 0ustar rootroot1..2 1..23 ok 1 - lease 100 bytes ok 2 - leased 100 bytes ok 3 - UNIT_SIZE - 100 available ok 4 - source quota used ok 5 - lease 200 bytes ok 6 - leased 300 bytes at all ok 7 - UNIT_SIZE - 300 available ok 8 - source quota used did not change ok 9 - lease big size ok 10 - leased size ok 11 - available size ok 12 - update source quota used ok 13 - end small lease ok 14 - decrease leased ok 15 - source quota did not change - too small size to free ok 16 - decrease leased with big chunk ok 17 - return big chunks into source quota ok 18 - release source quota ok 19 - lessor is empty ok 20 - lessor avoids oscillation ok 21 - source quota isn't empty ok 22 - lessor has no memory ok 23 - source quota is empty ok 1 - subtests 1..12 ok 1 - lease 1Mb ok 2 - available 0 ok 3 - leased 1Mb ok 4 - source quota used ok 5 - lease too big ok 6 - hard lease ok 7 - leased changed ok 8 - available the part of 1MB ok 9 - source quota fully used ok 10 - lessor is empty ok 11 - lessor is empty ok 12 - sourcr quota is empty ok 2 - subtests tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/small_alloc.result0000644000000000000000000000017013306562360023354 0ustar rootroot *** small_alloc_basic *** *** small_alloc_basic: done *** *** small_alloc_large *** *** small_alloc_large: done *** 
tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/suite.ini0000644000000000000000000000007413306562360021467 0ustar rootroot[default] core = unittest description = libsmall unit tests tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/slab_arena.c0000644000000000000000000000221513306562360022067 0ustar rootroot#include #include #include #include #include #include #include "unit.h" void slab_arena_print(struct slab_arena *arena) { printf("arena->prealloc = %zu\narena->maxalloc = %zu\n" "arena->used = %zu\narena->slab_size = %u\n", arena->prealloc, quota_total(arena->quota), arena->used, arena->slab_size); } int main() { struct quota quota; struct slab_arena arena; quota_init("a, 0); slab_arena_create(&arena, "a, 0, 0, MAP_PRIVATE); slab_arena_print(&arena); slab_arena_destroy(&arena); quota_init("a, SLAB_MIN_SIZE); slab_arena_create(&arena, "a, 1, 1, MAP_PRIVATE); slab_arena_print(&arena); void *ptr = slab_map(&arena); slab_arena_print(&arena); void *ptr1 = slab_map(&arena); printf("going beyond the limit: %s\n", ptr1 ? 
"(ptr)" : "(nil)"); slab_arena_print(&arena); slab_unmap(&arena, ptr); slab_unmap(&arena, ptr1); slab_arena_print(&arena); slab_arena_destroy(&arena); quota_init("a, 2000000); slab_arena_create(&arena, "a, 3000000, 1, MAP_PRIVATE); slab_arena_print(&arena); slab_arena_destroy(&arena); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/region.c0000644000000000000000000000224613306562360021267 0ustar rootroot#include #include #include #include "unit.h" struct slab_cache cache; struct slab_arena arena; struct quota quota; void region_basic() { header(); struct region region; region_create(®ion, &cache); fail_unless(region_used(®ion) == 0); void *ptr = region_alloc(®ion, 10); fail_unless(ptr); fail_unless(region_used(®ion) == 10); ptr = region_alloc(®ion, 10000000); fail_unless(ptr); fail_unless(region_used(®ion) == 10000010); region_free(®ion); fail_unless(region_used(®ion) == 0); footer(); } void region_test_truncate() { header(); struct region region; region_create(®ion, &cache); void *ptr = region_alloc(®ion, 10); fail_unless(ptr); size_t used = region_used(®ion); region_alloc(®ion, 10000); region_alloc(®ion, 10000000); region_truncate(®ion, used); fail_unless(region_used(®ion) == used); region_free(®ion); footer(); } int main() { quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); region_basic(); region_test_truncate(); slab_cache_destroy(&cache); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/rb_aug.c0000644000000000000000000001061213306562360021237 0ustar rootroot#include #include #include #include #include #include #include #include "unit.h" #define RB_COMPACT 1 #include "../small/rb.h" /* * Weighted binary search tree. * * This is an augmented version of a standard binary search tree * where each node maintains the size of the sub-tree rooted at it. * Allows to efficiently (in log N) calculate the rank of each node. */ struct wtree_node { /* Link in a binary search tree. 
*/ rb_node(struct wtree_node) in_tree; /* Value associated with this node. */ int value; /* Size of the sub-tree rooted at this node. */ int weight; }; typedef struct wtree_node wtree_node_t; typedef rb_tree(struct wtree_node) wtree_t; rb_proto_ext_key(static, wtree_, wtree_t, wtree_node_t, int); static int wtree_node_cmp(const wtree_node_t *a, const wtree_node_t *b) { int rc = a->value < b->value ? -1 : a->value > b->value; if (rc == 0) rc = a < b ? -1 : a > b; return rc; } static int wtree_key_node_cmp(const int key, const wtree_node_t *node) { return key < node->value ? -1 : key > node->value; } static void wtree_node_aug(wtree_node_t *node, const wtree_node_t *left, const wtree_node_t *right) { node->weight = 1; if (left != NULL) node->weight += left->weight; if (right != NULL) node->weight += right->weight; } rb_gen_ext_key_aug(MAYBE_UNUSED static, wtree_, wtree_t, wtree_node_t, in_tree, wtree_node_cmp, int, wtree_key_node_cmp, wtree_node_aug); static void wtree_selfcheck(wtree_t *tree) { wtree_node_t *node, *prev, *left, *right; /* Check node order. */ struct wtree_iterator it; wtree_ifirst(tree, &it); prev = NULL; while ((node = wtree_inext(&it)) != NULL) { if (prev != NULL) fail_unless(prev->value <= node->value); prev = node; } /* Check node weights. */ struct wtree_walk walk; wtree_walk_init(&walk, tree); while ((node = wtree_walk_next(&walk, RB_WALK_RIGHT | RB_WALK_LEFT, &left, &right)) != NULL) { int left_weight = left != NULL ? left->weight : 0; int right_weight = right != NULL ? right->weight : 0; fail_unless(node->weight == left_weight + right_weight + 1); } } /* * Return the number of elements in the tree that are * less than the given value. 
*/ static int wtree_rank(wtree_t *tree, int value) { int dir = 0; int count = 0; struct wtree_walk walk; wtree_walk_init(&walk, tree); wtree_node_t *node, *left, *right; while ((node = wtree_walk_next(&walk, dir, &left, &right)) != NULL) { if (value > node->value) { /* * All nodes in the left sub-tree are less * than the given value. Account them and * inspect the right sub-tree. */ if (left != NULL) count += left->weight; count++; /* current node */ dir = RB_WALK_RIGHT; } else { /* * The given value is less than or equal * to any value in the right sub-tree. * Inspect the left sub-tree. */ dir = RB_WALK_LEFT; } } return count; } static int wtree_rank_slow(wtree_t *tree, int value) { int count = 0;; struct wtree_iterator it; wtree_isearch_lt(tree, value, &it); wtree_node_t *node; while ((node = wtree_iprev(&it)) != NULL) count++; return count; } static void check_aug(void) { header(); int count = 0; /* actual number of tree nodes */ int max_count = 3000; /* max number of tree nodes */ int max_value = 3000; /* max node value */ int remove_prob = 20; /* chance of removing a node on each iteration */ int check_count = 100; /* number of random values to check rank * calculation against */ wtree_node_t *n; wtree_node_t **nodes = calloc(max_count, sizeof(*nodes)); /* Generate a random tree. */ wtree_t tree; wtree_new(&tree); for (int i = 0; i < max_count; i++) { if (count > 0 && rand() % 100 < remove_prob) { /* Remove a random node. */ int idx = rand() % count; n = nodes[idx]; nodes[idx] = nodes[--count]; nodes[count] = NULL; wtree_remove(&tree, n); free(n); } /* Insert a node with a random value. 
*/ n = nodes[count++] = malloc(sizeof(*n)); n->value = rand() % max_value + 1; wtree_insert(&tree, n); } wtree_selfcheck(&tree); for (int i = 0; i < check_count; i++) { int value = rand() % (3 * max_value / 2) - max_value / 4; fail_unless(wtree_rank(&tree, value) == wtree_rank_slow(&tree, value)); } for (int i = 0; i < count; i++) free(nodes[i]); free(nodes); footer(); } int main() { srand(time(NULL)); check_aug(); return 0; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/small_alloc.c0000644000000000000000000000617613306562360022274 0ustar rootroot#include #include #include #include #include #include "unit.h" enum { OBJSIZE_MIN = 3 * sizeof(int), OBJECTS_MAX = 1000 }; struct slab_arena arena; struct slab_cache cache; struct small_alloc alloc; struct quota quota; /* Streak type - allocating or freeing */ bool allocating = true; /** Keep global to easily inspect the core. */ long seed; static int *ptrs[OBJECTS_MAX]; static inline void free_checked(int *ptr) { fail_unless(ptr[0] < OBJECTS_MAX && ptr[ptr[1]/sizeof(int)-1] == ptr[0]); int pos = ptr[0]; fail_unless(ptrs[pos] == ptr); ptrs[pos][0] = ptrs[pos][ptr[1]/sizeof(int)-1] = INT_MAX; smfree_delayed(&alloc, ptrs[pos], ptrs[pos][1]); ptrs[pos] = NULL; } static inline void * alloc_checked(int pos, int size_min, int size_max) { assert(size_max > size_min); int size = size_min + rand() % (size_max - size_min); if (ptrs[pos]) { assert(ptrs[pos][0] == pos); free_checked(ptrs[pos]); } if (! 
allocating) return NULL; ptrs[pos] = smalloc(&alloc, size); ptrs[pos][0] = pos; ptrs[pos][1] = size; ptrs[pos][size/sizeof(int)-1] = pos; // printf("size: %d\n", size); return ptrs[pos]; } static int small_is_unused_cb(const struct mempool_stats *stats, void *arg) { unsigned long *slab_total = arg; *slab_total += stats->slabsize * stats->slabcount; return 0; } static bool small_is_unused(void) { struct small_stats totals; unsigned long slab_total = 0; small_stats(&alloc, &totals, small_is_unused_cb, &slab_total); if (totals.used > 0) return false; if (slab_cache_used(&cache) > slab_total) return false; return true; } static void small_alloc_test(int size_min, int size_max, int objects_max, int oscillation_max, int iterations_max) { small_alloc_create(&alloc, &cache, OBJSIZE_MIN, 1.3); for (int i = 0; i < iterations_max; i++) { small_alloc_setopt(&alloc, SMALL_DELAYED_FREE_MODE, i % 5 == 0); int oscillation = rand() % oscillation_max; for (int j = 0; j < oscillation; ++j) { int pos = rand() % objects_max; alloc_checked(pos, size_min, size_max); } allocating = ! allocating; } small_alloc_setopt(&alloc, SMALL_DELAYED_FREE_MODE, false); for (int pos = 0; pos < OBJECTS_MAX; pos++) { if (ptrs[pos] != NULL) free_checked(ptrs[pos]); } /* Trigger garbage collection. 
*/ allocating = true; for (int i = 0; i < iterations_max; i++) { if (small_is_unused()) break; void *p = alloc_checked(0, size_min, size_max); free_checked(p); } fail_unless(small_is_unused()); small_alloc_destroy(&alloc); } static void small_alloc_basic(void) { header(); small_alloc_test(OBJSIZE_MIN, 5000, 1000, 1024, 5000); footer(); } static void small_alloc_large(void) { header(); size_t large_size_min = mempool_objsize_max(cache.arena->slab_size); size_t large_size_max = 2 * cache.arena->slab_size; small_alloc_test(large_size_min, large_size_max, 50, 10, 100); footer(); } int main() { seed = time(0); srand(seed); quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); small_alloc_basic(); small_alloc_large(); slab_cache_destroy(&cache); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/rb.result0000644000000000000000000000066513306562360021506 0ustar rootroot *** check_simple *** *** check_simple: done *** *** check_old_iter *** 0->0 1->2 2->4 3->6 4->8 5->10 6->12 7->14 8->16 9->18 10->20 11->22 12->24 13->26 14->28 14->28 13->26 12->24 11->22 10->20 9->18 8->16 7->14 6->12 5->10 4->8 3->6 2->4 1->2 0->0 3->6 4->8 5->10 6->12 7->14 8->16 9->18 10->20 11->22 12->24 13->26 14->28 3->6 2->4 1->2 0->0 *** check_old_iter: done *** *** check_new_iter *** *** check_new_iter: done *** tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/rb_aug.result0000644000000000000000000000005413306562360022332 0ustar rootroot *** check_aug *** *** check_aug: done *** tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/quota.result0000644000000000000000000000024113306562360022222 0ustar rootroot1..5 ok 1 - no fails detected ok 2 - one of thread limit set is final ok 3 - total alloc match ok 4 - uses are mosly successful ok 5 - sets are mosly successful tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/matras.cc0000644000000000000000000002021213306562360021427 0ustar rootroot#include #include #include #include #include #include 
#include #include static void * pta_alloc(void *ctx); static void pta_free(void *ctx, void *p); #define PROV_BLOCK_SIZE 16 #define PROV_EXTENT_SIZE 64 static size_t AllocatedCount = 0; static std::set AllocatedBlocks; static std::set AllocatedItems; static void check_file_line(bool expr, const char *err_message, const char *file, int line) { if (!expr) { std::cout << " ****************************************\n" << " * " << file << ":" << line << " ERROR: " << err_message << "\n"; } assert(expr); if (!expr) { exit(-1); } } #define check(e, m) check_file_line(e, m, __FILE__, __LINE__) bool alloc_err_inj_enabled = false; unsigned int alloc_err_inj_countdown = 0; #define MATRAS_VERSION_COUNT 8 static void * pta_alloc(void *ctx) { static_cast(ctx); if (alloc_err_inj_enabled) { if (alloc_err_inj_countdown == 0) return 0; alloc_err_inj_countdown--; } void *p = new char[PROV_EXTENT_SIZE]; AllocatedCount++; AllocatedBlocks.insert(p); return p; } static void pta_free(void *ctx, void *p) { static_cast(ctx); check(AllocatedBlocks.find(p) != AllocatedBlocks.end(), "Bad free"); AllocatedBlocks.erase(p); delete [] static_cast(p); AllocatedCount--; } void matras_alloc_test() { std::cout << "Testing matras_alloc..." 
<< std::endl; unsigned int maxCapacity = PROV_EXTENT_SIZE / PROV_BLOCK_SIZE; maxCapacity *= PROV_EXTENT_SIZE / sizeof(void *); maxCapacity *= PROV_EXTENT_SIZE / sizeof(void *); struct matras mat; alloc_err_inj_enabled = false; for (unsigned int i = 0; i <= maxCapacity; i++) { matras_create(&mat, PROV_EXTENT_SIZE, PROV_BLOCK_SIZE, pta_alloc, pta_free, NULL); check(1u << mat.log2_capacity == maxCapacity, "Wrong capacity!"); AllocatedItems.clear(); for (unsigned int j = 0; j < i; j++) { unsigned int res = 0; void *data = matras_alloc(&mat, &res); check(data, "Alloc returned NULL"); void *test_data = matras_get(&mat, res); check(data == test_data, "Alloc and Get mismatch"); size_t provConsumedMemory = (size_t)matras_extent_count(&mat) * PROV_EXTENT_SIZE; check(provConsumedMemory == AllocatedCount * PROV_EXTENT_SIZE, "ConsumedMemory counter failed (1)"); check(res == j, "Index mismatch"); { check(!AllocatedBlocks.empty(), "Alloc w/o alloc!"); std::set::iterator itr = AllocatedBlocks.lower_bound(data); if (itr == AllocatedBlocks.end() || *itr != data) { check(itr != AllocatedBlocks.begin(), "Pointer to not allocatead region! (1)"); --itr; } check (itr != AllocatedBlocks.end(), "Pointer to not allocatead region! (2)"); check(data <= (void*)( ((char*)(*itr)) + PROV_EXTENT_SIZE - PROV_BLOCK_SIZE), "Pointer to not allocatead region! (3)"); } { if (!AllocatedItems.empty()) { std::set::iterator itr = AllocatedItems.lower_bound(data); if (itr != AllocatedItems.end()) { check(*itr >= (void*)(((char*)data) + PROV_BLOCK_SIZE), "Data regions overlaps! (1)"); } if (itr != AllocatedItems.begin()) { --itr; check(data >= (void*)(((char*)(*itr)) + PROV_BLOCK_SIZE), "Data regions overlaps! 
(2)"); } } } AllocatedItems.insert(data); } size_t provConsumedMemory = (size_t)matras_extent_count(&mat) * PROV_EXTENT_SIZE; check(provConsumedMemory == AllocatedCount * PROV_EXTENT_SIZE, "ConsumedMemory counter failed (2)"); matras_destroy(&mat); check(AllocatedCount == 0, "Not all memory freed (1)"); } for (unsigned int i = 0; i <= maxCapacity; i++) { matras_create(&mat, PROV_EXTENT_SIZE, PROV_BLOCK_SIZE, pta_alloc, pta_free, NULL); for (unsigned int j = 0; j < i; j++) { unsigned int res = 0; (void) matras_alloc(&mat, &res); } for (unsigned int j = 0; j < i; j++) { matras_dealloc(&mat); size_t provConsumedMemory = (size_t)matras_extent_count(&mat) * PROV_EXTENT_SIZE; check(provConsumedMemory == AllocatedCount * PROV_EXTENT_SIZE, "ConsumedMemory counter failed (3)"); } check(AllocatedCount == 0, "Not all memory freed (2)"); matras_destroy(&mat); } alloc_err_inj_enabled = true; for (unsigned int i = 0; i <= maxCapacity; i++) { matras_create(&mat, PROV_EXTENT_SIZE, PROV_BLOCK_SIZE, pta_alloc, pta_free, NULL); alloc_err_inj_countdown = i; for (unsigned int j = 0; j < maxCapacity; j++) { unsigned int res = 0; unsigned int prev_block_count = mat.head.block_count; void *data = matras_alloc(&mat, &res); if (!data) { check(prev_block_count == mat.head.block_count, "Created count changed during memory fail!"); break; } } matras_destroy(&mat); check(AllocatedCount == 0, "Not all memory freed after memory fail!"); } std::cout << "Testing matras_alloc successfully finished" << std::endl; } typedef uint64_t type_t; const size_t VER_EXTENT_SIZE = 512; void *all(void *ctx) { long *extents_in_use = static_cast(ctx); ++*extents_in_use; return malloc(VER_EXTENT_SIZE); } void dea(void *ctx, void *p) { long *extents_in_use = static_cast(ctx); --*extents_in_use; free(p); } struct matras_view views[MATRAS_VERSION_COUNT]; int vermask = 1; int reg_view_id() { int id = __builtin_ctz(~vermask); vermask |= 1 << id; return id; } void unreg_view_id(int id) { vermask &=~ (1 << id); } void 
matras_vers_test() { std::cout << "Testing matras versions..." << std::endl; std::vector comps[MATRAS_VERSION_COUNT]; int use_mask = 1; int cur_num_or_ver = 1; struct matras local; long extents_in_use = 0; matras_create(&local, VER_EXTENT_SIZE, sizeof(type_t), all, dea, &extents_in_use); type_t val = 0; for (int s = 10; s < 8000; s = int(s * 1.5)) { for (int k = 0; k < 800; k++) { if (rand() % 16 == 0) { bool add_ver; if (cur_num_or_ver == 1) add_ver = true; else if (cur_num_or_ver == MATRAS_VERSION_COUNT) add_ver = false; else add_ver = rand() % 2 == 0; if (add_ver) { cur_num_or_ver++; matras_id_t new_ver = reg_view_id(); matras_create_read_view(&local, views + new_ver); check(new_ver > 0, "create read view failed"); use_mask |= (1 << new_ver); comps[new_ver] = comps[0]; } else { cur_num_or_ver--; int del_ver; do { del_ver = 1 + rand() % (MATRAS_VERSION_COUNT - 1); } while ((use_mask & (1 << del_ver)) == 0); matras_destroy_read_view(&local, views + del_ver); unreg_view_id(del_ver); comps[del_ver].clear(); use_mask &= ~(1 << del_ver); } } else { if (rand() % 8 == 0 && comps[0].size() > 0) { matras_dealloc(&local); comps[0].pop_back(); } size_t p = rand() % s; type_t mod = 0; while (p >= comps[0].size()) { comps[0].push_back(val * 10000 + mod); matras_id_t tmp; type_t *ptrval = (type_t *)matras_alloc(&local, &tmp); *ptrval = val * 10000 + mod; mod++; } val++; comps[0][p] = val; matras_touch(&local, p); *(type_t *)matras_get(&local, p) = val; } views[0] = local.head; for (int i = 0; i < MATRAS_VERSION_COUNT; i++) { if ((use_mask & (1 << i)) == 0) continue; check(comps[i].size() == views[i].block_count, "size mismatch"); for (size_t j = 0; j < comps[i].size(); j++) { type_t val1 = comps[i][j]; type_t val2 = *(type_t *)matras_view_get(&local, views + i, j); check(val1 == val2, "data mismatch"); } } } } matras_destroy(&local); check(extents_in_use == 0, "memory leak"); std::cout << "Testing matras_version successfully finished" << std::endl; } void matras_gh_1145_test() 
{ std::cout << "Testing matras gh-1145 test..." << std::endl; struct matras local; long extents_in_use = 0; matras_create(&local, VER_EXTENT_SIZE, sizeof(type_t), all, dea, &extents_in_use); struct matras_view view; matras_create_read_view(&local, &view); matras_id_t id; matras_alloc(&local, &id); matras_touch(&local, id); matras_destroy_read_view(&local, &view); matras_destroy(&local); check(extents_in_use == 0, "memory leak"); std::cout << "Testing matras gh-1145 test successfully finished" << std::endl; } int main(int, const char **) { matras_alloc_test(); matras_vers_test(); matras_gh_1145_test(); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/slab_cache.c0000644000000000000000000000232113306562360022042 0ustar rootroot#include #include #include #include #include #include #include "unit.h" enum { NRUNS = 25, ITERATIONS = 1000, MAX_ALLOC = 5000000 }; static struct slab *runs[NRUNS]; int main() { srand(time(0)); struct quota quota; struct slab_arena arena; struct slab_cache cache; quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); int i = 0; while (i < ITERATIONS) { int run = random() % NRUNS; int size = random() % MAX_ALLOC; if (runs[run]) { slab_put(&cache, runs[run]); } runs[run] = slab_get(&cache, size); fail_unless(runs[run]); slab_cache_check(&cache); i++; } /* Put all allocated memory back to cache */ for (i = 0; i < NRUNS; i++) { if (runs[i]) slab_put(&cache, runs[i]); } slab_cache_check(&cache); /* * It is allowed to hold only one slab of arena. * If at lest one block was allocated then after freeing * all memory it must be exactly one slab. */ if (cache.allocated.stats.total != arena.slab_size) { fail("Slab cache returned memory to arena", "false"); } slab_cache_destroy(&cache); slab_arena_destroy(&arena); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/matras.result0000644000000000000000000000033413306562360022363 0ustar rootrootTesting matras_alloc... 
Testing matras_alloc successfully finished Testing matras versions... Testing matras_version successfully finished Testing matras gh-1145 test... Testing matras gh-1145 test successfully finished tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/lf_lifo.c0000644000000000000000000000326613306562360021421 0ustar rootroot#include #include #include "unit.h" #if !defined(MAP_ANONYMOUS) #define MAP_ANONYMOUS MAP_ANON #endif static void * mmap_aligned(size_t size) { assert((size & (size - 1)) == 0); void *map = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); /* Align the mapped address around slab size. */ size_t offset = (intptr_t) map & (size - 1); if (offset != 0) { munmap(map, size - offset); map += size - offset; munmap(map + size, offset); } else { /* The address is returned aligned. */ munmap(map + size, size); } return map; } #define MAP_SIZE 0x10000 int main() { struct lf_lifo head; void *val1 = mmap_aligned(MAP_SIZE); void *val2 = mmap_aligned(MAP_SIZE); void *val3 = mmap_aligned(MAP_SIZE); lf_lifo_init(&head); fail_unless(lf_lifo_pop(&head) == NULL); fail_unless(lf_lifo_pop(lf_lifo_push(&head, val1)) == val1); fail_unless(lf_lifo_pop(lf_lifo_push(&head, val1)) == val1); lf_lifo_push(lf_lifo_push(lf_lifo_push(&head, val1), val2), val3); fail_unless(lf_lifo_pop(&head) == val3); fail_unless(lf_lifo_pop(&head) == val2); fail_unless(lf_lifo_pop(&head) == val1); fail_unless(lf_lifo_pop(&head) == NULL); lf_lifo_init(&head); /* Test overflow of ABA counter. 
*/ int i = 0; do { lf_lifo_push(&head, val1); fail_unless(lf_lifo_pop(&head) == val1); fail_unless(lf_lifo_pop(&head) == NULL); i++; } while (head.next != 0); munmap(val1, MAP_SIZE); munmap(val2, MAP_SIZE); munmap(val3, MAP_SIZE); printf("success\n"); return 0; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/lsregion.c0000644000000000000000000002423613306562360021631 0ustar rootroot#include #include #include #include #include "unit.h" enum { TEST_ARRAY_SIZE = 10 }; static size_t lsregion_slab_count(struct lsregion *region) { size_t res = 0; struct rlist *next; rlist_foreach(next, ®ion->slabs.slabs) ++res; return res; } /** * Test constructor, allocation and truncating of one memory * block. */ static void test_basic() { note("basic"); plan(42); struct quota quota; struct slab_arena arena; struct lsregion allocator; quota_init("a, 4 * SLAB_MIN_SIZE); is(slab_arena_create(&arena, "a, 0, 1024, MAP_PRIVATE), 0, "init"); lsregion_create(&allocator, &arena); /* Test that initialization was correct. */ is(lsregion_used(&allocator), 0, "used after init"); is(lsregion_total(&allocator), 0, "total after init"); is(arena.used, 0, "arena used after init") is(lsregion_slab_count(&allocator), 0, "slab count after init"); is(allocator.cached, NULL, "slab cache after init"); /* Try to alloc 100 bytes. */ uint32_t size = 100; int64_t id = 10; char *data = lsregion_alloc(&allocator, size, id); isnt(data, NULL, "alloc(100)") uint32_t used = lsregion_used(&allocator); uint32_t total = lsregion_total(&allocator); is(used, size, "used after alloc(100)"); is(total, arena.slab_size, "total after alloc(100)"); is(arena.used, arena.slab_size, "arena used after alloc(100)") is(lsregion_slab_count(&allocator), 1, "slab count after alloc(100)"); is(allocator.cached, NULL, "slab cache after alloc(100)"); /* * Truncate with id < the allocated block id has't any * effect. 
*/ lsregion_gc(&allocator, id / 2); used = lsregion_used(&allocator); total = lsregion_total(&allocator); is(used, size, "used after gc(id / 2)"); is(total, arena.slab_size, "total after gc(id / 2)"); is(arena.used, arena.slab_size, "arena used after gc(id / 2)"); is(lsregion_slab_count(&allocator), 1, "slab count after gc(id / 2)"); is(allocator.cached, NULL, "slab cache after gc(id / 2)"); /* * Tuncate the allocated block. Used bytes count is 0 now. * But total = lsregion.slab_size, because the last slab * is cached. */ lsregion_gc(&allocator, id); used = lsregion_used(&allocator); total = lsregion_total(&allocator); is(used, 0, "used after gc(id)"); is(total, arena.slab_size, "total after gc(id)"); is(arena.used, arena.slab_size, "arena used after gc(id)"); is(lsregion_slab_count(&allocator), 0, "slab count after gc(id)"); isnt(allocator.cached, NULL, "slab cache after gc(id)"); /* * Try to allocate block with size > specified slab_size. */ size = 2048; ++id; data = lsregion_alloc(&allocator, size, id); isnt(data, NULL, "alloc(2048)"); used = lsregion_used(&allocator); total = lsregion_total(&allocator); is(used, size, "used after alloc(2048)"); is(total, arena.slab_size, "total after alloc(2048)"); is(arena.used, arena.slab_size, "arena used after alloc(2048)") is(lsregion_slab_count(&allocator), 1, "slab count after alloc(2048)"); is(allocator.cached, NULL, "slab cache after alloc(2048)"); /* * Large allocation backed by malloc() */ ++id; size_t qused = quota_used(arena.quota); size_t aused = arena.used; used = lsregion_used(&allocator); total = lsregion_total(&allocator); size = arena.slab_size + 100; data = lsregion_alloc(&allocator, size, id); isnt(data, NULL, "large alloc()") is(lsregion_used(&allocator), used + size, "used after large alloc()"); is(lsregion_total(&allocator), total + size + lslab_sizeof(), "total after large alloc()"); is(arena.used, aused, "arena used is not changed after large alloc()"); size_t size_quota = (size + lslab_sizeof() + 
QUOTA_UNIT_SIZE - 1) & ~(size_t)(QUOTA_UNIT_SIZE - 1); is(quota_used(arena.quota), qused + size_quota, "quota used after large alloc()") is(lsregion_slab_count(&allocator), 2, "slab count after large alloc()"); is(allocator.cached, NULL, "slab cache after large alloc()"); /* * Allocation after large slab */ ++id; size = 10; used = lsregion_used(&allocator); total = lsregion_total(&allocator); data = lsregion_alloc(&allocator, size, id); isnt(data, NULL, "alloc after large") is(lsregion_used(&allocator), used + size, "alloc after large"); is(lsregion_total(&allocator), total + arena.slab_size, "large slab is not re-used"); is(lsregion_slab_count(&allocator), 3, "large slab is not reused"); /* * gc of large slab */ lsregion_gc(&allocator, id); is(lsregion_slab_count(&allocator), 0, "slab count after large gc()"); lsregion_destroy(&allocator); /* Sic: slabs are cached by arena */ is(arena.used, 2 * arena.slab_size, "arena used after destroy"); is(quota_used(arena.quota), 2 * arena.slab_size, "quota used after destroy"); slab_arena_destroy(&arena); check_plan(); } static void fill_data(char **data, uint32_t count, uint32_t size, uint32_t start_id, struct lsregion *allocator) { for (uint32_t i = 0; i < count; ++i) { data[i] = lsregion_alloc(allocator, size, start_id++); assert(data[i] != NULL); memset(data[i], i % CHAR_MAX, size); } } static void test_data(char **data, uint32_t count, uint32_t size) { for (uint32_t i = 0; i < count; ++i) { for (uint32_t j = 0; j < size; ++j) { fail_if(data[i][j] != (char) (i % CHAR_MAX)); } } } /** Test many blocks allocation in one slab. */ static void test_many_allocs_one_slab() { note("many_allocs_one_slab"); plan(6); struct quota quota; struct slab_arena arena; struct lsregion allocator; quota_init("a, 4 * SLAB_MIN_SIZE); is(slab_arena_create(&arena, "a, 0, 0, MAP_PRIVATE), 0, "init"); lsregion_create(&allocator, &arena); /* * Allocate many small blocks that are fitting in one slab * and fill them with simple data. 
*/ const int count = TEST_ARRAY_SIZE; char *data[TEST_ARRAY_SIZE]; uint32_t size = 400; fill_data(data, count, size, 0, &allocator); is(arena.used, arena.slab_size, "arena used after many small blocks") /* * Used bytes count is count * size, but only one slab is * used. */ uint32_t total_size = size * count; uint32_t used = lsregion_used(&allocator); is(used, total_size, "used after small blocks"); is(lsregion_slab_count(&allocator), 1, "slab count after small blocks"); test_data(data, count, size); /* * Try to truncate the middle of memory blocks, but it * hasn't an effect since the lsregion allocator can't * truncate a part of a slab. */ uint32_t middle_id = count / 2; lsregion_gc(&allocator, middle_id); used = lsregion_used(&allocator);; is(used, total_size, "used after gc"); is(lsregion_slab_count(&allocator), 1, "slab count after gc(id/2)"); lsregion_destroy(&allocator); slab_arena_destroy(&arena); check_plan(); } /** Test many memory blocks in many slabs. */ static void test_many_allocs_many_slabs() { note("many_allocs_many_slabs"); plan(10); struct quota quota; struct slab_arena arena; struct lsregion allocator; quota_init("a, 4 * SLAB_MIN_SIZE); is(slab_arena_create(&arena, "a, 0, 0, MAP_PRIVATE), 0, "init"); lsregion_create(&allocator, &arena); /* * Allocate many small blocks that are fitting in one slab * and fill them with simple data. */ const int count = TEST_ARRAY_SIZE + 1; char *data[TEST_ARRAY_SIZE + 1]; uint32_t size = arena.slab_size / 12; uint32_t id = 0; fill_data(data, count, size, id, &allocator); id += count; is(arena.used, arena.slab_size, "arena used after one slab") /* * Used bytes count is count * size, but only one slab is * used. */ uint32_t total_size = size * count; uint32_t used = lsregion_used(&allocator); is(used, total_size, "used after one slab"); is(lsregion_slab_count(&allocator), 1, "slab count after one slab"); test_data(data, count, size); /* Allocate more memory blocks in a second slab. 
*/ char *next_block_data[count]; fill_data(next_block_data, count, size, id, &allocator); id += count; total_size += size * count; used = lsregion_used(&allocator); is(arena.used, 2 * arena.slab_size, "arena used after many slabs") /* Test that the first slab is still exists. */ is(used, total_size, "used after many slabs"); /* Truncate the first slab. */ uint32_t block_max_id = count; lsregion_gc(&allocator, block_max_id); is(lsregion_slab_count(&allocator), 1, "slab count after gc first"); is(arena.used, 2 * arena.slab_size, "arena used after gc first") /* The second slab still has valid data. */ test_data(next_block_data, count, size); /* Truncate the second slab. */ block_max_id = id; lsregion_gc(&allocator, block_max_id); is(lsregion_slab_count(&allocator), 0, "slab count after gc second"); is(arena.used, 2 * arena.slab_size, "arena used after gc second") fail_if(lsregion_used(&allocator) > 0); lsregion_destroy(&allocator); slab_arena_destroy(&arena); check_plan(); } /** * Test allocation of many big memory blocks, but specify a little * slab_size for the slab arena. */ static void test_big_data_small_slabs() { note("big_data_small_slabs"); plan(7); struct quota quota; struct slab_arena arena; struct lsregion allocator; quota_init("a, 16 * SLAB_MIN_SIZE); is(slab_arena_create(&arena, "a, 0, 0, MAP_PRIVATE), 0, "init"); lsregion_create(&allocator, &arena); const uint32_t count = TEST_ARRAY_SIZE; char *data[TEST_ARRAY_SIZE]; uint32_t size = arena.slab_size * 3 / 4; int64_t id = 0; /* * Allocate big memory blocks and fill them with simple * data. */ fill_data(data, count, size, id, &allocator); uint32_t total_size = size * count; uint32_t used = lsregion_used(&allocator); is(used, total_size, "used after alloc"); is(arena.used, count * arena.slab_size, "arena used after alloc") is(lsregion_slab_count(&allocator), count, "slab count after alloc"); id += count; /* Try to truncate a middle of the memory blocks. 
*/ lsregion_gc(&allocator, id / 2); isnt(lsregion_used(&allocator), 0, "used after gc(id / 2)"); is(lsregion_slab_count(&allocator), count / 2 -1, "slab count after gc (id / 2)"); is(arena.used, count * arena.slab_size, "arena used after gc(id / 2)") lsregion_gc(&allocator, id); fail_if(lsregion_used(&allocator) > 0); lsregion_destroy(&allocator); slab_arena_destroy(&arena); check_plan(); } int main() { plan(4); test_basic(); test_many_allocs_one_slab(); test_many_allocs_many_slabs(); test_big_data_small_slabs(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/unit.c0000644000000000000000000000234313306562360020761 0ustar rootroot#include "unit.h" #include #include enum { MAX_LEVELS = 10 }; static int tests_done[MAX_LEVELS]; static int tests_failed[MAX_LEVELS]; static int plan_test[MAX_LEVELS]; static int level = -1; void _space(FILE *stream) { for (int i = 0 ; i < level; i++) { fprintf(stream, " "); } } void plan(int count) { ++level; plan_test[level] = count; tests_done[level] = 0; tests_failed[level] = 0; _space(stdout); printf("%d..%d\n", 1, plan_test[level]); } int check_plan(void) { int r = 0; if (tests_done[level] != plan_test[level]) { _space(stderr); fprintf(stderr, "# Looks like you planned %d tests but ran %d.\n", plan_test[level], tests_done[level]); r = -1; } if (tests_failed[level]) { _space(stderr); fprintf(stderr, "# Looks like you failed %d test of %d run.\n", tests_failed[level], tests_done[level]); r = tests_failed[level]; } --level; if (level >= 0) { is(r, 0, "subtests"); } return r; } int _ok(int condition, const char *fmt, ...) { va_list ap; _space(stdout); printf("%s %d - ", condition ? 
"ok" : "not ok", ++tests_done[level]); if (!condition) tests_failed[level]++; va_start(ap, fmt); vprintf(fmt, ap); printf("\n"); va_end(ap); return condition; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/quota_lessor.c0000644000000000000000000000601413306562360022521 0ustar rootroot#include "small/quota_lessor.h" #include "unit.h" void test_basic() { plan(23); struct quota q; quota_init(&q, QUOTA_MAX); struct quota_lessor l; quota_lessor_create(&l, &q); is(100, quota_lease(&l, 100), "lease 100 bytes"); is(100, quota_leased(&l), "leased 100 bytes"); is(QUOTA_USE_MIN - 100, quota_available(&l), "UNIT_SIZE - 100 available"); is(QUOTA_USE_MIN, quota_used(&q), "source quota used"); /* Lease without source qouta usage. */ is(200, quota_lease(&l, 200), "lease 200 bytes"); is(300, quota_leased(&l), "leased 300 bytes at all"); is(QUOTA_USE_MIN - 300, quota_available(&l), "UNIT_SIZE - 300 available"); is(QUOTA_USE_MIN, quota_used(&q), "source quota used did not change"); /* Lease several LEASE_SIZEs. */ is(QUOTA_USE_MIN * 3, quota_lease(&l, QUOTA_USE_MIN * 3), "lease big size"); is(QUOTA_USE_MIN * 3 + 300, quota_leased(&l), "leased size"); is(QUOTA_UNIT_SIZE - 300, quota_available(&l), "available size"); is(QUOTA_USE_MIN * 3 + QUOTA_UNIT_SIZE, quota_used(&q), "update source quota used"); /* End lease. 
*/ quota_end_lease(&l, 300); is(QUOTA_UNIT_SIZE, quota_available(&l), "end small lease"); is(QUOTA_USE_MIN * 3, quota_leased(&l), "decrease leased"); is(QUOTA_USE_MIN * 3 + QUOTA_UNIT_SIZE, quota_used(&q), "source quota did not change - too small size to free"); quota_end_lease(&l, QUOTA_USE_MIN * 2 + 100); is(QUOTA_USE_MIN - 100, quota_leased(&l), "decrease leased with big chunk"); is(100 + QUOTA_USE_MIN, quota_available(&l), "return big chunks into source quota"); is(QUOTA_USE_MIN * 2, quota_used(&q), "release source quota"); quota_end_lease(&l, QUOTA_USE_MIN - 100); is(0, quota_leased(&l), "lessor is empty"); is(true, quota_available(&l) > 0, "lessor avoids oscillation"); is(quota_available(&l), quota_used(&q), "source quota isn't empty"); quota_lessor_destroy(&l); is(0, quota_available(&l), "lessor has no memory"); is(0, quota_used(&q), "source quota is empty"); check_plan(); } void test_hard_lease() { plan(12); struct quota q; size_t quota_total = QUOTA_USE_MIN + QUOTA_USE_MIN / 8; quota_init(&q, quota_total); struct quota_lessor l; quota_lessor_create(&l, &q); is(QUOTA_USE_MIN, quota_lease(&l, QUOTA_USE_MIN), "lease 1Mb"); is(0, quota_available(&l), "available 0"); is(QUOTA_USE_MIN, quota_leased(&l), "leased 1Mb"); is(QUOTA_USE_MIN, quota_used(&q), "source quota used"); is(-1, quota_lease(&l, QUOTA_USE_MIN), "lease too big"); is(QUOTA_UNIT_SIZE, quota_lease(&l, QUOTA_UNIT_SIZE), "hard lease"); is(QUOTA_UNIT_SIZE + QUOTA_USE_MIN, quota_leased(&l), "leased changed"); is(QUOTA_USE_MIN / 8 - QUOTA_UNIT_SIZE, quota_available(&l), "available the part of 1MB"); is(quota_total, quota_used(&q), "source quota fully used"); quota_end_lease(&l, quota_leased(&l)); quota_lessor_destroy(&l); is(0, quota_available(&l), "lessor is empty"); is(0, quota_leased(&l), "lessor is empty"); is(0, quota_used(&q), "sourcr quota is empty"); check_plan(); } int main() { plan(2); test_basic(); test_hard_lease(); return check_plan(); } 
tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/ibuf.result0000644000000000000000000000005613306562360022022 0ustar rootroot *** ibuf_basic *** *** ibuf_basic: done *** tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/arena_mt.result0000644000000000000000000000000313306562360022653 0ustar rootrootok tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/CMakeLists.txt0000644000000000000000000000560613306562360022403 0ustar rootroot# Fix compilation by C++ add_definitions("-D__STDC_FORMAT_MACROS=1") add_definitions("-D__STDC_LIMIT_MACROS=1") add_definitions("-D__STDC_CONSTANT_MACROS=1") add_executable(slab_cache.test slab_cache.c) target_link_libraries(slab_cache.test small) add_executable(region.test region.c) target_link_libraries(region.test small) add_executable(ibuf.test ibuf.c unit.c) target_link_libraries(ibuf.test small) add_executable(obuf.test obuf.c) target_link_libraries(obuf.test small) add_executable(rb.test rb.c) target_link_libraries(rb.test small) add_executable(rb_aug.test rb_aug.c) target_link_libraries(rb_aug.test small) add_executable(rb_rand.test rb_rand.cc) set_source_files_properties(rb_rand.cc PROPERTIES COMPILE_FLAGS "-std=gnu++0x") add_executable(mempool.test mempool.c) target_link_libraries(mempool.test small) add_executable(small_alloc.test small_alloc.c) target_link_libraries(small_alloc.test small) add_executable(lf_lifo.test lf_lifo.c) add_executable(slab_arena.test slab_arena.c) target_link_libraries(slab_arena.test small) add_executable(arena_mt.test arena_mt.c unit.c) target_link_libraries(arena_mt.test small pthread) add_executable(matras.test matras.cc) target_link_libraries(matras.test small) add_executable(lsregion.test lsregion.c unit.c) target_link_libraries(lsregion.test small) add_executable(quota.test quota.cc unit.c) target_link_libraries(quota.test pthread) add_executable(quota_lessor.test quota_lessor.c unit.c) target_link_libraries(quota_lessor.test pthread) include_directories("${PROJECT_SOURCE_DIR}") add_test(slab_cache 
${CMAKE_CURRENT_BUILD_DIR}/slab_cache.test) add_test(region ${CMAKE_CURRENT_BUILD_DIR}/region.test) add_test(ibuf ${CMAKE_CURRENT_BUILD_DIR}/ibuf.test) add_test(obuf ${CMAKE_CURRENT_BUILD_DIR}/obuf.test) add_test(mempool ${CMAKE_CURRENT_BUILD_DIR}/mempool.test) add_test(small_alloc ${CMAKE_CURRENT_BUILD_DIR}/small_alloc.test) add_test(lf_lifo ${CMAKE_CURRENT_BUILD_DIR}/lf_lifo.test) add_test(slab_cache ${CMAKE_CURRENT_BUILD_DIR}/slab_cache.test) add_test(arena_mt ${CMAKE_CURRENT_BUILD_DIR}/arena_mt.test) add_test(matras ${CMAKE_CURRENT_BUILD_DIR}/matras.test) add_test(lsregion ${CMAKE_CURRENT_BUILD_DIR}/lsregion.test) add_test(quota ${CMAKE_CURRENT_BUILD_DIR}/quota.test) add_test(quota_lessor ${CMAKE_CURRENT_BUILD_DIR}/quota_lessor.test) add_test(rb ${CMAKE_CURRENT_BUILD_DIR}/rb.test) add_test(rb_aug ${CMAKE_CURRENT_BUILD_DIR}/rb_aug.test) add_test(rb_rand ${CMAKE_CURRENT_BUILD_DIR}/rb_rand.test) if(DEFINED SMALL_EMBEDDED) return() endif() if(POLICY CMP0037) cmake_policy(SET CMP0037 OLD) # don't blame "test" target name endif(POLICY CMP0037) add_custom_target(test WORKING_DIRECTORY "${PROJECT_BINARY_DIR}" COMMAND ctest DEPENDS slab_cache.test region.test ibuf.test obuf.test mempool.test small_alloc.test lf_lifo.test slab_arena.test arena_mt.test matras.test lsregion.test quota.test rb.test ) tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/obuf.c0000644000000000000000000000254213306562360020736 0ustar rootroot#include #include #include #include #include #include #include "unit.h" enum { OBJSIZE_MIN = sizeof(int), OBJSIZE_MAX = 5000, OBJECTS_MAX = 1000, OSCILLATION_MAX = 1024, ITERATIONS_MAX = 5000, }; /** Keep global to easily inspect the core. 
*/ long seed; void alloc_checked(struct obuf *buf) { int size = rand() % OBJSIZE_MAX; if (size < OBJSIZE_MIN || size > OBJSIZE_MAX) size = OBJSIZE_MIN; obuf_alloc(buf, size); } static void basic_alloc_streak(struct obuf *buf) { int oscillation = rand() % OSCILLATION_MAX; int i; for (i = 0; i < oscillation; ++i) alloc_checked(buf); } void obuf_basic(struct slab_cache *slabc) { int i; header(); struct obuf buf; obuf_create(&buf, slabc, 16320); for (i = 0; i < ITERATIONS_MAX; i++) { basic_alloc_streak(&buf); fail_unless(obuf_capacity(&buf) > 0); obuf_reset(&buf); fail_unless(obuf_size(&buf) == 0); } obuf_destroy(&buf); fail_unless(slab_cache_used(slabc) == 0); slab_cache_check(slabc); footer(); } int main() { struct slab_cache cache; struct slab_arena arena; struct quota quota; seed = time(0); srand(seed); quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); obuf_basic(&cache); slab_cache_destroy(&cache); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/quota.cc0000644000000000000000000000453413306562360021302 0ustar rootroot#include #include #include #include "unit.h" struct quota quota; const size_t THREAD_CNT = 10; const size_t RUN_CNT = 128 * 1024; struct thread_data { size_t use_change; size_t last_lim_set; long use_change_success; long lim_change_success; }; pthread_t threads[THREAD_CNT]; thread_data datum[THREAD_CNT]; void *thread_routine(void *vparam) { struct thread_data *data = (struct thread_data *)vparam; size_t check_fail_count = 0; ssize_t allocated_size = 0; for (size_t i = 0; i < RUN_CNT; i++) { { size_t total, used; quota_get_total_and_used("a, &total, &used); if (used > total) check_fail_count++; } ssize_t max = rand() % QUOTA_MAX; max = quota_set("a, max); sched_yield(); if (max > 0) { data->last_lim_set = max; data->lim_change_success++; } if (allocated_size > 0) { quota_release("a, allocated_size); allocated_size = -1; data->use_change = 0; data->use_change_success++; sched_yield(); 
} else { allocated_size = rand() % max + 1; allocated_size = quota_use("a, allocated_size); if (allocated_size > 0) { data->use_change = allocated_size; data->use_change_success++; } sched_yield(); } } return (void *)check_fail_count; } int main(int n, char **a) { (void)n; (void)a; quota_init("a, 0); srand(time(0)); plan(5); for (size_t i = 0; i < THREAD_CNT; i++) { pthread_create(threads + i, 0, thread_routine, (void *)(datum + i)); } size_t check_fail_count = 0; for (size_t i = 0; i < THREAD_CNT; i++) { void *ret; check_fail_count += (size_t)pthread_join(threads[i], &ret); } bool one_set_successed = false; size_t total_alloc = 0; long set_success_count = 0; long use_success_count = 0; for (size_t i = 0; i < THREAD_CNT; i++) { if (datum[i].last_lim_set == quota_total("a)) one_set_successed = true; total_alloc += datum[i].use_change; use_success_count += datum[i].use_change_success; set_success_count += datum[i].lim_change_success; } ok(check_fail_count == 0, "no fails detected"); ok(one_set_successed, "one of thread limit set is final"); ok(total_alloc == quota_used("a), "total alloc match"); ok(use_success_count > THREAD_CNT * RUN_CNT * .1, "uses are mosly successful"); ok(set_success_count > THREAD_CNT * RUN_CNT * .1, "sets are mosly successful"); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/ibuf.c0000644000000000000000000000141113306562360020722 0ustar rootroot#include #include #include #include #include "unit.h" struct slab_cache cache; struct slab_arena arena; struct quota quota; void ibuf_basic() { header(); struct ibuf ibuf; ibuf_create(&ibuf, &cache, 16320); fail_unless(ibuf_used(&ibuf) == 0); void *ptr = ibuf_alloc(&ibuf, 10); fail_unless(ptr); fail_unless(ibuf_used(&ibuf) == 10); ptr = ibuf_alloc(&ibuf, 1000000); fail_unless(ptr); fail_unless(ibuf_used(&ibuf) == 1000010); ibuf_reset(&ibuf); fail_unless(ibuf_used(&ibuf) == 0); footer(); } int main() { quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, 
MAP_PRIVATE); slab_cache_create(&cache, &arena); ibuf_basic(); slab_cache_destroy(&cache); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/rb_rand.result0000644000000000000000000000000713306562360022500 0ustar rootrootsuccesstarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/lf_lifo.result0000644000000000000000000000001013306562360022475 0ustar rootrootsuccess tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/lsregion.result0000644000000000000000000000461613306562360022725 0ustar rootroot1..4 # basic 1..42 ok 1 - init ok 2 - used after init ok 3 - total after init ok 4 - arena used after init ok 5 - slab count after init ok 6 - slab cache after init ok 7 - alloc(100) ok 8 - used after alloc(100) ok 9 - total after alloc(100) ok 10 - arena used after alloc(100) ok 11 - slab count after alloc(100) ok 12 - slab cache after alloc(100) ok 13 - used after gc(id / 2) ok 14 - total after gc(id / 2) ok 15 - arena used after gc(id / 2) ok 16 - slab count after gc(id / 2) ok 17 - slab cache after gc(id / 2) ok 18 - used after gc(id) ok 19 - total after gc(id) ok 20 - arena used after gc(id) ok 21 - slab count after gc(id) ok 22 - slab cache after gc(id) ok 23 - alloc(2048) ok 24 - used after alloc(2048) ok 25 - total after alloc(2048) ok 26 - arena used after alloc(2048) ok 27 - slab count after alloc(2048) ok 28 - slab cache after alloc(2048) ok 29 - large alloc() ok 30 - used after large alloc() ok 31 - total after large alloc() ok 32 - arena used is not changed after large alloc() ok 33 - quota used after large alloc() ok 34 - slab count after large alloc() ok 35 - slab cache after large alloc() ok 36 - alloc after large ok 37 - alloc after large ok 38 - large slab is not re-used ok 39 - large slab is not reused ok 40 - slab count after large gc() ok 41 - arena used after destroy ok 42 - quota used after destroy ok 1 - subtests # many_allocs_one_slab 1..6 ok 1 - init ok 2 - arena used after many small blocks ok 3 - used after small blocks ok 4 - slab count after small 
blocks ok 5 - used after gc ok 6 - slab count after gc(id/2) ok 2 - subtests # many_allocs_many_slabs 1..10 ok 1 - init ok 2 - arena used after one slab ok 3 - used after one slab ok 4 - slab count after one slab ok 5 - arena used after many slabs ok 6 - used after many slabs ok 7 - slab count after gc first ok 8 - arena used after gc first ok 9 - slab count after gc second ok 10 - arena used after gc second ok 3 - subtests # big_data_small_slabs 1..7 ok 1 - init ok 2 - used after alloc ok 3 - arena used after alloc ok 4 - slab count after alloc ok 5 - used after gc(id / 2) ok 6 - slab count after gc (id / 2) ok 7 - arena used after gc(id / 2) ok 4 - subtests tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/slab_arena.result0000644000000000000000000000107413306562360023165 0ustar rootrootarena->prealloc = 0 arena->maxalloc = 0 arena->used = 0 arena->slab_size = 65536 arena->prealloc = 65536 arena->maxalloc = 65536 arena->used = 0 arena->slab_size = 65536 arena->prealloc = 65536 arena->maxalloc = 65536 arena->used = 65536 arena->slab_size = 65536 going beyond the limit: (nil) arena->prealloc = 65536 arena->maxalloc = 65536 arena->used = 65536 arena->slab_size = 65536 arena->prealloc = 65536 arena->maxalloc = 65536 arena->used = 65536 arena->slab_size = 65536 arena->prealloc = 2031616 arena->maxalloc = 2000896 arena->used = 0 arena->slab_size = 65536 tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/mempool.c0000644000000000000000000000456713306562360021464 0ustar rootroot#include #include #include #include #include #include "unit.h" enum { OBJSIZE_MIN = 2 * sizeof(int), OBJSIZE_MAX = 4096, OBJECTS_MAX = 10000, OSCILLATION_MAX = 1024, ITERATIONS_MAX = 500, }; struct slab_arena arena; struct slab_cache cache; struct quota quota; struct mempool pool; int objsize; size_t used; /* Streak type - allocating or freeing */ bool allocating = true; /** Keep global to easily inspect the core. 
*/ long seed; static int *ptrs[OBJECTS_MAX]; static inline void free_checked(int *ptr) { fail_unless(ptr[0] < OBJECTS_MAX && ptr[objsize/sizeof(int)-1] == ptr[0]); int pos = ptr[0]; fail_unless(ptrs[pos] == ptr); fail_unless(mempool_used(&pool) == used); ptrs[pos][0] = ptrs[pos][objsize/sizeof(int)-1] = INT_MAX; mempool_free(&pool, ptrs[pos]); ptrs[pos] = NULL; used -= objsize; } static inline void * alloc_checked() { int pos = rand() % OBJECTS_MAX; if (ptrs[pos]) { assert(ptrs[pos][0] == pos); free_checked(ptrs[pos]); ptrs[pos] = 0; } if (! allocating) return NULL; fail_unless(mempool_used(&pool) == used); used += objsize; ptrs[pos] = mempool_alloc(&pool); ptrs[pos][0] = pos; ptrs[pos][objsize/sizeof(int)-1] = pos; return ptrs[pos]; } static void basic_alloc_streak() { int oscillation = rand() % OSCILLATION_MAX; int i; for (i = 0; i < oscillation; ++i) { alloc_checked(); } } void mempool_basic() { int i; header(); mempool_create(&pool, &cache, objsize); for (i = 0; i < ITERATIONS_MAX; i++) { basic_alloc_streak(); allocating = ! 
allocating; #if 0 printf("%zu %zu\n", mempool_used(&pool), mempool_total(&pool)); #endif } mempool_destroy(&pool); footer(); } void mempool_align() { header(); for (uint32_t size = OBJSIZE_MIN; size < OBJSIZE_MAX; size <<= 1) { mempool_create(&pool, &cache, size); for (uint32_t i = 0; i < 32; i++) { void *ptr = mempool_alloc(&pool); uintptr_t addr = (uintptr_t)ptr; if (addr % size) fail("aligment", "wrong"); } mempool_destroy(&pool); } footer(); } int main() { seed = time(0); srand(seed); objsize = rand() % OBJSIZE_MAX; if (objsize < OBJSIZE_MIN) objsize = OBJSIZE_MIN; quota_init("a, UINT_MAX); slab_arena_create(&arena, "a, 0, 4000000, MAP_PRIVATE); slab_cache_create(&cache, &arena); mempool_basic(); mempool_align(); slab_cache_destroy(&cache); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/mempool.result0000644000000000000000000000015013306562360022540 0ustar rootroot *** mempool_basic *** *** mempool_basic: done *** *** mempool_align *** *** mempool_align: done *** tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/rb.c0000644000000000000000000001122713306562360020406 0ustar rootroot#include "unit.h" #include "../small/rb.h" #include #include #include #include "assert.h" #include #define NUMBER_NODES 15 #define RB_COMPACT 1 typedef struct node_s node_t; struct node_s { rb_node(node_t) node; int key; int data; }; typedef rb_tree(node_t) tree_t; static inline int key_cmp(const int a, const int b) { return (a > b) ? 1 : (a == b) ? 
0 : -1; } static inline int key_node_cmp(const int a, const node_t *b) { return key_cmp(a, b->key); } static inline int node_cmp(const node_t *a, const node_t *b) { return key_cmp(a->key, b->key); } rb_gen_ext_key(MAYBE_UNUSED static inline, test_, tree_t, node_t, node, node_cmp, int, key_node_cmp); node_t * check_simple(tree_t *tree) { header(); test_new(tree); fail_unless(test_empty(tree)); node_t *nodes = (node_t *) calloc(NUMBER_NODES, sizeof(*nodes)); if (!nodes) { printf("can't allocate nodes\n"); exit(1); } for (int i = 0; i < NUMBER_NODES; i++) { nodes[i].key = i; nodes[i].data = 2 * i; test_insert(tree, nodes + i); } fail_if(test_empty(tree)); for (int i = 0; i < NUMBER_NODES; i++) { node_t *node = test_search(tree, i); fail_if(node == NULL); fail_unless(node->data == 2 * i && node->key == i); if (i + 1 < NUMBER_NODES) { fail_unless(test_next(tree, node)->key == i + 1 && test_next(tree, node)->data == 2 *(i + 1)); } else { fail_unless(test_next(tree, node) == NULL); } if (i > 0) { fail_unless(test_prev(tree, node)->key == i - 1 && test_prev(tree, node)->data == 2 *(i - 1)); } else { fail_unless(test_prev(tree, node) == NULL); } } fail_if(test_search(tree, NUMBER_NODES) != NULL); fail_unless(test_first(tree)->key == 0); fail_unless(test_last(tree)->key == NUMBER_NODES - 1); footer(); return nodes; } static node_t * print_cb(tree_t *t, node_t *node, void* arg) { (void)t; (void)arg; printf(" %i->%i", node->key, node->data); return NULL; } void check_old_iter(tree_t *tree, node_t* nodes) { header(); node_t *node = test_psearch(tree, 6); fail_unless(node->key == 6); node = test_psearch(tree, -1); fail_unless(node == NULL); node = test_nsearch(tree, 6); fail_unless(node->key == 6); node = test_nsearch(tree, NUMBER_NODES); fail_unless(node == NULL); test_iter(tree, NULL, print_cb, NULL); printf("\n"); test_reverse_iter(tree, NULL, print_cb, NULL); printf("\n"); test_iter(tree, nodes + 3, print_cb, NULL); printf("\n"); test_reverse_iter(tree, nodes + 3, print_cb, 
NULL); printf("\n"); footer(); } void check_new_iter(tree_t *tree, node_t* nodes) { header(); struct test_iterator it; test_ifirst(tree, &it); (void) nodes; int count = 0; node_t *node = test_inext(&it); while (node) { fail_unless(node->key == count++); node = test_inext(&it); } test_icreate(tree, nodes + 3, &it); node = test_inext(&it); fail_unless(node); count = 3; while (node) { fail_unless(node->key == count++); node = test_inext(&it); } test_isearch(tree, 6, &it); node = test_inext(&it); fail_unless(node); count = 6; while (node) { fail_unless(node->key == count++); node = test_inext(&it); } test_isearch(tree, NUMBER_NODES - 1, &it); node = test_iprev(&it); fail_unless(node); count = NUMBER_NODES - 1; while (node) { fail_unless(node->key == count--); node = test_iprev(&it); } test_isearch_lt(tree, 6, &it); node = test_inext(&it); fail_unless(node->key == 5); test_isearch_gt(tree, 6, &it); node = test_inext(&it); fail_unless(node->key == 7); test_isearch_ge(tree, 6, &it); node = test_inext(&it); fail_unless(node->key == 6); test_isearch_le(tree, 6, &it); node = test_inext(&it); fail_unless(node->key == 6); test_isearch_le(tree, -1, &it); node = test_inext(&it); fail_unless(node == NULL); test_isearch_ge(tree, NUMBER_NODES, &it); node = test_inext(&it); fail_unless(node == NULL); test_isearch_lt(tree, 0, &it); node = test_inext(&it); fail_unless(node == NULL); test_isearch_gt(tree, NUMBER_NODES - 1, &it); node = test_inext(&it); fail_unless(node == NULL); footer(); } int main() { tree_t tree; node_t *nodes = check_simple(&tree); check_old_iter(&tree, nodes); check_new_iter(&tree, nodes); free(nodes); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/arena_mt.c0000644000000000000000000000341413306562360021570 0ustar rootroot#include #include #include #include #include #include #include #ifdef __FreeBSD__ #include #endif #include "unit.h" struct slab_arena arena; struct quota quota; int THREADS = 8; int ITERATIONS = 1009 /* 100003 */; int OSCILLATION = 137; int 
FILL = SLAB_MIN_SIZE/sizeof(pthread_t); void * run(void *p __attribute__((unused))) { #ifdef __FreeBSD__ unsigned int seed = pthread_getthreadid_np(); #else unsigned int seed = (intptr_t) pthread_self(); #endif int iterations = rand_r(&seed) % ITERATIONS; pthread_t **slabs = slab_map(&arena); for (int i = 0; i < iterations; i++) { int oscillation = rand_r(&seed) % OSCILLATION; for (int osc = 0; osc < oscillation; osc++) { slabs[osc] = (pthread_t *) slab_map(&arena); for (int fill = 0; fill < FILL; fill += 100) { slabs[osc][fill] = pthread_self(); } } sched_yield(); for (int osc = 0; osc < oscillation; osc++) { for (int fill = 0; fill < FILL; fill+= 100) { fail_unless(slabs[osc][fill] == pthread_self()); } slab_unmap(&arena, slabs[osc]); } } slab_unmap(&arena, slabs); return 0; } void bench(int count) { pthread_attr_t attr; pthread_attr_init(&attr); pthread_t *threads = (pthread_t *) malloc(sizeof(*threads)*count); int i; for (i = 0; i < count; i++) { pthread_create(&threads[i], &attr, run, NULL); } for (i = 0; i < count; i++) { pthread_t *thread = &threads[i]; pthread_join(*thread, NULL); } free(threads); } int main() { size_t maxalloc = THREADS * (OSCILLATION + 1) * SLAB_MIN_SIZE; quota_init("a, maxalloc); slab_arena_create(&arena, "a, maxalloc/8, SLAB_MIN_SIZE, MAP_PRIVATE); bench(THREADS); slab_arena_destroy(&arena); printf("ok\n"); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/region.result0000644000000000000000000000016413306562360022360 0ustar rootroot *** region_basic *** *** region_basic: done *** *** region_test_truncate *** *** region_test_truncate: done *** tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/.gitignore0000644000000000000000000000002613306562360021622 0ustar rootroot*.reject *.new *.test tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/rb_rand.cc0000644000000000000000000001227113306562360021555 0ustar rootroot#include #include "unit.h" #include "../small/rb.h" #include #include #include #include #include "assert.h" #include #include 
#include #include #define MAX_KEY 30 #define RB_COMPACT 0 #define DEFAULT_NODES 100 #define NUMBER_OPERS 5000 typedef struct node_s node_t; typedef std::pair my_pair; struct node_s { rb_node(node_t) node; my_pair key; }; typedef rb_tree(node_t) tree_t; typedef std::set my_set; static inline int key_cmp(const my_pair &a, const my_pair &b) { return (a.first > b.first) ? 1 : (a.first < b.first) ? -1 : (a.second > b.second) ? 1 : (a.second < b.second) ? -1: 0; } static inline int key_node_cmp(const my_pair &a , const node_t *b) { return key_cmp(a, b->key); } static inline int node_cmp(const node_t *a, const node_t *b) { return key_cmp(a->key, b->key); } rb_gen_ext_key(static inline MAYBE_UNUSED, test_, tree_t, node_t, node, node_cmp, my_pair, key_node_cmp); enum OPERS { INSERT = 0, DELETE, SEARCH, SEARCH_GE, SEARCH_LE, SEARCH_GT, SEARCH_LT, }; void insert(tree_t *tree, my_set& stl_tree) { node_t *node = (node_t *) calloc(1, sizeof(*node)); if (!node) return; node->key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); auto res = stl_tree.insert(node->key); if (res.second) { test_insert(tree, node); } } void remove(tree_t *tree, my_set &stl_tree) { my_pair key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); node_t * res = test_search(tree, key); if (res) { /* without check segfault */ stl_tree.erase(key); test_remove(tree, res); free(res); } } void filling(tree_t *tree, my_set& stl_tree) { for (int i = 0; i < DEFAULT_NODES; i++) { insert(tree, stl_tree); } } #define check(stl_it, stl_tree, res, res_it) \ if (stl_it == stl_tree.end()) { \ fail_unless(res == NULL); \ fail_unless(res_it == NULL); \ } else { \ fail_unless(res != NULL && \ key_node_cmp((*stl_it), res) == 0); \ fail_unless(res_it != NULL && \ key_node_cmp((*stl_it), res_it) == 0); \ } void search(tree_t *tree, my_set& stl_tree) { my_pair key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); node_t *res = test_search(tree, key); test_iterator it; test_isearch(tree, key, &it); node_t *res_it = 
test_iterator_get(&it); auto stl_it = stl_tree.find(key); check(stl_it, stl_tree, res, res_it); } void search_ge(tree_t *tree, my_set& stl_tree) { my_pair key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); node_t *res = test_nsearch(tree, key); test_iterator it; test_isearch_ge(tree, key, &it); node_t *res_it = test_iterator_get(&it); auto stl_it = stl_tree.lower_bound(key); /* lower bound is first not less, a.k.a greater or equal */ check(stl_it, stl_tree, res, res_it); } void search_gt(tree_t *tree, my_set& stl_tree) { my_pair key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); test_iterator it; test_isearch_gt(tree, key, &it); node_t *res_it = test_iterator_get(&it); auto stl_it = stl_tree.upper_bound(key); check(stl_it, stl_tree, res_it, res_it); } void search_le(tree_t *tree, my_set& stl_tree) { my_pair key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); node_t *res = test_psearch(tree, key); test_iterator it; test_isearch_le(tree, key, &it); node_t *res_it = test_iterator_get(&it); my_set::iterator stl_it = stl_tree.upper_bound(key); /* upper_bound is one step further than le */ if (stl_it == stl_tree.begin()) { /* begin is not decrementable */ stl_it = stl_tree.end(); } else { stl_it--; } check(stl_it, stl_tree, res, res_it); } void search_lt(tree_t *tree, my_set& stl_tree) { my_pair key = std::make_pair(rand() % MAX_KEY, rand() % MAX_KEY); test_iterator it; test_isearch_lt(tree, key, &it); node_t *res_it = test_iterator_get(&it); my_set::iterator stl_it = stl_tree.lower_bound(key); /* lower_bound is one step further than lt */ if (stl_it == stl_tree.begin()) { /* begin is not decrementable */ stl_it = stl_tree.end(); } else if (stl_it == stl_tree.end()) { /* if key is bigger than the largest in tree */ stl_it = std::max_element(stl_tree.begin(), stl_tree.end()); } else { stl_it--; } check(stl_it, stl_tree, res_it, res_it); } void opers(tree_t *tree, my_set& stl_tree) { for (int i = 0; i < NUMBER_OPERS; i++) { int op = rand() % 7; switch 
(op) { case INSERT: insert(tree, stl_tree); break; case DELETE: remove(tree, stl_tree); break; case SEARCH: search(tree, stl_tree); break; case SEARCH_GE: search_ge(tree, stl_tree); break; case SEARCH_LE: search_le(tree, stl_tree); break; case SEARCH_GT: search_gt(tree, stl_tree); break; case SEARCH_LT: search_lt(tree, stl_tree); break; } } } void delete_all(tree_t *tree, my_set& stl_tree) { for (auto it = stl_tree.begin(); it != stl_tree.end(); ++it){ my_pair p = *it; node_t *n = test_search(tree, p); fail_unless(n); test_remove(tree, n); free(n); } stl_tree.clear(); } int main() { srand (time(NULL)); tree_t tree; test_new(&tree); my_set stl_tree; filling(&tree, stl_tree); opers(&tree, stl_tree); /* clear all remaining in tree*/ delete_all(&tree, stl_tree); printf("success"); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/slab_cache.result0000644000000000000000000000000013306562360023126 0ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/test/unit.h0000644000000000000000000001016513306562360020767 0ustar rootroot#ifndef INCLUDES_TARANTOOL_TEST_UNIT_H #define INCLUDES_TARANTOOL_TEST_UNIT_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include /* exit() */ #include #define header() printf("\t*** %s ***\n", __func__) #define footer() printf("\t*** %s: done ***\n", __func__) #define fail(expr, result) do { \ fprintf(stderr, "Test failed: %s is %s at %s:%d, in function '%s'\n",\ expr, result, __FILE__, __LINE__, __func__); \ exit(-1); \ } while (0) #define fail_if(expr) if (expr) fail(#expr, "true") #define fail_unless(expr) if (!(expr)) fail(#expr, "false") #if defined(__GNUC__) || defined(__CLANG__) # define MAYBE_UNUSED __attribute__((unused)) #else #define MAYBE_UNUSED #endif #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @brief example @code #include "unit.h" int main(void) { plan(3); // count of test You planned to check ok(1, "Test name 1"); is(4, 2 * 2, "2 * 2 == 4"); isnt(5, 2 * 2, "2 * 2 != 5); return check_plan(); // print resume } @endcode */ /* private function, use ok(...) instead */ int _ok(int condition, const char *fmt, ...); /* private function, use note(...) or diag(...) instead */ void _space(FILE *stream); #define msg(stream, ...) ({ _space(stream); fprintf(stream, "# "); \ fprintf(stream, __VA_ARGS__); fprintf(stream, "\n"); }) #define note(...) msg(stdout, __VA_ARGS__) #define diag(...) msg(stderr, __VA_ARGS__) /** @brief set and print plan @param count Before anything else, you need a testing plan. This basically declares how many tests your program is going to run to protect against premature failure. 
*/ void plan(int count); /** @brief check if plan is reached and print report */ int check_plan(void); #define ok(condition, fmt, args...) { \ int res = _ok(condition, fmt, ##args); \ if (!res) { \ _space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ _space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define is(a, b, fmt, args...) { \ int res = _ok((a) == (b), fmt, ##args); \ if (!res) { \ _space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ _space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define isnt(a, b, fmt, args...) { \ int res = _ok((a) != (b), fmt, ##args); \ if (!res) { \ _space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ _space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_TEST_UNIT_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/test.sh0000644000000000000000000000007013306562360020165 0ustar rootrootcmake . -DCMAKE_BUILD_TYPE=RelWithDebInfo make -j ctest tarantool_1.9.1.26.g63eb81e3c/src/lib/small/pmatomic/0000755000000000000000000000000013306562360020466 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/pmatomic/pmatomic.h.proto0000644000000000000000000004025213306562360023615 0ustar rootroot/*- * pmatomic.h - Poor Man's atomics * * Borrowed from FreeBSD (original copyright follows). * * Standard atomic facilities in stdatomic.h are great, unless you are * stuck with an old compiler, or you attemt to compile a code using * stdatomic.h in C++ mode [gcc 4.9], or if you were desperate enough to * enable OpenMP in C mode [gcc 4.9]. 
* * There are several discrepancies between gcc and clang, namely clang * refuses to apply atomic operations to non-atomic types while gcc is * more tolerant. * * For these reasons we provide a custom implementation of operations on * atomic types: * * A. same names/semantics as in stdatomic.h; * B. all names prefixed with 'pm_' to avoid name collisions; * C. applicable to non-atomic types. * * Ex: * int i; * pm_atomic_fetch_add_explicit(&i, 1, pm_memory_order_relaxed); * * Note: do NOT use _Atomic keyword (see gcc issues above). */ /*- * Migration strategy * * Switching to will be relatively easy. A * straightforward text replace on the codebase removes 'pm_' prefix * in names. Compiling with clang reveals missing _Atomic qualifiers. */ /*- * Logistics * * In order to make it possible to merge with the updated upstream we * restrict modifications in this file to the bare minimum. For this * reason we comment unused code regions with #if 0 instead of removing * them. * * Renames are carried out by a scipt generating the final header. */ /*- * Copyright (c) 2011 Ed Schouten * David Chisnall * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: releng/10.1/sys/sys/stdatomic.h 264496 2014-04-15 09:41:52Z tijl $ */ #ifndef PMATOMIC_H__ #define PMATOMIC_H__ /* Compiler-fu */ #if !defined(__has_feature) #define __has_feature(x) 0 #endif #if !defined(__has_builtin) #define __has_builtin(x) __has_feature(x) #endif #if !defined(__GNUC_PREREQ__) #if defined(__GNUC__) && defined(__GNUC_MINOR__) #define __GNUC_PREREQ__(maj, min) \ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) #else #define __GNUC_PREREQ__(maj, min) 0 #endif #endif #include #include /* * Removed __CLANG_ATOMICS clause, this is because * 1) clang understands gcc intrinsics as well; * 2) clang intrinsics require _Atomic quialified types while gcc ones * don't. */ #if __GNUC_PREREQ__(4, 7) #define __GNUC_ATOMICS #elif defined(__GNUC__) #define __SYNC_ATOMICS #else #error "pmatomic.h does not support your compiler" #endif /* * 7.17.1 Atomic lock-free macros. 
*/ #if 0 #ifdef __GCC_ATOMIC_BOOL_LOCK_FREE #define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR_LOCK_FREE #define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_SHORT_LOCK_FREE #define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE #endif #ifdef __GCC_ATOMIC_INT_LOCK_FREE #define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE #endif #ifdef __GCC_ATOMIC_LONG_LOCK_FREE #define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE #endif #ifdef __GCC_ATOMIC_LLONG_LOCK_FREE #define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE #endif #ifdef __GCC_ATOMIC_POINTER_LOCK_FREE #define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE #endif #endif /* * 7.17.2 Initialization. */ #if 0 #if defined(__CLANG_ATOMICS) #define ATOMIC_VAR_INIT(value) (value) #define atomic_init(obj, value) __c11_atomic_init(obj, value) #else #define ATOMIC_VAR_INIT(value) { .__val = (value) } #define atomic_init(obj, value) ((void)((obj)->__val = (value))) #endif #endif /* * Clang and recent GCC both provide predefined macros for the memory * orderings. If we are using a compiler that doesn't define them, use the * clang values - these will be ignored in the fallback path. */ #ifndef __ATOMIC_RELAXED #define __ATOMIC_RELAXED 0 #endif #ifndef __ATOMIC_CONSUME #define __ATOMIC_CONSUME 1 #endif #ifndef __ATOMIC_ACQUIRE #define __ATOMIC_ACQUIRE 2 #endif #ifndef __ATOMIC_RELEASE #define __ATOMIC_RELEASE 3 #endif #ifndef __ATOMIC_ACQ_REL #define __ATOMIC_ACQ_REL 4 #endif #ifndef __ATOMIC_SEQ_CST #define __ATOMIC_SEQ_CST 5 #endif /* * 7.17.3 Order and consistency. 
* * The memory_order_* constants that denote the barrier behaviour of the * atomic operations. */ typedef enum { memory_order_relaxed = __ATOMIC_RELAXED, memory_order_consume = __ATOMIC_CONSUME, memory_order_acquire = __ATOMIC_ACQUIRE, memory_order_release = __ATOMIC_RELEASE, memory_order_acq_rel = __ATOMIC_ACQ_REL, memory_order_seq_cst = __ATOMIC_SEQ_CST } memory_order; /* * 7.17.4 Fences. */ static __inline void atomic_thread_fence(memory_order __order __attribute__((__unused__))) { #ifdef __CLANG_ATOMICS __c11_atomic_thread_fence(__order); #elif defined(__GNUC_ATOMICS) __atomic_thread_fence(__order); #else __sync_synchronize(); #endif } static __inline void atomic_signal_fence(memory_order __order __attribute__((__unused__))) { #ifdef __CLANG_ATOMICS __c11_atomic_signal_fence(__order); #elif defined(__GNUC_ATOMICS) __atomic_signal_fence(__order); #else __asm volatile ("" ::: "memory"); #endif } /* * 7.17.5 Lock-free property. */ #if 0 #if defined(_KERNEL) /* Atomics in kernelspace are always lock-free. */ #define atomic_is_lock_free(obj) \ ((void)(obj), (_Bool)1) #elif defined(__CLANG_ATOMICS) #define atomic_is_lock_free(obj) \ __atomic_is_lock_free(sizeof(*(obj)), obj) #elif defined(__GNUC_ATOMICS) #define atomic_is_lock_free(obj) \ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val) #else #define atomic_is_lock_free(obj) \ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *)) #endif #endif /* * 7.17.6 Atomic integer types. 
*/ #if 0 typedef _Atomic(_Bool) atomic_bool; typedef _Atomic(char) atomic_char; typedef _Atomic(signed char) atomic_schar; typedef _Atomic(unsigned char) atomic_uchar; typedef _Atomic(short) atomic_short; typedef _Atomic(unsigned short) atomic_ushort; typedef _Atomic(int) atomic_int; typedef _Atomic(unsigned int) atomic_uint; typedef _Atomic(long) atomic_long; typedef _Atomic(unsigned long) atomic_ulong; typedef _Atomic(long long) atomic_llong; typedef _Atomic(unsigned long long) atomic_ullong; typedef _Atomic(__char16_t) atomic_char16_t; typedef _Atomic(__char32_t) atomic_char32_t; typedef _Atomic(___wchar_t) atomic_wchar_t; typedef _Atomic(__int_least8_t) atomic_int_least8_t; typedef _Atomic(__uint_least8_t) atomic_uint_least8_t; typedef _Atomic(__int_least16_t) atomic_int_least16_t; typedef _Atomic(__uint_least16_t) atomic_uint_least16_t; typedef _Atomic(__int_least32_t) atomic_int_least32_t; typedef _Atomic(__uint_least32_t) atomic_uint_least32_t; typedef _Atomic(__int_least64_t) atomic_int_least64_t; typedef _Atomic(__uint_least64_t) atomic_uint_least64_t; typedef _Atomic(__int_fast8_t) atomic_int_fast8_t; typedef _Atomic(__uint_fast8_t) atomic_uint_fast8_t; typedef _Atomic(__int_fast16_t) atomic_int_fast16_t; typedef _Atomic(__uint_fast16_t) atomic_uint_fast16_t; typedef _Atomic(__int_fast32_t) atomic_int_fast32_t; typedef _Atomic(__uint_fast32_t) atomic_uint_fast32_t; typedef _Atomic(__int_fast64_t) atomic_int_fast64_t; typedef _Atomic(__uint_fast64_t) atomic_uint_fast64_t; typedef _Atomic(__intptr_t) atomic_intptr_t; typedef _Atomic(__uintptr_t) atomic_uintptr_t; typedef _Atomic(__size_t) atomic_size_t; typedef _Atomic(__ptrdiff_t) atomic_ptrdiff_t; typedef _Atomic(__intmax_t) atomic_intmax_t; typedef _Atomic(__uintmax_t) atomic_uintmax_t; #endif /* * 7.17.7 Operations on atomic types. */ /* * Compiler-specific operations. 
*/ #if defined(__CLANG_ATOMICS) #define atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) \ __c11_atomic_compare_exchange_strong(object, expected, desired, \ success, failure) #define atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ __c11_atomic_compare_exchange_weak(object, expected, desired, \ success, failure) #define atomic_exchange_explicit(object, desired, order) \ __c11_atomic_exchange(object, desired, order) #define atomic_fetch_add_explicit(object, operand, order) \ __c11_atomic_fetch_add(object, operand, order) #define atomic_fetch_and_explicit(object, operand, order) \ __c11_atomic_fetch_and(object, operand, order) #define atomic_fetch_or_explicit(object, operand, order) \ __c11_atomic_fetch_or(object, operand, order) #define atomic_fetch_sub_explicit(object, operand, order) \ __c11_atomic_fetch_sub(object, operand, order) #define atomic_fetch_xor_explicit(object, operand, order) \ __c11_atomic_fetch_xor(object, operand, order) #define atomic_load_explicit(object, order) \ __c11_atomic_load(object, order) #define atomic_store_explicit(object, desired, order) \ __c11_atomic_store(object, desired, order) #elif defined(__GNUC_ATOMICS) #define atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) \ __atomic_compare_exchange_n(&(object)->__val, expected, \ desired, 0, success, failure) #define atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ __atomic_compare_exchange_n(&(object)->__val, expected, \ desired, 1, success, failure) #define atomic_exchange_explicit(object, desired, order) \ __atomic_exchange_n(&(object)->__val, desired, order) #define atomic_fetch_add_explicit(object, operand, order) \ __atomic_fetch_add(&(object)->__val, operand, order) #define atomic_fetch_and_explicit(object, operand, order) \ __atomic_fetch_and(&(object)->__val, operand, order) #define atomic_fetch_or_explicit(object, operand, order) 
\ __atomic_fetch_or(&(object)->__val, operand, order) #define atomic_fetch_sub_explicit(object, operand, order) \ __atomic_fetch_sub(&(object)->__val, operand, order) #define atomic_fetch_xor_explicit(object, operand, order) \ __atomic_fetch_xor(&(object)->__val, operand, order) #define atomic_load_explicit(object, order) \ __atomic_load_n(&(object)->__val, order) #define atomic_store_explicit(object, desired, order) \ __atomic_store_n(&(object)->__val, desired, order) #else #define __atomic_apply_stride(object, operand) \ (((__typeof__((object)->__val))0) + (operand)) #define atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) __extension__ ({ \ __typeof__(expected) __ep = (expected); \ __typeof__(*__ep) __e = *__ep; \ (void)(success); (void)(failure); \ (_Bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \ __e, desired)) == __e); \ }) #define atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) #if __has_builtin(__sync_swap) /* Clang provides a full-barrier atomic exchange - use it if available. */ #define atomic_exchange_explicit(object, desired, order) \ ((void)(order), __sync_swap(&(object)->__val, desired)) #else /* * __sync_lock_test_and_set() is only an acquire barrier in theory (although in * practice it is usually a full barrier) so we need an explicit barrier before * it. 
*/ #define atomic_exchange_explicit(object, desired, order) \ __extension__ ({ \ __typeof__(object) __o = (object); \ __typeof__(desired) __d = (desired); \ (void)(order); \ __sync_synchronize(); \ __sync_lock_test_and_set(&(__o)->__val, __d); \ }) #endif #define atomic_fetch_add_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_add(&(object)->__val, \ __atomic_apply_stride(object, operand))) #define atomic_fetch_and_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_and(&(object)->__val, operand)) #define atomic_fetch_or_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_or(&(object)->__val, operand)) #define atomic_fetch_sub_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_sub(&(object)->__val, \ __atomic_apply_stride(object, operand))) #define atomic_fetch_xor_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_xor(&(object)->__val, operand)) #define atomic_load_explicit(object, order) \ ((void)(order), __sync_fetch_and_add(&(object)->__val, 0)) #define atomic_store_explicit(object, desired, order) \ ((void)atomic_exchange_explicit(object, desired, order)) #endif /* * Convenience functions. * * Don't provide these in kernel space. In kernel space, we should be * disciplined enough to always provide explicit barriers. 
*/ #ifndef _KERNEL #define atomic_compare_exchange_strong(object, expected, desired) \ atomic_compare_exchange_strong_explicit(object, expected, \ desired, memory_order_seq_cst, memory_order_seq_cst) #define atomic_compare_exchange_weak(object, expected, desired) \ atomic_compare_exchange_weak_explicit(object, expected, \ desired, memory_order_seq_cst, memory_order_seq_cst) #define atomic_exchange(object, desired) \ atomic_exchange_explicit(object, desired, memory_order_seq_cst) #define atomic_fetch_add(object, operand) \ atomic_fetch_add_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_and(object, operand) \ atomic_fetch_and_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_or(object, operand) \ atomic_fetch_or_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_sub(object, operand) \ atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_xor(object, operand) \ atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst) #define atomic_load(object) \ atomic_load_explicit(object, memory_order_seq_cst) #define atomic_store(object, desired) \ atomic_store_explicit(object, desired, memory_order_seq_cst) #endif /* !_KERNEL */ /* * 7.17.8 Atomic flag type and operations. * * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some * kind of compiler built-in type we could use? 
*/ #if 0 typedef struct { atomic_bool __flag; } atomic_flag; #define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) } static __inline _Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *__object, memory_order __order) { return (atomic_exchange_explicit(&__object->__flag, 1, __order)); } static __inline void atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order) { atomic_store_explicit(&__object->__flag, 0, __order); } #ifndef _KERNEL static __inline _Bool atomic_flag_test_and_set(volatile atomic_flag *__object) { return (atomic_flag_test_and_set_explicit(__object, memory_order_seq_cst)); } static __inline void atomic_flag_clear(volatile atomic_flag *__object) { atomic_flag_clear_explicit(__object, memory_order_seq_cst); } #endif /* !_KERNEL */ #endif #endif /* !_STDATOMIC_H_ */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/pmatomic/stdatomic.h0000644000000000000000000003423213306562360022632 0ustar rootroot/*- * Copyright (c) 2011 Ed Schouten * David Chisnall * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: releng/10.1/sys/sys/stdatomic.h 264496 2014-04-15 09:41:52Z tijl $ */ #ifndef _STDATOMIC_H_ #define _STDATOMIC_H_ #include #include #if __has_extension(c_atomic) || __has_extension(cxx_atomic) #define __CLANG_ATOMICS #elif __GNUC_PREREQ__(4, 7) #define __GNUC_ATOMICS #elif defined(__GNUC__) #define __SYNC_ATOMICS #else #error "stdatomic.h does not support your compiler" #endif /* * 7.17.1 Atomic lock-free macros. */ #ifdef __GCC_ATOMIC_BOOL_LOCK_FREE #define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR_LOCK_FREE #define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_SHORT_LOCK_FREE #define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE #endif #ifdef __GCC_ATOMIC_INT_LOCK_FREE #define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE #endif #ifdef __GCC_ATOMIC_LONG_LOCK_FREE #define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE #endif #ifdef __GCC_ATOMIC_LLONG_LOCK_FREE #define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE #endif #ifdef __GCC_ATOMIC_POINTER_LOCK_FREE #define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE #endif 
/* * 7.17.2 Initialization. */ #if defined(__CLANG_ATOMICS) #define ATOMIC_VAR_INIT(value) (value) #define atomic_init(obj, value) __c11_atomic_init(obj, value) #else #define ATOMIC_VAR_INIT(value) { .__val = (value) } #define atomic_init(obj, value) ((void)((obj)->__val = (value))) #endif /* * Clang and recent GCC both provide predefined macros for the memory * orderings. If we are using a compiler that doesn't define them, use the * clang values - these will be ignored in the fallback path. */ #ifndef __ATOMIC_RELAXED #define __ATOMIC_RELAXED 0 #endif #ifndef __ATOMIC_CONSUME #define __ATOMIC_CONSUME 1 #endif #ifndef __ATOMIC_ACQUIRE #define __ATOMIC_ACQUIRE 2 #endif #ifndef __ATOMIC_RELEASE #define __ATOMIC_RELEASE 3 #endif #ifndef __ATOMIC_ACQ_REL #define __ATOMIC_ACQ_REL 4 #endif #ifndef __ATOMIC_SEQ_CST #define __ATOMIC_SEQ_CST 5 #endif /* * 7.17.3 Order and consistency. * * The memory_order_* constants that denote the barrier behaviour of the * atomic operations. */ typedef enum { memory_order_relaxed = __ATOMIC_RELAXED, memory_order_consume = __ATOMIC_CONSUME, memory_order_acquire = __ATOMIC_ACQUIRE, memory_order_release = __ATOMIC_RELEASE, memory_order_acq_rel = __ATOMIC_ACQ_REL, memory_order_seq_cst = __ATOMIC_SEQ_CST } memory_order; /* * 7.17.4 Fences. */ static __inline void atomic_thread_fence(memory_order __order __unused) { #ifdef __CLANG_ATOMICS __c11_atomic_thread_fence(__order); #elif defined(__GNUC_ATOMICS) __atomic_thread_fence(__order); #else __sync_synchronize(); #endif } static __inline void atomic_signal_fence(memory_order __order __unused) { #ifdef __CLANG_ATOMICS __c11_atomic_signal_fence(__order); #elif defined(__GNUC_ATOMICS) __atomic_signal_fence(__order); #else __asm volatile ("" ::: "memory"); #endif } /* * 7.17.5 Lock-free property. */ #if defined(_KERNEL) /* Atomics in kernelspace are always lock-free. 
*/ #define atomic_is_lock_free(obj) \ ((void)(obj), (_Bool)1) #elif defined(__CLANG_ATOMICS) #define atomic_is_lock_free(obj) \ __atomic_is_lock_free(sizeof(*(obj)), obj) #elif defined(__GNUC_ATOMICS) #define atomic_is_lock_free(obj) \ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val) #else #define atomic_is_lock_free(obj) \ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *)) #endif /* * 7.17.6 Atomic integer types. */ typedef _Atomic(_Bool) atomic_bool; typedef _Atomic(char) atomic_char; typedef _Atomic(signed char) atomic_schar; typedef _Atomic(unsigned char) atomic_uchar; typedef _Atomic(short) atomic_short; typedef _Atomic(unsigned short) atomic_ushort; typedef _Atomic(int) atomic_int; typedef _Atomic(unsigned int) atomic_uint; typedef _Atomic(long) atomic_long; typedef _Atomic(unsigned long) atomic_ulong; typedef _Atomic(long long) atomic_llong; typedef _Atomic(unsigned long long) atomic_ullong; typedef _Atomic(__char16_t) atomic_char16_t; typedef _Atomic(__char32_t) atomic_char32_t; typedef _Atomic(___wchar_t) atomic_wchar_t; typedef _Atomic(__int_least8_t) atomic_int_least8_t; typedef _Atomic(__uint_least8_t) atomic_uint_least8_t; typedef _Atomic(__int_least16_t) atomic_int_least16_t; typedef _Atomic(__uint_least16_t) atomic_uint_least16_t; typedef _Atomic(__int_least32_t) atomic_int_least32_t; typedef _Atomic(__uint_least32_t) atomic_uint_least32_t; typedef _Atomic(__int_least64_t) atomic_int_least64_t; typedef _Atomic(__uint_least64_t) atomic_uint_least64_t; typedef _Atomic(__int_fast8_t) atomic_int_fast8_t; typedef _Atomic(__uint_fast8_t) atomic_uint_fast8_t; typedef _Atomic(__int_fast16_t) atomic_int_fast16_t; typedef _Atomic(__uint_fast16_t) atomic_uint_fast16_t; typedef _Atomic(__int_fast32_t) atomic_int_fast32_t; typedef _Atomic(__uint_fast32_t) atomic_uint_fast32_t; typedef _Atomic(__int_fast64_t) atomic_int_fast64_t; typedef _Atomic(__uint_fast64_t) atomic_uint_fast64_t; typedef _Atomic(__intptr_t) atomic_intptr_t; typedef 
_Atomic(__uintptr_t) atomic_uintptr_t; typedef _Atomic(__size_t) atomic_size_t; typedef _Atomic(__ptrdiff_t) atomic_ptrdiff_t; typedef _Atomic(__intmax_t) atomic_intmax_t; typedef _Atomic(__uintmax_t) atomic_uintmax_t; /* * 7.17.7 Operations on atomic types. */ /* * Compiler-specific operations. */ #if defined(__CLANG_ATOMICS) #define atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) \ __c11_atomic_compare_exchange_strong(object, expected, desired, \ success, failure) #define atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ __c11_atomic_compare_exchange_weak(object, expected, desired, \ success, failure) #define atomic_exchange_explicit(object, desired, order) \ __c11_atomic_exchange(object, desired, order) #define atomic_fetch_add_explicit(object, operand, order) \ __c11_atomic_fetch_add(object, operand, order) #define atomic_fetch_and_explicit(object, operand, order) \ __c11_atomic_fetch_and(object, operand, order) #define atomic_fetch_or_explicit(object, operand, order) \ __c11_atomic_fetch_or(object, operand, order) #define atomic_fetch_sub_explicit(object, operand, order) \ __c11_atomic_fetch_sub(object, operand, order) #define atomic_fetch_xor_explicit(object, operand, order) \ __c11_atomic_fetch_xor(object, operand, order) #define atomic_load_explicit(object, order) \ __c11_atomic_load(object, order) #define atomic_store_explicit(object, desired, order) \ __c11_atomic_store(object, desired, order) #elif defined(__GNUC_ATOMICS) #define atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) \ __atomic_compare_exchange_n(&(object)->__val, expected, \ desired, 0, success, failure) #define atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ __atomic_compare_exchange_n(&(object)->__val, expected, \ desired, 1, success, failure) #define atomic_exchange_explicit(object, desired, order) \ __atomic_exchange_n(&(object)->__val, 
desired, order) #define atomic_fetch_add_explicit(object, operand, order) \ __atomic_fetch_add(&(object)->__val, operand, order) #define atomic_fetch_and_explicit(object, operand, order) \ __atomic_fetch_and(&(object)->__val, operand, order) #define atomic_fetch_or_explicit(object, operand, order) \ __atomic_fetch_or(&(object)->__val, operand, order) #define atomic_fetch_sub_explicit(object, operand, order) \ __atomic_fetch_sub(&(object)->__val, operand, order) #define atomic_fetch_xor_explicit(object, operand, order) \ __atomic_fetch_xor(&(object)->__val, operand, order) #define atomic_load_explicit(object, order) \ __atomic_load_n(&(object)->__val, order) #define atomic_store_explicit(object, desired, order) \ __atomic_store_n(&(object)->__val, desired, order) #else #define __atomic_apply_stride(object, operand) \ (((__typeof__((object)->__val))0) + (operand)) #define atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) __extension__ ({ \ __typeof__(expected) __ep = (expected); \ __typeof__(*__ep) __e = *__ep; \ (void)(success); (void)(failure); \ (_Bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \ __e, desired)) == __e); \ }) #define atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) #if __has_builtin(__sync_swap) /* Clang provides a full-barrier atomic exchange - use it if available. */ #define atomic_exchange_explicit(object, desired, order) \ ((void)(order), __sync_swap(&(object)->__val, desired)) #else /* * __sync_lock_test_and_set() is only an acquire barrier in theory (although in * practice it is usually a full barrier) so we need an explicit barrier before * it. 
*/ #define atomic_exchange_explicit(object, desired, order) \ __extension__ ({ \ __typeof__(object) __o = (object); \ __typeof__(desired) __d = (desired); \ (void)(order); \ __sync_synchronize(); \ __sync_lock_test_and_set(&(__o)->__val, __d); \ }) #endif #define atomic_fetch_add_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_add(&(object)->__val, \ __atomic_apply_stride(object, operand))) #define atomic_fetch_and_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_and(&(object)->__val, operand)) #define atomic_fetch_or_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_or(&(object)->__val, operand)) #define atomic_fetch_sub_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_sub(&(object)->__val, \ __atomic_apply_stride(object, operand))) #define atomic_fetch_xor_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_xor(&(object)->__val, operand)) #define atomic_load_explicit(object, order) \ ((void)(order), __sync_fetch_and_add(&(object)->__val, 0)) #define atomic_store_explicit(object, desired, order) \ ((void)atomic_exchange_explicit(object, desired, order)) #endif /* * Convenience functions. * * Don't provide these in kernel space. In kernel space, we should be * disciplined enough to always provide explicit barriers. 
*/ #ifndef _KERNEL #define atomic_compare_exchange_strong(object, expected, desired) \ atomic_compare_exchange_strong_explicit(object, expected, \ desired, memory_order_seq_cst, memory_order_seq_cst) #define atomic_compare_exchange_weak(object, expected, desired) \ atomic_compare_exchange_weak_explicit(object, expected, \ desired, memory_order_seq_cst, memory_order_seq_cst) #define atomic_exchange(object, desired) \ atomic_exchange_explicit(object, desired, memory_order_seq_cst) #define atomic_fetch_add(object, operand) \ atomic_fetch_add_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_and(object, operand) \ atomic_fetch_and_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_or(object, operand) \ atomic_fetch_or_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_sub(object, operand) \ atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst) #define atomic_fetch_xor(object, operand) \ atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst) #define atomic_load(object) \ atomic_load_explicit(object, memory_order_seq_cst) #define atomic_store(object, desired) \ atomic_store_explicit(object, desired, memory_order_seq_cst) #endif /* !_KERNEL */ /* * 7.17.8 Atomic flag type and operations. * * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some * kind of compiler built-in type we could use? 
*/ typedef struct { atomic_bool __flag; } atomic_flag; #define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) } static __inline _Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *__object, memory_order __order) { return (atomic_exchange_explicit(&__object->__flag, 1, __order)); } static __inline void atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order) { atomic_store_explicit(&__object->__flag, 0, __order); } #ifndef _KERNEL static __inline _Bool atomic_flag_test_and_set(volatile atomic_flag *__object) { return (atomic_flag_test_and_set_explicit(__object, memory_order_seq_cst)); } static __inline void atomic_flag_clear(volatile atomic_flag *__object) { atomic_flag_clear_explicit(__object, memory_order_seq_cst); } #endif /* !_KERNEL */ #endif /* !_STDATOMIC_H_ */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/pmatomic/add-pm-prefix-filt.py0000644000000000000000000000356113306562360024436 0ustar rootroot#! /usr/bin/env python # Append 'pm_' prefix (used to generate pmatomic.h from pmatomic.h.proto) subst=r""" _Bool bool &\(object\)->__val object \(object\)->__val *(object) &\(__o\)->__val __o __CLANG_ATOMICS __PM_CLANG_ATOMICS __GNUC_ATOMICS __PM_GNUC_ATOMICS __SYNC_ATOMICS __PM_SYNC_ATOMICS __atomic_apply_stride __pm_atomic_apply_stride atomic_compare_exchange_strong pm_atomic_compare_exchange_strong atomic_compare_exchange_strong_explicit pm_atomic_compare_exchange_strong_explicit atomic_compare_exchange_weak pm_atomic_compare_exchange_weak atomic_compare_exchange_weak_explicit pm_atomic_compare_exchange_weak_explicit atomic_exchange pm_atomic_exchange atomic_exchange_explicit pm_atomic_exchange_explicit atomic_fetch_add pm_atomic_fetch_add atomic_fetch_add_explicit pm_atomic_fetch_add_explicit atomic_fetch_and pm_atomic_fetch_and atomic_fetch_and_explicit pm_atomic_fetch_and_explicit atomic_fetch_or pm_atomic_fetch_or atomic_fetch_or_explicit pm_atomic_fetch_or_explicit atomic_fetch_sub pm_atomic_fetch_sub atomic_fetch_sub_explicit 
pm_atomic_fetch_sub_explicit atomic_fetch_xor pm_atomic_fetch_xor atomic_fetch_xor_explicit pm_atomic_fetch_xor_explicit atomic_load pm_atomic_load atomic_load_explicit pm_atomic_load_explicit atomic_signal_fence pm_atomic_signal_fence atomic_store pm_atomic_store atomic_store_explicit pm_atomic_store_explicit atomic_thread_fence pm_atomic_thread_fence memory_order pm_memory_order memory_order_acq_rel pm_memory_order_acq_rel memory_order_acquire pm_memory_order_acquire memory_order_consume pm_memory_order_consume memory_order_relaxed pm_memory_order_relaxed memory_order_release pm_memory_order_release memory_order_seq_cst pm_memory_order_seq_cst""" import sys import re data = sys.stdin.read() for pattern, repl in (ln.split(' ',2) for ln in subst.splitlines() if ln): data = re.sub(r'(?<=\W)'+pattern, repl, data) sys.stdout.write(data) tarantool_1.9.1.26.g63eb81e3c/src/lib/small/third_party/0000755000000000000000000000000013306562360021206 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/third_party/pmatomic.h0000644000000000000000000004043413306562360023175 0ustar rootroot/*- * pmatomic.h - Poor Man's atomics * * Borrowed from FreeBSD (original copyright follows). * * Standard atomic facilities in stdatomic.h are great, unless you are * stuck with an old compiler, or you attempt to compile code using * stdatomic.h in C++ mode [gcc 4.9], or if you were desperate enough to * enable OpenMP in C mode [gcc 4.9]. * * There are several discrepancies between gcc and clang, namely clang * refuses to apply atomic operations to non-atomic types while gcc is * more tolerant. * * For these reasons we provide a custom implementation of operations on * atomic types: * * A. same names/semantics as in stdatomic.h; * B. all names prefixed with 'pm_' to avoid name collisions; * C. applicable to non-atomic types. * * Ex: * int i; * pm_atomic_fetch_add_explicit(&i, 1, pm_memory_order_relaxed); * * Note: do NOT use _Atomic keyword (see gcc issues above). 
*/ /*- * Migration strategy * * Switching to will be relatively easy. A * straightforward text replace on the codebase removes 'pm_' prefix * in names. Compiling with clang reveals missing _Atomic qualifiers. */ /*- * Logistics * * In order to make it possible to merge with the updated upstream we * restrict modifications in this file to the bare minimum. For this * reason we comment unused code regions with #if 0 instead of removing * them. * * Renames are carried out by a script generating the final header. */ /*- * Copyright (c) 2011 Ed Schouten * David Chisnall * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD: releng/10.1/sys/sys/stdatomic.h 264496 2014-04-15 09:41:52Z tijl $ */ #ifndef PMATOMIC_H__ #define PMATOMIC_H__ /* Compiler-fu */ #if !defined(__has_feature) #define __has_feature(x) 0 #endif #if !defined(__has_builtin) #define __has_builtin(x) __has_feature(x) #endif #if !defined(__GNUC_PREREQ__) #if defined(__GNUC__) && defined(__GNUC_MINOR__) #define __GNUC_PREREQ__(maj, min) \ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) #else #define __GNUC_PREREQ__(maj, min) 0 #endif #endif #include #include #include /* * Removed __PM_CLANG_ATOMICS clause, this is because * 1) clang understands gcc intrinsics as well; * 2) clang intrinsics require _Atomic quialified types while gcc ones * don't. */ #if __GNUC_PREREQ__(4, 7) #define __PM_GNUC_ATOMICS #elif defined(__GNUC__) #define __PM_SYNC_ATOMICS #else #error "pmatomic.h does not support your compiler" #endif /* * 7.17.1 Atomic lock-free macros. */ #if 0 #ifdef __GCC_ATOMIC_BOOL_LOCK_FREE #define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR_LOCK_FREE #define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE #endif #ifdef __GCC_ATOMIC_SHORT_LOCK_FREE #define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE #endif #ifdef __GCC_ATOMIC_INT_LOCK_FREE #define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE #endif #ifdef __GCC_ATOMIC_LONG_LOCK_FREE #define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE #endif #ifdef __GCC_ATOMIC_LLONG_LOCK_FREE #define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE #endif #ifdef __GCC_ATOMIC_POINTER_LOCK_FREE #define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE #endif #endif /* * 
7.17.2 Initialization. */ #if 0 #if defined(__PM_CLANG_ATOMICS) #define ATOMIC_VAR_INIT(value) (value) #define atomic_init(obj, value) __c11_atomic_init(obj, value) #else #define ATOMIC_VAR_INIT(value) { .__val = (value) } #define atomic_init(obj, value) ((void)((obj)->__val = (value))) #endif #endif /* * Clang and recent GCC both provide predefined macros for the memory * orderings. If we are using a compiler that doesn't define them, use the * clang values - these will be ignored in the fallback path. */ #ifndef __ATOMIC_RELAXED #define __ATOMIC_RELAXED 0 #endif #ifndef __ATOMIC_CONSUME #define __ATOMIC_CONSUME 1 #endif #ifndef __ATOMIC_ACQUIRE #define __ATOMIC_ACQUIRE 2 #endif #ifndef __ATOMIC_RELEASE #define __ATOMIC_RELEASE 3 #endif #ifndef __ATOMIC_ACQ_REL #define __ATOMIC_ACQ_REL 4 #endif #ifndef __ATOMIC_SEQ_CST #define __ATOMIC_SEQ_CST 5 #endif /* * 7.17.3 Order and consistency. * * The pm_memory_order_* constants that denote the barrier behaviour of the * atomic operations. */ typedef enum { pm_memory_order_relaxed = __ATOMIC_RELAXED, pm_memory_order_consume = __ATOMIC_CONSUME, pm_memory_order_acquire = __ATOMIC_ACQUIRE, pm_memory_order_release = __ATOMIC_RELEASE, pm_memory_order_acq_rel = __ATOMIC_ACQ_REL, pm_memory_order_seq_cst = __ATOMIC_SEQ_CST } pm_memory_order; /* * 7.17.4 Fences. */ static __inline void pm_atomic_thread_fence(pm_memory_order __order __attribute__((__unused__))) { #ifdef __PM_CLANG_ATOMICS __c11_atomic_thread_fence(__order); #elif defined(__PM_GNUC_ATOMICS) __atomic_thread_fence(__order); #else __sync_synchronize(); #endif } static __inline void pm_atomic_signal_fence(pm_memory_order __order __attribute__((__unused__))) { #ifdef __PM_CLANG_ATOMICS __c11_atomic_signal_fence(__order); #elif defined(__PM_GNUC_ATOMICS) __atomic_signal_fence(__order); #else __asm volatile ("" ::: "memory"); #endif } /* * 7.17.5 Lock-free property. */ #if 0 #if defined(_KERNEL) /* Atomics in kernelspace are always lock-free. 
*/ #define atomic_is_lock_free(obj) \ ((void)(obj), (bool)1) #elif defined(__PM_CLANG_ATOMICS) #define atomic_is_lock_free(obj) \ __atomic_is_lock_free(sizeof(*(obj)), obj) #elif defined(__PM_GNUC_ATOMICS) #define atomic_is_lock_free(obj) \ __atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val) #else #define atomic_is_lock_free(obj) \ ((void)(obj), sizeof((obj)->__val) <= sizeof(void *)) #endif #endif /* * 7.17.6 Atomic integer types. */ #if 0 typedef _Atomic(bool) atomic_bool; typedef _Atomic(char) atomic_char; typedef _Atomic(signed char) atomic_schar; typedef _Atomic(unsigned char) atomic_uchar; typedef _Atomic(short) atomic_short; typedef _Atomic(unsigned short) atomic_ushort; typedef _Atomic(int) atomic_int; typedef _Atomic(unsigned int) atomic_uint; typedef _Atomic(long) atomic_long; typedef _Atomic(unsigned long) atomic_ulong; typedef _Atomic(long long) atomic_llong; typedef _Atomic(unsigned long long) atomic_ullong; typedef _Atomic(__char16_t) atomic_char16_t; typedef _Atomic(__char32_t) atomic_char32_t; typedef _Atomic(___wchar_t) atomic_wchar_t; typedef _Atomic(__int_least8_t) atomic_int_least8_t; typedef _Atomic(__uint_least8_t) atomic_uint_least8_t; typedef _Atomic(__int_least16_t) atomic_int_least16_t; typedef _Atomic(__uint_least16_t) atomic_uint_least16_t; typedef _Atomic(__int_least32_t) atomic_int_least32_t; typedef _Atomic(__uint_least32_t) atomic_uint_least32_t; typedef _Atomic(__int_least64_t) atomic_int_least64_t; typedef _Atomic(__uint_least64_t) atomic_uint_least64_t; typedef _Atomic(__int_fast8_t) atomic_int_fast8_t; typedef _Atomic(__uint_fast8_t) atomic_uint_fast8_t; typedef _Atomic(__int_fast16_t) atomic_int_fast16_t; typedef _Atomic(__uint_fast16_t) atomic_uint_fast16_t; typedef _Atomic(__int_fast32_t) atomic_int_fast32_t; typedef _Atomic(__uint_fast32_t) atomic_uint_fast32_t; typedef _Atomic(__int_fast64_t) atomic_int_fast64_t; typedef _Atomic(__uint_fast64_t) atomic_uint_fast64_t; typedef _Atomic(__intptr_t) atomic_intptr_t; 
typedef _Atomic(__uintptr_t) atomic_uintptr_t; typedef _Atomic(__size_t) atomic_size_t; typedef _Atomic(__ptrdiff_t) atomic_ptrdiff_t; typedef _Atomic(__intmax_t) atomic_intmax_t; typedef _Atomic(__uintmax_t) atomic_uintmax_t; #endif /* * 7.17.7 Operations on atomic types. */ /* * Compiler-specific operations. */ #if defined(__PM_CLANG_ATOMICS) #define pm_atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) \ __c11_atomic_compare_exchange_strong(object, expected, desired, \ success, failure) #define pm_atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ __c11_atomic_compare_exchange_weak(object, expected, desired, \ success, failure) #define pm_atomic_exchange_explicit(object, desired, order) \ __c11_atomic_exchange(object, desired, order) #define pm_atomic_fetch_add_explicit(object, operand, order) \ __c11_atomic_fetch_add(object, operand, order) #define pm_atomic_fetch_and_explicit(object, operand, order) \ __c11_atomic_fetch_and(object, operand, order) #define pm_atomic_fetch_or_explicit(object, operand, order) \ __c11_atomic_fetch_or(object, operand, order) #define pm_atomic_fetch_sub_explicit(object, operand, order) \ __c11_atomic_fetch_sub(object, operand, order) #define pm_atomic_fetch_xor_explicit(object, operand, order) \ __c11_atomic_fetch_xor(object, operand, order) #define pm_atomic_load_explicit(object, order) \ __c11_atomic_load(object, order) #define pm_atomic_store_explicit(object, desired, order) \ __c11_atomic_store(object, desired, order) #elif defined(__PM_GNUC_ATOMICS) #define pm_atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) \ __atomic_compare_exchange_n(object, expected, \ desired, 0, success, failure) #define pm_atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ __atomic_compare_exchange_n(object, expected, \ desired, 1, success, failure) #define pm_atomic_exchange_explicit(object, desired, order) \ 
__atomic_exchange_n(object, desired, order) #define pm_atomic_fetch_add_explicit(object, operand, order) \ __atomic_fetch_add(object, operand, order) #define pm_atomic_fetch_and_explicit(object, operand, order) \ __atomic_fetch_and(object, operand, order) #define pm_atomic_fetch_or_explicit(object, operand, order) \ __atomic_fetch_or(object, operand, order) #define pm_atomic_fetch_sub_explicit(object, operand, order) \ __atomic_fetch_sub(object, operand, order) #define pm_atomic_fetch_xor_explicit(object, operand, order) \ __atomic_fetch_xor(object, operand, order) #define pm_atomic_load_explicit(object, order) \ __atomic_load_n(object, order) #define pm_atomic_store_explicit(object, desired, order) \ __atomic_store_n(object, desired, order) #else #define __pm_atomic_apply_stride(object, operand) \ (((__typeof__(*(object)))0) + (operand)) #define pm_atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) __extension__ ({ \ __typeof__(expected) __ep = (expected); \ __typeof__(*__ep) __e = *__ep; \ (void)(success); (void)(failure); \ (bool)((*__ep = __sync_val_compare_and_swap(object, \ __e, desired)) == __e); \ }) #define pm_atomic_compare_exchange_weak_explicit(object, expected, \ desired, success, failure) \ pm_atomic_compare_exchange_strong_explicit(object, expected, \ desired, success, failure) #if __has_builtin(__sync_swap) /* Clang provides a full-barrier atomic exchange - use it if available. */ #define pm_atomic_exchange_explicit(object, desired, order) \ ((void)(order), __sync_swap(object, desired)) #else /* * __sync_lock_test_and_set() is only an acquire barrier in theory (although in * practice it is usually a full barrier) so we need an explicit barrier before * it. 
*/ #define pm_atomic_exchange_explicit(object, desired, order) \ __extension__ ({ \ __typeof__(object) __o = (object); \ __typeof__(desired) __d = (desired); \ (void)(order); \ __sync_synchronize(); \ __sync_lock_test_and_set(__o, __d); \ }) #endif #define pm_atomic_fetch_add_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_add(object, \ __pm_atomic_apply_stride(object, operand))) #define pm_atomic_fetch_and_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_and(object, operand)) #define pm_atomic_fetch_or_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_or(object, operand)) #define pm_atomic_fetch_sub_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_sub(object, \ __pm_atomic_apply_stride(object, operand))) #define pm_atomic_fetch_xor_explicit(object, operand, order) \ ((void)(order), __sync_fetch_and_xor(object, operand)) #define pm_atomic_load_explicit(object, order) \ ((void)(order), __sync_fetch_and_add(object, 0)) #define pm_atomic_store_explicit(object, desired, order) \ ((void)pm_atomic_exchange_explicit(object, desired, order)) #endif /* * Convenience functions. * * Don't provide these in kernel space. In kernel space, we should be * disciplined enough to always provide explicit barriers. 
*/ #ifndef _KERNEL #define pm_atomic_compare_exchange_strong(object, expected, desired) \ pm_atomic_compare_exchange_strong_explicit(object, expected, \ desired, pm_memory_order_seq_cst, pm_memory_order_seq_cst) #define pm_atomic_compare_exchange_weak(object, expected, desired) \ pm_atomic_compare_exchange_weak_explicit(object, expected, \ desired, pm_memory_order_seq_cst, pm_memory_order_seq_cst) #define pm_atomic_exchange(object, desired) \ pm_atomic_exchange_explicit(object, desired, pm_memory_order_seq_cst) #define pm_atomic_fetch_add(object, operand) \ pm_atomic_fetch_add_explicit(object, operand, pm_memory_order_seq_cst) #define pm_atomic_fetch_and(object, operand) \ pm_atomic_fetch_and_explicit(object, operand, pm_memory_order_seq_cst) #define pm_atomic_fetch_or(object, operand) \ pm_atomic_fetch_or_explicit(object, operand, pm_memory_order_seq_cst) #define pm_atomic_fetch_sub(object, operand) \ pm_atomic_fetch_sub_explicit(object, operand, pm_memory_order_seq_cst) #define pm_atomic_fetch_xor(object, operand) \ pm_atomic_fetch_xor_explicit(object, operand, pm_memory_order_seq_cst) #define pm_atomic_load(object) \ pm_atomic_load_explicit(object, pm_memory_order_seq_cst) #define pm_atomic_store(object, desired) \ pm_atomic_store_explicit(object, desired, pm_memory_order_seq_cst) #endif /* !_KERNEL */ /* * 7.17.8 Atomic flag type and operations. * * XXX: Assume atomic_bool can be used as an atomic_flag. Is there some * kind of compiler built-in type we could use? 
*/ #if 0 typedef struct { atomic_bool __flag; } atomic_flag; #define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) } static __inline bool atomic_flag_test_and_set_explicit(volatile atomic_flag *__object, pm_memory_order __order) { return (pm_atomic_exchange_explicit(&__object->__flag, 1, __order)); } static __inline void atomic_flag_clear_explicit(volatile atomic_flag *__object, pm_memory_order __order) { pm_atomic_store_explicit(&__object->__flag, 0, __order); } #ifndef _KERNEL static __inline bool atomic_flag_test_and_set(volatile atomic_flag *__object) { return (atomic_flag_test_and_set_explicit(__object, pm_memory_order_seq_cst)); } static __inline void atomic_flag_clear(volatile atomic_flag *__object) { atomic_flag_clear_explicit(__object, pm_memory_order_seq_cst); } #endif /* !_KERNEL */ #endif #endif /* !_STDATOMIC_H_ */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/third_party/valgrind/0000755000000000000000000000000013306562360023014 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/third_party/valgrind/memcheck.h0000644000000000000000000003435213306562360024750 0ustar rootroot /* ---------------------------------------------------------------- Notice that the following BSD-style license applies to this one file (memcheck.h) only. The rest of Valgrind is licensed under the terms of the GNU General Public License, version 2, unless otherwise indicated. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- This file is part of MemCheck, a heavyweight Valgrind tool for detecting memory errors. Copyright (C) 2000-2015 Julian Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (memcheck.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- */ #ifndef __MEMCHECK_H #define __MEMCHECK_H /* This file is for inclusion into client (your!) code. You can use these macros to manipulate and query memory permissions inside your own programs. See comment near the top of valgrind.h on how to use them. */ #include "valgrind.h" /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ typedef enum { VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'), VG_USERREQ__MAKE_MEM_UNDEFINED, VG_USERREQ__MAKE_MEM_DEFINED, VG_USERREQ__DISCARD, VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, VG_USERREQ__CHECK_MEM_IS_DEFINED, VG_USERREQ__DO_LEAK_CHECK, VG_USERREQ__COUNT_LEAKS, VG_USERREQ__GET_VBITS, VG_USERREQ__SET_VBITS, VG_USERREQ__CREATE_BLOCK, VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */ VG_USERREQ__COUNT_LEAK_BLOCKS, VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, /* This is just for memcheck's internal use - don't use it */ _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR = VG_USERREQ_TOOL_BASE('M','C') + 256 } Vg_MemCheckClientRequest; /* Client-code macros to manipulate the state of memory. */ /* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */ #define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_NOACCESS, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Similarly, mark memory at _qzz_addr as addressable but undefined for _qzz_len bytes. */ #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_UNDEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Similarly, mark memory at _qzz_addr as addressable and defined for _qzz_len bytes. */ #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_DEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is not altered: bytes which are addressable are marked as defined, but those which are not addressable are left unchanged. 
*/ #define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Create a block-description handle. The description is an ascii string which is included in any messages pertaining to addresses within the specified memory range. Has no other effect on the properties of the memory range. */ #define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CREATE_BLOCK, \ (_qzz_addr), (_qzz_len), (_qzz_desc), \ 0, 0) /* Discard a block-description-handle. Returns 1 for an invalid handle, 0 for a valid handle. */ #define VALGRIND_DISCARD(_qzz_blkindex) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__DISCARD, \ 0, (_qzz_blkindex), 0, 0, 0) /* Client-code macros to check the state of memory. */ /* Check that memory at _qzz_addr is addressable for _qzz_len bytes. If suitable addressibility is not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Check that memory at _qzz_addr is addressable and defined for _qzz_len bytes. If suitable addressibility and definedness are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__CHECK_MEM_IS_DEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Use this macro to force the definedness and addressibility of an lvalue to be checked. 
If suitable addressibility and definedness are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \ VALGRIND_CHECK_MEM_IS_DEFINED( \ (volatile unsigned char *)&(__lvalue), \ (unsigned long)(sizeof (__lvalue))) /* Do a full memory leak check (like --leak-check=full) mid-execution. */ #define VALGRIND_DO_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 0, 0, 0, 0, 0) /* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for which there was an increase in leaked bytes or leaked nr of blocks since the previous leak search. */ #define VALGRIND_DO_ADDED_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 0, 1, 0, 0, 0) /* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with increased or decreased leaked bytes/blocks since previous leak search. */ #define VALGRIND_DO_CHANGED_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 0, 2, 0, 0, 0) /* Do a summary memory leak check (like --leak-check=summary) mid-execution. */ #define VALGRIND_DO_QUICK_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 1, 0, 0, 0, 0) /* Return number of leaked, dubious, reachable and suppressed bytes found by all previous leak checks. They must be lvalues. */ #define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \ /* For safety on 64-bit platforms we assign the results to private unsigned long variables, then assign these to the lvalues the user specified, which works no matter what type 'leaked', 'dubious', etc are. We also initialise '_qzz_leaked', etc because VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as defined. 
*/ \ { \ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ VG_USERREQ__COUNT_LEAKS, \ &_qzz_leaked, &_qzz_dubious, \ &_qzz_reachable, &_qzz_suppressed, 0); \ leaked = _qzz_leaked; \ dubious = _qzz_dubious; \ reachable = _qzz_reachable; \ suppressed = _qzz_suppressed; \ } /* Return number of leaked, dubious, reachable and suppressed bytes found by all previous leak checks. They must be lvalues. */ #define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \ /* For safety on 64-bit platforms we assign the results to private unsigned long variables, then assign these to the lvalues the user specified, which works no matter what type 'leaked', 'dubious', etc are. We also initialise '_qzz_leaked', etc because VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as defined. */ \ { \ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ VG_USERREQ__COUNT_LEAK_BLOCKS, \ &_qzz_leaked, &_qzz_dubious, \ &_qzz_reachable, &_qzz_suppressed, 0); \ leaked = _qzz_leaked; \ dubious = _qzz_dubious; \ reachable = _qzz_reachable; \ suppressed = _qzz_suppressed; \ } /* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it into the provided zzvbits array. Return values: 0 if not running on valgrind 1 success 2 [previously indicated unaligned arrays; these are now allowed] 3 if any parts of zzsrc/zzvbits are not addressable. The metadata is not copied in cases 0, 2 or 3 so it should be impossible to segfault your system by using this call. */ #define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__GET_VBITS, \ (const char*)(zza), \ (char*)(zzvbits), \ (zznbytes), 0, 0) /* Set the validity data for addresses [zza..zza+zznbytes-1], copying it from the provided zzvbits array. 
Return values: 0 if not running on valgrind 1 success 2 [previously indicated unaligned arrays; these are now allowed] 3 if any parts of zza/zzvbits are not addressable. The metadata is not copied in cases 0, 2 or 3 so it should be impossible to segfault your system by using this call. */ #define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__SET_VBITS, \ (const char*)(zza), \ (const char*)(zzvbits), \ (zznbytes), 0, 0 ) /* Disable and re-enable reporting of addressing errors in the specified address range. */ #define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) #define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) #endif tarantool_1.9.1.26.g63eb81e3c/src/lib/small/third_party/valgrind/valgrind.h0000644000000000000000000147117013306562360025006 0ustar rootroot/* -*- c -*- ---------------------------------------------------------------- Notice that the following BSD-style license applies to this one file (valgrind.h) only. The rest of Valgrind is licensed under the terms of the GNU General Public License, version 2, unless otherwise indicated. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- This file is part of Valgrind, a dynamic binary instrumentation framework. Copyright (C) 2000-2015 Julian Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (valgrind.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- */ /* This file is for inclusion into client (your!) code. You can use these macros to manipulate and query Valgrind's execution inside your own programs. The resulting executables will still run without Valgrind, just a little bit more slowly than they otherwise would, but otherwise unchanged. When not running on valgrind, each client request consumes very few (eg. 
7) instructions, so the resulting performance loss is negligible unless you plan to execute client requests millions of times per second. Nevertheless, if that is still a problem, you can compile with the NVALGRIND symbol defined (gcc -DNVALGRIND) so that client requests are not even compiled in. */ #ifndef __VALGRIND_H #define __VALGRIND_H /* ------------------------------------------------------------------ */ /* VERSION NUMBER OF VALGRIND */ /* ------------------------------------------------------------------ */ /* Specify Valgrind's version number, so that user code can conditionally compile based on our version number. Note that these were introduced at version 3.6 and so do not exist in version 3.5 or earlier. The recommended way to use them to check for "version X.Y or later" is (eg) #if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \ && (__VALGRIND_MAJOR__ > 3 \ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6)) */ #define __VALGRIND_MAJOR__ 3 #define __VALGRIND_MINOR__ 12 #include /* Nb: this file might be included in a file compiled with -ansi. So we can't use C++ style "//" comments nor the "asm" keyword (instead use "__asm__"). */ /* Derive some tags indicating what the target platform is. Note that in this file we're using the compiler's CPP symbols for identifying architectures, which are different to the ones we use within the rest of Valgrind. Note, __powerpc__ is active for both 32 and 64-bit PPC, whereas __powerpc64__ is only active for the latter (on Linux, that is). 
Misc note: how to find out what's predefined in gcc by default: gcc -Wp,-dM somefile.c */ #undef PLAT_x86_darwin #undef PLAT_amd64_darwin #undef PLAT_x86_win32 #undef PLAT_amd64_win64 #undef PLAT_x86_linux #undef PLAT_amd64_linux #undef PLAT_ppc32_linux #undef PLAT_ppc64be_linux #undef PLAT_ppc64le_linux #undef PLAT_arm_linux #undef PLAT_arm64_linux #undef PLAT_s390x_linux #undef PLAT_mips32_linux #undef PLAT_mips64_linux #undef PLAT_tilegx_linux #undef PLAT_x86_solaris #undef PLAT_amd64_solaris #if defined(__APPLE__) && defined(__i386__) # define PLAT_x86_darwin 1 #elif defined(__APPLE__) && defined(__x86_64__) # define PLAT_amd64_darwin 1 #elif (defined(__MINGW32__) && !defined(__MINGW64__)) \ || defined(__CYGWIN32__) \ || (defined(_WIN32) && defined(_M_IX86)) # define PLAT_x86_win32 1 #elif defined(__MINGW64__) \ || (defined(_WIN64) && defined(_M_X64)) # define PLAT_amd64_win64 1 #elif defined(__linux__) && defined(__i386__) # define PLAT_x86_linux 1 #elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__) # define PLAT_amd64_linux 1 #elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__) # define PLAT_ppc32_linux 1 #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2 /* Big Endian uses ELF version 1 */ # define PLAT_ppc64be_linux 1 #elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2 /* Little Endian uses ELF version 2 */ # define PLAT_ppc64le_linux 1 #elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__) # define PLAT_arm_linux 1 #elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__) # define PLAT_arm64_linux 1 #elif defined(__linux__) && defined(__s390__) && defined(__s390x__) # define PLAT_s390x_linux 1 #elif defined(__linux__) && defined(__mips__) && (__mips==64) # define PLAT_mips64_linux 1 #elif defined(__linux__) && defined(__mips__) && (__mips!=64) # define PLAT_mips32_linux 1 #elif defined(__linux__) && 
defined(__tilegx__) # define PLAT_tilegx_linux 1 #elif defined(__sun) && defined(__i386__) # define PLAT_x86_solaris 1 #elif defined(__sun) && defined(__x86_64__) # define PLAT_amd64_solaris 1 #else /* If we're not compiling for our target platform, don't generate any inline asms. */ # if !defined(NVALGRIND) # define NVALGRIND 1 # endif #endif /* ------------------------------------------------------------------ */ /* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */ /* in here of use to end-users -- skip to the next section. */ /* ------------------------------------------------------------------ */ /* * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client * request. Accepts both pointers and integers as arguments. * * VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind * client request that does not return a value. * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind * client request and whose value equals the client request result. Accepts * both pointers and integers as arguments. Note that such calls are not * necessarily pure functions -- they may have side effects. 
*/ #define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \ _zzq_request, _zzq_arg1, _zzq_arg2, \ _zzq_arg3, _zzq_arg4, _zzq_arg5) \ do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) #define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \ _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0) #if defined(NVALGRIND) /* Define NVALGRIND to completely remove the Valgrind magic sequence from the compiled code (analogous to NDEBUG's effects on assert()) */ #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ ({(void) _zzq_default; _zzq_default; }) #else /* ! NVALGRIND */ /* The following defines the magic code sequences which the JITter spots and handles magically. Don't look too closely at them as they will rot your brain. The assembly code sequences for all architectures is in this one file. This is because this file must be stand-alone, and we don't want to have multiple files. For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default value gets put in the return slot, so that everything works when this is executed not under Valgrind. Args are passed in a memory block, and so there's no intrinsic limit to the number that could be passed, but it's currently five. The macro args are: _zzq_rlval result lvalue _zzq_default default value (result returned when running on real CPU) _zzq_request request code _zzq_arg1..5 request params The other two macros are used to support function wrapping, and are a lot simpler. 
VALGRIND_GET_NR_CONTEXT returns the value of the guest's NRADDR pseudo-register and whatever other information is needed to safely run the call original from the wrapper: on ppc64-linux, the R2 value at the divert point is also needed. This information is abstracted into a user-visible type, OrigFn. VALGRIND_CALL_NOREDIR_* behaves the same as the following on the guest, but guarantees that the branch instruction will not be redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64: branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a complete inline asm, since it needs to be combined with more magic inline asm stuff to be useful. */ /* ----------------- x86-{linux,darwin,solaris} ---------------- */ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ || (defined(PLAT_x86_win32) && defined(__GNUC__)) \ || defined(PLAT_x86_solaris) typedef struct { unsigned int nraddr; /* where's the code? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "roll $3, %%edi ; roll $13, %%edi\n\t" \ "roll $29, %%edi ; roll $19, %%edi\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({volatile unsigned int _zzq_args[6]; \ volatile unsigned int _zzq_result; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %EDX = client_request ( %EAX ) */ \ "xchgl %%ebx,%%ebx" \ : "=d" (_zzq_result) \ : "a" (&_zzq_args[0]), "0" (_zzq_default) \ : "cc", "memory" \ ); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %EAX = guest_NRADDR */ \ "xchgl %%ecx,%%ecx" \ : "=a" 
(__addr) \ : \ : "cc", "memory" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_EAX \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir *%EAX */ \ "xchgl %%edx,%%edx\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "xchgl %%edi,%%edi\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) || PLAT_x86_solaris */ /* ------------------------- x86-Win32 ------------------------- */ #if defined(PLAT_x86_win32) && !defined(__GNUC__) typedef struct { unsigned int nraddr; /* where's the code? */ } OrigFn; #if defined(_MSC_VER) #define __SPECIAL_INSTRUCTION_PREAMBLE \ __asm rol edi, 3 __asm rol edi, 13 \ __asm rol edi, 29 __asm rol edi, 19 #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \ (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \ (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \ (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5)) static __inline uintptr_t valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request, uintptr_t _zzq_arg1, uintptr_t _zzq_arg2, uintptr_t _zzq_arg3, uintptr_t _zzq_arg4, uintptr_t _zzq_arg5) { volatile uintptr_t _zzq_args[6]; volatile unsigned int _zzq_result; _zzq_args[0] = (uintptr_t)(_zzq_request); _zzq_args[1] = (uintptr_t)(_zzq_arg1); _zzq_args[2] = (uintptr_t)(_zzq_arg2); _zzq_args[3] = (uintptr_t)(_zzq_arg3); _zzq_args[4] = (uintptr_t)(_zzq_arg4); _zzq_args[5] = (uintptr_t)(_zzq_arg5); __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default __SPECIAL_INSTRUCTION_PREAMBLE /* %EDX = client_request ( %EAX ) */ __asm xchg ebx,ebx __asm mov _zzq_result, edx } return _zzq_result; } #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned int __addr; \ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ 
/* %EAX = guest_NRADDR */ \ __asm xchg ecx,ecx \ __asm mov __addr, eax \ } \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_EAX ERROR #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \ __asm xchg edi,edi \ } \ } while (0) #else #error Unsupported compiler. #endif #endif /* PLAT_x86_win32 */ /* ----------------- amd64-{linux,darwin,solaris} --------------- */ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ || defined(PLAT_amd64_solaris) \ || (defined(PLAT_amd64_win64) && defined(__GNUC__)) typedef struct { unsigned long int nraddr; /* where's the code? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({ volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %RDX = client_request ( %RAX ) */ \ "xchgq %%rbx,%%rbx" \ : "=d" (_zzq_result) \ : "a" (&_zzq_args[0]), "0" (_zzq_default) \ : "cc", "memory" \ ); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %RAX = guest_NRADDR */ \ "xchgq %%rcx,%%rcx" \ : "=a" (__addr) \ : \ : "cc", "memory" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_RAX \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir *%RAX */ \ "xchgq %%rdx,%%rdx\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ 
volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "xchgq %%rdi,%%rdi\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ /* ------------------------- amd64-Win64 ------------------------- */ #if defined(PLAT_amd64_win64) && !defined(__GNUC__) #error Unsupported compiler. #endif /* PLAT_amd64_win64 */ /* ------------------------ ppc32-linux ------------------------ */ #if defined(PLAT_ppc32_linux) typedef struct { unsigned int nraddr; /* where's the code? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \ "rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({ unsigned int _zzq_args[6]; \ unsigned int _zzq_result; \ unsigned int* _zzq_ptr; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ _zzq_ptr = _zzq_args; \ __asm__ volatile("mr 3,%1\n\t" /*default*/ \ "mr 4,%2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = client_request ( %R4 ) */ \ "or 1,1,1\n\t" \ "mr %0,3" /*result*/ \ : "=b" (_zzq_result) \ : "b" (_zzq_default), "b" (_zzq_ptr) \ : "cc", "memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR */ \ "or 2,2,2\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R11 */ \ "or 3,3,3\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ 
volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or 5,5,5\n\t" \ ); \ } while (0) #endif /* PLAT_ppc32_linux */ /* ------------------------ ppc64-linux ------------------------ */ #if defined(PLAT_ppc64be_linux) typedef struct { unsigned long int nraddr; /* where's the code? */ unsigned long int r2; /* what tocptr do we need? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ "rotldi 0,0,61 ; rotldi 0,0,51\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({ unsigned long int _zzq_args[6]; \ unsigned long int _zzq_result; \ unsigned long int* _zzq_ptr; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ _zzq_ptr = _zzq_args; \ __asm__ volatile("mr 3,%1\n\t" /*default*/ \ "mr 4,%2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = client_request ( %R4 ) */ \ "or 1,1,1\n\t" \ "mr %0,3" /*result*/ \ : "=b" (_zzq_result) \ : "b" (_zzq_default), "b" (_zzq_ptr) \ : "cc", "memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR */ \ "or 2,2,2\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR_GPR2 */ \ "or 4,4,4\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->r2 = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R11 */ \ "or 3,3,3\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ 
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or 5,5,5\n\t" \ ); \ } while (0) #endif /* PLAT_ppc64be_linux */ #if defined(PLAT_ppc64le_linux) typedef struct { unsigned long int nraddr; /* where's the code? */ unsigned long int r2; /* what tocptr do we need? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \ "rotldi 0,0,61 ; rotldi 0,0,51\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({ unsigned long int _zzq_args[6]; \ unsigned long int _zzq_result; \ unsigned long int* _zzq_ptr; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ _zzq_ptr = _zzq_args; \ __asm__ volatile("mr 3,%1\n\t" /*default*/ \ "mr 4,%2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = client_request ( %R4 ) */ \ "or 1,1,1\n\t" \ "mr %0,3" /*result*/ \ : "=b" (_zzq_result) \ : "b" (_zzq_default), "b" (_zzq_ptr) \ : "cc", "memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR */ \ "or 2,2,2\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %R3 = guest_NRADDR_GPR2 */ \ "or 4,4,4\n\t" \ "mr %0,3" \ : "=b" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->r2 = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R12 */ \ "or 3,3,3\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or 5,5,5\n\t" 
\ ); \ } while (0) #endif /* PLAT_ppc64le_linux */ /* ------------------------- arm-linux ------------------------- */ #if defined(PLAT_arm_linux) typedef struct { unsigned int nraddr; /* where's the code? */ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({volatile unsigned int _zzq_args[6]; \ volatile unsigned int _zzq_result; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ __asm__ volatile("mov r3, %1\n\t" /*default*/ \ "mov r4, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* R3 = client_request ( R4 ) */ \ "orr r10, r10, r10\n\t" \ "mov %0, r3" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "cc","memory", "r3", "r4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* R3 = guest_NRADDR */ \ "orr r11, r11, r11\n\t" \ "mov %0, r3" \ : "=r" (__addr) \ : \ : "cc", "memory", "r3" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir *%R4 */ \ "orr r12, r12, r12\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "orr r9, r9, r9\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_arm_linux */ /* ------------------------ arm64-linux ------------------------- */ #if defined(PLAT_arm64_linux) typedef struct { unsigned long int nraddr; /* where's the code? 
*/ } OrigFn; #define __SPECIAL_INSTRUCTION_PREAMBLE \ "ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \ "ror x12, x12, #51 ; ror x12, x12, #61 \n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ \ __extension__ \ ({volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile("mov x3, %1\n\t" /*default*/ \ "mov x4, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* X3 = client_request ( X4 ) */ \ "orr x10, x10, x10\n\t" \ "mov %0, x3" /*result*/ \ : "=r" (_zzq_result) \ : "r" ((unsigned long int)(_zzq_default)), \ "r" (&_zzq_args[0]) \ : "cc","memory", "x3", "x4"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* X3 = guest_NRADDR */ \ "orr x11, x11, x11\n\t" \ "mov %0, x3" \ : "=r" (__addr) \ : \ : "cc", "memory", "x3" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* branch-and-link-to-noredir X8 */ \ "orr x12, x12, x12\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "orr x9, x9, x9\n\t" \ : : : "cc", "memory" \ ); \ } while (0) #endif /* PLAT_arm64_linux */ /* ------------------------ s390x-linux ------------------------ */ #if defined(PLAT_s390x_linux) typedef struct { unsigned long int nraddr; /* where's the code? */ } OrigFn; /* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific * code. This detection is implemented in platform specific toIR.c * (e.g. 
VEX/priv/guest_s390_decoder.c). */ #define __SPECIAL_INSTRUCTION_PREAMBLE \ "lr 15,15\n\t" \ "lr 1,1\n\t" \ "lr 2,2\n\t" \ "lr 3,3\n\t" #define __CLIENT_REQUEST_CODE "lr 2,2\n\t" #define __GET_NR_CONTEXT_CODE "lr 3,3\n\t" #define __CALL_NO_REDIR_CODE "lr 4,4\n\t" #define __VEX_INJECT_IR_CODE "lr 5,5\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile(/* r2 = args */ \ "lgr 2,%1\n\t" \ /* r3 = default */ \ "lgr 3,%2\n\t" \ __SPECIAL_INSTRUCTION_PREAMBLE \ __CLIENT_REQUEST_CODE \ /* results = r3 */ \ "lgr %0, 3\n\t" \ : "=d" (_zzq_result) \ : "a" (&_zzq_args[0]), "0" (_zzq_default) \ : "cc", "2", "3", "memory" \ ); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ __GET_NR_CONTEXT_CODE \ "lgr %0, 3\n\t" \ : "=a" (__addr) \ : \ : "cc", "3", "memory" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_R1 \ __SPECIAL_INSTRUCTION_PREAMBLE \ __CALL_NO_REDIR_CODE #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ __VEX_INJECT_IR_CODE); \ } while (0) #endif /* PLAT_s390x_linux */ /* ------------------------- mips32-linux ---------------- */ #if defined(PLAT_mips32_linux) typedef struct { unsigned int nraddr; /* where's the code? 
*/ } OrigFn; /* .word 0x342 * .word 0x742 * .word 0xC2 * .word 0x4C2*/ #define __SPECIAL_INSTRUCTION_PREAMBLE \ "srl $0, $0, 13\n\t" \ "srl $0, $0, 29\n\t" \ "srl $0, $0, 3\n\t" \ "srl $0, $0, 19\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({ volatile unsigned int _zzq_args[6]; \ volatile unsigned int _zzq_result; \ _zzq_args[0] = (unsigned int)(_zzq_request); \ _zzq_args[1] = (unsigned int)(_zzq_arg1); \ _zzq_args[2] = (unsigned int)(_zzq_arg2); \ _zzq_args[3] = (unsigned int)(_zzq_arg3); \ _zzq_args[4] = (unsigned int)(_zzq_arg4); \ _zzq_args[5] = (unsigned int)(_zzq_arg5); \ __asm__ volatile("move $11, %1\n\t" /*default*/ \ "move $12, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* T3 = client_request ( T4 ) */ \ "or $13, $13, $13\n\t" \ "move %0, $11\n\t" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "$11", "$12", "memory"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* %t9 = guest_NRADDR */ \ "or $14, $14, $14\n\t" \ "move %0, $11" /*result*/ \ : "=r" (__addr) \ : \ : "$11" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_T9 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir *%t9 */ \ "or $15, $15, $15\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or $11, $11, $11\n\t" \ ); \ } while (0) #endif /* PLAT_mips32_linux */ /* ------------------------- mips64-linux ---------------- */ #if defined(PLAT_mips64_linux) typedef struct { unsigned long nraddr; /* where's the code? 
*/ } OrigFn; /* dsll $0,$0, 3 * dsll $0,$0, 13 * dsll $0,$0, 29 * dsll $0,$0, 19*/ #define __SPECIAL_INSTRUCTION_PREAMBLE \ "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \ "dsll $0,$0,29 ; dsll $0,$0,19\n\t" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ __extension__ \ ({ volatile unsigned long int _zzq_args[6]; \ volatile unsigned long int _zzq_result; \ _zzq_args[0] = (unsigned long int)(_zzq_request); \ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \ __asm__ volatile("move $11, %1\n\t" /*default*/ \ "move $12, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* $11 = client_request ( $12 ) */ \ "or $13, $13, $13\n\t" \ "move %0, $11\n\t" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "$11", "$12", "memory"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* $11 = guest_NRADDR */ \ "or $14, $14, $14\n\t" \ "move %0, $11" /*result*/ \ : "=r" (__addr) \ : \ : "$11"); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_T9 \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* call-noredir $25 */ \ "or $15, $15, $15\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or $11, $11, $11\n\t" \ ); \ } while (0) #endif /* PLAT_mips64_linux */ /* ------------------------ tilegx-linux --------------- */ #if defined(PLAT_tilegx_linux) typedef struct { unsigned long long int nraddr; /* where's the code? */ } OrigFn; /*** special instruction sequence. 
0:02b3c7ff91234fff { moveli zero, 4660 ; moveli zero, 22136 } 8:0091a7ff95678fff { moveli zero, 22136 ; moveli zero, 4660 } ****/ #define __SPECIAL_INSTRUCTION_PREAMBLE \ ".quad 0x02b3c7ff91234fff\n" \ ".quad 0x0091a7ff95678fff\n" #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ ({ volatile unsigned long long int _zzq_args[6]; \ volatile unsigned long long int _zzq_result; \ _zzq_args[0] = (unsigned long long int)(_zzq_request); \ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \ __asm__ volatile("move r11, %1\n\t" /*default*/ \ "move r12, %2\n\t" /*ptr*/ \ __SPECIAL_INSTRUCTION_PREAMBLE \ /* r11 = client_request */ \ "or r13, r13, r13\n\t" \ "move %0, r11\n\t" /*result*/ \ : "=r" (_zzq_result) \ : "r" (_zzq_default), "r" (&_zzq_args[0]) \ : "memory", "r11", "r12"); \ _zzq_result; \ }) #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \ volatile unsigned long long int __addr; \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ /* r11 = guest_NRADDR */ \ "or r14, r14, r14\n" \ "move %0, r11\n" \ : "=r" (__addr) \ : \ : "memory", "r11" \ ); \ _zzq_orig->nraddr = __addr; \ } #define VALGRIND_CALL_NOREDIR_R12 \ __SPECIAL_INSTRUCTION_PREAMBLE \ "or r15, r15, r15\n\t" #define VALGRIND_VEX_INJECT_IR() \ do { \ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \ "or r11, r11, r11\n\t" \ ); \ } while (0) #endif /* PLAT_tilegx_linux */ /* Insert assembly code for other platforms here... */ #endif /* NVALGRIND */ /* ------------------------------------------------------------------ */ /* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */ /* ugly. It's the least-worst tradeoff I can think of. 
*/ /* ------------------------------------------------------------------ */ /* This section defines magic (a.k.a appalling-hack) macros for doing guaranteed-no-redirection macros, so as to get from function wrappers to the functions they are wrapping. The whole point is to construct standard call sequences, but to do the call itself with a special no-redirect call pseudo-instruction that the JIT understands and handles specially. This section is long and repetitious, and I can't see a way to make it shorter. The naming scheme is as follows: CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc} 'W' stands for "word" and 'v' for "void". Hence there are different macros for calling arity 0, 1, 2, 3, 4, etc, functions, and for each, the possibility of returning a word-typed result, or no result. */ /* Use these to write the name of your wrapper. NOTE: duplicates VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts the default behaviour equivalance class tag "0000" into the name. See pub_tool_redir.h for details -- normally you don't need to think about this, though. */ /* Use an extra level of macroisation so as to ensure the soname/fnname args are fully macro-expanded before pasting them together. */ #define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd #define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \ VG_CONCAT4(_vgw00000ZU_,soname,_,fnname) #define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \ VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname) /* Use this macro from within a wrapper function to collect the context (address and possibly other info) of the original function. Once you have that you can then use it in one of the CALL_FN_ macros. The type of the argument _lval is OrigFn. */ #define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval) /* Also provide end-user facilities for function replacement, rather than wrapping. 
A replacement function differs from a wrapper in that it has no way to get hold of the original function being called, and hence no way to call onwards to it. In a replacement function, VALGRIND_GET_ORIG_FN always returns zero. */ #define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \ VG_CONCAT4(_vgr00000ZU_,soname,_,fnname) #define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \ VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname) /* Derivatives of the main macros below, for calling functions returning void. */ #define CALL_FN_v_v(fnptr) \ do { volatile unsigned long _junk; \ CALL_FN_W_v(_junk,fnptr); } while (0) #define CALL_FN_v_W(fnptr, arg1) \ do { volatile unsigned long _junk; \ CALL_FN_W_W(_junk,fnptr,arg1); } while (0) #define CALL_FN_v_WW(fnptr, arg1,arg2) \ do { volatile unsigned long _junk; \ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0) #define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \ do { volatile unsigned long _junk; \ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0) #define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \ do { volatile unsigned long _junk; \ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0) #define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \ do { volatile unsigned long _junk; \ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0) #define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \ do { volatile unsigned long _junk; \ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0) #define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ do { volatile unsigned long _junk; \ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0) /* ----------------- x86-{linux,darwin,solaris} ---------------- */ #if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \ || defined(PLAT_x86_solaris) /* These regs are trashed by the hidden call. No need to mention eax as gcc can already see that, plus causes gcc to bomb. 
*/ #define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "movl %%esp,%%edi\n\t" \ "andl $0xfffffff0,%%esp\n\t" #define VALGRIND_RESTORE_STACK \ "movl %%edi,%%esp\n\t" /* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned long) == 4. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $12, %%esp\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $8, %%esp\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ 
VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $4, %%esp\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = 
(unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $12, %%esp\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $8, %%esp\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ 
"subl $4, %%esp\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = 
(unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $12, %%esp\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $8, %%esp\n\t" \ "pushl 40(%%eax)\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long 
_res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "subl $4, %%esp\n\t" \ "pushl 44(%%eax)\n\t" \ "pushl 40(%%eax)\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ "pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "pushl 48(%%eax)\n\t" \ "pushl 44(%%eax)\n\t" \ "pushl 40(%%eax)\n\t" \ "pushl 36(%%eax)\n\t" \ "pushl 32(%%eax)\n\t" \ "pushl 28(%%eax)\n\t" \ "pushl 24(%%eax)\n\t" \ 
"pushl 20(%%eax)\n\t" \ "pushl 16(%%eax)\n\t" \ "pushl 12(%%eax)\n\t" \ "pushl 8(%%eax)\n\t" \ "pushl 4(%%eax)\n\t" \ "movl (%%eax), %%eax\n\t" /* target->%eax */ \ VALGRIND_CALL_NOREDIR_EAX \ VALGRIND_RESTORE_STACK \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */ /* ---------------- amd64-{linux,darwin,solaris} --------------- */ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \ || defined(PLAT_amd64_solaris) /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */ /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \ "rdi", "r8", "r9", "r10", "r11" /* This is all pretty complex. It's so as to make stack unwinding work reliably. See bug 243270. The basic problem is the sub and add of 128 of %rsp in all of the following macros. If gcc believes the CFA is in %rsp, then unwinding may fail, because what's at the CFA is not what gcc "expected" when it constructs the CFIs for the places where the macros are instantiated. But we can't just add a CFI annotation to increase the CFA offset by 128, to match the sub of 128 from %rsp, because we don't know whether gcc has chosen %rsp as the CFA at that point, or whether it has chosen some other register (eg, %rbp). In the latter case, adding a CFI annotation to change the CFA offset is simply wrong. So the solution is to get hold of the CFA using __builtin_dwarf_cfa(), put it in a known register, and add a CFI annotation to say what the register is. We choose %rbp for this (perhaps perversely), because: (1) %rbp is already subject to unwinding. If a new register was chosen then the unwinder would have to unwind it in all stack traces, which is expensive, and (2) %rbp is already subject to precise exception updates in the JIT. 
If a new register was chosen, we'd have to have precise exceptions for it too, which reduces performance of the generated code. However .. one extra complication. We can't just whack the result of __builtin_dwarf_cfa() into %rbp and then add %rbp to the list of trashed registers at the end of the inline assembly fragments; gcc won't allow %rbp to appear in that list. Hence instead we need to stash %rbp in %r15 for the duration of the asm, and say that %r15 is trashed instead. gcc seems happy to go with that. Oh .. and this all needs to be conditionalised so that it is unchanged from before this commit, when compiled with older gccs that don't support __builtin_dwarf_cfa. Furthermore, since this header file is freestanding, it has to be independent of config.h, and so the following conditionalisation cannot depend on configure time checks. Although it's not clear from 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)', this expression excludes Darwin. .cfi directives in Darwin assembly appear to be completely different and I haven't investigated how they work. For even more entertainment value, note we have to use the completely undocumented __builtin_dwarf_cfa(), which appears to really compute the CFA, whereas __builtin_frame_address(0) claims to but actually doesn't. See https://bugs.kde.org/show_bug.cgi?id=243270#c47 */ #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) # define __FRAME_POINTER \ ,"r"(__builtin_dwarf_cfa()) # define VALGRIND_CFI_PROLOGUE \ "movq %%rbp, %%r15\n\t" \ "movq %2, %%rbp\n\t" \ ".cfi_remember_state\n\t" \ ".cfi_def_cfa rbp, 0\n\t" # define VALGRIND_CFI_EPILOGUE \ "movq %%r15, %%rbp\n\t" \ ".cfi_restore_state\n\t" #else # define __FRAME_POINTER # define VALGRIND_CFI_PROLOGUE # define VALGRIND_CFI_EPILOGUE #endif /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. 
*/ #define VALGRIND_ALIGN_STACK \ "movq %%rsp,%%r14\n\t" \ "andq $0xfffffffffffffff0,%%rsp\n\t" #define VALGRIND_RESTORE_STACK \ "movq %%r14,%%rsp\n\t" /* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned long) == 8. */ /* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_ macros. In order not to trash the stack redzone, we need to drop %rsp by 128 before the hidden call, and restore afterwards. The nastyness is that it is only by luck that the stack still appears to be unwindable during the hidden call - since then the behaviour of any routine using this macro does not match what the CFI data says. Sigh. Why is this important? Imagine that a wrapper has a stack allocated local, and passes to the hidden call, a pointer to it. Because gcc does not know about the hidden call, it may allocate that local in the redzone. Unfortunately the hidden call may then trash it before it comes to use it. So we must step clear of the redzone, for the duration of the hidden call, to make it safe. Probably the same problem afflicts the other redzone-style ABIs too (ppc64-linux); but for those, the stack is self describing (none of this CFI nonsense) so at least messing with the stack pointer doesn't give a danger of non-unwindable stack. 
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define 
CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned 
long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); 
\ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $136,%%rsp\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ 
do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $136,%%rsp\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "pushq 80(%%rax)\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 
40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $136,%%rsp\n\t" \ "pushq 88(%%rax)\n\t" \ "pushq 80(%%rax)\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; 
\ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ VALGRIND_ALIGN_STACK \ "subq $128,%%rsp\n\t" \ "pushq 96(%%rax)\n\t" \ "pushq 88(%%rax)\n\t" \ "pushq 80(%%rax)\n\t" \ "pushq 72(%%rax)\n\t" \ "pushq 64(%%rax)\n\t" \ "pushq 56(%%rax)\n\t" \ "movq 48(%%rax), %%r9\n\t" \ "movq 40(%%rax), %%r8\n\t" \ "movq 32(%%rax), %%rcx\n\t" \ "movq 24(%%rax), %%rdx\n\t" \ "movq 16(%%rax), %%rsi\n\t" \ "movq 8(%%rax), %%rdi\n\t" \ "movq (%%rax), %%rax\n\t" /* target->%rax */ \ VALGRIND_CALL_NOREDIR_RAX \ VALGRIND_RESTORE_STACK \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=a" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */ /* ------------------------ ppc32-linux ------------------------ */ #if defined(PLAT_ppc32_linux) /* This is useful for finding out about the on-stack stuff: extern int f9 ( int,int,int,int,int,int,int,int,int ); extern int f10 ( int,int,int,int,int,int,int,int,int,int ); extern int f11 ( int,int,int,int,int,int,int,int,int,int,int ); extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int ); int g9 ( void ) { return f9(11,22,33,44,55,66,77,88,99); } int g10 ( void ) { return f10(11,22,33,44,55,66,77,88,99,110); } int g11 ( void ) { return f11(11,22,33,44,55,66,77,88,99,110,121); } int g12 ( void ) { return 
f12(11,22,33,44,55,66,77,88,99,110,121,132); } */ /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS \ "lr", "ctr", "xer", \ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ "r11", "r12", "r13" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "mr 28,1\n\t" \ "rlwinm 1,1,0,0,27\n\t" #define VALGRIND_RESTORE_STACK \ "mr 1,28\n\t" /* These CALL_FN_ macros assume that on ppc32-linux, sizeof(unsigned long) == 4. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile 
unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : 
/*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn 
_orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, 
orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-16\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-16\n\t" \ /* arg10 */ \ "lwz 3,40(11)\n\t" \ "stw 3,12(1)\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 
3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-32\n\t" \ /* arg11 */ \ "lwz 3,44(11)\n\t" \ "stw 3,16(1)\n\t" \ /* arg10 */ \ "lwz 3,40(11)\n\t" \ "stw 3,12(1)\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ 
volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ _argvec[12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "addi 1,1,-32\n\t" \ /* arg12 */ \ "lwz 3,48(11)\n\t" \ "stw 3,20(1)\n\t" \ /* arg11 */ \ "lwz 3,44(11)\n\t" \ "stw 3,16(1)\n\t" \ /* arg10 */ \ "lwz 3,40(11)\n\t" \ "stw 3,12(1)\n\t" \ /* arg9 */ \ "lwz 3,36(11)\n\t" \ "stw 3,8(1)\n\t" \ /* args1-8 */ \ "lwz 3,4(11)\n\t" /* arg1->r3 */ \ "lwz 4,8(11)\n\t" \ "lwz 5,12(11)\n\t" \ "lwz 6,16(11)\n\t" /* arg4->r6 */ \ "lwz 7,20(11)\n\t" \ "lwz 8,24(11)\n\t" \ "lwz 9,28(11)\n\t" \ "lwz 10,32(11)\n\t" /* arg8->r10 */ \ "lwz 11,0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ VALGRIND_RESTORE_STACK \ "mr %0,3" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_ppc32_linux */ /* ------------------------ ppc64-linux ------------------------ */ #if defined(PLAT_ppc64be_linux) /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ /* These regs are trashed by the hidden call. 
*/ #define __CALLER_SAVED_REGS \ "lr", "ctr", "xer", \ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ "r11", "r12", "r13" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ #define VALGRIND_ALIGN_STACK \ "mr 28,1\n\t" \ "rldicr 1,1,0,59\n\t" #define VALGRIND_RESTORE_STACK \ "mr 1,28\n\t" /* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned long) == 8. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+0]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+1]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 
2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+2]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+3]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ 
: /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+4]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+5]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ 
"ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+6]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+7]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = 
(unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+8]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" 
/* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+9]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack frame */ \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, 
arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+10]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack frame */ \ /* arg10 */ \ "ld 3,80(11)\n\t" \ "std 3,120(1)\n\t" \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+11]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = 
(unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg11 */ \ "ld 3,88(11)\n\t" \ "std 3,128(1)\n\t" \ /* arg10 */ \ "ld 3,80(11)\n\t" \ "std 3,120(1)\n\t" \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+12]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ 
_argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ _argvec[2+12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 11,%1\n\t" \ "std 2,-16(11)\n\t" /* save tocptr */ \ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg12 */ \ "ld 3,96(11)\n\t" \ "std 3,136(1)\n\t" \ /* arg11 */ \ "ld 3,88(11)\n\t" \ "std 3,128(1)\n\t" \ /* arg10 */ \ "ld 3,80(11)\n\t" \ "std 3,120(1)\n\t" \ /* arg9 */ \ "ld 3,72(11)\n\t" \ "std 3,112(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(11)\n\t" /* arg1->r3 */ \ "ld 4, 16(11)\n\t" /* arg2->r4 */ \ "ld 5, 24(11)\n\t" /* arg3->r5 */ \ "ld 6, 32(11)\n\t" /* arg4->r6 */ \ "ld 7, 40(11)\n\t" /* arg5->r7 */ \ "ld 8, 48(11)\n\t" /* arg6->r8 */ \ "ld 9, 56(11)\n\t" /* arg7->r9 */ \ "ld 10, 64(11)\n\t" /* arg8->r10 */ \ "ld 11, 0(11)\n\t" /* target->r11 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \ "mr 11,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(11)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_ppc64be_linux */ /* ------------------------- ppc64le-linux ----------------------- */ #if defined(PLAT_ppc64le_linux) /* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */ /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS \ "lr", "ctr", "xer", \ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \ "r11", "r12", "r13" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. 
*/ #define VALGRIND_ALIGN_STACK \ "mr 28,1\n\t" \ "rldicr 1,1,0,59\n\t" #define VALGRIND_RESTORE_STACK \ "mr 1,28\n\t" /* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned long) == 8. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+0]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+1]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+2]; \ volatile unsigned long _res; \ /* _argvec[0] holds 
current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+3]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+4]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = 
(unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+5]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) 
_res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+6]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+7]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ 
"ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+8]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) 
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+9]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack frame */ \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+10]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = 
(unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-128\n\t" /* expand stack frame */ \ /* arg10 */ \ "ld 3,80(12)\n\t" \ "std 3,104(1)\n\t" \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+11]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned 
long)arg11; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* expand stack frame */ \ /* arg11 */ \ "ld 3,88(12)\n\t" \ "std 3,112(1)\n\t" \ /* arg10 */ \ "ld 3,80(12)\n\t" \ "std 3,104(1)\n\t" \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3+12]; \ volatile unsigned long _res; \ /* _argvec[0] holds current r2 across the call */ \ _argvec[1] = (unsigned long)_orig.r2; \ _argvec[2] = (unsigned long)_orig.nraddr; \ _argvec[2+1] = (unsigned long)arg1; \ _argvec[2+2] = (unsigned long)arg2; \ _argvec[2+3] = (unsigned long)arg3; \ _argvec[2+4] = (unsigned long)arg4; \ _argvec[2+5] = (unsigned long)arg5; \ _argvec[2+6] = (unsigned long)arg6; \ _argvec[2+7] = (unsigned long)arg7; \ _argvec[2+8] = (unsigned long)arg8; \ _argvec[2+9] = (unsigned long)arg9; \ _argvec[2+10] = (unsigned long)arg10; \ _argvec[2+11] = (unsigned long)arg11; \ _argvec[2+12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "mr 12,%1\n\t" \ "std 2,-16(12)\n\t" /* save tocptr */ \ "ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \ "addi 1,1,-144\n\t" /* 
expand stack frame */ \ /* arg12 */ \ "ld 3,96(12)\n\t" \ "std 3,120(1)\n\t" \ /* arg11 */ \ "ld 3,88(12)\n\t" \ "std 3,112(1)\n\t" \ /* arg10 */ \ "ld 3,80(12)\n\t" \ "std 3,104(1)\n\t" \ /* arg9 */ \ "ld 3,72(12)\n\t" \ "std 3,96(1)\n\t" \ /* args1-8 */ \ "ld 3, 8(12)\n\t" /* arg1->r3 */ \ "ld 4, 16(12)\n\t" /* arg2->r4 */ \ "ld 5, 24(12)\n\t" /* arg3->r5 */ \ "ld 6, 32(12)\n\t" /* arg4->r6 */ \ "ld 7, 40(12)\n\t" /* arg5->r7 */ \ "ld 8, 48(12)\n\t" /* arg6->r8 */ \ "ld 9, 56(12)\n\t" /* arg7->r9 */ \ "ld 10, 64(12)\n\t" /* arg8->r10 */ \ "ld 12, 0(12)\n\t" /* target->r12 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \ "mr 12,%1\n\t" \ "mr %0,3\n\t" \ "ld 2,-16(12)\n\t" /* restore tocptr */ \ VALGRIND_RESTORE_STACK \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[2]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_ppc64le_linux */ /* ------------------------- arm-linux ------------------------- */ #if defined(PLAT_arm_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14" /* Macros to save and align the stack before making a function call and restore it afterwards as gcc may not keep the stack pointer aligned if it doesn't realise calls are being made to other functions. */ /* This is a bit tricky. We store the original stack pointer in r10 as it is callee-saves. gcc doesn't allow the use of r11 for some reason. Also, we can't directly "bic" the stack pointer in thumb mode since r13 isn't an allowed register number in that context. So use r4 as a temporary, since that is about to get trashed anyway, just after each use of this macro. Side effect is we need to be very careful about any future changes, since VALGRIND_ALIGN_STACK simply assumes r4 is usable. 
*/ #define VALGRIND_ALIGN_STACK \ "mov r10, sp\n\t" \ "mov r4, sp\n\t" \ "bic r4, r4, #7\n\t" \ "mov sp, r4\n\t" #define VALGRIND_RESTORE_STACK \ "mov sp, r10\n\t" /* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned long) == 4. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) 
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, 
[%1, #20] \n\t" \ "push {r0} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "push {r0, r1} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr 
r2, [%1, #28] \n\t" \ "push {r0, r1, r2} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "push {r0, r1, r2, r3} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned 
long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #40] \n\t" \ "push {r0} \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = 
(__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #4 \n\t" \ "ldr r0, [%1, #40] \n\t" \ "ldr r1, [%1, #44] \n\t" \ "push {r0, r1} \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = 
(unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr r0, [%1, #40] \n\t" \ "ldr r1, [%1, #44] \n\t" \ "ldr r2, [%1, #48] \n\t" \ "push {r0, r1, r2} \n\t" \ "ldr r0, [%1, #20] \n\t" \ "ldr r1, [%1, #24] \n\t" \ "ldr r2, [%1, #28] \n\t" \ "ldr r3, [%1, #32] \n\t" \ "ldr r4, [%1, #36] \n\t" \ "push {r0, r1, r2, r3, r4} \n\t" \ "ldr r0, [%1, #4] \n\t" \ "ldr r1, [%1, #8] \n\t" \ "ldr r2, [%1, #12] \n\t" \ "ldr r3, [%1, #16] \n\t" \ "ldr r4, [%1] \n\t" /* target->r4 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \ VALGRIND_RESTORE_STACK \ "mov %0, r0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_arm_linux */ /* ------------------------ arm64-linux ------------------------ */ #if defined(PLAT_arm64_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS \ "x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \ "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \ "x18", "x19", "x20", "x30", \ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \ "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \ "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \ "v26", "v27", "v28", "v29", "v30", "v31" /* x21 is callee-saved, so we can use it to save and restore SP around the hidden call. */ #define VALGRIND_ALIGN_STACK \ "mov x21, sp\n\t" \ "bic sp, x21, #15\n\t" #define VALGRIND_RESTORE_STACK \ "mov sp, x21\n\t" /* These CALL_FN_ macros assume that on arm64-linux, sizeof(unsigned long) == 8. 
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ 
_argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) 
\ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", 
__CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x20 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr 
x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x20 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1, #80] \n\t" \ "str x8, [sp, #8] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = 
(unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x30 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, #48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1, #80] \n\t" \ "str x8, [sp, #8] \n\t" \ "ldr x8, [%1, #88] \n\t" \ "str x8, [sp, #16] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10,arg11, \ arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ VALGRIND_ALIGN_STACK \ "sub sp, sp, #0x30 \n\t" \ "ldr x0, [%1, #8] \n\t" \ "ldr x1, [%1, #16] \n\t" \ "ldr x2, [%1, #24] \n\t" \ "ldr x3, [%1, #32] \n\t" \ "ldr x4, [%1, #40] \n\t" \ "ldr x5, [%1, 
#48] \n\t" \ "ldr x6, [%1, #56] \n\t" \ "ldr x7, [%1, #64] \n\t" \ "ldr x8, [%1, #72] \n\t" \ "str x8, [sp, #0] \n\t" \ "ldr x8, [%1, #80] \n\t" \ "str x8, [sp, #8] \n\t" \ "ldr x8, [%1, #88] \n\t" \ "str x8, [sp, #16] \n\t" \ "ldr x8, [%1, #96] \n\t" \ "str x8, [sp, #24] \n\t" \ "ldr x8, [%1] \n\t" /* target->x8 */ \ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \ VALGRIND_RESTORE_STACK \ "mov %0, x0" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_arm64_linux */ /* ------------------------- s390x-linux ------------------------- */ #if defined(PLAT_s390x_linux) /* Similar workaround as amd64 (see above), but we use r11 as frame pointer and save the old r11 in r7. r11 might be used for argvec, therefore we copy argvec in r1 since r1 is clobbered after the call anyway. */ #if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM) # define __FRAME_POINTER \ ,"d"(__builtin_dwarf_cfa()) # define VALGRIND_CFI_PROLOGUE \ ".cfi_remember_state\n\t" \ "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \ "lgr 7,11\n\t" \ "lgr 11,%2\n\t" \ ".cfi_def_cfa r11, 0\n\t" # define VALGRIND_CFI_EPILOGUE \ "lgr 11, 7\n\t" \ ".cfi_restore_state\n\t" #else # define __FRAME_POINTER # define VALGRIND_CFI_PROLOGUE \ "lgr 1,%1\n\t" # define VALGRIND_CFI_EPILOGUE #endif /* Nb: On s390 the stack pointer is properly aligned *at all times* according to the s390 GCC maintainer. (The ABI specification is not precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and VALGRIND_RESTORE_STACK are not defined here. */ /* These regs are trashed by the hidden call. Note that we overwrite r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the function a proper return address. All others are ABI defined call clobbers. 
*/ #define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \ "f0","f1","f2","f3","f4","f5","f6","f7" /* Nb: Although r11 is modified in the asm snippets below (inside VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for two reasons: (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not modified (2) GCC will complain that r11 cannot appear inside a clobber section, when compiled with -O -fno-omit-frame-pointer */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 1, 0(1)\n\t" /* target->r1 */ \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) /* The call abi has the arguments in r2-r6 and stack */ #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1, arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 
8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = 
(unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-160\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,160\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-168\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,168\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = 
(unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-176\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,176\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-184\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,184\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned 
long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-192\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,192\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9, arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-200\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "mvc 192(8,15), 80(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,200\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, 
arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9, arg10, arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 15,-208\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "mvc 192(8,15), 80(1)\n\t" \ "mvc 200(8,15), 88(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,208\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \ arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)arg1; \ _argvec[2] = (unsigned long)arg2; \ _argvec[3] = (unsigned long)arg3; \ _argvec[4] = (unsigned long)arg4; \ _argvec[5] = (unsigned long)arg5; \ _argvec[6] = (unsigned long)arg6; \ _argvec[7] = (unsigned long)arg7; \ _argvec[8] = (unsigned long)arg8; \ _argvec[9] = (unsigned long)arg9; \ _argvec[10] = (unsigned long)arg10; \ _argvec[11] = (unsigned long)arg11; \ _argvec[12] = (unsigned long)arg12; \ __asm__ volatile( \ VALGRIND_CFI_PROLOGUE \ "aghi 
15,-216\n\t" \ "lg 2, 8(1)\n\t" \ "lg 3,16(1)\n\t" \ "lg 4,24(1)\n\t" \ "lg 5,32(1)\n\t" \ "lg 6,40(1)\n\t" \ "mvc 160(8,15), 48(1)\n\t" \ "mvc 168(8,15), 56(1)\n\t" \ "mvc 176(8,15), 64(1)\n\t" \ "mvc 184(8,15), 72(1)\n\t" \ "mvc 192(8,15), 80(1)\n\t" \ "mvc 200(8,15), 88(1)\n\t" \ "mvc 208(8,15), 96(1)\n\t" \ "lg 1, 0(1)\n\t" \ VALGRIND_CALL_NOREDIR_R1 \ "lgr %0, 2\n\t" \ "aghi 15,216\n\t" \ VALGRIND_CFI_EPILOGUE \ : /*out*/ "=d" (_res) \ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_s390x_linux */ /* ------------------------- mips32-linux ----------------------- */ #if defined(PLAT_mips32_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ "$25", "$31" /* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned long) == 4. */ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16\n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw 
$4, 4(%1) \n\t" /* arg1*/ \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define 
CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "subu $29, $29, 16 \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 16 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 24\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 24 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); 
\ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 32\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "nop\n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 32 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 32\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 32 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 
\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 40\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 40 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = 
(unsigned long)(arg9); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 40\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 40 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 48\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 40(%1) \n\t" \ "sw $4, 36($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu 
$29, $29, 48 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 48\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 40(%1) \n\t" \ "sw $4, 36($29) \n\t" \ "lw $4, 44(%1) \n\t" \ "sw $4, 40($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 48 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ 
volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ "subu $29, $29, 8 \n\t" \ "sw $28, 0($29) \n\t" \ "sw $31, 4($29) \n\t" \ "lw $4, 20(%1) \n\t" \ "subu $29, $29, 56\n\t" \ "sw $4, 16($29) \n\t" \ "lw $4, 24(%1) \n\t" \ "sw $4, 20($29) \n\t" \ "lw $4, 28(%1) \n\t" \ "sw $4, 24($29) \n\t" \ "lw $4, 32(%1) \n\t" \ "sw $4, 28($29) \n\t" \ "lw $4, 36(%1) \n\t" \ "sw $4, 32($29) \n\t" \ "lw $4, 40(%1) \n\t" \ "sw $4, 36($29) \n\t" \ "lw $4, 44(%1) \n\t" \ "sw $4, 40($29) \n\t" \ "lw $4, 48(%1) \n\t" \ "sw $4, 44($29) \n\t" \ "lw $4, 4(%1) \n\t" \ "lw $5, 8(%1) \n\t" \ "lw $6, 12(%1) \n\t" \ "lw $7, 16(%1) \n\t" \ "lw $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "addu $29, $29, 56 \n\t" \ "lw $28, 0($29) \n\t" \ "lw $31, 4($29) \n\t" \ "addu $29, $29, 8 \n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_mips32_linux */ /* ------------------------- mips64-linux ------------------------- */ #if defined(PLAT_mips64_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \ "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ "$25", "$31" /* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned long) == 4. 
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "0" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" /* arg1*/ \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ 
"move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned 
long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld 
$7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1) \n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ "dsubu $29, $29, 8\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 8\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ 
_argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ "dsubu $29, $29, 16\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 80(%1)\n\t" \ "sd $4, 8($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 16\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ "dsubu $29, $29, 24\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 80(%1)\n\t" \ "sd $4, 8($29)\n\t" \ "ld $4, 88(%1)\n\t" \ "sd $4, 16($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 24\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, 
arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ "dsubu $29, $29, 32\n\t" \ "ld $4, 72(%1)\n\t" \ "sd $4, 0($29)\n\t" \ "ld $4, 80(%1)\n\t" \ "sd $4, 8($29)\n\t" \ "ld $4, 88(%1)\n\t" \ "sd $4, 16($29)\n\t" \ "ld $4, 96(%1)\n\t" \ "sd $4, 24($29)\n\t" \ "ld $4, 8(%1)\n\t" \ "ld $5, 16(%1)\n\t" \ "ld $6, 24(%1)\n\t" \ "ld $7, 32(%1)\n\t" \ "ld $8, 40(%1)\n\t" \ "ld $9, 48(%1)\n\t" \ "ld $10, 56(%1)\n\t" \ "ld $11, 64(%1)\n\t" \ "ld $25, 0(%1)\n\t" /* target->t9 */ \ VALGRIND_CALL_NOREDIR_T9 \ "daddu $29, $29, 32\n\t" \ "move %0, $2\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS \ ); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_mips64_linux */ /* ------------------------ tilegx-linux ------------------------- */ #if defined(PLAT_tilegx_linux) /* These regs are trashed by the hidden call. */ #define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3", "r4", "r5", \ "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", \ "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", \ "r23", "r24", "r25", "r26", "r27", "r28", "r29", "lr" /* These CALL_FN_ macros assume that on tilegx-linux, sizeof(unsigned long) == 8. 
*/ #define CALL_FN_W_v(lval, orig) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[1]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "ld r12, %1 \n\t" /* target->r11 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0 \n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_W(lval, orig, arg1) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[2]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WW(lval, orig, arg1,arg2) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[3]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \ do { \ 
volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[4]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8 \n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[5]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[6]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned 
long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[7]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[8]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ 
_argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[9]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ VALGRIND_CALL_NOREDIR_R12 \ 
"addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[10]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \ arg7,arg8,arg9,arg10) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[11]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ 
_argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ "ld_add r9, r29, 8 \n\t" /*arg10 -> r9 */ \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 8\n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[12]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add 
r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ "ld_add r9, r29, 8 \n\t" /*arg10 -> r9 */ \ "ld r10, r29 \n\t" \ "st_add sp, r10, -16 \n\t" \ VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 24 \n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \ arg6,arg7,arg8,arg9,arg10, \ arg11,arg12) \ do { \ volatile OrigFn _orig = (orig); \ volatile unsigned long _argvec[13]; \ volatile unsigned long _res; \ _argvec[0] = (unsigned long)_orig.nraddr; \ _argvec[1] = (unsigned long)(arg1); \ _argvec[2] = (unsigned long)(arg2); \ _argvec[3] = (unsigned long)(arg3); \ _argvec[4] = (unsigned long)(arg4); \ _argvec[5] = (unsigned long)(arg5); \ _argvec[6] = (unsigned long)(arg6); \ _argvec[7] = (unsigned long)(arg7); \ _argvec[8] = (unsigned long)(arg8); \ _argvec[9] = (unsigned long)(arg9); \ _argvec[10] = (unsigned long)(arg10); \ _argvec[11] = (unsigned long)(arg11); \ _argvec[12] = (unsigned long)(arg12); \ __asm__ volatile( \ "addi sp, sp, -8 \n\t" \ "st_add sp, lr, -8 \n\t" \ "move r29, %1 \n\t" \ "ld_add r12, r29, 8 \n\t" /* target->r11 */ \ "ld_add r0, r29, 8 \n\t" /*arg1 -> r0 */ \ "ld_add r1, r29, 8 \n\t" /*arg2 -> r1 */ \ "ld_add r2, r29, 8 \n\t" /*arg3 -> r2 */ \ "ld_add r3, r29, 8 \n\t" /*arg4 -> r3 */ \ "ld_add r4, r29, 8 \n\t" /*arg5 -> r4 */ \ "ld_add r5, r29, 8 \n\t" /*arg6 -> r5 */ \ "ld_add r6, r29, 8 \n\t" /*arg7 -> r6 */ \ "ld_add r7, r29, 8 \n\t" /*arg8 -> r7 */ \ "ld_add r8, r29, 8 \n\t" /*arg9 -> r8 */ \ "ld_add r9, r29, 8 \n\t" /*arg10 -> r9 */ \ "addi r28, sp, -8 \n\t" \ "addi sp, sp, -24 \n\t" \ "ld_add r10, r29, 8 \n\t" \ "ld r11, r29 \n\t" \ "st_add r28, r10, 8 \n\t" \ "st r28, r11 \n\t" \ 
VALGRIND_CALL_NOREDIR_R12 \ "addi sp, sp, 32 \n\t" \ "ld_add lr, sp, 8 \n\t" \ "move %0, r0\n" \ : /*out*/ "=r" (_res) \ : /*in*/ "r" (&_argvec[0]) \ : /*trash*/ "memory", __CALLER_SAVED_REGS); \ lval = (__typeof__(lval)) _res; \ } while (0) #endif /* PLAT_tilegx_linux */ /* ------------------------------------------------------------------ */ /* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */ /* */ /* ------------------------------------------------------------------ */ /* Some request codes. There are many more of these, but most are not exposed to end-user view. These are the public ones, all of the form 0x1000 + small_number. Core ones are in the range 0x00000000--0x0000ffff. The non-public ones start at 0x2000. */ /* These macros are used by tools -- they must be public, but don't embed them into other programs. */ #define VG_USERREQ_TOOL_BASE(a,b) \ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16)) #define VG_IS_TOOL_USERREQ(a, b, v) \ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000)) /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ typedef enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001, VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002, /* These allow any function to be called from the simulated CPU but run on the real CPU. Nb: the first arg passed to the function is always the ThreadId of the running thread! So CLIENT_CALL0 actually requires a 1 arg function, etc. */ VG_USERREQ__CLIENT_CALL0 = 0x1101, VG_USERREQ__CLIENT_CALL1 = 0x1102, VG_USERREQ__CLIENT_CALL2 = 0x1103, VG_USERREQ__CLIENT_CALL3 = 0x1104, /* Can be useful in regression testing suites -- eg. can send Valgrind's output to /dev/null and still count errors. */ VG_USERREQ__COUNT_ERRORS = 0x1201, /* Allows the client program and/or gdbserver to execute a monitor command. 
*/ VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202, /* These are useful and can be interpreted by any tool that tracks malloc() et al, by using vg_replace_malloc.c. */ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301, VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b, VG_USERREQ__FREELIKE_BLOCK = 0x1302, /* Memory pool support. */ VG_USERREQ__CREATE_MEMPOOL = 0x1303, VG_USERREQ__DESTROY_MEMPOOL = 0x1304, VG_USERREQ__MEMPOOL_ALLOC = 0x1305, VG_USERREQ__MEMPOOL_FREE = 0x1306, VG_USERREQ__MEMPOOL_TRIM = 0x1307, VG_USERREQ__MOVE_MEMPOOL = 0x1308, VG_USERREQ__MEMPOOL_CHANGE = 0x1309, VG_USERREQ__MEMPOOL_EXISTS = 0x130a, /* Allow printfs to valgrind log. */ /* The first two pass the va_list argument by value, which assumes it is the same size as or smaller than a UWord, which generally isn't the case. Hence are deprecated. The second two pass the vargs by reference and so are immune to this problem. */ /* both :: char* fmt, va_list vargs (DEPRECATED) */ VG_USERREQ__PRINTF = 0x1401, VG_USERREQ__PRINTF_BACKTRACE = 0x1402, /* both :: char* fmt, va_list* vargs */ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404, /* Stack support. */ VG_USERREQ__STACK_REGISTER = 0x1501, VG_USERREQ__STACK_DEREGISTER = 0x1502, VG_USERREQ__STACK_CHANGE = 0x1503, /* Wine support */ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601, /* Querying of debug info. */ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701, /* Disable/enable error reporting level. Takes a single Word arg which is the delta to this thread's error disablement indicator. Hence 1 disables or further disables errors, and -1 moves back towards enablement. Other values are not allowed. */ VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801, /* Initialise IR injection */ VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901 } Vg_ClientRequest; #if !defined(__GNUC__) # define __extension__ /* */ #endif /* Returns the number of Valgrinds this code is running under. 
That is, 0 if running natively, 1 if running under Valgrind, 2 if running under Valgrind which is running under another Valgrind, etc. */ #define RUNNING_ON_VALGRIND \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \ VG_USERREQ__RUNNING_ON_VALGRIND, \ 0, 0, 0, 0, 0) \ /* Discard translation of code in the range [_qzz_addr .. _qzz_addr + _qzz_len - 1]. Useful if you are debugging a JITter or some such, since it provides a way to make sure valgrind will retranslate the invalidated area. Returns no value. */ #define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \ _qzz_addr, _qzz_len, 0, 0, 0) /* These requests are for getting Valgrind itself to print something. Possibly with a backtrace. This is a really ugly hack. The return value is the number of characters printed, excluding the "**** " part at the start and the backtrace (if present). */ #if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) /* Modern GCC will optimize the static routine out if unused, and unused attribute will shut down warnings about it. */ static int VALGRIND_PRINTF(const char *format, ...) __attribute__((format(__printf__, 1, 2), __unused__)); #endif static int #if defined(_MSC_VER) __inline #endif VALGRIND_PRINTF(const char *format, ...) 
{ #if defined(NVALGRIND) if (format) *(volatile const char *)format; /* avoid compiler warning */ return 0; #else /* NVALGRIND */ #if defined(_MSC_VER) || defined(__MINGW64__) uintptr_t _qzz_res; #else unsigned long _qzz_res; #endif va_list vargs; va_start(vargs, format); #if defined(_MSC_VER) || defined(__MINGW64__) _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_VALIST_BY_REF, (uintptr_t)format, (uintptr_t)&vargs, 0, 0, 0); #else _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_VALIST_BY_REF, (unsigned long)format, (unsigned long)&vargs, 0, 0, 0); #endif va_end(vargs); return (int)_qzz_res; #endif /* NVALGRIND */ } #if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER) static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...) __attribute__((format(__printf__, 1, 2), __unused__)); #endif static int #if defined(_MSC_VER) __inline #endif VALGRIND_PRINTF_BACKTRACE(const char *format, ...) { #if defined(NVALGRIND) if (format) *(volatile const char *)format; /* avoid compiler warning */ return 0; #else /* NVALGRIND */ #if defined(_MSC_VER) || defined(__MINGW64__) uintptr_t _qzz_res; #else unsigned long _qzz_res; #endif va_list vargs; va_start(vargs, format); #if defined(_MSC_VER) || defined(__MINGW64__) _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, (uintptr_t)format, (uintptr_t)&vargs, 0, 0, 0); #else _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF, (unsigned long)format, (unsigned long)&vargs, 0, 0, 0); #endif va_end(vargs); return (int)_qzz_res; #endif /* NVALGRIND */ } /* These requests allow control to move from the simulated CPU to the real CPU, calling an arbitary function. Note that the current ThreadId is inserted as the first argument. So this call: VALGRIND_NON_SIMD_CALL2(f, arg1, arg2) requires f to have this signature: Word f(Word tid, Word arg1, Word arg2) where "Word" is a word-sized type. 
Note that these client requests are not entirely reliable. For example, if you call a function with them that subsequently calls printf(), there's a high chance Valgrind will crash. Generally, your prospects of these working are made higher if the called function does not refer to any global variables, and does not refer to any libc or other functions (printf et al). Any kind of entanglement with libc or dynamic linking is likely to have a bad outcome, for tricky reasons which we've grappled with a lot in the past. */ #define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL0, \ _qyy_fn, \ 0, 0, 0, 0) #define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL1, \ _qyy_fn, \ _qyy_arg1, 0, 0, 0) #define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL2, \ _qyy_fn, \ _qyy_arg1, _qyy_arg2, 0, 0) #define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CLIENT_CALL3, \ _qyy_fn, \ _qyy_arg1, _qyy_arg2, \ _qyy_arg3, 0) /* Counts the number of errors that have been recorded by a tool. Nb: the tool must record the errors with VG_(maybe_record_error)() or VG_(unique_error)() for them to be counted. */ #define VALGRIND_COUNT_ERRORS \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \ 0 /* default return */, \ VG_USERREQ__COUNT_ERRORS, \ 0, 0, 0, 0, 0) /* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing when heap blocks are allocated in order to give accurate results. This happens automatically for the standard allocator functions such as malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete, delete[], etc. But if your program uses a custom allocator, this doesn't automatically happen, and Valgrind will not do as well. 
For example, if you allocate superblocks with mmap() and then allocates chunks of the superblocks, all Valgrind's observations will be at the mmap() level and it won't know that the chunks should be considered separate entities. In Memcheck's case, that means you probably won't get heap block overrun detection (because there won't be redzones marked as unaddressable) and you definitely won't get any leak detection. The following client requests allow a custom allocator to be annotated so that it can be handled accurately by Valgrind. VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated by a malloc()-like function. For Memcheck (an illustrative case), this does two things: - It records that the block has been allocated. This means any addresses within the block mentioned in error messages will be identified as belonging to the block. It also means that if the block isn't freed it will be detected by the leak checker. - It marks the block as being addressable and undefined (if 'is_zeroed' is not set), or addressable and defined (if 'is_zeroed' is set). This controls how accesses to the block by the program are handled. 'addr' is the start of the usable block (ie. after any redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator can apply redzones -- these are blocks of padding at the start and end of each block. Adding redzones is recommended as it makes it much more likely Valgrind will spot block overruns. `is_zeroed' indicates if the memory is zeroed (or filled with another predictable value), as is the case for calloc(). VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a heap block -- that will be used by the client program -- is allocated. 
It's best to put it at the outermost level of the allocator if possible; for example, if you have a function my_alloc() which calls internal_alloc(), and the client request is put inside internal_alloc(), stack traces relating to the heap block will contain entries for both my_alloc() and internal_alloc(), which is probably not what you want. For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out custom blocks from within a heap block, B, that has been allocated with malloc/calloc/new/etc, then block B will be *ignored* during leak-checking -- the custom blocks will take precedence. VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For Memcheck, it does two things: - It records that the block has been deallocated. This assumes that the block was annotated as having been allocated via VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. - It marks the block as being unaddressable. VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a heap block is deallocated. VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For Memcheck, it does four things: - It records that the size of a block has been changed. This assumes that the block was annotated as having been allocated via VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued. - If the block shrunk, it marks the freed memory as being unaddressable. - If the block grew, it marks the new area as undefined and defines a red zone past the end of the new block. - The V-bits of the overlap between the old and the new block are preserved. VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block and before deallocation of the old block. In many cases, these three client requests will not be enough to get your allocator working well with Memcheck. 
More specifically, if your allocator writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call will be necessary to mark the memory as addressable just before the zeroing occurs, otherwise you'll get a lot of invalid write errors. For example, you'll need to do this if your allocator recycles freed blocks, but it zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK). Alternatively, if your allocator reuses freed blocks for allocator-internal data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary. Really, what's happening is a blurring of the lines between the client program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the memory should be considered unaddressable to the client program, but the allocator knows more than the rest of the client program and so may be able to safely access it. Extra client requests are necessary for Valgrind to understand the distinction between the allocator and the rest of the program. Ignored if addr == 0. */ #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \ addr, sizeB, rzB, is_zeroed, 0) /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. Ignored if addr == 0. */ #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \ addr, oldSizeB, newSizeB, rzB, 0) /* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details. Ignored if addr == 0. */ #define VALGRIND_FREELIKE_BLOCK(addr, rzB) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \ addr, rzB, 0, 0, 0) /* Create a memory pool. */ #define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ pool, rzB, is_zeroed, 0, 0) /* Create a memory pool with some flags specifying extended behaviour. When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL. 
The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used by the application as superblocks to dole out MALLOC_LIKE blocks using VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a "2 levels" pool : first level is the blocks described by VALGRIND_MEMPOOL_ALLOC. The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK. Note that the association between the pool and the second level blocks is implicit : second level blocks will be located inside first level blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag for such 2 levels pools, as otherwise valgrind will detect overlapping memory blocks, and will abort execution (e.g. during leak search). Such a meta pool can also be marked as an 'auto free' pool using the flag VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE will automatically free the second level blocks that are contained inside the first level block freed with VALGRIND_MEMPOOL_FREE. In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls to VALGRIND_FREELIKE_BLOCK for all the second level blocks included in the first level block. Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag without the VALGRIND_MEMPOOL_METAPOOL flag. */ #define VALGRIND_MEMPOOL_AUTO_FREE 1 #define VALGRIND_MEMPOOL_METAPOOL 2 #define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \ pool, rzB, is_zeroed, flags, 0) /* Destroy a memory pool. */ #define VALGRIND_DESTROY_MEMPOOL(pool) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \ pool, 0, 0, 0, 0) /* Associate a piece of memory with a memory pool. 
*/ #define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \ pool, addr, size, 0, 0) /* Disassociate a piece of memory from a memory pool. */ #define VALGRIND_MEMPOOL_FREE(pool, addr) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \ pool, addr, 0, 0, 0) /* Disassociate any pieces outside a particular range. */ #define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \ pool, addr, size, 0, 0) /* Resize and/or move a piece associated with a memory pool. */ #define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \ poolA, poolB, 0, 0, 0) /* Resize and/or move a piece associated with a memory pool. */ #define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \ pool, addrA, addrB, size, 0) /* Return 1 if a mempool exists, else 0. */ #define VALGRIND_MEMPOOL_EXISTS(pool) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__MEMPOOL_EXISTS, \ pool, 0, 0, 0, 0) /* Mark a piece of memory as being a stack. Returns a stack id. start is the lowest addressable stack byte, end is the highest addressable stack byte. */ #define VALGRIND_STACK_REGISTER(start, end) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__STACK_REGISTER, \ start, end, 0, 0, 0) /* Unmark the piece of memory associated with a stack id as being a stack. */ #define VALGRIND_STACK_DEREGISTER(id) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \ id, 0, 0, 0, 0) /* Change the start and end address of the stack id. start is the new lowest addressable stack byte, end is the new highest addressable stack byte. */ #define VALGRIND_STACK_CHANGE(id, start, end) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \ id, start, end, 0, 0) /* Load PDB debug info for Wine PE image_map. 
*/ #define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \ fd, ptr, total_size, delta, 0) /* Map a code address to a source file name and line number. buf64 must point to a 64-byte buffer in the caller's address space. The result will be dumped in there and is guaranteed to be zero terminated. If no info is found, the first byte is set to zero. */ #define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__MAP_IP_TO_SRCLOC, \ addr, buf64, 0, 0, 0) /* Disable error reporting for this thread. Behaves in a stack like way, so you can safely call this multiple times provided that VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times to re-enable reporting. The first call of this macro disables reporting. Subsequent calls have no effect except to increase the number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable reporting. Child threads do not inherit this setting from their parents -- they are always created with reporting enabled. */ #define VALGRIND_DISABLE_ERROR_REPORTING \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ 1, 0, 0, 0, 0) /* Re-enable error reporting, as per comments on VALGRIND_DISABLE_ERROR_REPORTING. */ #define VALGRIND_ENABLE_ERROR_REPORTING \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \ -1, 0, 0, 0, 0) /* Execute a monitor command from the client program. If a connection is opened with GDB, the output will be sent according to the output mode set for vgdb. If no connection is opened, output will go to the log output. Returns 1 if command not recognised, 0 otherwise. 
*/ #define VALGRIND_MONITOR_COMMAND(command) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \ command, 0, 0, 0, 0) #undef PLAT_x86_darwin #undef PLAT_amd64_darwin #undef PLAT_x86_win32 #undef PLAT_amd64_win64 #undef PLAT_x86_linux #undef PLAT_amd64_linux #undef PLAT_ppc32_linux #undef PLAT_ppc64be_linux #undef PLAT_ppc64le_linux #undef PLAT_arm_linux #undef PLAT_s390x_linux #undef PLAT_mips32_linux #undef PLAT_mips64_linux #undef PLAT_tilegx_linux #undef PLAT_x86_solaris #undef PLAT_amd64_solaris #endif /* __VALGRIND_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/third_party/README.md0000644000000000000000000000124413306562360022466 0ustar rootrootValgrind ======== Valgrind header files were extracted from valgrind 3.12.0 source tarball available from http://valgrind.org/: 6eb03c0c10ea917013a7622e483d61bb valgrind-3.12.0.tar.bz2 valgrind/valgrind.h was patched to fix -Wunused-value warnings with -DNVALGRIND=1: index 8f29f28..b425be5 100644 --- a/third_party/valgrind/valgrind.h +++ b/third_party/valgrind/valgrind.h @@ -214,7 +214,7 @@ #define VALGRIND_DO_CLIENT_REQUEST_EXPR( \ _zzq_default, _zzq_request, \ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \ - (_zzq_default ) + ({(void) _zzq_default; _zzq_default; }) tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/0000755000000000000000000000000013306562360017765 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/lf_lifo.h0000644000000000000000000000673413306562360021562 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LF_LIFO_H #define INCLUDES_TARANTOOL_LF_LIFO_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * A very primitive implementation of lock-free * LIFO (last in first out, AKA stack, AKA single-linked * list with head-only add and remove). * * It is only usable to store free pages of a memory allocator * or similar, since it assumes that all addresses are aligned, * and lower 16 bits of address can be used as a counter-based * solution for ABA problem. */ struct lf_lifo { void *next; }; static inline unsigned short aba_value(void *a) { return (intptr_t) a & 0xffff; } static inline struct lf_lifo * lf_lifo(void *a) { return (struct lf_lifo *) ((intptr_t) a & ~0xffff); } static inline void lf_lifo_init(struct lf_lifo *head) { head->next = NULL; } static inline struct lf_lifo * lf_lifo_push(struct lf_lifo *head, void *elem) { assert(lf_lifo(elem) == elem); /* Aligned address. 
*/ do { void *tail = head->next; lf_lifo(elem)->next = tail; /* * Sic: add 1 thus let ABA value overflow, *then* * coerce to unsigned short */ void *newhead = (char *) elem + aba_value((char *) tail + 1); if (pm_atomic_compare_exchange_strong(&head->next, &tail, newhead)) return head; } while (true); } static inline void * lf_lifo_pop(struct lf_lifo *head) { do { void *tail = head->next; struct lf_lifo *elem = lf_lifo(tail); if (elem == NULL) return NULL; /* * Discard the old tail's aba value, then save * the old head's value in the tail. * This way head's aba value grows monotonically * regardless of the exact sequence of push/pop * operations. */ void *newhead = ((char *) lf_lifo(elem->next) + aba_value(tail)); if (pm_atomic_compare_exchange_strong(&head->next, &tail, newhead)) return elem; } while (true); } static inline bool lf_lifo_is_empty(struct lf_lifo *head) { return head->next == NULL; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LF_LIFO_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/region.h0000644000000000000000000002353613306562360021432 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_REGION_H #define INCLUDES_TARANTOOL_SMALL_REGION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include "rlist.h" #include "slab_cache.h" #ifdef __cplusplus extern "C" { #endif /** * Region allocator. * * Good for allocating objects of any size, as long as * all of them can be freed at once. Keeps a list of * order-of-page-size memory blocks, thus has no external * fragmentation. Does have a fair bit of internal fragmentation, * but only if average allocation size is close to the block size. * Therefore is ideal for a ton of small allocations of different * sizes. * * Under the hood, the allocator uses a page cache of * mmap()-allocated pages. Pages of the page cache are never * released back to the operating system. * * Thread-safety * ------------- * @todo, not thread safe ATM * * Errors * ---------------- * The only type of failure which can occur is a failure to * allocate memory. alloc() calls return NULL in this case. */ /** A memory region. * * A memory region is a list of memory blocks. * * It's possible to allocate a chunk of any size * from a region. * It's not possible, however, to free a single allocated * piece, all memory must be freed at once with region_reset() or * region_free(). 
*/ struct region { struct slab_cache *cache; struct slab_list slabs; }; /** * Initialize a memory region. * @sa region_free(). */ static inline void region_create(struct region *region, struct slab_cache *cache) { region->cache = cache; slab_list_create(®ion->slabs); } /** * Free all allocated objects and release the allocated * blocks. */ void region_free(struct region *region); static inline void region_destroy(struct region *region) { return region_free(region); } /** Internal: a single block in a region. */ struct rslab { /* * slab is a wrapper around struct slab - with a few * extra members. */ struct slab slab; uint32_t used; }; static inline uint32_t rslab_sizeof() { return small_align(sizeof(struct rslab), sizeof(intptr_t)); } static inline void * rslab_data(struct rslab *slab) { return (char *) slab + rslab_sizeof(); } static inline void * rslab_data_end(struct rslab *slab) { return (char *)rslab_data(slab) + slab->used; } /** How much memory is available in a given block? */ static inline uint32_t rslab_unused(struct rslab *slab) { return slab->slab.size - rslab_sizeof() - slab->used; } void * region_reserve_slow(struct region *region, size_t size); static inline void * region_reserve(struct region *region, size_t size) { if (! rlist_empty(®ion->slabs.slabs)) { struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); if (size <= rslab_unused(slab)) return (char *) rslab_data(slab) + slab->used; } return region_reserve_slow(region, size); } /** Allocate size bytes from a region. 
*/ static inline void * region_alloc(struct region *region, size_t size) { void *ptr = region_reserve(region, size); if (ptr != NULL) { struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); assert(size <= rslab_unused(slab)); region->slabs.stats.used += size; slab->used += size; } return ptr; } static inline void * region_aligned_reserve(struct region *region, size_t size, size_t alignment) { /* reserve extra to allow for alignment */ void *ptr = region_reserve(region, size + alignment - 1); /* assuming NULL==0, aligned NULL still a NULL */ return (void *)small_align((uintptr_t)ptr, alignment); } static inline void * region_aligned_alloc(struct region *region, size_t size, size_t alignment) { void *ptr = region_aligned_reserve(region, size, alignment); if (ptr != NULL) { struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); /* * account for optional padding before the allocated * block (alignment) */ uint32_t effective_size = (uint32_t)( (char *)ptr - (char *)rslab_data_end(slab) + size); assert(effective_size <= rslab_unused(slab)); region->slabs.stats.used += effective_size; slab->used += effective_size; } return ptr; } /** * Mark region as empty, but keep the blocks. */ static inline void region_reset(struct region *region) { if (! rlist_empty(®ion->slabs.slabs)) { struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); region->slabs.stats.used -= slab->used; slab->used = 0; } } /** How much memory is used by this region. */ static inline size_t region_used(struct region *region) { return region->slabs.stats.used; } /** Return size bytes allocated last as a single chunk. */ void * region_join(struct region *region, size_t size); /** How much memory is held by this region. 
*/ static inline size_t region_total(struct region *region) { return region->slabs.stats.total; } static inline void region_free_after(struct region *region, size_t after) { if (region_used(region) > after) region_free(region); } /** Truncate the region to the given size */ void region_truncate(struct region *pool, size_t size); static inline void * region_alloc_cb(void *ctx, size_t size) { return region_alloc((struct region *) ctx, size); } static inline void * region_reserve_cb(void *ctx, size_t *size) { struct region *region = (struct region *) ctx; void *ptr = region_reserve(region, *size); struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); *size = rslab_unused(slab); return ptr; } #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" static inline void * region_alloc_xc(struct region *region, size_t size) { void *ptr = region_alloc(region, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "region", "new slab"); return ptr; } static inline void * region_alloc_xc_cb(void *ctx, size_t size) { return region_alloc_xc((struct region *) ctx, size); } static inline void * region_reserve_xc(struct region *region, size_t size) { void *ptr = region_reserve(region, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "region", "new slab"); return ptr; } static inline void * region_reserve_xc_cb(void *ctx, size_t *size) { void *ptr = region_reserve_cb(ctx, size); if (ptr == NULL) tnt_raise(OutOfMemory, *size, "region", "new slab"); return ptr; } static inline void * region_join_xc(struct region *region, size_t size) { void *ptr = region_join(region, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "region", "join"); return ptr; } static inline void * region_alloc0_xc(struct region *region, size_t size) { return memset(region_alloc_xc(region, size), 0, size); } static inline void region_dup_xc(struct region *region, const void *ptr, size_t size) { (void) memcpy(region_alloc_xc(region, size), ptr, size); } static 
inline void * region_aligned_alloc_xc(struct region *region, size_t size, size_t alignment) { void *ptr = region_aligned_alloc(region, size, alignment); if (ptr == NULL) tnt_raise(OutOfMemory, size, "region", "new slab"); return ptr; } static inline void * region_aligned_reserve_xc(struct region *region, size_t size, size_t alignment) { void *ptr = region_aligned_reserve(region, size, alignment); if (ptr == NULL) tnt_raise(OutOfMemory, size, "region", "new slab"); return ptr; } static inline void * region_aligned_calloc_xc(struct region *region, size_t size, size_t align) { return memset(region_aligned_alloc_xc(region, size, align), 0, size); } static inline void * region_aligned_alloc_xc_cb(void *ctx, size_t size) { return region_aligned_alloc_xc((struct region *) ctx, size, alignof(uint64_t)); } #define region_reserve_object_xc(region, T) \ (T *)region_aligned_reserve_xc((region), sizeof(T), alignof(T)) #define region_alloc_object_xc(region, T) \ (T *)region_aligned_alloc_xc((region), sizeof(T), alignof(T)) #define region_calloc_object_xc(region, T) \ (T *)region_aligned_calloc_xc((region), sizeof(T), alignof(T)) struct RegionGuard { struct region *region; size_t used; RegionGuard(struct region *region_arg) : region(region_arg), used(region_used(region_arg)) { /* nothing */ } ~RegionGuard() { region_truncate(region, used); } }; #endif /* __cplusplus */ #define region_reserve_object(region, T) \ (T *)region_aligned_reserve((region), sizeof(T), alignof(T)) #define region_alloc_object(region, T) \ (T *)region_aligned_alloc((region), sizeof(T), alignof(T)) #endif /* INCLUDES_TARANTOOL_SMALL_REGION_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/mempool.h0000644000000000000000000002165313306562360021615 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_MEMPOOL_H #define INCLUDES_TARANTOOL_SMALL_MEMPOOL_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include /* ssize_t */ #include #include "slab_cache.h" #include "lifo.h" #define RB_COMPACT 1 #include "rb.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Pool allocator. * * Good for allocating tons of small objects of the same size. * Stores all objects in order-of-virtual-page-size memory blocks, * called slabs. Each object can be freed if necessary. There is * (practically) no allocation overhead. Internal fragmentation * may occur if lots of objects are allocated, and then many of * them are freed in reverse-to-allocation order. * * Under the hood, uses a slab cache of mmap()-allocated slabs. * Slabs of the slab cache are never released back to the * operating system. 
* * Thread-safety * ------------- * Calls to alloc() and free() on the same mempool instance must * be externally synchronized. Use of different instances in * different threads is thread-safe (but they must also be based * on distinct slab caches). * * Exception-safety * ---------------- * The only type of failure which can occur is a failure to * allocate memory. In case of such error, an exception * (OutOfMemory) is raised. () * version of mempool_alloc() returns NULL rather than raises an * error in case of failure. */ /** mslab - a standard slab formatted to store objects of equal size. */ struct mslab { struct slab slab; /* Head of the list of used but freed objects */ void *free_list; /** Offset of an object that has never been allocated in mslab */ uint32_t free_offset; /** Number of available slots in the slab. */ uint32_t nfree; /** Used if this slab is a member of hot_slabs tree. */ rb_node(struct mslab) next_in_hot; /** Next slab in stagged slabs list in mempool object */ struct rlist next_in_cold; /** Set if this slab is a member of hot_slabs tree */ bool in_hot_slabs; }; /** * Mempool will try to allocate blocks large enough to ensure * the overhead from internal fragmentation is less than the * specified below. */ static const double OVERHEAD_RATIO = 0.01; static inline uint32_t mslab_sizeof() { return small_align(sizeof(struct mslab), sizeof(intptr_t)); } /** * Calculate the maximal size of an object for which it makes * sense to create a memory pool given the size of the slab. */ static inline uint32_t mempool_objsize_max(uint32_t slab_size) { /* Fit at least 4 objects in a slab, aligned by pointer size. */ return ((slab_size - mslab_sizeof()) / 16) & ~(sizeof(intptr_t) - 1); } typedef rb_tree(struct mslab) mslab_tree_t; /** A memory pool. */ struct mempool { /** * A link in delayed free list of pools. Must be the first * member in the struct. * @sa smfree_delayed(). */ struct lifo link; /** List of pointers for delayed free. 
*/ struct lifo delayed; /** The source of empty slabs. */ struct slab_cache *cache; /** All slabs. */ struct slab_list slabs; /** * Slabs with some amount of free space available are put * into this red-black tree, which is sorted by slab * address. A (partially) free slab with the smallest * address is chosen for allocation. This reduces internal * memory fragmentation across many slabs. */ mslab_tree_t hot_slabs; /** Cached leftmost node of hot_slabs tree. */ struct mslab *first_hot_slab; /** * Slabs with a little of free items count, staged to * be added to hot_slabs tree. Are used in case the * tree is empty or the allocator runs out of memory. */ struct rlist cold_slabs; /** * A completely empty slab which is not freed only to * avoid the overhead of slab_cache oscillation around * a single element allocation. */ struct mslab *spare; /** * The size of an individual object. All objects * allocated on the pool have the same size. */ uint32_t objsize; /** * Mempool slabs are ordered (@sa slab_cache.h for * definition of "ordered"). The order is calculated * when the pool is initialized or is set explicitly. * The latter is necessary for 'small' allocator, * which needs to quickly find mempool containing * an allocated object when the object is freed. */ uint8_t slab_order; /** How many objects can fit in a slab. */ uint32_t objcount; /** Offset from beginning of slab to the first object */ uint32_t offset; /** Address mask to translate ptr to slab */ intptr_t slab_ptr_mask; }; /** Allocation statistics. */ struct mempool_stats { /** Object size. */ uint32_t objsize; /** Total objects allocated. */ uint32_t objcount; /** Size of the slab. */ uint32_t slabsize; /** Number of slabs. All slabs are of the same size. */ uint32_t slabcount; /** Memory used and booked but passive (to see fragmentation). */ struct small_stats totals; }; void mempool_stats(struct mempool *mempool, struct mempool_stats *stats); /** * Number of objects in the pool. 
*/ static inline size_t mempool_count(struct mempool *pool) { return pool->slabs.stats.used/pool->objsize; } /** @todo: struct mempool_iterator */ void mempool_create_with_order(struct mempool *pool, struct slab_cache *cache, uint32_t objsize, uint8_t order); /** * Initialize a mempool. Tell the pool the size of objects * it will contain. * * objsize must be >= sizeof(mbitmap_t) * If allocated objects must be aligned, then objsize must * be aligned. The start of free area in a slab is always * uint64_t aligned. * * @sa mempool_destroy() */ static inline void mempool_create(struct mempool *pool, struct slab_cache *cache, uint32_t objsize) { size_t overhead = (objsize > sizeof(struct mslab) ? objsize : sizeof(struct mslab)); size_t slab_size = (size_t) (overhead / OVERHEAD_RATIO); if (slab_size > cache->arena->slab_size) slab_size = cache->arena->slab_size; /* * Calculate the amount of usable space in a slab. * @note: this asserts that slab_size_min is less than * SLAB_ORDER_MAX. */ uint8_t order = slab_order(cache, slab_size); assert(order <= cache->order_max); return mempool_create_with_order(pool, cache, objsize, order); } static inline bool mempool_is_initialized(struct mempool *pool) { return pool->cache != NULL; } /** * Free the memory pool and release all cached memory blocks. * @sa mempool_create() */ void mempool_destroy(struct mempool *pool); /** Allocate an object. */ void * mempool_alloc(struct mempool *pool); void mslab_free(struct mempool *pool, struct mslab *slab, void *ptr); /** * Free a single object. * @pre the object is allocated in this pool. */ static inline void mempool_free(struct mempool *pool, void *ptr) { #ifndef NDEBUG memset(ptr, '#', pool->objsize); #endif assert(ptr); struct mslab *slab = (struct mslab *) slab_from_ptr(ptr, pool->slab_ptr_mask); assert(slab->slab.order == pool->slab_order); pool->slabs.stats.used -= pool->objsize; mslab_free(pool, slab, ptr); } /** How much memory is used by this pool. 
*/ static inline size_t mempool_used(struct mempool *pool) { return pool->slabs.stats.used; } /** How much memory is held by this pool. */ static inline size_t mempool_total(struct mempool *pool) { return pool->slabs.stats.total; } #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" static inline void * mempool_alloc_xc(struct mempool *pool) { void *ptr = mempool_alloc(pool); if (ptr == NULL) tnt_raise(OutOfMemory, pool->objsize, "mempool", "new slab"); return ptr; } static inline void * mempool_alloc0_xc(struct mempool *pool) { return memset(mempool_alloc_xc(pool), 0, pool->objsize); } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_SMALL_MEMPOOL_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/lifo.h0000644000000000000000000000437513306562360021100 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LIFO_H #define INCLUDES_TARANTOOL_LIFO_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lifo { void *next; }; #define lifo(a) ((struct lifo *) a) static inline void lifo_init(struct lifo *head) { head->next = NULL; } static inline void lifo_push(struct lifo *head, void *elem) { lifo(elem)->next = head->next; head->next = elem; } static inline void * lifo_pop(struct lifo *head) { struct lifo *elem = lifo(head->next); if (elem) head->next = elem->next; return elem; } static inline void * lifo_peek(struct lifo *head) { return head->next; } static inline bool lifo_is_empty(struct lifo *head) { return head->next == NULL; } #undef lifo #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LIFO_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/slab_arena.c0000644000000000000000000001417413306562360022227 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "slab_arena.h" #include "quota.h" #include #include #include #include #include #include #include #include #include #include #if !defined(MAP_ANONYMOUS) #define MAP_ANONYMOUS MAP_ANON #endif static void munmap_checked(void *addr, size_t size) { if (munmap(addr, size)) { char buf[64]; intptr_t ignore_it = (intptr_t)strerror_r(errno, buf, sizeof(buf)); (void)ignore_it; fprintf(stderr, "Error in munmap(%p, %zu): %s\n", addr, size, buf); assert(false); } } static void * mmap_checked(size_t size, size_t align, int flags) { /* The alignment must be a power of two. */ assert((align & (align - 1)) == 0); /* The size must be a multiple of alignment */ assert((size & (align - 1)) == 0); /* * All mappings except the first are likely to * be aligned already. Be optimistic by trying * to map exactly the requested amount. */ void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0); if (map == MAP_FAILED) return NULL; if (((intptr_t) map & (align - 1)) == 0) return map; munmap_checked(map, size); /* * mmap enough amount to be able to align * the mapped address. This can lead to virtual memory * fragmentation depending on the kernels allocation * strategy. 
*/ map = mmap(NULL, size + align, PROT_READ | PROT_WRITE, flags | MAP_ANONYMOUS, -1, 0); if (map == MAP_FAILED) return NULL; /* Align the mapped address around slab size. */ size_t offset = (intptr_t) map & (align - 1); if (offset != 0) { /* Unmap unaligned prefix and postfix. */ munmap_checked(map, align - offset); map += align - offset; munmap_checked(map + size, offset); } else { /* The address is returned aligned. */ munmap_checked(map + size, align); } return map; } #if 0 /** This is a way to round things up without using a built-in. */ static size_t pow2round(size_t size) { int shift = 1; size_t res = size - 1; while (res & (res + 1)) { res |= res >> shift; shift <<= 1; } return res + 1; } #endif #define MAX(a, b) ((a) > (b) ? (a) : (b)) #define MIN(a, b) ((a) < (b) ? (a) : (b)) int slab_arena_create(struct slab_arena *arena, struct quota *quota, size_t prealloc, uint32_t slab_size, int flags) { assert(flags & (MAP_PRIVATE | MAP_SHARED)); lf_lifo_init(&arena->cache); VALGRIND_MAKE_MEM_DEFINED(&arena->cache, sizeof(struct lf_lifo)); /* * Round up the user supplied data - it can come in * directly from the configuration file. Allow * zero-size arena for testing purposes. */ arena->slab_size = small_round(MAX(slab_size, SLAB_MIN_SIZE)); arena->quota = quota; /** Prealloc can not be greater than the quota */ prealloc = MIN(prealloc, quota_total(quota)); /** Extremely large sizes can not be aligned properly */ prealloc = MIN(prealloc, SIZE_MAX - arena->slab_size); /* Align prealloc around a fixed number of slabs. */ arena->prealloc = small_align(prealloc, arena->slab_size); arena->used = 0; arena->flags = flags; if (arena->prealloc) { arena->arena = mmap_checked(arena->prealloc, arena->slab_size, arena->flags); } else { arena->arena = NULL; } return arena->prealloc && !arena->arena ? 
-1 : 0; } void slab_arena_destroy(struct slab_arena *arena) { void *ptr; size_t total = 0; while ((ptr = lf_lifo_pop(&arena->cache))) { if (arena->arena == NULL || ptr < arena->arena || ptr >= arena->arena + arena->prealloc) { munmap_checked(ptr, arena->slab_size); } total += arena->slab_size; } if (arena->arena) munmap_checked(arena->arena, arena->prealloc); assert(total == arena->used); } void * slab_map(struct slab_arena *arena) { void *ptr; if ((ptr = lf_lifo_pop(&arena->cache))) { VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size); return ptr; } if (quota_use(arena->quota, arena->slab_size) < 0) return NULL; /** Need to allocate a new slab. */ size_t used = pm_atomic_fetch_add(&arena->used, arena->slab_size); used += arena->slab_size; if (used <= arena->prealloc) { ptr = arena->arena + used - arena->slab_size; VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size); return ptr; } ptr = mmap_checked(arena->slab_size, arena->slab_size, arena->flags); if (!ptr) { __sync_sub_and_fetch(&arena->used, arena->slab_size); quota_release(arena->quota, arena->slab_size); } VALGRIND_MAKE_MEM_UNDEFINED(ptr, arena->slab_size); return ptr; } void slab_unmap(struct slab_arena *arena, void *ptr) { if (ptr == NULL) return; lf_lifo_push(&arena->cache, ptr); VALGRIND_MAKE_MEM_NOACCESS(ptr, arena->slab_size); VALGRIND_MAKE_MEM_DEFINED(lf_lifo(ptr), sizeof(struct lf_lifo)); } void slab_arena_mprotect(struct slab_arena *arena) { if (arena->arena) mprotect(arena->arena, arena->prealloc, PROT_READ); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/region.c0000644000000000000000000001012513306562360021413 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "region.h" #include /* ssize_t */ #include #include void * region_reserve_slow(struct region *region, size_t size) { /* The new slab must have at least this many bytes available. */ size_t slab_min_size = size + rslab_sizeof() - slab_sizeof(); struct rslab *slab; slab = (struct rslab *) slab_get(region->cache, slab_min_size); if (slab == NULL) return NULL; slab->used = 0; /* * Sic: add the new slab to the beginning of the * region, even if it is full, otherwise, * region_truncate() won't work. */ slab_list_add(®ion->slabs, &slab->slab, next_in_list); VALGRIND_MALLOCLIKE_BLOCK(rslab_data(slab), rslab_unused(slab), 0, 0); return rslab_data(slab); } void region_free(struct region *region) { struct slab *slab, *tmp; rlist_foreach_entry_safe(slab, ®ion->slabs.slabs, next_in_list, tmp) slab_put(region->cache, slab); slab_list_create(®ion->slabs); } /** * Release all memory down to new_size; new_size has to be previously * obtained by calling region_used(). 
*/ void region_truncate(struct region *region, size_t used) { assert(region_used(region) >= used); size_t cut_size = region_used(region) - used; while (! rlist_empty(®ion->slabs.slabs)) { struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); if (slab->used > cut_size) { /* This is the last slab to trim. */ slab->used -= cut_size; cut_size = 0; break; } cut_size -= slab->used; /* Remove the entire slab. */ slab_list_del(®ion->slabs, &slab->slab, next_in_list); slab_put(region->cache, &slab->slab); } assert(cut_size == 0); region->slabs.stats.used = used; } void * region_join(struct region *region, size_t size) { if (rlist_empty(®ion->slabs.slabs)) { assert(size == 0); return region_alloc(region, 0); } struct rslab *slab = rlist_first_entry(®ion->slabs.slabs, struct rslab, slab.next_in_list); if (slab->used >= size) { /* Don't move stuff if it's in a single chunk. */ return (char *) rslab_data(slab) + slab->used - size; } /** * Use region_reserve() to ensure slab->size is not * changed when the joined region is in the same slab * as the final chunk. */ char *ptr = region_reserve(region, size); size_t offset = size; if (ptr == NULL) return NULL; /* * Copy data from last chunk to first, i.e. in the reverse order. */ while (offset > 0 && slab->used <= offset) { memcpy(ptr + offset - slab->used, rslab_data(slab), slab->used); offset -= slab->used; slab = rlist_next_entry(slab, slab.next_in_list); } if (offset > 0) memcpy(ptr, rslab_data(slab) + slab->used - offset, offset); region_alloc(region, size); return ptr; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/ibuf.h0000644000000000000000000001146513306562360021072 0ustar rootroot#ifndef TARANTOOL_SMALL_IBUF_H_INCLUDED #define TARANTOOL_SMALL_IBUF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @module Input buffer. */ struct slab_cache; /* * Continuous piece of memory to store input. * Allocated in factors of 'start_capacity'. * Maintains position of the data "to be processed". * * Typical use case: * * struct ibuf *in; * coio_bread(coio, in, request_len); * if (ibuf_size(in) >= request_len) { * process_request(in->rpos, request_len); * in->rpos += request_len; * } */ struct ibuf { struct slab_cache *slabc; char *buf; /** Start of input. */ char *rpos; /** End of useful input */ char *wpos; /** End of buffer. 
*/ char *end; size_t start_capacity; }; void ibuf_create(struct ibuf *ibuf, struct slab_cache *slabc, size_t start_capacity); void ibuf_destroy(struct ibuf *ibuf); void ibuf_reinit(struct ibuf *ibuf); /** How much data is read and is not parsed yet. */ static inline size_t ibuf_used(struct ibuf *ibuf) { assert(ibuf->wpos >= ibuf->rpos); return ibuf->wpos - ibuf->rpos; } /** How much data can we fit beyond buf->wpos */ static inline size_t ibuf_unused(struct ibuf *ibuf) { assert(ibuf->wpos <= ibuf->end); return ibuf->end - ibuf->wpos; } /** How much memory is allocated */ static inline size_t ibuf_capacity(struct ibuf *ibuf) { return ibuf->end - ibuf->buf; } /** * Integer value of the position in the buffer - stable * in case of realloc. */ static inline size_t ibuf_pos(struct ibuf *ibuf) { assert(ibuf->buf <= ibuf->rpos); return ibuf->rpos - ibuf->buf; } /** Forget all cached input. */ static inline void ibuf_reset(struct ibuf *ibuf) { ibuf->rpos = ibuf->wpos = ibuf->buf; } void * ibuf_reserve_slow(struct ibuf *ibuf, size_t size); static inline void * ibuf_reserve(struct ibuf *ibuf, size_t size) { if (ibuf->wpos + size <= ibuf->end) return ibuf->wpos; return ibuf_reserve_slow(ibuf, size); } static inline void * ibuf_alloc(struct ibuf *ibuf, size_t size) { void *ptr; if (ibuf->wpos + size <= ibuf->end) ptr = ibuf->wpos; else { ptr = ibuf_reserve_slow(ibuf, size); if (ptr == NULL) return NULL; } ibuf->wpos += size; return ptr; } static inline void * ibuf_reserve_cb(void *ctx, size_t *size) { struct ibuf *buf = (struct ibuf *) ctx; void *p = ibuf_reserve(buf, *size ? *size : buf->start_capacity); *size = ibuf_unused(buf); return p; } static inline void * ibuf_alloc_cb(void *ctx, size_t size) { return ibuf_alloc((struct ibuf *) ctx, size); } #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" /** Reserve space for sz bytes in the input buffer. 
*/ static inline void * ibuf_reserve_xc(struct ibuf *ibuf, size_t size) { void *ptr = ibuf_reserve(ibuf, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "ibuf", "reserve"); return ptr; } static inline void * ibuf_alloc_xc(struct ibuf *ibuf, size_t size) { void *ptr = ibuf_alloc(ibuf, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "ibuf", "alloc"); return ptr; } static inline void * ibuf_reserve_xc_cb(void *ctx, size_t *size) { void *ptr = ibuf_reserve_cb(ctx, size); if (ptr == NULL) tnt_raise(OutOfMemory, *size, "ibuf", "reserve"); return ptr; } static inline void * ibuf_alloc_xc_cb(void *ctx, size_t size) { void *ptr = ibuf_alloc_cb(ctx, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "ibuf", "alloc"); return ptr; } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_SMALL_IBUF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/small.h0000644000000000000000000002157713306562360021262 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_SMALL_H #define INCLUDES_TARANTOOL_SMALL_SMALL_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "mempool.h" #include "slab_arena.h" #include "lifo.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Small object allocator. * * The allocator consists of a collection of mempools. * * There are two containers of pools: * * pools for objects of size 8-500 bytes are stored in an array, * where pool->objsize of each array member is a multiple of 8-16 * (value defined in STEP_SIZE constant). These are * "stepped" pools, since pool->objsize of each next pool in the * array differs from the previous size by a fixed step size. * * For example, there is a pool for size range 16-32, * another one for 32-48, 48-64, etc. This makes the look up * procedure for small allocations just a matter of getting an * array index via a bit shift. All stepped pools are initialized * when an instance of small_alloc is created. * * Objects of size beyond the stepped pools range (the upper limit * is usually around 300 bytes), are stored in pools with a size * which is a multiple of alloc_factor. alloc_factor is itself * a configuration constant in the range (1.0, 2.0]. I.e. imagine * alloc_factor is 1.1, then there are pools for objects of size * 300-330, 330-363, and so on. These pools are created upon first * allocation within given range, and stored in a red-black tree. * * Initially this red-black tree contains only a pool for * alloc->object_max. 
* When a request for a new allocation of sz bytes arrives * and it can not be satisfied from a stepped pool, * a search for a nearest factored pool is made in the tree. * * If, for the nearest found factored pool: * * pool->objsize > sz * alloc_factor, * * (i.e. pool object size is too big) a new factored pool is * created and inserted into the tree. * * This way the tree only contains factored pools for sizes * which are actually used by the server, and can be kept * small. */ /** Basic constants of small object allocator. */ enum { /** How many stepped pools there is. */ STEP_POOL_MAX = 32, /** How many factored pools there can be. */ FACTOR_POOL_MAX = 256, }; enum small_opt { SMALL_DELAYED_FREE_MODE }; /** * A mempool to store objects sized within one multiple of * alloc_factor. Is a member of the red-black tree which * contains all such pools. * * Example: let's assume alloc_factor is 1.1. There will be an * instance of factor_pool for objects of size from 300 to 330, * from 330 to 363, and so on. */ struct factor_pool { /** rb_tree entry */ rb_node(struct factor_pool) node; /** the pool itself. */ struct mempool pool; /** * Objects starting from this size and up to * pool->objsize are stored in this factored * pool. */ size_t objsize_min; /** next free factor pool in the cache. */ struct factor_pool *next; }; typedef rb_tree(struct factor_pool) factor_tree_t; /** * Free mode */ enum small_free_mode { /** Free objects immediately. */ SMALL_FREE, /** Collect garbage after delayed free. */ SMALL_COLLECT_GARBAGE, /** Postpone deletion of objects. */ SMALL_DELAYED_FREE, }; /** A slab allocator for a wide range of object sizes. */ struct small_alloc { struct slab_cache *cache; uint32_t step_pool_objsize_max; /** * All slabs in all pools must be of the same order, * otherwise small_free() has no way to derive from * pointer its slab and then the pool. */ /** * An array of "stepped" pools, pool->objsize of adjacent * pools differ by a fixed size (step). 
*/ struct mempool step_pools[STEP_POOL_MAX]; /** A cache for nodes in the factor_pools tree. */ struct factor_pool factor_pool_cache[FACTOR_POOL_MAX]; /** First free element in factor_pool_cache. */ struct factor_pool *factor_pool_next; /** * A red-black tree with "factored" pools, i.e. * each pool differs from its neighbor by a factor. */ factor_tree_t factor_pools; /** * List of mempool which objects to be freed if delayed free mode. */ struct lifo delayed; /** * List of large allocations by malloc() to be freed in delayed mode. */ struct lifo delayed_large; /** * The factor used for factored pools. Must be > 1. * Is provided during initialization. */ float factor; uint32_t objsize_max; /** * Free mode. */ enum small_free_mode free_mode; /** * Object size of step pool 0 divided by STEP_SIZE, to * quickly find the right stepped pool given object size. */ uint32_t step_pool0_step_count; }; /** Initialize a small memory allocator. */ void small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache, uint32_t objsize_min, float alloc_factor); /** * Enter or leave delayed mode - in delayed mode smfree_delayed() * doesn't free chunks but puts them into a pool. */ void small_alloc_setopt(struct small_alloc *alloc, enum small_opt opt, bool val); /** Destroy the allocator and all allocated memory. */ void small_alloc_destroy(struct small_alloc *alloc); /** Allocate a piece of memory in the small allocator. * * @retval NULL the requested size is beyond objsize_max * or out of memory */ void * smalloc(struct small_alloc *alloc, size_t size); /** Free memory chunk allocated by the small allocator. */ /** * Free a small objects. * * This boils down to finding the object's mempool and delegating * to mempool_free(). * * If the pool becomes completely empty, and it's a factored pool, * and the factored pool's cache is empty, put back the empty * factored pool into the factored pool cache. 
*/ void smfree(struct small_alloc *alloc, void *ptr, size_t size); /** * Free memory chunk allocated by the small allocator * if not in snapshot mode, otherwise put to the delayed * free list. */ void smfree_delayed(struct small_alloc *alloc, void *ptr, size_t size); /** * @brief Return an unique index associated with a chunk allocated * by the allocator. * * This index space is more dense than the pointers space, * especially in the least significant bits. This number is * needed because some types of box's indexes (e.g. BITSET) have * better performance then they operate on sequential offsets * (i.e. dense space) instead of memory pointers (sparse space). * * The calculation is based on SLAB number and the position of an * item within it. Current implementation only guarantees that * adjacent chunks from one SLAB will have consecutive indexes. * That is, if two chunks were sequentially allocated from one * chunk they will have sequential ids. If a second chunk was * allocated from another SLAB thеn the difference between indexes * may be more than one. * * @param ptr pointer to memory allocated in small_alloc * @return unique index */ size_t small_ptr_compress(struct small_alloc *alloc, void *ptr); /** * Perform the opposite action of small_ptr_compress(). 
*/ void * small_ptr_decompress(struct small_alloc *alloc, size_t val); typedef int (*mempool_stats_cb)(const struct mempool_stats *stats, void *cb_ctx); void small_stats(struct small_alloc *alloc, struct small_stats *totals, mempool_stats_cb cb, void *cb_ctx); #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" static inline void * smalloc_xc(struct small_alloc *alloc, size_t size, const char *where) { void *ptr = smalloc(alloc, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "slab allocator", where); return ptr; } static inline void * smalloc0_xc(struct small_alloc *alloc, size_t size, const char *where) { return memset(smalloc_xc(alloc, size, where), 0, size); } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_SMALL_SMALL_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/slab_cache.h0000644000000000000000000002020713306562360022203 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_SLAB_CACHE_H #define INCLUDES_TARANTOOL_SMALL_SLAB_CACHE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "rlist.h" #include "slab_arena.h" #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ extern const uint32_t slab_magic; struct slab { /* * Next slab in the list of allocated slabs. Unused if * this slab has a buddy. Sic: if a slab is not allocated * but is made by a split of a larger (allocated) slab, * this member got to be left intact, to not corrupt * cache->allocated list. */ struct rlist next_in_cache; /** Next slab in slab_list->slabs list. */ struct rlist next_in_list; /** * Allocated size. * Is different from (SLAB_MIN_SIZE << slab->order) * when requested size is bigger than SLAB_MAX_SIZE * (i.e. slab->order is SLAB_CLASS_LAST). */ size_t size; /** Slab magic (for sanity checks). */ uint32_t magic; /** Base of lb(size) for ordered slabs. */ uint8_t order; /** * Only used for buddy slabs. If the buddy of the current * free slab is also free, both slabs are merged and * a free slab of the higher order emerges. * Value of 0 means the slab is free. Otherwise * slab->in_use is set to slab->order + 1. */ uint8_t in_use; }; /** Allocation statistics. */ struct small_stats { size_t used; size_t total; }; static inline void small_stats_reset(struct small_stats *stats) { stats->used = stats->total = 0; } /** * A general purpose list of slabs. Is used * to store unused slabs of a certain order in the * slab cache, as well as to contain allocated * slabs of a specialized allocator. 
*/ struct slab_list { struct rlist slabs; /** Total/used bytes in this list. */ struct small_stats stats; }; #define slab_list_add(list, slab, member) \ do { \ rlist_add_entry(&(list)->slabs, (slab), member);\ (list)->stats.total += (slab)->size; \ } while (0) #define slab_list_del(list, slab, member) \ do { \ rlist_del_entry((slab), member); \ (list)->stats.total -= (slab)->size; \ } while (0) static inline void slab_list_create(struct slab_list *list) { rlist_create(&list->slabs); small_stats_reset(&list->stats); } /* * A binary logarithmic distance between the smallest and * the largest slab in the cache can't be that big, really. */ enum { ORDER_MAX = 16 }; struct slab_cache { /* The source of allocations for this cache. */ struct slab_arena *arena; /* * Min size of the slab in the cache maintained * using the buddy system. The logarithmic distance * between order0_size and arena->slab_max_size * defines the number of "orders" of slab cache. * This distance can't be more than ORDER_MAX. */ uint32_t order0_size; /* * Binary logarithm of order0_size, useful in pointer * arithmetics. */ uint8_t order0_size_lb; /* * Slabs of order in range [0, order_max) have size * which is a power of 2. Slabs in the next order are * double the size of the previous order. Slabs of the * previous order are obtained by splitting a slab of the * next order, and so on until order is order_max * Slabs of order order_max are obtained directly * from slab_arena. This system is also known as buddy * system. */ uint8_t order_max; /** All allocated slabs used in the cache. * The stats reflect the total used/allocated * memory in the cache. */ struct slab_list allocated; /** * Lists of unused slabs, for each slab order. * * A used slab is removed from the list and its * next_in_list link may be reused for some other purpose. 
*/ struct slab_list orders[ORDER_MAX+1]; #ifndef _NDEBUG pthread_t thread_id; #endif }; void slab_cache_create(struct slab_cache *cache, struct slab_arena *arena); void slab_cache_destroy(struct slab_cache *cache); /** * Allocate ordered slab * @see slab_order() */ struct slab * slab_get_with_order(struct slab_cache *cache, uint8_t order); /** * Deallocate ordered slab */ void slab_put_with_order(struct slab_cache *cache, struct slab *slab); /** * Allocate large slab. * @pre size > slab_order_size(cache->arena->slab_size) */ struct slab * slab_get_large(struct slab_cache *slab, size_t size); /** * Deallocate large slab. * @pre slab was allocated with slab_get_large() */ void slab_put_large(struct slab_cache *cache, struct slab *slab); /** * A shortcut for slab_get_with_order()/slab_get_large() * @see slab_get_with_order() * @see slab_get_large() */ struct slab * slab_get(struct slab_cache *cache, size_t size); /** * Shortcut for slab_put_with_order()/slab_put_large() * @see slab_get_with_order() * @see slab_get_large() */ void slab_put(struct slab_cache *cache, struct slab *slab); /** * Return the number of bytes used by this slab cache. * @remark This function is thread-safe. */ static inline size_t slab_cache_used(struct slab_cache *slabc) { return slabc->allocated.stats.used; } /** * Given a pointer allocated in a slab, get the handle * of the slab itself. */ static inline struct slab * slab_from_ptr(void *ptr, intptr_t slab_mask) { intptr_t addr = (intptr_t) ptr; /** All memory mapped slabs are slab->size aligned. */ struct slab *slab = (struct slab *)(addr & slab_mask); assert(slab->magic == slab_magic); return slab; } /* Aligned size of slab meta. */ static inline uint32_t slab_sizeof() { return small_align(sizeof(struct slab), sizeof(intptr_t)); } /** Useful size of a slab. 
*/ static inline uint32_t slab_capacity(struct slab *slab) { return slab->size - slab_sizeof(); } static inline void * slab_data(struct slab *slab) { return (char *) slab + slab_sizeof(); } static inline struct slab * slab_from_data(void *data) { return (struct slab *) ((char *) data - slab_sizeof()); } void slab_cache_check(struct slab_cache *cache); /** * Find the nearest power of 2 size capable of containing * a chunk of the given size. Adjust for cache->order0_size * and arena->slab_size. */ static inline uint8_t slab_order(struct slab_cache *cache, size_t size) { assert(size <= UINT32_MAX); if (size <= cache->order0_size) return 0; if (size > cache->arena->slab_size) return cache->order_max + 1; return (uint8_t) (CHAR_BIT * sizeof(unsigned) - __builtin_clz((unsigned) size - 1) - cache->order0_size_lb); } /** Convert slab order to the mmap()ed size. */ static inline intptr_t slab_order_size(struct slab_cache *cache, uint8_t order) { assert(order <= cache->order_max); intptr_t size = 1; return size << (order + cache->order0_size_lb); } /** * Debug only: track that all allocations * are made from a single thread. */ static inline void slab_cache_set_thread(struct slab_cache *cache) { (void) cache; #ifndef _NDEBUG cache->thread_id = pthread_self(); #endif } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_SMALL_SLAB_CACHE_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/lsregion.h0000644000000000000000000001652613306562360021772 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LSREGION_H #define INCLUDES_TARANTOOL_LSREGION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "rlist.h" #include "quota.h" #include "slab_cache.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #define LSLAB_NOT_USED_ID -1 /** * Wrapper for a slab that tracks a size of used memory and * maximal identifier of memory that was allocated in the slab. */ struct lslab { /** Link in the lsregion.slabs list. */ struct rlist next_in_list; /** * Slab allocated size. */ size_t slab_size; /** * Size of used memory including aligned size of this * structure. */ size_t slab_used; /** * Maximal id that was used to alloc data from the slab. */ int64_t max_id; }; /** * Log structured allocator treats memory as a sequentially * written log. It allows to allocate memory chunks of any size, * but does not support free() of an individual chunk. Instead, * each chunk, when allocated, needs to be identified with an id. * It is assumed that ids are nondecreasing. * The chunks are stored in equally-sized slabs, obtained from * slab arena. 
* To free memory, the allocator requires an oldest id before * which all memory could be discarded. Upon free, it returns * all slabs containing chunks with smaller ids to the slab arena. * * id_i <= id_(i + 1) * *-------* *-------* *-------* *-------* * | slab |-->| slab |-->| slab |--> -->| slab | * *-------* *-------* *-------* *-------* * <= id1 <= id2 | <= id3 <= idN * | * truncate with id in [id2, id3) deletes from this position. */ struct lsregion { /** * List of memory slabs and the statistics. The older a * slab is, the closer it is placed to front of the list. */ struct slab_list slabs; /** Slabs arena - source for memory slabs. */ struct slab_arena *arena; struct lslab *cached; }; /** Aligned size of the struct lslab. */ static inline size_t lslab_sizeof() { return small_align(sizeof(struct lslab), sizeof(intptr_t)); } /** Initialize the lslab object. */ static inline void lslab_create(struct lslab *slab, size_t size) { rlist_create(&slab->next_in_list); slab->slab_size = size; slab->slab_used = lslab_sizeof(); slab->max_id = LSLAB_NOT_USED_ID; } /** * Size of the unused part of the slab. * @param slab Slab container. * @retval Unsed memory size. */ static inline size_t lslab_unused(const struct lslab *slab) { assert(slab->slab_size >= slab->slab_used); return slab->slab_size - slab->slab_used; } /** * Pointer to the end of the used part of the slab. * @param slab Slab container. * @retval Pointer to the unused part of the slab. */ static inline void * lslab_pos(struct lslab *slab) { return (char *) slab + slab->slab_used; } /** * Initialize log structured allocator. * @param lsregion Allocator object. * @param arena Slabs arena. */ static inline void lsregion_create(struct lsregion *lsregion, struct slab_arena *arena) { assert(arena != NULL); assert(arena->slab_size > lslab_sizeof()); slab_list_create(&lsregion->slabs); lsregion->arena = arena; lsregion->cached = NULL; } /** @sa lsregion_alloc(). 
*/ void * lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id); /** * Allocate \p size bytes and assicoate the allocated block * with \p id. * @param lsregion Allocator object. * @param size Size to allocate. * @param id Memory chunk identifier. * * @retval not NULL Success. * @retval NULL Memory error. */ static inline void * lsregion_alloc(struct lsregion *lsregion, size_t size, int64_t id) { /* If there is an existing slab then try to use it. */ if (! rlist_empty(&lsregion->slabs.slabs)) { struct lslab *slab; slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab, next_in_list); assert(slab != NULL); assert(slab->max_id <= id); if (size <= lslab_unused(slab)) { void *res = lslab_pos(slab); slab->slab_used += size; slab->max_id = id; lsregion->slabs.stats.used += size; return res; } } return lsregion_alloc_slow(lsregion, size, id); } /** * Try to free all memory blocks in which the biggest identifier * is less or equal then the specified identifier. * @param lsregion Allocator object. * @param min_id Free all memory blocks with * max_id <= this parameter. */ static inline void lsregion_gc(struct lsregion *lsregion, int64_t min_id) { struct lslab *slab, *next; size_t arena_slab_size = lsregion->arena->slab_size; /* * First blocks are the oldest so free them until * max_id > min_id. */ rlist_foreach_entry_safe(slab, &lsregion->slabs.slabs, next_in_list, next) { if (slab->max_id > min_id) break; rlist_del_entry(slab, next_in_list); /* * lslab_sizeof() must not affect the used bytes * count. 
*/ lsregion->slabs.stats.used -= slab->slab_used - lslab_sizeof(); if (slab->slab_size > arena_slab_size) { /* Never put large slabs into cache */ quota_release(lsregion->arena->quota, slab->slab_size); lsregion->slabs.stats.total -= slab->slab_size; free(slab); } else if (lsregion->cached != NULL) { lsregion->slabs.stats.total -= slab->slab_size; slab_unmap(lsregion->arena, slab); } else { lslab_create(slab, slab->slab_size); lsregion->cached = slab; } } } /** * Free all resources occupied by the allocator. * @param lsregion Allocator object. */ static inline void lsregion_destroy(struct lsregion *lsregion) { if (! rlist_empty(&lsregion->slabs.slabs)) lsregion_gc(lsregion, INT64_MAX); if (lsregion->cached != NULL) slab_unmap(lsregion->arena, lsregion->cached); } /** Size of the allocated memory. */ static inline size_t lsregion_used(const struct lsregion *lsregion) { return lsregion->slabs.stats.used; } /** Size of the allocated and reserved memory. */ static inline size_t lsregion_total(const struct lsregion *lsregion) { return lsregion->slabs.stats.total; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/slab_cache.c0000644000000000000000000003257113306562360022205 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "slab_cache.h" #include #include #include #include #include #include #include #include #include #include "quota.h" const uint32_t slab_magic = 0xeec0ffee; #if !defined(MAP_ANONYMOUS) /* * MAP_ANON is deprecated, MAP_ANONYMOUS should be used instead. * Unfortunately, it's not universally present (e.g. not present * on FreeBSD. */ #define MAP_ANONYMOUS MAP_ANON #endif /* !defined(MAP_ANONYMOUS) */ static inline void slab_assert(struct slab_cache *cache, struct slab *slab) { (void) slab; assert(pthread_equal(cache->thread_id, pthread_self())); assert(slab->magic == slab_magic); assert(slab->order <= cache->order_max + 1); if (slab->order <= cache->order_max) { size_t size = slab_order_size(cache, slab->order); (void) size; assert(slab->size == size); intptr_t addr = (intptr_t) slab; (void) addr; assert(addr == (intptr_t) (addr & ~(size - 1))); } } /** Mark a slab as free. */ static inline void slab_set_free(struct slab_cache *cache, struct slab *slab) { assert(slab->in_use == slab->order + 1); /* Sanity. 
*/ cache->allocated.stats.used -= slab->size; cache->orders[slab->order].stats.used -= slab->size; slab->in_use = 0; VALGRIND_MEMPOOL_FREE(cache, slab_data(slab)); } static inline void slab_set_used(struct slab_cache *cache, struct slab *slab) { cache->allocated.stats.used += slab->size; cache->orders[slab->order].stats.used += slab->size; /* Not a boolean to have an extra assert. */ slab->in_use = 1 + slab->order; VALGRIND_MEMPOOL_ALLOC(cache, slab_data(slab), slab_capacity(slab)); } static inline bool slab_is_free(struct slab *slab) { return slab->in_use == 0; } static inline void slab_poison(struct slab *slab) { (void)slab; #ifndef NDEBUG VALGRIND_MAKE_MEM_UNDEFINED(slab_data(slab), slab_capacity(slab)); const char poison_char = 'P'; memset(slab_data(slab), poison_char, slab_capacity(slab)); #endif VALGRIND_MAKE_MEM_NOACCESS(slab_data(slab), slab_capacity(slab)); } static inline void slab_create(struct slab *slab, uint8_t order, size_t size) { slab->magic = slab_magic; slab->order = order; slab->in_use = 0; slab->size = size; } static inline struct slab * slab_buddy(struct slab_cache *cache, struct slab *slab) { assert(slab->order <= cache->order_max); if (slab->order == cache->order_max) return NULL; /* The buddy address has its respective bit negated. */ return (void *)((intptr_t) slab ^ slab_order_size(cache, slab->order)); } static inline struct slab * slab_split(struct slab_cache *cache, struct slab *slab) { assert(slab->order > 0); uint8_t new_order = slab->order - 1; size_t new_size = slab_order_size(cache, new_order); slab_create(slab, new_order, new_size); struct slab *buddy = slab_buddy(cache, slab); VALGRIND_MAKE_MEM_UNDEFINED(buddy, sizeof(*buddy)); slab_create(buddy, new_order, new_size); slab_list_add(&cache->orders[buddy->order], buddy, next_in_list); return slab; } static inline struct slab * slab_merge(struct slab_cache *cache, struct slab *slab, struct slab *buddy) { assert(slab_buddy(cache, slab) == buddy); struct slab *merged = slab > buddy ? 
buddy : slab; /** Remove the buddy from the free list. */ slab_list_del(&cache->orders[buddy->order], buddy, next_in_list); merged->order++; merged->size = slab_order_size(cache, merged->order); return merged; } void slab_cache_create(struct slab_cache *cache, struct slab_arena *arena) { cache->arena = arena; /* * We have a fixed number of orders (ORDER_MAX); calculate * the size of buddies in the smallest order, given the size * of the slab size in the slab arena. */ long min_order0_size = sysconf(_SC_PAGESIZE); assert((long)arena->slab_size >= min_order0_size); cache->order_max = small_lb(arena->slab_size / min_order0_size); if (cache->order_max > ORDER_MAX - 1) cache->order_max = ORDER_MAX - 1; cache->order0_size = arena->slab_size >> cache->order_max; cache->order0_size_lb = small_lb(cache->order0_size); slab_list_create(&cache->allocated); uint8_t i; for (i = 0; i <= cache->order_max; i++) slab_list_create(&cache->orders[i]); slab_cache_set_thread(cache); VALGRIND_CREATE_MEMPOOL_EXT(cache, 0, 0, VALGRIND_MEMPOOL_METAPOOL | VALGRIND_MEMPOOL_AUTO_FREE); } void slab_cache_destroy(struct slab_cache *cache) { struct rlist *slabs = &cache->allocated.slabs; /* * cache->allocated contains huge allocations and * slabs of the largest order. All smaller slabs are * obtained from larger slabs by splitting. */ struct slab *slab, *tmp; rlist_foreach_entry_safe(slab, slabs, next_in_cache, tmp) { if (slab->order == cache->order_max + 1) { size_t slab_size = slab->size; quota_release(cache->arena->quota, slab_size); VALGRIND_MEMPOOL_FREE(cache, slab_data(slab)); free(slab); } else { slab_unmap(cache->arena, slab); } } VALGRIND_DESTROY_MEMPOOL(cache); } struct slab * slab_get_with_order(struct slab_cache *cache, uint8_t order) { assert(order <= cache->order_max); struct slab *slab; /* Search for the first available slab. If a slab * of a bigger size is found, it can be split. * If cache->order_max is reached and there are no * free slabs, allocate a new one on arena. 
*/ struct slab_list *list= &cache->orders[order]; for ( ; rlist_empty(&list->slabs); list++) { if (list == cache->orders + cache->order_max) { slab = slab_map(cache->arena); if (slab == NULL) return NULL; slab_create(slab, cache->order_max, cache->arena->slab_size); slab_poison(slab); slab_list_add(&cache->allocated, slab, next_in_cache); slab_list_add(list, slab, next_in_list); break; } } slab = rlist_shift_entry(&list->slabs, struct slab, next_in_list); if (slab->order != order) { /* * Do not "bill" the size of this slab to this * order, to prevent double accounting of the * same memory. */ list->stats.total -= slab->size; /* Get a slab of the right order. */ do { slab = slab_split(cache, slab); } while (slab->order != order); /* * Count the slab in this order. The buddy is * already taken care of by slab_split. */ cache->orders[slab->order].stats.total += slab->size; } slab_set_used(cache, slab); slab_assert(cache, slab); return slab; } struct slab * slab_get_large(struct slab_cache *cache, size_t size) { size += slab_sizeof(); if (quota_use(cache->arena->quota, size) < 0) return NULL; struct slab *slab = (struct slab *) malloc(size); if (slab == NULL) { quota_release(cache->arena->quota, size); return NULL; } slab_create(slab, cache->order_max + 1, size); slab_list_add(&cache->allocated, slab, next_in_cache); cache->allocated.stats.used += size; VALGRIND_MEMPOOL_ALLOC(cache, slab_data(slab), slab_capacity(slab)); return slab; } void slab_put_large(struct slab_cache *cache, struct slab *slab) { slab_assert(cache, slab); assert(slab->order == cache->order_max + 1); /* * Free a huge slab right away, we have no * further business to do with it. 
*/ size_t slab_size = slab->size; slab_list_del(&cache->allocated, slab, next_in_cache); cache->allocated.stats.used -= slab_size; quota_release(cache->arena->quota, slab_size); slab_poison(slab); VALGRIND_MEMPOOL_FREE(cache, slab_data(slab)); free(slab); return; } /** * Try to find a region of the requested order * in the cache. On failure, mmap() a new region, * optionally split it into a series of half. * Returns a next-power-of-two(size) aligned address * for all sizes below SLAB_SIZE_MAX. */ struct slab * slab_get(struct slab_cache *cache, size_t size) { uint8_t order = slab_order(cache, size + slab_sizeof()); if (order == cache->order_max + 1) return slab_get_large(cache, size); return slab_get_with_order(cache, order); } /** Return a slab back to the slab cache. */ void slab_put_with_order(struct slab_cache *cache, struct slab *slab) { slab_assert(cache, slab); assert(slab->order <= cache->order_max); /* An "ordered" slab is returned to the cache. */ slab_set_free(cache, slab); struct slab *buddy = slab_buddy(cache, slab); /* * The buddy slab could also have been split into a pair * of smaller slabs, the first of which happens to be * free. To not merge with a slab which is in fact * partially occupied, first check that slab orders match. * * A slab is not accounted in "used" or "total" counters * if it was split into slabs of a lower order. * cache->orders statistics only contains sizes of either * slabs returned by slab_get, or present in the free * list. This ensures that sums of cache->orders[i].stats * match the totals in cache->allocated.stats. 
*/ if (buddy && buddy->order == slab->order && slab_is_free(buddy)) { cache->orders[slab->order].stats.total -= slab->size; do { slab = slab_merge(cache, slab, buddy); buddy = slab_buddy(cache, slab); } while (buddy && buddy->order == slab->order && slab_is_free(buddy)); cache->orders[slab->order].stats.total += slab->size; } slab_poison(slab); if (slab->order == cache->order_max && !rlist_empty(&cache->orders[slab->order].slabs)) { /* * Largest slab should be returned to arena, but we do so * only if the slab cache has at least one slab of that size * in order to avoid oscillations. */ assert(slab->size == cache->arena->slab_size); slab_list_del(&cache->allocated, slab, next_in_cache); cache->orders[slab->order].stats.total -= slab->size; slab_unmap(cache->arena, slab); } else { /* Put the slab to the cache */ rlist_add_entry(&cache->orders[slab->order].slabs, slab, next_in_list); } } void slab_put(struct slab_cache *cache, struct slab *slab) { if (slab->order <= cache->order_max) return slab_put_with_order(cache, slab); return slab_put_large(cache, slab); } void slab_cache_check(struct slab_cache *cache) { size_t total = 0; size_t used = 0; size_t ordered = 0; size_t huge = 0; bool dont_panic = true; struct rlist *slabs = &cache->allocated.slabs; struct slab *slab; rlist_foreach_entry(slab, slabs, next_in_cache) { if (slab->magic != slab_magic) { fprintf(stderr, "%s: incorrect slab magic," " expected %d, got %d", __func__, slab_magic, slab->magic); dont_panic = false; } if (slab->order == cache->order_max + 1) { huge += slab->size; used += slab->size; total += slab->size; } else { if ((intptr_t) slab->size != slab_order_size(cache, slab->order)) { fprintf(stderr, "%s: incorrect slab size," " expected %zu, got %zu", __func__, slab_order_size(cache, slab->order), slab->size); dont_panic = false; } /* * The slab may have been reformatted * and split into smaller slabs, don't * trust slab->size. 
*/ total += slab_order_size(cache, cache->order_max); } } if (total != cache->allocated.stats.total) { fprintf(stderr, "%s: incorrect slab statistics, total %zu," " factual %zu\n", __func__, cache->allocated.stats.total, total); dont_panic = false; } struct slab_list *list; for (list = cache->orders; list <= cache->orders + cache->order_max; list++) { uint8_t order = slab_order_size(cache, list - cache->orders); ordered += list->stats.total; used += list->stats.used; if (list->stats.total % slab_order_size(cache, order)) { fprintf(stderr, "%s: incorrect order statistics, the" " total %zu is not multiple of slab size %zu\n", __func__, list->stats.total, slab_order_size(cache, order)); dont_panic = false; } if (list->stats.used % slab_order_size(cache, order)) { fprintf(stderr, "%s: incorrect order statistics, the" " used %zu is not multiple of slab size %zu\n", __func__, list->stats.used, slab_order_size(cache, order)); dont_panic = false; } } if (ordered + huge != total) { fprintf(stderr, "%s: incorrect totals, ordered %zu, " " huge %zu, total %zu\n", __func__, ordered, huge, total); dont_panic = false; } if (used != cache->allocated.stats.used) { fprintf(stderr, "%s: incorrect used total, " "total %zu, sum %zu\n", __func__, cache->allocated.stats.used, used); dont_panic = false; } if (dont_panic) return; abort(); } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/lsregion.c0000644000000000000000000000607413306562360021762 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lsregion.h" void * lsregion_alloc_slow(struct lsregion *lsregion, size_t size, int64_t id) { struct lslab *slab = NULL; size_t slab_size = lsregion->arena->slab_size; /* If there is an existing slab then try to use it. */ if (! rlist_empty(&lsregion->slabs.slabs)) { slab = rlist_last_entry(&lsregion->slabs.slabs, struct lslab, next_in_list); assert(slab != NULL); } if ((slab != NULL && size > lslab_unused(slab)) || slab == NULL) { if (size + lslab_sizeof() >= slab_size) { /* Large allocation, use malloc() */ slab_size = size + lslab_sizeof(); struct quota *quota = lsregion->arena->quota; if (quota_use(quota, slab_size) < 0) return NULL; slab = malloc(slab_size); if (slab == NULL) { quota_release(quota, slab_size); return NULL; } lslab_create(slab, slab_size); rlist_add_tail_entry(&lsregion->slabs.slabs, slab, next_in_list); lsregion->slabs.stats.total += slab_size; } else if (lsregion->cached != NULL) { /* If there is the cached slab then use it. 
*/ slab = lsregion->cached; lsregion->cached = NULL; rlist_add_tail_entry(&lsregion->slabs.slabs, slab, next_in_list); } else { slab = (struct lslab *) slab_map(lsregion->arena); if (slab == NULL) return NULL; lslab_create(slab, slab_size); rlist_add_tail_entry(&lsregion->slabs.slabs, slab, next_in_list); lsregion->slabs.stats.total += slab_size; } } assert(slab != NULL); assert(slab->max_id <= id); assert(size <= lslab_unused(slab)); void *res = lslab_pos(slab); slab->slab_used += size; /* Update the memory block meta info. */ assert(slab->max_id <= id); slab->max_id = id; lsregion->slabs.stats.used += size; return res; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/quota.h0000644000000000000000000001233513306562360021273 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_QUOTA_H #define INCLUDES_TARANTOOL_SMALL_QUOTA_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include /* ssize_t */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #define QUOTA_UNIT_SIZE 1024ULL static const size_t QUOTA_MAX = QUOTA_UNIT_SIZE * UINT32_MAX > SIZE_MAX ? SIZE_MAX - QUOTA_UNIT_SIZE + 1 : QUOTA_UNIT_SIZE * UINT32_MAX; /** A basic limit on memory usage */ struct quota { /** * High order dword is the total available memory * and the low order dword is the currently used amount. * Both values are represented in units of size * QUOTA_UNIT_SIZE. */ uint64_t value; }; /** * Initialize quota with a given memory limit */ static inline void quota_init(struct quota *quota, size_t total) { uint64_t new_total = (total + (QUOTA_UNIT_SIZE - 1)) / QUOTA_UNIT_SIZE; quota->value = new_total << 32; } /** * Get current quota limit */ static inline size_t quota_total(const struct quota *quota) { return (quota->value >> 32) * QUOTA_UNIT_SIZE; } /** * Get current quota usage */ static inline size_t quota_used(const struct quota *quota) { return (quota->value & UINT32_MAX) * QUOTA_UNIT_SIZE; } static inline void quota_get_total_and_used(struct quota *quota, size_t *total, size_t *used) { uint64_t value = quota->value; *total = (value >> 32) * QUOTA_UNIT_SIZE; *used = (value & UINT32_MAX) * QUOTA_UNIT_SIZE; } /** * Set quota memory limit. * @retval > 0 aligned size set on success * @retval -1 error, i.e. 
when it is not possible to decrease * limit due to greater current usage */ static inline ssize_t quota_set(struct quota *quota, size_t new_total) { assert(new_total <= QUOTA_MAX); /* Align the new total */ uint32_t new_total_in_units = (new_total + (QUOTA_UNIT_SIZE - 1)) / QUOTA_UNIT_SIZE; while (1) { uint64_t value = quota->value; uint32_t used_in_units = value & UINT32_MAX; if (new_total_in_units < used_in_units) return -1; uint64_t new_value = ((uint64_t) new_total_in_units << 32) | used_in_units; if (pm_atomic_compare_exchange_strong("a->value, &value, new_value)) break; } return new_total_in_units * QUOTA_UNIT_SIZE; } /** * Use up a quota * @retval > 0 aligned value on success * @retval -1 on error - if quota limit reached */ static inline ssize_t quota_use(struct quota *quota, size_t size) { if (size > QUOTA_MAX) return -1; uint32_t size_in_units = (size + (QUOTA_UNIT_SIZE - 1)) / QUOTA_UNIT_SIZE; assert(size_in_units); while (1) { uint64_t value = quota->value; uint32_t total_in_units = value >> 32; uint32_t used_in_units = value & UINT32_MAX; uint32_t new_used_in_units = used_in_units + size_in_units; assert(new_used_in_units > used_in_units); if (new_used_in_units > total_in_units) return -1; uint64_t new_value = ((uint64_t) total_in_units << 32) | new_used_in_units; if (pm_atomic_compare_exchange_strong("a->value, &value, new_value)) break; } return size_in_units * QUOTA_UNIT_SIZE; } /** Release used memory */ static inline ssize_t quota_release(struct quota *quota, size_t size) { assert(size < QUOTA_MAX); uint32_t size_in_units = (size + (QUOTA_UNIT_SIZE - 1)) / QUOTA_UNIT_SIZE; assert(size_in_units); while (1) { uint64_t value = quota->value; uint32_t total_in_units = value >> 32; uint32_t used_in_units = value & UINT32_MAX; assert(size_in_units <= used_in_units); uint32_t new_used_in_units = used_in_units - size_in_units; uint64_t new_value = ((uint64_t) total_in_units << 32) | new_used_in_units; if (pm_atomic_compare_exchange_strong("a->value, 
&value, new_value)) break; } return size_in_units * QUOTA_UNIT_SIZE; } #if defined(__cplusplus) } /* extern "C" { */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_SMALL_QUOTA_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/obuf.c0000644000000000000000000001415013306562360021065 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "obuf.h" #include #include "slab_cache.h" /** Allocate memory for a single iovec buffer. */ static inline void * obuf_alloc_pos(struct obuf *buf, size_t size) { int pos = buf->pos; assert(buf->capacity[pos] == 0 && buf->iov[pos].iov_len == 0); assert(pos < SMALL_OBUF_IOV_MAX); assert(buf->n_iov == pos); /** Initialize the next pos. 
*/ buf->iov[pos+1] = buf->iov[pos]; buf->capacity[pos+1] = buf->capacity[pos]; size_t capacity = buf->start_capacity << pos; while (capacity < size) { capacity = capacity == 0 ? buf->start_capacity: capacity * 2; } struct slab *slab = slab_get(buf->slabc, capacity); if (slab == NULL) return NULL; buf->iov[pos].iov_base = slab_data(slab); buf->capacity[pos] = slab_capacity(slab); buf->n_iov++; return buf->iov[pos].iov_base; } /** * Initialize an output buffer instance. Don't allocate memory * yet -- it may never be needed. */ void obuf_create(struct obuf *buf, struct slab_cache *slabc, size_t start_capacity) { buf->slabc = slabc; buf->n_iov = 0; buf->pos = 0; buf->used = 0; buf->start_capacity= start_capacity; buf->iov[0].iov_base = NULL; buf->iov[0].iov_len = 0; buf->capacity[0] = 0; } /** Mark an output buffer as empty. */ void obuf_reset(struct obuf *buf) { int iovcnt = obuf_iovcnt(buf); int i; for (i = 0; i < iovcnt; i++) buf->iov[i].iov_len = 0; buf->pos = 0; buf->used = 0; } void obuf_destroy(struct obuf *buf) { int i; for (i = 0; i < buf->n_iov; i++) { struct slab *slab = slab_from_data(buf->iov[i].iov_base); slab_put(buf->slabc, slab); } #ifndef NDEBUG obuf_create(buf, buf->slabc, buf->start_capacity); #endif } /** Add data to the output buffer. Copies the data. */ size_t obuf_dup(struct obuf *buf, const void *data, size_t size) { struct iovec *iov = &buf->iov[buf->pos]; size_t capacity = buf->capacity[buf->pos]; size_t to_copy = size; /** * @pre buf->pos points at an array of allocated buffers. * The array ends with a zero-initialized buffer. */ while (iov->iov_len + to_copy > capacity) { /* * The data doesn't fit into this buffer. * It could be because the buffer is not * allocated, is partially or completely full. * Copy as much as possible into already * allocated buffers. */ if (iov->iov_len < capacity) { /* * This buffer is allocated, but can't * fit all the data. Copy as much data as * possible. 
*/ size_t fill = capacity - iov->iov_len; assert(fill < to_copy); memcpy((char *) iov->iov_base + iov->iov_len, data, fill); iov->iov_len += fill; buf->used += fill; data = (char *) data + fill; to_copy -= fill; /* * Check if the remainder can fit * without allocations. */ } else if (capacity == 0) { /** * Still some data to copy. We have to get * a new buffer. Before we allocate * a buffer for this position, ensure * there is an unallocated buffer in the * next one, since it works as an end * marker for the loop above. */ if (obuf_alloc_pos(buf, to_copy) == NULL) return size - to_copy; break; } assert(capacity == iov->iov_len); if (buf->pos + 1 >= SMALL_OBUF_IOV_MAX) return size - to_copy; buf->pos++; iov = &buf->iov[buf->pos]; capacity = buf->capacity[buf->pos]; } memcpy((char *) iov->iov_base + iov->iov_len, data, to_copy); iov->iov_len += to_copy; buf->used += to_copy; assert(iov->iov_len <= buf->capacity[buf->pos]); return size; } void * obuf_reserve_slow(struct obuf *buf, size_t size) { struct iovec *iov = &buf->iov[buf->pos]; size_t capacity = buf->capacity[buf->pos]; if (iov->iov_len > 0) { /* Move to the next buffer. */ if (buf->pos + 1 >= SMALL_OBUF_IOV_MAX) return NULL; buf->pos++; iov = &buf->iov[buf->pos]; capacity = buf->capacity[buf->pos]; } assert(iov->iov_len == 0); /* Make sure the next buffer can store size. */ if (size > capacity) { if (capacity > 0) { /* Simply realloc. 
*/ while (capacity < size) capacity = capacity * 2; struct slab *slab = slab_get(buf->slabc, capacity); if (slab == NULL) return NULL; struct slab *old = slab_from_data(buf->iov[buf->pos].iov_base); slab_put(buf->slabc, old); buf->iov[buf->pos].iov_base = slab_data(slab); buf->capacity[buf->pos] = slab_capacity(slab); } else if (obuf_alloc_pos(buf, size) == NULL) { return NULL; } } assert(buf->iov[buf->pos].iov_len + size <= buf->capacity[buf->pos]); return (char*) buf->iov[buf->pos].iov_base + buf->iov[buf->pos].iov_len; } /** Forget about data in the output buffer beyond the savepoint. */ void obuf_rollback_to_svp(struct obuf *buf, struct obuf_svp *svp) { int iovcnt = obuf_iovcnt(buf); buf->pos = svp->pos; buf->iov[buf->pos].iov_len = svp->iov_len; buf->used = svp->used; int i; for (i = buf->pos + 1; i < iovcnt; i++) buf->iov[i].iov_len = 0; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/matras.h0000644000000000000000000002671013306562360021433 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_MATRAS_H #define INCLUDES_TARANTOOL_SMALL_MATRAS_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* {{{ Description */ /* * matras - Memory Address TRanSlation Allocator (Smile) * matras is as allocator, that provides aligned blocks of specified * size (N), and a 32-bit integer identifiers for * each returned block. Block identifiers grow incrementally * starting from 0. * * The block size (N) must be a power of 2 (checked by assert in * the debug build). matras can restore a pointer to the block * give block ID, so one can store such 32-bit ids instead of * storing pointers to blocks. * * Since block IDs grow incrementally from 0 and matras * instance stores the number of provided blocks, there is a * simple way to iterate over all provided blocks. * * Implementation * -------------- * To support block allocation, matras allocates extents of memory * by means of the supplied allocator, each extent having the same * size (M), M is a power of 2 and a multiple of N. * There is no way to free a single block, except the last one, * allocated, which happens to be the one with the largest ID. * Destroying a matras instance frees all allocated extents. * * Address translation * ------------------- * To implement 32-bit address space for block identifiers, * matras maintains a simple tree of address translation tables. * * * First N1 bits of the identifier denote a level 0 extend * id, which stores the address of level 1 extent. * * * Second N2 bits of block identifier stores the address * of a level 2 extent, which stores actual blocks. 
* * * The remaining N3 bits denote the block number * within the extent. * * Actual values of N1 and N2 are a function of block size B, * extent size M and sizeof(void *). * * To sum up, with a given N and M matras instance: * * 1) can provide not more than * pow(M / sizeof(void*), 2) * (M / N) blocks * * 2) costs 2 random memory accesses to provide a new block * or restore a block pointer from block id * * 3) has an approximate memory overhead of size (L * M) * * Of course, the integer type used for block id (matras_id_t, * usually is a typedef to uint32) also limits the maximum number * of objects that can be allocated by a single instance of matras. * * Versioning * ---------- * Starting from Tarantool 1.6, matras implements a way to create * a consistent read view of allocated data with * matras_create_read_view(). Once a read view is * created, the same block identifier can return two different * physical addresses in two views: the created view * and the current or latest view. Multiple read views can be * created. To work correctly with possibly existing read views, * the application must inform matras that data in a block is about to * change, using matras_touch() call. Only a block which belong to * the current, i.e. latest, view, can be changed: created * views are immutable. * * The implementation of read views is based on copy-on-write * technique, which is cheap enough as long as not too many * objects have to be touched while a view exists. * Another important property of the copy-on-write mechanism is * that whenever a write occurs, the writer pays the penalty * and copies the block to a new location, and gets a new physical * address for the same block id. The reader keeps using the * old address. This makes it possible to access the * created read view in a concurrent thread, as long as this * thread is created after the read view itself is created. 
*/ /* }}} */ #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Type of a block ID. */ #ifdef WIN32 typedef unsigned __int32 matras_id_t; #else typedef uint32_t matras_id_t; #endif /** * Type of the extent allocator (the allocator for regions * of size M). Is allowed to return NULL, but is not allowed * to throw an exception */ typedef void *(*matras_alloc_func)(void *ctx); typedef void (*matras_free_func)(void *ctx, void *ptr); /** * sruct matras_view represents appropriate mapping between * block ID and it's pointer. * matras structure has one main read/write view, and a number * of user created read-only views. */ struct matras_view { /* root extent of the view */ void *root; /* block count in the view */ matras_id_t block_count; /* all views are linked into doubly linked list */ struct matras_view *prev_view, *next_view; }; /** * matras - memory allocator of blocks of equal * size with support of address translation. */ struct matras { /* Main read/write view of the matras */ struct matras_view head; /* Block size (N) */ matras_id_t block_size; /* Extent size (M) */ matras_id_t extent_size; /* Numberof allocated extents */ matras_id_t extent_count; /* binary logarithm of maximum possible created blocks count */ matras_id_t log2_capacity; /* See "Shifts and masks explanation" below */ matras_id_t shift1, shift2; /* See "Shifts and masks explanation" below */ matras_id_t mask1, mask2; /* External extent allocator */ matras_alloc_func alloc_func; /* External extent deallocator */ matras_free_func free_func; /* Argument passed to extent allocator */ void *alloc_ctx; }; /* * "Shifts and masks explanation" * For 3-level matras (L = 3), as claimed above, block ID consist of * three parts (N1, N2 and N3). * In order to optimize splitting ID, several masks and shifts * are precalculated during matras initialization. 
* Heres is an example block ID bits, high order bits first: * ID : 0 0 0 0 N1 N1 N1 N1 N2 N2 N2 N2 N3 N3 N3 N3 N3 * mask1: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 * mask1: 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 * <---------shift1--------> * <---shift2--> * When defined in such way, one can simply split ID to N1, N2 and N3: * N1 = ID >> shift1 * N2 = (ID & mask1) >> shift2 * N3 = ID & mask2 */ /* * matras API declaration */ /** * Initialize an empty instance of pointer translating * block allocator. Does not allocate memory. */ void matras_create(struct matras *m, matras_id_t extent_size, matras_id_t block_size, matras_alloc_func alloc_func, matras_free_func free_func, void *alloc_ctx); /** * Free all memory used by an instance of matras and * reinitialize it. * Identical to matras_destroy(m); matras_create(m, ...); */ void matras_reset(struct matras *m); /** * Free all memory used by an instance of matras. */ void matras_destroy(struct matras *m); /** * Allocate a new block. Return both, block pointer and block * id. * * @retval NULL failed to allocate memory */ void * matras_alloc(struct matras *m, matras_id_t *id); /* * Deallocate last block (block with maximum ID) */ void matras_dealloc(struct matras *m); /** * Allocate a range_count of blocks. Return both, first block pointer * and first block id. This method only works if current number of blocks and * number of blocks in one extent are divisible by range_count. * range_count must also be less or equal to number of blocks in one extent. * * @retval NULL failed to allocate memory */ void * matras_alloc_range(struct matras *m, matras_id_t *id, matras_id_t range_count); /* * Deallocate last range_count of blocks (blocks with maximum ID) * This method only works if current number of blocks and * number of blocks in one extent are divisible by range_count. * range_count must also be less or equal to number of blocks in one extent. 
*/ void matras_dealloc_range(struct matras *m, matras_id_t range_count); /** * Convert block id into block address. */ static void * matras_get(const struct matras *m, matras_id_t id); /** * Convert block id of a specified version into block address. */ static void * matras_view_get(const struct matras *m, const struct matras_view *v, matras_id_t id); /* * Getting number of allocated extents (of size extent_size each) */ matras_id_t matras_extent_count(const struct matras *m); /* * Connect read view to the matras so that it is always connected with main, * "head" read view. Such a read view does not consume any resources and * should not be destroyed. */ static void matras_head_read_view(struct matras_view *v); /* * Create new read view. */ void matras_create_read_view(struct matras *m, struct matras_view *v); /* * Delete a read view. */ void matras_destroy_read_view(struct matras *m, struct matras_view *v); /* * Determine if the read view is created. * @return 1 if the read view was created with matras_create_read_view * @return 0 if the read view was initialized with matras_head_read_view */ static int matras_is_read_view_created(struct matras_view *v); /* * Notify matras that memory at given ID will be changed. * Returns (perhaps new) address of memory associated with that block. * Returns NULL on memory error * Only needed (and does any work) if some versions are used. */ void * matras_touch(struct matras *m, matras_id_t id); /* * matras_head_read_view implementation. */ static inline void matras_head_read_view(struct matras_view *v) { v->next_view = 0; } /* * matras_is_read_view_created implementation. */ static inline int matras_is_read_view_created(struct matras_view *v) { return v->next_view ? 
1 : 0; } /** * Common part of matras_view_get and matras_get */ static inline void * matras_view_get_no_check(const struct matras *m, const struct matras_view *v, matras_id_t id) { assert(id < v->block_count); /* see "Shifts and masks explanation" for details */ matras_id_t n1 = id >> m->shift1; matras_id_t n2 = (id & m->mask1) >> m->shift2; matras_id_t n3 = (id & m->mask2); char ***extent = (char ***)v->root; return &extent[n1][n2][n3 * m->block_size]; } /** * matras_view_get definition */ static inline void * matras_view_get(const struct matras *m, const struct matras_view *v, matras_id_t id) { return matras_view_get_no_check(m, v->next_view ? v : &m->head, id); } /** * matras_get definition */ static inline void * matras_get(const struct matras *m, matras_id_t id) { return matras_view_get_no_check(m, &m->head, id); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_SMALL_MATRAS_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/obuf.h0000644000000000000000000001577413306562360021107 0ustar rootroot#ifndef TARANTOOL_SMALL_OBUF_H_INCLUDED #define TARANTOOL_SMALL_OBUF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { SMALL_OBUF_IOV_MAX = 31 }; struct slab_cache; /** * Output buffer savepoint. It's possible to * save the current buffer state in a savepoint * and roll back to the saved state at any time * before obuf_reset() */ struct obuf_svp { size_t pos; size_t iov_len; size_t used; }; /** * Reset a savepoint so that it points to the beginning * of an output buffer. */ static inline void obuf_svp_reset(struct obuf_svp *svp) { svp->pos = 0; svp->iov_len = 0; svp->used = 0; } /** * An output buffer is a vector of struct iovec * for writev(). * Each iovec buffer is allocated using slab allocator. * Buffer size grows by a factor of 2. With this growth factor, * the number of used buffers is unlikely to ever exceed the * hard limit of SMALL_OBUF_IOV_MAX. If it does, an exception is * raised. */ struct obuf { struct slab_cache *slabc; /** Position of the "current" iovec. */ int pos; /* The number of allocated iov instances. */ int n_iov; /* How many bytes are in the buffer. */ size_t used; /** * iov[0] size (allocations are normally a multiple of this number), * but can be larger if a large chunk is requested by * obuf_reserve(). */ size_t start_capacity; /** How many bytes are actually allocated for each iovec. */ size_t capacity[SMALL_OBUF_IOV_MAX + 1]; /** * List of iovec vectors, each vector is at least twice * as big as the previous one. 
The vector following the * last allocated one is always zero-initialized * (iov_base = NULL, iov_len = 0). */ struct iovec iov[SMALL_OBUF_IOV_MAX + 1]; }; void obuf_create(struct obuf *buf, struct slab_cache *slabc, size_t start_capacity); void obuf_destroy(struct obuf *buf); void obuf_reset(struct obuf *buf); /** How many bytes are in the output buffer. */ static inline size_t obuf_size(struct obuf *obuf) { return obuf->used; } /** The size of iov vector in the buffer. */ static inline int obuf_iovcnt(struct obuf *buf) { return buf->iov[buf->pos].iov_len > 0 ? buf->pos + 1 : buf->pos; } /** * Slow path of obuf_reserve(), which actually reallocates * memory and moves data if necessary. */ void * obuf_reserve_slow(struct obuf *buf, size_t size); /** * \brief Ensure \a buf to have at least \a size bytes of contiguous memory * for write and return a point to this chunk. * After write please call obuf_advance(wsize) where wsize <= size to advance * a write position. * \param buf * \param size * \return a pointer to contiguous chunk of memory */ static inline void * obuf_reserve(struct obuf *buf, size_t size) { if (buf->iov[buf->pos].iov_len + size > buf->capacity[buf->pos]) return obuf_reserve_slow(buf, size); struct iovec *iov = &buf->iov[buf->pos]; return (char *) iov->iov_base + iov->iov_len; } /** * \brief Advance write position after using obuf_reserve() * \param buf * \param size * \sa obuf_reserve */ static inline void * obuf_alloc(struct obuf *buf, size_t size) { struct iovec *iov = &buf->iov[buf->pos]; void *ptr; if (iov->iov_len + size <= buf->capacity[buf->pos]) { ptr = (char *) iov->iov_base + iov->iov_len; } else { ptr = obuf_reserve_slow(buf, size); if (ptr == NULL) return NULL; iov = &buf->iov[buf->pos]; assert(iov->iov_len <= buf->capacity[buf->pos]); } iov->iov_len += size; buf->used += size; return ptr; } /** Append data to the output buffer. 
*/ size_t obuf_dup(struct obuf *buf, const void *data, size_t size); static inline size_t obuf_capacity(struct obuf *buf) { /** This is an approximation, see obuf_alloc_pos() */ return buf->capacity[buf->n_iov ? buf->n_iov - 1 : 0] * 2; } static inline struct obuf_svp obuf_create_svp(struct obuf *buf) { struct obuf_svp svp; svp.pos = buf->pos; svp.iov_len = buf->iov[buf->pos].iov_len; svp.used = buf->used; return svp; } /** Forget anything added to output buffer after the savepoint. */ void obuf_rollback_to_svp(struct obuf *buf, struct obuf_svp *svp); /** Convert a savepoint position to a pointer in the buffer. */ static inline void * obuf_svp_to_ptr(struct obuf *buf, struct obuf_svp *svp) { return (char *) buf->iov[svp->pos].iov_base + svp->iov_len; } static inline void * obuf_reserve_cb(void *ctx, size_t *size) { struct obuf *buf = (struct obuf *) ctx; void *ptr = obuf_reserve(buf, *size); *size = buf->capacity[buf->pos] - buf->iov[buf->pos].iov_len; return ptr; } static inline void * obuf_alloc_cb(void *ctx, size_t size) { return obuf_alloc((struct obuf *) ctx, size); } #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" static inline void * obuf_reserve_xc(struct obuf *buf, size_t size) { void *ptr = obuf_reserve(buf, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "obuf", "reserve"); return ptr; } static inline void * obuf_reserve_xc_cb(void *ctx, size_t *size) { void *ptr = obuf_reserve_cb(ctx, size); if (ptr == NULL) tnt_raise(OutOfMemory, *size, "obuf", "reserve"); return ptr; } static inline void * obuf_alloc_xc(struct obuf *buf, size_t size) { void *ptr = obuf_alloc(buf, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "obuf", "alloc"); return ptr; } static inline void * obuf_alloc_xc_cb(void *ctx, size_t size) { void *ptr = obuf_alloc_cb(ctx, size); if (ptr == NULL) tnt_raise(OutOfMemory, size, "obuf", "alloc"); return ptr; } static inline void obuf_dup_xc(struct obuf *buf, const void *data, size_t size) { if (obuf_dup(buf, data, 
size) != size) tnt_raise(OutOfMemory, size, "obuf", "dup"); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_SMALL_OBUF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/matras.c0000644000000000000000000002634413306562360021431 0ustar rootroot/* * matras implementation */ #include "matras.h" #include #include #include #ifdef WIN32 #include #pragma intrinsic (_BitScanReverse) #endif /* * Binary logarithm of value (exact if the value is a power of 2, * approximate (floored) otherwise) */ static matras_id_t matras_log2(matras_id_t val) { assert(val > 0); #ifdef WIN32 unsigned long res = 0; unsigned char nonzero = _BitScanReverse(&res, val); assert(nonzero); (void)nonzero; return (matras_id_t)res; #else return sizeof(unsigned int) * CHAR_BIT - __builtin_clz((unsigned int) val) - 1; #endif } /** * Initialize an empty instance of pointer translating * block allocator. Does not allocate memory. */ void matras_create(struct matras *m, matras_id_t extent_size, matras_id_t block_size, matras_alloc_func alloc_func, matras_free_func free_func, void *alloc_ctx) { /*extent_size must be power of 2 */ assert((extent_size & (extent_size - 1)) == 0); /*block_size must be power of 2 */ assert((block_size & (block_size - 1)) == 0); /*block must be not greater than the extent*/ assert(block_size <= extent_size); /*extent must be able to store at least two records*/ assert(extent_size > sizeof(void *)); m->head.block_count = 0; m->head.prev_view = 0; m->head.next_view = 0; m->block_size = block_size; m->extent_size = extent_size; m->extent_count = 0; m->alloc_func = alloc_func; m->free_func = free_func; m->alloc_ctx = alloc_ctx; matras_id_t log1 = matras_log2(extent_size); matras_id_t log2 = matras_log2(block_size); matras_id_t log3 = matras_log2(sizeof(void *)); m->log2_capacity = log1 * 3 - log2 - log3 * 2; m->shift1 = log1 * 2 - log2 - log3; m->shift2 = log1 - log2; m->mask1 = (((matras_id_t)1) << m->shift1) - ((matras_id_t)1); m->mask2 = (((matras_id_t)1) << 
m->shift2) - ((matras_id_t)1); } /** * Free all memory used by an instance of matras and * reinitialize it. * Identical to matras_destroy(m); matras_create(m, ...); */ void matras_reset(struct matras *m) { matras_destroy(m); m->head.block_count = 0; } /** * Helper functions for allocating new extent and incrementing extent counter */ static inline void * matras_alloc_extent(struct matras *m) { void *ext = m->alloc_func(m->alloc_ctx); if (ext) m->extent_count++; return ext; } /** * Helper functions for allocating new extent and incrementing extent counter */ static inline void matras_free_extent(struct matras *m, void *ext) { m->free_func(m->alloc_ctx, ext); m->extent_count--; } /** * Free all memory used by an instance of matras. */ void matras_destroy(struct matras *m) { while (m->head.prev_view) matras_destroy_read_view(m, m->head.prev_view); if (m->head.block_count == 0) return; matras_id_t step1 = m->mask1 + 1; matras_id_t step2 = m->mask2 + 1; matras_id_t i1 = 0, j1 = 0, i2, j2; matras_id_t ptrs_in_ext = m->extent_size / (matras_id_t)sizeof(void *); struct matras_view *v = &m->head; void **extent1 = (void **)v->root; for (; j1 < v->block_count; i1++, j1 += step1) { void **extent2 = (void **)extent1[i1]; for (i2 = j2 = 0; i2 < ptrs_in_ext && j1 + j2 < v->block_count; i2++, j2 += step2) { void **extent3 = (void **)extent2[i2]; matras_free_extent(m, extent3); } matras_free_extent(m, extent2); } matras_free_extent(m, extent1); assert(m->extent_count == 0); } /** * Allocate a new block. Return both, block pointer and block * id. * * @retval NULL failed to allocate memory */ void * matras_alloc(struct matras *m, matras_id_t *result_id) { assert(m->head.block_count == 0 || matras_log2(m->head.block_count) < m->log2_capacity); /* Current block_count is the ID of new block */ matras_id_t id = m->head.block_count; /* See "Shifts and masks explanation" for details */ /* Additionally we determine if we must allocate extents. 
* Basically, * if n1 == 0 && n2 == 0 && n3 == 0, we must allocate root extent, * if n2 == 0 && n3 == 0, we must allocate second level extent, * if n3 == 0, we must allocate third level extent. * Optimization: * (n1 == 0 && n2 == 0 && n3 == 0) is identical to (id == 0) * (n2 == 0 && n3 == 0) is identical to (id & mask1 == 0) */ matras_id_t extent1_available = id; matras_id_t n1 = id >> m->shift1; id &= m->mask1; matras_id_t extent2_available = id; matras_id_t n2 = id >> m->shift2; id &= m->mask2; matras_id_t extent3_available = id; matras_id_t n3 = id; void **extent1, **extent2; char *extent3; if (extent1_available) { extent1 = (void **)m->head.root; } else { extent1 = (void **)matras_alloc_extent(m); if (!extent1) return 0; m->head.root = (void *)extent1; } if (extent2_available) { extent2 = (void **)extent1[n1]; } else { extent2 = (void **)matras_alloc_extent(m); if (!extent2) { if (!extent1_available) /* was created */ matras_free_extent(m, extent1); return 0; } extent1[n1] = (void *)extent2; } if (extent3_available) { extent3 = (char *)extent2[n2]; } else { extent3 = (char *)matras_alloc_extent(m); if (!extent3) { if (!extent1_available) /* was created */ matras_free_extent(m, extent1); if (!extent2_available) /* was created */ matras_free_extent(m, extent2); return 0; } extent2[n2] = (void *)extent3; } *result_id = m->head.block_count++; return (void *)(extent3 + n3 * m->block_size); } /* * Deallocate last block (block with maximum ID) */ void matras_dealloc(struct matras *m) { assert(m->head.block_count); matras_id_t id = m->head.block_count - 1; matras_touch(m, id); m->head.block_count = id; /* Current block_count is the ID of deleting block */ /* See "Shifts and masks explanation" for details */ /* Deleting extents in same way (but reverse order) like in matras_alloc * See matras_alloc for details. 
*/ bool extent1_free = !id; matras_id_t n1 = id >> m->shift1; id &= m->mask1; bool extent2_free = !id; matras_id_t n2 = id >> m->shift2; id &= m->mask2; bool extent3_free = !id; if (extent1_free || extent2_free || extent3_free) { void **extent1, **extent2, *extent3; extent1 = (void **)m->head.root; extent2 = (void **)extent1[n1]; extent3 = extent2[n2]; if (extent3_free) matras_free_extent(m, extent3); if (extent2_free) matras_free_extent(m, extent2); if (extent1_free) matras_free_extent(m, extent1); } } /** * Allocate a range_count of blocks. Return both, first block pointer * and first block id. This method only works if current number of blocks and * number of blocks in one extent are divisible by range_count. * range_count must also be less or equal to number of blocks in one extent. * * @retval NULL failed to allocate memory */ void * matras_alloc_range(struct matras *m, matras_id_t *id, matras_id_t range_count) { assert(m->head.block_count % range_count == 0); assert(m->extent_size / m->block_size % range_count == 0); void *res = matras_alloc(m, id); if (res) m->head.block_count += (range_count - 1); return res; } /* * Deallocate last range_count of blocks (blocks with maximum ID) * This method only works if current number of blocks and * number of blocks in one extent are divisible by range_count. * range_count must also be less or equal to number of blocks in one extent. */ void matras_dealloc_range(struct matras *m, matras_id_t range_count) { assert(m->head.block_count % range_count == 0); assert(m->extent_size / m->block_size % range_count == 0); m->head.block_count -= (range_count - 1); matras_dealloc(m); } /** * Return the number of allocated extents (of size m->extent_size each) */ matras_id_t matras_extent_count(const struct matras *m) { return m->extent_count; } /* * Create new read view. 
*/ void matras_create_read_view(struct matras *m, struct matras_view *v) { *v = m->head; v->next_view = &m->head; m->head.prev_view = v; if (v->prev_view) v->prev_view->next_view = v; } /* * Delete a read view. */ void matras_destroy_read_view(struct matras *m, struct matras_view *v) { assert(v != &m->head); if (!v->next_view) return; struct matras_view *next_view = v->next_view; struct matras_view *prev_view = v->prev_view; next_view->prev_view = prev_view; if (prev_view) prev_view->next_view = next_view; v->next_view = 0; if (v->block_count == 0) return; if (v->root == next_view->root && next_view->block_count) return; if (prev_view && v->root == prev_view->root && prev_view->block_count) return; void **extent1 = (void **)v->root; void **extent1n = (void **) next_view->root; void **extent1p = 0; if (prev_view) extent1p = (void **) prev_view->root; matras_id_t step1 = m->mask1 + 1; matras_id_t step2 = m->mask2 + 1; matras_id_t i1 = 0, j1 = 0, i2, j2; matras_id_t ptrs_in_ext = m->extent_size / (matras_id_t)sizeof(void *); for (; j1 < v->block_count; i1++, j1 += step1) { void **extent2 = (void **)extent1[i1]; void **extent2n = 0; void **extent2p = 0; if (next_view->block_count > j1) { if (extent1[i1] == extent1n[i1]) continue; extent2n = (void **) extent1n[i1]; } if (prev_view && prev_view->block_count > j1) { if (extent1[i1] == extent1p[i1]) continue; extent2p = (void **) extent1p[i1]; } for (i2 = j2 = 0; i2 < ptrs_in_ext && j1 + j2 < v->block_count; i2++, j2 += step2) { void **extent3 = (void **)extent2[i2]; if (next_view->block_count > j1 + j2) { if (extent2[i2] == extent2n[i2]) continue; } if (prev_view && prev_view->block_count > j1 + j2) { if (extent2[i2] == extent2p[i2]) continue; } matras_free_extent(m, extent3); } matras_free_extent(m, extent2); } matras_free_extent(m, extent1); } /* * Notify matras that memory at given ID will be changed. * Returns (perhaps new) address of memory associated with that block. 
* Returns NULL on memory error * Only needed (and does any work) if some versions are used. */ void * matras_touch(struct matras *m, matras_id_t id) { assert(id < m->head.block_count); if (!m->head.prev_view) return matras_get(m, id); if (m->head.prev_view->block_count) { matras_id_t extent_id = id >> m->shift2; matras_id_t next_last_id = m->head.prev_view->block_count - 1; matras_id_t next_last_extent_id = next_last_id >> m->shift2; if (extent_id > next_last_extent_id) return matras_get(m, id); } else { return matras_get(m, id); } /* see "Shifts and masks explanation" for details */ matras_id_t n1 = id >> m->shift1; matras_id_t n2 = (id & m->mask1) >> m->shift2; matras_id_t n3 = id & m->mask2; void **extent1 = (void **)m->head.root; void **extent1p = (void **)m->head.prev_view->root; if (extent1 == extent1p) { void *new_extent = matras_alloc_extent(m); if (!new_extent) return 0; memcpy(new_extent, extent1, m->extent_size); m->head.root = new_extent; extent1 = (void **)new_extent; } void **extent2 = (void **)extent1[n1]; void **extent2p = (void **)extent1p[n1]; if (extent2 == extent2p) { void *new_extent = matras_alloc_extent(m); if (!new_extent) return 0; memcpy(new_extent, extent2, m->extent_size); extent1[n1] = new_extent; extent2 = (void **)new_extent; } char *extent3 = (char *)extent2[n2]; char *extent3p = (char *)extent2p[n2]; if (extent3 == extent3p) { void *new_extent = matras_alloc_extent(m); if (!new_extent) return 0; memcpy(new_extent, extent3, m->extent_size); extent2[n2] = new_extent; extent3 = (char *)new_extent; } return &extent3[n3 * m->block_size]; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/rb.h0000644000000000000000000016641013306562360020551 0ustar rootroot/*- ******************************************************************************* * * cpp macro implementation of left-leaning 2-3 red-black trees. 
Parent * pointers are not used, and color bits are stored in the least significant * bit of right-child pointers (if RB_COMPACT is defined), thus making node * linkage as compact as is possible for red-black trees. * * Usage: * * #include * #include * #define NDEBUG // (Optional, see assert(3).) * #include * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) * #define RB_CMP_TREE_ARG // (Optional, passes tree to comparators) * #include * ... * ******************************************************************************* */ #ifndef RB_H_ #define RB_H_ #if 0 __FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $"); #endif #ifdef RB_CMP_TREE_ARG #define RB_CMP_ARG rbtree, #else #define RB_CMP_ARG #endif #ifdef RB_COMPACT /* Node structure. */ #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else #define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ bool rbn_red; \ } #endif /* Root structure. */ #define rb_tree(a_type, ...) \ struct { \ a_type *rbt_root; \ __VA_ARGS__ \ } enum { RB_WALK_LEFT = (1 << 0), RB_WALK_RIGHT = (1 << 1), }; /* * Max height of the tree which can be iterated over. * The tree can have no more nodes than x86_64 has distinct * addresses. */ #define RB_MAX_TREE_HEIGHT 48 /* Left accessors. */ #define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) #define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. 
*/ #define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) #else /* Right accessors. */ #define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) #define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ #define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) #define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) #define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) #define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) #endif /* Node initializer. */ #define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ rbtn_left_set(a_type, a_field, (a_node), NULL); \ rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) /* Tree initializer. */ #define rb_new(a_type, a_field, a_rbt) do { \ (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. 
*/ #define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; \ rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ if ((r_node) != NULL) { \ for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) #define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ } while (0) #define rbtn_augment(a_type, a_field, a_rbt, a_node, a_aug) do { \ a_type *left_ = rbtn_left_get(a_type, a_field, (a_node)); \ a_type *right_ = rbtn_right_get(a_type, a_field, (a_node)); \ a_aug((a_node), left_, right_); \ } while (0) #define rbtn_augment_propagate(a_type, a_field, a_rbt, \ a_path, a_from, a_aug) do { \ __typeof__(a_from) pathp_; \ for (pathp_ = (a_from); pathp_ >= (a_path); pathp_--) \ rbtn_augment(a_type, a_field, (a_rbt), pathp_->node, a_aug); \ } while (0) /* Iterator path population */ #define rbtn_iter_go_left_down(a_type, a_field, node, it) do { \ a_type *cur = (node); \ do { \ assert((it)->count < RB_MAX_TREE_HEIGHT); \ (it)->path[(it)->count++] = cur; \ cur = rbtn_left_get(a_type, a_field, (cur)); \ } while (cur != NULL); \ } while (0) #define rbtn_iter_go_right_down(a_type, a_field, node, it) do { \ a_type *cur = (node); \ do{ \ assert((it)->count < RB_MAX_TREE_HEIGHT); \ (it)->path[(it)->count++] 
= cur; \ cur = rbtn_right_get(a_type, a_field, (cur)); \ } while (cur != NULL); \ } while (0) /* Traverse up the search path to the first parent on the *left*. */ #define rbtn_iter_go_left_up(a_type, a_field, it) do { \ while(--(it)->count > 0) { \ if (rbtn_left_get(a_type, a_field, \ (it)->path[(it)->count - 1]) != \ (it)->path[(it)->count]) { \ break; \ } \ } \ } while (0) /* Traverse up the search path to the first parent on the *right*. */ #define rbtn_iter_go_right_up(a_type, a_field, it) do { \ while(--(it)->count > 0) { \ if (rbtn_right_get(a_type, a_field, \ (it)->path[(it)->count - 1]) != \ (it)->path[(it)->count]) { \ break; \ } \ } \ } while (0) /* * The rb_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to rb_gen(). */ #define rb_proto_ext_key(a_attr, a_prefix, a_rbt_type, a_type, a_key) \ struct a_prefix##iterator; \ struct a_prefix##walk; \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, a_key key); \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, a_key key); \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, a_key key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##iterator_get(struct a_prefix##iterator *it); \ a_attr bool \ a_prefix##icreate(a_rbt_type *rbtree, a_type *node, \ struct a_prefix##iterator *it); \ a_attr void \ a_prefix##ifirst(a_rbt_type *rbtree, struct a_prefix##iterator *it); \ a_attr void \ a_prefix##ilast(a_rbt_type *rbtree, struct a_prefix##iterator *it); \ 
a_attr a_type * \ a_prefix##inext(struct a_prefix##iterator *it); \ a_attr a_type * \ a_prefix##iprev(struct a_prefix##iterator *it); \ a_attr bool \ a_prefix##isearch(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it); \ a_attr void \ a_prefix##isearch_le(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it); \ a_attr void \ a_prefix##isearch_ge(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it); \ a_attr void \ a_prefix##isearch_lt(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it); \ a_attr void \ a_prefix##isearch_gt(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it); \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ a_attr void \ a_prefix##walk_init(struct a_prefix##walk *it, a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##walk_next(struct a_prefix##walk *it, int dir, \ a_type **r_left, a_type **r_right); #define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ rb_proto_ext_key(a_attr, a_prefix, a_rbt_type, a_type, a_type *) /* * The rb_gen() macro generates a type-specific red-black tree implementation, * based on the above cpp macros. * * Arguments: * * a_attr : Function attribute for generated functions (ex: static). * a_prefix : Prefix for generated functions (ex: ex_). * a_rb_type : Type for red-black tree data structure (ex: ex_t). * a_type : Type for red-black tree node data structure (ex: ex_node_t). * a_field : Name of red-black tree node linkage (ex: ex_link). 
* a_cmp : Node comparison function name, with the following prototype: * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key * Interpretation of comparision function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other * In all cases, the a_node or a_key macro argument is the first * argument to the comparison function, which makes it possible * to write comparison functions that treat the first argument * specially. * * Assuming the following setup: * * typedef struct ex_node_s ex_node_t; * struct ex_node_s { * rb_node(ex_node_t) ex_link; * }; * typedef rb_tree(ex_node_t) ex_t; * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) * * The following API is generated: * * static void * ex_new(ex_t *tree); * Description: Initialize a red-black tree structure. * Args: * tree: Pointer to an uninitialized red-black tree object. * * static bool * ex_empty(ex_t *tree); * Description: Determine whether tree is empty. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: True if tree is empty, false otherwise. * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * * ex_last(ex_t *tree); * Description: Get the first/last node in tree. * Args: * tree: Pointer to an initialized red-black tree object. * Ret: First/last node in tree, or NULL if tree is empty. * * static ex_node_t * * ex_next(ex_t *tree, ex_node_t *node); * static ex_node_t * * ex_prev(ex_t *tree, ex_node_t *node); * Description: Get node's successor/predecessor. * Args: * tree: Pointer to an initialized red-black tree object. * node: A node in tree. * Ret: node's successor/predecessor in tree, or NULL if node is * last/first. * * static ex_node_t * * ex_search(ex_t *tree, ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or NULL if no match. 
* * Let's explain the following searching functions with an example. * Assume we have a following set of keys: * ((1,2), (1,3), (2,1), (2,2), (2,3), (3,1), (3,2)) * The comparison function is natural: first it compares the first index, * then the second. * static ex_node_t * * ex_nsearch(ex_t *tree, ex_node_t *key); * static ex_node_t * * ex_psearch(ex_t *tree, ex_node_t *key); * Description: If a match is found, it's the minimal/maximal * among the matching keys. * If no match is found, return what would be * key's successor/predecessor, were key in tree. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * Ret: Node in tree that matches key, or if no match, hypothetical node's * successor/predecessor (NULL if no successor/predecessor). * In our example: * _nsearch(2)=(2,3) _nsearch(0)=(1,2) _nsearch(4)=nil * _psearch(2)=(2,1) _psearch(0)=nil _psearch(4)=(3,2) * * static void * ex_insert(ex_t *tree, ex_node_t *node); * Description: Insert node into tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node to be inserted into tree. * * static void * ex_remove(ex_t *tree, ex_node_t *node); * Description: Remove node from tree. * Args: * tree: Pointer to an initialized red-black tree object. * node: Node in tree to be removed. * * static ex_node_t * * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * static ex_node_t * * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, * ex_node_t *, void *), void *arg); * Description: Iterate forward/backward over tree, starting at node. If * tree is modified, iteration must be immediately * terminated by the callback function that causes the * modification. * Args: * tree : Pointer to an initialized red-black tree object. * start: Node at which to start iteration, or NULL to start at * first/last node. * cb : Callback function, which is called for each node during * iteration. 
Under normal circumstances the callback function * should return NULL, which causes iteration to continue. If a * callback function returns non-NULL, iteration is immediately * terminated and the non-NULL return value is returned by the * iterator. This is useful for re-starting iteration after * modifying tree. * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. * * The following iterator API is generated: * * struct ex_iterator; * Description: A context for iteration over the tree. * * static ex_node * * ex_iterator_get(ex_iterator *it) * Description: Get the value the iterator points to. * Args: * it : Pointer to initialized iterator * Ret: NULL if iterator points to nothing, else value. * static bool * ex_icreate(ex_t *tree, ex_node_t *node, ex_iterator *it); * Description: Create an iterator that corresponds to the node. * Args: * tree : Pointer to an initialized red-black tree object. * node : Pointer to a node at which to start iteration. * it : Pointer to an uninitialized iterator. * Ret: true if the matching node is found, false otherwise. * * static void * ex_ifirst(ex_t *tree, ex_iterator *it); * Description: Position iterator to the first node of the tree. * Args: * tree : Pointer to an initialized red-black tree object. * it : Pointer to an uninitialized iterator. * * static void * ex_ilast(ex_t *tree, ex_iterator *it); * Description: Position iterator to the last node of the tree. * Args: * tree : Pointer to an initialized red-black tree object. * it : Pointer to an uninitialized iterator. * * static ex_node_t * * ex_inext(ex_iterator *it); * Description: Iterate to the next node. Changes the iterator. * Args: * it : Pointer to an initialized iterator. * Ret: NULL if iteration ends; non-NULL node, * at which the current iterator points otherwise. * * static ex_node_t * * ex_iprev(ex_iterator *it); * Description: Iterate to the previous node. 
Changes the iterator. * Args: * it : Pointer to an initialized iterator. * Ret: NULL if iteration ends; non-NULL node, * at which the current iterator points otherwise. * * static bool * ex_isearch(ex_t *tree, ex_key key, ex_iterator *it); * Description: Search for node that matches key and * set \a it to point to this node. * Args: * tree : Pointer to an initialized red-black tree object. * key : key to find a node at which the iterator would * be set. * it : Pointer to an uninitialized iterator. * Ret: true if found, false if not. * * static void * ex_isearch_le(ex_t *tree, ex_node_t *key, ex_iterator *it); * Description: Search for the closest node which is less * than or equal to the key, set \a it to point * to this node. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * it : Pointer to an uninitialized iterator. * * static void * ex_isearch_ge(ex_t *tree, ex_node_t *key, ex_iterator *it); * Description: Search for the closest node which is * greater than or is equal to the key, * set \a it to point to this node. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * it : Pointer to an uninitialized iterator. * * static void * ex_isearch_lt(ex_t *tree, ex_node_t *key, ex_iterator *it); * Description: Search for the closest node which is * strictly less than the key, * set \a it to point to this node. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * it : Pointer to an uninitialized iterator. * * static void * ex_isearch_gt(ex_t *tree, ex_node_t *key, ex_iterator *it); * Description: Search for the closest node which is * strictly greater than the key, * set \a it to point to this node. * Args: * tree: Pointer to an initialized red-black tree object. * key : Search key. * it : Pointer to an uninitialized iterator. * * struct ex_walk; * Description: Context for a pre-order depth-first tree traversal. 
* * static void * ex_walk_init(ex_walk *it, ex_t *tree) * Description: Create a context for pre-order depth-first traversal * over the given tree. * Args: * it : Traversal context. * tree: Tree that will be traversed. * * static ex_node * * ex_walk_next(ex_walk *it, int dir, ex_node **r_left, ex_node **r_right) * Description: Traverse to the next node in the tree. If the * traversal was not started, i.e. this is the * first call to ex_walk_next(), 'dir' is ignored * and the iterator is positioned to the tree root. * Args: * it : Traversal context. * dir : Allowed traversal directions, specified as bitwise * combination of RB_WALK_LEFT and RB_WALK_RIGHT. * RB_WALK_LEFT allows to iterate to the left child * of the current node if any, RB_WALK_RIGHT - to the * right. * r_left : Left child of the next node. * r_right: Right child of the next node. * Ret: Next node or NULL if the traversal is complete. * * There is also an extended 'rb_gen_ext_key' macro that allows to generate rb * code with specified key type and comparator for [p|n]search and isearch* * methods. So using this macro instead of 'rb_gen': * * ... * int (ex_key_cmp *)(int key, ex_node *node); * ... * rb_gen_ext_key(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp, * int, ex_key_cmp) * * Will generate the same code as 'rb_gen' macro with addition of these * 8 functions: * * static ex_node_t * * ex_search(ex_t *tree, int key); * * static ex_node_t * * ex_nsearch(ex_t *tree, int key); * * static ex_node_t * * ex_psearch(ex_t *tree, int key); * * static bool * ex_isearch(ex_t *tree, imt key, ex_iterator *it); * * static void * ex_isearch_le(ex_t *tree, imt key, ex_iterator *it); * * static void * ex_isearch_ge(ex_t *tree, imt key, ex_iterator *it); * * static void * ex_isearch_lt(ex_t *tree, imt key, ex_iterator *it); * * static void * ex_isearch_gt(ex_t *tree, imt key, ex_iterator *it); * * One can also used 'rb_proto_ext_key' macro to generate a declaration of * all methods with that kind of search methods. 
Comparing to 'rb_gen', this * macro has one additional argument - type of key. * * It is also possible to generate an augmented version of an rb tree that * will invoke a user-defined callback on each node whose configuration * changed as a result of a node insertion or deletion. The callback is * invoked in the bottom-up manner, starting from the affected leaf node * and going up to the tree root. It can be used for maintaining per node * data. The data assigned to a node should be a function of the set of * nodes that constitute the sub-tree rooted at the node. * * To generate an augmented rb tree, 'rb_gen_aug' or 'rb_gen_ext_key_aug' * macro should be used. Apart from the arguments taken by 'rb_gen' and * 'rb_gen_ext_key', these macros take an augmentation callback as the * last parameter: * * void (*ex_aug)(ex_node *node, ex_node *left, ex_node *right); * * The callback is passed the node whose value should be recomputed * and its children. The macros do not affect the generated function * signatures. 
*/ #define rb_gen_ext_key_aug(a_attr, a_prefix, a_rbt_type, a_type, \ a_field, a_cmp, a_key, a_cmp_key, a_aug) \ struct a_prefix##iterator { \ a_type *path[RB_MAX_TREE_HEIGHT]; \ uint32_t count; \ }; \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ return (rbtree->rbt_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return (ret); \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ return (ret); \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = a_cmp(RB_CMP_ARG node, tnode); \ if (cmp < 0) { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ assert(tnode != NULL); \ ret = NULL; \ while (true) { \ int cmp = a_cmp(RB_CMP_ARG node, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ break; \ } \ assert(tnode != NULL); \ } \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##search(a_rbt_type *rbtree, a_key key) { \ a_type *ret; \ int cmp; \ 
ret = rbtree->rbt_root; \ while (ret != NULL \ && (cmp = a_cmp_key(RB_CMP_ARG key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ } else { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##nsearch(a_rbt_type *rbtree, a_key key) { \ a_type *ret, *next; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ next = NULL; \ while (tnode != NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, tnode); \ if (cmp < 0) { \ next = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } \ } \ if (ret == NULL) { \ return next; \ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##psearch(a_rbt_type *rbtree, a_key key) { \ a_type *ret, *prev; \ a_type *tnode = rbtree->rbt_root; \ ret = NULL; \ prev = NULL; \ while (tnode != NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } else if (cmp > 0) { \ prev = tnode; \ tnode = rbtn_right_get(a_type, a_field, tnode); \ } else { \ ret = tnode; \ tnode = rbtn_left_get(a_type, a_field, tnode); \ } \ } \ if (ret == NULL) { \ return prev; \ } \ return (ret); \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } path[sizeof(void *) << 4], *pathp; \ rbt_node_new(a_type, a_field, rbtree, node); \ rbtn_augment(a_type, a_field, rbtree, node, a_aug); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(RB_CMP_ARG node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ } \ } \ pathp->node = node; \ /* Unwind. 
*/ \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ a_type *cnode = pathp->node; \ if (pathp->cmp < 0) { \ a_type *left = pathp[1].node; \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, cnode, tnode); \ rbtn_augment(a_type, a_field, rbtree, cnode, a_aug);\ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ cnode = tnode; \ } else { \ rbtn_augment(a_type, a_field, rbtree, cnode, a_aug);\ } \ } else { \ rbtn_augment_propagate(a_type, a_field, rbtree, path, \ pathp, a_aug); \ return; \ } \ } else { \ a_type *right = pathp[1].node; \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ if (left != NULL && rbtn_red_get(a_type, a_field, \ left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ rbtn_red_set(a_type, a_field, cnode); \ rbtn_augment(a_type, a_field, rbtree, cnode, a_aug);\ } else { \ /* Lean left. */ \ a_type *tnode; \ bool tred = rbtn_red_get(a_type, a_field, cnode); \ rbtn_rotate_left(a_type, a_field, cnode, tnode); \ rbtn_color_set(a_type, a_field, tnode, tred); \ rbtn_red_set(a_type, a_field, cnode); \ rbtn_augment(a_type, a_field, rbtree, cnode, a_aug);\ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ cnode = tnode; \ } \ } else { \ rbtn_augment_propagate(a_type, a_field, rbtree, path, \ pathp, a_aug); \ return; \ } \ } \ pathp->node = cnode; \ } \ /* Set root, and make it black. 
*/ \ rbtree->rbt_root = path->node; \ rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ } \ a_attr void \ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ struct { \ a_type *node; \ int cmp; \ } *pathp, *nodep, path[sizeof(void *) << 4]; \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(RB_CMP_ARG node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } else { \ pathp[1].node = rbtn_right_get(a_type, a_field, \ pathp->node); \ if (cmp == 0) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ for (pathp++; pathp->node != NULL; \ pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ } \ break; \ } \ } \ } \ assert(nodep->node == node); \ pathp--; \ if (pathp->node != node) { \ /* Swap node with its successor. */ \ bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ rbtn_color_set(a_type, a_field, pathp->node, \ rbtn_red_get(a_type, a_field, node)); \ rbtn_left_set(a_type, a_field, pathp->node, \ rbtn_left_get(a_type, a_field, node)); \ /* If node's successor is its right child, the following code */\ /* will do the wrong thing for the right child pointer. */\ /* However, it doesn't matter, because the pointer will be */\ /* properly set when the successor is pruned. */\ rbtn_right_set(a_type, a_field, pathp->node, \ rbtn_right_get(a_type, a_field, node)); \ rbtn_color_set(a_type, a_field, node, tred); \ /* The pruned leaf node's child pointers are never accessed */\ /* again, so don't bother setting them to nil. 
*/\ nodep->node = pathp->node; \ pathp->node = node; \ if (nodep == path) { \ rbtree->rbt_root = nodep->node; \ } else { \ if (nodep[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } else { \ rbtn_right_set(a_type, a_field, nodep[-1].node, \ nodep->node); \ } \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ assert(rbtn_red_get(a_type, a_field, node) == false); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ rbtree->rbt_root = left; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ left); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ left); \ } \ rbtn_augment_propagate(a_type, a_field, rbtree, path, \ &pathp[-1], a_aug); \ } \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ rbtn_augment_propagate(a_type, a_field, rbtree, path, \ &pathp[-1], a_aug); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. 
*/\ /* */\ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ /* */\ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_augment(a_type, a_field, rbtree, right, a_aug);\ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ } else { \ /* || */\ /* pathp(r) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ /* */\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ } \ /* Balance restored, but rotation modified subtree */\ /* root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ rbtn_augment_propagate(a_type, a_field, rbtree, path, \ &pathp[-1], a_aug); \ return; \ } else { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, rightleft); \ rbtn_rotate_right(a_type, a_field, right, tnode); \ rbtn_augment(a_type, a_field, rbtree, right, a_aug);\ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ rbtn_right_set(a_type, a_field, pathp->node, tnode);\ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ /* Balance restored, but rotation modified */\ /* subree root, which may actually be the tree */\ 
/* root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ rbtn_augment_propagate(a_type, a_field, rbtree, \ path, &pathp[-1], a_aug); \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* // \ */\ /* (b) (b) */\ /* / */\ /* (b) */\ a_type *tnode; \ rbtn_red_set(a_type, a_field, pathp->node); \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ pathp->node = tnode; \ } \ } \ } else { \ a_type *left; \ rbtn_right_set(a_type, a_field, pathp->node, \ pathp[1].node); \ left = rbtn_left_get(a_type, a_field, pathp->node); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *tnode; \ a_type *leftright = rbtn_right_get(a_type, a_field, \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ if (leftrightleft != NULL && rbtn_red_get(a_type, \ a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (r) */\ a_type *unode; \ rbtn_black_set(a_type, a_field, leftrightleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ unode); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_right_set(a_type, a_field, unode, tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ rbtn_rotate_left(a_type, a_field, unode, tnode); \ rbtn_augment(a_type, a_field, rbtree, unode, a_aug);\ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (r) (b) */\ /* \ */\ /* (b) */\ /* / */\ /* (b) */\ assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, 
rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ rbtn_black_set(a_type, a_field, tnode); \ } \ /* Balance restored, but rotation modified subtree */\ /* root, which may actually be the tree root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ } \ rbtn_augment_propagate(a_type, a_field, rbtree, path, \ &pathp[-1], a_aug); \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, pathp->node); \ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ /* Balance restored, but rotation modified */\ /* subtree root. */\ assert((uintptr_t)pathp > (uintptr_t)path); \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp[-1].node, \ tnode); \ } else { \ rbtn_right_set(a_type, a_field, pathp[-1].node, \ tnode); \ } \ rbtn_augment_propagate(a_type, a_field, rbtree, \ path, &pathp[-1], a_aug); \ return; \ } else { \ /* || */\ /* pathp(r) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, pathp->node); \ /* Balance restored. 
*/ \ rbtn_augment_propagate(a_type, a_field, rbtree, \ path, pathp, a_aug); \ return; \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (r) */\ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ rbtn_augment(a_type, a_field, rbtree, tnode, a_aug);\ /* Balance restored, but rotation modified */\ /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. */ \ rbtree->rbt_root = tnode; \ } else { \ if (pathp[-1].cmp < 0) { \ rbtn_left_set(a_type, a_field, \ pathp[-1].node, tnode); \ } else { \ rbtn_right_set(a_type, a_field, \ pathp[-1].node, tnode); \ } \ } \ rbtn_augment_propagate(a_type, a_field, rbtree, \ path, &pathp[-1], a_aug); \ return; \ } else { \ /* || */\ /* pathp(b) */\ /* / \\ */\ /* (b) (b) */\ /* / */\ /* (b) */\ rbtn_red_set(a_type, a_field, left); \ rbtn_augment(a_type, a_field, rbtree, pathp->node, \ a_aug); \ } \ } \ } \ } \ /* Set root. 
*/ \ rbtree->rbt_root = path->node; \ assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ } \ a_attr a_type * \ a_prefix##iterator_get(struct a_prefix##iterator *it) \ { \ if (it->count <= 0) { \ return NULL; \ } \ return it->path[it->count - 1]; \ } \ a_attr bool \ a_prefix##icreate(a_rbt_type *rbtree, a_type *node, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ a_type *cur = rbtree->rbt_root; \ while (cur != NULL) { \ int cmp = a_cmp(RB_CMP_ARG node, cur); \ assert(it->count < RB_MAX_TREE_HEIGHT); \ it->path[it->count++] = cur; \ if (cmp < 0) { \ cur = rbtn_left_get(a_type, a_field, cur); \ } else if (cmp > 0) { \ cur = rbtn_right_get(a_type, a_field, cur); \ } else { \ return true; \ } \ } \ it->count = 0; \ return false; \ } \ a_attr void \ a_prefix##ifirst(a_rbt_type *rbtree, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ if (rbtree->rbt_root != NULL) { \ rbtn_iter_go_left_down(a_type, a_field, rbtree->rbt_root, it); \ } \ } \ a_attr void \ a_prefix##ilast(a_rbt_type *rbtree, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ if (rbtree->rbt_root != NULL) { \ rbtn_iter_go_right_down(a_type, a_field, rbtree->rbt_root, it); \ } \ } \ a_attr a_type * \ a_prefix##inext(struct a_prefix##iterator *it) \ { \ if (it->count <= 0) { \ return NULL; \ } \ a_type *ret = it->path[it->count - 1]; \ a_type *right = rbtn_right_get(a_type, a_field, ret); \ if (right != NULL) { \ rbtn_iter_go_left_down(a_type, a_field, right, it); \ } else { \ rbtn_iter_go_right_up(a_type, a_field, it); \ } \ return ret; \ } \ a_attr a_type * \ a_prefix##iprev(struct a_prefix##iterator *it) \ { \ if (it->count <= 0) { \ return NULL; \ } \ a_type *ret = it->path[it->count - 1]; \ a_type *left = rbtn_left_get(a_type, a_field, ret); \ if (left != NULL) { \ rbtn_iter_go_right_down(a_type, a_field, left, it); \ } else { \ rbtn_iter_go_left_up(a_type, a_field, it); \ } \ return ret; \ } \ a_attr bool \ a_prefix##isearch(a_rbt_type *rbtree, a_key key, \ struct 
a_prefix##iterator *it) \ { \ a_type *cur = rbtree->rbt_root; \ it->count = 0; \ while (cur != NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, cur); \ assert(it->count < RB_MAX_TREE_HEIGHT); \ it->path[it->count++] = cur; \ if (cmp < 0) { \ cur = rbtn_left_get(a_type, a_field, cur); \ } else if (cmp > 0) { \ cur = rbtn_right_get(a_type, a_field, cur); \ } else { \ return true; \ } \ } \ it->count = 0; \ return false; \ } \ a_attr void \ a_prefix##isearch_le(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ a_type *cur = rbtree->rbt_root; \ int ret_count = -1; \ uint32_t prev_count = 0; \ while (cur != NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, cur); \ assert(it->count < RB_MAX_TREE_HEIGHT); \ it->path[it->count++] = cur; \ if (cmp < 0) { \ cur = rbtn_left_get(a_type, a_field, cur); \ } else if (cmp > 0) { \ prev_count = it->count; \ cur = rbtn_right_get(a_type, a_field, cur); \ } else { \ ret_count = it->count; \ cur = rbtn_right_get(a_type, a_field, cur); \ } \ } \ if (ret_count >= 0) { \ it->count = ret_count; \ } else { \ it->count = prev_count; \ } \ } \ a_attr void \ a_prefix##isearch_ge(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ a_type *cur = rbtree->rbt_root; \ int ret_count = -1; \ uint32_t next_count = 0; \ while (cur != NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, cur); \ assert(it->count < RB_MAX_TREE_HEIGHT); \ it->path[it->count++] = cur; \ if (cmp < 0) { \ next_count = it->count; \ cur = rbtn_left_get(a_type, a_field, cur); \ } else if (cmp > 0) { \ cur = rbtn_right_get(a_type, a_field, cur); \ } else { \ ret_count = it->count; \ cur = rbtn_left_get(a_type, a_field, cur); \ } \ } \ if (ret_count >= 0) { \ it->count = ret_count; \ } else { \ it->count = next_count; \ } \ } \ a_attr void \ a_prefix##isearch_lt(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ uint32_t prev_count = 0; \ a_type *cur = rbtree->rbt_root; \ while (cur != 
NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, cur); \ assert(it->count < RB_MAX_TREE_HEIGHT); \ it->path[it->count++] = cur; \ if (cmp < 0) { \ cur = rbtn_left_get(a_type, a_field, cur); \ } else if (cmp > 0) { \ prev_count = it->count; \ cur = rbtn_right_get(a_type, a_field, cur); \ } else { \ cur = rbtn_left_get(a_type, a_field, cur); \ } \ } \ it->count = prev_count; \ } \ a_attr void \ a_prefix##isearch_gt(a_rbt_type *rbtree, a_key key, \ struct a_prefix##iterator *it) \ { \ it->count = 0; \ uint32_t next_count = 0; \ a_type *cur = rbtree->rbt_root; \ while (cur != NULL) { \ int cmp = a_cmp_key(RB_CMP_ARG key, cur); \ assert(it->count < RB_MAX_TREE_HEIGHT); \ it->path[it->count++] = cur; \ if (cmp < 0) { \ next_count = it->count; \ cur = rbtn_left_get(a_type, a_field, cur); \ } else if (cmp > 0) { \ cur = rbtn_right_get(a_type, a_field, cur); \ } else { \ cur = rbtn_right_get(a_type, a_field, cur); \ } \ } \ it->count = next_count; \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return (NULL); \ } else { \ a_type *ret = a_prefix##iter_recurse(rbtree, rbtn_left_get( \ a_type, a_field, node), cb, arg); \ if (ret != NULL) { \ return (ret); \ } \ a_type *right = rbtn_right_get(a_type, a_field, node); \ ret = cb(rbtree, node, arg); \ if (ret != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, right, cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ int cmp = a_cmp(RB_CMP_ARG start, node); \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ a_field, node), cb, arg)); \ } else if (cmp > 0) { \ 
return (a_prefix##iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ a_type *right = rbtn_right_get(a_type, a_field, node); \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##iter_recurse(rbtree, right, cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ cb, arg); \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ return (ret); \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ if (node == NULL) { \ return (NULL); \ } else { \ a_type *ret = a_prefix##reverse_iter_recurse(rbtree, \ rbtn_right_get(a_type, a_field, node), cb, arg); \ if (ret != NULL) { \ return (ret); \ } \ a_type *left = rbtn_left_get(a_type, a_field, node); \ ret = cb(rbtree, node, arg); \ if (ret != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, left, cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ void *arg) { \ int cmp = a_cmp(RB_CMP_ARG start, node); \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ (ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } \ return (a_prefix##reverse_iter_recurse(rbtree, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else if (cmp < 0) { \ return (a_prefix##reverse_iter_start(rbtree, start, \ rbtn_left_get(a_type, a_field, node), cb, arg)); \ } else { \ a_type *ret; \ a_type *left = rbtn_left_get(a_type, a_field, node); \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ return (ret); \ } 
\ return (a_prefix##reverse_iter_recurse(rbtree, left, cb, arg)); \ } \ } \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ a_type *ret; \ if (start != NULL) { \ ret = a_prefix##reverse_iter_start(rbtree, start, \ rbtree->rbt_root, cb, arg); \ } else { \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ return (ret); \ } \ struct a_prefix##walk { \ struct { \ a_type *node; \ int dir; \ } path[RB_MAX_TREE_HEIGHT]; \ int count; \ }; \ a_attr void \ a_prefix##walk_init(struct a_prefix##walk *it, a_rbt_type *rbtree) { \ it->count = 0; \ it->path[0].node = rbtree->rbt_root; \ } \ a_attr a_type * \ a_prefix##walk_next(struct a_prefix##walk *it, int dir, \ a_type **r_left, a_type **r_right) { \ a_type *node, *left, *right, *parent; \ if (it->count == 0) { \ node = it->path[0].node; \ if (node == NULL) \ return NULL; \ it->count++; \ } else { \ node = it->path[it->count - 1].node; \ it->path[it->count - 1].dir = dir; \ left = rbtn_left_get(a_type, a_field, node); \ right = rbtn_right_get(a_type, a_field, node); \ if ((dir & RB_WALK_LEFT) && left != NULL) { \ node = left; \ } else if ((dir & RB_WALK_RIGHT) && right != NULL) { \ node = right; \ } else { \ while (--it->count > 0) { \ parent = it->path[it->count - 1].node; \ right = rbtn_right_get(a_type, a_field, parent); \ if ((it->path[it->count - 1].dir & RB_WALK_RIGHT) && \ right != NULL && right != node) { \ node = right; \ break; \ } \ node = parent; \ } \ if (it->count == 0) \ return NULL; \ } \ it->path[it->count++].node = node; \ } \ *r_left = rbtn_left_get(a_type, a_field, node); \ *r_right = rbtn_right_get(a_type, a_field, node); \ return node; \ } #define rb_aug_noop(a_node, a_left, a_right) do { \ (void)(a_node); \ (void)(a_left); \ (void)(a_right); \ } while (0) #define rb_gen_ext_key(a_attr, a_prefix, a_rbt_type, a_type, a_field, \ a_cmp, a_key, a_cmp_key) \ rb_gen_ext_key_aug(a_attr, 
a_prefix, a_rbt_type, a_type, a_field, \ a_cmp, a_key, a_cmp_key, rb_aug_noop) #define rb_gen_aug(a_attr, a_prefix, a_rbt_type, a_type, a_field, \ a_cmp, a_aug) \ rb_gen_ext_key_aug(a_attr, a_prefix, a_rbt_type, a_type, a_field, \ a_cmp, a_type *, a_cmp, a_aug) #define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ rb_gen_aug(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \ rb_aug_noop) #endif /* RB_H_ */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/ibuf.c0000644000000000000000000000632013306562360021057 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "ibuf.h" #include #include "slab_cache.h" /** Initialize an input buffer. 
*/ void ibuf_create(struct ibuf *ibuf, struct slab_cache *slabc, size_t start_capacity) { ibuf->slabc = slabc; ibuf->buf = ibuf->rpos = ibuf->wpos = ibuf->end = NULL; ibuf->start_capacity = start_capacity; /* Don't allocate the buffer yet. */ } void ibuf_destroy(struct ibuf *ibuf) { if (ibuf->buf) { struct slab *slab = slab_from_data(ibuf->buf); slab_put(ibuf->slabc, slab); } } /** Free memory allocated by this buffer */ void ibuf_reinit(struct ibuf *ibuf) { struct slab_cache *slabc = ibuf->slabc; size_t start_capacity = ibuf->start_capacity; ibuf_destroy(ibuf); ibuf_create(ibuf, slabc, start_capacity); } /** * Ensure the buffer has sufficient capacity * to store size bytes, and return pointer to * the beginning. */ void * ibuf_reserve_slow(struct ibuf *ibuf, size_t size) { assert(ibuf->wpos + size > ibuf->end); size_t used = ibuf_used(ibuf); size_t capacity = ibuf_capacity(ibuf); /* * Check if we have enough space in the * current buffer. In this case de-fragment it * by moving existing data to the beginning. * Otherwise, get a bigger buffer. */ if (size + used <= capacity) { memmove(ibuf->buf, ibuf->rpos, used); } else { /* Use iobuf_readahead as allocation factor. 
*/ size_t new_capacity = capacity * 2; if (new_capacity < ibuf->start_capacity) new_capacity = ibuf->start_capacity; while (new_capacity < used + size) new_capacity *= 2; struct slab *slab = slab_get(ibuf->slabc, new_capacity); if (slab == NULL) return NULL; char *ptr = (char *) slab_data(slab); memcpy(ptr, ibuf->rpos, used); if (ibuf->buf) slab_put(ibuf->slabc, slab_from_data(ibuf->buf)); ibuf->buf = ptr; ibuf->end = ibuf->buf + slab_capacity(slab); } ibuf->rpos = ibuf->buf; ibuf->wpos = ibuf->rpos + used; return ibuf->wpos; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/slab_arena.h0000644000000000000000000001140713306562360022230 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_SLAB_ARENA_H #define INCLUDES_TARANTOOL_SMALL_SLAB_ARENA_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lf_lifo.h" #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { /* Smallest possible slab size. */ SLAB_MIN_SIZE = ((size_t)USHRT_MAX) + 1, /** The largest allowed amount of memory of a single arena. */ SMALL_UNLIMITED = SIZE_MAX/2 + 1 }; /** * slab_arena -- a source of large aligned blocks of memory. * MT-safe. * Uses a lock-free LIFO to maintain a cache of used slabs. * Uses a lock-free quota to limit allocating memory. * Never returns memory to the operating system. */ struct slab_arena { /** * A lock free list of cached slabs. * Initially there are no cached slabs, only arena. * As slabs are used and returned to arena, the cache is * used to recycle them. */ struct lf_lifo cache; /** A preallocated arena of size = prealloc. */ void *arena; /** * How much memory is preallocated during initialization * of slab_arena. */ size_t prealloc; /** * How much memory in the arena has * already been initialized for slabs. */ size_t used; /** * An external quota to which we must adhere. * A quota exists to set a common limit on two arenas. */ struct quota *quota; /* * Each object returned by arena_map() has this size. * The size is provided at arena initialization. * It must be a power of 2 and large enough * (at least 64kb, since the two lower bytes are * used for ABA counter in the lock-free list). * Returned pointers are always aligned by this size. 
* * It's important to keep this value moderate to * limit the overhead of partially populated slabs. * It is still necessary, however, to make it settable, * to allow allocation of large objects. * Typical value is 4Mb, which makes it possible to * allocate objects of size up to ~1MB. */ uint32_t slab_size; /** * mmap() flags: MAP_SHARED or MAP_PRIVATE */ int flags; }; /** Initialize an arena. */ int slab_arena_create(struct slab_arena *arena, struct quota *quota, size_t prealloc, uint32_t slab_size, int flags); /** Destroy an arena. */ void slab_arena_destroy(struct slab_arena *arena); /** Get a slab. */ void * slab_map(struct slab_arena *arena); /** Put a slab into cache. */ void slab_unmap(struct slab_arena *arena, void *ptr); /** mprotect() the preallocated arena. */ void slab_arena_mprotect(struct slab_arena *arena); /** * Align a size - round up to nearest divisible by the given alignment. * Alignment must be a power of 2 */ static inline size_t small_align(size_t size, size_t alignment) { /* Must be a power of two */ assert((alignment & (alignment - 1)) == 0); /* Bit arithmetics won't work for a large size */ assert(size <= SIZE_MAX - alignment); return (size - 1 + alignment) & ~(alignment - 1); } /** Round up a number to the nearest power of two. */ static inline size_t small_round(size_t size) { if (size < 2) return size; assert(size <= SIZE_MAX / 2 + 1); assert(size - 1 <= ULONG_MAX); size_t r = 1; return r << (sizeof(unsigned long) * CHAR_BIT - __builtin_clzl((unsigned long) (size - 1))); } /** Binary logarithm of a size. 
*/ static inline size_t small_lb(size_t size) { assert(size <= ULONG_MAX); return sizeof(unsigned long) * CHAR_BIT - __builtin_clzl((unsigned long) size) - 1; } #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* INCLUDES_TARANTOOL_SMALL_SLAB_ARENA_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/quota_lessor.h0000644000000000000000000001202413306562360022655 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SMALL_QUOTA_LESSOR_H #define INCLUDES_TARANTOOL_SMALL_QUOTA_LESSOR_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "quota.h" /** * Quota lessor is a convenience wrapper around thread-safe `struct quota` * to allocate small chunks of memory from the single thread. 
Original quota * has 1Kb precision and uses atomics, which are too slow for frequent calls * from different threads. * * The quota lessor allocates huge (1Mb+) chunks of memory from * the source quota and then leases small chunks to the end users. * The end of lease is implemented in the similar way - the lessor * does not release small amounts, but accumulates freed memory until * it reaches at least 1Mb an then releases it. * * This decreases usage of atomic locks and improves quota * precision from 1024 bytes to 1 byte. This class, however, is * not thread-safe, so there must be a lessor in each thread. */ struct quota_lessor { /** Original thread-safe, 1Kb precision quota. */ struct quota *source; /** The number of bytes taken from @a source. */ size_t used; /** The number of bytes leased. */ size_t leased; }; /** * Return the total number of bytes leased * @param lessor quota_lessor */ static inline size_t quota_leased(const struct quota_lessor *lessor) { return lessor->leased; } /** * Return the number of bytes allocated from the source, but not leased yet * @param lessor quota_lessor */ static inline size_t quota_available(const struct quota_lessor *lessor) { return lessor->used - lessor->leased; } /** Min byte count to alloc from original quota. */ #define QUOTA_USE_MIN (QUOTA_UNIT_SIZE * 1024) /** * Create a new quota lessor from @a source. * @param lessor quota_lessor * @param source source quota */ static inline void quota_lessor_create(struct quota_lessor *lessor, struct quota *source) { lessor->source = source; lessor->used = 0; lessor->leased = 0; assert(quota_total(source) >= QUOTA_USE_MIN); } /** * Destroy the quota lessor * @param lessor quota_lessor * @pre quota_leased(lessor) == 0 */ static inline void quota_lessor_destroy(struct quota_lessor *lessor) { assert(lessor->leased == 0); if (lessor->used == 0) return; assert(lessor->used % QUOTA_UNIT_SIZE == 0); quota_release(lessor->source, lessor->used); lessor->used = 0; } /** * Lease @a size bytes. 
* @param lessor quota lessor * @param size the number of bytes to lease * @retval >= 0 Number of leased bytes. * @retval -1 Error, not enough quota. */ static inline ssize_t quota_lease(struct quota_lessor *lessor, ssize_t size) { /* Fast way, there is enough unused quota. */ if (lessor->leased + size <= lessor->used) { lessor->leased += size; return size; } /* Need to use the original quota. */ size_t required = size + lessor->leased - lessor->used; size_t use = required > QUOTA_USE_MIN ? required : QUOTA_USE_MIN; for (; use >= required; use = use/2) { ssize_t used = quota_use(lessor->source, use); if (used >= 0) { lessor->used += used; lessor->leased += size; return size; } } return -1; } /* * End the lease of @a size bytes * @param lessor quota_lessor * @param size the number of bytes to return */ static inline ssize_t quota_end_lease(struct quota_lessor *lessor, size_t size) { assert(lessor->leased >= size); lessor->leased -= size; size_t available = lessor->used - lessor->leased; /* * Release the original quota when enough bytes * accumulated to avoid frequent quota_release() calls. */ if (available >= 2 * QUOTA_USE_MIN) { /* Do not release too much to avoid oscillation. */ size_t release = available - QUOTA_USE_MIN - QUOTA_UNIT_SIZE; lessor->used -= quota_release(lessor->source, release); } return size; } #endif /* INCLUDES_TARANTOOL_SMALL_QUOTA_LESSOR_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/rlist.h0000644000000000000000000002105313306562360021274 0ustar rootroot#ifndef TARANTOOL_RLIST_H_INCLUDED #define TARANTOOL_RLIST_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #ifndef typeof /* TODO: 'typeof' is a GNU extension */ #define typeof __typeof__ #endif /** * list entry and head structure */ struct rlist { struct rlist *prev; struct rlist *next; }; /** * init list head (or list entry as ins't included in list) */ inline static void rlist_create(struct rlist *list) { list->next = list; list->prev = list; } /** * add item to list */ inline static void rlist_add(struct rlist *head, struct rlist *item) { item->prev = head; item->next = head->next; item->prev->next = item; item->next->prev = item; } /** * add item to list tail */ inline static void rlist_add_tail(struct rlist *head, struct rlist *item) { item->next = head; item->prev = head->prev; item->prev->next = item; item->next->prev = item; } /** * delete element */ inline static void rlist_del(struct rlist *item) { item->prev->next = item->next; item->next->prev = item->prev; rlist_create(item); } inline static struct rlist * rlist_shift(struct rlist *head) { struct rlist 
*shift = head->next; head->next = shift->next; shift->next->prev = head; shift->next = shift->prev = shift; return shift; } inline static struct rlist * rlist_shift_tail(struct rlist *head) { struct rlist *shift = head->prev; rlist_del(shift); return shift; } /** * return first element */ inline static struct rlist * rlist_first(struct rlist *head) { return head->next; } /** * return last element */ inline static struct rlist * rlist_last(struct rlist *head) { return head->prev; } /** * return next element by element */ inline static struct rlist * rlist_next(struct rlist *item) { return item->next; } /** * return previous element */ inline static struct rlist * rlist_prev(struct rlist *item) { return item->prev; } /** * return TRUE if list is empty */ inline static int rlist_empty(struct rlist *item) { return item->next == item->prev && item->next == item; } /** @brief delete from one list and add as another's head @param to the head that will precede our entry @param item the entry to move */ static inline void rlist_move(struct rlist *to, struct rlist *item) { rlist_del(item); rlist_add(to, item); } /** @brief delete from one list and add_tail as another's head @param to the head that will precede our entry @param item the entry to move */ static inline void rlist_move_tail(struct rlist *to, struct rlist *item) { item->prev->next = item->next; item->next->prev = item->prev; item->next = to; item->prev = to->prev; item->prev->next = item; item->next->prev = item; } static inline void rlist_swap(struct rlist *rhs, struct rlist *lhs) { struct rlist tmp = *rhs; *rhs = *lhs; *lhs = tmp; /* Relink the nodes. 
*/ if (lhs->next == rhs) /* Take care of empty list case */ lhs->next = lhs; lhs->next->prev = lhs; lhs->prev->next = lhs; if (rhs->next == lhs) /* Take care of empty list case */ rhs->next = rhs; rhs->next->prev = rhs; rhs->prev->next = rhs; } /** * move all items of list head2 to the head of list head1 */ static inline void rlist_splice(struct rlist *head1, struct rlist *head2) { if (!rlist_empty(head2)) { head1->next->prev = head2->prev; head2->prev->next = head1->next; head1->next = head2->next; head2->next->prev = head1; rlist_create(head2); } } /** * move all items of list head2 to the tail of list head1 */ static inline void rlist_splice_tail(struct rlist *head1, struct rlist *head2) { if (!rlist_empty(head2)) { head1->prev->next = head2->next; head2->next->prev = head1->prev; head1->prev = head2->prev; head2->prev->next = head1; rlist_create(head2); } } /** * list head initializer */ #define RLIST_HEAD_INITIALIZER(name) { &(name), &(name) } /** * list link node */ #define RLIST_LINK_INITIALIZER { 0, 0 } /** * allocate and init head of list */ #define RLIST_HEAD(name) \ struct rlist name = RLIST_HEAD_INITIALIZER(name) /** * return entry by list item */ #define rlist_entry(item, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (item); \ (type *)( (char *)__mptr - ((size_t) &((type *)0)->member) ); }) /** * return first entry */ #define rlist_first_entry(head, type, member) \ rlist_entry(rlist_first(head), type, member) /** * Remove one element from the list and return it * @pre the list is not empty */ #define rlist_shift_entry(head, type, member) \ rlist_entry(rlist_shift(head), type, member) \ /** * Remove one element from the list tail and return it * @pre the list is not empty */ #define rlist_shift_tail_entry(head, type, member) \ rlist_entry(rlist_shift_tail(head), type, member) \ /** * return last entry * @pre the list is not empty */ #define rlist_last_entry(head, type, member) \ rlist_entry(rlist_last(head), type, member) /** * return 
next entry */ #define rlist_next_entry(item, member) \ rlist_entry(rlist_next(&(item)->member), typeof(*item), member) /** * return previous entry */ #define rlist_prev_entry(item, member) \ rlist_entry(rlist_prev(&(item)->member), typeof(*item), member) #define rlist_prev_entry_safe(item, head, member) \ ((rlist_prev(&(item)->member) == (head)) ? NULL : \ rlist_entry(rlist_prev(&(item)->member), typeof(*item), member)) /** * add entry to list */ #define rlist_add_entry(head, item, member) \ rlist_add((head), &(item)->member) /** * add entry to list tail */ #define rlist_add_tail_entry(head, item, member) \ rlist_add_tail((head), &(item)->member) /** delete from one list and add as another's head */ #define rlist_move_entry(to, item, member) \ rlist_move((to), &((item)->member)) /** delete from one list and add_tail as another's head */ #define rlist_move_tail_entry(to, item, member) \ rlist_move_tail((to), &((item)->member)) /** * delete entry from list */ #define rlist_del_entry(item, member) \ rlist_del(&((item)->member)) /** * foreach through list */ #define rlist_foreach(item, head) \ for (item = rlist_first(head); item != (head); item = rlist_next(item)) /** * foreach backward through list */ #define rlist_foreach_reverse(item, head) \ for (item = rlist_last(head); item != (head); item = rlist_prev(item)) /** * foreach through all list entries */ #define rlist_foreach_entry(item, head, member) \ for (item = rlist_first_entry((head), typeof(*item), member); \ &item->member != (head); \ item = rlist_next_entry((item), member)) /** * foreach backward through all list entries */ #define rlist_foreach_entry_reverse(item, head, member) \ for (item = rlist_last_entry((head), typeof(*item), member); \ &item->member != (head); \ item = rlist_prev_entry((item), member)) #define rlist_foreach_entry_safe(item, head, member, tmp) \ for ((item) = rlist_first_entry((head), typeof(*item), member); \ &item->member != (head) && \ ((tmp) = rlist_next_entry((item), member)); \ 
(item) = (tmp)) #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_RLIST_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/mempool.c0000644000000000000000000001541113306562360021603 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "mempool.h" #include #include #include #include #include "slab_cache.h" /* slab fragmentation must reach 1/8 before it's recycled */ enum { MAX_COLD_FRACTION_LB = 3 }; static inline int mslab_cmp(struct mslab *lhs, struct mslab *rhs) { /* pointer arithmetics may overflow int * range. */ return lhs > rhs ? 1 : (lhs < rhs ? 
-1 : 0); } rb_proto(, mslab_tree_, mslab_tree_t, struct mslab) rb_gen(, mslab_tree_, mslab_tree_t, struct mslab, next_in_hot, mslab_cmp) static inline void mslab_create(struct mslab *slab, struct mempool *pool) { slab->nfree = pool->objcount; slab->free_offset = pool->offset; slab->free_list = NULL; slab->in_hot_slabs = false; rlist_create(&slab->next_in_cold); } void * mslab_alloc(struct mempool *pool, struct mslab *slab) { assert(slab->nfree); void *result; if (slab->free_list) { /* Recycle an object from the garbage pool. */ result = slab->free_list; slab->free_list = *(void **)slab->free_list; } else { /* Use an object from the "untouched" area of the slab. */ result = (char *)slab + slab->free_offset; slab->free_offset += pool->objsize; } /* If the slab is full, remove it from the rb tree. */ if (--slab->nfree == 0) { if (slab == pool->first_hot_slab) { pool->first_hot_slab = mslab_tree_next(&pool->hot_slabs, slab); } mslab_tree_remove(&pool->hot_slabs, slab); slab->in_hot_slabs = false; } return result; } void mslab_free(struct mempool *pool, struct mslab *slab, void *ptr) { /* put object to garbage list */ *(void **)ptr = slab->free_list; slab->free_list = ptr; VALGRIND_FREELIKE_BLOCK(ptr, 0); VALGRIND_MAKE_MEM_DEFINED(ptr, sizeof(void *)); slab->nfree++; if (slab->in_hot_slabs == false && slab->nfree >= (pool->objcount >> MAX_COLD_FRACTION_LB)) { /** * Add this slab to the rbtree which contains * sufficiently fragmented slabs. */ rlist_del_entry(slab, next_in_cold); mslab_tree_insert(&pool->hot_slabs, slab); slab->in_hot_slabs = true; /* * Update first_hot_slab pointer if the newly * added tree node is the leftmost. */ if (pool->first_hot_slab == NULL || mslab_cmp(pool->first_hot_slab, slab) == 1) { pool->first_hot_slab = slab; } } else if (slab->nfree == 1) { rlist_add_entry(&pool->cold_slabs, slab, next_in_cold); } else if (slab->nfree == pool->objcount) { /** Free the slab. 
*/ if (slab == pool->first_hot_slab) { pool->first_hot_slab = mslab_tree_next(&pool->hot_slabs, slab); } mslab_tree_remove(&pool->hot_slabs, slab); slab->in_hot_slabs = false; if (pool->spare > slab) { slab_list_del(&pool->slabs, &pool->spare->slab, next_in_list); slab_put_with_order(pool->cache, &pool->spare->slab); pool->spare = slab; } else if (pool->spare) { slab_list_del(&pool->slabs, &slab->slab, next_in_list); slab_put_with_order(pool->cache, &slab->slab); } else { pool->spare = slab; } } } void mempool_create_with_order(struct mempool *pool, struct slab_cache *cache, uint32_t objsize, uint8_t order) { assert(order <= cache->order_max); lifo_init(&pool->link); lifo_init(&pool->delayed); pool->cache = cache; slab_list_create(&pool->slabs); mslab_tree_new(&pool->hot_slabs); pool->first_hot_slab = NULL; rlist_create(&pool->cold_slabs); pool->spare = NULL; pool->objsize = objsize; pool->slab_order = order; /* Total size of slab */ uint32_t slab_size = slab_order_size(pool->cache, pool->slab_order); /* Calculate how many objects will actually fit in a slab. */ pool->objcount = (slab_size - mslab_sizeof()) / objsize; assert(pool->objcount); pool->offset = slab_size - pool->objcount * pool->objsize; pool->slab_ptr_mask = ~(slab_order_size(cache, order) - 1); } void mempool_destroy(struct mempool *pool) { struct slab *slab, *tmp; rlist_foreach_entry_safe(slab, &pool->slabs.slabs, next_in_list, tmp) slab_put_with_order(pool->cache, slab); } void * mempool_alloc(struct mempool *pool) { struct mslab *slab = pool->first_hot_slab; if (slab == NULL) { if (pool->spare) { slab = pool->spare; pool->spare = NULL; } else if ((slab = (struct mslab *) slab_get_with_order(pool->cache, pool->slab_order))) { mslab_create(slab, pool); slab_list_add(&pool->slabs, &slab->slab, next_in_list); } else if (! 
rlist_empty(&pool->cold_slabs)) { slab = rlist_shift_entry(&pool->cold_slabs, struct mslab, next_in_cold); } else { return NULL; } assert(slab->in_hot_slabs == false); mslab_tree_insert(&pool->hot_slabs, slab); slab->in_hot_slabs = true; pool->first_hot_slab = slab; } pool->slabs.stats.used += pool->objsize; void *ptr = mslab_alloc(pool, slab); assert(ptr != NULL); VALGRIND_MALLOCLIKE_BLOCK(ptr, pool->objsize, 0, 0); return ptr; } void mempool_stats(struct mempool *pool, struct mempool_stats *stats) { /* Object size. */ stats->objsize = pool->objsize; /* Number of objects. */ stats->objcount = mempool_count(pool); /* Size of the slab. */ stats->slabsize = slab_order_size(pool->cache, pool->slab_order); /* The number of slabs. */ stats->slabcount = pool->slabs.stats.total/stats->slabsize; /* How much memory is used for slabs. */ stats->totals.used = pool->slabs.stats.used; /* * How much memory is available. Subtract the slab size, * which is allocation overhead and is not available * memory. */ stats->totals.total = pool->slabs.stats.total - mslab_sizeof() * stats->slabcount; } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/small/small.c0000644000000000000000000002737413306562360021256 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "small.h" #include #include #include enum { /** Step size for stepped pools, in bytes */ STEP_SIZE = 8, /** * LB stands for logarithm with binary base, this constant * is used for bit shifts, when we need to divide by * STEP_SIZE. */ STEP_SIZE_LB = 3, }; rb_proto(, factor_tree_, factor_tree_t, struct factor_pool) /** Used for search in the tree. */ static inline int factor_pool_cmp(struct factor_pool *a, struct factor_pool *b) { return a->pool.objsize > b->pool.objsize ? 1 : a->pool.objsize < b->pool.objsize ? -1 : 0; } rb_gen(, factor_tree_, factor_tree_t, struct factor_pool, node, factor_pool_cmp) static inline struct factor_pool * factor_pool_create(struct small_alloc *alloc, struct factor_pool *upper_bound, size_t size) { assert(size > alloc->step_pool_objsize_max); assert(size <= alloc->objsize_max); if (alloc->factor_pool_next == NULL) { /** * Too many factored pools already, fall back * to an imperfect one. */ return upper_bound; } size_t objsize = alloc->step_pool_objsize_max; size_t prevsize; do { prevsize = objsize; /* * Align objsize after each multiplication to * ensure that the distance between objsizes of * factored pools is a multiple of STEP_SIZE. 
*/ objsize = small_align(objsize * alloc->factor, sizeof(intptr_t)); assert(objsize > alloc->step_pool_objsize_max); } while (objsize < size); if (objsize > alloc->objsize_max) objsize = alloc->objsize_max; struct factor_pool *pool = alloc->factor_pool_next; alloc->factor_pool_next = pool->next; mempool_create(&pool->pool, alloc->cache, objsize); pool->objsize_min = prevsize + 1; factor_tree_insert(&alloc->factor_pools, pool); return pool; } /** Initialize the small allocator. */ void small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache, uint32_t objsize_min, float alloc_factor) { alloc->cache = cache; /* Align sizes. */ objsize_min = small_align(objsize_min, STEP_SIZE); alloc->step_pool0_step_count = (objsize_min - 1) >> STEP_SIZE_LB; /* Make sure at least 4 largest objects can fit in a slab. */ alloc->objsize_max = mempool_objsize_max(slab_order_size(cache, cache->order_max)); if (!(alloc->objsize_max > objsize_min + STEP_POOL_MAX * STEP_SIZE)) { fprintf(stderr, "Can't create small alloc, small " "object min size should not be greather than %u\n", alloc->objsize_max - (STEP_POOL_MAX + 1) * STEP_SIZE); abort(); } struct mempool *step_pool; for (step_pool = alloc->step_pools; step_pool < alloc->step_pools + STEP_POOL_MAX; step_pool++) { mempool_create(step_pool, alloc->cache, objsize_min); objsize_min += STEP_SIZE; } alloc->step_pool_objsize_max = (step_pool - 1)->objsize; if (alloc_factor > 2.0) alloc_factor = 2.0; /* * Correct the user-supplied alloc_factor to ensure that * it actually produces growing object sizes. */ if (alloc->step_pool_objsize_max * alloc_factor < alloc->step_pool_objsize_max + STEP_SIZE) { alloc_factor = (alloc->step_pool_objsize_max + STEP_SIZE + 0.5)/ alloc->step_pool_objsize_max; } alloc->factor = alloc_factor; /* Initialize the factored pool cache. 
*/ struct factor_pool *factor_pool = alloc->factor_pool_cache; do { factor_pool->next = factor_pool + 1; factor_pool++; } while (factor_pool != alloc->factor_pool_cache + FACTOR_POOL_MAX - 1); factor_pool->next = NULL; alloc->factor_pool_next = alloc->factor_pool_cache; factor_tree_new(&alloc->factor_pools); (void) factor_pool_create(alloc, NULL, alloc->objsize_max); lifo_init(&alloc->delayed); lifo_init(&alloc->delayed_large); alloc->free_mode = SMALL_FREE; } void small_alloc_setopt(struct small_alloc *alloc, enum small_opt opt, bool val) { switch (opt) { case SMALL_DELAYED_FREE_MODE: alloc->free_mode = val ? SMALL_DELAYED_FREE : SMALL_COLLECT_GARBAGE; break; default: assert(false); break; } } static inline void small_collect_garbage(struct small_alloc *alloc) { if (alloc->free_mode != SMALL_COLLECT_GARBAGE) return; const int BATCH = 100; if (!lifo_is_empty(&alloc->delayed_large)) { /* Free large allocations */ for (int i = 0; i < BATCH; i++) { void *item = lifo_pop(&alloc->delayed_large); if (item == NULL) break; struct slab *slab = slab_from_data(item); slab_put_large(alloc->cache, slab); } } else if (!lifo_is_empty(&alloc->delayed)) { /* Free regular allocations */ struct mempool *pool = lifo_peek(&alloc->delayed); for (int i = 0; i < BATCH; i++) { void *item = lifo_pop(&pool->delayed); if (item == NULL) { (void) lifo_pop(&alloc->delayed); pool = lifo_peek(&alloc->delayed); if (pool == NULL) break; continue; } mempool_free(pool, item); } } else { /* Finish garbage collection and switch to regular mode */ alloc->free_mode = SMALL_FREE; } } /** * Allocate a small object. * * Find or create a mempool instance of the right size, * and allocate the object on the pool. * * If object is small enough to fit a stepped pool, * finding the right pool for it is just a matter of bit * shifts. Otherwise, look up a pool in the red-black * factored pool tree. 
* * @retval ptr success * @retval NULL out of memory */ void * smalloc(struct small_alloc *alloc, size_t size) { small_collect_garbage(alloc); struct mempool *pool; int idx = (size - 1) >> STEP_SIZE_LB; idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0; if (idx < STEP_POOL_MAX) { /* Allocate in a stepped pool. */ pool = &alloc->step_pools[idx]; assert(size <= pool->objsize && (size + STEP_SIZE > pool->objsize || idx == 0)); } else { struct factor_pool pattern; pattern.pool.objsize = size; struct factor_pool *upper_bound = factor_tree_nsearch(&alloc->factor_pools, &pattern); if (upper_bound == NULL) { /* Object is too large, fallback to slab_cache */ struct slab *slab = slab_get_large(alloc->cache, size); if (slab == NULL) return NULL; return slab_data(slab); } if (size < upper_bound->objsize_min) upper_bound = factor_pool_create(alloc, upper_bound, size); pool = &upper_bound->pool; } assert(size <= pool->objsize); return mempool_alloc(pool); } static void small_recycle_pool(struct small_alloc *alloc, struct mempool *pool) { if (mempool_used(pool) == 0 && pool->objsize > alloc->step_pool_objsize_max && alloc->factor_pool_next == NULL) { struct factor_pool *factor_pool = (struct factor_pool *) ((char *) pool - (intptr_t) &((struct factor_pool *) NULL)->pool); factor_tree_remove(&alloc->factor_pools, factor_pool); mempool_destroy(pool); alloc->factor_pool_next = factor_pool; } } static inline struct mempool * mempool_find(struct small_alloc *alloc, size_t size) { struct mempool *pool; int idx = (size - 1) >> STEP_SIZE_LB; idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0; if (idx < STEP_POOL_MAX) { /* Allocated in a stepped pool. */ pool = &alloc->step_pools[idx]; assert((size + STEP_SIZE > pool->objsize) || (idx == 0)); } else { /* Allocated in a factor pool. 
*/ struct factor_pool pattern; pattern.pool.objsize = size; struct factor_pool *upper_bound = factor_tree_nsearch(&alloc->factor_pools, &pattern); if (upper_bound == NULL) return NULL; /* Allocated by slab_cache. */ assert(size >= upper_bound->objsize_min); pool = &upper_bound->pool; } assert(size <= pool->objsize); return pool; } /** Free memory chunk allocated by the small allocator. */ /** * Free a small object. * * This boils down to finding the object's mempool and delegating * to mempool_free(). * * If the pool becomes completely empty, and it's a factored pool, * and the factored pool's cache is empty, put back the empty * factored pool into the factored pool cache. */ void smfree(struct small_alloc *alloc, void *ptr, size_t size) { struct mempool *pool = mempool_find(alloc, size); if (pool == NULL) { /* Large allocation by slab_cache */ struct slab *slab = slab_from_data(ptr); slab_put_large(alloc->cache, slab); return; } /* Regular allocation in mempools */ mempool_free(pool, ptr); if (mempool_used(pool) == 0) small_recycle_pool(alloc, pool); } /** * Free memory chunk allocated by the small allocator * if not in snapshot mode, otherwise put to the delayed * free list. */ void smfree_delayed(struct small_alloc *alloc, void *ptr, size_t size) { if (alloc->free_mode == SMALL_DELAYED_FREE && ptr) { struct mempool *pool = mempool_find(alloc, size); if (pool == NULL) { /* Large-object allocation by slab_cache. */ lifo_push(&alloc->delayed_large, ptr); return; } /* Regular allocation in mempools */ if (lifo_is_empty(&pool->delayed)) lifo_push(&alloc->delayed, &pool->link); lifo_push(&pool->delayed, ptr); } else { smfree(alloc, ptr, size); } } /** Simplify iteration over small allocator mempools. 
*/ struct mempool_iterator { struct small_alloc *alloc; struct mempool *step_pool; struct factor_tree_iterator factor_iterator; }; void mempool_iterator_create(struct mempool_iterator *it, struct small_alloc *alloc) { it->alloc = alloc; it->step_pool = alloc->step_pools; factor_tree_ifirst(&alloc->factor_pools, &it->factor_iterator); } struct mempool * mempool_iterator_next(struct mempool_iterator *it) { if (it->step_pool < it->alloc->step_pools + STEP_POOL_MAX) return it->step_pool++; struct factor_pool *factor_pool = factor_tree_inext(&it->factor_iterator); if (factor_pool) { return &(factor_pool->pool); } return NULL; } /** Destroy all pools. */ void small_alloc_destroy(struct small_alloc *alloc) { struct mempool_iterator it; mempool_iterator_create(&it, alloc); struct mempool *pool; while ((pool = mempool_iterator_next(&it))) { mempool_destroy(pool); } lifo_init(&alloc->delayed); /* Free large allocations */ void *item; while ((item = lifo_pop(&alloc->delayed_large))) { struct slab *slab = slab_from_data(item); slab_put_large(alloc->cache, slab); } } /** Calculate allocation statistics. 
*/ void small_stats(struct small_alloc *alloc, struct small_stats *totals, mempool_stats_cb cb, void *cb_ctx) { memset(totals, 0, sizeof(*totals)); struct mempool_iterator it; mempool_iterator_create(&it, alloc); struct mempool *pool; while ((pool = mempool_iterator_next(&it))) { struct mempool_stats stats; mempool_stats(pool, &stats); totals->used += stats.totals.used; totals->total += stats.totals.total; if (cb(&stats, cb_ctx)) break; } } tarantool_1.9.1.26.g63eb81e3c/src/lib/small/rpm/0000755000000000000000000000000013306562360017453 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/small/rpm/small.spec0000644000000000000000000000252013306562360021436 0ustar rootrootName: small Version: 1.0.1 Release: 1%{?dist} Summary: Collection of Specialized Memory ALLocators Group: Development/Languages License: BSD URL: https://github.com/tarantool/small Source0: https://github.com/tarantool/%{name}/archive/%{version}/%{name}-%{version}.tar.gz BuildRequires: cmake >= 2.8 BuildRequires: gcc >= 4.5 %description Collection of Specialized Memory ALLocators for small allocations %package devel Summary: Collection of Specialized Memory ALLocators Requires: %{name}%{?_isa} = %{version}-%{release} %description devel Collection of Specialized Memory ALLocators for small allocations This package contains development files. %prep %setup -q -n %{name}-%{version} %build %cmake . 
-DCMAKE_BUILD_TYPE=RelWithDebInfo make %{?_smp_mflags} %check make %{?_smp_mflags} test %install %make_install %post -p /sbin/ldconfig %postun -p /sbin/ldconfig %files %{_libdir}/libsmall.so.1* %files devel %dir %{_includedir}/small %{_includedir}/small/*.h %{_includedir}/small/third_party/*.h %{_includedir}/small/third_party/valgrind/*.h %{_libdir}/libsmall.a # unversioned libraries should belong devel package %{_libdir}/libsmall.so %changelog * Wed Feb 17 2016 Roman Tsisyk 1.0.1-1 - Fix to comply Fedora Package Guidelines * Tue Oct 27 2015 Eugine Blikh 1.0.0-1 - Initial version of the RPM spec tarantool_1.9.1.26.g63eb81e3c/src/lib/small/LICENSE0000644000000000000000000000237313306562360017667 0ustar rootrootCopyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
tarantool_1.9.1.26.g63eb81e3c/src/lib/small/README.md0000644000000000000000000000707613306562360020146 0ustar rootroot# small - a collection of Specialized Memory ALLocators for small allocations [![Build Status](https://travis-ci.org/tarantool/small.png?branch=master)](https://travis-ci.org/tarantool/small) The library provides the following facilities: # quota Set a limit on the amount of memory all allocators use. Thread-safe. ## slab_arena To initialize an arena, you need a quota. Multiple arenas can use a shared quota object. Thread safe. Defines an API with two methods: map() and unmap(). Map returns a memory area. Unmap returns this area to the arena. All objects returned by arena have the same size, defined in initialization-time constant SLAB_MAX_SIZE. By default, SLAB_MAX_SIZE is 4M. All objects returned by arena are aligned by SLAB_MAX_SIZE: (ptr & (SLAB_MAX_SIZE - 1)) is always 0. SLAB_MAX_SIZE therefore must be a power of 2. Limiting SLAB_MAX_SIZE is important to avoid internal fragmentation. Multiple arenas can exist, an object must be returned to the same arena in which it was allocated. There is a number of different implementations of slab_arena API: - huge_arena: this implementation maps at initialization time a huge region of memory, and then uses this region to produce objects. Can be configured to use shared or private mappings. - grow_arena - mmaps() each individual block. Thus can incur fragmentation of the address space, but actually returns objects to the OS on unmap. Use of instances of slab_arena is thread-safe: multiple threads can use the same arena. ## slab_cache Requires an arena for initialization, which works as a memory source for slab_cache. Returns power-of-two sized slabs, with size-aligned address. Uses a buddy system to deal with memory fragmentation. Is expected to be thread-local. ## mempool A memory pool for objects of the same size. Thread local. Requires a slab cache, which works as a source of memory. 
Automatically defines the optimal slab size, given the object size. Supports alloc() and free(). ## region A typical region allocator. Very cheap allocation, but all memory can be freed at once only. Supports savepoints, i.e. an allocation point to which it can roll back, i.e. free all memory allocated after a savepoint. Uses slab_cache as a memory source. ## small A typical slab allocator. Built as a collection of mempool allocators, each mempool suited for a particular object size. Has stepped pools, i.e. pools for small objects up to 500 bytes, and factored pools, for larger objects. The difference between stepped and factored pools is that object size in stepped pools grows step by step, each next pool serving objects of prev_pool_object_size + 8. In factored pools a growth factor is used, i.e. given a factor of 1.1 and previous pool for objects of size up to 1000, next pool will serve objects in range 1001-1100. Since is based on mempool, uses slab_cache as a memory source. ## ibuf A typical input buffer, which could be seen as a memory allocator as well, which reallocates itself when it gets full. Uses slab_cache as a memory source. ## obuf Another implementation of an output buffer, which, for growth, instead of reallocation, used a collection of buffers, size of each next buffer in a collection twice the size the prevoius one. Uses slab_cache as a memory source. ## matras This is the best one. Memory Address Translating allocator. Only allows to allocate objects of size which is a power of two. Provides a 32-bit id for each allocated object. Supports multi-versioning for all allocated objects, i.e. it's possible to create a consistent read view of all allocated memory. Uses slab_cache as a memory source. 
tarantool_1.9.1.26.g63eb81e3c/src/lib/small/CMakeLists.txt0000644000000000000000000000417013306562360021417 0ustar rootrootproject(small C CXX) cmake_minimum_required(VERSION 2.8 FATAL_ERROR) if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Debug) endif() if(NOT DEFINED SMALL_EMBEDDED) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 -Wall -Wextra") set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -Werror") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra") set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -Werror") endif() # Enable GNU glibc extentions. add_definitions("-D_GNU_SOURCE") # Valgrind include_directories(third_party) set(lib_headers small/ibuf.h small/lf_lifo.h small/lifo.h small/matras.h small/mempool.h small/obuf.h small/quota.h small/rb.h small/region.h small/rlist.h small/slab_arena.h small/slab_cache.h small/small.h small/lsregion.h) set(lib_sources small/slab_cache.c small/region.c small/mempool.c small/slab_arena.c small/small.c small/matras.c small/ibuf.c small/obuf.c small/lsregion.c) add_library(${PROJECT_NAME} STATIC ${lib_sources}) enable_testing() add_subdirectory(test) if(DEFINED SMALL_EMBEDDED) # Don't build shared library and skip INSTALL() targets if this # library is used as submodule in other project. 
return() endif() option(ENABLE_VALGRIND "Enable integration with valgrind, a memory analyzing tool" OFF) if (NOT ENABLE_VALGRIND) add_definitions(-DNVALGRIND=1) endif() add_library(${PROJECT_NAME}_shared SHARED ${lib_sources}) set_target_properties(${PROJECT_NAME}_shared PROPERTIES VERSION 1.0 SOVERSION 1) set_target_properties(${PROJECT_NAME}_shared PROPERTIES OUTPUT_NAME ${PROJECT_NAME}) include(GNUInstallDirs) install(TARGETS ${PROJECT_NAME} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT library) install(TARGETS ${PROJECT_NAME}_shared ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT library) install(FILES ${lib_headers} DESTINATION include/${PROJECT_NAME}) install(DIRECTORY third_party DESTINATION include/${PROJECT_NAME} FILES_MATCHING PATTERN "*.h") tarantool_1.9.1.26.g63eb81e3c/src/lib/small/VERSION0000644000000000000000000000002213306562360017717 0ustar rootroot1.0.0-19-gc1b6abd tarantool_1.9.1.26.g63eb81e3c/src/lib/small/.travis.yml0000644000000000000000000000377013306562360020775 0ustar rootrootsudo: false language: C services: - docker cache: directories: - $HOME/.cache env: matrix: - OS=el DIST=6 - OS=el DIST=7 - OS=fedora DIST=24 - OS=fedora DIST=25 - OS=ubuntu DIST=precise - OS=ubuntu DIST=trusty - OS=ubuntu DIST=xenial - OS=ubuntu DIST=yakkety - OS=debian DIST=wheezy - OS=debian DIST=jessie - OS=debian DIST=stretch #matrix: # allow_failures: # - env: OS=el DIST=6 # - env: OS=el DIST=7 # - env: OS=fedora DIST=23 # - env: OS=fedora DIST=24 # - env: OS=fedora DIST=25 # - env: OS=ubuntu DIST=precise # - env: OS=ubuntu DIST=trusty # - env: OS=ubuntu DIST=xenial # - env: OS=ubuntu DIST=yakkety # - env: OS=debian DIST=wheezy # - env: OS=debian DIST=jessie # - env: OS=debian DIST=stretch script: - git describe --long - git clone https://github.com/packpack/packpack.git packpack - packpack/packpack before_deploy: - ls -l build/ deploy: # Deploy packages to 
PackageCloud - provider: packagecloud username: tarantool repository: "1_6" token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" - provider: packagecloud username: tarantool repository: "1_7" token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" - provider: packagecloud username: tarantool repository: "1_8" token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" notifications: email: recipients: - build@tarantool.org on_success: change on_failure: always tarantool_1.9.1.26.g63eb81e3c/src/lib/small/.gitignore0000644000000000000000000000030113306562360020637 0ustar rootroot*~ *.a *.o *.so* *.dylib* *.user *.cbp *.log obj-*/ CMakeFiles/ CMakeCache.txt cmake_install.cmake install_manifest.txt Makefile Doxyfile build/ CTestTestfile.cmake Testing CTestTestfile.cmake tarantool_1.9.1.26.g63eb81e3c/src/lib/small/Jenkinsfile0000644000000000000000000000026113306562360021040 0ustar rootrootstage('Build'){ packpack = new org.tarantool.packpack() node { checkout scm packpack.prepareSources() } packpack.packpackBuildMatrix('result') } tarantool_1.9.1.26.g63eb81e3c/src/lib/CMakeLists.txt0000664000000000000000000000033613306565107020313 0ustar rootrootadd_subdirectory(bit) add_subdirectory(bitset) set(SMALL_EMBEDDED ON) add_subdirectory(small) add_subdirectory(salad) add_subdirectory(csv) if(ENABLE_BUNDLED_MSGPUCK) add_subdirectory(msgpuck EXCLUDE_FROM_ALL) endif() tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/0000775000000000000000000000000013306565107016635 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/salad/rope.c0000664000000000000000000004446113306560010017743 0ustar rootroot/* * 
Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved. * * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. * * Permission is hereby granted to use or copy this program * for any purpose, provided the above notices are retained on all copies. * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. * * Author: Hans-J. Boehm (boehm@parc.xerox.com) */ /* * This is a rope implementation which uses AVL tree * balancing algorithm for rope tree balance. 
*/ #include "rope.h" #include #include #include #include static inline int rope_node_height(struct rope_node *node) { return node ? node->height : 0; } #if !defined(MAX) #define MAX(a, b) ((a) > (b) ? (a) : (b)) #endif /* MAX */ static inline void rope_relink(struct rope_node *node) { node->tree_size = (rope_node_size(node->link[0]) + rope_node_size(node->link[1]) + node->leaf_size); node->height = MAX(rope_node_height(node->link[0]), rope_node_height(node->link[1])) + 1; } static inline struct rope_node * rope_node_new(struct rope *rope, void *data, rope_size_t size) { struct rope_node *node = (struct rope_node *) rope->alloc(rope->alloc_ctx, sizeof(struct rope_node)); if (node == NULL) return NULL; node->height = 1; node->tree_size = node->leaf_size = size; node->data = data; node->link[0] = node->link[1] = NULL; return node; } void rope_clear(struct rope *rope) { struct rope_node *it = rope->root; struct rope_node *save; /* Destruction by rotation */ while (it != NULL) { if (it->link[0] == NULL) { /* Remove node */ save = it->link[1]; rope->free(rope->alloc_ctx, it); } else { /* Rotate right */ save = it->link[0]; it->link[0] = save->link[1]; save->link[1] = it; } it = save; } rope->root = NULL; } static struct rope_node * rope_node_split(struct rope *rope, struct rope_node *node, rope_size_t offset) { rope_size_t old_size = node->leaf_size; node->leaf_size = offset; void *data = rope->split(rope->split_ctx, node->data, old_size, offset); return rope_node_new(rope, data, old_size - offset); } static inline struct rope_node * avl_rotate_single(struct rope_node *parent, int direction) { struct rope_node *save = parent->link[!direction]; parent->link[!direction] = save->link[direction]; save->link[direction] = parent; /* First relink the parent, since it's now a child. 
*/ rope_relink(parent); rope_relink(save); return save; } static inline struct rope_node * avl_rotate_double(struct rope_node *parent, int direction) { parent->link[!direction] = avl_rotate_single(parent->link[!direction], !direction); return avl_rotate_single(parent, direction); } /** Rebalance the tree. */ static inline void avl_rebalance_after_insert(struct rope_node ***path, struct rope_node ***p_end, int insert_height) { while (p_end > path) { struct rope_node *left = **p_end--; struct rope_node *parent = **p_end; /* * To use the same rotation functions, set mirror * to 1 if left is right and right is left. */ int mirror = left != parent->link[0]; struct rope_node *right = parent->link[!mirror]; int left_height = rope_node_height(left); int right_height = rope_node_height(right); parent->height = MAX(left_height, right_height) + 1; /* * Rotations flattened the tree, so there is no * further changes in height up the insertion * path. */ if (left_height == right_height) break; /* * We've been adding a new child (children) to the * 'left' subtree, so it couldn't get shorter. * The old difference between subtrees was in the * range -1..1. So the new difference can only be * in the range -1..1 + height(new_node). */ if (left_height - right_height >= 2) { struct rope_node *l_left = left->link[mirror]; struct rope_node *l_right = left->link[!mirror]; int l_left_height = rope_node_height(l_left); int l_right_height = rope_node_height(l_right); /* * Rotate in the direction, opposite to * the skew. E.g. if we have two left-left * nodes hanging off the tree, rotate the * parent clockwise. If we have a left * node with a right child, rotate the * child counterclockwise, and then the whole * thing clockwise. */ if (l_left_height >= l_right_height) **p_end = avl_rotate_single(parent, !mirror); else **p_end = avl_rotate_double(parent, !mirror); /* * If we inserted only one node, no more * than 1 rotation is required (see * D. Knuth, Introduction to Algorithms, * vol. 3.). 
For 2 nodes, its max * 2 rotations. */ if (l_left_height != l_right_height && --insert_height == 0) break; } } } /* This is a copy-cat of the previous loop, * with the exception that the heuristic to break * the loop is different. */ static inline void avl_rebalance_after_delete(struct rope_node ***path, struct rope_node ***p_end) { while (p_end > path) { struct rope_node *left = **p_end--; struct rope_node *parent = **p_end; int mirror = left != parent->link[0]; struct rope_node *right = parent->link[!mirror]; int left_height = rope_node_height(left); int right_height = rope_node_height(right); parent->height = MAX(left_height, right_height) + 1; /* * Right was taller, and we deleted from the left. * We can break the loop since there can be no * changes in height up in the route. */ if (left_height - right_height == -1) break; if (left_height - right_height <= -2) { struct rope_node *r_left = right->link[mirror]; struct rope_node *r_right = right->link[!mirror]; int r_left_height = rope_node_height(r_left); int r_right_height = rope_node_height(r_right); if (r_left_height <= r_right_height) **p_end = avl_rotate_single(parent, mirror); else **p_end = avl_rotate_double(parent, mirror); } } } /** * Find a rope node which contains the substring at offset, * adjusting tree size with adjust_size and saving the path * in path. * * @return the end of the route. */ static inline struct rope_node *** avl_route_to_offset(struct rope_node ***path, rope_size_t *p_offset, ssize_t adjust_size) { rope_size_t offset = *p_offset; while (**path) { struct rope_node *node = **path; node->tree_size += adjust_size; rope_size_t left_size = rope_node_size(node->link[0]); if (offset < left_size) { /* The offset lays in the left subtree. */ *++path = &node->link[0]; } else { /* Make the new offset relative to the parent. */ offset -= left_size; if (offset < node->leaf_size) { /* Found. */ break; } else { /* * Make the offset relative to the * leftmost node in the right subtree. 
*/ offset -= node->leaf_size; } *++path = &node->link[1]; } } *p_offset = offset; return path; } /** * Route to successor or predecessor node of the node * in **path. It's either the rightmost leaf of the left child * (previous node) or leftmost leaf of the right child. */ static inline struct rope_node *** avl_route_to_next(struct rope_node ***path, int dir, rope_ssize_t adjust_size) { struct rope_node *node = **path; *++path = &node->link[dir]; while (**path) { node = **path; node->tree_size += adjust_size; *++path = &node->link[!dir]; } return path; } /** * A new node is always inserted at a leaf position. * If insertion unbalances the tree, the rebalancing * procedure may put the node into an intermediate position. * * While traversing the tree, we simultaneously update * tree sizes of all intermediate nodes, taking into account * the size of the new node. * * When insertion offset falls at the middle of an existing node, * we truncate this node and attach its tail to the left leaf * of the new node. This trim operation doesn't decrease the old * subtree height, and, while it does change subtree size * temporarily, as long as we attach the new node to the right * subtree of the truncated node, truncation has no effect on the * tree size either. * * Rebalancing, when it occurs, will correctly update subtree * heights and sizes of all modified nodes. */ int rope_insert(struct rope *rope, rope_size_t offset, void *data, rope_size_t size) { if (offset > rope_size(rope)) offset = rope_size(rope); assert(size); struct rope_node *new_node = rope_node_new(rope, data, size); if (new_node == NULL) return -1; struct rope_node **path[ROPE_HEIGHT_MAX]; path[0] = &rope->root; struct rope_node ***p_end = avl_route_to_offset(path, &offset, size); if (**p_end != NULL) { /* * The offset is inside an existing * substring in the rope. If offset is 0, * then insert the new node at the rightmost leaf * of the left child. 
Otherwise, cut the tail of * the substring, make it a prefix of the inserted * string, and insert the result at the leftmost * leaf of the right child. */ if (offset != 0) { struct rope_node *split_node; split_node = rope_node_split(rope, **p_end, offset); if (split_node == NULL) return -1; split_node->link[0] = new_node; split_node->height++; split_node->tree_size += new_node->tree_size; new_node = split_node; } p_end = avl_route_to_next(p_end, offset != 0, new_node->tree_size); } **p_end = new_node; avl_rebalance_after_insert(path, p_end, new_node->height); return 0; } /** Make sure there is a rope node at the given offset. */ struct rope_node * rope_extract_node(struct rope *rope, rope_size_t offset) { assert(offset < rope_size(rope)); struct rope_node **path[ROPE_HEIGHT_MAX]; path[0] = &rope->root; struct rope_node ***p_end = avl_route_to_offset(path, &offset, 0); if (offset == 0) return **p_end; struct rope_node *new_node = rope_node_split(rope, **p_end, offset); if (new_node == NULL) return NULL; p_end = avl_route_to_next(p_end, 1, new_node->tree_size); **p_end = new_node; avl_rebalance_after_insert(path, p_end, new_node->height); return new_node; } /** * Erase a single element from the rope. * This is a straightforward implementation for a single-element * deletion from a rope. A generic cut from a rope involves * 2 tree splits and one merge. * * When deleting a single element, 3 cases are possible: * - offset falls at a node with a single element. In this * case we perform a normal AVL tree delete. * - offset falls at the end or the beginning of an existing node * with leaf_size > 1. In that case we trim the existing node * and return. * - offset falls inside an existing node. In that case * we split the existing node at offset, and insert the tail. * * The implementation is a copycat of rope_insert(). If you're * trying to understand the code, it's recommended to start * from rope_insert(). 
*/ int rope_erase(struct rope *rope, rope_size_t offset) { assert(offset < rope_size(rope)); struct rope_node **path[ROPE_HEIGHT_MAX]; path[0] = &rope->root; struct rope_node ***p_end = avl_route_to_offset(path, &offset, -1); struct rope_node *node = **p_end; if (node->leaf_size > 1) { /* Check if we can simply trim the node. */ if (offset == 0) { /* Cut the head. */ node->data = rope->split(rope->split_ctx, node->data, node->leaf_size, 1); node->leaf_size -= 1; return 0; } rope_size_t size = node->leaf_size; /* Cut the tail */ void *next = rope->split(rope->split_ctx, node->data, node->leaf_size, offset); node->leaf_size = offset; if (offset == size - 1) return 0; /* Trimmed the tail, nothing else to do */ /* * Offset falls inside a substring. Erase the * first field and insert the tail. */ next = rope->split(rope->split_ctx, next, size - offset, 1); struct rope_node *new_node = rope_node_new(rope, next, size - offset - 1); if (new_node == NULL) return -1; /* Trim the old node. */ p_end = avl_route_to_next(p_end, 1, new_node->tree_size); **p_end = new_node; avl_rebalance_after_insert(path, p_end, new_node->height); return 0; } /* We need to delete the node. */ assert(offset == 0); int direction; if (node->link[0] != NULL && node->link[1] != NULL) { /* * The node has two non-NULL leaves. We can't * simply delete the node since in that case we * won't know what to do with one of the leaves. * Instead of deleting the node, store in it data * from the rightmost node in the left subtree, or * the leftmost node in the right subtree, * (depending on which subtree is taller), and * delete this leftmost/rightmost node instead. */ struct rope_node *save = node; direction = node->link[1]->height > node->link[0]->height; p_end = avl_route_to_next(p_end, direction, 0) - 1; node = **p_end; /* Move the data pointers. */ save->data = node->data; save->leaf_size = node->leaf_size; /* * Now follow the path again and update tree_size * in the parents of the moved child. 
*/ save = save->link[direction]; while (save != node) { save->tree_size -= node->leaf_size; save = save->link[!direction]; } } else { /* * Left or right subtree are NULL, so we * can simply put the non-NULL leaf in place * of the parent. */ direction = node->link[0] == NULL; } **p_end = node->link[direction]; rope->free(rope, node); avl_rebalance_after_delete(path, p_end); return 0; } /** * Traverse left until the left subtree is NULL, * save the path in iter->path. * @pre iter->path[iter->top] is not NULL * @post iter->path[iter->top] is not NULL and points to the last * not-NULL node. */ static inline void rope_iter_down_to_leaf(struct rope_iter *it) { while (it->top[0]->link[0] != NULL) { it->top[1] = it->top[0]->link[0]; it->top++; } } struct rope_node * rope_iter_start(struct rope_iter *it) { it->top = it->path; it->top[0] = it->rope->root; if (it->top[0] != NULL) rope_iter_down_to_leaf(it); return it->top[0]; } struct rope_node * rope_iter_next(struct rope_iter *it) { if (it->top[0]->link[1] != NULL) { it->top[1] = it->top[0]->link[1]; it->top++; rope_iter_down_to_leaf(it); } else { /* * Right subtree is NULL. Left subtree is fully * traversed (guaranteed by the order in which we * iterate). Pop up the path until the current * node points to a link we haven't visited * yet: this is the case when we return to the * parent from its left child. */ do { /* * Returned to the root from the right * subtree: the tree is fully traversed. */ if (it->top == it->path) { /* * Crash, rather than infinite loop * if next() is called beyond last. */ it->top[0] = NULL; return NULL; } it->top--; } while (it->top[1] == it->top[0]->link[1]); } return *it->top; } /** Apply visit_leaf function to every rope leaf. 
*/ void rope_traverse(struct rope *rope, void (*visit_leaf)(void *, size_t)) { struct rope_iter iter; rope_iter_create(&iter, rope); struct rope_node *leaf; for (leaf = rope_iter_start(&iter); leaf != NULL; leaf = rope_iter_next(&iter)) { visit_leaf(leaf->data, leaf->leaf_size); } } void rope_check(struct rope *rope) { struct rope_iter iter; rope_iter_create(&iter, rope); struct rope_node *node; for (node = rope_iter_start(&iter); node != NULL; node = rope_iter_next(&iter)) { assert(node->leaf_size != 0); assert(node->tree_size == rope_node_size(node->link[0]) + rope_node_size(node->link[1]) + node->leaf_size); assert(node->height == (MAX(rope_node_height(node->link[0]), rope_node_height(node->link[1])) + 1)); if (node->leaf_size == 0 || node->tree_size != (rope_node_size(node->link[0]) + rope_node_size(node->link[1]) + node->leaf_size) || node->height != MAX(rope_node_height(node->link[0]), rope_node_height(node->link[1])) + 1) abort(); } } static void rope_node_print(struct rope_node *node, void (*print)(void *, size_t), const char *prefix, int dir) { const char *conn[] = { "┌──", "└──" }; const char *padding[] = { "│ ", " " }; rope_size_t child_prefix_len = strlen(prefix) + strlen(padding[0]) + 1; char *child_prefix = malloc(child_prefix_len); if (node && (node->link[0] || node->link[1])) { snprintf(child_prefix, child_prefix_len - 1, "%s%s", prefix, padding[!dir]); rope_node_print(node->link[0], print, child_prefix, 0); } snprintf(child_prefix, child_prefix_len - 1, "%s%s", prefix, padding[dir]); printf("%s%s", prefix, conn[dir]); if (node == NULL) { printf("nil\n"); } else { printf("{ len = %zu, height = %d, data = '", (size_t) node->leaf_size, node->height); print(node->data, node->leaf_size); printf("'}\n"); if (node->link[0] || node->link[1]) rope_node_print(node->link[1], print, child_prefix, 1); } free(child_prefix); } void rope_pretty_print(struct rope *rope, void (*print_leaf)(void *, size_t)) { printf("size = %zu\nstring = '", (size_t) 
rope_size(rope)); rope_traverse(rope, print_leaf); printf("'\n"); rope_node_print(rope->root, print_leaf, "", true); printf("\n"); } tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/rtree.h0000664000000000000000000002405613306560010020122 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SALAD_RTREE_H #define INCLUDES_TARANTOOL_SALAD_RTREE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include "small/matras.h" #define RB_COMPACT 1 #include "small/rb.h" /** * In-memory Guttman's R-tree */ /* Type of payload data */ typedef void *record_t; /* Type of coordinate */ typedef double coord_t; /* Type of square coordinate */ typedef double sq_coord_t; /* Type of area (volume) of rectangle (box) */ typedef double area_t; #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct rtree_neighbor { rb_node(struct rtree_neighbor) link; struct rtree_neighbor *next; void *child; int level; sq_coord_t distance; }; typedef rb_tree(struct rtree_neighbor) rtnt_t; enum { /** Maximal possible R-tree height */ RTREE_MAX_HEIGHT = 16, /** Maximal possible R-tree height */ RTREE_MAX_DIMENSION = 20 }; /** * Rtree search operations. Used for searching and iterations. * All operations except SOP_ALL reqires a rectangle to be set, * and treat it in different ways */ enum spatial_search_op { /* Find and itearate all records */ SOP_ALL, /* Find and itearate records with the same rectangle */ SOP_EQUALS, /* Find and itearate records that contain given rectangle */ SOP_CONTAINS, /* Find and itearate records that strictly contain given rectangle */ SOP_STRICT_CONTAINS, /* Find and itearate records that overlaps with given rectangle */ SOP_OVERLAPS, /* Find and itearate records that belongs to given rectangle */ SOP_BELONGS, /* Find and itearate records that strictly belongs to given rectangle */ SOP_STRICT_BELONGS, /* Find and itearate nearest records from a given point (the point is * acluattly lowest_point of given rectangle). Records are iterated in * order of distance to given point. 
Yes, it is KNN iterator */ SOP_NEIGHBOR }; /* pointers to page allocation and deallocations functions */ typedef void *(*rtree_extent_alloc_t)(void *ctx); typedef void (*rtree_extent_free_t)(void *ctx, void *extent); /* A box in RTREE_DIMENSION space */ struct rtree_rect { /* coords: { low X, upper X, low Y, upper Y, etc } */ coord_t coords[RTREE_MAX_DIMENSION * 2]; }; /* Type of function, comparing two rectangles */ typedef bool (*rtree_comparator_t)(const struct rtree_rect *rt1, const struct rtree_rect *rt2, unsigned dimension); /* Type distance comparison */ enum rtree_distance_type { RTREE_EUCLID = 0, /* Euclid distance, sqrt(dx*dx + dy*dy) */ RTREE_MANHATTAN = 1 /* Manhattan distance, fabs(dx) + fabs(dy) */ }; /* Main rtree struct */ struct rtree { /* Root node (page) */ struct rtree_page *root; /* R-tree dimension */ unsigned dimension; /* Minimal number of branches in tree page */ unsigned page_min_fill; /* Maximal number of branches in tree page */ unsigned page_max_fill; /* Page size in bytes */ unsigned page_size; /* Page branch size in bytes */ unsigned page_branch_size; /* For iterator usage, pages are splitted into structs neighbours * Here is number of neighbours fit into one page */ unsigned neighbours_in_page; /* Number of records in entire tree */ unsigned n_records; /* Height of a tree */ unsigned height; /* Unique version that increments on every tree modification */ unsigned version; /* Number of allocated (used) pages */ unsigned n_pages; /* Matras for allocating new page */ struct matras mtab; /* List of free pages */ void *free_pages; /* Distance type */ enum rtree_distance_type distance_type; }; /* Struct for iteration and retrieving rtree values */ struct rtree_iterator { /* Pointer to rtree */ const struct rtree *tree; /* Rectangle of current iteration operation */ struct rtree_rect rect; /* Type of current iteration operation */ enum spatial_search_op op; /* Flag that means that no more values left */ bool eof; /* A verion of a tree when 
the iterator was created */ unsigned version; /* Special rb tree of closest neqighbors * Used only for iteration with op = SOP_NEIGHBOR * For allocating list entries, page allocator of tree is used. * Allocated page is much bigger than list entry and thus * provides several list entries. */ rtnt_t neigh_tree; /* List of unused (deleted) list entries */ struct rtree_neighbor *neigh_free_list; /* List of tree pages, allocated for list entries */ struct rtree_neighbor_page *page_list; /* Position of ready-to-use list entry in allocated page */ unsigned page_pos; /* Comparators for comparison rectagnle of the iterator with * rectangles of tree nodes. If the comparator returns true, * the node is accepted; if false - skipped. */ /* Comparator for interanal (not leaf) nodes of the tree */ rtree_comparator_t intr_cmp; /* Comparator for leaf nodes of the tree */ rtree_comparator_t leaf_cmp; /* Current path of search in tree */ struct { struct rtree_page *page; int pos; } stack[RTREE_MAX_HEIGHT]; }; /** * @brief Rectangle normalization. Makes lower_point member to be vertex * with minimal coordinates, and upper_point - with maximal coordinates. * Useful when the rectangle is initialized with two diagonal vertexes that * could be not lowest and highest correspondingly. 
* @param rect - pointer to a rectangle */ void rtree_rect_normalize(struct rtree_rect *rect, unsigned dimension); /** * @brief Set up 2D rectangle by 4 coordinates * @param rect - pointer to a rectangle * @params left, bottom, right, top - corresponding coordinates */ void rtree_set2d(struct rtree_rect *rect, coord_t left, coord_t bottom, coord_t right, coord_t top); /** * @brief Set up 2D rectangle by 2 coordinates (set to point) * @param rect - pointer to a rectangle * @params x, y - corresponding coordinates */ void rtree_set2dp(struct rtree_rect *rect, coord_t x, coord_t y); /** * @brief Initialize a tree * @param tree - pointer to a tree * @param extent_size - size of extents allocated by extent_alloc (see next) * @param extent_alloc - extent allocation function * @param extent_free - extent deallocation function * @param alloc_ctx - argument passed to extent allocator * @return 0 on success, -1 on error */ int rtree_init(struct rtree *tree, unsigned dimension, uint32_t extent_size, rtree_extent_alloc_t extent_alloc, rtree_extent_free_t extent_free, void *alloc_ctx, enum rtree_distance_type distance_type); /** * @brief Destroy a tree * @param tree - pointer to a tree */ void rtree_destroy(struct rtree *tree); /** * @brief Delete all data from a tree, i.e. 
make it empty * @param tree - pointer to a tree */ void rtree_purge(struct rtree *tree); /** * @brief Find a record in a tree * @return true if at least one record found (false otherwise) * @param tree - pointer to a tree * @param rect - rectangle to find (the meaning depends on op argument) * @param op - type of search, see enum spatial_search_op for details * @param itr - pointer to iterator (must be initialized earlier), * iterator itr should be used for accessing found record */ bool rtree_search(const struct rtree *tree, const struct rtree_rect *rect, enum spatial_search_op op, struct rtree_iterator *itr); /** * @brief Insert a record to the tree * @param tree - pointer to a tree * @param rect - rectangle to insert * @param obj - record to insert */ void rtree_insert(struct rtree *tree, struct rtree_rect *rect, record_t obj); /** * @brief Remove the record from a tree * @return true if the record deleted (false otherwise) * @param tree - pointer to a tree * @param rect - rectangle of the record to delete * @param obj - record to delete */ bool rtree_remove(struct rtree *tree, const struct rtree_rect *rect, record_t obj); /** * @brief Size of memory used by tree * @param tree - pointer to a tree **/ size_t rtree_used_size(const struct rtree *tree); /** * @brief Number of records in the tree * @param tree - pointer to a tree **/ unsigned rtree_number_of_records(const struct rtree *tree); #if 0 /** * @brief Print a tree to stdout. Debug function, thus disabled. 
* Needs to be included before * @param tree - pointer to a tree **/ void rtree_debug_print(const struct rtree *tree); #endif /** * @brief Initialize an iterator for rtree * Every iterator must be initialized before any usage * @param itr - pointer to a iterator **/ void rtree_iterator_init(struct rtree_iterator *itr); /** * @brief Destroy an iterator * Every iterator must be destroyed * @param itr - pointer to a iterator **/ void rtree_iterator_destroy(struct rtree_iterator *itr); /** * @brief Retrieve a record from the iterator and iterate it to the next record * @return a record or NULL if no more records * @param itr - pointer to a iterator **/ record_t rtree_iterator_next(struct rtree_iterator *itr); #if defined(__cplusplus) } /* extern "C" { */ #endif /* defined(__cplusplus) */ #endif /* #ifndef INCLUDES_TARANTOOL_SALAD_RTREE_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/fifo.h0000664000000000000000000000574013306560010017723 0ustar rootroot#ifndef TARANTOOL_FIFO_H_INCLUDED #define TARANTOOL_FIFO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #define FIFO_WATERMARK (512 * sizeof(void*)) /** A simple FIFO made using a ring buffer */ struct fifo { char *buf; size_t bottom; /* advanced by batch free */ size_t top; size_t size; /* total buffer size */ }; static inline int fifo_create(struct fifo *q, size_t size) { q->size = size; q->bottom = 0; q->top = 0; q->buf = (char*)malloc(size); return (q->buf == NULL ? -1 : 0); } static inline void fifo_destroy(struct fifo *q) { if (q->buf) { free(q->buf); q->buf = NULL; } } static inline int fifo_size(struct fifo *q) { return (q->top - q->bottom) / sizeof(void*); } #ifndef unlikely # define unlikely __builtin_expect(!! (EXPR), 0) #endif static inline int fifo_push(struct fifo *q, void *ptr) { /* reduce memory allocation and memmove * effect by reusing free pointers buffer space only after the * watermark frees reached. 
*/ if (unlikely(q->bottom >= FIFO_WATERMARK)) { memmove(q->buf, q->buf + q->bottom, q->bottom); q->top -= q->bottom; q->bottom = 0; } if (unlikely((q->top + sizeof(void*)) > q->size)) { size_t newsize = q->size * 2; char *ptr = (char*)realloc((void*)q->buf, newsize); if (unlikely(ptr == NULL)) return -1; q->buf = ptr; q->size = newsize; } memcpy(q->buf + q->top, (char*)&ptr, sizeof(ptr)); q->top += sizeof(void*); return 0; } static inline void * fifo_pop(struct fifo *q) { if (unlikely(q->bottom == q->top)) return NULL; void *ret = *(void**)(q->buf + q->bottom); q->bottom += sizeof(void*); return ret; } #undef FIFO_WATERMARK #endif /* TARANTOOL_FIFO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/README0000664000000000000000000000005413306560010017500 0ustar rootrootsalad - Some ALgorithms And Data structures tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/guava.c0000664000000000000000000000376513306560010020103 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lib/salad/guava.h" #include /** * This is implements a consistent hashing algorithm * A Fast, Minimal Memory, Consistent Hash Algorithm * John Lamping, Eric Veach */ static const int64_t K = 2862933555777941757; static const double D = 0x1.0p31; static inline double lcg(int64_t *state) { return (double )((int32_t)(((uint64_t )*state >> 33) + 1)) / D; } int32_t guava(int64_t state, int32_t buckets) { int32_t candidate = 0; int32_t next; while (1) { state = K * state + 1; next = (int32_t)((candidate + 1) / lcg(&state)); if (next >= 0 && next < buckets) candidate = next; else return candidate; } } tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/rtree.c0000664000000000000000000007756313306560010020130 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "rtree.h" #include #include #include #include #include /*------------------------------------------------------------------------- */ /* R-tree internal structures definition */ /*------------------------------------------------------------------------- */ enum { /* rtree will try to determine optimal page size */ RTREE_OPTIMAL_BRANCHES_IN_PAGE = 18, /* actual number of branches could be up to double of the previous * constant */ RTREE_MAXIMUM_BRANCHES_IN_PAGE = RTREE_OPTIMAL_BRANCHES_IN_PAGE * 2 }; struct rtree_page_branch { union { struct rtree_page *page; record_t record; } data; struct rtree_rect rect; }; enum { RTREE_BRANCH_DATA_SIZE = offsetof(struct rtree_page_branch, rect) }; struct rtree_page { /* number of branches at page */ unsigned n; /* branches */ struct rtree_page_branch data[]; }; struct rtree_neighbor_page { struct rtree_neighbor_page* next; struct rtree_neighbor buf[]; }; struct rtree_reinsert_list { struct rtree_page *chain; int level; }; static int neighbor_cmp(struct rtree_neighbor *a, struct rtree_neighbor *b) { return a->distance < b->distance ? -1 : a->distance > b->distance ? 1 : a->level < b->level ? -1 : a->level > b->level ? 1 : a < b ? -1 : a > b ? 
1 : 0; return 0; } rb_gen(, rtnt_, rtnt_t, struct rtree_neighbor, link, neighbor_cmp); /*------------------------------------------------------------------------- */ /* R-tree rectangle methods */ /*------------------------------------------------------------------------- */ void rtree_rect_normalize(struct rtree_rect *rect, unsigned dimension) { for (int i = dimension; --i >= 0; ) { coord_t *coords = &rect->coords[2 * i]; if (coords[0] <= coords[1]) continue; coord_t tmp = coords[0]; coords[0] = coords[1]; coords[1] = tmp; } } static void rtree_rect_copy(struct rtree_rect *to, const struct rtree_rect *from, unsigned dimension) { for (int i = dimension * 2; --i >= 0; ) to->coords[i] = from->coords[i]; } void rtree_set2d(struct rtree_rect *rect, coord_t left, coord_t bottom, coord_t right, coord_t top) { rect->coords[0] = left; rect->coords[1] = right; rect->coords[2] = bottom; rect->coords[3] = top; } void rtree_set2dp(struct rtree_rect *rect, coord_t x, coord_t y) { rect->coords[0] = x; rect->coords[1] = x; rect->coords[2] = y; rect->coords[3] = y; } /* Manhattan distance */ static sq_coord_t rtree_rect_neigh_distance(const struct rtree_rect *rect, const struct rtree_rect *neigh_rect, unsigned dimension) { sq_coord_t result = 0; for (int i = dimension; --i >= 0; ) { const coord_t *coords = &rect->coords[2 * i]; coord_t neigh_coord = neigh_rect->coords[2 * i]; if (neigh_coord < coords[0]) { sq_coord_t diff = (sq_coord_t)(neigh_coord - coords[0]); result += -diff; } else if (neigh_coord > coords[1]) { sq_coord_t diff = (sq_coord_t)(neigh_coord - coords[1]); result += diff; } } return result; } /* Euclid distance, squared */ static sq_coord_t rtree_rect_neigh_distance2(const struct rtree_rect *rect, const struct rtree_rect *neigh_rect, unsigned dimension) { sq_coord_t result = 0; for (int i = dimension; --i >= 0; ) { const coord_t *coords = &rect->coords[2 * i]; coord_t neigh_coord = neigh_rect->coords[2 * i]; if (neigh_coord < coords[0]) { sq_coord_t diff = 
(sq_coord_t)(neigh_coord - coords[0]);
			result += diff * diff;
		} else if (neigh_coord > coords[1]) {
			sq_coord_t diff =
				(sq_coord_t)(neigh_coord - coords[1]);
			result += diff * diff;
		}
	}
	return result;
}

/* Hyper-volume of the rectangle: product of its interval lengths. */
static area_t
rtree_rect_area(const struct rtree_rect *rect, unsigned dimension)
{
	area_t area = 1;
	for (int i = dimension; --i >= 0; ) {
		const coord_t *coords = &rect->coords[2 * i];
		area *= coords[1] - coords[0];
	}
	return area;
}

/* Half-margin of the rectangle: sum of its interval lengths
 * (half of the generalized perimeter). */
static coord_t
rtree_rect_half_margin(const struct rtree_rect *rect, unsigned dimension)
{
	coord_t hm = 0;
	for (int i = dimension; --i >= 0; ) {
		const coord_t *coords = &rect->coords[2 * i];
		hm += coords[1] - coords[0];
	}
	return hm;
}

/* Grow @a to in place so that it also covers @a item. */
static void
rtree_rect_add(struct rtree_rect *to, const struct rtree_rect *item,
	       unsigned dimension)
{
	for (int i = dimension; --i >= 0; ) {
		coord_t *to_coords = &to->coords[2 * i];
		const coord_t *item_coords = &item->coords[2 * i];
		if (to_coords[0] > item_coords[0])
			to_coords[0] = item_coords[0];
		if (to_coords[1] < item_coords[1])
			to_coords[1] = item_coords[1];
	}
}

/* Minimum of two coordinates. */
static coord_t
rtree_min(coord_t a, coord_t b)
{
	return a < b ? a : b;
}

/* Maximum of two coordinates. */
static coord_t
rtree_max(coord_t a, coord_t b)
{
	return a > b ?
a : b;
}

/* Tight cover: per dimension, take [min of lows, max of highs]
 * over both input rectangles. */
static void
rtree_rect_cover(const struct rtree_rect *item1,
		 const struct rtree_rect *item2,
		 struct rtree_rect *result, unsigned dimension)
{
	for (int i = dimension; --i >= 0; ) {
		const coord_t *i1_coords = &item1->coords[2 * i];
		const coord_t *i2_coords = &item2->coords[2 * i];
		coord_t *r_coords = &result->coords[2 * i];
		r_coords[0] = rtree_min(i1_coords[0], i2_coords[0]);
		r_coords[1] = rtree_max(i1_coords[1], i2_coords[1]);
	}
}

/* Intersection of two rectangles; a dimension with no overlap is
 * encoded as the zero-width interval [0, 0]. */
static void
rtree_rect_intersection(const struct rtree_rect *item1,
			const struct rtree_rect *item2,
			struct rtree_rect *result, unsigned dimension)
{
	for (int i = dimension; --i >= 0; ) {
		const coord_t *i1_coords = &item1->coords[2 * i];
		const coord_t *i2_coords = &item2->coords[2 * i];
		coord_t *r_coords = &result->coords[2 * i];
		if (i1_coords[0] > i2_coords[1] ||
		    i1_coords[1] < i2_coords[0])
			r_coords[0] = r_coords[1] = 0;
		else {
			r_coords[0] = rtree_max(i1_coords[0], i2_coords[0]);
			r_coords[1] = rtree_min(i1_coords[1], i2_coords[1]);
		}
	}
}

/* True iff the rectangles overlap (touching boundaries count). */
static bool
rtree_rect_intersects_rect(const struct rtree_rect *rt1,
			   const struct rtree_rect *rt2,
			   unsigned dimension)
{
	for (int i = dimension; --i >= 0; ) {
		const coord_t *coords1 = &rt1->coords[2 * i];
		const coord_t *coords2 = &rt2->coords[2 * i];
		if (coords1[0] > coords2[1] || coords1[1] < coords2[0])
			return false;
	}
	return true;
}

/* True iff rt1 lies inside rt2 (shared boundaries allowed). */
static bool
rtree_rect_in_rect(const struct rtree_rect *rt1,
		   const struct rtree_rect *rt2, unsigned dimension)
{
	for (int i = dimension; --i >= 0; ) {
		const coord_t *coords1 = &rt1->coords[2 * i];
		const coord_t *coords2 = &rt2->coords[2 * i];
		if (coords1[0] < coords2[0] || coords1[1] > coords2[1])
			return false;
	}
	return true;
}

/* True iff rt1 lies strictly inside rt2 (no shared boundary). */
static bool
rtree_rect_strict_in_rect(const struct rtree_rect *rt1,
			  const struct rtree_rect *rt2, unsigned dimension)
{
	for (int i = dimension; --i >= 0; ) {
		const coord_t *coords1 = &rt1->coords[2 * i];
		const coord_t *coords2 = &rt2->coords[2 * i];
		if (coords1[0] <= coords2[0] || coords1[1] >= coords2[1])
			return false;
	}
	return true;
}
/* True iff rt1 contains rt2 (boundary touch allowed). */
static bool
rtree_rect_holds_rect(const struct rtree_rect *rt1,
		      const struct rtree_rect *rt2, unsigned dimension)
{
	return rtree_rect_in_rect(rt2, rt1, dimension);
}

/* True iff rt1 strictly contains rt2. */
static bool
rtree_rect_strict_holds_rect(const struct rtree_rect *rt1,
			     const struct rtree_rect *rt2,
			     unsigned dimension)
{
	return rtree_rect_strict_in_rect(rt2, rt1, dimension);
}

/* Exact coordinate-wise equality of two rectangles. */
static bool
rtree_rect_equal_to_rect(const struct rtree_rect *rt1,
			 const struct rtree_rect *rt2, unsigned dimension)
{
	for (int i = dimension * 2; --i >= 0; )
		if (rt1->coords[i] != rt2->coords[i])
			return false;
	return true;
}

/* Comparator used for SOP_ALL iteration: matches every rectangle. */
static bool
rtree_always_true(const struct rtree_rect *rt1,
		  const struct rtree_rect *rt2, unsigned dimension)
{
	(void) rt1;
	(void) rt2;
	(void) dimension;
	return true;
}

/*------------------------------------------------------------------------- */
/* R-tree page methods */
/*------------------------------------------------------------------------- */

/* Take a page from the tree's free list if one is available,
 * otherwise carve a fresh page out of the matras allocator. */
static struct rtree_page *
rtree_page_alloc(struct rtree *tree)
{
	if (tree->free_pages) {
		struct rtree_page *result =
			(struct rtree_page *)tree->free_pages;
		/* The first word of a free page links to the next one. */
		tree->free_pages = *(void **)tree->free_pages;
		return result;
	} else {
		uint32_t unused_id;
		return (struct rtree_page *)
			matras_alloc(&tree->mtab, &unused_id);
	}
}

/* Push a page onto the intrusive free list; its memory is kept
 * for reuse, not returned to the allocator. */
static void
rtree_page_free(struct rtree *tree, struct rtree_page *page)
{
	*(void **)page = tree->free_pages;
	tree->free_pages = (void *)page;
}

/* Address of the ind-th branch of a page; branches are laid out
 * with a runtime stride of tree->page_branch_size bytes. */
static struct rtree_page_branch *
rtree_branch_get(const struct rtree *tree, const struct rtree_page *page,
		 unsigned ind)
{
	return (struct rtree_page_branch *)
		((char *)page->data + ind * tree->page_branch_size);
}

/* Copy a branch: its payload union and its bounding rectangle. */
static void
rtree_branch_copy(struct rtree_page_branch *to,
		  const struct rtree_page_branch *from, unsigned dimension)
{
	to->data = from->data;
	rtree_rect_copy(&to->rect, &from->rect, dimension);
}

/* Chain reinsert-list pages through their last branch slot. */
static void
set_next_reinsert_page(const struct rtree *tree, struct rtree_page *page,
		       struct rtree_page *next_page)
{
	/* The page must be MIN_FILLed, so last branch is unused */
	struct
rtree_page_branch *b = rtree_branch_get(tree, page, tree->page_max_fill - 1); b->data.page = next_page; } struct rtree_page * get_next_reinsert_page(const struct rtree *tree, const struct rtree_page *page) { struct rtree_page_branch *b = rtree_branch_get(tree, page, tree->page_max_fill - 1); return b->data.page; } /* Calculate cover of all rectangles at page */ static void rtree_page_cover(const struct rtree *tree, const struct rtree_page *page, struct rtree_rect *res) { rtree_rect_copy(res, &rtree_branch_get(tree, page, 0)->rect, tree->dimension); for (unsigned i = 1; i < page->n; i++) { rtree_rect_add(res, &rtree_branch_get(tree, page, i)->rect, tree->dimension); } } /* Create root page by first inserting record */ static void rtree_page_init_with_record(const struct rtree *tree, struct rtree_page *page, struct rtree_rect *rect, record_t obj) { struct rtree_page_branch *b = rtree_branch_get(tree, page, 0); page->n = 1; rtree_rect_copy(&b->rect, rect, tree->dimension); b->data.record = obj; } /* Create new root page (root splitting) */ static void rtree_page_init_with_pages(const struct rtree *tree, struct rtree_page *page, struct rtree_page *page1, struct rtree_page *page2) { page->n = 2; struct rtree_page_branch *b = rtree_branch_get(tree, page, 0); rtree_page_cover(tree, page1, &b->rect); b->data.page = page1; b = rtree_branch_get(tree, page, 1); rtree_page_cover(tree, page2, &b->rect); b->data.page = page2; } static struct rtree_page * rtree_split_page(struct rtree *tree, struct rtree_page *page, const struct rtree_page_branch *br) { assert(page->n == tree->page_max_fill); const struct rtree_rect *rects[RTREE_MAXIMUM_BRANCHES_IN_PAGE + 1]; unsigned ids[RTREE_MAXIMUM_BRANCHES_IN_PAGE + 1]; rects[0] = &br->rect; ids[0] = 0; for (unsigned i = 0; i < page->n; i++) { struct rtree_page_branch *b = rtree_branch_get(tree, page, i); rects[i + 1] = &b->rect; ids[i + 1] = i + 1; } const unsigned n = page->n + 1; const unsigned k_max = n - 2 * tree->page_min_fill; 
unsigned d = tree->dimension; unsigned best_axis = 0; coord_t best_s = 0; for (unsigned a = 0; a < d; a++) { for (unsigned i = 0; i < n - 1; i++) { unsigned min_i = i; coord_t min_l = rects[ids[i]]->coords[2 * a]; coord_t min_r = rects[ids[i]]->coords[2 * a + 1]; for (unsigned j = i + 1; j < n; j++) { coord_t l = rects[ids[j]]->coords[2 * a]; coord_t r = rects[ids[j]]->coords[2 * a + 1]; if (l < min_l || (l == min_l && r < min_r)) { min_i = j; min_l = l; min_r = r; } } unsigned tmp = ids[i]; ids[i] = ids[min_i]; ids[min_i] = tmp; } struct rtree_rect test_rect; coord_t dir_hm[RTREE_MAXIMUM_BRANCHES_IN_PAGE + 1]; coord_t rev_hm[RTREE_MAXIMUM_BRANCHES_IN_PAGE + 1]; dir_hm[0] = 0; rtree_rect_copy(&test_rect, rects[ids[0]], d); dir_hm[1] = rtree_rect_half_margin(&test_rect, d); for (unsigned i = 1; i < n - tree->page_min_fill; i++) { rtree_rect_add(&test_rect, rects[ids[i]], d); dir_hm[i + 1] = rtree_rect_half_margin(&test_rect, d); } rev_hm[0] = 0; rtree_rect_copy(&test_rect, rects[ids[n - 1]], d); rev_hm[1] = rtree_rect_half_margin(&test_rect, d); for (unsigned i = 1; i < n - tree->page_min_fill; i++) { rtree_rect_add(&test_rect, rects[ids[n - i - 1]], d); rev_hm[i + 1] = rtree_rect_half_margin(&test_rect, d); } coord_t s = 0; for (unsigned k = 0; k < k_max; k++) { unsigned k1 = tree->page_min_fill + k; unsigned k2 = n - k1; s += dir_hm[k1] + rev_hm[k2]; } if (a == 0 || s < best_s) { best_axis = a; best_s = s; } } unsigned a = best_axis; for (unsigned i = 0; i < n - 1; i++) { unsigned min_i = i; coord_t min_l = rects[ids[i]]->coords[2 * a]; coord_t min_r = rects[ids[i]]->coords[2 * a + 1]; for (unsigned j = i + 1; j < n; j++) { coord_t l = rects[ids[j]]->coords[2 * a]; coord_t r = rects[ids[j]]->coords[2 * a + 1]; if (l < min_l || (l == min_l && r < min_r)) { min_i = j; min_l = l; min_r = r; } } unsigned tmp = ids[i]; ids[i] = ids[min_i]; ids[min_i] = tmp; } area_t min_overlap = 0; area_t min_area = 0; unsigned min_k = 0; for (unsigned k = 0; k < k_max; k++) { 
unsigned k1 = tree->page_min_fill + k; /* unsigned k2 = n - k1; */ struct rtree_rect rt1, rt2, over_rt; rtree_rect_copy(&rt1, rects[ids[0]], d); for (unsigned i = 1; i < k1; i++) { rtree_rect_add(&rt1, rects[ids[i]], d); } rtree_rect_copy(&rt2, rects[ids[k1]], d); for (unsigned i = k1 + 1; i < n; i++) { rtree_rect_add(&rt2, rects[ids[i]], d); } rtree_rect_intersection(&rt1, &rt2, &over_rt, d); area_t overlap = rtree_rect_area(&over_rt, d); area_t area = rtree_rect_area(&rt1, d) + rtree_rect_area(&rt2, d); if (k == 0 || overlap < min_overlap || (overlap == min_overlap && area < min_area)) { min_k = k; min_overlap = overlap; min_area = area; } } unsigned k = min_k; unsigned k1 = tree->page_min_fill + k; unsigned k2 = n - k1; struct rtree_page *new_page = rtree_page_alloc(tree); tree->n_pages++; char taken[RTREE_MAXIMUM_BRANCHES_IN_PAGE]; memset(taken, 0, sizeof(taken)); for (unsigned i = 0; i < k1; i++) { struct rtree_page_branch *new_b = rtree_branch_get(tree, new_page, i); const struct rtree_page_branch *from_b = br; if (ids[i]) { from_b = rtree_branch_get(tree, page, ids[i] - 1); taken[ids[i] - 1] = 1; } rtree_branch_copy(new_b, from_b, d); } unsigned moved = 0; for (unsigned i = 0, j = 0; j < page->n; j++) { if (taken[j] == 0) { struct rtree_page_branch *to, *from; to = rtree_branch_get(tree, page, i++); from = rtree_branch_get(tree, page, j); rtree_branch_copy(to, from, tree->dimension); moved++; } } assert(moved == k2 || moved + 1 == k2); if (moved + 1 == k2) { struct rtree_page_branch *to; to = rtree_branch_get(tree, page, moved); rtree_branch_copy(to, br, tree->dimension); } new_page->n = k1; page->n = k2; return new_page; } static struct rtree_page* rtree_page_add_branch(struct rtree *tree, struct rtree_page *page, const struct rtree_page_branch *br) { if (page->n < tree->page_max_fill) { struct rtree_page_branch *b; b = rtree_branch_get(tree, page, page->n++); rtree_branch_copy(b, br, tree->dimension); return NULL; } else { return rtree_split_page(tree, 
page, br); } } static void rtree_page_remove_branch(struct rtree *tree, struct rtree_page *page, int i) { page->n--; for (unsigned j = i; j < page->n; j++) { struct rtree_page_branch *to, *from; to = rtree_branch_get(tree, page, j); from = rtree_branch_get(tree, page, j + 1); rtree_branch_copy(to, from, tree->dimension); } } static struct rtree_page * rtree_page_insert(struct rtree *tree, struct rtree_page *page, const struct rtree_rect *rect, record_t obj, int level) { struct rtree_page_branch br; if (--level != 0) { /* not a leaf page, minize area increase */ unsigned mini = 0; char found = 0; area_t min_incr = 0, best_area = 0; for (unsigned i = 0; i < page->n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(tree, page, i); area_t r_area = rtree_rect_area(&b->rect, tree->dimension); struct rtree_rect cover; rtree_rect_cover(&b->rect, rect, &cover, tree->dimension); area_t incr = rtree_rect_area(&cover, tree->dimension); incr -= r_area; assert(incr >= 0); if (i == 0 || incr < min_incr || (incr == min_incr && r_area < best_area)) { best_area = r_area; min_incr = incr; mini = i; found = 1; } } assert(found); (void) found; struct rtree_page_branch *b; b = rtree_branch_get(tree, page, mini); struct rtree_page *p = b->data.page; struct rtree_page *q = rtree_page_insert(tree, p, rect, obj, level); if (q == NULL) { /* child was not split */ rtree_rect_add(&b->rect, rect, tree->dimension); return NULL; } else { /* child was split */ rtree_page_cover(tree, p, &b->rect); br.data.page = q; rtree_page_cover(tree, q, &br.rect); return rtree_page_add_branch(tree, page, &br); } } else { br.data.record = obj; rtree_rect_copy(&br.rect, rect, tree->dimension); return rtree_page_add_branch(tree, page, &br); } } static bool rtree_page_remove(struct rtree *tree, struct rtree_page *page, const struct rtree_rect *rect, record_t obj, int level, struct rtree_reinsert_list *rlist) { unsigned d = tree->dimension; if (--level != 0) { for (unsigned i = 0; i < page->n; i++) { struct 
rtree_page_branch *b; b = rtree_branch_get(tree, page, i); if (!rtree_rect_intersects_rect(&b->rect, rect, d)) continue; struct rtree_page *next_page = b->data.page; if (!rtree_page_remove(tree, next_page, rect, obj, level, rlist)) continue; if (next_page->n >= tree->page_min_fill) { rtree_page_cover(tree, next_page, &b->rect); } else { /* not enough entries in child */ set_next_reinsert_page(tree, next_page, rlist->chain); rlist->chain = next_page; rlist->level = level - 1; rtree_page_remove_branch(tree, page, i); } return true; } } else { for (unsigned i = 0; i < page->n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(tree, page, i); if (b->data.page == obj) { rtree_page_remove_branch(tree, page, i); return true; } } } return false; } static void rtree_page_purge(struct rtree *tree, struct rtree_page *page, int level) { if (--level != 0) { /* this is an internal node in the tree */ for (unsigned i = 0; i < page->n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(tree, page, i); rtree_page_purge(tree, b->data.page, level); } } rtree_page_free(tree, page); } /*------------------------------------------------------------------------- */ /* R-tree iterator methods */ /*------------------------------------------------------------------------- */ static bool rtree_iterator_goto_first(struct rtree_iterator *itr, unsigned sp, struct rtree_page* pg) { unsigned d = itr->tree->dimension; if (sp + 1 == itr->tree->height) { for (unsigned i = 0, n = pg->n; i < n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(itr->tree, pg, i); if (itr->leaf_cmp(&itr->rect, &b->rect, d)) { itr->stack[sp].page = pg; itr->stack[sp].pos = i; return true; } } } else { for (unsigned i = 0, n = pg->n; i < n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(itr->tree, pg, i); if (itr->intr_cmp(&itr->rect, &b->rect, d) && rtree_iterator_goto_first(itr, sp + 1, b->data.page)) { itr->stack[sp].page = pg; itr->stack[sp].pos = i; return true; } } } return false; } 
static bool rtree_iterator_goto_next(struct rtree_iterator *itr, unsigned sp) { unsigned d = itr->tree->dimension; struct rtree_page *pg = itr->stack[sp].page; if (sp + 1 == itr->tree->height) { for (unsigned i = itr->stack[sp].pos, n = pg->n; ++i < n;) { struct rtree_page_branch *b; b = rtree_branch_get(itr->tree, pg, i); if (itr->leaf_cmp(&itr->rect, &b->rect, d)) { itr->stack[sp].pos = i; return true; } } } else { for (int i = itr->stack[sp].pos, n = pg->n; ++i < n;) { struct rtree_page_branch *b; b = rtree_branch_get(itr->tree, pg, i); if (itr->intr_cmp(&itr->rect, &b->rect, d) && rtree_iterator_goto_first(itr, sp + 1, b->data.page)) { itr->stack[sp].page = pg; itr->stack[sp].pos = i; return true; } } } return sp > 0 ? rtree_iterator_goto_next(itr, sp - 1) : false; } void rtree_iterator_destroy(struct rtree_iterator *itr) { struct rtree_neighbor_page *curr, *next; for (curr = itr->page_list; curr != NULL; curr = next) { next = curr->next; rtree_page_free((struct rtree *) itr->tree, (struct rtree_page *) curr); } itr->page_list = NULL; itr->page_pos = INT_MAX; } struct rtree_neighbor * rtree_iterator_reset_cb(rtnt_t *t, struct rtree_neighbor *n, void *d) { (void) t; struct rtree_iterator *itr = (struct rtree_iterator *)d; n->next = itr->neigh_free_list; itr->neigh_free_list = n; return 0; } static void rtree_iterator_reset(struct rtree_iterator *itr) { rtnt_iter(&itr->neigh_tree, 0, rtree_iterator_reset_cb, (void *)itr); rtnt_new(&itr->neigh_tree); } static struct rtree_neighbor * rtree_iterator_allocate_neighbour(struct rtree_iterator *itr) { if (itr->page_pos >= itr->tree->neighbours_in_page) { struct rtree_neighbor_page *new_page = (struct rtree_neighbor_page *) rtree_page_alloc((struct rtree*)itr->tree); new_page->next = itr->page_list; itr->page_list = new_page; itr->page_pos = 0; } return itr->page_list->buf + itr->page_pos++; } static struct rtree_neighbor * rtree_iterator_new_neighbor(struct rtree_iterator *itr, void *child, sq_coord_t distance, int 
level) { struct rtree_neighbor *n = itr->neigh_free_list; if (n == NULL) n = rtree_iterator_allocate_neighbour(itr); else itr->neigh_free_list = n->next; n->child = child; n->distance = distance; n->level = level; return n; } static void rtree_iterator_free_neighbor(struct rtree_iterator *itr, struct rtree_neighbor *n) { n->next = itr->neigh_free_list; itr->neigh_free_list = n; } void rtree_iterator_init(struct rtree_iterator *itr) { itr->tree = 0; rtnt_new(&itr->neigh_tree); itr->neigh_free_list = NULL; itr->page_list = NULL; itr->page_pos = INT_MAX; } static void rtree_iterator_process_neigh(struct rtree_iterator *itr, struct rtree_neighbor *neighbor) { unsigned d = itr->tree->dimension; void *child = neighbor->child; struct rtree_page *pg = (struct rtree_page *)child; int level = neighbor->level; rtree_iterator_free_neighbor(itr, neighbor); for (int i = 0, n = pg->n; i < n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(itr->tree, pg, i); coord_t distance; if (itr->tree->distance_type == RTREE_EUCLID) distance = rtree_rect_neigh_distance2(&b->rect, &itr->rect, d); else distance = rtree_rect_neigh_distance(&b->rect, &itr->rect, d); struct rtree_neighbor *neigh = rtree_iterator_new_neighbor(itr, b->data.page, distance, level - 1); rtnt_insert(&itr->neigh_tree, neigh); } } record_t rtree_iterator_next(struct rtree_iterator *itr) { if (itr->version != itr->tree->version) { /* Index was updated since cursor initialziation */ return NULL; } if (itr->op == SOP_NEIGHBOR) { /* To return element in order of increasing distance from * specified point, we build sorted list of R-Tree items * (ordered by distance from specified point) starting from * root page. 
* Algorithm is the following: * * insert root R-Tree page in the sorted list * while sorted list is not empty: * get top element from the sorted list * if it is tree leaf (record) then return it as * current element * otherwise (R-Tree page) get siblings of this R-Tree * page and insert them in sorted list */ while (true) { struct rtree_neighbor *neighbor = rtnt_first(&itr->neigh_tree); if (neighbor == NULL) return NULL; rtnt_remove(&itr->neigh_tree, neighbor); if (neighbor->level == 0) { void *child = neighbor->child; rtree_iterator_free_neighbor(itr, neighbor); return (record_t)child; } else { rtree_iterator_process_neigh(itr, neighbor); } } } int sp = itr->tree->height - 1; if (!itr->eof && rtree_iterator_goto_next(itr, sp)) { struct rtree_page_branch *b; b = rtree_branch_get(itr->tree, itr->stack[sp].page, itr->stack[sp].pos); return b->data.record; } itr->eof = true; return NULL; } /*------------------------------------------------------------------------- */ /* R-tree methods */ /*------------------------------------------------------------------------- */ int rtree_init(struct rtree *tree, unsigned dimension, uint32_t extent_size, rtree_extent_alloc_t extent_alloc, rtree_extent_free_t extent_free, void *alloc_ctx, enum rtree_distance_type distance_type) { tree->n_records = 0; tree->height = 0; tree->root = NULL; tree->version = 0; tree->n_pages = 0; tree->free_pages = 0; tree->dimension = dimension; tree->distance_type = distance_type; tree->page_branch_size = (RTREE_BRANCH_DATA_SIZE + dimension * 2 * sizeof(coord_t)); tree->page_size = RTREE_OPTIMAL_BRANCHES_IN_PAGE * tree->page_branch_size + sizeof(int); /* round up to closest power of 2 */ int lz = __builtin_clz(tree->page_size - 1); tree->page_size = 1u << (sizeof(int) * CHAR_BIT - lz); assert(tree->page_size - sizeof(int) >= tree->page_branch_size * RTREE_OPTIMAL_BRANCHES_IN_PAGE); tree->page_max_fill = (tree->page_size - sizeof(int)) / tree->page_branch_size; tree->page_min_fill = tree->page_max_fill * 
2 / 5; tree->neighbours_in_page = (tree->page_size - sizeof(void *)) / sizeof(struct rtree_neighbor); matras_create(&tree->mtab, extent_size, tree->page_size, extent_alloc, extent_free, alloc_ctx); return 0; } void rtree_destroy(struct rtree *tree) { rtree_purge(tree); matras_destroy(&tree->mtab); } void rtree_insert(struct rtree *tree, struct rtree_rect *rect, record_t obj) { if (tree->root == NULL) { tree->root = rtree_page_alloc(tree); rtree_page_init_with_record(tree, tree->root, rect, obj); tree->height = 1; tree->n_pages++; } else { struct rtree_page *p = rtree_page_insert(tree, tree->root, rect, obj, tree->height); if (p != NULL) { /* root splitted */ struct rtree_page *new_root = rtree_page_alloc(tree); rtree_page_init_with_pages(tree, new_root, tree->root, p); tree->root = new_root; tree->height++; tree->n_pages++; } } tree->version++; tree->n_records++; } bool rtree_remove(struct rtree *tree, const struct rtree_rect *rect, record_t obj) { struct rtree_reinsert_list rlist; rlist.chain = NULL; if (tree->height == 0) return false; if (!rtree_page_remove(tree, tree->root, rect, obj, tree->height, &rlist)) return false; struct rtree_page *pg = rlist.chain; int level = rlist.level; while (pg != NULL) { for (int i = 0, n = pg->n; i < n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(tree, pg, i); struct rtree_page *p = rtree_page_insert(tree, tree->root, &b->rect, b->data.record, tree->height - level); if (p != NULL) { /* root splitted */ struct rtree_page *new_root = rtree_page_alloc(tree); rtree_page_init_with_pages(tree, new_root, tree->root, p); tree->root = new_root; tree->height++; tree->n_pages++; } } level--; struct rtree_page *next = get_next_reinsert_page(tree, pg); rtree_page_free(tree, pg); tree->n_pages--; pg = next; } if (tree->root->n == 1 && tree->height > 1) { struct rtree_page_branch *b; b = rtree_branch_get(tree, tree->root, 0); struct rtree_page *new_root = b->data.page; rtree_page_free(tree, tree->root); tree->root = new_root; 
tree->height--; tree->n_pages--; } tree->n_records--; tree->version++; return true; } bool rtree_search(const struct rtree *tree, const struct rtree_rect *rect, enum spatial_search_op op, struct rtree_iterator *itr) { rtree_iterator_reset(itr); assert(itr->tree == 0 || itr->tree == tree); itr->tree = tree; itr->version = tree->version; rtree_rect_copy(&itr->rect, rect, tree->dimension); itr->op = op; assert(tree->height <= RTREE_MAX_HEIGHT); switch (op) { case SOP_ALL: itr->intr_cmp = itr->leaf_cmp = rtree_always_true; break; case SOP_EQUALS: itr->intr_cmp = rtree_rect_in_rect; itr->leaf_cmp = rtree_rect_equal_to_rect; break; case SOP_CONTAINS: itr->intr_cmp = itr->leaf_cmp = rtree_rect_in_rect; break; case SOP_STRICT_CONTAINS: itr->intr_cmp = itr->leaf_cmp = rtree_rect_strict_in_rect; break; case SOP_OVERLAPS: itr->intr_cmp = itr->leaf_cmp = rtree_rect_intersects_rect; break; case SOP_BELONGS: itr->intr_cmp = rtree_rect_intersects_rect; itr->leaf_cmp = rtree_rect_holds_rect; break; case SOP_STRICT_BELONGS: itr->intr_cmp = rtree_rect_intersects_rect; itr->leaf_cmp = rtree_rect_strict_holds_rect; break; case SOP_NEIGHBOR: if (tree->root) { struct rtree_rect cover; rtree_page_cover(tree, tree->root, &cover); sq_coord_t distance; if (tree->distance_type == RTREE_EUCLID) distance = rtree_rect_neigh_distance2(&cover, rect, tree->dimension); else distance = rtree_rect_neigh_distance(&cover, rect, tree->dimension); struct rtree_neighbor *n = rtree_iterator_new_neighbor(itr, tree->root, distance, tree->height); rtnt_insert(&itr->neigh_tree, n); return true; } else { return false; } } if (tree->root && rtree_iterator_goto_first(itr, 0, tree->root)) { itr->stack[tree->height-1].pos -= 1; /* will be incremented by goto_next */ itr->eof = false; return true; } else { itr->eof = true; return false; } } void rtree_purge(struct rtree *tree) { if (tree->root != NULL) { rtree_page_purge(tree, tree->root, tree->height); tree->root = NULL; tree->n_records = 0; tree->n_pages = 0; 
tree->height = 0; } } size_t rtree_used_size(const struct rtree *tree) { return tree->n_pages * tree->page_size; } unsigned rtree_number_of_records(const struct rtree *tree) { return tree->n_records; } #if 0 #include void rtree_debug_print_page(const struct rtree *tree, const struct rtree_page *page, unsigned level, unsigned path) { printf("%d:\n", path); unsigned d = tree->dimension; for (int i = 0; i < page->n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(tree, page, i); double v = 1; for (unsigned j = 0; j < d; j++) { double d1 = b->rect.coords[j * 2]; double d2 = b->rect.coords[j * 2 + 1]; v *= (d2 - d1) / 100; printf("[%04.1lf-%04.1lf:%04.1lf]", d2, d1, d2 - d1); } printf("%d\n", (int)(v * 100)); } if (--level > 1) { for (int i = 0; i < page->n; i++) { struct rtree_page_branch *b; b = rtree_branch_get(tree, page, i); rtree_debug_print_page(tree, b->data.page, level, path * 100 + i + 1); } } } void rtree_debug_print(const struct rtree *tree) { if (tree->root) rtree_debug_print_page(tree, tree->root, tree->height, 1); } #endif tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/bps_tree.h0000664000000000000000000060137313306560010020607 0ustar rootroot/* * *No header guard*: the header is allowed to be included twice * with different sets of defines. */ /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* memmove, memset */ #include #include #include /* printf */ #include "small/matras.h" /* {{{ BPS-tree description */ /** * BPS-tree implementation. * BPS-tree is an in-memory B+*-tree, i.e. B-tree with (+) and (*) * variants. * * Useful links: * http://en.wikipedia.org/wiki/B-tree * http://en.wikipedia.org/wiki/B-tree#Variants * http://en.wikipedia.org/wiki/B%2B_tree * http://ru.wikipedia.org/wiki/B*-%D0%B4%D0%B5%D1%80%D0%B5%D0%B2%D0%BE * * BPS-tree stores specified elements orderly with specified * compare function. * * The tree can be used to insert, replace, delete elements and * search values by key. * Search/modification of elements has logarithmic complexity, * lg B (N). * * It also has iterator support, providing sequential access to * elements in ascending and descending order. An iterator can be * initialized by the first or last element of the tree, or by the * lower/upper bound value of a key. Iteration has constant * complexity. * * The main features of the tree are: * * 1) It could be very compact. BPS-tree consumes the amount of * memory mostly proportional to (!) the maximal payload of the * tree. 
In other words, if a thee contains N elements of size * S, and maximum of N over a lifetime * of the tree is Nmax, then the consumed memory is asymptotically * proportional to (Nmax*S). * * In practice, a well configured BPS-tree consumes about 120% * of payload asymptotically when the tree is randomly filled, * i.e. has about 20% of memory overhead on big amounts of * data. * * In a rather bad case, when the tree is filled with * monotonically increasing values, the asymptotic overhead is * that about 40% of the payload, and the theoretical maximal * asymptotic overhead is about 60% of the payload. * * The theoretical minimal asymptotic overhead is about 0% :) * * However, and it could be important, if a tree is first * filled up and then emptied (but not destroyed), it still * consumes the amount of memory used to index the now * deleted elements. * * 2) It has a low cache-miss rate. A look up in the tree boils * down to to search in H blocks, where H is the height of the * tree, and can be bound by log(N) / log(K), where N is the * size of the tree and K is the average number of elements in * a block. For example, with 8-byte values and 512-byte blocks, * the tree with a million of elements will probably have height * of 4, and the tree with a billion of elements will probably have * height of 6. * 3) Successful insertion into the tree or deletion of an element * can break any of this tree's active iterators. * Nevertheless, dealing with broken iterators never leads to memory * access violation; the element, returned by the iterator is always * valid (the tree contains the value) and iteration never leads * to an infinite loop. * Note, that replacement of an element does not break an iterator * at all. * Note also, that using an uninitialised iterator indeed leads to * memory access violation. * * Setup and usage: * * 1) Define all macros like in the example below before including * this header. See "BPS-tree interface settings" section for * details. 
Example: * * #define BPS_TREE_NAME * #define BPS_TREE_BLOCK_SIZE 512 * #define BPS_TREE_EXTENT_SIZE 16*1024 * #define BPS_TREE_COMPARE(a, b, context) my_compare(a, b, context) * #define BPS_TREE_COMPARE_KEY(a, b, context) my_compare_key(a, b, context) * #define bps_tree_elem_t struct tuple * * #define bps_tree_key_t struct key_t * * #define bps_tree_arg_t struct compare_context * * * 2) Use structs and functions from the list below. * See "BPS-tree interface" section for details. Here is short list: * // types: * struct bps_tree; * struct bps_tree_iterator; * typedef void *(*bps_tree_extent_alloc_f)(); * typedef void (*bps_tree_extent_free_f)(void *); * // base: * void bps_tree_create(tree, arg, extent_alloc_func, extent_free_func, * alloc_ctx); * void bps_tree_destroy(tree); * int bps_tree_build(tree, sorted_array, array_size); * bps_tree_elem_t *bps_tree_find(tree, key); * int bps_tree_insert(tree, new_elem, replaced_elem); * int bps_tree_insert_get_iterator(tree, new_elem, replaced_elem, * inserted_iterator) * int bps_tree_delete(tree, elem); * size_t bps_tree_size(tree); * size_t bps_tree_mem_used(tree); * bps_tree_elem_t *bps_tree_random(tree, rnd); * int bps_tree_debug_check(tree); * void bps_tree_print(tree, "%p"); * int bps_tree_debug_check_internal_functions(assert_on_error); * // iterators: * struct bps_tree_iterator bps_tree_invalid_iterator(); * bool bps_tree_iterator_is_invalid(itr); * bool bps_tree_iterator_are_equal(tree, itr1, itr2); * struct bps_tree_iterator bps_tree_iterator_first(tree); * struct bps_tree_iterator bps_tree_iterator_last(tree); * struct bps_tree_iterator bps_tree_lower_bound(tree, key, exact); * struct bps_tree_iterator bps_tree_upper_bound(tree, key, exact); * struct bps_tree_iterator bps_tree_lower_bound_elem(tree, elem, exact); * struct bps_tree_iterator bps_tree_upper_bound_elem(tree, elem, exact); * size_t bps_tree_approxiamte_count(tree, key); * bps_tree_elem_t *bps_tree_iterator_get_elem(tree, itr); * bool 
bps_tree_iterator_next(tree, itr); * bool bps_tree_iterator_prev(tree, itr); * void bps_tree_iterator_freeze(tree, itr); * void bps_tree_iterator_destroy(tree, itr); */ /* }}} */ /* {{{ BPS-tree interface settings */ /** * Custom name for structs and functions. * Struct and functions will have bps_tree##BPS_TREE_NAME name or prefix. * For example one can #define BPS_TREE_NAME _test, and use then * struct bps_tree_test my_tree; * bps_tree_test_create(&my_tree, ...); * Allowed to be empty (just #define BPS_TREE_NAME) */ #ifndef BPS_TREE_NAME #error "BPS_TREE_NAME must be defined" #endif /** * Size of a block of the tree. A block should be large enough to contain * dozens of elements and dozens of 32-bit identifiers. * Must be a power of 2, i.e. log2(BPS_TREE_BLOCK_SIZE) must be an integer. * Tests show that for 64-bit elements, an ideal block size is 512 bytes * if binary search is used, and 256 bytes if linear search is used. * (see below for the binary/linear search setting) * Example: * #define BPS_TREE_BLOCK_SIZE 512 */ #ifndef BPS_TREE_BLOCK_SIZE #error "BPS_TREE_BLOCK_SIZE must be defined" #endif /** * Allocation granularity. The tree allocates memory by extents of * that size. Must be power of 2, i.e. log2(BPS_TREE_EXTENT_SIZE) * must be a whole number. * Two important things: * * 1) The maximal amount of memory, that particular btree instance * can use, is * ( (BPS_TREE_EXTENT_SIZE ^ 3) / (sizeof(void *) ^ 2) ) * * 2) The first insertion of an element leads to immidiate * allocation of three extents. Thus, memory overhead of almost * empty tree is * 3 * BPS_TREE_EXTENT_SIZE * * Example: * #define BPS_TREE_EXTENT_SIZE 8*1024 */ #ifndef BPS_TREE_EXTENT_SIZE #error "BPS_TREE_EXTENT_SIZE must be defined" #endif /** * Type of the tree element. Must be POD. 
The implementation * copies elements by memmove and assignment operator and * compares them with comparators defined below, and also * could be compared with operator == Example: * #define bps_tree_elem_t struct tuple * */ #ifndef bps_tree_elem_t #error "bps_tree_elem_t must be defined" #endif /** * Type of tree key. Must be POD. Used for finding an element in * the tree and in iterator initialization. * Example: * #define bps_tree_key_t struct key_data * */ #ifndef bps_tree_key_t #error "bps_tree_key_t must be defined" #endif /** * Type of comparison additional argument. The argument of this * type is initialized during tree creation and then passed to * compare function. If it is non necessary, define as int and * forget. Example: * * #define bps_tree_arg_t struct key_def * */ #ifndef bps_tree_arg_t #define bps_tree_arg_t int #endif /** * Function to compare elements. * Parameters: two elements and an additional argument, specified * for the tree instance. See struct bps_tree members for details. * Must return int-compatible value, like strcmp or memcmp * Examples: * #define BPS_TREE_COMPARE(a, b, arg) ((a) < (b) ? -1 : (a) > (b)) * #define BPS_TREE_COMPARE(a, b, arg) my_compare(a, b, arg) */ #ifndef BPS_TREE_COMPARE #error "BPS_TREE_COMPARE must be defined" #endif /** * Function to compare an element with a key. * Parameters: element, key and an additional argument, specified * for the tree instance. See struct bps_tree members for details. * Must return int-compatible value, like strcmp or memcmp * Examples: * #define BPS_TREE_COMPARE_KEY(a, b, arg) ((a) < (b) ? -1 : (a) > (b)) * #define BPS_TREE_COMPARE_KEY(a, b, arg) BPS_TREE_COMPARE(a, b, arg) */ #ifndef BPS_TREE_COMPARE_KEY #error "BPS_TREE_COMPARE_KEY must be defined" #endif /** * A switch to define the type of search in an array elements. * By default, bps_tree uses binary search to find a particular * element in a block. 
But if the element type is simple * (like an integer or float) it could be significantly faster to * use linear search. To turn on the linear search * #define BPS_BLOCK_LINEAR_SEARCH */ /** * A switch that enables collection of executions of different * branches of code. Used only for debug purposes, I hope you * will not use it. Nevertheless, to turn it on, * #define BPS_TREE_DEBUG_BRANCH_VISIT */ /* }}} */ /* {{{ BPS-tree internal settings */ typedef int16_t bps_tree_pos_t; typedef uint32_t bps_tree_block_id_t; /* }}} */ /* {{{ Compile time utils */ /** * Concatenation of name at compile time */ #ifndef CONCAT #define CONCAT_R(a, b) a##b #define CONCAT(a, b) CONCAT_R(a, b) #define CONCAT3_R(a, b, c) a##b##c #define CONCAT3(a, b, c) CONCAT3_R(a, b, c) #define CONCAT4_R(a, b, c, d) a##b##c##d #define CONCAT4(a, b, c, d) CONCAT4_R(a, b, c, d) #define CONCAT5_R(a, b, c, d, e) a##b##c##d##e #define CONCAT5(a, b, c, d, e) CONCAT5_R(a, b, c, d, e) #endif /** * Compile time assertion for use in function blocks */ #ifndef CT_ASSERT #define CT_ASSERT(e) do { typedef char __ct_assert[(e) ? 1 : -1]; } while(0) #endif /** * Compile time assertion for use in global scope (and in class scope) */ #ifndef CT_ASSERT_G #define CT_ASSERT_G(e) typedef char CONCAT(__ct_assert_, __LINE__)[(e) ? 1 :-1] #endif /* }}} */ /* {{{ Macros for custom naming of structs and functions */ #ifdef _ #error '_' must be undefinded! 
#endif #define _bps(postfix) CONCAT4(bps, BPS_TREE_NAME, _, postfix) #define _api_name(postfix) CONCAT3(BPS_TREE_NAME, _, postfix) #define _bps_tree(postfix) CONCAT5(bps, _tree_, BPS_TREE_NAME, _, postfix) #define _BPS(postfix) CONCAT5(BPS, _, BPS_TREE_NAME, _, postfix) #define _BPS_TREE(postfix) CONCAT5(BPS_TREE, _, BPS_TREE_NAME, _, postfix) #define _bps_tree_name BPS_TREE_NAME #define bps_tree BPS_TREE_NAME #define bps_block _bps(block) #define bps_leaf _bps(leaf) #define bps_inner _bps(inner) #define bps_garbage _bps(garbage) #define bps_tree_iterator _api_name(iterator) #define bps_inner_path_elem _bps(inner_path_elem) #define bps_leaf_path_elem _bps(leaf_path_elem) #define bps_tree_create _api_name(create) #define bps_tree_build _api_name(build) #define bps_tree_destroy _api_name(destroy) #define bps_tree_find _api_name(find) #define bps_tree_insert _api_name(insert) #define bps_tree_insert_get_iterator _api_name(insert_get_iterator) #define bps_tree_delete _api_name(delete) #define bps_tree_size _api_name(size) #define bps_tree_mem_used _api_name(mem_used) #define bps_tree_random _api_name(random) #define bps_tree_invalid_iterator _api_name(invalid_iterator) #define bps_tree_iterator_is_invalid _api_name(iterator_is_invalid) #define bps_tree_iterator_are_equal _api_name(iterator_are_equal) #define bps_tree_iterator_first _api_name(iterator_first) #define bps_tree_iterator_last _api_name(iterator_last) #define bps_tree_lower_bound _api_name(lower_bound) #define bps_tree_upper_bound _api_name(upper_bound) #define bps_tree_lower_bound_elem _api_name(lower_bound_elem) #define bps_tree_upper_bound_elem _api_name(upper_bound_elem) #define bps_tree_approximate_count _api_name(approximate_count) #define bps_tree_iterator_get_elem _api_name(iterator_get_elem) #define bps_tree_iterator_next _api_name(iterator_next) #define bps_tree_iterator_prev _api_name(iterator_prev) #define bps_tree_iterator_freeze _api_name(iterator_freeze) #define bps_tree_iterator_destroy 
_api_name(iterator_destroy) #define bps_tree_debug_check _api_name(debug_check) #define bps_tree_print _api_name(print) #define bps_tree_debug_check_internal_functions \ _api_name(debug_check_internal_functions) #define bps_tree_max_sizes _bps_tree(max_sizes) #define BPS_TREE_MAX_COUNT_IN_LEAF _BPS_TREE(MAX_COUNT_IN_LEAF) #define BPS_TREE_MAX_COUNT_IN_INNER _BPS_TREE(MAX_COUNT_IN_INNER) #define BPS_TREE_MAX_DEPTH _BPS_TREE(MAX_DEPTH) #define bps_block_type _bps(block_type) #define BPS_TREE_BT_GARBAGE _BPS_TREE(BT_GARBAGE) #define BPS_TREE_BT_INNER _BPS_TREE(BT_INNER) #define BPS_TREE_BT_LEAF _BPS_TREE(BT_LEAF) #define bps_tree_restore_block _bps_tree(restore_block) #define bps_tree_restore_block_ver _bps_tree(restore_block_ver) #define bps_tree_root _bps_tree(root) #define bps_tree_touch_block _bps_tree(touch_block) #define bps_tree_find_ins_point_key _bps_tree(find_ins_point_key) #define bps_tree_find_ins_point_elem _bps_tree(find_ins_point_elem) #define bps_tree_find_after_ins_point_key _bps_tree(find_after_ins_point_key) #define bps_tree_find_after_ins_point_elem _bps_tree(find_after_ins_point_elem) #define bps_tree_get_leaf_safe _bps_tree(get_leaf_safe) #define bps_tree_garbage_push _bps_tree(garbage_push) #define bps_tree_garbage_pop _bps_tree(garbage_pop) #define bps_tree_create_leaf _bps_tree(create_leaf) #define bps_tree_create_inner _bps_tree(create_inner) #define bps_tree_dispose_leaf _bps_tree(dispose_leaf) #define bps_tree_dispose_inner _bps_tree(dispose_inner) #define bps_tree_reserve_blocks _bps_tree(reserve_blocks) #define bps_tree_insert_first_elem _bps_tree(insert_first_elem) #define bps_tree_collect_path _bps_tree(collect_path) #define bps_tree_touch_leaf_path_max_elem _bps_tree(touch_leaf_path_max_elem) #define bps_tree_touch_path _bps_tree(touch_path_max_elem) #define bps_tree_process_replace _bps_tree(process_replace) #define bps_tree_debug_memmove _bps_tree(debug_memmove) #define bps_tree_insert_into_leaf _bps_tree(insert_into_leaf) #define 
bps_tree_insert_into_inner _bps_tree(insert_into_inner) #define bps_tree_delete_from_leaf _bps_tree(delete_from_leaf) #define bps_tree_delete_from_inner _bps_tree(delete_from_inner) #define bps_tree_move_elems_to_right_leaf _bps_tree(move_elems_to_right_leaf) #define bps_tree_move_elems_to_right_inner _bps_tree(move_elems_to_right_inner) #define bps_tree_move_elems_to_left_leaf _bps_tree(move_elems_to_left_leaf) #define bps_tree_move_elems_to_left_inner _bps_tree(move_elems_to_left_inner) #define bps_tree_insert_and_move_elems_to_right_leaf \ _bps_tree(insert_and_move_elems_to_right_leaf) #define bps_tree_insert_and_move_elems_to_right_inner \ _bps_tree(insert_and_move_elems_to_right_inner) #define bps_tree_insert_and_move_elems_to_left_leaf \ _bps_tree(insert_and_move_elems_to_left_leaf) #define bps_tree_insert_and_move_elems_to_left_inner \ _bps_tree(insert_and_move_elems_to_left_inner) #define bps_tree_leaf_free_size _bps_tree(leaf_free_size) #define bps_tree_inner_free_size _bps_tree(inner_free_size) #define bps_tree_leaf_overmin_size _bps_tree(leaf_overmin_size) #define bps_tree_inner_overmin_size _bps_tree(inner_overmin_size) #define bps_tree_collect_left_path_elem_leaf \ _bps_tree(collect_left_path_elem_leaf) #define bps_tree_collect_left_path_elem_inner \ _bps_tree(collect_left_path_elem_inner) #define bps_tree_collect_right_ext_leaf _bps_tree(collect_right_ext_leaf) #define bps_tree_collect_right_ext_inner _bps_tree(collect_right_ext_inner) #define bps_tree_prepare_new_ext_leaf _bps_tree(prepare_new_ext_leaf) #define bps_tree_prepare_new_ext_inner _bps_tree(prepare_new_ext_inner) #define bps_tree_process_insert_leaf _bps_tree(process_insert_leaf) #define bps_tree_process_insert_inner _bps_tree(process_insert_inner) #define bps_tree_process_delete_leaf _bps_tree(process_delete_leaf) #define bps_tree_process_delete_inner _bps_tree(process_delete_inner) #define bps_tree_debug_find_max_elem _bps_tree(debug_find_max_elem) #define bps_tree_debug_check_block 
_bps_tree(debug_check_block) #define bps_tree_print_indent _bps_tree(print_indent) #define bps_tree_print_block _bps_tree(print_block) #define bps_tree_print_leaf _bps_tree(print_leaf) #define bps_tree_print_inner _bps_tree(print_inner) #define bps_tree_debug_set_elem _bps_tree(debug_set_elem) #define bps_tree_debug_get_elem _bps_tree(debug_get_elem) #define bps_tree_debug_set_elem_inner _bps_tree(debug_set_elem_inner) #define bps_tree_debug_get_elem_inner _bps_tree(debug_get_elem_inner) #define bps_tree_debug_check_insert_into_leaf \ _bps_tree(debug_check_insert_into_leaf) #define bps_tree_debug_check_delete_from_leaf \ _bps_tree(debug_check_delete_from_leaf) #define bps_tree_debug_check_move_to_right_leaf \ _bps_tree(debug_check_move_to_right_leaf) #define bps_tree_debug_check_move_to_left_leaf \ _bps_tree(debug_check_move_to_left_leaf) #define bps_tree_debug_check_insert_and_move_to_right_leaf \ _bps_tree(debug_check_insert_and_move_to_right_leaf) #define bps_tree_debug_check_insert_and_move_to_left_leaf \ _bps_tree(debug_check_insert_and_move_to_left_leaf) #define bps_tree_debug_check_insert_into_inner \ _bps_tree(debug_check_insert_into_inner) #define bps_tree_debug_check_delete_from_inner \ _bps_tree(debug_check_delete_from_inner) #define bps_tree_debug_check_move_to_right_inner \ _bps_tree(debug_check_move_to_right_inner) #define bps_tree_debug_check_move_to_left_inner \ _bps_tree(debug_check_move_to_left_inner) #define bps_tree_debug_check_insert_and_move_to_right_inner \ _bps_tree(debug_check_insert_and_move_to_right_inner) #define bps_tree_debug_check_insert_and_move_to_left_inner \ _bps_tree(debug_check_insert_and_move_to_left_inner) /* }}} */ /* {{{ BPS-tree interface (declaration) */ /** * struct bps_block forward declaration (Used in struct bps_tree) */struct bps_block; #ifdef BPS_TREE_DEBUG_BRANCH_VISIT #define BPS_TREE_BRANCH_TRACE(tree, type, branch_bit) \ (tree)->debug_##type##_branches_mask |= (branch_bit) #else #define 
BPS_TREE_BRANCH_TRACE(tree, type, branch_bit) \
	((void)0)
#endif

/**
 * Main tree struct. One instance - one tree.
 */
struct bps_tree {
	/* ID of root block. (bps_tree_block_id_t)-1 in empty tree. */
	bps_tree_block_id_t root_id;
	/* IDs of first and last block. (-1) in empty tree. */
	bps_tree_block_id_t first_id, last_id;
	/* Counters of used blocks and garbaged blocks */
	bps_tree_block_id_t leaf_count, inner_count, garbage_count;
	/* Depth (height) of a tree. Is 0 in empty tree. */
	bps_tree_block_id_t depth;
	/* Number of elements in tree */
	size_t size;
	/* Head of list of garbaged blocks */
	bps_tree_block_id_t garbage_head_id;
	/* User-provided argument for comparator */
	bps_tree_arg_t arg;
	/* Copy of maximal element in tree. Used for beauty */
	bps_tree_elem_t max_elem;
	/* Special allocator of blocks and their IDs */
	struct matras matras;
#ifdef BPS_TREE_DEBUG_BRANCH_VISIT
	/* Bit masks of different branches visits */
	uint32_t debug_insert_leaf_branches_mask;
	uint32_t debug_insert_inner_branches_mask;
	uint32_t debug_delete_leaf_branches_mask;
	uint32_t debug_delete_inner_branches_mask;
	/* Possible masks of different branches visits */
	uint32_t debug_insert_leaf_branches_max_mask;
	uint32_t debug_insert_inner_branches_max_mask;
	uint32_t debug_delete_leaf_branches_max_mask;
	uint32_t debug_delete_inner_branches_max_mask;
#endif
};

/**
 * Tree iterator. Points to an element in tree.
 * There are 4 possible states of iterator:
 * 1) Normal. Points to a concrete element in tree.
 * 2) Invalid. Points to nothing. Safe.
 * 3) Broken. Normal can become broken during tree modification.
 *    Safe to use, but has undefined behavior.
 * 4) Uninitialized (or initialized in a wrong way).
 *    Unsafe and undefined behaviour.
 */
struct bps_tree_iterator {
	/* ID of a block, containing element. -1 for an invalid iterator */
	bps_tree_block_id_t block_id;
	/* Position of an element in the block.
Could be -1 for last in block*/ bps_tree_pos_t pos; /* Version of matras memory for MVCC */ struct matras_view view; }; /** * Pointer to function that allocates extent of size BPS_TREE_EXTENT_SIZE * BPS-tree properly handles with NULL result but could leak memory * in case of exception. */ typedef void *(*bps_tree_extent_alloc_f)(void *ctx); /** * Pointer to function frees extent (of size BPS_TREE_EXTENT_SIZE) */ typedef void (*bps_tree_extent_free_f)(void *ctx, void *extent); /** * @brief Tree construction. Fills struct bps_tree members. * @param tree - pointer to a tree * @param arg - user defined argument for comparator * @param extent_alloc_func - pointer to function that allocates extents, * see bps_tree_extent_alloc_f description for details * @param extent_free_func - pointer to function that allocates extents, * @param alloc_ctx - argument passed to extent allocator * see bps_tree_extent_free_f description for details */ static inline void bps_tree_create(struct bps_tree *tree, bps_tree_arg_t arg, bps_tree_extent_alloc_f extent_alloc_func, bps_tree_extent_free_f extent_free_func, void *alloc_ctx); /** * @brief Fills a new (asserted) tree with values from sorted array. * Elements are copied from the array. Array is not checked to be sorted! * @param tree - pointer to a tree * @param sorted_array - pointer to the sorted array * @param array_size - size of the array (count of elements) * @return 0 on success, -1 on memory error */ static inline int bps_tree_build(struct bps_tree *tree, bps_tree_elem_t *sorted_array, size_t array_size); /** * @brief Tree destruction. Frees allocated memory. 
 * @param tree - pointer to a tree
 */
static inline void
bps_tree_destroy(struct bps_tree *tree);

/**
 * @brief Find the first element that is equal to the key (comparator returns 0)
 * @param tree - pointer to a tree
 * @param key - key that will be compared with elements
 * @return pointer to the first equal element or NULL if not found
 */
static inline bps_tree_elem_t *
bps_tree_find(const struct bps_tree *tree, bps_tree_key_t key);

/**
 * @brief Insert an element to the tree or replace an element in the tree
 * In case of replacing, if 'replaced' argument is not null,
 * it'll be filled with the replaced element. In case of inserting it's
 * untouched.
 * Thus one can distinguish a real insert from a replace by passing to the
 * function a pointer to some value; if it was changed during the function
 * call, then a replace happened; insert otherwise.
 * @param tree - pointer to a tree
 * @param new_elem - inserting or replacing element
 * @param replaced - optional pointer for a replaced element
 * @return - 0 on success or -1 if memory allocation failed for insert
 */
static inline int
bps_tree_insert(struct bps_tree *tree, bps_tree_elem_t new_elem,
		bps_tree_elem_t *replaced);

/**
 * @sa bps_tree_insert + new parameter:
 * @param[out] inserted_iterator Iterator, positioned to the
 * new element.
 */
static inline int
bps_tree_insert_get_iterator(struct bps_tree *tree, bps_tree_elem_t new_elem,
			     bps_tree_elem_t *replaced,
			     struct bps_tree_iterator *inserted_iterator);

/**
 * @brief Delete an element from a tree.
 * @param tree - pointer to a tree
 * @param elem - the element to delete
 * @return - 0 on success or -1 if the element was not found in tree
 */
static inline int
bps_tree_delete(struct bps_tree *tree, bps_tree_elem_t elem);

/**
 * @brief Get size of tree, i.e.
count of elements in tree * @param tree - pointer to a tree * @return - count count of elements in tree */ static inline size_t bps_tree_size(const struct bps_tree *tree); /** * @brief Get amount of memory in bytes that the tree is using * (not including sizeof(struct bps_tree)) * @param tree - pointer to a tree * @return - count count of elements in tree */ static inline size_t bps_tree_mem_used(const struct bps_tree *tree); /** * @brief Get a random element in a tree. * @param tree - pointer to a tree * @param rnd - some random value * @return - count count of elements in tree */ static inline bps_tree_elem_t * bps_tree_random(const struct bps_tree *tree, size_t rnd); /** * @brief Get an invalid iterator. See iterator description. * @return - Invalid iterator */ static inline struct bps_tree_iterator bps_tree_invalid_iterator(); /** * @brief Check if an iterator is invalid. See iterator description. * @param itr - iterator to check * @return - true if iterator is invalid, false otherwise */ static inline bool bps_tree_iterator_is_invalid(struct bps_tree_iterator *itr); /** * @brief Compare two iterators and return true if trey points to same element. * Two invalid iterators are equal and points to the same nowhere. * Broken iterator is possibly not equal to any valid or invalid iterators. * @param tree - pointer to a tree * @param itr1 - first iterator * @param itr2 - second iterator * @return - true if iterators are equal, false otherwise */ static inline bool bps_tree_iterator_are_equal(const struct bps_tree *tree, struct bps_tree_iterator *itr1, struct bps_tree_iterator *itr2); /** * @brief Get an iterator to the first element of the tree * @param tree - pointer to a tree * @return - First iterator. Could be invalid if the tree is empty. */ static inline struct bps_tree_iterator bps_tree_iterator_first(const struct bps_tree *tree); /** * @brief Get an iterator to the last element of the tree * @param tree - pointer to a tree * @return - Last iterator. 
Could be invalid if the tree is empty. */ static inline struct bps_tree_iterator bps_tree_iterator_last(const struct bps_tree *tree); /** * @brief Get an iterator to the first element that is greater or * equal than key * @param tree - pointer to a tree * @param key - key that will be compared with elements * @param exact - pointer to a bool value, that will be set to true if * and element pointed by the iterator is equal to the key, false otherwise * Pass NULL if you don't need that info. * @return - Lower-bound iterator. Invalid if all elements are less than key. */ static inline struct bps_tree_iterator bps_tree_lower_bound(const struct bps_tree *tree, bps_tree_key_t key, bool *exact); /** * @brief Get an iterator to the first element that is greater than key * @param tree - pointer to a tree * @param key - key that will be compared with elements * @param exact - pointer to a bool value, that will be set to true if * and element pointed by the (!)previous iterator is equal to the key, * false otherwise. Pass NULL if you don't need that info. * @return - Upper-bound iterator. Invalid if all elements are less or equal * than the key. */ static inline struct bps_tree_iterator bps_tree_upper_bound(const struct bps_tree *tree, bps_tree_key_t key, bool *exact); /** * @brief Get an iterator to the first element that is greater or * equal than given element. * @param tree - pointer to a tree * @param key - the element that will be compared with tree elements * @param exact - pointer to a bool value, that will be set to true if * and element pointed by the iterator is equal to the key, false otherwise * Pass NULL if you don't need that info. * @return - Lower-bound iterator. Invalid if all elements are less than key. */ static inline struct bps_tree_iterator bps_tree_lower_bound_elem(const struct bps_tree *tree, bps_tree_elem_t key, bool *exact); /** * @brief Get an iterator to the first element that is greater than given * element. 
* @param tree - pointer to a tree * @param key - the element that will be compared with tree elements * @param exact - pointer to a bool value, that will be set to true if * and element pointed by the (!)previous iterator is equal to the key, * false otherwise. Pass NULL if you don't need that info. * @return - Upper-bound iterator. Invalid if all elements are less or equal * than the key. */ static inline struct bps_tree_iterator bps_tree_upper_bound_elem(const struct bps_tree *tree, bps_tree_elem_t key, bool *exact); /** * @brief Get approximate number of entries that are equal to given key. * Accuracy limits: * If the result is less than BPS_TREE_name_MAX_COUNT_IN_LEAF * 5 / 6, the * result is precise. If not, let's define: * X = BPS_TREE_name_MAX_COUNT_IN_LEAF * 5 / 6 * Y = BPS_TREE_name_MAX_COUNT_IN_INNER * 5 / 6 * H = ceil(log(Result / X) / log(Y)) * Then the true count is between: * [ Result * pow(0.8 - z, H), Result * pow(1.2 + z, H) ] * where z parameter is a small number due to rounding errors * @param tree - pointer to a tree * @param key - key that will be compared with elements * @return - approximate number of entries that are equal to given key. */ static inline size_t bps_tree_approximate_count(const struct bps_tree *tree, bps_tree_key_t key); /** * @brief Get a pointer to the element pointed by iterator. * If iterator is detected as broken, it is invalidated and NULL returned. * @param tree - pointer to a tree * @param itr - pointer to tree iterator * @return - Pointer to the element. Null for invalid iterator */ static inline bps_tree_elem_t * bps_tree_iterator_get_elem(const struct bps_tree *tree, struct bps_tree_iterator *itr); /** * @brief Increments an iterator, makes it point to the next element * If the iterator is to last element, it will be invalidated * If the iterator is detected as broken, it will be invalidated. * If the iterator is invalid, then it will be set to first element. 
* @param tree - pointer to a tree * @param itr - pointer to tree iterator * @return - true on success, false if a resulted iterator is set to invalid */ static inline bool bps_tree_iterator_next(const struct bps_tree *tree, struct bps_tree_iterator *itr); /** * @brief Decrements an iterator, makes it point to the previous element * If the iterator is to first element, it will be invalidated * If the iterator is detected as broken, it will be invalidated. * If the iterator is invalid, then it will be set to last element. * @param tree - pointer to a tree * @param itr - pointer to tree iterator * @return - true on success, false if a resulted iterator is set to invalid */ static inline bool bps_tree_iterator_prev(const struct bps_tree *tree, struct bps_tree_iterator *itr); /** * @brief Freezes tree state for given iterator. All following tree modification * will not apply to that iterator iteration. That iterator should be destroyed * with a bps_tree_iterator_destroy call after usage. * @param tree - pointer to a tree * @param itr - pointer to tree iterator */ static inline void bps_tree_iterator_freeze(struct bps_tree *tree, struct bps_tree_iterator *itr); /** * @brief Destroy an iterator that was frozen before. Useless for not frozen * iterators. * @param tree - pointer to a tree * @param itr - pointer to tree iterator */ static inline void bps_tree_iterator_destroy(struct bps_tree *tree, struct bps_tree_iterator *itr); #ifndef BPS_TREE_NO_DEBUG /** * @brief Debug self-checking. Returns bitmask of found errors (0 * on success). * I hope you will not need it. * @param tree - pointer to a tree * @return - Bitwise-OR of all errors found */ static inline int bps_tree_debug_check(const struct bps_tree *tree); /** * @brief Debug print tree to output in readable form. * I hope you will not need it. * @param tree - tree to print * @param elem_fmt - format for printing an element. "%d" or "%p" for example. 
*/ static inline void bps_tree_print(const struct bps_tree *tree, const char *elem_fmt); /** * @brief Debug print tree to output in readable form. * I hope you will not need it. * @param assertme - if true, errors will lead to assert call, * if false, just error code will be returned. * @return 0 if OK; bit mask of errors otherwise. */ static inline int bps_tree_debug_check_internal_functions(bool assertme); #endif /* BPS_TREE_NO_DEBUG */ /* }}} */ /* {{{ BPS-tree implementation (definition) */ /* Data moving */ #ifndef NDEBUG /* Debug version checks buffer overflow an runtime */ #define BPS_TREE_MEMMOVE(dst, src, num, dst_block, src_block) \ bps_tree_debug_memmove(dst, src, num, dst_block, src_block) #else /* Release version just moves memory */ #define BPS_TREE_MEMMOVE(dst, src, num, dst_block, src_block) \ memmove(dst, src, num) #endif /* Same as BPS_TREE_MEMMOVE but takes count of values instead of memory size */ #define BPS_TREE_DATAMOVE(dst, src, num, dst_bck, src_bck) \ BPS_TREE_MEMMOVE(dst, src, (num) * sizeof((dst)[0]), dst_bck, src_bck) /** * Types of a block */ enum bps_block_type { BPS_TREE_BT_GARBAGE = 1, BPS_TREE_BT_INNER = 2, BPS_TREE_BT_LEAF = 4 }; /** * Header for bps_leaf, bps_inner or bps_garbage blocks */ struct bps_block { /* Type of a block. See bps_block_type. Used for iterators and debug */ bps_tree_pos_t type; /* Count of elements for leaf, and of children for inner nodes */ bps_tree_pos_t size; }; /** * Calculation of max sizes (max count + 1) */ enum bps_tree_max_sizes { BPS_TREE_MAX_COUNT_IN_LEAF = (BPS_TREE_BLOCK_SIZE - sizeof(struct bps_block) - 2 * sizeof(bps_tree_block_id_t) ) / sizeof(bps_tree_elem_t), BPS_TREE_MAX_COUNT_IN_INNER = (BPS_TREE_BLOCK_SIZE - sizeof(struct bps_block)) / (sizeof(bps_tree_elem_t) + sizeof(bps_tree_block_id_t)), BPS_TREE_MAX_DEPTH = 16 }; /** * B* tree modification makes most of blocks to be filled al least of 2/3 * of the allocated space, so allocated space must be al least 3. 
 */
CT_ASSERT_G(BPS_TREE_MAX_COUNT_IN_LEAF >= 3);
CT_ASSERT_G(BPS_TREE_MAX_COUNT_IN_INNER >= 3);

/**
 * Leaf block definition.
 * Contains an array of elements on the last level of the tree
 */
struct bps_leaf {
	/* Block header */
	struct bps_block header;
	/* Next leaf block ID in ordered linked list */
	bps_tree_block_id_t next_id;
	/* Previous leaf block ID in ordered linked list */
	bps_tree_block_id_t prev_id;
	/* Ordered array of elements */
	bps_tree_elem_t elems[BPS_TREE_MAX_COUNT_IN_LEAF];
};

/**
 * Stop compile if smth went terribly wrong
 */
CT_ASSERT_G(sizeof(struct bps_leaf) <= BPS_TREE_BLOCK_SIZE);

/**
 * Inner block definition.
 * Contains an array of child (inner or leaf) IDs, and an array of
 * copies of maximal elements of the corresponding subtrees. Only
 * the last child subtree does not have a corresponding element copy
 * in this array (but it has a copy of the maximal element somewhere
 * in a parent's arrays or in the tree struct)
 */
struct bps_inner {
	/* Block header */
	struct bps_block header;
	/* Ordered array of elements. Note -1 in size. See struct descr. */
	bps_tree_elem_t elems[BPS_TREE_MAX_COUNT_IN_INNER - 1];
	/* Corresponding child IDs */
	bps_tree_block_id_t child_ids[BPS_TREE_MAX_COUNT_IN_INNER];
};

/**
 * Stop compile if smth went terribly wrong
 */
CT_ASSERT_G(sizeof(struct bps_inner) <= BPS_TREE_BLOCK_SIZE);

/**
 * Garbaged block definition
 */
struct bps_garbage {
	/* Block header */
	struct bps_block header;
	/* Next garbaged block id in single-linked list */
	bps_tree_block_id_t next_id;
	/* If a leaf is pushed to garbage, next and previous blocks' IDs are
	 * saved in the garbage node in order to fix iterators pointing to
	 * that node.
	 */
	/* Next leaf block ID in ordered linked list */
	bps_tree_block_id_t next_leaf_id;
	/* Previous leaf block ID in ordered linked list */
	bps_tree_block_id_t prev_leaf_id;
};

/**
 * Stop compile if smth went terribly wrong
 */
CT_ASSERT_G(sizeof(struct bps_garbage) <= BPS_TREE_BLOCK_SIZE);

/**
 * Struct for collecting path in tree, corresponds to one inner block
 */
struct bps_inner_path_elem {
	/* Pointer to block */
	struct bps_inner *block;
	/* ID of the block */
	bps_tree_block_id_t block_id;
	/* Position of next path element in block's child_ids array */
	bps_tree_pos_t insertion_point;
	/* Position of this path element in parent's child_ids array */
	bps_tree_pos_t pos_in_parent;
	/* Pointer to parent block (NULL for root) */
	struct bps_inner_path_elem *parent;
	/* Pointer to the sequent to the max element in the subtree */
	bps_tree_elem_t *max_elem_copy;
	/* Holder of max_elem_copy (block_id) */
	bps_tree_block_id_t max_elem_block_id;
	/* Holder of max_elem_copy (pos) */
	bps_tree_pos_t max_elem_pos;
};

/**
 * An auxiliary struct to collect a path in tree,
 * corresponds to one leaf block/one element of the path.
 */
struct bps_leaf_path_elem {
	/* A pointer to the block */
	struct bps_leaf *block;
	/* ID of the block */
	bps_tree_block_id_t block_id;
	/* Position of the next path element in block's child_ids array */
	bps_tree_pos_t insertion_point;
	/* Position of this path element in parent's child_ids array */
	bps_tree_pos_t pos_in_parent;
	/* A pointer to the parent block (NULL for root) */
	struct bps_inner_path_elem *parent;
	/* A pointer to the sequent to the max element in the subtree */
	bps_tree_elem_t *max_elem_copy;
	/* Holder of max_elem_copy (block_id) */
	bps_tree_block_id_t max_elem_block_id;
	/* Holder of max_elem_copy (pos) */
	bps_tree_pos_t max_elem_pos;
};

/**
 * @brief Tree construction. Fills struct bps_tree members.
 * @param tree - pointer to a tree
 * @param arg - user defined argument for comparator
 * @param extent_alloc_func - pointer to function that allocates extents,
 *  see bps_tree_extent_alloc_f description for details
 * @param extent_free_func - pointer to function that frees extents,
 * @param alloc_ctx - argument passed to extent allocator
 *  see bps_tree_extent_free_f description for details
 */
static inline void
bps_tree_create(struct bps_tree *tree, bps_tree_arg_t arg,
		bps_tree_extent_alloc_f extent_alloc_func,
		bps_tree_extent_free_f extent_free_func,
		void *alloc_ctx)
{
	/* (bps_tree_block_id_t)(-1) is the invalid/nil block ID. */
	tree->root_id = (bps_tree_block_id_t)(-1);
	tree->first_id = (bps_tree_block_id_t)(-1);
	tree->last_id = (bps_tree_block_id_t)(-1);
	tree->leaf_count = 0;
	tree->inner_count = 0;
	tree->garbage_count = 0;
	tree->depth = 0;
	tree->size = 0;
	tree->garbage_head_id = (bps_tree_block_id_t)(-1);
	tree->arg = arg;
	memset(&tree->max_elem, 0, sizeof(tree->max_elem));
	matras_create(&tree->matras,
		      BPS_TREE_EXTENT_SIZE, BPS_TREE_BLOCK_SIZE,
		      extent_alloc_func, extent_free_func, alloc_ctx);
#ifdef BPS_TREE_DEBUG_BRANCH_VISIT
	/* Bit masks of different branches visits */
	tree->debug_insert_leaf_branches_mask = 0;
	tree->debug_insert_inner_branches_mask = 0;
	tree->debug_delete_leaf_branches_mask = 0;
	tree->debug_delete_inner_branches_mask = 0;
	/* Possible masks of different branches visits */
	tree->debug_insert_leaf_branches_max_mask = (1 << 0xE) - 1;
	tree->debug_insert_inner_branches_max_mask = (1 << 0xE) - 1;
	tree->debug_delete_leaf_branches_max_mask = (1 << 0x11) - 1;
	tree->debug_delete_inner_branches_max_mask = (1 << 0x11) - 1;
#endif
}

/**
 * @brief Fills a new (asserted) tree with values from sorted array.
 * Elements are copied from the array. Array is not checked to be sorted!
 * @param tree - pointer to a tree
 * @param sorted_array - pointer to the sorted array
 * @param array_size - size of the array (count of elements)
 * @return 0 on success, -1 on memory error
 */
static inline int
bps_tree_build(struct bps_tree *tree, bps_tree_elem_t *sorted_array,
	       size_t array_size)
{
	assert(tree->size == 0);
	assert(tree->root_id == (bps_tree_block_id_t)(-1));
	assert(tree->garbage_head_id == (bps_tree_block_id_t)(-1));
	assert(tree->matras.head.block_count == 0);
	if (array_size == 0)
		return 0;
	/* Compute the number of leaves and the resulting tree depth. */
	bps_tree_block_id_t leaf_count =
		(array_size + BPS_TREE_MAX_COUNT_IN_LEAF - 1) /
		BPS_TREE_MAX_COUNT_IN_LEAF;
	bps_tree_block_id_t depth = 1;
	bps_tree_block_id_t level_count = leaf_count;
	while (level_count > 1) {
		level_count = (level_count + BPS_TREE_MAX_COUNT_IN_INNER - 1) /
			BPS_TREE_MAX_COUNT_IN_INNER;
		depth++;
	}
	/* Initializing by {0} to suppress compile warnings (gh-1287) */
	bps_tree_block_id_t level_block_count[BPS_TREE_MAX_DEPTH] = {0};
	bps_tree_block_id_t level_child_count[BPS_TREE_MAX_DEPTH] = {0};
	struct bps_inner *parents[BPS_TREE_MAX_DEPTH];
	level_count = leaf_count;
	/* Per-level block/child counts; used to spread elements evenly. */
	for (bps_tree_block_id_t i = 0; i < depth - 1; i++) {
		level_child_count[i] = level_count;
		level_count = (level_count + BPS_TREE_MAX_COUNT_IN_INNER - 1) /
			BPS_TREE_MAX_COUNT_IN_INNER;
		level_block_count[i] = level_count;
		parents[i] = 0;
	}
	bps_tree_block_id_t leaf_left = leaf_count;
	size_t elems_left = array_size;
	bps_tree_elem_t *current = sorted_array;
	struct bps_leaf *leaf = 0;
	bps_tree_block_id_t prev_leaf_id = (bps_tree_block_id_t)-1;
	bps_tree_block_id_t first_leaf_id = (bps_tree_block_id_t)-1;
	bps_tree_block_id_t last_leaf_id = (bps_tree_block_id_t)-1;
	bps_tree_block_id_t inner_count = 0;
	bps_tree_block_id_t root_if_inner_id = (bps_tree_block_id_t)-1;
	do {
		/* Allocate and fill the next leaf. */
		bps_tree_block_id_t id;
		struct bps_leaf *new_leaf = (struct bps_leaf *)
			matras_alloc(&tree->matras, &id);
		if (!new_leaf) {
			matras_reset(&tree->matras);
			return -1;
		}
		if (first_leaf_id == (bps_tree_block_id_t)-1)
			first_leaf_id = id;
		last_leaf_id = id;
		if (leaf)
			leaf->next_id = id;
		leaf = new_leaf;
		leaf->header.type = BPS_TREE_BT_LEAF;
		/* Spread remaining elements evenly over remaining leaves. */
		leaf->header.size = elems_left / leaf_left;
		leaf->prev_id = prev_leaf_id;
		prev_leaf_id = id;
		memmove(leaf->elems, current,
			leaf->header.size * sizeof(*current));
		/* Register the new leaf (and any new inner) in its parents. */
		bps_tree_block_id_t insert_id = id;
		for (bps_tree_block_id_t i = 0; i < depth - 1; i++) {
			bps_tree_block_id_t new_id = (bps_tree_block_id_t)-1;
			if (!parents[i]) {
				parents[i] = (struct bps_inner *)
					matras_alloc(&tree->matras, &new_id);
				if (!parents[i]) {
					matras_reset(&tree->matras);
					return -1;
				}
				parents[i]->header.type = BPS_TREE_BT_INNER;
				parents[i]->header.size = 0;
				inner_count++;
			}
			parents[i]->child_ids[parents[i]->header.size] =
				insert_id;
			if (new_id == (bps_tree_block_id_t)-1)
				break;
			if (i == depth - 2) {
				root_if_inner_id = new_id;
			} else {
				insert_id = new_id;
			}
		}
		/* Propagate the max element of this leaf up the tree. */
		bps_tree_elem_t insert_value = current[leaf->header.size - 1];
		for (bps_tree_block_id_t i = 0; i < depth - 1; i++) {
			parents[i]->header.size++;
			bps_tree_block_id_t max_size =
				level_child_count[i] / level_block_count[i];
			if ((uint32_t)parents[i]->header.size != max_size) {
				parents[i]->elems[parents[i]->header.size - 1] =
					insert_value;
				break;
			} else {
				/* Inner block is full; start a new one next. */
				parents[i] = 0;
				level_child_count[i] -= max_size;
				level_block_count[i]--;
			}
		}
		leaf_left--;
		elems_left -= leaf->header.size;
		current += leaf->header.size;
	} while (leaf_left);
	leaf->next_id = (bps_tree_block_id_t)-1;
	assert(elems_left == 0);
	for (bps_tree_block_id_t i = 0; i < depth - 1; i++) {
		assert(level_child_count[i] == 0);
		assert(level_block_count[i] == 0);
		assert(parents[i] == 0);
	}
	tree->first_id = first_leaf_id;
	tree->last_id = last_leaf_id;
	tree->leaf_count = leaf_count;
	tree->inner_count = inner_count;
	tree->depth = depth;
	tree->size = array_size;
	tree->max_elem = sorted_array[array_size - 1];
	if (depth == 1) {
		tree->root_id = first_leaf_id;
	} else {
		tree->root_id = root_if_inner_id;
	}
	return 0;
}

/**
 * @brief Tree destruction. Frees allocated memory.
 * @param tree - pointer to a tree
 */
static inline void
bps_tree_destroy(struct bps_tree *tree)
{
	matras_destroy(&tree->matras);
}

/**
 * @brief Get size of tree, i.e. count of elements in tree
 * @param tree - pointer to a tree
 * @return - count of elements in tree
 */
static inline size_t
bps_tree_size(const struct bps_tree *tree)
{
	return tree->size;
}

/**
 * @brief Get amount of memory in bytes that the tree is using
 * (not including sizeof(struct bps_tree))
 * @param tree - pointer to a tree
 * @return - memory used, in bytes
 */
static inline size_t
bps_tree_mem_used(const struct bps_tree *tree)
{
	size_t res = matras_extent_count(&tree->matras);
	res *= BPS_TREE_EXTENT_SIZE;
	return res;
}

/**
 * @brief Get a pointer to a block by its ID.
 */
static inline struct bps_block *
bps_tree_restore_block(const struct bps_tree *tree, bps_tree_block_id_t id)
{
	return (struct bps_block *)matras_get(&tree->matras, id);
}

/**
 * @brief Get a pointer to a block by its ID and provided read view.
 */
static inline struct bps_block *
bps_tree_restore_block_ver(const struct bps_tree *tree,
			   bps_tree_block_id_t id, struct matras_view *view)
{
	return (struct bps_block *)matras_view_get(&tree->matras, view, id);
}

/**
 * @brief Get a pointer to the root block of the tree.
 */
static inline struct bps_block *
bps_tree_root(const struct bps_tree *tree)
{
	return (struct bps_block *)matras_get(&tree->matras, tree->root_id);
}

/**
 * @brief Get a pointer to a block by its ID, via matras_touch
 * (presumably making the block writable for the current version -
 * see matras documentation).
 */
static inline struct bps_block *
bps_tree_touch_block(struct bps_tree *tree, bps_tree_block_id_t id)
{
	return (struct bps_block *)matras_touch(&tree->matras, id);
}

/**
 * @brief Get a random element in a tree.
 * @param tree - pointer to a tree
 * @param rnd - some random value
 * @return - pointer to a pseudo-randomly chosen element, NULL if the
 *  tree is empty
 */
static inline bps_tree_elem_t *
bps_tree_random(const struct bps_tree *tree, size_t rnd)
{
	if (tree->root_id == (bps_tree_block_id_t)(-1))
		return 0;
	struct bps_block *block = bps_tree_root(tree);
	/* Descend, consuming bits of rnd to pick a child at each level. */
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos = rnd % inner->header.size;
		rnd /= inner->header.size;
		block = bps_tree_restore_block(tree, inner->child_ids[pos]);
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos = rnd % leaf->header.size;
	return leaf->elems + pos;
}

/**
 * @brief Find the lowest element in sorted array that is >= than the key
 * @param tree - pointer to a tree
 * @param arr - array of elements
 * @param size - size of the array
 * @param key - key to find
 * @param exact - point to bool that receives true if equal element was found
 */
static inline bps_tree_pos_t
bps_tree_find_ins_point_key(const struct bps_tree *tree, bps_tree_elem_t *arr,
			    size_t size, bps_tree_key_t key, bool *exact)
{
	(void)tree;
	bps_tree_elem_t *begin = arr;
	bps_tree_elem_t *end = arr + size;
	*exact = false;
#ifdef BPS_BLOCK_LINEAR_SEARCH
	while (begin != end) {
		int res = BPS_TREE_COMPARE_KEY(*begin, key, tree->arg);
		if (res >= 0) {
			*exact = res == 0;
			return (bps_tree_pos_t)(begin - arr);
		}
		++begin;
	}
	return (bps_tree_pos_t)(begin - arr);
#else
	/* Binary search for the lower bound. */
	while (begin != end) {
		bps_tree_elem_t *mid = begin + (end - begin) / 2;
		int res = BPS_TREE_COMPARE_KEY(*mid, key, tree->arg);
		if (res > 0) {
			end = mid;
		} else if (res < 0) {
			begin = mid + 1;
		} else {
			*exact = true;
			/* Equal found, continue search for lowest equal */
			end = mid;
		}
	}
	return (bps_tree_pos_t)(end - arr);
#endif
}

/**
 * @brief Find the lowest element in sorted array that is >= than the elem
 * @param tree - pointer to a tree
 * @param arr - array of elements
 * @param size - size of the array
 * @param elem - element to find
 * @param exact - point to bool that receives true if equal
 *  element was found
 */
static inline bps_tree_pos_t
bps_tree_find_ins_point_elem(const struct bps_tree *tree, bps_tree_elem_t *arr,
			     size_t size, bps_tree_elem_t elem, bool *exact)
{
	(void)tree;
	bps_tree_elem_t *begin = arr;
	bps_tree_elem_t *end = arr + size;
	*exact = false;
#ifdef BPS_BLOCK_LINEAR_SEARCH
	while (begin != end) {
		int res = BPS_TREE_COMPARE(*begin, elem, tree->arg);
		if (res >= 0) {
			*exact = res == 0;
			return (bps_tree_pos_t)(begin - arr);
		}
		++begin;
	}
	return (bps_tree_pos_t)(begin - arr);
#else
	while (begin != end) {
		bps_tree_elem_t *mid = begin + (end - begin) / 2;
		int res = BPS_TREE_COMPARE(*mid, elem, tree->arg);
		if (res > 0) {
			end = mid;
		} else if (res < 0) {
			begin = mid + 1;
		} else {
			*exact = true;
			/* Since elements are unique in array, stop search */
			return (bps_tree_pos_t)(mid - arr);
		}
	}
	return (bps_tree_pos_t)(end - arr);
#endif
}

/**
 * @brief Find the lowest element in sorted array that is greater
 * than the key.
 * @param tree - pointer to a tree
 * @param arr - array of elements
 * @param size - size of the array
 * @param key - key to find
 * @param exact - point to bool that receives true if equal
 *  element is present
 */
static inline bps_tree_pos_t
bps_tree_find_after_ins_point_key(const struct bps_tree *tree,
				  bps_tree_elem_t *arr, size_t size,
				  bps_tree_key_t key, bool *exact)
{
	(void)tree;
	bps_tree_elem_t *begin = arr;
	bps_tree_elem_t *end = arr + size;
	*exact = false;
#ifdef BPS_BLOCK_LINEAR_SEARCH
	while (begin != end) {
		int res = BPS_TREE_COMPARE_KEY(*begin, key, tree->arg);
		if (res == 0)
			*exact = true;
		else if (res > 0)
			return (bps_tree_pos_t)(begin - arr);
		++begin;
	}
	return (bps_tree_pos_t)(begin - arr);
#else
	/* Binary search for the upper bound. */
	while (begin != end) {
		bps_tree_elem_t *mid = begin + (end - begin) / 2;
		int res = BPS_TREE_COMPARE_KEY(*mid, key, tree->arg);
		if (res > 0) {
			end = mid;
		} else if (res < 0) {
			begin = mid + 1;
		} else {
			*exact = true;
			begin = mid + 1;
		}
	}
	return (bps_tree_pos_t)(end - arr);
#endif
}

/**
 * @brief Find the lowest element in sorted array that is greater
 * than the key.
 * @param tree - pointer to a tree
 * @param arr - array of elements
 * @param size - size of the array
 * @param elem - element to find
 * @param exact - point to bool that receives true if equal
 *  element is present
 */
static inline bps_tree_pos_t
bps_tree_find_after_ins_point_elem(const struct bps_tree *tree,
				   bps_tree_elem_t *arr, size_t size,
				   bps_tree_elem_t elem, bool *exact)
{
	(void)tree;
	bps_tree_elem_t *begin = arr;
	bps_tree_elem_t *end = arr + size;
	*exact = false;
#ifdef BPS_BLOCK_LINEAR_SEARCH
	while (begin != end) {
		int res = BPS_TREE_COMPARE(*begin, elem, tree->arg);
		if (res == 0)
			*exact = true;
		else if (res > 0)
			return (bps_tree_pos_t)(begin - arr);
		++begin;
	}
	return (bps_tree_pos_t)(begin - arr);
#else
	while (begin != end) {
		bps_tree_elem_t *mid = begin + (end - begin) / 2;
		int res = BPS_TREE_COMPARE(*mid, elem, tree->arg);
		if (res > 0) {
			end = mid;
		} else if (res < 0) {
			begin = mid + 1;
		} else {
			*exact = true;
			begin = mid + 1;
		}
	}
	return (bps_tree_pos_t)(end - arr);
#endif
}

/**
 * @brief Get an invalid iterator. See iterator description.
 * @return - Invalid iterator
 */
static inline struct bps_tree_iterator
bps_tree_invalid_iterator()
{
	struct bps_tree_iterator res;
	res.block_id = (bps_tree_block_id_t)(-1);
	res.pos = 0;
	matras_head_read_view(&res.view);
	return res;
}

/**
 * @brief Check if an iterator is invalid. See iterator
 * description.
 * @param itr - iterator to check
 * @return - true if iterator is invalid, false otherwise
 */
static inline bool
bps_tree_iterator_is_invalid(struct bps_tree_iterator *itr)
{
	return itr->block_id == (bps_tree_block_id_t)(-1);
}

/**
 * @brief Check for a validity of an iterator and return a pointer
 * to the leaf. Position is also checked and (-1) is converted to
 * the position of the last element. If smth is wrong, the iterator
 * is invalidated and NULL returned.
 */
static inline struct bps_leaf *
bps_tree_get_leaf_safe(const struct bps_tree *tree,
		       struct bps_tree_iterator *itr)
{
	if (itr->block_id == (bps_tree_block_id_t)(-1))
		return 0;
	struct bps_block *block =
		bps_tree_restore_block_ver(tree, itr->block_id, &itr->view);
	if (block->type == BPS_TREE_BT_GARBAGE) {
		/* The leaf was deleted; try to follow the saved leaf links
		 * out of the garbage chain to the next live leaf. */
		struct bps_garbage *garbage = (struct bps_garbage *)block;
		while (garbage->next_leaf_id != (bps_tree_block_id_t)(-1) &&
		       garbage->next_leaf_id != itr->block_id) {
			block = bps_tree_restore_block_ver(tree,
							   garbage->next_leaf_id,
							   &itr->view);
			if (block->type == BPS_TREE_BT_LEAF) {
				itr->block_id = garbage->next_leaf_id;
				itr->pos = 0;
				break;
			} else if (block->type != BPS_TREE_BT_GARBAGE) {
				break;
			}
			garbage = (struct bps_garbage *)block;
		}
	}
	if (block->type != BPS_TREE_BT_LEAF) {
		/* Recovery failed; invalidate the iterator. */
		itr->block_id = (bps_tree_block_id_t)(-1);
		return 0;
	}
	if (itr->pos == (bps_tree_pos_t)(-1)) {
		/* (-1) means "last element of the block". */
		itr->pos = block->size - 1;
	} else if (itr->pos >= block->size) {
		/* Past the end of this leaf; step to the next leaf. */
		struct bps_leaf *leaf = (struct bps_leaf *)block;
		if (leaf->next_id == (bps_tree_block_id_t)(-1)) {
			itr->block_id = (bps_tree_block_id_t)(-1);
			return 0;
		}
		itr->block_id = leaf->next_id;
		itr->pos = 0;
		block = bps_tree_restore_block_ver(tree, itr->block_id,
						   &itr->view);
	}
	return (struct bps_leaf *)block;
}

/**
 * @brief Compare two iterators and return true if they point to
 * the same element.
 * Two invalid iterators are equal and point to the same nowhere.
 * A broken iterator is possibly not equal to any valid or invalid
 * iterators.
 * @param tree - pointer to a tree
 * @param itr1 - first iterator
 * @param itr2 - second iterator
 * @return - true if iterators are equal, false otherwise
 */
static inline bool
bps_tree_iterator_are_equal(const struct bps_tree *tree,
			    struct bps_tree_iterator *itr1,
			    struct bps_tree_iterator *itr2)
{
	if (bps_tree_iterator_is_invalid(itr1) &&
	    bps_tree_iterator_is_invalid(itr2))
		return true;
	if (bps_tree_iterator_is_invalid(itr1) ||
	    bps_tree_iterator_is_invalid(itr2))
		return false;
	if (itr1->block_id == itr2->block_id && itr1->pos == itr2->pos)
		return true;
	/* Normalize the (-1) "last element" position before comparing. */
	if (itr1->pos == (bps_tree_pos_t)(-1)) {
		struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr1);
		if (!leaf)
			return false;
		itr1->pos = leaf->header.size - 1;
		if (itr1->block_id == itr2->block_id &&
		    itr1->pos == itr2->pos)
			return true;
	}
	if (itr2->pos == (bps_tree_pos_t)(-1)) {
		struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr2);
		if (!leaf)
			return false;
		itr2->pos = leaf->header.size - 1;
		if (itr1->block_id == itr2->block_id &&
		    itr1->pos == itr2->pos)
			return true;
	}
	return false;
}

/**
 * @brief Get an iterator to the first element of the tree
 * @param tree - pointer to a tree
 * @return - First iterator. Could be invalid if the tree is empty.
 */
static inline struct bps_tree_iterator
bps_tree_iterator_first(const struct bps_tree *tree)
{
	struct bps_tree_iterator itr;
	itr.block_id = tree->first_id;
	itr.pos = 0;
	matras_head_read_view(&itr.view);
	return itr;
}

/**
 * @brief Get an iterator to the last element of the tree.
 * @param tree - pointer to a tree
 * @return - Last iterator. Could be invalid if the tree is empty.
 */
static inline struct bps_tree_iterator
bps_tree_iterator_last(const struct bps_tree *tree)
{
	struct bps_tree_iterator itr;
	itr.block_id = tree->last_id;
	itr.pos = (bps_tree_pos_t)(-1);
	matras_head_read_view(&itr.view);
	return itr;
}

/**
 * @brief Get an iterator to the first element that is greater
 * than or equal to the key.
 * @param tree - pointer to a tree
 * @param key - key that will be compared with elements
 * @param exact - pointer to a bool value, that will be set to true if
 * an element pointed by the iterator is equal to the key, false otherwise
 * Pass NULL if you don't need that info.
 * @return - Lower-bound iterator. Invalid if all elements are less than key.
 */
static inline struct bps_tree_iterator
bps_tree_lower_bound(const struct bps_tree *tree, bps_tree_key_t key,
		     bool *exact)
{
	struct bps_tree_iterator res;
	matras_head_read_view(&res.view);
	bool local_result;
	if (!exact)
		exact = &local_result;
	*exact = false;
	if (tree->root_id == (bps_tree_block_id_t)(-1)) {
		res.block_id = (bps_tree_block_id_t)(-1);
		res.pos = 0;
		return res;
	}
	struct bps_block *block = bps_tree_root(tree);
	bps_tree_block_id_t block_id = tree->root_id;
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos;
		pos = bps_tree_find_ins_point_key(tree, inner->elems,
						  inner->header.size - 1,
						  key, exact);
		block_id = inner->child_ids[pos];
		block = bps_tree_restore_block(tree, block_id);
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos;
	pos = bps_tree_find_ins_point_key(tree, leaf->elems,
					  leaf->header.size, key, exact);
	if (pos >= leaf->header.size) {
		/* Past the end of this leaf - step to the next one. */
		res.block_id = leaf->next_id;
		res.pos = 0;
	} else {
		res.block_id = block_id;
		res.pos = pos;
	}
	return res;
}

/**
 * @brief Get an iterator to the first element that is greater than key
 * @param tree - pointer to a tree
 * @param key - key that will be compared with elements
 * @param exact - pointer to a bool value, that will be set to true if
 * an element pointed by the (!)previous iterator is equal to the key,
 * false otherwise. Pass NULL if you don't need that info.
 * @return - Upper-bound iterator. Invalid if all elements are less or equal
 * than the key.
 */
static inline struct bps_tree_iterator
bps_tree_upper_bound(const struct bps_tree *tree, bps_tree_key_t key,
		     bool *exact)
{
	struct bps_tree_iterator res;
	matras_head_read_view(&res.view);
	bool local_result;
	if (!exact)
		exact = &local_result;
	*exact = false;
	bool exact_test;
	if (tree->root_id == (bps_tree_block_id_t)(-1)) {
		res.block_id = (bps_tree_block_id_t)(-1);
		res.pos = 0;
		return res;
	}
	struct bps_block *block = bps_tree_root(tree);
	bps_tree_block_id_t block_id = tree->root_id;
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos;
		pos = bps_tree_find_after_ins_point_key(tree, inner->elems,
							inner->header.size - 1,
							key, &exact_test);
		if (exact_test)
			*exact = true;
		block_id = inner->child_ids[pos];
		block = bps_tree_restore_block(tree, block_id);
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos;
	pos = bps_tree_find_after_ins_point_key(tree, leaf->elems,
						leaf->header.size, key,
						&exact_test);
	if (exact_test)
		*exact = true;
	if (pos >= leaf->header.size) {
		res.block_id = leaf->next_id;
		res.pos = 0;
	} else {
		res.block_id = block_id;
		res.pos = pos;
	}
	return res;
}

/**
 * @brief Get an iterator to the first element that is greater or
 * equal than given element.
 * @param tree - pointer to a tree
 * @param key - the element that will be compared with tree elements
 * @param exact - pointer to a bool value, that will be set to true if
 * an element pointed by the iterator is equal to the key, false otherwise
 * Pass NULL if you don't need that info.
 * @return - Lower-bound iterator. Invalid if all elements are less than key.
 */
static inline struct bps_tree_iterator
bps_tree_lower_bound_elem(const struct bps_tree *tree, bps_tree_elem_t key,
			  bool *exact)
{
	struct bps_tree_iterator res;
	matras_head_read_view(&res.view);
	bool local_result;
	if (!exact)
		exact = &local_result;
	*exact = false;
	if (tree->root_id == (bps_tree_block_id_t)(-1)) {
		res.block_id = (bps_tree_block_id_t)(-1);
		res.pos = 0;
		return res;
	}
	struct bps_block *block = bps_tree_root(tree);
	bps_tree_block_id_t block_id = tree->root_id;
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos;
		pos = bps_tree_find_ins_point_elem(tree, inner->elems,
						   inner->header.size - 1,
						   key, exact);
		block_id = inner->child_ids[pos];
		block = bps_tree_restore_block(tree, block_id);
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos;
	pos = bps_tree_find_ins_point_elem(tree, leaf->elems,
					   leaf->header.size, key, exact);
	if (pos >= leaf->header.size) {
		res.block_id = leaf->next_id;
		res.pos = 0;
	} else {
		res.block_id = block_id;
		res.pos = pos;
	}
	return res;
}

/**
 * @brief Get an iterator to the first element that is greater than given
 * element.
 * @param tree - pointer to a tree
 * @param key - the element that will be compared with tree elements
 * @param exact - pointer to a bool value, that will be set to true if
 * an element pointed by the (!)previous iterator is equal to the key,
 * false otherwise. Pass NULL if you don't need that info.
 * @return - Upper-bound iterator. Invalid if all elements are less or equal
 * than the key.
 */
static inline struct bps_tree_iterator
bps_tree_upper_bound_elem(const struct bps_tree *tree, bps_tree_elem_t key,
			  bool *exact)
{
	struct bps_tree_iterator res;
	matras_head_read_view(&res.view);
	bool local_result;
	if (!exact)
		exact = &local_result;
	*exact = false;
	bool exact_test;
	if (tree->root_id == (bps_tree_block_id_t)(-1)) {
		res.block_id = (bps_tree_block_id_t)(-1);
		res.pos = 0;
		return res;
	}
	struct bps_block *block = bps_tree_root(tree);
	bps_tree_block_id_t block_id = tree->root_id;
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos;
		pos = bps_tree_find_after_ins_point_elem(tree, inner->elems,
							 inner->header.size - 1,
							 key, &exact_test);
		if (exact_test)
			*exact = true;
		block_id = inner->child_ids[pos];
		block = bps_tree_restore_block(tree, block_id);
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos;
	pos = bps_tree_find_after_ins_point_elem(tree, leaf->elems,
						 leaf->header.size, key,
						 &exact_test);
	if (exact_test)
		*exact = true;
	if (pos >= leaf->header.size) {
		res.block_id = leaf->next_id;
		res.pos = 0;
	} else {
		res.block_id = block_id;
		res.pos = pos;
	}
	return res;
}

/**
 * @brief Get approximate number of entries that are equal to given key.
 * Accuracy limits:
 * If the result is less than BPS_TREE_name_MAX_COUNT_IN_LEAF * 5 / 6, the
 * result is precise. If not, let's define:
 * X = BPS_TREE_name_MAX_COUNT_IN_LEAF * 5 / 6
 * Y = BPS_TREE_name_MAX_COUNT_IN_INNER * 5 / 6
 * H = ceil(log(Result / X) / log(Y))
 * Then the true count is between:
 * [ Result * pow(0.8 - z, H), Result * pow(1.2 + z, H) ]
 * where z parameter is a small number due to rounding errors
 * @param tree - pointer to a tree
 * @param key - key that will be compared with elements
 * @return - approximate number of entries that are equal to given key.
 */
static inline size_t
bps_tree_approximate_count(const struct bps_tree *tree, bps_tree_key_t key)
{
	if (tree->root_id == (bps_tree_block_id_t)(-1))
		return 0;
	size_t result = 0;
	bool exact;
	/* Descend along both the lower and the upper boundary of the
	 * equal range simultaneously, estimating the count in between. */
	struct bps_block *lower_block = bps_tree_root(tree);
	struct bps_block *upper_block = bps_tree_root(tree);
	for (bps_tree_block_id_t i = 1; i < tree->depth; i++) {
		/* average occupancy in B+* block is 5/6 */
		result *= BPS_TREE_MAX_COUNT_IN_INNER * 5 / 6;
		struct bps_inner *lower_inner = (struct bps_inner *)lower_block;
		bps_tree_pos_t lower_pos = bps_tree_find_ins_point_key(tree,
			lower_inner->elems, lower_inner->header.size - 1,
			key, &exact);
		struct bps_inner *upper_inner = (struct bps_inner *)upper_block;
		bps_tree_pos_t upper_pos = bps_tree_find_after_ins_point_key(
			tree, upper_inner->elems, upper_inner->header.size - 1,
			key, &exact);
		if (lower_inner == upper_inner) {
			if (upper_pos > lower_pos)
				result += upper_pos - lower_pos - 1;
		} else {
			result += lower_inner->header.size - 1 - lower_pos;
			result += upper_pos;
		}
		bps_tree_block_id_t lower_block_id =
			lower_inner->child_ids[lower_pos];
		lower_block = bps_tree_restore_block(tree, lower_block_id);
		bps_tree_block_id_t upper_block_id =
			upper_inner->child_ids[upper_pos];
		upper_block = bps_tree_restore_block(tree, upper_block_id);
	}
	/* average occupancy in B+* block is 5/6 */
	result *= BPS_TREE_MAX_COUNT_IN_LEAF * 5 / 6;
	struct bps_leaf *lower_leaf = (struct bps_leaf *)lower_block;
	bps_tree_pos_t lower_pos = bps_tree_find_ins_point_key(tree,
		lower_leaf->elems, lower_leaf->header.size, key, &exact);
	struct bps_leaf *upper_leaf = (struct bps_leaf *)upper_block;
	bps_tree_pos_t upper_pos = bps_tree_find_after_ins_point_key(tree,
		upper_leaf->elems, upper_leaf->header.size, key, &exact);
	if (lower_leaf == upper_leaf) {
		result += upper_pos - lower_pos;
	} else {
		result += lower_leaf->header.size - 1 - lower_pos;
		result += upper_pos;
		result++;
	}
	return result;
}

/**
 * @brief Get a pointer to the element pointed by iterator.
 * If iterator is detected as broken, it is invalidated and NULL returned.
 * @param tree - pointer to a tree
 * @param itr - pointer to tree iterator
 * @return - Pointer to the element. Null for invalid iterator
 */
static inline bps_tree_elem_t *
bps_tree_iterator_get_elem(const struct bps_tree *tree,
			   struct bps_tree_iterator *itr)
{
	struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr);
	if (!leaf)
		return 0;
	return leaf->elems + itr->pos;
}

/**
 * @brief Increments an iterator, makes it point to the next element
 * If the iterator is to last element, it will be invalidated
 * If the iterator is detected as broken, it will be invalidated.
 * If the iterator is invalid, then it will be set to first element.
 * @param tree - pointer to a tree
 * @param itr - pointer to tree iterator
 * @return - true on success, false if a resulted iterator is set to invalid
 */
static inline bool
bps_tree_iterator_next(const struct bps_tree *tree,
		       struct bps_tree_iterator *itr)
{
	if (itr->block_id == (bps_tree_block_id_t)(-1)) {
		/* Invalid iterator restarts from the first element, but a
		 * frozen (read view) iterator must not restart. */
		if (matras_is_read_view_created(&itr->view))
			return false;
		itr->block_id = tree->first_id;
		itr->pos = 0;
		return itr->block_id != (bps_tree_block_id_t)(-1);
	}
	struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr);
	if (!leaf)
		return false;
	itr->pos++;
	if (itr->pos >= leaf->header.size) {
		/* End of this leaf - step to the next one. */
		itr->block_id = leaf->next_id;
		itr->pos = 0;
		return itr->block_id != (bps_tree_block_id_t)(-1);
	}
	return true;
}

/**
 * @brief Decrements an iterator, makes it point to the previous element
 * If the iterator is to first element, it will be invalidated
 * If the iterator is detected as broken, it will be invalidated.
 * If the iterator is invalid, then it will be set to last element.
 * @param tree - pointer to a tree
 * @param itr - pointer to tree iterator
 * @return - true on success, false if a resulted iterator is set to invalid
 */
static inline bool
bps_tree_iterator_prev(const struct bps_tree *tree,
		       struct bps_tree_iterator *itr)
{
	if (itr->block_id == (bps_tree_block_id_t)(-1)) {
		if (matras_is_read_view_created(&itr->view))
			return false;
		itr->block_id = tree->last_id;
		itr->pos = (bps_tree_pos_t)(-1);
		return itr->block_id != (bps_tree_block_id_t)(-1);
	}
	struct bps_leaf *leaf = bps_tree_get_leaf_safe(tree, itr);
	if (!leaf)
		return false;
	if (itr->pos == 0) {
		/* Beginning of this leaf - step to the previous one. */
		itr->block_id = leaf->prev_id;
		itr->pos = (bps_tree_pos_t)(-1);
		return itr->block_id != (bps_tree_block_id_t)(-1);
	} else {
		itr->pos--;
	}
	return true;
}

/**
 * @brief Freezes tree state for given iterator. All following tree modification
 * will not apply to that iterator iteration. That iterator should be destroyed
 * with a bps_tree_iterator_destroy call after usage.
 * @param tree - pointer to a tree
 * @param itr - pointer to tree iterator
 */
static inline void
bps_tree_iterator_freeze(struct bps_tree *tree, struct bps_tree_iterator *itr)
{
	assert(!matras_is_read_view_created(&itr->view));
	matras_create_read_view(&tree->matras, &itr->view);
}

/**
 * @brief Destroy an iterator that was frozen before. Useless for not frozen
 * iterators.
 * @param tree - pointer to a tree
 * @param itr - pointer to tree iterator
 */
static inline void
bps_tree_iterator_destroy(struct bps_tree *tree, struct bps_tree_iterator *itr)
{
	matras_destroy_read_view(&tree->matras, &itr->view);
}

/**
 * @brief Find the first element that is equal to the key (comparator returns 0)
 * @param tree - pointer to a tree
 * @param key - key that will be compared with elements
 * @return pointer to the first equal element or NULL if not found
 */
static inline bps_tree_elem_t *
bps_tree_find(const struct bps_tree *tree, bps_tree_key_t key)
{
	if (tree->root_id == (bps_tree_block_id_t)(-1))
		return 0;
	struct bps_block *block = bps_tree_root(tree);
	bool exact = false;
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos;
		pos = bps_tree_find_ins_point_key(tree, inner->elems,
						  inner->header.size - 1,
						  key, &exact);
		block = bps_tree_restore_block(tree, inner->child_ids[pos]);
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos;
	pos = bps_tree_find_ins_point_key(tree, leaf->elems,
					  leaf->header.size, key, &exact);
	if (exact)
		return leaf->elems + pos;
	else
		return 0;
}

/**
 * @brief Add a block to the garbage for future reuse
 */
static inline void
bps_tree_garbage_push(struct bps_tree *tree, struct bps_block *block,
		      bps_tree_block_id_t id)
{
	assert(block);
	(void) block;
	/* Preserve leaf neighbor links so that iterators pointing into the
	 * garbaged leaf can later be repaired (see bps_tree_get_leaf_safe). */
	bps_tree_block_id_t next_leaf_id = (bps_tree_block_id_t)(-1);
	bps_tree_block_id_t prev_leaf_id = (bps_tree_block_id_t)(-1);
	if (block->type == BPS_TREE_BT_LEAF) {
		struct bps_leaf *leaf = (struct bps_leaf *)block;
		next_leaf_id = leaf->next_id;
		prev_leaf_id = leaf->prev_id;
	}
	struct bps_garbage *garbage = (struct bps_garbage *)
		bps_tree_touch_block(tree, id);
	garbage->header.type = BPS_TREE_BT_GARBAGE;
	garbage->next_id = tree->garbage_head_id;
	garbage->next_leaf_id = next_leaf_id;
	garbage->prev_leaf_id = prev_leaf_id;
	tree->garbage_head_id = id;
	tree->garbage_count++;
}

/**
 * @brief Reclaim a
block from the garbage for reuse */ static inline struct bps_block * bps_tree_garbage_pop(struct bps_tree *tree, bps_tree_block_id_t *id) { if (tree->garbage_head_id != (bps_tree_block_id_t)(-1)) { *id = tree->garbage_head_id; struct bps_garbage *result = (struct bps_garbage *) bps_tree_touch_block(tree, tree->garbage_head_id); tree->garbage_head_id = result->next_id; tree->garbage_count--; return (struct bps_block *) result; } else { return 0; } } /** * @brief Reclaim from garbage of create new block and convert it to leaf */ static inline struct bps_leaf * bps_tree_create_leaf(struct bps_tree *tree, bps_tree_block_id_t *id) { struct bps_leaf *res = (struct bps_leaf *) bps_tree_garbage_pop(tree, id); if (!res) res = (struct bps_leaf *)matras_alloc(&tree->matras, id); res->header.type = BPS_TREE_BT_LEAF; tree->leaf_count++; return res; } /** * @brief Reclaim from garbage of create new block and convert it to inner */ static inline struct bps_inner * bps_tree_create_inner(struct bps_tree *tree, bps_tree_block_id_t *id) { struct bps_inner *res = (struct bps_inner *) bps_tree_garbage_pop(tree, id); if (!res) res = (struct bps_inner *)matras_alloc(&tree->matras, id); res->header.type = BPS_TREE_BT_INNER; tree->inner_count++; return res; } /** * @brief Dispose leaf block (to garbage and decrement counter) */ static inline void bps_tree_dispose_leaf(struct bps_tree *tree, struct bps_leaf *leaf, bps_tree_block_id_t id) { tree->leaf_count--; bps_tree_garbage_push(tree, (struct bps_block *)leaf, id); } /** * @brief Dispose inner block (to garbage and decrement counter) */ static inline void bps_tree_dispose_inner(struct bps_tree *tree, struct bps_inner *inner, bps_tree_block_id_t id) { tree->inner_count--; bps_tree_garbage_push(tree, (struct bps_block *)inner, id); } /** * @brief Reserve a number of block, return false if failed. 
 */
static inline bool
bps_tree_reserve_blocks(struct bps_tree *tree, bps_tree_block_id_t count)
{
	/* Pre-allocate blocks into the garbage list until 'count' are free. */
	while (tree->garbage_count < count) {
		bps_tree_block_id_t id;
		struct bps_block *block = (struct bps_block *)
			matras_alloc(&tree->matras, &id);
		if (!block)
			return false;
		block->type = BPS_TREE_BT_GARBAGE;
		bps_tree_garbage_push(tree, block, id);
	}
	return true;
}

/**
 * @brief Insert the first element into an empty tree.
 * @return 0 on success, -1 on memory allocation failure
 */
static inline int
bps_tree_insert_first_elem(struct bps_tree *tree, bps_tree_elem_t new_elem)
{
	assert(tree->depth == 0);
	assert(tree->size == 0);
	assert(tree->leaf_count == 0);
	tree->max_elem = new_elem;
	struct bps_leaf *leaf = bps_tree_create_leaf(tree, &tree->root_id);
	if (!leaf)
		return -1;
	leaf->header.size = 1;
	leaf->elems[0] = new_elem;
	/* The single leaf is root, first and last of the leaf chain. */
	tree->first_id = tree->root_id;
	tree->last_id = tree->root_id;
	leaf->prev_id = (bps_tree_block_id_t)(-1);
	leaf->next_id = (bps_tree_block_id_t)(-1);
	tree->depth = 1;
	tree->size = 1;
	return 0;
}

/**
 * @brief Collect path to an element or to the place where it can be inserted
 */
static inline void
bps_tree_collect_path(struct bps_tree *tree, bps_tree_elem_t new_elem,
		      struct bps_inner_path_elem *path,
		      struct bps_leaf_path_elem *leaf_path_elem, bool *exact)
{
	*exact = false;
	struct bps_inner_path_elem *prev_ext = 0;
	bps_tree_pos_t prev_pos = 0;
	struct bps_block *block = bps_tree_root(tree);
	bps_tree_block_id_t block_id = tree->root_id;
	/*
	 * Track where the maximum element of the current subtree is stored;
	 * for the rightmost path that is the tree-wide max_elem.
	 */
	bps_tree_elem_t *max_elem_copy = &tree->max_elem;
	bps_tree_block_id_t max_elem_block_id = (bps_tree_block_id_t)-1;
	bps_tree_pos_t max_elem_pos = (bps_tree_pos_t)-1;
	for (bps_tree_block_id_t i = 0; i < tree->depth - 1; i++) {
		struct bps_inner *inner = (struct bps_inner *)block;
		bps_tree_pos_t pos;
		/* Once an exact match is found, follow the last child. */
		if (*exact)
			pos = inner->header.size - 1;
		else
			pos = bps_tree_find_ins_point_elem(tree, inner->elems,
						inner->header.size - 1,
						new_elem, exact);
		path[i].block = inner;
		path[i].block_id = block_id;
		path[i].insertion_point = pos;
		path[i].pos_in_parent = prev_pos;
		path[i].parent = prev_ext;
		path[i].max_elem_copy = max_elem_copy;
		path[i].max_elem_block_id = max_elem_block_id;
		path[i].max_elem_pos = max_elem_pos;
		/* A non-last child's max element lives in this inner block. */
		if (pos < inner->header.size - 1) {
			max_elem_copy = inner->elems + pos;
			max_elem_block_id = block_id;
			max_elem_pos = pos;
		}
		block_id = inner->child_ids[pos];
		block = bps_tree_restore_block(tree, block_id);
		prev_pos = pos;
		prev_ext = path + i;
	}
	struct bps_leaf *leaf = (struct bps_leaf *)block;
	bps_tree_pos_t pos;
	if (*exact)
		pos = leaf->header.size - 1;
	else
		pos = bps_tree_find_ins_point_elem(tree, leaf->elems,
						   leaf->header.size,
						   new_elem, exact);
	leaf_path_elem->block = leaf;
	leaf_path_elem->block_id = block_id;
	leaf_path_elem->insertion_point = pos;
	leaf_path_elem->pos_in_parent = prev_pos;
	leaf_path_elem->parent = prev_ext;
	leaf_path_elem->max_elem_copy = max_elem_copy;
	leaf_path_elem->max_elem_block_id = max_elem_block_id;
	leaf_path_elem->max_elem_pos = max_elem_pos;
}

/**
 * @brief Get a fresh COW link to the max_elem_copy member of a leaf path
 */
static inline void
bps_tree_touch_leaf_path_max_elem(struct bps_tree *tree,
				  struct bps_leaf_path_elem *leaf_path_elem)
{
	/* id == -1 means max_elem_copy points into the tree struct itself. */
	if (leaf_path_elem->max_elem_block_id == (bps_tree_block_id_t)-1)
		return;
	struct bps_inner *holder = (struct bps_inner *)
		bps_tree_touch_block(tree, leaf_path_elem->max_elem_block_id);
	leaf_path_elem->max_elem_copy =
		holder->elems + leaf_path_elem->max_elem_pos;
}

/**
 * @brief Get fresh COW links to all blocks and max_elem_copys along a path
 */
static inline void
bps_tree_touch_path(struct bps_tree *tree,
		    struct bps_leaf_path_elem *leaf_path_elem)
{
	bps_tree_touch_leaf_path_max_elem(tree, leaf_path_elem);
	for (struct bps_inner_path_elem *path = leaf_path_elem->parent;
	     path; path = path->parent) {
		path->block = (struct bps_inner *)
			bps_tree_touch_block(tree, path->block_id);
		if (path->max_elem_block_id == (bps_tree_block_id_t)-1)
			continue;
		struct bps_inner *holder = (struct bps_inner *)
			bps_tree_touch_block(tree, path->max_elem_block_id);
		path->max_elem_copy = holder->elems + path->max_elem_pos;
	}
}

/**
 * @brief Replace an element by its path and fill the *replaced argument
 */
static inline bool
bps_tree_process_replace(struct bps_tree *tree,
			 struct bps_leaf_path_elem *leaf_path_elem,
			 bps_tree_elem_t new_elem, bps_tree_elem_t *replaced)
{
	/* COW-touch the leaf before modifying it. */
	leaf_path_elem->block = (struct bps_leaf *)
		bps_tree_touch_block(tree, leaf_path_elem->block_id);
	struct bps_leaf *leaf = leaf_path_elem->block;
	assert(leaf_path_elem->insertion_point < leaf->header.size);
	if (replaced)
		*replaced = leaf->elems[leaf_path_elem->insertion_point];
	leaf->elems[leaf_path_elem->insertion_point] = new_elem;
	/* Replacing the last element updates the subtree maximum too. */
	if (leaf_path_elem->insertion_point == leaf->header.size - 1) {
		bps_tree_touch_leaf_path_max_elem(tree, leaf_path_elem);
		*leaf_path_elem->max_elem_copy =
			leaf->elems[leaf->header.size - 1];
	}
	return true;
}

#ifndef NDEBUG
/**
 * @brief Debug memmove, checks source and destination against block bounds
 */
static inline void
bps_tree_debug_memmove(void *dst_arg, void *src_arg, size_t num,
		       void *dst_block_arg, void *src_block_arg)
{
	char *dst = (char *)dst_arg;
	char *src = (char *)src_arg;
	struct bps_block *dst_block = (struct bps_block *)dst_block_arg;
	struct bps_block *src_block = (struct bps_block *)src_block_arg;
	(void) dst_block;
	(void) src_block;
	assert(dst_block->type == src_block->type);
	assert(dst_block->type == BPS_TREE_BT_LEAF ||
	       dst_block->type == BPS_TREE_BT_INNER);
	if (dst_block->type == BPS_TREE_BT_LEAF) {
		struct bps_leaf *dst_leaf = (struct bps_leaf *)dst_block_arg;
		struct bps_leaf *src_leaf = (struct bps_leaf *)src_block_arg;
		(void) dst_leaf;
		(void) src_leaf;
		if (num) {
			/* Non-empty move: pointers must be strictly inside. */
			assert(dst >= ((char *)dst_leaf->elems));
			assert(dst < ((char *)dst_leaf->elems) +
			       BPS_TREE_MAX_COUNT_IN_LEAF *
			       sizeof(bps_tree_elem_t));
			assert(src >= (char *)src_leaf->elems);
			assert(src < ((char *)src_leaf->elems) +
			       BPS_TREE_MAX_COUNT_IN_LEAF *
			       sizeof(bps_tree_elem_t));
		} else {
			/* Empty move: one-past-the-end pointers are legal. */
			assert(dst >= ((char *)dst_leaf->elems));
			assert(dst <= ((char *)dst_leaf->elems) +
			       BPS_TREE_MAX_COUNT_IN_LEAF *
			       sizeof(bps_tree_elem_t));
			assert(src >= (char *)src_leaf->elems);
			assert(src <= ((char *)src_leaf->elems) +
			       BPS_TREE_MAX_COUNT_IN_LEAF *
			       sizeof(bps_tree_elem_t));
		}
	} else {
		struct bps_inner *dst_inner = (struct bps_inner *)
			dst_block_arg;
		struct bps_inner *src_inner = (struct bps_inner *)
			src_block_arg;
		if (num) {
			/* The move is either within elems or child_ids. */
			if (dst >= ((char *)dst_inner->elems) &&
			    dst < ((char *)dst_inner->elems) +
			    (BPS_TREE_MAX_COUNT_IN_INNER - 1) *
			    sizeof(bps_tree_elem_t)) {
				assert(dst >= ((char *)dst_inner->elems));
				assert(dst < ((char *)dst_inner->elems) +
				       (BPS_TREE_MAX_COUNT_IN_INNER - 1) *
				       sizeof(bps_tree_elem_t));
				assert(src >= (char *)src_inner->elems);
				assert(src < ((char *)src_inner->elems) +
				       (BPS_TREE_MAX_COUNT_IN_INNER - 1) *
				       sizeof(bps_tree_elem_t));
			} else {
				assert(dst >= ((char *)dst_inner->child_ids));
				assert(dst < ((char *)dst_inner->child_ids) +
				       BPS_TREE_MAX_COUNT_IN_INNER *
				       sizeof(bps_tree_block_id_t));
				assert(src >= (char *)src_inner->child_ids);
				assert(src < ((char *)src_inner->child_ids) +
				       BPS_TREE_MAX_COUNT_IN_INNER *
				       sizeof(bps_tree_block_id_t));
			}
		} else {
			if (dst >= ((char *)dst_inner->elems) &&
			    dst <= ((char *)dst_inner->elems) +
			    (BPS_TREE_MAX_COUNT_IN_INNER - 1) *
			    sizeof(bps_tree_elem_t) &&
			    src >= (char *)src_inner->elems &&
			    src <= ((char *)src_inner->elems) +
			    (BPS_TREE_MAX_COUNT_IN_INNER - 1) *
			    sizeof(bps_tree_elem_t)) {
				/* nothing to do due to if condition */
			} else {
				assert(dst >= ((char *)dst_inner->child_ids));
				assert(dst <= ((char *)dst_inner->child_ids) +
				       BPS_TREE_MAX_COUNT_IN_INNER *
				       sizeof(bps_tree_block_id_t));
				assert(src >= (char *)src_inner->child_ids);
				assert(src <= ((char *)src_inner->child_ids) +
				       BPS_TREE_MAX_COUNT_IN_INNER *
				       sizeof(bps_tree_block_id_t));
			}
		}
	}
	/* oh, useful work at last */
	memmove(dst, src, num);
}
#endif

/**
 * @brief Insert an element into a leaf block. There must be enough space.
 */
static inline void
bps_tree_insert_into_leaf(struct bps_tree *tree,
			  struct bps_leaf_path_elem *leaf_path_elem,
			  bps_tree_elem_t new_elem)
{
	/* COW-touch the block (skipped in debug-check mode, root_id == -1) */
	if (tree->root_id != (bps_tree_block_id_t) -1)
		leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree, leaf_path_elem->block_id);
	struct bps_leaf *leaf = leaf_path_elem->block;
	bps_tree_pos_t pos = leaf_path_elem->insertion_point;
	assert(pos >= 0);
	assert(pos <= leaf->header.size);
	assert(leaf->header.size < BPS_TREE_MAX_COUNT_IN_LEAF);
	/* Shift the tail right to open a gap at 'pos'. */
	BPS_TREE_DATAMOVE(leaf->elems + pos + 1, leaf->elems + pos,
			  leaf->header.size - pos, leaf, leaf);
	leaf->elems[pos] = new_elem;
	/* Appending at the end changes the subtree maximum. */
	if (pos == leaf->header.size) {
		bps_tree_touch_leaf_path_max_elem(tree, leaf_path_elem);
		*leaf_path_elem->max_elem_copy =
			leaf->elems[leaf->header.size];
	}
	leaf->header.size++;
	tree->size++;
}

/**
 * @brief Insert a child into an inner block. There must be enough space.
 */
static inline void
bps_tree_insert_into_inner(struct bps_tree *tree,
			   struct bps_inner_path_elem *inner_path_elem,
			   bps_tree_block_id_t block_id, bps_tree_pos_t pos,
			   bps_tree_elem_t max_elem)
{
	/* COW-touch the block (skipped in debug-check mode, root_id == -1) */
	if (tree->root_id != (bps_tree_block_id_t) -1)
		inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree, inner_path_elem->block_id);
	struct bps_inner *inner = inner_path_elem->block;
	assert(pos >= 0);
	assert(pos <= inner->header.size);
	assert(inner->header.size < BPS_TREE_MAX_COUNT_IN_INNER);
	if (pos < inner->header.size) {
		/*
		 * Inner blocks keep size-1 separator elems, hence the
		 * asymmetric move counts for elems vs child_ids.
		 */
		BPS_TREE_DATAMOVE(inner->elems + pos + 1, inner->elems + pos,
				  inner->header.size - pos - 1, inner, inner);
		inner->elems[pos] = max_elem;
		BPS_TREE_DATAMOVE(inner->child_ids + pos + 1,
				  inner->child_ids + pos,
				  inner->header.size - pos, inner, inner);
	} else {
		/* Appending last child: old max becomes a separator. */
		if (pos > 0)
			inner->elems[pos - 1] =
				*inner_path_elem->max_elem_copy;
		*inner_path_elem->max_elem_copy = max_elem;
	}
	inner->child_ids[pos] = block_id;
	inner->header.size++;
}

/**
 * @brief Delete an element from a leaf block.
 */
static inline void
bps_tree_delete_from_leaf(struct bps_tree *tree,
			  struct bps_leaf_path_elem *leaf_path_elem)
{
	/* COW-touch the block (skipped in debug-check mode, root_id == -1) */
	if (tree->root_id != (bps_tree_block_id_t) -1)
		leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree, leaf_path_elem->block_id);
	struct bps_leaf *leaf = leaf_path_elem->block;
	bps_tree_pos_t pos = leaf_path_elem->insertion_point;
	assert(pos >= 0);
	assert(pos < leaf->header.size);
	BPS_TREE_DATAMOVE(leaf->elems + pos, leaf->elems + pos + 1,
			  leaf->header.size - 1 - pos, leaf, leaf);
	leaf->header.size--;
	/* Removing the last element updates the subtree maximum. */
	if (leaf->header.size > 0 && pos == leaf->header.size) {
		bps_tree_touch_leaf_path_max_elem(tree, leaf_path_elem);
		*leaf_path_elem->max_elem_copy =
			leaf->elems[leaf->header.size - 1];
	}
	tree->size--;
}

/**
 * @brief Delete a child from an inner block.
 */
static inline void
bps_tree_delete_from_inner(struct bps_tree *tree,
			   struct bps_inner_path_elem *inner_path_elem)
{
	/* COW-touch the block (skipped in debug-check mode, root_id == -1) */
	if (tree->root_id != (bps_tree_block_id_t) -1)
		inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree, inner_path_elem->block_id);
	struct bps_inner *inner = inner_path_elem->block;
	bps_tree_pos_t pos = inner_path_elem->insertion_point;
	assert(pos >= 0);
	assert(pos < inner->header.size);
	if (pos < inner->header.size - 1) {
		BPS_TREE_DATAMOVE(inner->elems + pos, inner->elems + pos + 1,
				  inner->header.size - 2 - pos, inner, inner);
		BPS_TREE_DATAMOVE(inner->child_ids + pos,
				  inner->child_ids + pos + 1,
				  inner->header.size - 1 - pos, inner, inner);
	} else if (pos > 0) {
		/* Last child removed: previous separator becomes the max. */
		*inner_path_elem->max_elem_copy = inner->elems[pos - 1];
	}
	inner->header.size--;
}

/**
 * @brief Move a number of elements from the left leaf to the right leaf
 */
static inline void
bps_tree_move_elems_to_right_leaf(struct bps_tree *tree,
		struct bps_leaf_path_elem *a_leaf_path_elem,
		struct bps_leaf_path_elem *b_leaf_path_elem,
		bps_tree_pos_t num)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     a_leaf_path_elem->block_id);
		b_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     b_leaf_path_elem->block_id);
	}
	struct bps_leaf *a = a_leaf_path_elem->block;
	struct bps_leaf *b = b_leaf_path_elem->block;
	bool move_all = a->header.size == num;
	assert(num > 0);
	assert(a->header.size >= num);
	assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF);
	BPS_TREE_DATAMOVE(b->elems + num, b->elems, b->header.size, b, b);
	BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num, num,
			  b, a);
	a->header.size -= num;
	b->header.size += num;
	/* If 'a' is drained completely its max is left untouched. */
	if (!move_all)
		*a_leaf_path_elem->max_elem_copy =
			a->elems[a->header.size - 1];
	*b_leaf_path_elem->max_elem_copy = b->elems[b->header.size - 1];
}

/**
 * @brief Move a number of children from the left inner to the right inner
 */
static inline void
bps_tree_move_elems_to_right_inner(struct bps_tree *tree,
		struct bps_inner_path_elem *a_inner_path_elem,
		struct bps_inner_path_elem *b_inner_path_elem,
		bps_tree_pos_t num)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     a_inner_path_elem->block_id);
		b_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     b_inner_path_elem->block_id);
	}
	struct bps_inner *a = a_inner_path_elem->block;
	struct bps_inner *b = b_inner_path_elem->block;
	bool move_to_empty = b->header.size == 0;
	bool move_all = a->header.size == num;
	assert(num > 0);
	assert(a->header.size >= num);
	assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER);
	BPS_TREE_DATAMOVE(b->child_ids + num, b->child_ids, b->header.size,
			  b, b);
	BPS_TREE_DATAMOVE(b->child_ids, a->child_ids + a->header.size - num,
			  num, b, a);
	/* Inner blocks hold size-1 separators, hence the -1 counts. */
	if (!move_to_empty)
		BPS_TREE_DATAMOVE(b->elems + num, b->elems,
				  b->header.size - 1, b, b);
	BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num,
			  num - 1, b, a);
	if (move_to_empty)
		*b_inner_path_elem->max_elem_copy =
			*a_inner_path_elem->max_elem_copy;
	else
		b->elems[num - 1] = *a_inner_path_elem->max_elem_copy;
	if (!move_all)
		*a_inner_path_elem->max_elem_copy =
			a->elems[a->header.size - num - 1];
	a->header.size -= num;
	b->header.size += num;
}

/**
 * @brief Move a number of elements from the right leaf to the left leaf
 */
static inline void
bps_tree_move_elems_to_left_leaf(struct bps_tree *tree,
		struct bps_leaf_path_elem *a_leaf_path_elem,
		struct bps_leaf_path_elem *b_leaf_path_elem,
		bps_tree_pos_t num)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     a_leaf_path_elem->block_id);
		b_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     b_leaf_path_elem->block_id);
	}
	struct bps_leaf *a = a_leaf_path_elem->block;
	struct bps_leaf *b = b_leaf_path_elem->block;
	assert(num > 0);
	assert(b->header.size >= num);
	assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF);
	BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, num, a, b);
	BPS_TREE_DATAMOVE(b->elems, b->elems + num, b->header.size - num,
			  b, b);
	a->header.size += num;
	b->header.size -= num;
	*a_leaf_path_elem->max_elem_copy = a->elems[a->header.size - 1];
}

/**
 * @brief Move a number of children from the right inner to the left inner
 */
static inline void
bps_tree_move_elems_to_left_inner(struct bps_tree *tree,
		struct bps_inner_path_elem *a_inner_path_elem,
		struct bps_inner_path_elem *b_inner_path_elem,
		bps_tree_pos_t num)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     a_inner_path_elem->block_id);
		b_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     b_inner_path_elem->block_id);
	}
	struct bps_inner *a = a_inner_path_elem->block;
	struct bps_inner *b = b_inner_path_elem->block;
	bool move_to_empty = a->header.size
		== 0;
	bool move_all = b->header.size == num;
	assert(num > 0);
	assert(b->header.size >= num);
	assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER);
	BPS_TREE_DATAMOVE(a->child_ids + a->header.size, b->child_ids, num,
			  a, b);
	BPS_TREE_DATAMOVE(b->child_ids, b->child_ids + num,
			  b->header.size - num, b, b);
	/* Old max of 'a' becomes an ordinary separator. */
	if (!move_to_empty)
		a->elems[a->header.size - 1] =
			*a_inner_path_elem->max_elem_copy;
	BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, num - 1, a, b);
	if (move_all) {
		*a_inner_path_elem->max_elem_copy =
			*b_inner_path_elem->max_elem_copy;
	} else {
		*a_inner_path_elem->max_elem_copy = b->elems[num - 1];
		BPS_TREE_DATAMOVE(b->elems, b->elems + num,
				  b->header.size - num - 1, b, b);
	}
	a->header.size += num;
	b->header.size -= num;
}

/**
 * @brief Insert into leaf and move a number of elements to the right.
 * Works as if bps_tree_insert_into_leaf and
 * bps_tree_move_elems_to_right_leaf were called one after another,
 * BUT(!) insertion is allowed into a full block, so one can consider the
 * insertion as a virtual insertion into a virtual block of greater maximum
 * size.
 */
static inline struct bps_leaf_path_elem *
bps_tree_insert_and_move_elems_to_right_leaf(struct bps_tree *tree,
		struct bps_leaf_path_elem *a_leaf_path_elem,
		struct bps_leaf_path_elem *b_leaf_path_elem,
		bps_tree_pos_t num, bps_tree_elem_t new_elem)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     a_leaf_path_elem->block_id);
		b_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     b_leaf_path_elem->block_id);
	}
	struct bps_leaf *a = a_leaf_path_elem->block;
	struct bps_leaf *b = b_leaf_path_elem->block;
	bps_tree_pos_t pos = a_leaf_path_elem->insertion_point;
	bool move_to_empty = b->header.size == 0;
	bool move_all = a->header.size == num - 1;
	struct bps_leaf_path_elem *ret;
	assert(num > 0);
	assert(a->header.size >= num - 1);
	assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF);
	assert(pos <= a->header.size);
	assert(pos >= 0);
	BPS_TREE_DATAMOVE(b->elems + num, b->elems, b->header.size, b, b);
	/* Elements of 'a' at or after 'pos' — candidates to travel to 'b'. */
	bps_tree_pos_t mid_part_size = a->header.size - pos;
	if (mid_part_size >= num) {
		/* In fact insert to 'a' block */
		BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num,
				  num, b, a);
		BPS_TREE_DATAMOVE(a->elems + pos + 1, a->elems + pos,
				  mid_part_size - num, a, a);
		a->elems[pos] = new_elem;
		ret = a_leaf_path_elem;
	} else {
		/* In fact insert to 'b' block */
		bps_tree_pos_t new_pos = num - mid_part_size - 1;/* Can be 0 */
		BPS_TREE_DATAMOVE(b->elems,
				  a->elems + a->header.size - num + 1,
				  new_pos, b, a);
		b->elems[new_pos] = new_elem;
		BPS_TREE_DATAMOVE(b->elems + new_pos + 1, a->elems + pos,
				  mid_part_size, b, a);
		ret = b_leaf_path_elem;
		ret->insertion_point = new_pos;
	}
	a->header.size -= (num - 1);
	b->header.size += num;
	if (!move_all)
		*a_leaf_path_elem->max_elem_copy =
			a->elems[a->header.size - 1];
	if (move_to_empty)
		*b_leaf_path_elem->max_elem_copy =
			b->elems[b->header.size - 1];
	tree->size++;
	return ret;
}

/**
 * @brief Insert into inner and move a number of children to the right.
 * Works as if bps_tree_insert_into_inner and
 * bps_tree_move_elems_to_right_inner were called one after another,
 * BUT(!)
 * insertion is allowed into a full block, so one can consider the
 * insertion as a virtual insertion into a virtual block of greater maximum
 * size.
 */
static inline void
bps_tree_insert_and_move_elems_to_right_inner(struct bps_tree *tree,
		struct bps_inner_path_elem *a_inner_path_elem,
		struct bps_inner_path_elem *b_inner_path_elem,
		bps_tree_pos_t num, bps_tree_block_id_t block_id,
		bps_tree_pos_t pos, bps_tree_elem_t max_elem)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     a_inner_path_elem->block_id);
		b_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     b_inner_path_elem->block_id);
	}
	struct bps_inner *a = a_inner_path_elem->block;
	struct bps_inner *b = b_inner_path_elem->block;
	bool move_to_empty = b->header.size == 0;
	bool move_all = a->header.size == num - 1;
	assert(num > 0);
	assert(a->header.size >= num - 1);
	assert(b->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER);
	assert(pos <= a->header.size);
	assert(pos >= 0);
	if (!move_to_empty) {
		BPS_TREE_DATAMOVE(b->child_ids + num, b->child_ids,
				  b->header.size, b, b);
		BPS_TREE_DATAMOVE(b->elems + num, b->elems,
				  b->header.size - 1, b, b);
	}
	/* Children of 'a' at or after 'pos' — candidates to travel to 'b'. */
	bps_tree_pos_t mid_part_size = a->header.size - pos;
	if (mid_part_size > num) {
		/* In fact insert to 'a' block, to the internal position */
		BPS_TREE_DATAMOVE(b->child_ids,
				  a->child_ids + a->header.size - num,
				  num, b, a);
		BPS_TREE_DATAMOVE(a->child_ids + pos + 1, a->child_ids + pos,
				  mid_part_size - num, a, a);
		a->child_ids[pos] = block_id;
		BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num,
				  num - 1, b, a);
		if (move_to_empty)
			*b_inner_path_elem->max_elem_copy =
				*a_inner_path_elem->max_elem_copy;
		else
			b->elems[num - 1] = *a_inner_path_elem->max_elem_copy;
		*a_inner_path_elem->max_elem_copy =
			a->elems[a->header.size - num - 1];
		BPS_TREE_DATAMOVE(a->elems + pos + 1, a->elems + pos,
				  mid_part_size - num - 1, a, a);
		a->elems[pos] = max_elem;
	} else if (mid_part_size == num) {
		/* In fact insert to 'a' block, to the last position */
		BPS_TREE_DATAMOVE(b->child_ids,
				  a->child_ids + a->header.size - num,
				  num, b, a);
		BPS_TREE_DATAMOVE(a->child_ids + pos + 1, a->child_ids + pos,
				  mid_part_size - num, a, a);
		a->child_ids[pos] = block_id;
		BPS_TREE_DATAMOVE(b->elems, a->elems + a->header.size - num,
				  num - 1, b, a);
		if (move_to_empty)
			*b_inner_path_elem->max_elem_copy =
				*a_inner_path_elem->max_elem_copy;
		else
			b->elems[num - 1] = *a_inner_path_elem->max_elem_copy;
		*a_inner_path_elem->max_elem_copy = max_elem;
	} else {
		/* In fact insert to 'b' block */
		bps_tree_pos_t new_pos = num - mid_part_size - 1;/* Can be 0 */
		BPS_TREE_DATAMOVE(b->child_ids,
				  a->child_ids + a->header.size - num + 1,
				  new_pos, b, a);
		b->child_ids[new_pos] = block_id;
		BPS_TREE_DATAMOVE(b->child_ids + new_pos + 1,
				  a->child_ids + pos, mid_part_size, b, a);
		if (pos == a->header.size) {
			/* +1 */
			if (move_to_empty)
				*b_inner_path_elem->max_elem_copy = max_elem;
			else
				b->elems[num - 1] = max_elem;
			if (num > 1) {
				/* +(num - 2) */
				BPS_TREE_DATAMOVE(b->elems,
						  a->elems + a->header.size -
						  num + 1, num - 2, b, a);
				/* +1 */
				b->elems[num - 2] =
					*a_inner_path_elem->max_elem_copy;
				if (!move_all)
					*a_inner_path_elem->max_elem_copy =
						a->elems[a->header.size - num];
			}
		} else {
			assert(num > 1);
			BPS_TREE_DATAMOVE(b->elems,
					  a->elems + a->header.size - num + 1,
					  num - mid_part_size - 1, b, a);
			b->elems[new_pos] = max_elem;
			BPS_TREE_DATAMOVE(b->elems + new_pos + 1,
					  a->elems + pos, mid_part_size - 1,
					  b, a);
			if (move_to_empty)
				*b_inner_path_elem->max_elem_copy =
					*a_inner_path_elem->max_elem_copy;
			else
				b->elems[num - 1] =
					*a_inner_path_elem->max_elem_copy;
			if (!move_all)
				*a_inner_path_elem->max_elem_copy =
					a->elems[a->header.size - num];
		}
	}
	a->header.size -= (num - 1);
	b->header.size += num;
}

/**
 * @brief Insert into leaf and move a number of elements to the left.
 * Works as if bps_tree_insert_into_leaf and
 * bps_tree_move_elems_to_left_leaf were called one after another,
 * BUT(!)
 * insertion is allowed into a full block, so one can consider the
 * insertion as a virtual insertion into a virtual block of greater maximum
 * size.
 */
static inline struct bps_leaf_path_elem *
bps_tree_insert_and_move_elems_to_left_leaf(struct bps_tree *tree,
		struct bps_leaf_path_elem *a_leaf_path_elem,
		struct bps_leaf_path_elem *b_leaf_path_elem,
		bps_tree_pos_t num, bps_tree_elem_t new_elem)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     a_leaf_path_elem->block_id);
		b_leaf_path_elem->block = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     b_leaf_path_elem->block_id);
	}
	struct bps_leaf *a = a_leaf_path_elem->block;
	struct bps_leaf *b = b_leaf_path_elem->block;
	bps_tree_pos_t pos = b_leaf_path_elem->insertion_point;
	bool move_all = b->header.size == num - 1;
	struct bps_leaf_path_elem *ret;
	assert(num > 0);
	assert(b->header.size >= num - 1);
	assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_LEAF);
	assert(pos >= 0);
	assert(pos <= b->header.size);
	if (pos >= num) {
		/* In fact insert to 'b' block */
		bps_tree_pos_t new_pos = pos - num; /* Can be 0 */
		BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, num,
				  a, b);
		BPS_TREE_DATAMOVE(b->elems, b->elems + num, new_pos, b, b);
		b->elems[new_pos] = new_elem;
		BPS_TREE_DATAMOVE(b->elems + new_pos + 1, b->elems + pos,
				  b->header.size - pos, b, b);
		ret = b_leaf_path_elem;
		ret->insertion_point = new_pos;
	} else {
		/* In fact insert to 'a' block */
		bps_tree_pos_t new_pos = a->header.size + pos; /* Can be 0 */
		BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems, pos,
				  a, b);
		a->elems[new_pos] = new_elem;
		BPS_TREE_DATAMOVE(a->elems + new_pos + 1, b->elems + pos,
				  num - 1 - pos, a, b);
		if (!move_all)
			BPS_TREE_DATAMOVE(b->elems, b->elems + num - 1,
					  b->header.size - num + 1, b, b);
		ret = a_leaf_path_elem;
		ret->insertion_point = new_pos;
	}
	a->header.size += num;
	b->header.size -= (num - 1);
	*a_leaf_path_elem->max_elem_copy = a->elems[a->header.size - 1];
	if (!move_all)
		*b_leaf_path_elem->max_elem_copy =
			b->elems[b->header.size - 1];
	tree->size++;
	return ret;
}

/**
 * @brief Insert into inner and move a number of children to the left.
 * Works as if bps_tree_insert_into_inner and
 * bps_tree_move_elems_to_left_inner were called one after another,
 * BUT(!) insertion is allowed into a full block, so one can consider the
 * insertion as a virtual insertion into a virtual block of greater maximum
 * size.
 */
static inline void
bps_tree_insert_and_move_elems_to_left_inner(struct bps_tree *tree,
		struct bps_inner_path_elem *a_inner_path_elem,
		struct bps_inner_path_elem *b_inner_path_elem,
		bps_tree_pos_t num, bps_tree_block_id_t block_id,
		bps_tree_pos_t pos, bps_tree_elem_t max_elem)
{
	/* COW-touch both blocks (skipped in debug-check mode) */
	if (tree->root_id != (bps_tree_block_id_t) -1) {
		a_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     a_inner_path_elem->block_id);
		b_inner_path_elem->block = (struct bps_inner *)
			bps_tree_touch_block(tree,
					     b_inner_path_elem->block_id);
	}
	struct bps_inner *a = a_inner_path_elem->block;
	struct bps_inner *b = b_inner_path_elem->block;
	bool move_to_empty = a->header.size == 0;
	bool move_all = b->header.size == num - 1;
	assert(num > 0);
	assert(b->header.size >= num - 1);
	assert(a->header.size + num <= BPS_TREE_MAX_COUNT_IN_INNER);
	assert(pos >= 0);
	assert(pos <= b->header.size);
	if (pos >= num) {
		/* In fact insert to 'b' block */
		bps_tree_pos_t new_pos = pos - num; /* Can be 0 */
		BPS_TREE_DATAMOVE(a->child_ids + a->header.size, b->child_ids,
				  num, a, b);
		BPS_TREE_DATAMOVE(b->child_ids, b->child_ids + num, new_pos,
				  b, b);
		b->child_ids[new_pos] = block_id;
		BPS_TREE_DATAMOVE(b->child_ids + new_pos + 1,
				  b->child_ids + pos,
				  b->header.size - pos, b, b);
		if (!move_to_empty)
			a->elems[a->header.size - 1] =
				*a_inner_path_elem->max_elem_copy;
		BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems,
				  num - 1, a, b);
		if (num < b->header.size)
			*a_inner_path_elem->max_elem_copy = b->elems[num - 1];
		else
			*a_inner_path_elem->max_elem_copy =
				*b_inner_path_elem->max_elem_copy;
		if (pos == b->header.size) {
			/* arrow is righter than star */
			if (num < b->header.size) {
				BPS_TREE_DATAMOVE(b->elems, b->elems + num,
						  b->header.size - num - 1,
						  b, b);
				b->elems[b->header.size - num - 1] =
					*b_inner_path_elem->max_elem_copy;
			}
			*b_inner_path_elem->max_elem_copy = max_elem;
		} else {
			/* star is righter than arrow */
			BPS_TREE_DATAMOVE(b->elems, b->elems + num, new_pos,
					  b, b);
			b->elems[new_pos] = max_elem;
			BPS_TREE_DATAMOVE(b->elems + new_pos + 1,
					  b->elems + pos,
					  b->header.size - pos - 1, b, b);
		}
	} else {
		/* In fact insert to 'a' block */
		bps_tree_pos_t new_pos = a->header.size + pos; /* Can be 0 */
		BPS_TREE_DATAMOVE(a->child_ids + a->header.size, b->child_ids,
				  pos, a, b);
		a->child_ids[new_pos] = block_id;
		BPS_TREE_DATAMOVE(a->child_ids + new_pos + 1,
				  b->child_ids + pos, num - 1 - pos, a, b);
		if (!move_all)
			BPS_TREE_DATAMOVE(b->child_ids,
					  b->child_ids + num - 1,
					  b->header.size - num + 1, b, b);
		if (!move_to_empty)
			a->elems[a->header.size - 1] =
				*a_inner_path_elem->max_elem_copy;
		if (!move_all) {
			BPS_TREE_DATAMOVE(a->elems + a->header.size, b->elems,
					  pos, a, b);
		} else {
			if (pos == b->header.size) {
				if (pos > 0) {
					/* why? */
					BPS_TREE_DATAMOVE(a->elems +
							  a->header.size,
							  b->elems, pos - 1,
							  a, b);
					a->elems[new_pos - 1] =
						*b_inner_path_elem->max_elem_copy;
				}
			} else {
				BPS_TREE_DATAMOVE(a->elems + a->header.size,
						  b->elems, pos, a, b);
			}
		}
		if (new_pos == a->header.size + num - 1) {
			*a_inner_path_elem->max_elem_copy = max_elem;
		} else {
			a->elems[new_pos] = max_elem;
			BPS_TREE_DATAMOVE(a->elems + new_pos + 1,
					  b->elems + pos, num - 1 - pos - 1,
					  a, b);
			if (move_all)
				*a_inner_path_elem->max_elem_copy =
					*b_inner_path_elem->max_elem_copy;
			else
				*a_inner_path_elem->max_elem_copy =
					b->elems[num - 2];
		}
		if (!move_all)
			BPS_TREE_DATAMOVE(b->elems, b->elems + num - 1,
					  b->header.size - num, b, b);
	}
	a->header.size += num;
	b->header.size -= (num - 1);
}

/**
 * @brief Difference between maximum possible and current size of the leaf
 */
static inline bps_tree_pos_t
bps_tree_leaf_free_size(struct bps_leaf *leaf)
{
	return BPS_TREE_MAX_COUNT_IN_LEAF - leaf->header.size;
}

/**
 * @brief Difference between maximum possible and current size of the inner
 */
static inline bps_tree_pos_t
bps_tree_inner_free_size(struct bps_inner *inner)
{
	return BPS_TREE_MAX_COUNT_IN_INNER - inner->header.size;
}

/**
 * @brief Difference between current size of the leaf and the minimum allowed
 */
static inline bps_tree_pos_t
bps_tree_leaf_overmin_size(struct bps_leaf *leaf)
{
	/* Minimal fill factor of a B+* tree block is 2/3 of the maximum. */
	return leaf->header.size - BPS_TREE_MAX_COUNT_IN_LEAF * 2 / 3;
}

/**
 * @brief Difference between current size of the inner and the minimum allowed
 */
static inline bps_tree_pos_t
bps_tree_inner_overmin_size(struct bps_inner *inner)
{
	return inner->header.size - BPS_TREE_MAX_COUNT_IN_INNER * 2 / 3;
}

/**
 * @brief Fill path element structure of the left leaf
 */
static inline bool
bps_tree_collect_left_path_elem_leaf(struct bps_tree *tree,
		struct bps_leaf_path_elem *path_elem,
		struct bps_leaf_path_elem *new_path_elem)
{
	struct bps_inner_path_elem *parent = path_elem->parent;
	if (!parent)
		return false;
	if (path_elem->pos_in_parent == 0)
		return false;
	new_path_elem->parent =
		path_elem->parent;
	new_path_elem->pos_in_parent = path_elem->pos_in_parent - 1;
	new_path_elem->block_id =
		parent->block->child_ids[new_path_elem->pos_in_parent];
	new_path_elem->block = (struct bps_leaf *)
		bps_tree_restore_block(tree, new_path_elem->block_id);
	/* A left sibling is never last, so its max lives in the parent. */
	new_path_elem->max_elem_copy =
		parent->block->elems + new_path_elem->pos_in_parent;
	new_path_elem->insertion_point = (bps_tree_pos_t)(-1); /* unused */
	return true;
}

/**
 * @brief Fill path element structure of the left inner block;
 * almost an exact copy of bps_tree_collect_left_path_elem_leaf
 */
static inline bool
bps_tree_collect_left_path_elem_inner(struct bps_tree *tree,
		struct bps_inner_path_elem *path_elem,
		struct bps_inner_path_elem *new_path_elem)
{
	struct bps_inner_path_elem * parent = path_elem->parent;
	if (!parent)
		return false;
	if (path_elem->pos_in_parent == 0)
		return false;
	new_path_elem->parent = path_elem->parent;
	new_path_elem->pos_in_parent = path_elem->pos_in_parent - 1;
	new_path_elem->block_id =
		parent->block->child_ids[new_path_elem->pos_in_parent];
	new_path_elem->block = (struct bps_inner *)
		bps_tree_restore_block(tree, new_path_elem->block_id);
	new_path_elem->max_elem_copy =
		parent->block->elems + new_path_elem->pos_in_parent;
	new_path_elem->insertion_point = (bps_tree_pos_t)(-1); /* unused */
	return true;
}

/**
 * @brief Fill path element structure of the right leaf
 */
static inline bool
bps_tree_collect_right_ext_leaf(struct bps_tree *tree,
		struct bps_leaf_path_elem *path_elem,
		struct bps_leaf_path_elem *new_path_elem)
{
	struct bps_inner_path_elem *parent = path_elem->parent;
	if (!parent)
		return false;
	if (path_elem->pos_in_parent >= parent->block->header.size - 1)
		return false;
	new_path_elem->parent = path_elem->parent;
	new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1;
	new_path_elem->block_id =
		parent->block->child_ids[new_path_elem->pos_in_parent];
	new_path_elem->block = (struct bps_leaf *)
		bps_tree_restore_block(tree, new_path_elem->block_id);
	/* The parent's last child inherits the parent's own max copy. */
	if (new_path_elem->pos_in_parent >= parent->block->header.size - 1)
		new_path_elem->max_elem_copy = parent->max_elem_copy;
	else
		new_path_elem->max_elem_copy =
			parent->block->elems + new_path_elem->pos_in_parent;
	new_path_elem->insertion_point = (bps_tree_pos_t)(-1); /* unused */
	return true;
}

/**
 * @brief Fill path element structure of the right inner block;
 * almost an exact copy of bps_tree_collect_right_ext_leaf
 */
static inline bool
bps_tree_collect_right_ext_inner(struct bps_tree *tree,
		struct bps_inner_path_elem *path_elem,
		struct bps_inner_path_elem *new_path_elem)
{
	struct bps_inner_path_elem *parent = path_elem->parent;
	if (!parent)
		return false;
	if (path_elem->pos_in_parent >= parent->block->header.size - 1)
		return false;
	new_path_elem->parent = path_elem->parent;
	new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1;
	new_path_elem->block_id =
		parent->block->child_ids[new_path_elem->pos_in_parent];
	new_path_elem->block = (struct bps_inner *)
		bps_tree_restore_block(tree, new_path_elem->block_id);
	if (new_path_elem->pos_in_parent >= parent->block->header.size - 1)
		new_path_elem->max_elem_copy = parent->max_elem_copy;
	else
		new_path_elem->max_elem_copy =
			parent->block->elems + new_path_elem->pos_in_parent;
	new_path_elem->insertion_point = (bps_tree_pos_t)(-1); /* unused */
	return true;
}

/**
 * @brief Fill path element structure of a newly created leaf
 */
static inline void
bps_tree_prepare_new_ext_leaf(struct bps_leaf_path_elem *path_elem,
			      struct bps_leaf_path_elem *new_path_elem,
			      struct bps_leaf* new_leaf,
			      bps_tree_block_id_t new_leaf_id,
			      bps_tree_elem_t *max_elem_copy)
{
	new_path_elem->parent = path_elem->parent;
	new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1;
	new_path_elem->block_id = new_leaf_id;
	new_path_elem->block = new_leaf;
	new_path_elem->max_elem_copy = max_elem_copy;
	new_path_elem->insertion_point = (bps_tree_pos_t)(-1); /* unused */
}

/**
 * @brief Fill path element structure of a newly created inner block
 */
static inline void
bps_tree_prepare_new_ext_inner(struct bps_inner_path_elem *path_elem,
struct bps_inner_path_elem *new_path_elem,
			       struct bps_inner* new_inner,
			       bps_tree_block_id_t new_inner_id,
			       bps_tree_elem_t *max_elem_copy)
{
	new_path_elem->parent = path_elem->parent;
	new_path_elem->pos_in_parent = path_elem->pos_in_parent + 1;
	new_path_elem->block_id = new_inner_id;
	new_path_elem->block = new_inner;
	new_path_elem->max_elem_copy = max_elem_copy;
	new_path_elem->insertion_point = (bps_tree_pos_t)(-1); /* unused */
}

/**
 * bps_tree_process_insert_inner declaration. See definition for details.
 */
static int
bps_tree_process_insert_inner(struct bps_tree *tree,
			      struct bps_inner_path_elem *inner_path_elem,
			      bps_tree_block_id_t block_id, bps_tree_pos_t pos,
			      bps_tree_elem_t max_elem);

/**
 * Basic insertion into a leaf, dealing with splitting, merging and
 * moving data to neighbour blocks if necessary.
 *
 * On success returns 0 and fills *inserted_in_block /
 * *inserted_in_pos with the id of the block and the position inside
 * it where the new element actually ended up (it may land in a
 * sibling or in a newly created leaf due to rebalancing).
 * Returns -1 if new blocks could not be reserved for a split.
 */
static inline int
bps_tree_process_insert_leaf(struct bps_tree *tree,
			     struct bps_leaf_path_elem *leaf_path_elem,
			     bps_tree_elem_t new_elem,
			     bps_tree_block_id_t *inserted_in_block,
			     bps_tree_pos_t *inserted_in_pos)
{
	/* Fast path: the leaf still has free room, insert in place. */
	if (bps_tree_leaf_free_size(leaf_path_elem->block)) {
		bps_tree_insert_into_leaf(tree, leaf_path_elem, new_elem);
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x0);
		*inserted_in_block = leaf_path_elem->block_id;
		*inserted_in_pos = leaf_path_elem->insertion_point;
		return 0;
	}
	bps_tree_touch_path(tree, leaf_path_elem);
	struct bps_leaf_path_elem left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		left_left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_right_ext = {0, 0, 0, 0, 0, 0, 0, 0};
	bool has_left_ext =
		bps_tree_collect_left_path_elem_leaf(tree, leaf_path_elem,
						     &left_ext);
	bool has_right_ext =
		bps_tree_collect_right_ext_leaf(tree, leaf_path_elem,
						&right_ext);
	bool has_left_left_ext = false;
	bool has_right_right_ext = false;
	struct bps_leaf_path_elem *inserted_ext;
	/*
	 * Before creating a new block, try to rebalance: move some
	 * elements (together with the new one) into a sibling that
	 * still has free room.
	 */
	if (has_left_ext && has_right_ext) {
		/* Prefer the sibling with more free space. */
		if (bps_tree_leaf_free_size(left_ext.block) >
		    bps_tree_leaf_free_size(right_ext.block)) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_free_size(left_ext.block) / 2;
			inserted_ext =
				bps_tree_insert_and_move_elems_to_left_leaf(
					tree, &left_ext, leaf_path_elem,
					move_count, new_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x1);
			*inserted_in_block = inserted_ext->block_id;
			*inserted_in_pos = inserted_ext->insertion_point;
			return 0;
		} else if (bps_tree_leaf_free_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_free_size(right_ext.block) / 2;
			inserted_ext =
				bps_tree_insert_and_move_elems_to_right_leaf(
					tree, leaf_path_elem, &right_ext,
					move_count, new_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x2);
			*inserted_in_block = inserted_ext->block_id;
			*inserted_in_pos = inserted_ext->insertion_point;
			return 0;
		}
	} else if (has_left_ext) {
		if (bps_tree_leaf_free_size(left_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_free_size(left_ext.block) / 2;
			inserted_ext =
				bps_tree_insert_and_move_elems_to_left_leaf(
					tree, &left_ext, leaf_path_elem,
					move_count, new_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x3);
			*inserted_in_block = inserted_ext->block_id;
			*inserted_in_pos = inserted_ext->insertion_point;
			return 0;
		}
		/*
		 * The left sibling is full too; try to cascade through
		 * it into the left-left sibling.
		 */
		has_left_left_ext =
			bps_tree_collect_left_path_elem_leaf(tree, &left_ext,
							     &left_left_ext);
		if (has_left_left_ext &&
		    bps_tree_leaf_free_size(left_left_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				(2 * bps_tree_leaf_free_size(
					left_left_ext.block) - 1) / 3;
			bps_tree_move_elems_to_left_leaf(tree, &left_left_ext,
							 &left_ext,
							 move_count);
			move_count = 1 + move_count / 2;
			inserted_ext =
				bps_tree_insert_and_move_elems_to_left_leaf(
					tree, &left_ext, leaf_path_elem,
					move_count, new_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x4);
			*inserted_in_block = inserted_ext->block_id;
			*inserted_in_pos = inserted_ext->insertion_point;
			return 0;
		}
	} else if (has_right_ext) {
		if (bps_tree_leaf_free_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_free_size(right_ext.block) / 2;
			inserted_ext =
				bps_tree_insert_and_move_elems_to_right_leaf(
					tree, leaf_path_elem, &right_ext,
					move_count, new_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x5);
			*inserted_in_block = inserted_ext->block_id;
			*inserted_in_pos = inserted_ext->insertion_point;
			return 0;
		}
		/* Mirror case: cascade through the right-right sibling. */
		has_right_right_ext =
			bps_tree_collect_right_ext_leaf(tree, &right_ext,
							&right_right_ext);
		if (has_right_right_ext &&
		    bps_tree_leaf_free_size(right_right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				(2 * bps_tree_leaf_free_size(
					right_right_ext.block) - 1) / 3;
			bps_tree_move_elems_to_right_leaf(tree, &right_ext,
							  &right_right_ext,
							  move_count);
			move_count = 1 + move_count / 2;
			inserted_ext =
				bps_tree_insert_and_move_elems_to_right_leaf(
					tree, leaf_path_elem, &right_ext,
					move_count, new_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x6);
			*inserted_in_block = inserted_ext->block_id;
			*inserted_in_pos = inserted_ext->insertion_point;
			return 0;
		}
	}
	/*
	 * Rebalancing is impossible: a split is needed. Reserve enough
	 * blocks for the worst case - a new block on every level plus
	 * a new root.
	 */
	if (!bps_tree_reserve_blocks(tree, tree->depth + 1)) {
		return -1;
	}
	bps_tree_block_id_t new_block_id = (bps_tree_block_id_t)(-1);
	struct bps_leaf *new_leaf = bps_tree_create_leaf(tree, &new_block_id);
	leaf_path_elem->block = (struct bps_leaf *)
		bps_tree_touch_block(tree, leaf_path_elem->block_id);
	/* Link the new leaf into the leaf list right after this one. */
	if (leaf_path_elem->block->next_id != (bps_tree_block_id_t)(-1)) {
		struct bps_leaf *next_leaf = (struct bps_leaf *)
			bps_tree_touch_block(tree,
					     leaf_path_elem->block->next_id);
		assert(next_leaf->prev_id == leaf_path_elem->block_id);
		next_leaf->prev_id = new_block_id;
	} else {
		tree->last_id = new_block_id;
	}
	new_leaf->next_id = leaf_path_elem->block->next_id;
	leaf_path_elem->block->next_id = new_block_id;
	new_leaf->prev_id = leaf_path_elem->block_id;
	new_leaf->header.size = 0;
	struct bps_leaf_path_elem new_path_elem;
	bps_tree_elem_t new_max_elem = tree->max_elem;
	bps_tree_prepare_new_ext_leaf(leaf_path_elem, &new_path_elem,
				      new_leaf, new_block_id, &new_max_elem);
	if (has_left_ext && has_right_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * left and right has MAX too. Split: insert new node
		 * at right and evenly rearrange elements.
		 * Blocks:
		 * [ MAX ] [ MAX + 1 ] [ 0 ] [ MAX ]
		 * Moving:
		 *             ----mc1--->
		 * ----mc2--->            <---mc3----
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ] [ nc4 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_LEAF;
		const bps_tree_pos_t total = max * 3 + 1;
		const bps_tree_pos_t nc2 = total / 4;
		const bps_tree_pos_t nc3 = (total - nc2) / 3;
		const bps_tree_pos_t nc4 = (total - nc2 - nc3) / 2;
		const bps_tree_pos_t nc1 = (total - nc2 - nc3 - nc4);
		const bps_tree_pos_t mc2 = max - nc1;
		const bps_tree_pos_t mc3 = max - nc4;
		const bps_tree_pos_t mc1 = nc3 - mc3;
		inserted_ext = bps_tree_insert_and_move_elems_to_right_leaf(
			tree, leaf_path_elem, &new_path_elem, mc1, new_elem);
		assert(inserted_ext == leaf_path_elem ||
		       inserted_ext == &new_path_elem);
		bps_tree_move_elems_to_right_leaf(tree, &left_ext,
						  leaf_path_elem, mc2);
		/*
		 * If the inserted_ext == leaf_path_elem, then
		 * update the insertion_point, because the new
		 * elements were inserted from the left leaf.
		 * To avoid using 'if' lets always update the
		 * insertion_point. It is better than branching.
		 *
		 * [e - mc2, ... e - 1] ---> [ e1, e2 ... eN ]
		 *   new elements             ^
		 *                            inserted_position
		 */
		leaf_path_elem->insertion_point += mc2;
		/*
		 * In a case of insertion into the new_path_elem
		 * there is no need to update insertion_point,
		 * because the positions of old elements are not
		 * changed in the new_path_elem block.
		 */
		bps_tree_move_elems_to_left_leaf(tree, &new_path_elem,
						 &right_ext, mc3);
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x7);
	} else if (has_left_ext && has_left_left_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is no right block, and left and left-left have
		 * MAX too. Split: insert new node at right and evenly
		 * rearrange elements.
		 * Blocks:
		 * [ MAX ] [ MAX ] [ MAX + 1 ] [ 0 ]
		 * Moving:
		 *                     ----mc1--->
		 *         ----mc2--->
		 * ----mc3--->
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ] [ nc4 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_LEAF;
		const bps_tree_pos_t total = max * 3 + 1;
		const bps_tree_pos_t nc4 = total / 4;
		const bps_tree_pos_t nc3 = (total - nc4) / 3;
		const bps_tree_pos_t nc2 = (total - nc4 - nc3) / 2;
		const bps_tree_pos_t nc1 = (total - nc4 - nc3 - nc2);
		const bps_tree_pos_t mc1 = nc4;
		const bps_tree_pos_t mc3 = max - nc1;
		const bps_tree_pos_t mc2 = max + mc3 - nc2;
		inserted_ext = bps_tree_insert_and_move_elems_to_right_leaf(
			tree, leaf_path_elem, &new_path_elem, mc1, new_elem);
		bps_tree_move_elems_to_right_leaf(tree, &left_ext,
						  leaf_path_elem, mc2);
		/*
		 * @sa comment for 'has_left_ext && has_right_ext'
		 */
		leaf_path_elem->insertion_point += mc2;
		bps_tree_move_elems_to_right_leaf(tree, &left_left_ext,
						  &left_ext, mc3);
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x8);
	} else if (has_right_ext && has_right_right_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is no left block, and right and right-right
		 * have MAX too. Split: insert new node at right and
		 * evenly rearrange elements.
		 * Blocks:
		 * [ MAX + 1 ] [ 0 ] [ MAX ] [ MAX ]
		 * Moving:
		 * ----mc1---> <---mc2----
		 *                         <---mc3----
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ] [ nc4 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_LEAF;
		const bps_tree_pos_t total = max * 3 + 1;
		const bps_tree_pos_t nc1 = total / 4;
		const bps_tree_pos_t nc2 = (total - nc1) / 3;
		const bps_tree_pos_t nc3 = (total - nc1 - nc2) / 2;
		const bps_tree_pos_t nc4 = (total - nc1 - nc2 - nc3);
		const bps_tree_pos_t mc1 = max + 1 - nc1;
		const bps_tree_pos_t mc3 = max - nc4;
		const bps_tree_pos_t mc2 = max + mc3 - nc3;
		inserted_ext = bps_tree_insert_and_move_elems_to_right_leaf(
			tree, leaf_path_elem, &new_path_elem, mc1, new_elem);
		bps_tree_move_elems_to_left_leaf(tree, &new_path_elem,
						 &right_ext, mc2);
		bps_tree_move_elems_to_left_leaf(tree, &right_ext,
						 &right_right_ext, mc3);
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0x9);
	} else if (has_left_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is only a left block, and it has MAX too.
		 * Split: insert new node at right and evenly rearrange
		 * elements.
		 * Blocks:
		 * [ MAX ] [ MAX + 1 ] [ 0 ]
		 * Moving:
		 *             ----mc1--->
		 * ----mc2--->
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_LEAF;
		const bps_tree_pos_t total = max * 2 + 1;
		const bps_tree_pos_t nc3 = total / 3;
		const bps_tree_pos_t nc2 = (total - nc3) / 2;
		const bps_tree_pos_t nc1 = (total - nc3 - nc2);
		const bps_tree_pos_t mc1 = nc3;
		const bps_tree_pos_t mc2 = max - nc1;
		inserted_ext = bps_tree_insert_and_move_elems_to_right_leaf(
			tree, leaf_path_elem, &new_path_elem, mc1, new_elem);
		bps_tree_move_elems_to_right_leaf(tree, &left_ext,
						  leaf_path_elem, mc2);
		/*
		 * @sa comment for 'has_left_ext && has_right_ext'
		 */
		leaf_path_elem->insertion_point += mc2;
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0xA);
	} else if (has_right_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is only a right block, and it has MAX too.
		 * Split: insert new node at right and evenly rearrange
		 * elements.
		 * Blocks:
		 * [ MAX + 1 ] [ 0 ] [ MAX ]
		 * Moving:
		 * ----mc1---> <---mc2----
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_LEAF;
		const bps_tree_pos_t total = max * 2 + 1;
		const bps_tree_pos_t nc1 = total / 3;
		const bps_tree_pos_t nc2 = (total - nc1) / 2;
		const bps_tree_pos_t nc3 = (total - nc1 - nc2);
		const bps_tree_pos_t mc1 = max + 1 - nc1;
		const bps_tree_pos_t mc2 = max - nc3;
		inserted_ext = bps_tree_insert_and_move_elems_to_right_leaf(
			tree, leaf_path_elem, &new_path_elem, mc1, new_elem);
		bps_tree_move_elems_to_left_leaf(tree, &new_path_elem,
						 &right_ext, mc2);
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0xB);
	} else {
		assert(!leaf_path_elem->parent);
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there are no siblings.
		 * Split: insert new node at right and evenly rearrange
		 * elements.
		 * Blocks:
		 * [ MAX + 1 ] [ 0 ]
		 * Moving:
		 * ----mc1--->
		 * To become:
		 * [ nc1 ] [ nc2 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_LEAF;
		const bps_tree_pos_t total = max + 1;
		const bps_tree_pos_t nc2 = total / 2;
		/* const bps_tree_pos_t nc1 = (total - nc2); */
		const bps_tree_pos_t mc1 = nc2;
		inserted_ext = bps_tree_insert_and_move_elems_to_right_leaf(
			tree, leaf_path_elem, &new_path_elem, mc1, new_elem);
		/* The leaf was the root: grow the tree by one level. */
		bps_tree_block_id_t new_root_id = (bps_tree_block_id_t)(-1);
		struct bps_inner *new_root =
			bps_tree_create_inner(tree, &new_root_id);
		new_root->header.size = 2;
		new_root->child_ids[0] = tree->root_id;
		new_root->child_ids[1] = new_block_id;
		new_root->elems[0] = tree->max_elem;
		tree->root_id = new_root_id;
		tree->max_elem = new_max_elem;
		tree->depth++;
		BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0xC);
		*inserted_in_block = inserted_ext->block_id;
		*inserted_in_pos = inserted_ext->insertion_point;
		return 0;
	}
	*inserted_in_block = inserted_ext->block_id;
	*inserted_in_pos = inserted_ext->insertion_point;
	assert(leaf_path_elem->parent);
	BPS_TREE_BRANCH_TRACE(tree, insert_leaf, 1 << 0xD);
	/* Register the new leaf in the parent (may split recursively). */
	return bps_tree_process_insert_inner(tree, leaf_path_elem->parent,
					     new_block_id,
					     new_path_elem.pos_in_parent,
					     new_max_elem);
}

/**
 * Basic insertion into an inner block, dealing with splitting, merging
 * and moving data to neighbour blocks if necessary.
 * Returns 0; blocks for a possible split have already been reserved by
 * bps_tree_process_insert_leaf.
 */
static int
bps_tree_process_insert_inner(struct bps_tree *tree,
			      struct bps_inner_path_elem *inner_path_elem,
			      bps_tree_block_id_t block_id, bps_tree_pos_t pos,
			      bps_tree_elem_t max_elem)
{
	/* Fast path: the inner block still has free room. */
	if (bps_tree_inner_free_size(inner_path_elem->block)) {
		bps_tree_insert_into_inner(tree, inner_path_elem, block_id,
					   pos, max_elem);
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x0);
		return 0;
	}
	struct bps_inner_path_elem left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		left_left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_right_ext = {0, 0, 0, 0, 0, 0, 0, 0};
	bool has_left_ext =
		bps_tree_collect_left_path_elem_inner(tree, inner_path_elem,
						      &left_ext);
	bool has_right_ext =
		bps_tree_collect_right_ext_inner(tree, inner_path_elem,
						 &right_ext);
	bool has_left_left_ext = false;
	bool has_right_right_ext = false;
	/* Same rebalancing strategy as in bps_tree_process_insert_leaf. */
	if (has_left_ext && has_right_ext) {
		if (bps_tree_inner_free_size(left_ext.block) >
		    bps_tree_inner_free_size(right_ext.block)) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_inner_free_size(left_ext.block) / 2;
			bps_tree_insert_and_move_elems_to_left_inner(
				tree, &left_ext, inner_path_elem, move_count,
				block_id, pos, max_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x1);
			return 0;
		} else if (bps_tree_inner_free_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_inner_free_size(right_ext.block) / 2;
			bps_tree_insert_and_move_elems_to_right_inner(
				tree, inner_path_elem, &right_ext, move_count,
				block_id, pos, max_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x2);
			return 0;
		}
	} else if (has_left_ext) {
		if (bps_tree_inner_free_size(left_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_inner_free_size(left_ext.block) / 2;
bps_tree_insert_and_move_elems_to_left_inner(
				tree, &left_ext, inner_path_elem, move_count,
				block_id, pos, max_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x3);
			return 0;
		}
		/* Left sibling is full; cascade via the left-left one. */
		has_left_left_ext =
			bps_tree_collect_left_path_elem_inner(tree, &left_ext,
							      &left_left_ext);
		if (has_left_left_ext &&
		    bps_tree_inner_free_size(left_left_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				(2 * bps_tree_inner_free_size(
					left_left_ext.block) - 1) / 3;
			bps_tree_move_elems_to_left_inner(tree,
							  &left_left_ext,
							  &left_ext,
							  move_count);
			move_count = 1 + move_count / 2;
			bps_tree_insert_and_move_elems_to_left_inner(
				tree, &left_ext, inner_path_elem, move_count,
				block_id, pos, max_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x4);
			return 0;
		}
	} else if (has_right_ext) {
		if (bps_tree_inner_free_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_inner_free_size(right_ext.block) / 2;
			bps_tree_insert_and_move_elems_to_right_inner(
				tree, inner_path_elem, &right_ext, move_count,
				block_id, pos, max_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x5);
			return 0;
		}
		/* Mirror case: cascade via the right-right sibling. */
		has_right_right_ext =
			bps_tree_collect_right_ext_inner(tree, &right_ext,
							 &right_right_ext);
		if (has_right_right_ext &&
		    bps_tree_inner_free_size(right_right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				(2 * bps_tree_inner_free_size(
					right_right_ext.block) - 1) / 3;
			bps_tree_move_elems_to_right_inner(tree, &right_ext,
							   &right_right_ext,
							   move_count);
			move_count = 1 + move_count / 2;
			bps_tree_insert_and_move_elems_to_right_inner(
				tree, inner_path_elem, &right_ext, move_count,
				block_id, pos, max_elem);
			BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x6);
			return 0;
		}
	}
	/* Rebalancing failed: split the inner block. */
	bps_tree_block_id_t new_block_id = (bps_tree_block_id_t)(-1);
	struct bps_inner *new_inner =
		bps_tree_create_inner(tree, &new_block_id);
	new_inner->header.size = 0;
	struct bps_inner_path_elem new_path_elem;
	bps_tree_elem_t new_max_elem = tree->max_elem;
	bps_tree_prepare_new_ext_inner(inner_path_elem, &new_path_elem,
				       new_inner, new_block_id,
				       &new_max_elem);
	if (has_left_ext && has_right_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * left and right has MAX too. Split: insert new node
		 * at right and evenly rearrange elements.
		 * Blocks:
		 * [ MAX ] [ MAX + 1 ] [ 0 ] [ MAX ]
		 * Moving:
		 *             ----mc1--->
		 * ----mc2--->            <---mc3----
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ] [ nc4 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_INNER;
		const bps_tree_pos_t total = max * 3 + 1;
		const bps_tree_pos_t nc2 = total / 4;
		const bps_tree_pos_t nc3 = (total - nc2) / 3;
		const bps_tree_pos_t nc4 = (total - nc2 - nc3) / 2;
		const bps_tree_pos_t nc1 = (total - nc2 - nc3 - nc4);
		const bps_tree_pos_t mc2 = max - nc1;
		const bps_tree_pos_t mc3 = max - nc4;
		const bps_tree_pos_t mc1 = nc3 - mc3;
		bps_tree_insert_and_move_elems_to_right_inner(
			tree, inner_path_elem, &new_path_elem, mc1,
			block_id, pos, max_elem);
		bps_tree_move_elems_to_right_inner(tree, &left_ext,
						   inner_path_elem, mc2);
		bps_tree_move_elems_to_left_inner(tree, &new_path_elem,
						  &right_ext, mc3);
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x7);
	} else if (has_left_ext && has_left_left_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is no right block, and left and left-left have
		 * MAX too. Split: insert new node at right and evenly
		 * rearrange elements.
		 * Blocks:
		 * [ MAX ] [ MAX ] [ MAX + 1 ] [ 0 ]
		 * Moving:
		 *                     ----mc1--->
		 *         ----mc2--->
		 * ----mc3--->
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ] [ nc4 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_INNER;
		const bps_tree_pos_t total = max * 3 + 1;
		const bps_tree_pos_t nc4 = total / 4;
		const bps_tree_pos_t nc3 = (total - nc4) / 3;
		const bps_tree_pos_t nc2 = (total - nc4 - nc3) / 2;
		const bps_tree_pos_t nc1 = (total - nc4 - nc3 - nc2);
		const bps_tree_pos_t mc1 = nc4;
		const bps_tree_pos_t mc3 = max - nc1;
		const bps_tree_pos_t mc2 = max + mc3 - nc2;
		bps_tree_insert_and_move_elems_to_right_inner(
			tree, inner_path_elem, &new_path_elem, mc1,
			block_id, pos, max_elem);
		bps_tree_move_elems_to_right_inner(tree, &left_ext,
						   inner_path_elem, mc2);
		bps_tree_move_elems_to_right_inner(tree, &left_left_ext,
						   &left_ext, mc3);
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x8);
	} else if (has_right_ext && has_right_right_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is no left block, and right and right-right
		 * have MAX too. Split: insert new node at right and
		 * evenly rearrange elements.
		 * Blocks:
		 * [ MAX + 1 ] [ 0 ] [ MAX ] [ MAX ]
		 * Moving:
		 * ----mc1---> <---mc2----
		 *                         <---mc3----
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ] [ nc4 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_INNER;
		const bps_tree_pos_t total = max * 3 + 1;
		const bps_tree_pos_t nc1 = total / 4;
		const bps_tree_pos_t nc2 = (total - nc1) / 3;
		const bps_tree_pos_t nc3 = (total - nc1 - nc2) / 2;
		const bps_tree_pos_t nc4 = (total - nc1 - nc2 - nc3);
		const bps_tree_pos_t mc1 = max + 1 - nc1;
		const bps_tree_pos_t mc3 = max - nc4;
		const bps_tree_pos_t mc2 = max + mc3 - nc3;
		bps_tree_insert_and_move_elems_to_right_inner(
			tree, inner_path_elem, &new_path_elem, mc1,
			block_id, pos, max_elem);
		bps_tree_move_elems_to_left_inner(tree, &new_path_elem,
						  &right_ext, mc2);
		bps_tree_move_elems_to_left_inner(tree, &right_ext,
						  &right_right_ext, mc3);
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0x9);
	} else if (has_left_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is only a left block, and it has MAX too.
		 * Split: insert new node at right and evenly rearrange
		 * elements.
		 * Blocks:
		 * [ MAX ] [ MAX + 1 ] [ 0 ]
		 * Moving:
		 *             ----mc1--->
		 * ----mc2--->
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_INNER;
		const bps_tree_pos_t total = max * 2 + 1;
		const bps_tree_pos_t nc3 = total / 3;
		const bps_tree_pos_t nc2 = (total - nc3) / 2;
		const bps_tree_pos_t nc1 = (total - nc3 - nc2);
		const bps_tree_pos_t mc1 = nc3;
		const bps_tree_pos_t mc2 = max - nc1;
		bps_tree_insert_and_move_elems_to_right_inner(
			tree, inner_path_elem, &new_path_elem, mc1,
			block_id, pos, max_elem);
		bps_tree_move_elems_to_right_inner(tree, &left_ext,
						   inner_path_elem, mc2);
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0xA);
	} else if (has_right_ext) {
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there is only a right block, and it has MAX too.
		 * Split: insert new node at right and evenly rearrange
		 * elements.
		 * Blocks:
		 * [ MAX + 1 ] [ 0 ] [ MAX ]
		 * Moving:
		 * ----mc1---> <---mc2----
		 * To become:
		 * [ nc1 ] [ nc2 ] [ nc3 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_INNER;
		const bps_tree_pos_t total = max * 2 + 1;
		const bps_tree_pos_t nc1 = total / 3;
		const bps_tree_pos_t nc2 = (total - nc1) / 2;
		const bps_tree_pos_t nc3 = (total - nc1 - nc2);
		const bps_tree_pos_t mc1 = max + 1 - nc1;
		const bps_tree_pos_t mc2 = max - nc3;
		bps_tree_insert_and_move_elems_to_right_inner(
			tree, inner_path_elem, &new_path_elem, mc1,
			block_id, pos, max_elem);
		bps_tree_move_elems_to_left_inner(tree, &new_path_elem,
						  &right_ext, mc2);
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0xB);
	} else {
		assert(!inner_path_elem->parent);
		/*
		 * The block has MAX elems and +1 elem is inserted,
		 * there are no siblings.
		 * Split: insert new node at right and evenly rearrange
		 * elements.
		 * Blocks:
		 * [ MAX + 1 ] [ 0 ]
		 * Moving:
		 * ----mc1--->
		 * To become:
		 * [ nc1 ] [ nc2 ]
		 */
		const bps_tree_pos_t max = BPS_TREE_MAX_COUNT_IN_INNER;
		const bps_tree_pos_t total = max + 1;
		const bps_tree_pos_t nc2 = total / 2;
		/* const bps_tree_pos_t nc1 = (total - nc2); */
		const bps_tree_pos_t mc1 = nc2;
		bps_tree_insert_and_move_elems_to_right_inner(
			tree, inner_path_elem, &new_path_elem, mc1,
			block_id, pos, max_elem);
		/* The block was the root: grow the tree by one level. */
		bps_tree_block_id_t new_root_id = (bps_tree_block_id_t)(-1);
		struct bps_inner *new_root =
			bps_tree_create_inner(tree, &new_root_id);
		new_root->header.size = 2;
		new_root->child_ids[0] = tree->root_id;
		new_root->child_ids[1] = new_block_id;
		new_root->elems[0] = tree->max_elem;
		tree->root_id = new_root_id;
		tree->max_elem = new_max_elem;
		tree->depth++;
		BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0xC);
		return 0;
	}
	assert(inner_path_elem->parent);
	BPS_TREE_BRANCH_TRACE(tree, insert_inner, 1 << 0xD);
	/* Register the new block in the parent (may split recursively). */
	return bps_tree_process_insert_inner(tree, inner_path_elem->parent,
					     new_block_id,
					     new_path_elem.pos_in_parent,
					     new_max_elem);
}

/**
 * bps_tree_process_delete_inner declaration. See definition for details.
 */
static void
bps_tree_process_delete_inner(struct bps_tree *tree,
			      struct bps_inner_path_elem *inner_path_elem);

/**
 * Basic deletion from a leaf, dealing with splitting, merging and
 * moving data to neighbour blocks if necessary.
 */
static inline void
bps_tree_process_delete_leaf(struct bps_tree *tree,
			     struct bps_leaf_path_elem *leaf_path_elem)
{
	bps_tree_delete_from_leaf(tree, leaf_path_elem);
	/* Fast path: the leaf is still at least 2/3 full. */
	if (leaf_path_elem->block->header.size >=
	    BPS_TREE_MAX_COUNT_IN_LEAF * 2 / 3) {
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x0);
		return;
	}
	bps_tree_touch_path(tree, leaf_path_elem);
	struct bps_leaf_path_elem left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		left_left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_right_ext = {0, 0, 0, 0, 0, 0, 0, 0};
	bool has_left_ext =
		bps_tree_collect_left_path_elem_leaf(tree, leaf_path_elem,
						     &left_ext);
	bool has_right_ext =
		bps_tree_collect_right_ext_leaf(tree, leaf_path_elem,
						&right_ext);
	bool has_left_left_ext = false;
	bool has_right_right_ext = false;
	/*
	 * First try to refill the leaf by borrowing elements from
	 * siblings that are above the minimal fill level.
	 */
	if (has_left_ext && has_right_ext) {
		/* Borrow from the sibling with the bigger surplus. */
		if (bps_tree_leaf_overmin_size(left_ext.block) >
		    bps_tree_leaf_overmin_size(right_ext.block)) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_overmin_size(left_ext.block) / 2;
			bps_tree_move_elems_to_right_leaf(tree, &left_ext,
							  leaf_path_elem,
							  move_count);
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x1);
			return;
		} else if (bps_tree_leaf_overmin_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_overmin_size(right_ext.block) / 2;
			bps_tree_move_elems_to_left_leaf(tree, leaf_path_elem,
							 &right_ext,
							 move_count);
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x2);
			return;
		}
	} else if (has_left_ext) {
		if (bps_tree_leaf_overmin_size(left_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_overmin_size(left_ext.block) / 2;
			bps_tree_move_elems_to_right_leaf(tree, &left_ext,
							  leaf_path_elem,
							  move_count);
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x3);
			return;
		}
		/* Left sibling is at minimum; try the left-left one. */
		has_left_left_ext =
			bps_tree_collect_left_path_elem_leaf(tree, &left_ext,
							     &left_left_ext);
		if (has_left_left_ext &&
		    bps_tree_leaf_overmin_size(left_left_ext.block) > 0) {
			bps_tree_pos_t move_count2 = 1 +
				(2 * bps_tree_leaf_overmin_size(
					left_left_ext.block) - 1) / 3;
			bps_tree_pos_t move_count1 = 1 + move_count2 / 2;
			bps_tree_move_elems_to_right_leaf(tree, &left_ext,
							  leaf_path_elem,
							  move_count1);
			bps_tree_move_elems_to_right_leaf(tree,
							  &left_left_ext,
							  &left_ext,
							  move_count2);
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x4);
			return;
		}
	} else if (has_right_ext) {
		if (bps_tree_leaf_overmin_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_leaf_overmin_size(right_ext.block) / 2;
			bps_tree_move_elems_to_left_leaf(tree, leaf_path_elem,
							 &right_ext,
							 move_count);
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x5);
			return;
		}
		/* Mirror case: try the right-right sibling. */
		has_right_right_ext =
			bps_tree_collect_right_ext_leaf(tree, &right_ext,
							&right_right_ext);
		if (has_right_right_ext &&
		    bps_tree_leaf_overmin_size(right_right_ext.block) > 0) {
			bps_tree_pos_t move_count2 = 1 +
				(2 * bps_tree_leaf_overmin_size(
					right_right_ext.block) - 1) / 3;
			bps_tree_pos_t move_count1 = 1 + move_count2 / 2;
			bps_tree_move_elems_to_left_leaf(tree, leaf_path_elem,
							 &right_ext,
							 move_count1);
			bps_tree_move_elems_to_left_leaf(tree, &right_ext,
							 &right_right_ext,
							 move_count2);
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x6);
			return;
		}
	}
	/*
	 * Borrowing is impossible: empty this leaf by distributing its
	 * elements to the siblings and then delete the leaf itself.
	 */
	if (has_left_ext && has_right_ext) {
		bps_tree_pos_t move_count =
			(leaf_path_elem->block->header.size + 1) / 2;
		bps_tree_move_elems_to_right_leaf(tree, leaf_path_elem,
						  &right_ext, move_count);
		move_count = leaf_path_elem->block->header.size;
		bps_tree_move_elems_to_left_leaf(tree, &left_ext,
						 leaf_path_elem, move_count);
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x7);
	} else if (has_left_ext && has_left_left_ext) {
		bps_tree_pos_t move_count =
			(leaf_path_elem->block->header.size + 1) / 2;
		bps_tree_move_elems_to_left_leaf(tree, &left_left_ext,
						 &left_ext, move_count);
		move_count = leaf_path_elem->block->header.size;
		bps_tree_move_elems_to_left_leaf(tree, &left_ext,
						 leaf_path_elem, move_count);
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x8);
	} else if (has_right_ext && has_right_right_ext) {
		bps_tree_pos_t move_count =
			(leaf_path_elem->block->header.size + 1) / 2;
		bps_tree_move_elems_to_right_leaf(tree, &right_ext,
						  &right_right_ext,
						  move_count);
		move_count = leaf_path_elem->block->header.size;
		bps_tree_move_elems_to_right_leaf(tree, leaf_path_elem,
						  &right_ext, move_count);
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x9);
	} else if (has_left_ext) {
		/* Keep the leaf if merging would overflow the sibling. */
		if (leaf_path_elem->block->header.size +
		    left_ext.block->header.size >
		    BPS_TREE_MAX_COUNT_IN_LEAF) {
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0xA);
			return;
		}
		bps_tree_pos_t move_count =
			leaf_path_elem->block->header.size;
		bps_tree_move_elems_to_left_leaf(tree, &left_ext,
						 leaf_path_elem, move_count);
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0xB);
	} else if (has_right_ext) {
		/* Keep the leaf if merging would overflow the sibling. */
		if (leaf_path_elem->block->header.size +
		    right_ext.block->header.size >
		    BPS_TREE_MAX_COUNT_IN_LEAF) {
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0xC);
			return;
		}
		bps_tree_pos_t move_count =
			leaf_path_elem->block->header.size;
		bps_tree_move_elems_to_right_leaf(tree, leaf_path_elem,
						  &right_ext, move_count);
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0xD);
	} else {
		if (leaf_path_elem->block->header.size > 0) {
			BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0xE);
			return;
		}
		/* The last element of the tree was deleted. */
		assert(leaf_path_elem->parent == 0);
		assert(tree->depth == 1);
		assert(tree->size == 0);
		tree->root_id = (bps_tree_block_id_t)(-1);
		tree->depth = 0;
		tree->first_id = (bps_tree_block_id_t)(-1);
		tree->last_id = (bps_tree_block_id_t)(-1);
		bps_tree_dispose_leaf(tree, leaf_path_elem->block,
				      leaf_path_elem->block_id);
		BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0xF);
		return;
	}
	/* The leaf is now empty: unlink it from the leaf list. */
	assert(leaf_path_elem->block->header.size == 0);
	struct bps_leaf *leaf = (struct bps_leaf*)leaf_path_elem->block;
	if (leaf->prev_id == (bps_tree_block_id_t)(-1)) {
		tree->first_id = leaf->next_id;
	} else {
		struct bps_leaf *prev_block = (struct bps_leaf *)
			bps_tree_touch_block(tree, leaf->prev_id);
		prev_block->next_id = leaf->next_id;
	}
	if (leaf->next_id == (bps_tree_block_id_t)(-1)) {
		tree->last_id = leaf->prev_id;
	} else {
		struct bps_leaf *next_block = (struct bps_leaf *)
			bps_tree_touch_block(tree, leaf->next_id);
		next_block->prev_id = leaf->prev_id;
	}
	bps_tree_dispose_leaf(tree, leaf_path_elem->block,
			      leaf_path_elem->block_id);
	assert(leaf_path_elem->parent);
	/* Remove the leaf's entry from the parent (may cascade up). */
	bps_tree_process_delete_inner(tree, leaf_path_elem->parent);
	BPS_TREE_BRANCH_TRACE(tree, delete_leaf, 1 << 0x10);
}

/**
 * Basic deletion from an inner block, deals with possible splitting,
 * merging and moving of elements data to neighbouring blocks.
 */
static inline void
bps_tree_process_delete_inner(struct bps_tree *tree,
			      struct bps_inner_path_elem *inner_path_elem)
{
	bps_tree_delete_from_inner(tree, inner_path_elem);
	/* Fast path: the block is still at least 2/3 full. */
	if (inner_path_elem->block->header.size >=
	    BPS_TREE_MAX_COUNT_IN_INNER * 2 / 3) {
		BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x0);
		return;
	}
	struct bps_inner_path_elem left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		left_left_ext = {0, 0, 0, 0, 0, 0, 0, 0},
		right_right_ext = {0, 0, 0, 0, 0, 0, 0, 0};
	bool has_left_ext =
		bps_tree_collect_left_path_elem_inner(tree, inner_path_elem,
						      &left_ext);
	bool has_right_ext =
		bps_tree_collect_right_ext_inner(tree, inner_path_elem,
						 &right_ext);
	bool has_left_left_ext = false;
	bool has_right_right_ext = false;
	/* Same borrowing strategy as in bps_tree_process_delete_leaf. */
	if (has_left_ext && has_right_ext) {
		if (bps_tree_inner_overmin_size(left_ext.block) >
		    bps_tree_inner_overmin_size(right_ext.block)) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_inner_overmin_size(left_ext.block) / 2;
			bps_tree_move_elems_to_right_inner(tree, &left_ext,
							   inner_path_elem,
							   move_count);
			BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x1);
			return;
		} else if (bps_tree_inner_overmin_size(right_ext.block) > 0) {
			bps_tree_pos_t move_count = 1 +
				bps_tree_inner_overmin_size(right_ext.block) / 2;
			bps_tree_move_elems_to_left_inner(tree,
inner_path_elem, &right_ext, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x2); return; } } else if (has_left_ext) { if (bps_tree_inner_overmin_size(left_ext.block) > 0) { bps_tree_pos_t move_count = 1 + bps_tree_inner_overmin_size(left_ext.block) / 2; bps_tree_move_elems_to_right_inner(tree, &left_ext, inner_path_elem, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x3); return; } has_left_left_ext = bps_tree_collect_left_path_elem_inner(tree, &left_ext, &left_left_ext); if (has_left_left_ext && bps_tree_inner_overmin_size(left_left_ext.block) > 0) { bps_tree_pos_t move_count2 = 1 + (2 * bps_tree_inner_overmin_size(left_left_ext.block) - 1) / 3; bps_tree_pos_t move_count1 = 1 + move_count2 / 2; bps_tree_move_elems_to_right_inner(tree, &left_ext, inner_path_elem, move_count1); bps_tree_move_elems_to_right_inner(tree, &left_left_ext, &left_ext, move_count2); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x4); return; } } else if (has_right_ext) { if (bps_tree_inner_overmin_size(right_ext.block) > 0) { bps_tree_pos_t move_count = 1 + bps_tree_inner_overmin_size(right_ext.block) / 2; bps_tree_move_elems_to_left_inner(tree, inner_path_elem, &right_ext, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x5); return; } has_right_right_ext = bps_tree_collect_right_ext_inner(tree, &right_ext, &right_right_ext); if (has_right_right_ext && bps_tree_inner_overmin_size(right_right_ext.block) > 0) { bps_tree_pos_t move_count2 = 1 + (2 * bps_tree_inner_overmin_size(right_right_ext.block) - 1) / 3; bps_tree_pos_t move_count1 = 1 + move_count2 / 2; bps_tree_move_elems_to_left_inner(tree, inner_path_elem, &right_ext, move_count1); bps_tree_move_elems_to_left_inner(tree, &right_ext, &right_right_ext, move_count2); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x6); return; } } if (has_left_ext && has_right_ext) { bps_tree_pos_t move_count = (inner_path_elem->block->header.size + 1) / 2; bps_tree_move_elems_to_right_inner(tree, inner_path_elem, 
&right_ext, move_count); move_count = inner_path_elem->block->header.size; bps_tree_move_elems_to_left_inner(tree, &left_ext, inner_path_elem, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x7); } else if (has_left_ext && has_left_left_ext) { bps_tree_pos_t move_count = (inner_path_elem->block->header.size + 1) / 2; bps_tree_move_elems_to_left_inner(tree, &left_left_ext, &left_ext, move_count); move_count = inner_path_elem->block->header.size; bps_tree_move_elems_to_left_inner(tree, &left_ext, inner_path_elem, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x8); } else if (has_right_ext && has_right_right_ext) { bps_tree_pos_t move_count = (inner_path_elem->block->header.size + 1) / 2; bps_tree_move_elems_to_right_inner(tree, &right_ext, &right_right_ext, move_count); move_count = inner_path_elem->block->header.size; bps_tree_move_elems_to_right_inner(tree, inner_path_elem, &right_ext, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x9); } else if (has_left_ext) { if (inner_path_elem->block->header.size + left_ext.block->header.size > BPS_TREE_MAX_COUNT_IN_INNER) { BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0xA); //throw 1; return; } bps_tree_pos_t move_count = inner_path_elem->block->header.size; bps_tree_move_elems_to_left_inner(tree, &left_ext, inner_path_elem, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0xB); } else if (has_right_ext) { if (inner_path_elem->block->header.size + right_ext.block->header.size > BPS_TREE_MAX_COUNT_IN_INNER) { BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0xC); //throw 2; return; } bps_tree_pos_t move_count = inner_path_elem->block->header.size; bps_tree_move_elems_to_right_inner(tree, inner_path_elem, &right_ext, move_count); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0xD); } else { if (inner_path_elem->block->header.size > 1) { BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0xE); return; } assert(tree->depth > 1); assert(inner_path_elem->parent == 0); tree->depth--; 
tree->root_id = inner_path_elem->block->child_ids[0]; bps_tree_dispose_inner(tree, inner_path_elem->block, inner_path_elem->block_id); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0xF); return; } assert(inner_path_elem->block->header.size == 0); bps_tree_dispose_inner(tree, inner_path_elem->block, inner_path_elem->block_id); assert(inner_path_elem->parent); bps_tree_process_delete_inner(tree, inner_path_elem->parent); BPS_TREE_BRANCH_TRACE(tree, delete_inner, 1 << 0x10); } /** * @brief Insert an element to the tree or replace an element in the tree * In case of replacing, if 'replaced' argument is not null, it'll * be filled with replaced element. In case of inserting it's left * intact. * Thus one can distinguish a real insert or replace by passing to * the function a pointer to some value; and if it was changed * during the function call, then the replace has happened. * Otherwise, it was an insert. * @param tree - pointer to a tree * @param new_elem - inserting or replacing element * @replaced - optional pointer for a replaces element * @return - 0 on success or -1 if memory allocation failed for insert */ static inline int bps_tree_insert(struct bps_tree *tree, bps_tree_elem_t new_elem, bps_tree_elem_t *replaced) { if (tree->root_id == (bps_tree_block_id_t)(-1)) return bps_tree_insert_first_elem(tree, new_elem); struct bps_inner_path_elem path[BPS_TREE_MAX_DEPTH]; struct bps_leaf_path_elem leaf_path_elem; bool exact; bps_tree_collect_path(tree, new_elem, path, &leaf_path_elem, &exact); if (exact) { bps_tree_process_replace(tree, &leaf_path_elem, new_elem, replaced); return 0; } else { bps_tree_block_id_t unused1; bps_tree_pos_t unused2; return bps_tree_process_insert_leaf(tree, &leaf_path_elem, new_elem, &unused1, &unused2); } } /** * @sa bps_tree_insert + new parameter: * @param[out] inserted_iterator Iterator, positioned to the * new element. 
*/
static inline int
bps_tree_insert_get_iterator(struct bps_tree *tree, bps_tree_elem_t new_elem,
                             bps_tree_elem_t *replaced,
                             struct bps_tree_iterator *inserted_iterator)
{
    /*
     * Empty tree: delegate to the first-element insertion and point
     * the iterator at the only possible position - offset 0 of the
     * first leaf.
     */
    if (tree->root_id == (bps_tree_block_id_t)(-1)) {
        int rc = bps_tree_insert_first_elem(tree, new_elem);
        inserted_iterator->block_id = tree->first_id;
        inserted_iterator->pos = 0;
        matras_head_read_view(&inserted_iterator->view);
        return rc;
    }
    struct bps_inner_path_elem path[BPS_TREE_MAX_DEPTH];
    struct bps_leaf_path_elem leaf_path_elem;
    bool exact;
    /* Descend from the root, recording the path down to the leaf. */
    bps_tree_collect_path(tree, new_elem, path, &leaf_path_elem, &exact);
    if (exact) {
        /*
         * An equal element already exists: it is replaced in place,
         * so the iterator position is known before the replace call.
         */
        inserted_iterator->block_id = leaf_path_elem.block_id;
        inserted_iterator->pos = leaf_path_elem.insertion_point;
        matras_head_read_view(&inserted_iterator->view);
        bps_tree_process_replace(tree, &leaf_path_elem, new_elem,
                                 replaced);
        return 0;
    } else {
        /*
         * A real insert: the leaf may be split or rebalanced, so the
         * final block id and position are reported back by the
         * insert routine itself.
         */
        int rc = bps_tree_process_insert_leaf(tree, &leaf_path_elem,
                                              new_elem,
                                              &inserted_iterator->block_id,
                                              &inserted_iterator->pos);
        matras_head_read_view(&inserted_iterator->view);
        return rc;
    }
}

/**
 * @brief Delete an element from a tree.
 * @param tree - pointer to a tree
 * @param elem - the element to delete
 * @return - 0 on success, or -1 if the element was not found in the
 * tree (including the case of an empty tree)
 */
static inline int
bps_tree_delete(struct bps_tree *tree, bps_tree_elem_t elem)
{
    /* An empty tree has nothing to delete. */
    if (tree->root_id == (bps_tree_block_id_t)(-1))
        return -1;
    struct bps_inner_path_elem path[BPS_TREE_MAX_DEPTH];
    struct bps_leaf_path_elem leaf_path_elem;
    bool exact;
    bps_tree_collect_path(tree, elem, path, &leaf_path_elem, &exact);
    /* Only an exactly matching element may be deleted. */
    if (!exact)
        return -1;
    bps_tree_process_delete_leaf(tree, &leaf_path_elem);
    return 0;
}

/**
 * @brief Recursively find a maximum element in subtree.
* Used only for debug purposes
 */
static inline bps_tree_elem_t
bps_tree_debug_find_max_elem(const struct bps_tree *tree,
                             struct bps_block *block)
{
    assert(block->size);
    if (block->type == BPS_TREE_BT_LEAF) {
        /* For a leaf the maximum is simply its last element. */
        struct bps_leaf *leaf = (struct bps_leaf *)block;
        return leaf->elems[block->size - 1];
    } else {
        assert(block->type == BPS_TREE_BT_INNER);
        /* For an inner block, recurse into the last child. */
        struct bps_inner *inner = (struct bps_inner *)block;
        bps_tree_block_id_t next_block_id =
            inner->child_ids[block->size - 1];
        struct bps_block *next_block =
            bps_tree_restore_block(tree, next_block_id);
        return bps_tree_debug_find_max_elem(tree, next_block);
    }
}

#ifndef BPS_TREE_NO_DEBUG
/**
 * @brief Recursively checks the block and the corresponding subtree
 * Used by bps_tree_debug_check
 * Each class of violation sets its own bit in the returned mask.
 */
static inline int
bps_tree_debug_check_block(const struct bps_tree *tree, struct bps_block *block,
                           bps_tree_block_id_t id, int level,
                           size_t *calc_count,
                           bps_tree_block_id_t *expected_prev_id,
                           bps_tree_block_id_t *expected_this_id,
                           bool check_fullness)
{
    /* 0x10: unknown block type. */
    if (block->type != BPS_TREE_BT_LEAF && block->type != BPS_TREE_BT_INNER)
        return 0x10;
    if (block->type == BPS_TREE_BT_LEAF) {
        struct bps_leaf *leaf = (struct bps_leaf *)(block);
        int result = 0;
        /* 0x1000000: a non-border leaf must be at least 2/3 full. */
        if (check_fullness)
            if (block->size < BPS_TREE_MAX_COUNT_IN_LEAF * 2 / 3)
                result |= 0x1000000;
        *calc_count += block->size;
        /* 0x10000/0x20000: the doubly-linked leaf chain is broken. */
        if (id != *expected_this_id)
            result |= 0x10000;
        if (leaf->prev_id != *expected_prev_id)
            result |= 0x20000;
        *expected_prev_id = id;
        *expected_this_id = leaf->next_id;
        /* 0x100: all leaves must reside on level 1. */
        if (level != 1)
            result |= 0x100;
        /* 0x200: leaf size out of the valid [1, max] range. */
        if (block->size == 0)
            result |= 0x200;
        if (block->size > BPS_TREE_MAX_COUNT_IN_LEAF)
            result |= 0x200;
        /* 0x400: elements in a leaf must be strictly increasing. */
        for (bps_tree_pos_t i = 1; i < block->size; i++)
            if (BPS_TREE_COMPARE(leaf->elems[i - 1], leaf->elems[i],
                                 tree->arg) >= 0)
                result |= 0x400;
        return result;
    } else {
        struct bps_inner *inner = (struct bps_inner *)(block);
        int result = 0;
        /* 0x2000000: a non-border inner block must be 2/3 full. */
        if (check_fullness)
            if (block->size < BPS_TREE_MAX_COUNT_IN_INNER * 2 / 3)
                result |= 0x2000000;
        /* 0x1000: inner size out of the valid [2, max] range. */
        if (block->size < 2)
            result |= 0x1000;
        if (block->size > BPS_TREE_MAX_COUNT_IN_INNER)
            result |= 0x1000;
        /* 0x2000: separator keys must be strictly increasing. */
        for (bps_tree_pos_t i = 1; i < block->size - 1; i++)
            if (BPS_TREE_COMPARE(inner->elems[i - 1], inner->elems[i],
                                 tree->arg) >= 0)
                result |= 0x2000;
        /*
         * 0x4000: each stored separator must equal the maximum of
         * the corresponding child subtree.
         */
        for (bps_tree_pos_t i = 0; i < block->size - 1; i++) {
            struct bps_block *tmp_block =
                bps_tree_restore_block(tree, inner->child_ids[i]);
            bps_tree_elem_t calc_max_elem =
                bps_tree_debug_find_max_elem(tree, tmp_block);
            if (inner->elems[i] != calc_max_elem)
                result |= 0x4000;
        }
        /* 0x8000: the last separator must be below the subtree max. */
        if (block->size > 1) {
            bps_tree_elem_t calc_max_elem =
                bps_tree_debug_find_max_elem(tree, block);
            if (BPS_TREE_COMPARE(inner->elems[block->size - 2],
                                 calc_max_elem, tree->arg) >= 0)
                result |= 0x8000;
        }
        bool check_fullness_next = block->size > 2;
        /*
         * 0x4000000: a 2-child inner block is only allowed when its
         * children could not have been merged into one block.
         */
        if (block->size == 2) {
            bps_tree_pos_t block_max_size;
            if (level == 2)
                block_max_size = BPS_TREE_MAX_COUNT_IN_LEAF;
            else
                block_max_size = BPS_TREE_MAX_COUNT_IN_INNER;
            struct bps_block *child1 =
                bps_tree_restore_block(tree, inner->child_ids[0]);
            struct bps_block *child2 =
                bps_tree_restore_block(tree, inner->child_ids[1]);
            if (child1->size + child2->size <= block_max_size)
                result |= 0x4000000;
        }
        /* Recurse into every child subtree, merging error bits. */
        for (bps_tree_pos_t i = 0; i < block->size; i++)
            result |= bps_tree_debug_check_block(tree,
                bps_tree_restore_block(tree, inner->child_ids[i]),
                inner->child_ids[i], level - 1, calc_count,
                expected_prev_id, expected_this_id,
                check_fullness_next);
        return result;
    }
}

/**
 * @brief A debug self-check.
 * Returns a bitmask of found errors (0 on success).
 * I hope you will not need it.
* @param tree - pointer to a tree
 * @return - Bitwise-OR of all errors found
 */
static inline int
bps_tree_debug_check(const struct bps_tree *tree)
{
    int result = 0;
    if (tree->root_id == (bps_tree_block_id_t)(-1)) {
        /* 0x1: an empty tree must have completely zeroed stats. */
        if (tree->depth != 0)
            result |= 0x1;
        if (tree->size != 0)
            result |= 0x1;
        if (tree->leaf_count != 0 || tree->inner_count != 0)
            result |= 0x1;
        return result;
    }
    struct bps_block *root = bps_tree_root(tree);
    /* 0x8: the cached maximum must match the actual tree maximum. */
    if (tree->max_elem != bps_tree_debug_find_max_elem(tree, root))
        result |= 0x8;
    size_t calc_count = 0;
    bps_tree_block_id_t expected_prev_id = (bps_tree_block_id_t)(-1);
    bps_tree_block_id_t expected_this_id = tree->first_id;
    /* Recursively verify every block, collecting the element count. */
    result |= bps_tree_debug_check_block(tree, root, tree->root_id,
                                         tree->depth, &calc_count,
                                         &expected_prev_id,
                                         &expected_this_id, false);
    /* 0x40000/0x80000: the leaf chain must end exactly at last_id. */
    if (expected_this_id != (bps_tree_block_id_t)(-1))
        result |= 0x40000;
    if (expected_prev_id != tree->last_id)
        result |= 0x80000;
    /* 0x4: the stored size must match the recalculated count. */
    if (tree->size != calc_count)
        result |= 0x4;
    return result;
}

/**
 * @brief Print an indent to distinguish levels of the tree in output.
 * @param level - current printing level of a tree.
 */
static inline void
bps_tree_print_indent(int level)
{
    for (int i = 0; i < level; i++)
        printf(" ");
}

/**
 * @brief Print a block of a tree.
 * Forward declaration: mutually recursive with bps_tree_print_inner.
 * @param tree - printing tree
 * @param block - block to print
 * @param level - current printing level
 * @param elem_fmt - printing format of elements
 */
static void
bps_tree_print_block(const struct bps_tree *tree, const struct bps_block *block,
                     int level, const char *elem_fmt);

/**
 * @brief Print a leaf block of a tree.
* @param block - block to print
 * @param indent - current printing level (indentation depth)
 * @param elem_fmt - printing format of elements
 */
static inline void
bps_tree_print_leaf(const struct bps_leaf* block, int indent,
                    const char *elem_fmt)
{
    bps_tree_print_indent(indent);
    /* Output format: [(<size>) e1 e2 ...] */
    printf("[(%d)", (int)block->header.size);
    for (bps_tree_pos_t i = 0; i < block->header.size; i++) {
        printf(" ");
        printf(elem_fmt, block->elems[i]);
    }
    printf("]\n");
}

/**
 * @brief Print an inner block of a tree. Recursively prints children.
 * Children and separator keys are interleaved in the output, one
 * indentation level deeper than the separators.
 * @param tree - printing tree
 * @param block - block to print
 * @param indent - current printing level (indentation depth)
 * @param elem_fmt - printing format of elements
 */
static inline void
bps_tree_print_inner(const struct bps_tree *tree, const struct bps_inner* block,
                     int indent, const char *elem_fmt)
{
    struct bps_block *next =
        bps_tree_restore_block(tree, block->child_ids[0]);
    bps_tree_print_block(tree, next, indent + 1, elem_fmt);
    for (bps_tree_pos_t i = 0; i < block->header.size - 1; i++) {
        bps_tree_print_indent(indent);
        printf(elem_fmt, block->elems[i]);
        printf("\n");
        next = bps_tree_restore_block(tree, block->child_ids[i + 1]);
        bps_tree_print_block(tree, next, indent + 1, elem_fmt);
    }
}

/**
 * @brief Print a block of a tree.
 * Dispatches to the leaf or inner printer by the block type.
 * @param tree - printing tree
 * @param block - block to print
 * @param indent - current printing level (indentation depth)
 * @param elem_fmt - printing format of elements
 */
static inline void
bps_tree_print_block(const struct bps_tree *tree, const struct bps_block *block,
                     int indent, const char *elem_fmt)
{
    if (block->type == BPS_TREE_BT_INNER)
        bps_tree_print_inner(tree, (const struct bps_inner *)block,
                             indent, elem_fmt);
    else
        bps_tree_print_leaf((const struct bps_leaf *)block,
                            indent, elem_fmt);
}

/**
 * @brief Debug print tree to output in readable form.
 * I hope you will not need it.
 * @param tree - tree to print
 * @param elem_fmt - format for printing an element. "%d" or "%p" for example.
*/
static inline void
bps_tree_print(const struct bps_tree *tree, const char *elem_fmt)
{
    if (tree->root_id == (bps_tree_block_id_t)(-1)) {
        printf("Empty\n");
        return;
    }
    bps_tree_print_block(tree, bps_tree_root(tree), 0, elem_fmt);
}

/*
 * Debug utilities for testing base operation on blocks:
 * inserting, deleting, moving to left and right blocks,
 * and (inserting and moving)
 */

/**
 * @brief Assign a value to an element.
 * Used for debug self-check
 * Tags the element by its first byte; the rest is zeroed, so the tag
 * can be read back with bps_tree_debug_get_elem.
 */
static inline void
bps_tree_debug_set_elem(bps_tree_elem_t *elem, unsigned char c)
{
    memset(elem, 0, sizeof(bps_tree_elem_t));
    *(unsigned char *)elem = c;
}

/**
 * @brief Get previously assigned value from an element.
 * Used for debug self-check
 */
static inline unsigned char
bps_tree_debug_get_elem(bps_tree_elem_t *elem)
{
    return *(unsigned char *)elem;
}

/**
 * @brief Assign a value to an element in inner block.
 * Used for debug self-check
 * The maximum (last) element of an inner block is stored in the
 * parent's copy (max_elem_copy), not in the block itself.
 */
static inline void
bps_tree_debug_set_elem_inner(struct bps_inner_path_elem *path_elem,
                              bps_tree_pos_t pos, unsigned char c)
{
    assert(pos >= 0);
    assert(pos < path_elem->block->header.size);
    if (pos < path_elem->block->header.size - 1 &&
        pos < BPS_TREE_MAX_COUNT_IN_INNER - 1) /* fix gcc-4.9 warning */
        bps_tree_debug_set_elem(path_elem->block->elems + pos, c);
    else
        bps_tree_debug_set_elem(path_elem->max_elem_copy, c);
}

/**
 * @brief Get previously assigned value from an element in inner block.
 * Used for debug self-check
 * Mirrors bps_tree_debug_set_elem_inner: the last element is read
 * from the parent's max_elem_copy.
 */
static inline unsigned char
bps_tree_debug_get_elem_inner(const struct bps_inner_path_elem *path_elem,
                              bps_tree_pos_t pos)
{
    assert(pos >= 0);
    assert(pos < path_elem->block->header.size);
    if (pos < path_elem->block->header.size - 1 &&
        pos < BPS_TREE_MAX_COUNT_IN_INNER - 1) /* fix gcc-4.9 warning */
        return bps_tree_debug_get_elem(path_elem->block->elems + pos);
    else
        return bps_tree_debug_get_elem(path_elem->max_elem_copy);
}

/**
 * @brief Check all possible insertions into a leaf.
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
*/
static inline int
bps_tree_debug_check_insert_into_leaf(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_LEAF;
    /*
     * Exhaustively enumerate every leaf size (i) and every insertion
     * position (j). The leaf is pre-filled with tags 0..i skipping j,
     * so after inserting tag j the leaf must hold exactly 0..i in
     * order and the parent's max copy must track the last element.
     */
    for (unsigned int i = 0; i < szlim; i++) {
        for (unsigned int j = 0; j <= i; j++) {
            tree->size = 0;
            struct bps_leaf block;
            block.header.type = BPS_TREE_BT_LEAF;
            block.header.size = i;
            for (unsigned int k = 0; k < szlim; k++)
                if (k < j)
                    bps_tree_debug_set_elem(block.elems + k, k);
                else
                    bps_tree_debug_set_elem(block.elems + k, k + 1);
            struct bps_leaf_path_elem path_elem;
            bps_tree_elem_t max;
            bps_tree_elem_t ins;
            bps_tree_debug_set_elem(&max, i);
            bps_tree_debug_set_elem(&ins, j);
            path_elem.block = &block;
            path_elem.block_id = 0;
            path_elem.insertion_point = j;
            path_elem.max_elem_copy = &max;
            path_elem.max_elem_block_id = -1;
            path_elem.max_elem_pos = -1;
            bps_tree_insert_into_leaf(tree, &path_elem, ins);
            /* Bit 0: size bookkeeping was not updated correctly. */
            if (block.header.size != (bps_tree_pos_t)(i + 1) ||
                tree->size != (bps_tree_pos_t)1) {
                result |= (1 << 0);
                assert(!assertme);
            }
            /* Bit 1: max copy or element layout is wrong. */
            if (bps_tree_debug_get_elem(&max) !=
                bps_tree_debug_get_elem(
                    block.elems + block.header.size - 1)) {
                result |= (1 << 1);
                assert(!assertme);
            }
            for (unsigned int k = 0; k <= i; k++) {
                if (bps_tree_debug_get_elem(block.elems + k) !=
                    (unsigned char) k) {
                    result |= (1 << 1);
                    assert(!assertme);
                }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible deleting from a leaf.
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
*/
static inline int
bps_tree_debug_check_delete_from_leaf(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_LEAF;
    /*
     * Exhaustively enumerate every leaf size (i >= 1) and every
     * deletion position (j). The leaf is pre-filled with tags
     * 0..i-1, so after deleting position j it must hold the same
     * sequence without tag j, still in order.
     */
    for (unsigned int i = 1; i <= szlim; i++) {
        for (unsigned int j = 0; j < i; j++) {
            tree->size = 1;
            struct bps_leaf block;
            block.header.type = BPS_TREE_BT_LEAF;
            block.header.size = i;
            for (unsigned int k = 0; k < i; k++)
                bps_tree_debug_set_elem(block.elems + k, k);
            struct bps_leaf_path_elem path_elem;
            bps_tree_elem_t max;
            /* Expected maximum tag after the deletion. */
            bps_tree_debug_set_elem(&max, j == i - 1 ? i - 2 : i - 1);
            path_elem.block = &block;
            path_elem.block_id = 0;
            path_elem.insertion_point = j;
            path_elem.max_elem_copy = &max;
            path_elem.max_elem_block_id = -1;
            path_elem.max_elem_pos = -1;
            bps_tree_delete_from_leaf(tree, &path_elem);
            /* Bit 2: size bookkeeping was not updated correctly. */
            if (block.header.size != (bps_tree_pos_t)(i - 1) ||
                tree->size != (bps_tree_pos_t)0) {
                result |= (1 << 2);
                assert(!assertme);
            }
            /* Bit 3: max copy or element layout is wrong. */
            if (i > 1 && bps_tree_debug_get_elem(&max) !=
                bps_tree_debug_get_elem(
                    block.elems + block.header.size - 1)) {
                result |= (1 << 3);
                assert(!assertme);
            }
            for (unsigned int k = 0; k < i - 1; k++) {
                if (bps_tree_debug_get_elem(block.elems + k) !=
                    (unsigned char) (k < j ? k : k + 1)) {
                    result |= (1 << 3);
                    assert(!assertme);
                }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible moving right of leafs.
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_move_to_right_leaf(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_LEAF;
    /*
     * For every pair of leaf sizes (i, j) and every legal move count
     * k, fill both leaves with consecutive tags 0..i+j-1 and check
     * that after moving k elements from 'a' to 'b' the combined
     * sequence and both max copies are intact.
     */
    for (unsigned int i = 0; i <= szlim; i++) {
        for (unsigned int j = 0; j <= szlim; j++) {
            unsigned int max_move = i < szlim - j ? i : szlim - j;
            for (unsigned int k = 1; k <= max_move; k++) {
                struct bps_leaf a, b;
                a.header.type = BPS_TREE_BT_LEAF;
                a.header.size = i;
                b.header.type = BPS_TREE_BT_LEAF;
                b.header.size = j;
                /* 0xFF marks slots that must never be read. */
                memset(a.elems, 0xFF, sizeof(a.elems));
                memset(b.elems, 0xFF, sizeof(b.elems));
                unsigned char c = 0;
                for (unsigned int u = 0; u < i; u++)
                    bps_tree_debug_set_elem(a.elems + u, c++);
                for (unsigned int u = 0; u < j; u++)
                    bps_tree_debug_set_elem(b.elems + u, c++);
                bps_tree_elem_t ma;
                bps_tree_debug_set_elem(&ma, 0xFF);
                bps_tree_elem_t mb;
                bps_tree_debug_set_elem(&mb, 0xFF);
                if (i)
                    ma = a.elems[i - 1];
                if (j)
                    mb = b.elems[j - 1];
                struct bps_leaf_path_elem a_path_elem, b_path_elem;
                a_path_elem.block = &a;
                a_path_elem.max_elem_copy = &ma;
                a_path_elem.max_elem_block_id = -1;
                a_path_elem.max_elem_pos = -1;
                b_path_elem.block = &b;
                b_path_elem.max_elem_copy = &mb;
                b_path_elem.max_elem_block_id = -1;
                b_path_elem.max_elem_pos = -1;
                a_path_elem.block_id = 0;
                b_path_elem.block_id = 0;
                bps_tree_move_elems_to_right_leaf(tree,
                    &a_path_elem, &b_path_elem,
                    (bps_tree_pos_t) k);
                /* Bit 4: resulting sizes are wrong. */
                if (a.header.size != (bps_tree_pos_t) (i - k)) {
                    result |= (1 << 4);
                    assert(!assertme);
                }
                if (b.header.size != (bps_tree_pos_t) (j + k)) {
                    result |= (1 << 4);
                    assert(!assertme);
                }
                /* Bit 5: max copies or element order are wrong. */
                if (a.header.size)
                    if (ma != a.elems[a.header.size - 1]) {
                        result |= (1 << 5);
                        assert(!assertme);
                    }
                if (b.header.size)
                    if (mb != b.elems[b.header.size - 1]) {
                        result |= (1 << 5);
                        assert(!assertme);
                    }
                c = 0;
                for (unsigned int u = 0;
                     u < (unsigned int) a.header.size; u++)
                    if (bps_tree_debug_get_elem(a.elems + u) != c++) {
                        result |= (1 << 5);
                        assert(!assertme);
                    }
                for (unsigned int u = 0;
                     u < (unsigned int) b.header.size; u++)
                    if (bps_tree_debug_get_elem(b.elems + u) != c++) {
                        result |= (1 << 5);
                        assert(!assertme);
                    }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible moving left of leafs.
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
*/
static inline int
bps_tree_debug_check_move_to_left_leaf(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_LEAF;
    /*
     * Mirror of bps_tree_debug_check_move_to_right_leaf: for every
     * pair of leaf sizes (i, j) and every legal move count k, move k
     * elements from 'b' to 'a' and verify sizes, max copies and the
     * combined tag sequence 0..i+j-1.
     */
    for (unsigned int i = 0; i <= szlim; i++) {
        for (unsigned int j = 0; j <= szlim; j++) {
            unsigned int max_move = j < szlim - i ? j : szlim - i;
            for (unsigned int k = 1; k <= max_move; k++) {
                struct bps_leaf a, b;
                a.header.type = BPS_TREE_BT_LEAF;
                a.header.size = i;
                b.header.type = BPS_TREE_BT_LEAF;
                b.header.size = j;
                /* 0xFF marks slots that must never be read. */
                memset(a.elems, 0xFF, sizeof(a.elems));
                memset(b.elems, 0xFF, sizeof(b.elems));
                unsigned char c = 0;
                for (unsigned int u = 0; u < i; u++)
                    bps_tree_debug_set_elem(a.elems + u, c++);
                for (unsigned int u = 0; u < j; u++)
                    bps_tree_debug_set_elem(b.elems + u, c++);
                bps_tree_elem_t ma;
                bps_tree_debug_set_elem(&ma, 0xFF);
                bps_tree_elem_t mb;
                bps_tree_debug_set_elem(&mb, 0xFF);
                if (i)
                    ma = a.elems[i - 1];
                if (j)
                    mb = b.elems[j - 1];
                struct bps_leaf_path_elem a_path_elem, b_path_elem;
                a_path_elem.block = &a;
                a_path_elem.max_elem_copy = &ma;
                a_path_elem.max_elem_block_id = -1;
                a_path_elem.max_elem_pos = -1;
                b_path_elem.block = &b;
                b_path_elem.max_elem_copy = &mb;
                b_path_elem.max_elem_block_id = -1;
                b_path_elem.max_elem_pos = -1;
                a_path_elem.block_id = 0;
                b_path_elem.block_id = 0;
                bps_tree_move_elems_to_left_leaf(tree,
                    &a_path_elem, &b_path_elem,
                    (bps_tree_pos_t) k);
                /* Bit 6: resulting sizes are wrong. */
                if (a.header.size != (bps_tree_pos_t) (i + k)) {
                    result |= (1 << 6);
                    assert(!assertme);
                }
                if (b.header.size != (bps_tree_pos_t) (j - k)) {
                    result |= (1 << 6);
                    assert(!assertme);
                }
                /* Bit 7: max copies or element order are wrong. */
                if (a.header.size)
                    if (ma != a.elems[a.header.size - 1]) {
                        result |= (1 << 7);
                        assert(!assertme);
                    }
                if (b.header.size)
                    if (mb != b.elems[b.header.size - 1]) {
                        result |= (1 << 7);
                        assert(!assertme);
                    }
                c = 0;
                for (unsigned int u = 0;
                     u < (unsigned int) a.header.size; u++)
                    if (bps_tree_debug_get_elem(a.elems + u) != c++) {
                        result |= (1 << 7);
                        assert(!assertme);
                    }
                for (unsigned int u = 0;
                     u < (unsigned int) b.header.size; u++)
                    if (bps_tree_debug_get_elem(b.elems + u) != c++) {
                        result |= (1 << 7);
                        assert(!assertme);
                    }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible insertion and moving right of leafs.
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_insert_and_move_to_right_leaf(struct bps_tree *tree,
                                                   bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_LEAF;
    /*
     * Enumerate source size i, destination size j, insertion
     * position k and move count u. The tag 'ic' to insert is chosen
     * so that afterwards the two leaves must together hold the
     * ordered sequence 0..i+j.
     */
    for (unsigned int i = 0; i <= szlim; i++) {
        for (unsigned int j = 0; j <= szlim; j++) {
            unsigned int max_move = i + 1 < szlim - j ?
                                    i + 1 : szlim - j;
            for (unsigned int k = 0; k <= i; k++) {
                for (unsigned int u = 1; u <= max_move; u++) {
                    struct bps_leaf a, b;
                    a.header.type = BPS_TREE_BT_LEAF;
                    a.header.size = i;
                    b.header.type = BPS_TREE_BT_LEAF;
                    b.header.size = j;
                    memset(a.elems, 0xFF, sizeof(a.elems));
                    memset(b.elems, 0xFF, sizeof(b.elems));
                    unsigned char c = 0;
                    unsigned char ic = i + j;
                    /* Leave a gap at position k for the insert. */
                    for (unsigned int v = 0; v < i; v++) {
                        if (v == k)
                            ic = c++;
                        bps_tree_debug_set_elem(
                            a.elems + v, c++);
                    }
                    if (k == i)
                        ic = c++;
                    for (unsigned int v = 0; v < j; v++)
                        bps_tree_debug_set_elem(
                            b.elems + v, c++);
                    bps_tree_elem_t ma;
                    bps_tree_debug_set_elem(&ma, 0xFF);
                    bps_tree_elem_t mb;
                    bps_tree_debug_set_elem(&mb, 0xFF);
                    if (i)
                        ma = a.elems[i - 1];
                    if (j)
                        mb = b.elems[j - 1];
                    struct bps_leaf_path_elem a_path_elem, b_path_elem;
                    a_path_elem.block = &a;
                    a_path_elem.max_elem_copy = &ma;
                    a_path_elem.max_elem_block_id = -1;
                    a_path_elem.max_elem_pos = -1;
                    b_path_elem.block = &b;
                    b_path_elem.max_elem_copy = &mb;
                    b_path_elem.max_elem_block_id = -1;
                    b_path_elem.max_elem_pos = -1;
                    a_path_elem.insertion_point = k;
                    a_path_elem.block_id = 0;
                    b_path_elem.block_id = 0;
                    bps_tree_elem_t ins;
                    bps_tree_debug_set_elem(&ins, ic);
                    bps_tree_insert_and_move_elems_to_right_leaf(
                        tree, &a_path_elem, &b_path_elem,
                        (bps_tree_pos_t) u, ins);
                    /* Bit 8: resulting sizes are wrong. */
                    if (a.header.size !=
                        (bps_tree_pos_t) (i - u + 1)) {
                        result |= (1 << 8);
                        assert(!assertme);
                    }
                    if (b.header.size !=
                        (bps_tree_pos_t) (j + u)) {
                        result |= (1 << 8);
                        assert(!assertme);
                    }
                    /* Bit 9: max copies or order are wrong. */
                    if (i - u + 1)
                        if (ma != a.elems[a.header.size - 1]) {
                            result |= (1 << 9);
                            assert(!assertme);
                        }
                    if (j + u)
                        if (mb != b.elems[b.header.size - 1]) {
                            result |= (1 << 9);
                            assert(!assertme);
                        }
                    c = 0;
                    for (unsigned int v = 0;
                         v < (unsigned int) a.header.size; v++)
                        if (bps_tree_debug_get_elem(
                            a.elems + v) != c++) {
                            result |= (1 << 9);
                            assert(!assertme);
                        }
                    for (unsigned int v = 0;
                         v < (unsigned int) b.header.size; v++)
                        if (bps_tree_debug_get_elem(
                            b.elems + v) != c++) {
                            result |= (1 << 9);
                            assert(!assertme);
                        }
                }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible insertion and moving left of leafs.
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_insert_and_move_to_left_leaf(struct bps_tree *tree,
                                                  bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_LEAF;
    /*
     * Mirror of the "insert and move right" check: insert into 'b'
     * at position k while moving u elements to 'a'.
     */
    for (unsigned int i = 0; i <= szlim; i++) {
        for (unsigned int j = 0; j <= szlim; j++) {
            unsigned int max_move = j + 1 < szlim - i ?
                                    j + 1 : szlim - i;
            for (unsigned int k = 0; k <= j; k++) {
                for (unsigned int u = 1; u <= max_move; u++) {
                    struct bps_leaf a, b;
                    a.header.type = BPS_TREE_BT_LEAF;
                    a.header.size = i;
                    b.header.type = BPS_TREE_BT_LEAF;
                    b.header.size = j;
                    memset(a.elems, 0xFF, sizeof(a.elems));
                    memset(b.elems, 0xFF, sizeof(b.elems));
                    unsigned char c = 0;
                    unsigned char ic = i + j;
                    for (unsigned int v = 0; v < i; v++)
                        bps_tree_debug_set_elem(
                            a.elems + v, c++);
                    /* Leave a gap at position k for the insert. */
                    for (unsigned int v = 0; v < j; v++) {
                        if (v == k)
                            ic = c++;
                        bps_tree_debug_set_elem(
                            b.elems + v, c++);
                    }
                    bps_tree_elem_t ma;
                    bps_tree_debug_set_elem(&ma, 0xFF);
                    bps_tree_elem_t mb;
                    bps_tree_debug_set_elem(&mb, 0xFF);
                    if (i)
                        ma = a.elems[i - 1];
                    if (j)
                        mb = b.elems[j - 1];
                    struct bps_leaf_path_elem a_path_elem, b_path_elem;
                    a_path_elem.block = &a;
                    a_path_elem.max_elem_copy = &ma;
                    a_path_elem.max_elem_block_id = -1;
                    a_path_elem.max_elem_pos = -1;
                    b_path_elem.block = &b;
                    b_path_elem.max_elem_copy = &mb;
                    b_path_elem.max_elem_block_id = -1;
                    b_path_elem.max_elem_pos = -1;
                    b_path_elem.insertion_point = k;
                    a_path_elem.block_id = 0;
                    b_path_elem.block_id = 0;
                    bps_tree_elem_t ins;
                    bps_tree_debug_set_elem(&ins, ic);
                    bps_tree_insert_and_move_elems_to_left_leaf(
                        tree, &a_path_elem, &b_path_elem,
                        (bps_tree_pos_t) u, ins);
                    /* Bit 10: resulting sizes are wrong. */
                    if (a.header.size !=
                        (bps_tree_pos_t) (i + u)) {
                        result |= (1 << 10);
                        assert(!assertme);
                    }
                    if (b.header.size !=
                        (bps_tree_pos_t) (j - u + 1)) {
                        result |= (1 << 10);
                        assert(!assertme);
                    }
                    /* Bit 11: max copies or order are wrong. */
                    if (i + u)
                        if (ma != a.elems[a.header.size - 1]) {
                            result |= (1 << 11);
                            assert(!assertme);
                        }
                    if (j - u + 1)
                        if (mb != b.elems[b.header.size - 1]) {
                            result |= (1 << 11);
                            assert(!assertme);
                        }
                    c = 0;
                    for (unsigned int v = 0;
                         v < (unsigned int) a.header.size; v++)
                        if (bps_tree_debug_get_elem(
                            a.elems + v) != c++) {
                            result |= (1 << 11);
                            assert(!assertme);
                        }
                    for (unsigned int v = 0;
                         v < (unsigned int) b.header.size; v++)
                        if (bps_tree_debug_get_elem(
                            b.elems + v) != c++) {
                            result |= (1 << 11);
                            assert(!assertme);
                        }
                }
            }
        }
    }
    return result;
}

/**
 * @brief
Check all possible insertion to an inner
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_insert_into_inner(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_INNER;
    /*
     * Inner-block variant of the insert check: enumerate every block
     * size (i) and insertion position (j); both the separator tags
     * and the child ids are pre-filled with 0..i skipping j, so
     * after the insert they must both read back as exactly 0..i.
     */
    for (unsigned int i = 0; i < szlim; i++) {
        for (unsigned int j = 0; j <= i; j++) {
            tree->size = 0;
            struct bps_inner block;
            block.header.type = BPS_TREE_BT_INNER;
            block.header.size = i;
            /* 0xFF marks slots that must never be read. */
            memset(block.elems, 0xFF, sizeof(block.elems));
            memset(block.child_ids, 0xFF, sizeof(block.child_ids));
            bps_tree_elem_t max;
            bps_tree_elem_t ins;
            bps_tree_debug_set_elem(&ins, j);
            struct bps_inner_path_elem path_elem;
            path_elem.block = &block;
            path_elem.block_id = 0;
            path_elem.max_elem_copy = &max;
            path_elem.max_elem_block_id = -1;
            path_elem.max_elem_pos = -1;
            for (unsigned int k = 0; k < i; k++) {
                if (k < j)
                    bps_tree_debug_set_elem_inner(
                        &path_elem, k, k);
                else
                    bps_tree_debug_set_elem_inner(
                        &path_elem, k, k + 1);
            }
            for (unsigned int k = 0; k < i; k++)
                if (k < j)
                    block.child_ids[k] =
                        (bps_tree_block_id_t) k;
                else
                    block.child_ids[k] =
                        (bps_tree_block_id_t) (k + 1);
            bps_tree_insert_into_inner(tree, &path_elem,
                (bps_tree_block_id_t) j, (bps_tree_pos_t) j, ins);
            /* Bit 12: separator layout is wrong after insert. */
            for (unsigned int k = 0; k <= i; k++) {
                if (bps_tree_debug_get_elem_inner(&path_elem, k) !=
                    (unsigned char) k) {
                    result |= (1 << 12);
                    assert(!assertme);
                }
            }
            /* Bit 13: child id layout is wrong after insert. */
            for (unsigned int k = 0; k <= i; k++) {
                if (block.child_ids[k] != k) {
                    result |= (1 << 13);
                    assert(!assertme);
                }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible deletions from an inner
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
*/
static inline int
bps_tree_debug_check_delete_from_inner(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_INNER;
    /*
     * Inner-block variant of the delete check: enumerate every block
     * size (i >= 1) and deletion position (j); after the delete both
     * the separators and the child ids must read back as the
     * original sequence with entry j removed.
     */
    for (unsigned int i = 1; i <= szlim; i++) {
        for (unsigned int j = 0; j < i; j++) {
            struct bps_inner block;
            block.header.type = BPS_TREE_BT_INNER;
            block.header.size = i;
            for (unsigned int k = 0; k < szlim - 1; k++)
                bps_tree_debug_set_elem(block.elems + k, k);
            for (unsigned int k = 0; k < szlim; k++)
                block.child_ids[k] = k;
            struct bps_inner_path_elem path_elem;
            bps_tree_elem_t max;
            bps_tree_debug_set_elem(&max, i - 1);
            path_elem.block = &block;
            path_elem.block_id = 0;
            path_elem.insertion_point = j;
            path_elem.max_elem_copy = &max;
            path_elem.max_elem_block_id = -1;
            path_elem.max_elem_pos = -1;
            bps_tree_delete_from_inner(tree, &path_elem);
            unsigned char c = 0;
            bps_tree_block_id_t kk = 0;
            for (unsigned int k = 0; k < i - 1; k++) {
                /* The deleted entry's tag/id must be skipped. */
                if (k == j) {
                    c++;
                    kk++;
                }
                /* Bit 14: separator layout is wrong. */
                if (bps_tree_debug_get_elem_inner(&path_elem, k) !=
                    c++) {
                    result |= (1 << 14);
                    assert(!assertme);
                }
                /* Bit 15: child id layout is wrong. */
                if (block.child_ids[k] != kk++) {
                    result |= (1 << 15);
                    assert(!assertme);
                }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible moving right of inners
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_move_to_right_inner(struct bps_tree *tree, bool assertme)
{
    (void) assertme;
    int result = 0;
    const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_INNER;
    /*
     * Inner-block variant of the move-right check: for every pair of
     * sizes (i, j) and move count k, both the separator tags and the
     * child ids form consecutive sequences that must stay intact
     * after moving k entries from 'a' to 'b'.
     */
    for (unsigned int i = 0; i <= szlim; i++) {
        for (unsigned int j = 0; j <= szlim; j++) {
            unsigned int max_move = i < szlim - j ? i : szlim - j;
            for (unsigned int k = 1; k <= max_move; k++) {
                struct bps_inner a, b;
                a.header.type = BPS_TREE_BT_INNER;
                a.header.size = i;
                b.header.type = BPS_TREE_BT_INNER;
                b.header.size = j;
                /* 0xFF marks slots that must never be read. */
                memset(a.elems, 0xFF, sizeof(a.elems));
                memset(b.elems, 0xFF, sizeof(b.elems));
                memset(a.child_ids, 0xFF, sizeof(a.child_ids));
                memset(b.child_ids, 0xFF, sizeof(b.child_ids));
                bps_tree_elem_t ma;
                bps_tree_debug_set_elem(&ma, 0xFF);
                bps_tree_elem_t mb;
                bps_tree_debug_set_elem(&mb, 0xFF);
                struct bps_inner_path_elem a_path_elem, b_path_elem;
                a_path_elem.block = &a;
                a_path_elem.max_elem_copy = &ma;
                a_path_elem.max_elem_block_id = -1;
                a_path_elem.max_elem_pos = -1;
                b_path_elem.block = &b;
                b_path_elem.max_elem_copy = &mb;
                b_path_elem.max_elem_block_id = -1;
                b_path_elem.max_elem_pos = -1;
                a_path_elem.block_id = 0;
                b_path_elem.block_id = 0;
                unsigned char c = 0;
                bps_tree_block_id_t kk = 0;
                for (unsigned int u = 0; u < i; u++) {
                    bps_tree_debug_set_elem_inner(
                        &a_path_elem, u, c++);
                    a.child_ids[u] = kk++;
                }
                for (unsigned int u = 0; u < j; u++) {
                    bps_tree_debug_set_elem_inner(
                        &b_path_elem, u, c++);
                    b.child_ids[u] = kk++;
                }
                bps_tree_move_elems_to_right_inner(tree,
                    &a_path_elem, &b_path_elem,
                    (bps_tree_pos_t) k);
                /* Bit 16: resulting sizes are wrong. */
                if (a.header.size != (bps_tree_pos_t) (i - k)) {
                    result |= (1 << 16);
                    assert(!assertme);
                }
                if (b.header.size != (bps_tree_pos_t) (j + k)) {
                    result |= (1 << 16);
                    assert(!assertme);
                }
                c = 0;
                kk = 0;
                /* Bit 17: separator or child id layout is wrong. */
                for (unsigned int u = 0;
                     u < (unsigned int) a.header.size; u++) {
                    if (bps_tree_debug_get_elem_inner(
                        &a_path_elem, u) != c++) {
                        result |= (1 << 17);
                        assert(!assertme);
                    }
                    if (a.child_ids[u] != kk++) {
                        result |= (1 << 17);
                        assert(!assertme);
                    }
                }
                for (unsigned int u = 0;
                     u < (unsigned int) b.header.size; u++) {
                    if (bps_tree_debug_get_elem_inner(
                        &b_path_elem, u) != c++) {
                        result |= (1 << 17);
                        assert(!assertme);
                    }
                    if (b.child_ids[u] != kk++) {
                        result |= (1 << 17);
                        assert(!assertme);
                    }
                }
            }
        }
    }
    return result;
}

/**
 * @brief Check all possible moving left of inners
 * Used for debug self-check
 * @param tree - tree used only to reach the template context
 * @param assertme - if true, any mismatch triggers assert()
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_move_to_left_inner(struct bps_tree *tree, bool assertme)
{
	(void) assertme;
	int result = 0;
	const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_INNER;
	/* Enumerate every pair of fill levels (i elems in a, j in b). */
	for (unsigned int i = 0; i <= szlim; i++) {
		for (unsigned int j = 0; j <= szlim; j++) {
			/*
			 * At most j elems can leave b, and a must not
			 * overflow past szlim.
			 */
			unsigned int max_move = j < szlim - i ? j : szlim - i;
			for (unsigned int k = 1; k <= max_move; k++) {
				/*
				 * Build two inner blocks holding the
				 * consecutive sequence 0..i+j-1 (elems and
				 * child ids alike); 0xFF fill makes stale
				 * reads visible.
				 */
				struct bps_inner a, b;
				a.header.type = BPS_TREE_BT_INNER;
				a.header.size = i;
				b.header.type = BPS_TREE_BT_INNER;
				b.header.size = j;
				memset(a.elems, 0xFF, sizeof(a.elems));
				memset(b.elems, 0xFF, sizeof(b.elems));
				memset(a.child_ids, 0xFF, sizeof(a.child_ids));
				memset(b.child_ids, 0xFF, sizeof(b.child_ids));
				bps_tree_elem_t ma;
				bps_tree_debug_set_elem(&ma, 0xFF);
				bps_tree_elem_t mb;
				bps_tree_debug_set_elem(&mb, 0xFF);
				struct bps_inner_path_elem a_path_elem,
					b_path_elem;
				a_path_elem.block = &a;
				a_path_elem.max_elem_copy = &ma;
				a_path_elem.max_elem_block_id = -1;
				a_path_elem.max_elem_pos = -1;
				b_path_elem.block = &b;
				b_path_elem.max_elem_copy = &mb;
				b_path_elem.max_elem_block_id = -1;
				b_path_elem.max_elem_pos = -1;
				a_path_elem.block_id = 0;
				b_path_elem.block_id = 0;
				unsigned char c = 0;
				bps_tree_block_id_t kk = 0;
				for (unsigned int u = 0; u < i; u++) {
					bps_tree_debug_set_elem_inner(
						&a_path_elem, u, c++);
					a.child_ids[u] = kk++;
				}
				for (unsigned int u = 0; u < j; u++) {
					bps_tree_debug_set_elem_inner(
						&b_path_elem, u, c++);
					b.child_ids[u] = kk++;
				}
				/* Move k leftmost elems of b to tail of a. */
				bps_tree_move_elems_to_left_inner(tree,
					&a_path_elem, &b_path_elem,
					(bps_tree_pos_t) k);
				/* Bit 18: wrong resulting block sizes. */
				if (a.header.size !=
				    (bps_tree_pos_t) (i + k)) {
					result |= (1 << 18);
					assert(!assertme);
				}
				if (b.header.size !=
				    (bps_tree_pos_t) (j - k)) {
					result |= (1 << 18);
					assert(!assertme);
				}
				/*
				 * Bit 19: sequence 0..i+j-1 must still read
				 * in order, a first, then b.
				 */
				c = 0;
				kk = 0;
				for (unsigned int u = 0;
				     u < (unsigned int) a.header.size; u++) {
					if (bps_tree_debug_get_elem_inner(
						&a_path_elem, u) != c++) {
						result |= (1 << 19);
						assert(!assertme);
					}
					if (a.child_ids[u] != kk++) {
						result |= (1 << 19);
						assert(!assertme);
					}
				}
				for (unsigned int u = 0;
				     u < (unsigned int) b.header.size; u++) {
					if (bps_tree_debug_get_elem_inner(
						&b_path_elem, u) != c++) {
						result |= (1 << 19);
						assert(!assertme);
					}
					if (b.child_ids[u] != kk++) {
						result |= (1 << 19);
						assert(!assertme);
					}
				}
			}
		}
	}
	return result;
}

/**
 * @brief Check all possible insertion and moving right of inners
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_insert_and_move_to_right_inner(struct bps_tree *tree,
						    bool assertme)
{
	(void) assertme;
	int result = 0;
	const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_INNER;
	/* i, j - fill levels; k - insertion position; u - elems moved. */
	for (unsigned int i = 0; i <= szlim; i++) {
		for (unsigned int j = 0; j <= szlim; j++) {
			unsigned int max_move =
				i + 1 < szlim - j ? i + 1 : szlim - j;
			for (unsigned int k = 0; k <= i; k++) {
				for (unsigned int u = 1; u <= max_move; u++) {
					struct bps_inner a, b;
					a.header.type = BPS_TREE_BT_INNER;
					a.header.size = i;
					b.header.type = BPS_TREE_BT_INNER;
					b.header.size = j;
					memset(a.elems, 0xFF, sizeof(a.elems));
					memset(b.elems, 0xFF, sizeof(b.elems));
					memset(a.child_ids, 0xFF,
					       sizeof(a.child_ids));
					memset(b.child_ids, 0xFF,
					       sizeof(b.child_ids));
					bps_tree_elem_t ma;
					bps_tree_debug_set_elem(&ma, 0xFF);
					bps_tree_elem_t mb;
					bps_tree_debug_set_elem(&mb, 0xFF);
					struct bps_inner_path_elem a_path_elem,
						b_path_elem;
					a_path_elem.block = &a;
					a_path_elem.max_elem_copy = &ma;
					a_path_elem.max_elem_block_id = -1;
					a_path_elem.max_elem_pos = -1;
					b_path_elem.block = &b;
					b_path_elem.max_elem_copy = &mb;
					b_path_elem.max_elem_block_id = -1;
					b_path_elem.max_elem_pos = -1;
					a_path_elem.block_id = 0;
					b_path_elem.block_id = 0;
					/*
					 * Fill a and b with 0..i+j-1 while
					 * skipping ordinal k in a; ic/ikk
					 * remember the skipped values, to be
					 * inserted back at position k.
					 */
					unsigned char c = 0;
					bps_tree_block_id_t kk = 0;
					unsigned char ic = i + j;
					bps_tree_block_id_t ikk =
						(bps_tree_block_id_t) (i + j);
					for (unsigned int v = 0; v < i; v++) {
						if (v == k) {
							ic = c++;
							ikk = kk++;
						}
						bps_tree_debug_set_elem_inner(
							&a_path_elem, v, c++);
						a.child_ids[v] = kk++;
					}
					/* Insertion at the very end of a. */
					if (k == i) {
						ic = c++;
						ikk = kk++;
					}
					for (unsigned int v = 0;
					     v < j; v++) {
						bps_tree_debug_set_elem_inner(
							&b_path_elem, v, c++);
						b.child_ids[v] = kk++;
					}
					a_path_elem.insertion_point = -1;
					bps_tree_elem_t ins;
					bps_tree_debug_set_elem(&ins, ic);
					bps_tree_insert_and_move_elems_to_right_inner(
						tree, &a_path_elem,
						&b_path_elem,
						(bps_tree_pos_t) u, ikk,
						(bps_tree_pos_t) k, ins);
					/* Bit 20: wrong resulting sizes. */
					if (a.header.size !=
					    (bps_tree_pos_t) (i - u + 1)) {
						result |= (1 << 20);
						assert(!assertme);
					}
					if (b.header.size !=
					    (bps_tree_pos_t) (j + u)) {
						result |= (1 << 20);
						assert(!assertme);
					}
					/* Bit 21: broken element order. */
					c = 0;
					kk = 0;
					for (unsigned int v = 0;
					     v < (unsigned int) a.header.size;
					     v++) {
						if (bps_tree_debug_get_elem_inner(
							&a_path_elem, v) != c++) {
							result |= (1 << 21);
							assert(!assertme);
						}
						if (a.child_ids[v] != kk++) {
							result |= (1 << 21);
							assert(!assertme);
						}
					}
					for (unsigned int v = 0;
					     v < (unsigned int) b.header.size;
					     v++) {
						if (bps_tree_debug_get_elem_inner(
							&b_path_elem, v) != c++) {
							result |= (1 << 21);
							assert(!assertme);
						}
						if (b.child_ids[v] != kk++) {
							result |= (1 << 21);
							assert(!assertme);
						}
					}
				}
			}
		}
	}
	return result;
}

/**
 * @brief Check all possible insertion and moving left of inners
 * Used for debug self-check
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_insert_and_move_to_left_inner(struct bps_tree *tree,
						   bool assertme)
{
	(void) assertme;
	int result = 0;
	const unsigned int szlim = BPS_TREE_MAX_COUNT_IN_INNER;
	/* i, j - fill levels; k - insertion position in b; u - moved. */
	for (unsigned int i = 0; i <= szlim; i++) {
		for (unsigned int j = 0; j <= szlim; j++) {
			unsigned int max_move = j + 1 < szlim - i ?
						j + 1 : szlim - i;
			for (unsigned int k = 0; k <= j; k++) {
				for (unsigned int u = 1; u <= max_move; u++) {
					struct bps_inner a, b;
					a.header.type = BPS_TREE_BT_INNER;
					a.header.size = i;
					b.header.type = BPS_TREE_BT_INNER;
					b.header.size = j;
					memset(a.elems, 0xFF, sizeof(a.elems));
					memset(b.elems, 0xFF, sizeof(b.elems));
					memset(a.child_ids, 0xFF,
					       sizeof(a.child_ids));
					memset(b.child_ids, 0xFF,
					       sizeof(b.child_ids));
					bps_tree_elem_t ma;
					bps_tree_debug_set_elem(&ma, 0xFF);
					bps_tree_elem_t mb;
					bps_tree_debug_set_elem(&mb, 0xFF);
					struct bps_inner_path_elem a_path_elem,
						b_path_elem;
					a_path_elem.block = &a;
					a_path_elem.max_elem_copy = &ma;
					a_path_elem.max_elem_block_id = -1;
					a_path_elem.max_elem_pos = -1;
					b_path_elem.block = &b;
					b_path_elem.max_elem_copy = &mb;
					b_path_elem.max_elem_block_id = -1;
					b_path_elem.max_elem_pos = -1;
					a_path_elem.block_id = 0;
					b_path_elem.block_id = 0;
					/*
					 * ic/ikk start as i+j: if k == j the
					 * skip branch below never fires and
					 * that initial value is already the
					 * correct last ordinal.
					 */
					unsigned char c = 0;
					bps_tree_block_id_t kk = 0;
					unsigned char ic = i + j;
					bps_tree_block_id_t ikk =
						(bps_tree_block_id_t) (i + j);
					for (unsigned int v = 0; v < i; v++) {
						bps_tree_debug_set_elem_inner(
							&a_path_elem, v, c++);
						a.child_ids[v] = kk++;
					}
					for (unsigned int v = 0; v < j; v++) {
						if (v == k) {
							ic = c++;
							ikk = kk++;
						}
						bps_tree_debug_set_elem_inner(
							&b_path_elem, v, c++);
						b.child_ids[v] = kk++;
					}
					b_path_elem.insertion_point = -1;
					bps_tree_elem_t ins;
					bps_tree_debug_set_elem(&ins, ic);
					bps_tree_insert_and_move_elems_to_left_inner(
						tree, &a_path_elem,
						&b_path_elem,
						(bps_tree_pos_t) u, ikk,
						(bps_tree_pos_t) k, ins);
					/* Bit 22: wrong resulting sizes. */
					if (a.header.size !=
					    (bps_tree_pos_t) (i + u)) {
						result |= (1 << 22);
						assert(!assertme);
					}
					if (b.header.size !=
					    (bps_tree_pos_t) (j - u + 1)) {
						result |= (1 << 22);
						assert(!assertme);
					}
					/* Bit 23: broken element order. */
					c = 0;
					kk = 0;
					for (unsigned int v = 0;
					     v < (unsigned int) a.header.size;
					     v++) {
						if (bps_tree_debug_get_elem_inner(
							&a_path_elem, v) != c++) {
							result |= (1 << 23);
							assert(!assertme);
						}
						if (a.child_ids[v] != kk++) {
							result |= (1 << 23);
							assert(!assertme);
						}
					}
					for (unsigned int v = 0;
					     v < (unsigned int)
					     b.header.size; v++) {
						if (bps_tree_debug_get_elem_inner(
							&b_path_elem, v) != c++) {
							result |= (1 << 23);
							assert(!assertme);
						}
						if (b.child_ids[v] != kk++) {
							result |= (1 << 23);
							assert(!assertme);
						}
					}
				}
			}
		}
	}
	return result;
}

/**
 * @brief Run every internal-function self-check above: exhaustive
 * insert/delete/move tests for both leaf and inner blocks.
 * Each sub-check reports errors in its own bit range of the result.
 * @param assertme - if true, errors will lead to assert call,
 * if false, just error code will be returned.
 * @return 0 if OK; bit mask of errors otherwise.
 */
static inline int
bps_tree_debug_check_internal_functions(bool assertme)
{
	int result = 0;
	/*
	 * A dummy tree: sub-checks operate on stack-allocated blocks
	 * only, so an invalid root id is fine.
	 */
	struct bps_tree tree;
	tree.root_id = (bps_tree_block_id_t) -1;
	result |= bps_tree_debug_check_insert_into_leaf(&tree, assertme);
	result |= bps_tree_debug_check_delete_from_leaf(&tree, assertme);
	result |= bps_tree_debug_check_move_to_right_leaf(&tree, assertme);
	result |= bps_tree_debug_check_move_to_left_leaf(&tree, assertme);
	result |= bps_tree_debug_check_insert_and_move_to_right_leaf(&tree,
								     assertme);
	result |= bps_tree_debug_check_insert_and_move_to_left_leaf(&tree,
								    assertme);
	result |= bps_tree_debug_check_insert_into_inner(&tree, assertme);
	result |= bps_tree_debug_check_delete_from_inner(&tree, assertme);
	result |= bps_tree_debug_check_move_to_right_inner(&tree, assertme);
	result |= bps_tree_debug_check_move_to_left_inner(&tree, assertme);
	result |= bps_tree_debug_check_insert_and_move_to_right_inner(&tree,
								      assertme);
	result |= bps_tree_debug_check_insert_and_move_to_left_inner(&tree,
								     assertme);
	return result;
}

#endif //#ifndef BPS_TREE_NO_DEBUG

/* }}} */

#undef BPS_TREE_MEMMOVE
#undef BPS_TREE_DATAMOVE
#undef BPS_TREE_BRANCH_TRACE

/* {{{ Macros for custom naming of structs and functions */
#undef _bps
#undef _bps_tree
#undef _BPS
#undef _BPS_TREE
#undef _bps_tree_name
#undef bps_tree
#undef bps_block
#undef bps_leaf
#undef bps_inner
#undef bps_garbage
#undef bps_tree_iterator
#undef bps_inner_path_elem
#undef bps_leaf_path_elem
#undef bps_tree_create
#undef bps_tree_build
#undef bps_tree_destroy
#undef bps_tree_find #undef bps_tree_insert #undef bps_tree_delete #undef bps_tree_size #undef bps_tree_mem_used #undef bps_tree_random #undef bps_tree_invalid_iterator #undef bps_tree_iterator_is_invalid #undef bps_tree_iterator_are_equal #undef bps_tree_iterator_first #undef bps_tree_iterator_last #undef bps_tree_lower_bound #undef bps_tree_upper_bound #undef bps_tree_lower_bound_elem #undef bps_tree_upper_bound_elem #undef bps_tree_approximate_count #undef bps_tree_iterator_get_elem #undef bps_tree_iterator_next #undef bps_tree_iterator_prev #undef bps_tree_iterator_freeze #undef bps_tree_iterator_destroy #undef bps_tree_debug_check #undef bps_tree_print #undef bps_tree_debug_check_internal_functions #undef bps_tree_max_sizes #undef BPS_TREE_MAX_COUNT_IN_LEAF #undef BPS_TREE_MAX_COUNT_IN_INNER #undef BPS_TREE_MAX_DEPTH #undef bps_block_type #undef BPS_TREE_BT_GARBAGE #undef BPS_TREE_BT_INNER #undef BPS_TREE_BT_LEAF #undef bps_tree_restore_block #undef bps_tree_restore_block_ver #undef bps_tree_root #undef bps_tree_touch_block #undef bps_tree_find_ins_point_key #undef bps_tree_find_ins_point_elem #undef bps_tree_find_after_ins_point_key #undef bps_tree_find_after_ins_point_elem #undef bps_tree_get_leaf_safe #undef bps_tree_garbage_push #undef bps_tree_garbage_pop #undef bps_tree_create_leaf #undef bps_tree_create_inner #undef bps_tree_dispose_leaf #undef bps_tree_dispose_inner #undef bps_tree_reserve_blocks #undef bps_tree_insert_first_elem #undef bps_tree_collect_path #undef bps_tree_touch_leaf_path_max_elem #undef bps_tree_touch_path #undef bps_tree_process_replace #undef bps_tree_debug_memmove #undef bps_tree_insert_into_leaf #undef bps_tree_insert_into_inner #undef bps_tree_delete_from_leaf #undef bps_tree_delete_from_inner #undef bps_tree_move_elems_to_right_leaf #undef bps_tree_move_elems_to_right_inner #undef bps_tree_move_elems_to_left_leaf #undef bps_tree_move_elems_to_left_inner #undef bps_tree_insert_and_move_elems_to_right_leaf #undef 
bps_tree_insert_and_move_elems_to_right_inner #undef bps_tree_insert_and_move_elems_to_left_leaf #undef bps_tree_insert_and_move_elems_to_left_inner #undef bps_tree_leaf_free_size #undef bps_tree_inner_free_size #undef bps_tree_leaf_overmin_size #undef bps_tree_inner_overmin_size #undef bps_tree_collect_left_path_elem_leaf #undef bps_tree_collect_left_path_elem_inner #undef bps_tree_collect_right_ext_leaf #undef bps_tree_collect_right_ext_inner #undef bps_tree_prepare_new_ext_leaf #undef bps_tree_prepare_new_ext_inner #undef bps_tree_process_insert_leaf #undef bps_tree_process_insert_inner #undef bps_tree_process_delete_leaf #undef bps_tree_process_delete_inner #undef bps_tree_debug_find_max_elem #undef bps_tree_debug_check_block #undef bps_tree_print_indent #undef bps_tree_print_block #undef bps_tree_print_leaf #undef bps_tree_print_inner #undef bps_tree_debug_set_elem #undef bps_tree_debug_get_elem #undef bps_tree_debug_set_elem_inner #undef bps_tree_debug_get_elem_inner #undef bps_tree_debug_check_insert_into_leaf #undef bps_tree_debug_check_delete_from_leaf #undef bps_tree_debug_check_move_to_right_leaf #undef bps_tree_debug_check_move_to_left_leaf #undef bps_tree_debug_check_insert_and_move_to_right_leaf #undef bps_tree_debug_check_insert_and_move_to_left_leaf #undef bps_tree_debug_check_insert_into_inner #undef bps_tree_debug_check_delete_from_inner #undef bps_tree_debug_check_move_to_right_inner #undef bps_tree_debug_check_move_to_left_inner #undef bps_tree_debug_check_insert_and_move_to_right_inner #undef bps_tree_debug_check_insert_and_move_to_left_inner /* }}} */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/light.h0000664000000000000000000010223313306560010020102 0ustar rootroot/* * *No header guard*: the header is allowed to be included twice * with different sets of defines. */ /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "small/matras.h" /** * Additional user defined name that appended to prefix 'light' * for all names of structs and functions in this header file. * All names use pattern: light_ * May be empty, but still have to be defined (just #define LIGHT_NAME) * Example: * #define LIGHT_NAME _test * ... * struct light_test_core hash_table; * light_test_create(&hash_table, ...); */ #ifndef LIGHT_NAME #error "LIGHT_NAME must be defined" #endif /** * Data type that hash table holds. Must be less tant 8 bytes. */ #ifndef LIGHT_DATA_TYPE #error "LIGHT_DATA_TYPE must be defined" #endif /** * Data type that used to for finding values. */ #ifndef LIGHT_KEY_TYPE #error "LIGHT_KEY_TYPE must be defined" #endif /** * Type of optional third parameter of comparing function. 
* If not needed, simply use #define LIGHT_CMP_ARG_TYPE int */ #ifndef LIGHT_CMP_ARG_TYPE #error "LIGHT_CMP_ARG_TYPE must be defined" #endif /** * Data comparing function. Takes 3 parameters - value1, value2 and * optional value that stored in hash table struct. * Third parameter may be simply ignored like that: * #define LIGHT_EQUAL(a, b, garb) a == b */ #ifndef LIGHT_EQUAL #error "LIGHT_EQUAL must be defined" #endif /** * Data comparing function. Takes 3 parameters - value, key and * optional value that stored in hash table struct. * Third parameter may be simply ignored like that: * #define LIGHT_EQUAL_KEY(a, b, garb) a == b */ #ifndef LIGHT_EQUAL_KEY #error "LIGHT_EQUAL_KEY must be defined" #endif /** * Tools for name substitution: */ #ifndef CONCAT4 #define CONCAT4_R(a, b, c, d) a##b##c##d #define CONCAT4(a, b, c, d) CONCAT4_R(a, b, c, d) #endif #ifdef _ #error '_' must be undefinded! #endif #define LIGHT(name) CONCAT4(light, LIGHT_NAME, _, name) /** * Overhead per value stored in a hash table. * Must be adjusted if struct LIGHT(record) is modified. */ enum { LIGHT_RECORD_OVERHEAD = 8 }; /** * Struct for one record of the hash table */ struct LIGHT(record) { /* hash of a value */ uint32_t hash; /* slot of the next record in chain */ uint32_t next; /* the value */ union { LIGHT_DATA_TYPE value; uint32_t empty_next; /* Round record size up to nearest power of two. 
*/ uint8_t padding[(1 << (32 - __builtin_clz(sizeof(LIGHT_DATA_TYPE) + LIGHT_RECORD_OVERHEAD - 1))) - LIGHT_RECORD_OVERHEAD]; }; }; /* Number of records added while grow iteration */ enum { LIGHT_GROW_INCREMENT = 8 }; /** * Main struct for holding hash table */ struct LIGHT(core) { /* count of values in hash table */ uint32_t count; /* size of hash table ( equal to mtable.size ) */ uint32_t table_size; /* * cover is power of two; * if count is positive, then cover/2 < count <= cover * cover_mask is cover - 1 */ uint32_t cover_mask; /* * Start of chain of empty slots */ uint32_t empty_slot; /* additional parameter for data comparison */ LIGHT_CMP_ARG_TYPE arg; /* dynamic storage for records */ struct matras mtable; }; /** * Iterator, for iterating all values in hash_table. * It also may be used for restoring one value by key. */ struct LIGHT(iterator) { /* Current position on table (ID of a current record) */ uint32_t slotpos; /* Version of matras memory for MVCC */ struct matras_view view; }; /** * Type of functions for memory allocation and deallocation */ typedef void *(*LIGHT(extent_alloc_t))(void *ctx); typedef void (*LIGHT(extent_free_t))(void *ctx, void *extent); /** * Special result of light_find that means that nothing was found * Must be equal or greater than possible hash table size */ static const uint32_t LIGHT(end) = 0xFFFFFFFF; /* Functions declaration */ /** * @brief Hash table construction. Fills struct light members. 
* @param ht - pointer to a hash table struct * @param extent_size - size of allocating memory blocks * @param extent_alloc_func - memory blocks allocation function * @param extent_free_func - memory blocks allocation function * @param alloc_ctx - argument passed to memory block allocator * @param arg - optional parameter to save for comparing function */ static inline void LIGHT(create)(struct LIGHT(core) *ht, size_t extent_size, LIGHT(extent_alloc_t) extent_alloc_func, LIGHT(extent_free_t) extent_free_func, void *alloc_ctx, LIGHT_CMP_ARG_TYPE arg); /** * @brief Hash table destruction. Frees all allocated memory * @param ht - pointer to a hash table struct */ static inline void LIGHT(destroy)(struct LIGHT(core) *ht); /** * @brief Find a record with given hash and value * @param ht - pointer to a hash table struct * @param hash - hash to find * @param data - value to find * @return integer ID of found record or light_end if nothing found */ static inline uint32_t LIGHT(find)(const struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE data); /** * @brief Find a record with given hash and key * @param ht - pointer to a hash table struct * @param hash - hash to find * @param data - key to find * @return integer ID of found record or light_end if nothing found */ static inline uint32_t LIGHT(find_key)(const struct LIGHT(core) *ht, uint32_t hash, LIGHT_KEY_TYPE data); /** * @brief Insert a record with given hash and value * @param ht - pointer to a hash table struct * @param hash - hash to insert * @param data - value to insert * @return integer ID of inserted record or light_end if failed */ static inline uint32_t LIGHT(insert)(struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE data); /** * @brief Replace a record with given hash and value * @param ht - pointer to a hash table struct * @param hash - hash to find * @param data - value to find and replace * @param replaced - pointer to a value that was stored in table before replace * @return integer ID of found 
record or light_end if nothing found */ static inline uint32_t LIGHT(replace)(struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE data, LIGHT_DATA_TYPE *replaced); /** * @brief Delete a record from a hash table by given record ID * @param ht - pointer to a hash table struct * @param slotpos - ID of an record. See LIGHT(find) for details. * @return 0 if ok, -1 on memory error (only with freezed iterators) */ static inline int LIGHT(delete)(struct LIGHT(core) *ht, uint32_t slotpos); /** * @brief Delete a record from a hash table by that value and its hash. * @param ht - pointer to a hash table struct * @param slotpos - ID of an record. See LIGHT(find) for details. * @return 0 if ok, 1 if not found or -1 on memory error * (only with freezed iterators) */ static inline int LIGHT(delete_value)(struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE value); /** * @brief Get a value from a desired position * @param ht - pointer to a hash table struct * @param slotpos - ID of an record * ID must be vaild, check it by light_pos_valid (asserted). */ static inline LIGHT_DATA_TYPE LIGHT(get)(struct LIGHT(core) *ht, uint32_t slotpos); /** * @brief Determine if posision holds a value * @param ht - pointer to a hash table struct * @param slotpos - ID of an record * ID must be in valid range [0, ht->table_size) (asserted). 
 */
static inline bool
LIGHT(pos_valid)(struct LIGHT(core) *ht, uint32_t slotpos);

/**
 * @brief Set iterator to the beginning of hash table
 * @param ht - pointer to a hash table struct
 * @param itr - iterator to set
 */
static inline void
LIGHT(iterator_begin)(const struct LIGHT(core) *ht,
		      struct LIGHT(iterator) *itr);

/**
 * @brief Set iterator to position determined by key
 * @param ht - pointer to a hash table struct
 * @param itr - iterator to set
 * @param hash - hash to find
 * @param data - key to find
 */
static inline void
LIGHT(iterator_key)(const struct LIGHT(core) *ht, struct LIGHT(iterator) *itr,
		    uint32_t hash, LIGHT_KEY_TYPE data);

/**
 * @brief Get the value that iterator currently points to
 * @param ht - pointer to a hash table struct
 * @param itr - iterator to set
 * @return pointer to the value or NULL if iteration is complete
 */
static inline LIGHT_DATA_TYPE *
LIGHT(iterator_get_and_next)(const struct LIGHT(core) *ht,
			     struct LIGHT(iterator) *itr);

/**
 * @brief Freezes state for given iterator. All following hash table
 * modification will not apply to that iterator iteration. That iterator
 * should be destroyed with a light_iterator_destroy call after usage.
 * @param ht - pointer to a hash table struct
 * @param itr - iterator to freeze
 */
static inline void
LIGHT(iterator_freeze)(struct LIGHT(core) *ht, struct LIGHT(iterator) *itr);

/**
 * @brief Destroy an iterator that was frozen before. Useless for not
 * frozen iterators.
 * @param ht - pointer to a hash table struct
 * @param itr - iterator to destroy
 */
static inline void
LIGHT(iterator_destroy)(struct LIGHT(core) *ht, struct LIGHT(iterator) *itr);

/* Functions definition */

/**
 * @brief Hash table construction. Fills struct light members.
 * Does not allocate memory for records; the matras storage is only
 * initialized here, the first extent is allocated on first insert.
 * @param ht - pointer to a hash table struct
 * @param extent_size - size of allocating memory blocks
 * @param extent_alloc_func - memory blocks allocation function
 * @param extent_free_func - memory blocks deallocation function
 * @param alloc_ctx - argument passed to memory block allocator
 * @param arg - optional parameter to save for comparing function
 */
static inline void
LIGHT(create)(struct LIGHT(core) *ht, size_t extent_size,
	      LIGHT(extent_alloc_t) extent_alloc_func,
	      LIGHT(extent_free_t) extent_free_func,
	      void *alloc_ctx, LIGHT_CMP_ARG_TYPE arg)
{
	/* The grow step must be a power of two - slot math relies on it. */
	assert((LIGHT_GROW_INCREMENT & (LIGHT_GROW_INCREMENT - 1)) == 0);
	/* Empty records overlay a uint32_t link over the value field. */
	assert(sizeof(LIGHT_DATA_TYPE) >= sizeof(uint32_t));
	ht->count = 0;
	ht->table_size = 0;
	ht->empty_slot = LIGHT(end);
	ht->arg = arg;
	matras_create(&ht->mtable,
		      extent_size, sizeof(struct LIGHT(record)),
		      extent_alloc_func, extent_free_func, alloc_ctx);
}

/**
 * @brief Hash table destruction. Frees all allocated memory
 * @param ht - pointer to a hash table struct
 */
static inline void
LIGHT(destroy)(struct LIGHT(core) *ht)
{
	matras_destroy(&ht->mtable);
}

/**
 * Find a slot (index in the hash table), where an item with
 * given hash should be placed.
 */
static inline uint32_t
LIGHT(slot)(const struct LIGHT(core) *ht, uint32_t hash)
{
	/* cover_mask is 2^m - 1, but table_size may be below 2^m. */
	uint32_t cover_mask = ht->cover_mask;
	uint32_t res = hash & cover_mask;
	/*
	 * probe is 1 iff res >= table_size (the subtraction wraps and
	 * the sign bit is extracted), i.e. the candidate slot is not
	 * allocated yet.
	 */
	uint32_t probe = (ht->table_size - res - 1) >> 31;
	/*
	 * shift = m - 1: clearing the top cover bit folds an
	 * out-of-range slot back into the allocated lower half.
	 */
	uint32_t shift = __builtin_ctz(~(cover_mask >> 1));
	res ^= (probe << shift);
	return res;
}

/**
 * @brief Find a record with given hash and value
 * @param ht - pointer to a hash table struct
 * @param hash - hash to find
 * @param data - value to find
 * @return integer ID of found record or light_end if nothing found
 */
static inline uint32_t
LIGHT(find)(const struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE value)
{
	if (ht->count == 0)
		return LIGHT(end);
	uint32_t slot = LIGHT(slot)(ht, hash);
	struct LIGHT(record) *record = (struct LIGHT(record) *)
		matras_get(&ht->mtable, slot);
	/* A record whose next points to itself marks an empty slot. */
	if (record->next == slot)
		return LIGHT(end);
	/* Walk the collision chain rooted at this slot. */
	while (1) {
		if (record->hash == hash
		    && LIGHT_EQUAL((record->value), (value), (ht->arg)))
			return slot;
		slot = record->next;
		if (slot == LIGHT(end))
			return LIGHT(end);
		record = (struct LIGHT(record) *)
			matras_get(&ht->mtable, slot);
	}
	/* unreachable */
	return LIGHT(end);
}

/**
 * @brief Find a record with given hash and key
 * @param ht - pointer to a hash table struct
 * @param hash - hash to find
 * @param data - key to find
 * @return integer ID of found record or light_end if nothing found
 */
static inline uint32_t
LIGHT(find_key)(const struct LIGHT(core) *ht, uint32_t hash,
		LIGHT_KEY_TYPE key)
{
	if (ht->count == 0)
		return LIGHT(end);
	uint32_t slot = LIGHT(slot)(ht, hash);
	struct LIGHT(record) *record = (struct LIGHT(record) *)
		matras_get(&ht->mtable, slot);
	/* A record whose next points to itself marks an empty slot. */
	if (record->next == slot)
		return LIGHT(end);
	while (1) {
		if (record->hash == hash
		    && LIGHT_EQUAL_KEY((record->value), (key), (ht->arg)))
			return slot;
		slot = record->next;
		if (slot == LIGHT(end))
			return LIGHT(end);
		record = (struct LIGHT(record) *)
			matras_get(&ht->mtable, slot);
	}
	/* unreachable */
	return LIGHT(end);
}

/**
 * @brief Replace a record with given hash and value
 * @param ht - pointer to a hash table struct
 * @param hash - hash to find
 * @param data - value to find and replace
 * @param replaced - pointer to a value that was stored in table before
 * replace
 * @return integer ID of found record or light_end if nothing found
 */
static inline uint32_t
LIGHT(replace)(struct LIGHT(core) *ht, uint32_t hash,
	       LIGHT_DATA_TYPE value, LIGHT_DATA_TYPE *replaced)
{
	if (ht->count == 0)
		return LIGHT(end);
	uint32_t slot = LIGHT(slot)(ht, hash);
	struct LIGHT(record) *record = (struct LIGHT(record) *)
		matras_get(&ht->mtable, slot);
	/* A record whose next points to itself marks an empty slot. */
	if (record->next == slot)
		return LIGHT(end);
	while (1) {
		if (record->hash == hash
		    && LIGHT_EQUAL((record->value), (value), (ht->arg))) {
			/*
			 * The record is about to be modified - the slot
			 * must be touched (copied for MVCC) first; a
			 * failed touch is a memory failure.
			 */
			record = (struct LIGHT(record) *)
				matras_touch(&ht->mtable, slot);
			if (!record)
				return LIGHT(end);
			*replaced = record->value;
			record->value = value;
			return slot;
		}
		slot = record->next;
		if (slot == LIGHT(end))
			return LIGHT(end);
		record = (struct LIGHT(record) *)
			matras_get(&ht->mtable, slot);
	}
	/* unreachable */
	return LIGHT(end);
}

/*
 * Empty records (that do not store value) are linked into doubly linked
 * list. Get a slot of the previous record in that list.
 * The unused hash field of an empty record stores the prev link.
 */
static inline uint32_t
LIGHT(get_empty_prev)(struct LIGHT(record) *record)
{
	return record->hash;
}

/*
 * Empty records (that do not store value) are linked into doubly linked
 * list. Set a slot of the previous record in that list.
 */
static inline void
LIGHT(set_empty_prev)(struct LIGHT(record) *record, uint32_t pos)
{
	record->hash = pos;
}

/*
 * Empty records (that do not store value) are linked into doubly linked
 * list. Get a slot of the next record in that list.
 * The next link lives in the union overlaying the value field.
 */
static inline uint32_t
LIGHT(get_empty_next)(struct LIGHT(record) *record)
{
	return record->empty_next;
}

/*
 * Empty records (that do not store value) are linked into doubly linked
 * list. Set a slot of the next record in that list.
 */
static inline void
LIGHT(set_empty_next)(struct LIGHT(record) *record, uint32_t pos)
{
	record->empty_next = pos;
}

/*
 * Empty records (that do not store value) are linked into doubly linked
 * list. Add given record with given slot to that list.
 * Touches matras of the record.
 * @return 0 on success, -1 on memory failure while touching the old
 * list head.
 */
static inline int
LIGHT(enqueue_empty)(struct LIGHT(core) *ht, uint32_t slot,
		     struct LIGHT(record) *record)
{
	/* Self-referencing next is the "slot is empty" marker. */
	record->next = slot;
	if (ht->empty_slot != LIGHT(end)) {
		struct LIGHT(record) *empty_record = (struct LIGHT(record) *)
			matras_touch(&ht->mtable, ht->empty_slot);
		if (!empty_record)
			return -1;
		LIGHT(set_empty_prev)(empty_record, slot);
	}
	/* Push to the front of the empty list. */
	LIGHT(set_empty_prev)(record, LIGHT(end));
	LIGHT(set_empty_next)(record, ht->empty_slot);
	ht->empty_slot = slot;
	return 0;
}

/*
 * Empty records (that do not store value) are linked into doubly linked
 * list. Remove from list first record of that list and return that record.
 * Touches matras of result and all changing records.
 * Returns 0 (NULL) on memory failure.
 */
static inline struct LIGHT(record) *
LIGHT(detach_first_empty)(struct LIGHT(core) *ht)
{
	assert(ht->empty_slot != LIGHT(end));
	struct LIGHT(record) *empty_record = (struct LIGHT(record) *)
		matras_touch(&ht->mtable, ht->empty_slot);
	if (!empty_record)
		return 0;
	assert(empty_record == (struct LIGHT(record) *)
		matras_get(&ht->mtable, ht->empty_slot));
	assert(empty_record->next == ht->empty_slot);
	uint32_t new_empty_slot = LIGHT(get_empty_next)(empty_record);
	if (new_empty_slot != LIGHT(end)) {
		struct LIGHT(record) *new_empty_record =
			(struct LIGHT(record) *)
			matras_touch(&ht->mtable, new_empty_slot);
		if (!new_empty_record)
			return 0;
		LIGHT(set_empty_prev)(new_empty_record, LIGHT(end));
	}
	ht->empty_slot = new_empty_slot;
	return empty_record;
}

/*
 * Empty records (that do not store value) are linked into doubly linked list.
* Remove from list the record by given slot and return that record * Touches matras of result and all changing records */ static inline struct LIGHT(record) * LIGHT(detach_empty)(struct LIGHT(core) *ht, uint32_t slot) { struct LIGHT(record) *record = (struct LIGHT(record) *) matras_touch(&ht->mtable, slot); if (!record) return 0; uint32_t prev_slot = LIGHT(get_empty_prev)(record); uint32_t next_slot = LIGHT(get_empty_next)(record); struct LIGHT(record) *prev_record = 0; if (prev_slot != LIGHT(end)) { prev_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, prev_slot); if (!prev_record) return 0; } struct LIGHT(record) *next_record = 0; if (next_slot != LIGHT(end)) { next_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, next_slot); if (!next_record) return 0; } if (prev_slot != LIGHT(end)) { LIGHT(set_empty_next)(prev_record, next_slot); } else { ht->empty_slot = next_slot; } if (next_slot != LIGHT(end)) { LIGHT(set_empty_prev)(next_record, prev_slot); } return record; } /* * Allocate memory and initialize empty list to get ready for first insertion */ static inline int LIGHT(prepare_first_insert)(struct LIGHT(core) *ht) { assert(ht->count == 0); assert(ht->table_size == 0); assert(ht->mtable.head.block_count == 0); uint32_t slot; struct LIGHT(record) *record = (struct LIGHT(record) *) matras_alloc_range(&ht->mtable, &slot, LIGHT_GROW_INCREMENT); if (!record) return -1; assert(slot == 0); ht->table_size = LIGHT_GROW_INCREMENT; ht->cover_mask = LIGHT_GROW_INCREMENT - 1; ht->empty_slot = 0; for (int i = 0; i < LIGHT_GROW_INCREMENT; i++) { record[i].next = i; LIGHT(set_empty_prev)(record + i, i - 1); LIGHT(set_empty_next)(record + i, i + 1); } LIGHT(set_empty_prev)(record, LIGHT(end)); LIGHT(set_empty_next)(record + LIGHT_GROW_INCREMENT - 1, LIGHT(end)); return 0; } /* * Enlarge hash table to store more values */ static inline int LIGHT(grow)(struct LIGHT(core) *ht) { assert(ht->empty_slot == LIGHT(end)); uint32_t new_slot; struct LIGHT(record) 
*new_record = (struct LIGHT(record) *) matras_alloc_range(&ht->mtable, &new_slot, LIGHT_GROW_INCREMENT); if (!new_record) /* memory failure */ return -1; new_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, new_slot); if (!new_record) { /* memory failure */ matras_dealloc_range(&ht->mtable, LIGHT_GROW_INCREMENT); return -1; } uint32_t save_cover_mask = ht->cover_mask; ht->table_size += LIGHT_GROW_INCREMENT; if (ht->cover_mask < ht->table_size - 1) ht->cover_mask = (ht->cover_mask << 1) | (uint32_t)1; uint32_t split_comm_mask = (ht->cover_mask >> 1); uint32_t split_diff_mask = ht->cover_mask ^ split_comm_mask; uint32_t susp_slot = new_slot & split_comm_mask; struct LIGHT(record) *susp_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, susp_slot); if (!susp_record) { matras_dealloc_range(&ht->mtable, LIGHT_GROW_INCREMENT); ht->cover_mask = save_cover_mask; ht->table_size -= LIGHT_GROW_INCREMENT; return -1; } for (int i = 0; i < LIGHT_GROW_INCREMENT; i++, susp_slot++, susp_record++, new_slot++, new_record++) { if (susp_record->next == susp_slot) { /* Suspicious slot is empty, nothing to split */ LIGHT(enqueue_empty)(ht, new_slot, new_record); continue; } if ((susp_record->hash & split_comm_mask) != susp_slot) { /* Another chain in suspicious slot, nothing to split */ LIGHT(enqueue_empty)(ht, new_slot, new_record); continue; } uint32_t chain_head_slot[2] = {susp_slot, new_slot}; struct LIGHT(record) *chain_head[2] = {susp_record, new_record}; struct LIGHT(record) *chain_tail[2] = {0, 0}; uint32_t shift = __builtin_ctz(split_diff_mask); assert(split_diff_mask == (((uint32_t)1) << shift)); uint32_t last_empty_slot = new_slot; uint32_t prev_flag = 0; struct LIGHT(record) *test_record = susp_record; uint32_t test_slot = susp_slot; struct LIGHT(record) *prev_record = 0; uint32_t prev_slot = LIGHT(end); while (1) { uint32_t test_flag = (test_record->hash >> shift) & ((uint32_t)1); if (test_flag != prev_flag) { if (prev_slot != LIGHT(end)) prev_record = 
(struct LIGHT(record) *) matras_touch(&ht->mtable, prev_slot); /* TODO: check the result */ chain_tail[prev_flag] = prev_record; if (chain_tail[test_flag]) { chain_tail[test_flag]->next = test_slot; } else { *chain_head[test_flag] = *test_record; last_empty_slot = test_slot; test_slot = chain_head_slot[test_flag]; } prev_flag = test_flag; } prev_slot = test_slot; test_slot = test_record->next; if (test_slot == LIGHT(end)) break; test_record = (struct LIGHT(record) *) matras_get(&ht->mtable, test_slot); } prev_flag = prev_flag ^ ((uint32_t)1); if (chain_tail[prev_flag]) chain_tail[prev_flag]->next = LIGHT(end); struct LIGHT(record) *last_empty_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, last_empty_slot); LIGHT(enqueue_empty)(ht, last_empty_slot, last_empty_record); } return 0; } /** * @brief Insert a record with given hash and value * @param ht - pointer to a hash table struct * @param hash - hash to insert * @param data - value to insert * @return integer ID of inserted record or light_end if failed */ static inline uint32_t LIGHT(insert)(struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE value) { if (ht->table_size == 0) if (LIGHT(prepare_first_insert)(ht)) return LIGHT(end); if (ht->empty_slot == LIGHT(end)) if (LIGHT(grow)(ht)) return LIGHT(end); assert(ht->table_size == ht->mtable.head.block_count); ht->count++; uint32_t slot = LIGHT(slot)(ht, hash); struct LIGHT(record) *record = (struct LIGHT(record) *) matras_touch(&ht->mtable, slot); if (!record) return LIGHT(end); if (record->next == slot) { /* Inserting to an empty slot */ record = LIGHT(detach_empty)(ht, slot); if (!record) return LIGHT(end); record->value = value; record->hash = hash; record->next = LIGHT(end); return slot; } uint32_t chain_slot = LIGHT(slot)(ht, record->hash); struct LIGHT(record) *chain = 0; if (chain_slot != slot) { chain = (struct LIGHT(record) *) matras_get(&ht->mtable, chain_slot); while (chain->next != slot) { chain_slot = chain->next; chain = (struct 
LIGHT(record) *) matras_get(&ht->mtable, chain_slot); } chain = (struct LIGHT(record) *) matras_touch(&ht->mtable, chain_slot); if (!chain) return LIGHT(end); } uint32_t empty_slot = ht->empty_slot; struct LIGHT(record) *empty_record = LIGHT(detach_first_empty)(ht); if (!empty_record) return LIGHT(end); if (chain_slot == slot) { /* add to existing chain */ empty_record->value = value; empty_record->hash = hash; empty_record->next = record->next; record->next = empty_slot; return empty_slot; } else { /* create new chain */ *empty_record = *record; chain->next = empty_slot; record->value = value; record->hash = hash; record->next = LIGHT(end); return slot; } } /** * @brief Delete a record from a hash table by given record ID * @param ht - pointer to a hash table struct * @param slotpos - ID of an record. See LIGHT(find) for details. * @return 0 if ok, -1 on memory error (only with freezed iterators) */ static inline int LIGHT(delete)(struct LIGHT(core) *ht, uint32_t slot) { assert(slot < ht->table_size); uint32_t empty_slot; struct LIGHT(record) *empty_record; struct LIGHT(record) *record = (struct LIGHT(record) *) matras_touch(&ht->mtable, slot); if (!record) return -1; assert(record->next != slot); if (ht->empty_slot != LIGHT(end)) { if (!matras_touch(&ht->mtable, ht->empty_slot)) return -1; } if (record->next != LIGHT(end)) { empty_slot = record->next; empty_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, empty_slot); if (!empty_record) return -1; *record = *empty_record; } else { empty_slot = slot; empty_record = record; uint32_t chain_slot = LIGHT(slot)(ht, record->hash); if (chain_slot != slot) { /* deleting a last record of chain */ struct LIGHT(record) *chain = (struct LIGHT(record) *) matras_get(&ht->mtable, chain_slot); uint32_t chain_next_slot = chain->next; assert(chain_next_slot != LIGHT(end)); while (chain_next_slot != slot) { chain_slot = chain_next_slot; chain = (struct LIGHT(record) *) matras_get(&ht->mtable, chain_slot); chain_next_slot 
= chain->next; assert(chain_next_slot != LIGHT(end)); } chain = (struct LIGHT(record) *) matras_touch(&ht->mtable, chain_slot); if (!chain) return -1; chain->next = LIGHT(end); } } LIGHT(enqueue_empty)(ht, empty_slot, empty_record); ht->count--; return 0; } /** * @brief Delete a record from a hash table by that value and its hash. * @param ht - pointer to a hash table struct * @param slotpos - ID of an record. See LIGHT(find) for details. * @return 0 if ok, 1 if not found or -1 on memory error * (only with freezed iterators) */ static inline int LIGHT(delete_value)(struct LIGHT(core) *ht, uint32_t hash, LIGHT_DATA_TYPE value) { if (ht->count == 0) return 1; /* not found */ uint32_t slot = LIGHT(slot)(ht, hash); struct LIGHT(record) *record = (struct LIGHT(record) *) matras_get(&ht->mtable, slot); if (record->next == slot) return 1; /* not found */ uint32_t prev_slot = LIGHT(end); struct LIGHT(record) *prev_record = 0; while (1) { if (record->hash == hash && LIGHT_EQUAL((record->value), (value), (ht->arg))) break; prev_slot = slot; slot = record->next; if (slot == LIGHT(end)) return 1; /* not found */ prev_record = record; record = (struct LIGHT(record) *) matras_get(&ht->mtable, slot); } record = (struct LIGHT(record) *) matras_touch(&ht->mtable, slot); if (!record) { return -1; /* mem fail */ } if (ht->empty_slot != LIGHT(end)) { if (!matras_touch(&ht->mtable, ht->empty_slot)) return -1; /* mem fail */ } if (prev_record) { prev_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, prev_slot); if (!prev_record) return -1; /* mem fail */ prev_record->next = record->next; LIGHT(enqueue_empty)(ht, slot, record); ht->count--; return 0; } if (record->next == LIGHT(end)) { LIGHT(enqueue_empty)(ht, slot, record); ht->count--; return 0; } uint32_t next_slot = record->next; struct LIGHT(record) *next_record = (struct LIGHT(record) *) matras_touch(&ht->mtable, next_slot); if (!next_record) return -1; /* mem fail */ *record = *next_record; LIGHT(enqueue_empty)(ht, 
next_slot, next_record); ht->count--; return 0; } /** * @brief Get a value from a desired position * @param ht - pointer to a hash table struct * @param slotpos - ID of an record * ID must be vaild, check it by light_pos_valid (asserted). */ static inline LIGHT_DATA_TYPE LIGHT(get)(struct LIGHT(core) *ht, uint32_t slotpos) { assert(slotpos < ht->table_size); struct LIGHT(record) *record = (struct LIGHT(record) *) matras_get(&ht->mtable, slotpos); assert(record->next != slotpos); return record->value; } /** * @brief Determine if posision holds a value * @param ht - pointer to a hash table struct * @param slotpos - ID of an record * ID must be in valid range [0, ht->table_size) (asserted). */ static inline bool LIGHT(pos_valid)(struct LIGHT(core) *ht, uint32_t slotpos) { assert(slotpos < ht->table_size); struct LIGHT(record) *record = (struct LIGHT(record) *) matras_get(&ht->mtable, slotpos); return record->next != slotpos; } /** * @brief Set iterator to the beginning of hash table * @param ht - pointer to a hash table struct * @param itr - iterator to set */ static inline void LIGHT(iterator_begin)(const struct LIGHT(core) *ht, struct LIGHT(iterator) *itr) { (void)ht; itr->slotpos = 0; matras_head_read_view(&itr->view); } /** * @brief Set iterator to position determined by key * @param ht - pointer to a hash table struct * @param itr - iterator to set * @param hash - hash to find * @param data - key to find */ static inline void LIGHT(iterator_key)(const struct LIGHT(core) *ht, struct LIGHT(iterator) *itr, uint32_t hash, LIGHT_KEY_TYPE data) { itr->slotpos = LIGHT(find_key)(ht, hash, data); matras_head_read_view(&itr->view); } /** * @brief Get the value that iterator currently points to * @param ht - pointer to a hash table struct * @param itr - iterator to set * @return poiner to the value or NULL if iteration is complete */ static inline LIGHT_DATA_TYPE * LIGHT(iterator_get_and_next)(const struct LIGHT(core) *ht, struct LIGHT(iterator) *itr) { const struct 
matras_view *view; view = matras_is_read_view_created(&itr->view) ? &itr->view : &ht->mtable.head; while (itr->slotpos < view->block_count) { uint32_t slotpos = itr->slotpos; struct LIGHT(record) *record = (struct LIGHT(record) *) matras_view_get(&ht->mtable, view, slotpos); itr->slotpos++; if (record->next != slotpos) return &record->value; } return 0; } /** * @brief Freezes state for given iterator. All following hash table modification * will not apply to that iterator iteration. That iterator should be destroyed * with a light_iterator_destroy call after usage. * @param ht - pointer to a hash table struct * @param itr - iterator to freeze */ static inline void LIGHT(iterator_freeze)(struct LIGHT(core) *ht, struct LIGHT(iterator) *itr) { assert(!matras_is_read_view_created(&itr->view)); matras_create_read_view(&ht->mtable, &itr->view); } /** * @brief Destroy an iterator that was frozen before. Useless for not frozen * iterators. * @param ht - pointer to a hash table struct * @param itr - iterator to destroy */ static inline void LIGHT(iterator_destroy)(struct LIGHT(core) *ht, struct LIGHT(iterator) *itr) { matras_destroy_read_view(&ht->mtable, &itr->view); } /* * Selfcheck of the internal state of hash table. Used only for debugging. * That means that you should not use this function. * If return not zero, something went terribly wrong. 
*/ static inline int LIGHT(selfcheck)(const struct LIGHT(core) *ht) { int res = 0; if (ht->table_size != ht->mtable.head.block_count) res |= 64; uint32_t empty_slot = ht->empty_slot; uint32_t prev_empty_slot = LIGHT(end); while (empty_slot != LIGHT(end)) { struct LIGHT(record) *empty_record = (struct LIGHT(record) *) matras_get(&ht->mtable, empty_slot); if (empty_record->next != empty_slot) res |= 2048; if (LIGHT(get_empty_prev)(empty_record) != prev_empty_slot) res |= 4096; prev_empty_slot = empty_slot; empty_slot = LIGHT(get_empty_next)(empty_record); } for (uint32_t i = 0; i < ht->table_size; i++) { struct LIGHT(record) *record = (struct LIGHT(record) *) matras_get(&ht->mtable, i); if (record->next == i) { uint32_t empty_slot = ht->empty_slot; while (empty_slot != LIGHT(end) && empty_slot != i) { struct LIGHT(record) *empty_record = (struct LIGHT(record) *) matras_get(&ht->mtable, empty_slot); empty_slot = LIGHT(get_empty_next)(empty_record); } if (empty_slot != i) res |= 256; continue; } uint32_t slot = LIGHT(slot)(ht, record->hash); if (slot != i) { bool found = false; uint32_t chain_slot = slot; uint32_t chain_start_slot = slot; do { struct LIGHT(record) *chain_record = (struct LIGHT(record) *) matras_get(&ht->mtable, chain_slot); chain_slot = chain_record->next; if (chain_slot >= ht->table_size) { res |= 16; /* out of bounds (1) */ break; } if (chain_slot == i) { found = true; break; } if (chain_slot == chain_start_slot) { res |= 4; /* cycles in chain (1) */ break; } } while (chain_slot != LIGHT(end)); if (!found) res |= 1; /* slot is out of chain */ } else { do { struct LIGHT(record) *record = (struct LIGHT(record) *) matras_get(&ht->mtable, slot); if (LIGHT(slot)(ht, record->hash) != i) res |= 2; /* wrong value in chain */ slot = record->next; if (slot != LIGHT(end) && slot >= ht->table_size) { res |= 32; /* out of bounds (2) */ break; } if (slot == i) { res |= 8; /* cycles in chain (2) */ break; } } while (slot != LIGHT(end)); } } return res; } 
tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/CMakeLists.txt0000664000000000000000000000021013306560010021352 0ustar rootrootset(lib_sources rope.c rtree.c guava.c bloom.c) set_source_files_compile_flags(${lib_sources}) add_library(salad STATIC ${lib_sources}) tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/bloom.h0000664000000000000000000002040713306565107020121 0ustar rootroot#ifndef TARANTOOL_BLOOM_H_INCLUDED #define TARANTOOL_BLOOM_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Classic bloom filter with several improvements * 1) Cache oblivious: * Putze, F.; Sanders, P.; Singler, J. 
(2007), * "Cache-, Hash- and Space-Efficient Bloom Filters" * http://algo2.iti.kit.edu/singler/publications/cacheefficientbloomfilters-wea2007.pdf * 2) Fast hash function calculation: * Kirsch, Adam; Mitzenmacher, Michael (2006) * "Less Hashing, Same Performance: Building a Better Bloom Filter" * https://www.eecs.harvard.edu/~michaelm/postscripts/tr-02-05.pdf * 3) Using only one hash value that is splitted into several independent parts */ #include #include #include #include #include "bit/bit.h" #include "small/quota.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { /* Expected cache line of target processor */ BLOOM_CACHE_LINE = 64, /* Number of different bloom filter in bloom spectrum */ BLOOM_SPECTRUM_SIZE = 10, }; typedef uint32_t bloom_hash_t; /** * Cache-line-size block of bloom filter */ struct bloom_block { unsigned char bits[BLOOM_CACHE_LINE]; }; /** * Bloom filter data structure */ struct bloom { /* Number of buckets (blocks) in the table */ uint32_t table_size; /* Number of hash function per value */ uint16_t hash_count; /* Bit field table */ struct bloom_block *table; }; /* {{{ API declaration */ /** * Allocate and initialize an instance of bloom filter * * @param bloom - structure to initialize * @param number_of_values - estimated number of values to be added * @param false_positive_rate - desired false positive rate * @param quota - quota for memory allocation * @return 0 - OK, -1 - memory error */ int bloom_create(struct bloom *bloom, uint32_t number_of_values, double false_positive_rate, struct quota *quota); /** * Free resources of the bloom filter * * @param bloom - the bloom filter * @param quota - quota for memory deallocation */ void bloom_destroy(struct bloom *bloom, struct quota *quota); /** * Add a value into the data set * @param bloom - the bloom filter * @param hash - hash of the value */ static void bloom_add(struct bloom *bloom, bloom_hash_t hash); /** * Query for presence of a value in the data set * 
@param bloom - the bloom filter * @param hash - hash of the value * @return true - the value could be in data set; false - the value is * definitively not in data set * */ static bool bloom_possible_has(const struct bloom *bloom, bloom_hash_t hash); /** * Calculate size of a buffer that is needed for storing bloom table * @param bloom - the bloom filter to store * @return - Exact size */ size_t bloom_store_size(const struct bloom *bloom); /** * Store bloom filter table to the given buffer * Other struct bloom members must be stored manually. * @param bloom - the bloom filter to store * @param table - buffer to store to * #return - end of written buffer */ char * bloom_store(const struct bloom *bloom, char *table); /** * Allocate table and load it from given buffer. * Other struct bloom members must be loaded manually. * * @param bloom - structure to load to * @param table - data to load * @param quota - quota for memory allocation * @return 0 - OK, -1 - memory error */ int bloom_load_table(struct bloom *bloom, const char *table, struct quota *quota); /** * Bloom spectrum that allows to create and fill a bloom filter in case when * there is no enough knowledge about final number of elements. * It consists of several bloom filter (set of filters) of different sizes * and fills them all. After filling it allows to choose more efficient bloom * filter. 
*/ struct bloom_spectrum { uint32_t count_expected; uint32_t count_collected; int chosen_one; struct bloom vector[BLOOM_SPECTRUM_SIZE]; }; /** * Create a bloom spectrum * @param spectrum - spectrum to init * @param max_number_of_values - upper bound of estimation about * number of elements * @param false_positive_rate - desired false positive rate * @param quota - quota for memory allocation * @return 0 - OK, -1 - memory error */ int bloom_spectrum_create(struct bloom_spectrum *spectrum, uint32_t max_number_of_values, double false_positive_rate, struct quota *quota); /** * Add a value into the data set * @param spectrum - spectrum to add to * @param hash - a hash of a value */ static void bloom_spectrum_add(struct bloom_spectrum *spectrum, bloom_hash_t hash); /** * Choose best bloom filter after filling the set. * Must be used only once. * @param spectrum - spectrum to choose from * @param bloom - target structure that will hold the best bloom filter */ void bloom_spectrum_choose(struct bloom_spectrum *spectrum, struct bloom *bloom); /** * Destroy spectrum and free all data (except the chosen one) * @param spectrum - spectrum to destroy * @param quota - quota for memory deallocation */ void bloom_spectrum_destroy(struct bloom_spectrum *spectrum, struct quota *quota); /* }}} API declaration */ /* {{{ API definition */ static inline void bloom_add(struct bloom *bloom, bloom_hash_t hash) { /* Using lower part of the has for finding a block */ bloom_hash_t pos = hash % bloom->table_size; hash = hash / bloom->table_size; /* __builtin_prefetch(bloom->table + pos, 1); */ const bloom_hash_t bloom_block_bits = BLOOM_CACHE_LINE * CHAR_BIT; /* bit_no in block is less than bloom_block_bits (512). * split the given hash into independent lower part and high part. 
*/ bloom_hash_t hash2 = hash / bloom_block_bits + 1; for (bloom_hash_t i = 0; i < bloom->hash_count; i++) { bloom_hash_t bit_no = hash % bloom_block_bits; bit_set(bloom->table[pos].bits, bit_no); /* Combine two hashes to create required number of hashes */ /* Add i**2 for better distribution */ hash += hash2 + i * i; } } static inline bool bloom_possible_has(const struct bloom *bloom, bloom_hash_t hash) { /* Using lower part of the has for finding a block */ bloom_hash_t pos = hash % bloom->table_size; hash = hash / bloom->table_size; /* __builtin_prefetch(bloom->table + pos, 0); */ const bloom_hash_t bloom_block_bits = BLOOM_CACHE_LINE * CHAR_BIT; /* bit_no in block is less than bloom_block_bits (512). * split the given hash into independent lower part and high part. */ bloom_hash_t hash2 = hash / bloom_block_bits + 1; for (bloom_hash_t i = 0; i < bloom->hash_count; i++) { bloom_hash_t bit_no = hash % bloom_block_bits; if (!bit_test(bloom->table[pos].bits, bit_no)) return false; /* Combine two hashes to create required number of hashes */ /* Add i**2 for better distribution */ hash += hash2 + i * i; } return true; } static inline void bloom_spectrum_add(struct bloom_spectrum *spectrum, bloom_hash_t hash) { spectrum->count_collected++; for (uint32_t i = 0; i < BLOOM_SPECTRUM_SIZE; i++) bloom_add(&spectrum->vector[i], hash); } /* }}} API definition */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BLOOM_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/stailq.h0000664000000000000000000001365513306560010020301 0ustar rootroot#ifndef TARANTOOL_STAILQ_H_INCLUDED #define TARANTOOL_STAILQ_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #ifndef typeof /* TODO: 'typeof' is a GNU extension */ #define typeof __typeof__ #endif struct stailq_entry { struct stailq_entry *next; }; struct stailq { struct stailq_entry *first; struct stailq_entry **last; }; /** * init list head (or list entry as ins't included in list) */ inline static void stailq_create(struct stailq *head) { head->first = NULL; head->last = &head->first; } /** * Add an item to list head */ inline static void stailq_add(struct stailq *head, struct stailq_entry *item) { item->next = head->first; if (item->next == NULL) head->last = &item->next; head->first = item; } /** * Pop an item from list head. 
*/ inline static struct stailq_entry * stailq_shift(struct stailq *head) { struct stailq_entry *shift = head->first; if ((head->first = head->first->next) == NULL) head->last = &head->first; return shift; } /** * Add an item to list tail */ inline static void stailq_add_tail(struct stailq *head, struct stailq_entry *item) { item->next = NULL; *head->last = item; head->last = &item->next; } /** * return first element */ inline static struct stailq_entry * stailq_first(struct stailq *head) { return head->first; } /** * return last element */ inline static struct stailq_entry * stailq_last(struct stailq *head) { return head->last == &head->first ? NULL : (struct stailq_entry *) head->last; } /** * return next element by element */ inline static struct stailq_entry * stailq_next(struct stailq_entry *item) { return item->next; } /** * return TRUE if list is empty */ inline static int stailq_empty(struct stailq *head) { return head->first == NULL; } /* * Singly-linked Tail queue functions. */ static inline void stailq_concat(struct stailq *head1, struct stailq *head2) { if (!stailq_empty(head2)) { *head1->last = head2->first; head1->last = head2->last; stailq_create(head2); } } /* Reverse a list in-place. */ static inline void stailq_reverse(struct stailq *head) { struct stailq_entry *elem = stailq_first(head), *next; stailq_create(head); while (elem) { next = stailq_next(elem); stailq_add(head, elem); elem = next; } } /** * Move elements of list @head starting from @last->next to * list @tail. If @last is NULL, then this function moves all * elements from @head to @tail. Note, all elements of list * @tail are discarded. */ static inline void stailq_cut_tail(struct stailq *head, struct stailq_entry *last, struct stailq *tail) { if (last != NULL) { tail->first = last->next; tail->last = head->last; head->last = &last->next; last->next = NULL; } else { tail->first = head->first; tail->last = head->first != NULL ? 
head->last : &tail->first; head->first = NULL; head->last = &head->first; } } #define stailq_entry(item, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (item); \ (type *)( (char *)__mptr - ((size_t) &((type *)0)->member) ); }) /** * return first entry */ #define stailq_first_entry(head, type, member) \ stailq_entry(stailq_first(head), type, member) /** * return first entry */ #define stailq_last_entry(head, type, member) \ stailq_entry(stailq_last(head), type, member) /** * return next entry */ #define stailq_next_entry(item, member) \ stailq_entry(stailq_next(&(item)->member), typeof(*item), member) #define stailq_foreach_entry(item, head, member) \ for (item = stailq_first_entry((head), typeof(*item), member); \ item != stailq_entry(0, typeof(*item), member); \ item = stailq_next_entry(item, member)) #define stailq_foreach_entry_safe(item, next, head, member) \ for (item = stailq_first_entry((head), typeof(*item), member); \ item != stailq_entry(0, typeof(*item), member) && \ (next = stailq_next_entry(item, member), 1); \ item = next) /** * Remove one element from the list and return it * @pre the list is not empty */ #define stailq_shift_entry(head, type, member) \ stailq_entry(stailq_shift(head), type, member) \ /** * add entry to list */ #define stailq_add_entry(head, item, member) \ stailq_add((head), &(item)->member) /** * add entry to list tail */ #define stailq_add_tail_entry(head, item, member) \ stailq_add_tail((head), &(item)->member) /** * foreach through list */ #define stailq_foreach(item, head) \ for (item = stailq_first(head); item; item = item->next) #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_STAILQ_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/mhash.h0000664000000000000000000003675713306560010020114 0ustar rootroot/* * *No header guard*: the header is allowed to be included twice * with different sets of defines. 
*/ /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* The MIT License Copyright (c) 2008, by Attractive Chaos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef MH_INCREMENTAL_RESIZE #define MH_INCREMENTAL_RESIZE 1 #endif #include #include #include #include #include #define mh_cat(a, b) mh##a##_##b #define mh_ecat(a, b) mh_cat(a, b) #define _mh(x) mh_ecat(mh_name, x) #define mh_unlikely(x) __builtin_expect((x),0) #ifndef MH_TYPEDEFS #define MH_TYPEDEFS 1 typedef uint32_t mh_int_t; #endif /* MH_TYPEDEFS */ #ifndef MH_HEADER #define MH_HEADER #ifndef mh_bytemap #define mh_bytemap 0 #endif struct _mh(t) { mh_node_t *p; #if !mh_bytemap uint32_t *b; #else uint8_t *b; #endif mh_int_t n_buckets; mh_int_t n_dirty; mh_int_t size; mh_int_t upper_bound; mh_int_t prime; mh_int_t resize_cnt; mh_int_t resize_position; mh_int_t batch; struct _mh(t) *shadow; }; #if !mh_bytemap #define mh_exist(h, i) ({ h->b[i >> 4] & (1 << (i % 16)); }) #define mh_dirty(h, i) ({ h->b[i >> 4] & (1u << (i % 16 + 16)); }) #define mh_gethk(hash) (1) #define mh_mayeq(h, i, hk) mh_exist(h, i) #define mh_setfree(h, i) ({ h->b[i >> 4] &= ~(1 << (i % 16)); }) #define mh_setexist(h, i, hk) ({ h->b[i >> 4] |= (1 << (i % 16)); }) #define mh_setdirty(h, i) ({ h->b[i >> 4] |= (1u << (i % 16 + 16)); }) #else #define mh_exist(h, i) ({ h->b[i] & 0x7f; }) #define mh_dirty(h, i) ({ h->b[i] & 0x80; }) #define mh_gethk(hash) ({ (hash) % 127 + 1; }) #define mh_mayeq(h, i, hk) ({ mh_exist(h, i) == hk; }) #define mh_setfree(h, i) ({ h->b[i] &= 0x80; }) #define mh_setexist(h, i, hk) ({ h->b[i] |= hk; }) #define mh_setdirty(h, i) ({ h->b[i] |= 0x80; }) #endif #define mh_node(h, i) ((const mh_node_t *) &((h)->p[(i)])) 
#define mh_size(h) ({ (h)->size; }) #define mh_capacity(h) ({ (h)->n_buckets; }) #define mh_begin(h) ({ 0; }) #define mh_end(h) ({ (h)->n_buckets; }) #define mh_first(h) ({ \ mh_int_t i; \ for (i = 0; i < mh_end(h); i++) { \ if (mh_exist(h, i)) \ break; \ } \ i; \ }) #define mh_next(h, i) ({ \ mh_int_t n = i; \ if (n < mh_end(h)) { \ for (n = i + 1; n < mh_end(h); n++) { \ if (mh_exist(h, n)) \ break; \ } \ } \ n; \ }) #define mh_foreach(h, i) \ for (i = mh_first(h); i < mh_end(h); i = mh_next(h, i)) #define MH_DENSITY 0.7 struct _mh(t) * _mh(new)(); void _mh(clear)(struct _mh(t) *h); void _mh(delete)(struct _mh(t) *h); void _mh(resize)(struct _mh(t) *h, mh_arg_t arg); int _mh(start_resize)(struct _mh(t) *h, mh_int_t buckets, mh_int_t batch, mh_arg_t arg); int _mh(reserve)(struct _mh(t) *h, mh_int_t size, mh_arg_t arg); void NOINLINE _mh(del_resize)(struct _mh(t) *h, mh_int_t x, mh_arg_t arg); size_t _mh(memsize)(struct _mh(t) *h); void _mh(dump)(struct _mh(t) *h); #define put_slot(h, node, exist, arg) \ _mh(put_slot)(h, node, exist, arg) static inline mh_node_t * _mh(node)(struct _mh(t) *h, mh_int_t x) { return (mh_node_t *) &(h->p[x]); } static inline mh_int_t _mh(next_slot)(mh_int_t slot, mh_int_t inc, mh_int_t size) { slot += inc; return slot >= size ? slot - size : slot; } #if defined(mh_hash_key) && defined(mh_cmp_key) /** * If it is necessary to search by something different * than a hash node, define mh_hash_key and mh_eq_key * and use mh_find(). 
*/ static inline mh_int_t _mh(find)(struct _mh(t) *h, mh_key_t key, mh_arg_t arg) { (void) arg; mh_int_t k = mh_hash_key(key, arg); uint8_t hk = mh_gethk(k); (void)hk; mh_int_t i = k % h->n_buckets; mh_int_t inc = 1 + k % (h->n_buckets - 1); for (;;) { if ((mh_mayeq(h, i, hk) && !mh_cmp_key(key, mh_node(h, i), arg))) return i; if (!mh_dirty(h, i)) return h->n_buckets; i = _mh(next_slot)(i, inc, h->n_buckets); } } #endif static inline mh_int_t _mh(get)(struct _mh(t) *h, const mh_node_t *node, mh_arg_t arg) { (void) arg; mh_int_t k = mh_hash(node, arg); uint8_t hk = mh_gethk(k); (void)hk; mh_int_t i = k % h->n_buckets; mh_int_t inc = 1 + k % (h->n_buckets - 1); for (;;) { if ((mh_mayeq(h, i, hk) && !mh_cmp(node, mh_node(h, i), arg))) return i; if (!mh_dirty(h, i)) return h->n_buckets; i = _mh(next_slot)(i, inc, h->n_buckets); } } static inline mh_int_t _mh(random)(struct _mh(t) *h, mh_int_t rnd) { for (mh_int_t i = 0; i < mh_size(h); i++, rnd++) { rnd %= h->n_buckets; if (mh_exist(h, rnd)) return rnd; } return h->n_buckets; } static inline mh_int_t _mh(put_slot)(struct _mh(t) *h, const mh_node_t *node, int *exist, mh_arg_t arg) { (void) arg; mh_int_t k = mh_hash(node, arg); /* hash key */ uint8_t hk = mh_gethk(k); (void)hk; mh_int_t i = k % h->n_buckets; /* offset in the hash table. */ mh_int_t inc = 1 + k % (h->n_buckets - 1); /* overflow chain increment. */ *exist = 1; /* Skip through all collisions. */ while (mh_exist(h, i)) { if (mh_mayeq(h, i, hk) && !mh_cmp(node, mh_node(h, i), arg)) return i; /* Found a duplicate. */ /* * Mark this link as part of a collision chain. The * chain always ends with a non-marked link. * Note: the collision chain for this key may share * links with collision chains of other keys. */ mh_setdirty(h, i); i = _mh(next_slot)(i, inc, h->n_buckets); } /* * Found an unused, but possibly dirty slot. Use it. * However, if this is a dirty slot, first check that * there are no duplicates down the collision chain. 
The * current link can also be from a collision chain of some * other key, but this is can't be established, so check * anyway. */ mh_int_t save_i = i; while (mh_dirty(h, i)) { i = _mh(next_slot)(i, inc, h->n_buckets); if (mh_mayeq(h, i, hk) && !mh_cmp(mh_node(h, i), node, arg)) return i; /* Found a duplicate. */ } /* Reached the end of the collision chain: no duplicates. */ *exist = 0; h->size++; if (!mh_dirty(h, save_i)) h->n_dirty++; mh_setexist(h, save_i, hk); return save_i; } /** * Find a node in the hash and replace it with a new value. * Save the old node in ret pointer, if it is provided. * If the old node didn't exist, just insert the new node. * * @retval != mh_end() pos of the new node, ret is either NULL * or copy of the old node * @retval mh_end() out of memory, ret is unchanged. */ static inline mh_int_t _mh(put)(struct _mh(t) *h, const mh_node_t *node, mh_node_t **ret, mh_arg_t arg) { mh_int_t x = mh_end(h); int exist; if (h->size == h->n_buckets) /* no one free elements in the hash table */ goto put_done; #if MH_INCREMENTAL_RESIZE if (mh_unlikely(h->resize_position > 0)) _mh(resize)(h, arg); else if (mh_unlikely(h->n_dirty >= h->upper_bound)) { if (_mh(start_resize)(h, h->n_buckets + 1, 0, arg)) goto put_done; } if (h->resize_position) _mh(put)(h->shadow, node, NULL, arg); #else if (mh_unlikely(h->n_dirty >= h->upper_bound)) { if (_mh(start_resize)(h, h->n_buckets + 1, h->size, arg)) goto put_done; } #endif x = put_slot(h, node, &exist, arg); if (ret) { if (exist) memcpy(*ret, &(h->p[x]), sizeof(mh_node_t)); else *ret = NULL; } memcpy(&(h->p[x]), node, sizeof(mh_node_t)); put_done: return x; } static inline void _mh(del)(struct _mh(t) *h, mh_int_t x, mh_arg_t arg) { if (x != h->n_buckets && mh_exist(h, x)) { mh_setfree(h, x); h->size--; if (!mh_dirty(h, x)) h->n_dirty--; #if MH_INCREMENTAL_RESIZE if (mh_unlikely(h->resize_position)) _mh(del_resize)(h, x, arg); #endif } } #endif static inline void _mh(remove)(struct _mh(t) *h, const mh_node_t *node, 
mh_arg_t arg) { mh_int_t k = _mh(get)(h, node, arg); if (k != mh_end(h)) _mh(del)(h, k, arg); } #ifdef MH_SOURCE #ifndef __ac_HASH_PRIME_SIZE #define __ac_HASH_PRIME_SIZE 31 static const mh_int_t __ac_prime_list[__ac_HASH_PRIME_SIZE] = { 3ul, 11ul, 23ul, 53ul, 97ul, 193ul, 389ul, 769ul, 1543ul, 3079ul, 6151ul, 12289ul, 24593ul, 49157ul, 98317ul, 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul, 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul, 201326611ul, 402653189ul, 805306457ul, 1610612741ul, 3221225473ul, 4294967291ul }; #endif /* __ac_HASH_PRIME_SIZE */ NOINLINE void _mh(del_resize)(struct _mh(t) *h, mh_int_t x, mh_arg_t arg) { struct _mh(t) *s = h->shadow; mh_int_t y = _mh(get)(s, (const mh_node_t *) &(h->p[x]), arg); _mh(del)(s, y, arg); _mh(resize)(h, arg); } struct _mh(t) * _mh(new)() { struct _mh(t) *h = (struct _mh(t) *) calloc(1, sizeof(*h)); h->shadow = (struct _mh(t) *) calloc(1, sizeof(*h)); h->prime = 0; h->n_buckets = __ac_prime_list[h->prime]; h->p = (mh_node_t *) calloc(h->n_buckets, sizeof(mh_node_t)); #if !mh_bytemap h->b = (uint32_t *) calloc(h->n_buckets / 16 + 1, sizeof(uint32_t)); #else h->b = (uint8_t *) calloc(h->n_buckets, sizeof(uint8_t)); #endif h->upper_bound = h->n_buckets * MH_DENSITY; return h; } void _mh(clear)(struct _mh(t) *h) { if (h->shadow->p) { free(h->shadow->p); free(h->shadow->b); memset(h->shadow, 0, sizeof(*h->shadow)); } free(h->p); free(h->b); h->prime = 0; h->n_buckets = __ac_prime_list[h->prime]; h->p = (mh_node_t *) calloc(h->n_buckets, sizeof(mh_node_t)); #if !mh_bytemap h->b = (uint32_t *) calloc(h->n_buckets / 16 + 1, sizeof(uint32_t)); #else h->b = (uint8_t *) calloc(h->n_buckets, sizeof(uint8_t)); #endif h->size = 0; h->upper_bound = h->n_buckets * MH_DENSITY; } void _mh(delete)(struct _mh(t) *h) { if (h->shadow->p) { free(h->shadow->p); free(h->shadow->b); memset(h->shadow, 0, sizeof(*h->shadow)); } free(h->shadow); free(h->b); free(h->p); free(h); } /** Calculate hash size. 
*/ size_t _mh(memsize)(struct _mh(t) *h) { size_t sz = 2 * sizeof(struct _mh(t)); sz += h->n_buckets * sizeof(mh_node_t); #if !mh_bytemap sz += (h->n_buckets / 16 + 1) * sizeof(uint32_t); #else sz += h->n_buckets; #endif if (h->resize_position) { h = h->shadow; sz += h->n_buckets * sizeof(mh_node_t); #if !mh_bytemap sz += (h->n_buckets / 16 + 1) * sizeof(uint32_t); #else sz += h->n_buckets; #endif } return sz; } void _mh(resize)(struct _mh(t) *h, mh_arg_t arg) { struct _mh(t) *s = h->shadow; int exist; #if MH_INCREMENTAL_RESIZE mh_int_t batch = h->batch; #endif for (mh_int_t i = h->resize_position; i < h->n_buckets; i++) { #if MH_INCREMENTAL_RESIZE if (batch-- == 0) { h->resize_position = i; return; } #endif if (!mh_exist(h, i)) continue; mh_int_t n = put_slot(s, mh_node(h, i), &exist, arg); s->p[n] = h->p[i]; } free(h->p); free(h->b); if (s->size != h->size) abort(); memcpy(h, s, sizeof(*h)); h->resize_cnt++; memset(s, 0, sizeof(*s)); } int _mh(start_resize)(struct _mh(t) *h, mh_int_t buckets, mh_int_t batch, mh_arg_t arg) { if (h->resize_position) { /* resize has already been started */ return 0; } if (buckets < h->n_buckets) { /* hash size is already greater than requested */ return 0; } while (h->prime < __ac_HASH_PRIME_SIZE - 1) { if (__ac_prime_list[h->prime] >= buckets) break; h->prime += 1; } h->batch = batch > 0 ? 
batch : h->n_buckets / (256 * 1024); if (h->batch < 256) { /* * Minimal batch must be greater or equal to * 1 / (1 - f), where f is upper bound percent * = MH_DENSITY */ h->batch = 256; } struct _mh(t) *s = h->shadow; memcpy(s, h, sizeof(*h)); s->resize_position = 0; s->n_buckets = __ac_prime_list[h->prime]; s->upper_bound = s->n_buckets * MH_DENSITY; s->n_dirty = 0; s->size = 0; s->p = (mh_node_t *) malloc(s->n_buckets * sizeof(mh_node_t)); if (s->p == NULL) return -1; #if !mh_bytemap s->b = (uint32_t *) calloc(s->n_buckets / 16 + 1, sizeof(uint32_t)); #else s->b = (uint8_t *) calloc(s->n_buckets, sizeof(uint8_t)); #endif if (s->b == NULL) { free(s->p); s->p = NULL; return -1; } _mh(resize)(h, arg); return 0; } int _mh(reserve)(struct _mh(t) *h, mh_int_t size, mh_arg_t arg) { return _mh(start_resize)(h, size/MH_DENSITY, h->size, arg); } #ifndef mh_stat #define mh_stat(buf, h) ({ \ tbuf_printf(buf, " n_buckets: %" PRIu32 CRLF \ " n_dirty: %" PRIu32 CRLF \ " size: %" PRIu32 CRLF \ " resize_cnt: %" PRIu32 CRLF \ " resize_position: %" PRIu32 CRLF, \ h->n_buckets, \ h->n_dirty, \ h->size, \ h->resize_cnt, \ h->resize_position); \ }) #endif #ifdef MH_DEBUG void _mh(dump)(struct _mh(t) *h) { printf("slots:\n"); int k = 0; for(int i = 0; i < h->n_buckets; i++) { if (mh_dirty(h, i) || mh_exist(h, i)) { printf(" [%i] ", i); if (mh_exist(h, i)) { /* TODO(roman): fix this printf */ printf(" -> %p", h->p[i]); k++; } if (mh_dirty(h, i)) printf(" dirty"); printf("\n"); } } printf("end(%i)\n", k); } #endif #endif #if defined(MH_SOURCE) || defined(MH_UNDEF) #undef MH_HEADER #undef mh_int_t #undef mh_node_t #undef mh_arg_t #undef mh_key_t #undef mh_name #undef mh_hash #undef mh_hash_key #undef mh_cmp #undef mh_cmp_key #undef mh_node #undef mh_dirty #undef mh_place #undef mh_setdirty #undef mh_setexist #undef mh_setvalue #undef mh_unlikely #undef slot #undef slot_and_dirty #undef MH_DENSITY #undef mh_bytemap #endif #undef mh_cat #undef mh_ecat #undef _mh 
tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/guava.h0000664000000000000000000000321113306560010020072 0ustar rootroot#ifndef TARANTOOL_LIB_GUAVA_H_INCLUDED #define TARANTOOL_LIB_GUAVA_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif int32_t guava(int64_t state, int32_t buckets); #if defined(__cplusplus) } /* extern C */ #endif #endif /* TARANTOOL_LIB_GUAVA_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/rope.h0000664000000000000000000001567013306560010017750 0ustar rootroot#ifndef INCLUDES_TARANTOOL_ROPE_H #define INCLUDES_TARANTOOL_ROPE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ typedef uint32_t rope_size_t; typedef int32_t rope_ssize_t; typedef void *(*rope_split_func)(void *, void *, size_t, size_t); typedef void *(*rope_alloc_func)(void *, size_t); typedef void (*rope_free_func)(void *, void *); /** Tallest allowable tree, 1.44*log(2^32) */ enum { ROPE_HEIGHT_MAX = 46 }; struct rope_node { /** Node height, see rope_node_height(), used for AVL balance. */ int height; /** Subtree size. */ rope_size_t tree_size; /* Substring size. */ rope_size_t leaf_size; /* Substring. 
*/ void *data; /* Left (0) and right (1) links */ struct rope_node *link[2]; }; struct rope { /** Top of the tree */ struct rope_node *root; /** Memory management context. */ void *alloc_ctx; /** Get a sequence tail, given offset. */ rope_split_func split; /** Split function context. */ void *split_ctx; /** Allocate memory (context, size). */ void *(*alloc)(void *, size_t); /** Free memory (context, pointer) */ void (*free)(void *, void *); }; struct rope_iter { /** rope->free is used to free the iterator. */ struct rope *rope; /** End of the traversal path. */ struct rope_node **top; /** Traversal path */ struct rope_node *path[ROPE_HEIGHT_MAX]; }; static inline rope_size_t rope_node_size(struct rope_node *node) { return node ? node->tree_size : 0; } static inline rope_size_t rope_leaf_size(struct rope_node *node) { return node->leaf_size; } static inline void * rope_leaf_data(struct rope_node *node) { return node->data; } static inline rope_size_t rope_size(struct rope *rope) { return rope_node_size(rope->root); } /** Initialize an empty rope. */ static inline void rope_create(struct rope *rope, rope_split_func split_func, void *split_ctx, rope_alloc_func alloc_func, rope_free_func free_func, void *alloc_ctx) { rope->root = NULL; rope->split = split_func; rope->split_ctx = split_ctx; rope->alloc = alloc_func; rope->free = free_func; rope->alloc_ctx = alloc_ctx; } /** Create a new empty rope. * @param split_func a function which returns * a pointer to substring * given an offset. Used * to split substrings when * inserting into a rope. 
* @param alloc_func used to allocate memory * @param free_func used to free memory * @param alloc_ctx allocator context * * @return an empty rope, or NULL if failed * to allocate memory */ static inline struct rope * rope_new(rope_split_func split_func, void *split_ctx, rope_alloc_func alloc_func, rope_free_func free_func, void *alloc_ctx) { struct rope *rope= (struct rope *) alloc_func(alloc_ctx, sizeof(struct rope)); if (rope == NULL) return NULL; rope_create(rope, split_func, split_ctx, alloc_func, free_func, alloc_ctx); return rope; } /** Delete rope contents. Can also be used * to free a rope which is allocated on stack. * Doesn't delete rope substrings, only * rope nodes. */ void rope_clear(struct rope *rope); /** Delete a rope allocated with rope_new() */ static inline void rope_delete(struct rope *rope) { rope_clear(rope); rope->free(rope->alloc_ctx, rope); } /** Insert a substring into a rope at the given * offset. * If offset is greater than rope size, insertion * happens at the end. * * @retval 0 success * @retval -1 failed to allocate memory for a new * tree node */ int rope_insert(struct rope *rope, rope_size_t offset, void *data, rope_size_t size); /** Append a substring at rope tail. */ static inline int rope_append(struct rope *rope, void *data, size_t size) { return rope_insert(rope, rope_size(rope), data, size); } /** Make sure there is a rope node which has a substring * which starts at the given offset. Useful when * rope substrings carry additional information. * * @retval NULL failed to allocate memory for a new * tree node */ struct rope_node * rope_extract_node(struct rope *rope, rope_size_t offset); static inline void * rope_extract(struct rope *rope, rope_size_t offset) { return rope_leaf_data(rope_extract_node(rope, offset)); } /** * Erase a single element from a rope at the given * offset. * * @pre offset < rope_size(rope) */ int rope_erase(struct rope *rope, rope_size_t offset); /** Initialize an iterator. 
*/ static inline void rope_iter_create(struct rope_iter *it, struct rope *rope) { it->rope = rope; } /** Create an iterator. */ static inline struct rope_iter * rope_iter_new(struct rope *rope) { struct rope_iter *it = (struct rope_iter *) rope->alloc(rope->alloc_ctx, sizeof(struct rope_iter)); if (it == NULL) return NULL; rope_iter_create(it, rope); return it; } /** * Begin iteration. * @retval NULL the rope is empty */ struct rope_node * rope_iter_start(struct rope_iter *it); /** * Advance to the next rope node. * * @return node, or NULL if iterator * has advanced beyond the last * node. */ struct rope_node * rope_iter_next(struct rope_iter *it); /** Free iterator. */ static inline void rope_iter_delete(struct rope_iter *it) { it->rope->free(it->rope->alloc_ctx, it); } /** Apply visit_leaf function to every rope leaf. */ void rope_traverse(struct rope *rope, void (*visit_leaf)(void *, size_t)); /** Check AVL tree consistency. */ void rope_check(struct rope *rope); /** Pretty print a rope. */ void rope_pretty_print(struct rope *rope, void (*print_leaf)(void *, size_t)); #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_ROPE_H */ tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/heap.h0000664000000000000000000002422413306560010017713 0ustar rootroot/* * *No header guard*: the header is allowed to be included twice * with different sets of defines. */ /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include /** * Additional user defined name that appended to prefix 'heap' * for all names of structs and functions in this header file. * All names use pattern: heap_ * May be empty, but still have to be defined (just #define HEAP_NAME) * Example: * #define HEAP_NAME test_ * ... * test_heap_create(&some_heap); * test_heap_destroy(&some_heap); */ /* For predefinition of structures and type non specific functions just make: * #define HEAP_FORWARD_DECLARATION * #inlude "heap.h" */ #ifndef HEAP_FORWARD_DECLARATION #ifndef HEAP_NAME #error "HEAP_NAME must be defined" #endif /* HEAP_NAME */ /** * Data comparing function. Takes 3 parameters - heap, node1, node2, * where heap is pointer onto heap_t structure and node1, node2 * are two pointers on nodes in your structure. * For example you have such type: * struct my_type { * int value; * struct heap_node vnode; * }; * Then node1 and node2 will be pointers on field vnode of two * my_type instances. 
* The function below is example of valid comparator by value: * * int test_type_less(const heap_t *heap, * const struct heap_node *a, * const struct heap_node *b) { * * const struct my_type *left = (struct my_type *)((char *)a - * offsetof(struct my_type, vnode)); * const struct my_type *right = (struct my_type *)((char *)b - * offsetof(struct my_type, vnode)); * return left->value < right->value; * } * * HEAP_LESS is less function that is important! */ #ifndef HEAP_LESS #error "HEAP_LESS must be defined" #endif /** * Tools for name substitution: */ #ifndef CONCAT3 #define CONCAT3_R(a, b, c) a##b##c #define CONCAT3(a, b, c) CONCAT3_R(a, b, c) #endif #ifdef _ #error '_' must be undefinded! #endif #ifndef HEAP #define HEAP(name) CONCAT3(HEAP_NAME, _, name) #endif #endif /* HEAP_FORWARD_DECLARATION */ /* Structures. */ #ifndef HEAP_STRUCTURES /* Include guard for structures */ #define HEAP_STRUCTURES enum { HEAP_INITIAL_CAPACITY = 8 }; typedef uint32_t heap_off_t; /** * Main structure for holding heap. */ struct heap_core_structure { heap_off_t size; heap_off_t capacity; struct heap_node **harr; /* array of heap node pointers */ }; typedef struct heap_core_structure heap_t; /** * Heap entry structure. */ struct heap_node { heap_off_t pos; }; /** * Heap iterator structure. */ struct heap_iterator { heap_t *heap; heap_off_t curr_pos; }; #endif /* HEAP_STRUCTURES */ #ifndef HEAP_FORWARD_DECLARATION /* Extern API that is the most usefull part. */ /** * Initialize the heap. */ static inline void HEAP(create)(heap_t *heap); /** * Destroy current heap. */ static inline void HEAP(destroy)(heap_t *heap); /** * Return min value. */ static inline struct heap_node * HEAP(top)(heap_t *heap); /** * Erase min value. */ static inline struct heap_node * HEAP(pop)(heap_t *heap); /** * Insert value. */ static inline int HEAP(insert)(heap_t *heap, struct heap_node *nd); /** * Delete node from heap. 
*/ static inline void HEAP(delete)(heap_t *heap, struct heap_node *value_node); /** * Heapify tree after update of value under value_node pointer. */ static inline void HEAP(update)(heap_t *heap, struct heap_node *value_node); /** * Heapify tree after updating all values. */ static inline void HEAP(update_all)(heap_t *heap); /** * Heap iterator init. */ static inline void HEAP(iterator_init)(heap_t *heap, struct heap_iterator *it); /** * Heap iterator next. */ static inline struct heap_node * HEAP(iterator_next) (struct heap_iterator *it); /* Routines. Functions below are useless for ordinary user. */ /* * Update backlink in the give heap_node structure. */ static inline void HEAP(update_link)(heap_t *heap, heap_off_t pos); /** * Sift up current node. */ static inline void HEAP(sift_up)(heap_t *heap, struct heap_node *node); /** * Sift down current node. */ static inline void HEAP(sift_down)(heap_t *heap, struct heap_node *node); /* Debug functions */ /** * Check that heap inveriants is holded. */ static inline int /* inline for suppress warning */ HEAP(check)(heap_t *heap); /* Function definitions. */ /** * Init heap. */ static inline void HEAP(create)(heap_t *heap) { heap->size = 0; heap->capacity = 0; heap->harr = NULL; } /** * Destroy current heap. */ static inline void HEAP(destroy)(heap_t *heap) { free(heap->harr); } /* * Update backlink in the give heap_node structure. */ static inline void HEAP(update_link)(heap_t *heap, heap_off_t pos) { heap->harr[pos]->pos = pos; } /** * Sift up current node. 
*/ static inline void HEAP(sift_up)(heap_t *heap, struct heap_node *node) { heap_off_t curr_pos = node->pos, parent = (curr_pos - 1) / 2; while (curr_pos > 0 && HEAP_LESS(heap, node, heap->harr[parent])) { node = heap->harr[curr_pos]; heap->harr[curr_pos] = heap->harr[parent]; HEAP(update_link)(heap, curr_pos); heap->harr[parent] = node; HEAP(update_link)(heap, parent); curr_pos = parent; parent = (curr_pos - 1) / 2; /* here overflow can occure, but that won't affect */ } } /** * Sift down current node. */ static inline void HEAP(sift_down)(heap_t *heap, struct heap_node *node) { heap_off_t curr_pos = node->pos, left, right; heap_off_t min_child; while (true) { left = 2 * curr_pos + 1; right = 2 * curr_pos + 2; min_child = left; if (right < heap->size && HEAP_LESS(heap, heap->harr[right], heap->harr[left])) min_child = right; if (left >= heap->size || HEAP_LESS(heap, heap->harr[curr_pos], heap->harr[min_child]) ) return; node = heap->harr[curr_pos]; heap->harr[curr_pos] = heap->harr[min_child]; heap->harr[min_child] = node; HEAP(update_link)(heap, curr_pos); HEAP(update_link)(heap, min_child); curr_pos = min_child; } } /** * Increase capacity. */ static inline int HEAP(reserve)(heap_t *heap) { heap_off_t capacity = heap->capacity == 0 ? HEAP_INITIAL_CAPACITY : heap->capacity << 1; void *harr = realloc(heap->harr, sizeof(struct heap_node *) * capacity); if (harr == NULL) return -1; heap->harr = harr; heap->capacity = capacity; return 0; } /** * Insert value. */ static inline int HEAP(insert)(heap_t *heap, struct heap_node *node) { (void) heap; assert(heap); if (heap->size + 1 > heap->capacity) { if (HEAP(reserve)(heap)) return -1; } heap->harr[heap->size] = node; HEAP(update_link)(heap, heap->size++); HEAP(sift_up)(heap, node); /* heapify */ return 0; } /** * Return min value without removing it from heap. * If heap is empty, return NULL. 
*/ static inline struct heap_node * HEAP(top)(heap_t *heap) { if (heap->size == 0) return NULL; return heap->harr[0]; } /** * Erase min value. Returns delete value. */ static inline struct heap_node * HEAP(pop)(heap_t *heap) { if (heap->size == 0) return NULL; struct heap_node *res = heap->harr[0]; HEAP(delete)(heap, heap->harr[0]); return res; } /* * Delete node from heap. */ static inline void HEAP(delete)(heap_t *heap, struct heap_node *value_node) { if (heap->size == 0) return; heap->size--; heap_off_t curr_pos = value_node->pos; if (curr_pos == heap->size) return; heap->harr[curr_pos] = heap->harr[heap->size]; HEAP(update_link)(heap, curr_pos); HEAP(update)(heap, heap->harr[curr_pos]); } /** * Heapify tree after update of value under value_node pointer. */ static inline void HEAP(update)(heap_t *heap, struct heap_node *value_node) { /* heapify */ HEAP(sift_down)(heap, value_node); HEAP(sift_up)(heap, value_node); } /** * Heapify tree after updating all values. */ static inline void HEAP(update_all)(heap_t *heap) { if (heap->size <= 1) return; /* Find the parent of the last element. */ heap_off_t curr_pos = (heap->size - 2) / 2; do { HEAP(sift_down)(heap, heap->harr[curr_pos]); } while (curr_pos-- > 0); } /** * Heap iterator init. */ static inline void HEAP(iterator_init)(heap_t *heap, struct heap_iterator *it) { it->curr_pos = 0; it->heap = heap; } /** * Heap iterator next. */ static inline struct heap_node * HEAP(iterator_next)(struct heap_iterator *it) { if (it->curr_pos == it->heap->size) return NULL; return it->heap->harr[it->curr_pos++]; } /** * Check that heap inveriants is holded. 
*/ static inline int HEAP(check)(heap_t *heap) { heap_off_t left, right, min_child; for (heap_off_t curr_pos = 0; 2 * curr_pos + 1 < heap->size; ++curr_pos) { left = 2 * curr_pos + 1; right = 2 * curr_pos + 2; min_child = left; if (right < heap->size && HEAP_LESS(heap, heap->harr[right], heap->harr[left])) min_child = right; if (HEAP_LESS(heap, heap->harr[min_child], heap->harr[curr_pos])) return -1; } return 0; } #endif /* HEAP_FORWARD_DECLARATION */ #undef HEAP_FORWARD_DECLARATION tarantool_1.9.1.26.g63eb81e3c/src/lib/salad/bloom.c0000664000000000000000000001206213306565107020112 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "bloom.h" #include #include #include #include #include #include int bloom_create(struct bloom *bloom, uint32_t number_of_values, double false_positive_rate, struct quota *quota) { /* Optimal hash_count and bit count calculation */ bloom->hash_count = (uint32_t) (log(false_positive_rate) / log(0.5) + 0.99); /* Number of bits */ uint64_t m = (uint64_t) (number_of_values * bloom->hash_count / log(2) + 0.5); /* mmap page size */ uint64_t page_size = sysconf(_SC_PAGE_SIZE); /* Number of bits in one page */ uint64_t b = page_size * CHAR_BIT; /* number of pages, round up */ uint64_t p = (uint32_t)((m + b - 1) / b); /* bit array size in bytes */ size_t mmap_size = p * page_size; bloom->table_size = p * page_size / sizeof(struct bloom_block); if (quota_use(quota, mmap_size) < 0) { bloom->table = NULL; return -1; } bloom->table = (struct bloom_block *) mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (bloom->table == MAP_FAILED) { bloom->table = NULL; quota_release(quota, mmap_size); return -1; } return 0; } void bloom_destroy(struct bloom *bloom, struct quota *quota) { size_t mmap_size = bloom->table_size * sizeof(struct bloom_block); munmap(bloom->table, mmap_size); quota_release(quota, mmap_size); } size_t bloom_store_size(const struct bloom *bloom) { return bloom->table_size * sizeof(struct bloom_block); } char * bloom_store(const struct bloom *bloom, char *table) { size_t store_size = bloom_store_size(bloom); memcpy(table, bloom->table, store_size); return table + store_size; } int bloom_load_table(struct bloom *bloom, const char *table, struct quota *quota) { size_t mmap_size = bloom->table_size * sizeof(struct bloom_block); if (quota_use(quota, mmap_size) < 0) { bloom->table = NULL; return -1; } bloom->table = (struct bloom_block *) mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (bloom->table == MAP_FAILED) { bloom->table = NULL; quota_release(quota, mmap_size); return -1; } 
memcpy(bloom->table, table, mmap_size); return 0; } void bloom_spectrum_choose(struct bloom_spectrum *spectrum, struct bloom *bloom) { assert(spectrum->chosen_one < 0); spectrum->chosen_one = 0; uint32_t number_of_values = spectrum->count_expected; for (int i = 1; i < BLOOM_SPECTRUM_SIZE; i++) { number_of_values = number_of_values * 4 / 5; if (number_of_values < 1) number_of_values = 1; if (spectrum->count_collected > number_of_values) break; spectrum->chosen_one = i; } /* Move the chosen one to result bloom structure */ *bloom = spectrum->vector[spectrum->chosen_one]; memset(&spectrum->vector[spectrum->chosen_one], 0, sizeof(spectrum->vector[spectrum->chosen_one])); } int bloom_spectrum_create(struct bloom_spectrum *spectrum, uint32_t max_number_of_values, double false_positive_rate, struct quota *quota) { spectrum->count_expected = max_number_of_values; spectrum->count_collected = 0; spectrum->chosen_one = -1; for (uint32_t i = 0; i < BLOOM_SPECTRUM_SIZE; i++) { int rc = bloom_create(&spectrum->vector[i], max_number_of_values, false_positive_rate, quota); if (rc) { for (uint32_t j = 0; j < i; j++) bloom_destroy(&spectrum->vector[i], quota); return rc; } max_number_of_values = max_number_of_values * 4 / 5; if (max_number_of_values < 1) max_number_of_values = 1; } return 0; } void bloom_spectrum_destroy(struct bloom_spectrum *spectrum, struct quota *quota) { for (int i = 0; i < BLOOM_SPECTRUM_SIZE; i++) { if (i != spectrum->chosen_one) bloom_destroy(&spectrum->vector[i], quota); } } /* }}} API definition */ tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/0000775000000000000000000000000013306560010017027 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/iterator.h0000664000000000000000000001047613306560010021041 0ustar rootroot#ifndef TARANTOOL_LIB_BITSET_ITERATOR_H_INCLUDED #define TARANTOOL_LIB_BITSET_ITERATOR_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file * @brief Iterator for @link bitset @endlink objects with * expression support. * * @link bitset_iterator @endlink is used to iterate over a result * of the evaluation a @link bitset_expr logical expression * @endlink on a set of bitsets. The iterator evaluates its * expression on the fly, without producing temporary bitsets. * Each iteration (@link bitset_iterator_next @endlink) returns * the next position where a given expression evaluates to true on * a given set of bitsets. 
* * @see expr.h */ #include "bitset/bitset.h" #include "bitset/expr.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @cond false **/ struct bitset_iterator_conj; /** @endcond **/ /** * @brief Bitset Iterator */ struct bitset_iterator { /** @cond false **/ size_t size; size_t capacity; struct bitset_iterator_conj *conjs; struct bitset_page *page; struct bitset_page *page_tmp; void *(*realloc)(void *ptr, size_t size); struct bit_iterator page_it; /** @endcond **/ }; /** * @brief Construct \a it. * * The created iterator must be initialized by * @link bitset_iterator_init @endlink method before first usage. * @param it bitset iterator * @param realloc memory allocator to use */ void bitset_iterator_create(struct bitset_iterator *it, void *(*realloc)(void *ptr, size_t size)); /** * @brief Destruct \a it. * @param it bitset iterator */ void bitset_iterator_destroy(struct bitset_iterator *it); /** * @brief Initialize the \a it using \a expr and \a bitsets and rewind the * iterator to the start position. * * @note It is safe to reinitialize an iterator with a new expression and new * bitsets. All internal buffers are safely reused in this case with minimal * number of new allocations. * * @note @a expr object is only used during initialization time and can be * safetly reused or destroyed just after this call. * * @param it bitset iterator * @param expr bitset expression * @param bitsets array of pointers to bitsets that should be used to bind * the expression parameters. * @param size of @a bitsets array * @retval 0 on success * @retval -1 on memory error * @see expr.h */ int bitset_iterator_init(struct bitset_iterator *it, struct bitset_expr *expr, struct bitset **bitsets, size_t bitsets_size); /** * @brief Rewind the \a it to the start position. 
* @param it bitset iterator * @see @link bitset_iterator_init @endlink */ void bitset_iterator_rewind(struct bitset_iterator *it); /** * @brief Move \a it to a next position * @param it bitset iterator * @return the next offset where the expression evaluates to true * or SIZE_MAX if there is no more bits in the result set. * @see @link bitset_iterator_init @endlink */ size_t bitset_iterator_next(struct bitset_iterator *it); #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LIB_BITSET_ITERATOR_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/index.c0000664000000000000000000002257313306560010020313 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "bitset/index.h" #include "bitset/expr.h" #include "bit/bit.h" #include #include const size_t INDEX_DEFAULT_CAPACITY = 32; void bitset_index_create(struct bitset_index *index, void *(*realloc)(void *ptr, size_t size)) { assert(index != NULL); memset(index, 0, sizeof(*index)); index->realloc = realloc; } void bitset_index_destroy(struct bitset_index *index) { assert(index != NULL); for (size_t b = 0; b < index->capacity; b++) { if (index->bitsets[b] == NULL) break; bitset_destroy(index->bitsets[b]); index->realloc(index->bitsets[b], 0); index->bitsets[b] = NULL; } if (index->capacity > 0) { index->realloc(index->bitsets, 0); index->realloc(index->rollback_buf, 0); } memset(index, 0, sizeof(*index)); } static int bitset_index_reserve(struct bitset_index *index, size_t size) { if (size <= index->capacity) return 0; size_t capacity = (index->capacity > 0) ? index->capacity : INDEX_DEFAULT_CAPACITY; while (capacity <= size) { capacity *= 2; } struct bitset **bitsets = index->realloc(index->bitsets, capacity * sizeof(*index->bitsets)); if (bitsets == NULL) goto error_1; memset(bitsets + index->capacity, 0, (capacity - index->capacity) * sizeof(*index->bitsets)); /* Save bitset ** but do not update index->capacity */ index->bitsets = bitsets; /* Resize rollback buffer */ char *rollback_buf = (char *) index->realloc(index->rollback_buf, capacity); if (rollback_buf == NULL) goto error_1; index->rollback_buf = rollback_buf; /* Initialize bitsets */ for (size_t b = index->capacity; b < capacity; b++) { index->bitsets[b] = index->realloc(NULL, sizeof(*index->bitsets[b])); if (index->bitsets[b] == NULL) goto error_2; bitset_create(index->bitsets[b], index->realloc); } index->capacity = capacity; return 0; error_2: for (size_t b = index->capacity; b < capacity; b++) { if (index->bitsets[b] == NULL) break; bitset_destroy(index->bitsets[b]); index->realloc(index->bitsets[b], 0); index->bitsets[b] = NULL; } error_1: return -1; } int bitset_index_insert(struct 
bitset_index *index, const void *key, size_t key_size, size_t value) { assert(index != NULL); assert(key != NULL); /* * Step 0: allocate enough number of bitsets * * bitset_index_reserve could fail on realloc and return -1. * Do not change anything and return the error to the caller. */ const size_t size = 1 + key_size * CHAR_BIT; if (bitset_index_reserve(index, size) != 0) return -1; /* * Step 1: set the 'flag' bitset * * bitset_set for 'falg' bitset could fail on realloc. * Do not change anything. Do not shrink buffers allocated on step 1. */ int rc = bitset_set(index->bitsets[0], value); if (rc < 0) return -1; assert(rc == 0); /* if 1 then the value is already exist in the index */ if (key_size == 0) /* optimization for empty key */ return 0; index->rollback_buf[0] = (char) rc; /* * Step 2: iterate over 'set' bits in the key and update related bitsets. * * A bitset_set somewhere in the middle also could fail on realloc. * If this happens, we stop processing and jump to the rollback code. * Rollback uses index->rollback_buf buffer to restore previous values * of all bitsets on given position. Remember, that bitset_set * returns 1 if a previous value was 'true' and 0 if it was 'false'. * The buffer is indexed by bytes (char *) instead of bits (bit_set) * because it is a little bit faster here. */ struct bit_iterator bit_it; bit_iterator_init(&bit_it, key, key_size, true); size_t pos = 0; while ((pos = bit_iterator_next(&bit_it)) != SIZE_MAX) { size_t b = pos + 1; rc = bitset_set(index->bitsets[b], value); if (rc < 0) goto rollback; index->rollback_buf[b] = (char) rc; } return 0; rollback: /* * Rollback changes done by Step 2. */ bit_iterator_init(&bit_it, key, size, true); size_t rpos; while ((rpos = bit_iterator_next(&bit_it)) != SIZE_MAX && rpos < pos) { size_t b = rpos + 1; if (index->rollback_buf[b] == 1) { bitset_set(index->bitsets[b], value); } else { bitset_clear(index->bitsets[b], value); } } /* * Rollback changes done by Step 1. 
*/ if (index->rollback_buf[0] == 1) { bitset_set(index->bitsets[0], value); } else { bitset_clear(index->bitsets[0], value); } return -1; } void bitset_index_remove_value(struct bitset_index *index, size_t value) { assert(index != NULL); if (index->capacity == 0) return; for (size_t b = 1; b < index->capacity; b++) { if (index->bitsets[b] == NULL) continue; /* Ignore all errors here */ bitset_clear(index->bitsets[b], value); } bitset_clear(index->bitsets[0], value); } bool bitset_index_contains_value(struct bitset_index *index, size_t value) { assert(index != NULL); return bitset_test(index->bitsets[0], value); } int bitset_index_expr_all(struct bitset_expr *expr) { (void) index; bitset_expr_clear(expr); if (bitset_expr_add_conj(expr) != 0) return -1; if (bitset_expr_add_param(expr, 0, false) != 0) return -1; return 0; } int bitset_index_expr_equals(struct bitset_expr *expr, const void *key, size_t key_size) { bitset_expr_clear(expr); if (bitset_expr_add_conj(expr) != 0) return -1; for (size_t pos = 0; pos < key_size * CHAR_BIT; pos++) { size_t b = pos + 1; bool bit_exist = bit_test(key, pos); if (bitset_expr_add_param(expr, b, !bit_exist) != 0) return -1; } if (bitset_expr_add_param(expr, 0, false) != 0) { return -1; } return 0; } int bitset_index_expr_all_set(struct bitset_expr *expr, const void *key, size_t key_size) { bitset_expr_clear(expr); if (bitset_expr_add_conj(expr) != 0) return -1; if (key_size == 0) return 0; /* optimization for empty key */ struct bit_iterator bit_it; bit_iterator_init(&bit_it, key, key_size, true); size_t pos; while ( (pos = bit_iterator_next(&bit_it)) != SIZE_MAX ) { size_t b = pos + 1; if (bitset_expr_add_param(expr, b, false) != 0) return -1; } return 0; } int bitset_index_expr_any_set(struct bitset_expr *expr, const void *key, size_t key_size) { bitset_expr_clear(expr); if (key_size == 0) return 0; /* optimization for empty key */ struct bit_iterator bit_it; bit_iterator_init(&bit_it, key, key_size, true); size_t pos; while ( 
(pos = bit_iterator_next(&bit_it)) != SIZE_MAX) { size_t b = pos + 1; if (bitset_expr_add_conj(expr) != 0) return -1; if (bitset_expr_add_param(expr, b, false) != 0) return -1; } return 0; } int bitset_index_expr_all_not_set(struct bitset_expr *expr, const void *key, size_t key_size) { bitset_expr_clear(expr); if (bitset_expr_add_conj(expr) != 0) return -1; if (bitset_expr_add_param(expr, 0, false) != 0) return -1; if (key_size == 0) return 0; /* optimization for empty key */ struct bit_iterator bit_it; bit_iterator_init(&bit_it, key, key_size, true); size_t pos; while ( (pos = bit_iterator_next(&bit_it)) != SIZE_MAX) { size_t b = pos + 1; if (bitset_expr_add_param(expr, b, true) != 0) return -1; } return 0; } int bitset_index_init_iterator(struct bitset_index *index, struct bitset_iterator *it, struct bitset_expr *expr) { assert(index != NULL); assert(it != NULL); /* Check that we have all required bitsets */ size_t max = 0; for (size_t c = 0; c < expr->size; c++) { for (size_t b = 0; b < expr->conjs[c].size; b++) { if (expr->conjs[c].bitset_ids[b] > max) { max = expr->conjs[c].bitset_ids[b]; } } } /* Resize the index with empty bitsets */ if (bitset_index_reserve(index, max + 1) != 0) return -1; return bitset_iterator_init(it, expr, index->bitsets, index->capacity); } size_t bitset_index_bsize(const struct bitset_index *index) { size_t result = 0; for (size_t b = 0; b < index->capacity; b++) { if (index->bitsets[b] == NULL) continue; struct bitset_info info; bitset_info(index->bitsets[b], &info); result += info.page_total_size * info.pages; } return result; } extern inline size_t bitset_index_size(const struct bitset_index *index); extern inline size_t bitset_index_count(const struct bitset_index *index, size_t bit); tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/expr.c0000664000000000000000000001052613306560010020155 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "bitset/expr.h" #include #include #include const size_t EXPR_DEFAULT_CAPACITY = 2; const size_t EXPR_CONJ_DEFAULT_CAPACITY = 32; void bitset_expr_create(struct bitset_expr *expr, void *(*realloc)(void *ptr, size_t size)) { memset(expr, 0, sizeof(*expr)); expr->realloc = realloc; } void bitset_expr_destroy(struct bitset_expr *expr) { for (size_t c = 0; c < expr->size; c++) { if (expr->conjs[c].capacity == 0) continue; expr->realloc(expr->conjs[c].bitset_ids, 0); expr->realloc(expr->conjs[c].pre_nots, 0); } if (expr->capacity > 0) { expr->realloc(expr->conjs, 0); } memset(expr, 0, sizeof(*expr)); } void bitset_expr_clear(struct bitset_expr *expr) { for (size_t c = 0; c < expr->size; c++) { memset(expr->conjs[c].bitset_ids, 0, expr->conjs[c].size * sizeof(*expr->conjs[c].bitset_ids)); memset(expr->conjs[c].pre_nots, 0, expr->conjs[c].size * sizeof(*expr->conjs[c].pre_nots)); expr->conjs[c].size = 0; } expr->size = 0; } static int bitset_expr_reserve(struct bitset_expr *expr, size_t size) { if (size <= expr->capacity) return 0; size_t capacity = (expr->capacity > 0) ? expr->capacity : EXPR_DEFAULT_CAPACITY; while (capacity <= expr->size) { capacity *= 2; } struct bitset_expr_conj *conjs = expr->realloc(expr->conjs, capacity * sizeof(*expr->conjs)); if (conjs == NULL) return -1; memset(conjs + expr->capacity, 0, (capacity - expr->capacity) * sizeof(*expr->conjs)); expr->conjs = conjs; expr->capacity = capacity; return 0; } int bitset_expr_add_conj(struct bitset_expr *expr) { if (bitset_expr_reserve(expr, expr->size + 1) != 0) return -1; expr->size++; return 0; } static int bitset_expr_conj_reserve(struct bitset_expr *expr, struct bitset_expr_conj *conj, size_t size) { if (size <= conj->capacity) return 0; size_t capacity = (conj->capacity > 0) ? 
conj->capacity : EXPR_CONJ_DEFAULT_CAPACITY; while (capacity <= conj->size) { capacity *= 2; } size_t *bitset_ids = expr->realloc(conj->bitset_ids, capacity * sizeof(*conj->bitset_ids)); if (bitset_ids == NULL) goto error_1; bool *pre_nots = expr->realloc(conj->pre_nots, capacity * sizeof(*conj->pre_nots)); if (pre_nots == NULL) goto error_2; memset(bitset_ids + conj->capacity, 0, (capacity - conj->capacity) * sizeof(*conj->bitset_ids)); memset(pre_nots + conj->capacity, 0, (capacity - conj->capacity) * sizeof(*conj->pre_nots)); conj->bitset_ids = bitset_ids; conj->pre_nots = pre_nots; conj->capacity = capacity; return 0; error_2: expr->realloc(bitset_ids, 0); error_1: return -1; } int bitset_expr_add_param(struct bitset_expr *expr, size_t bitset_id, bool pre_not) { assert(expr->size > 0); struct bitset_expr_conj *conj = &expr->conjs[expr->size - 1]; if (bitset_expr_conj_reserve(expr, conj, conj->size + 1) != 0) return -1; conj->bitset_ids[conj->size] = bitset_id; conj->pre_nots[conj->size] = pre_not; conj->size++; return 0; } tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/page.h0000664000000000000000000001321313306560010020114 0ustar rootroot#ifndef TARANTOOL_LIB_BITSET_PAGE_H_INCLUDED #define TARANTOOL_LIB_BITSET_PAGE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file * @brief Bitset page * * Private header file, please don't use directly. * @internal */ #include "bitset/bitset.h" #include #if defined(DEBUG) #include /* for dumping bitset_page to file */ #endif /* defined(DEBUG) */ #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { /** How many bytes to store in one page */ BITSET_PAGE_DATA_SIZE = 160 }; #if defined(ENABLE_AVX) typedef __m256i bitset_word_t; #define BITSET_PAGE_DATA_ALIGNMENT 32 #elif defined(ENABLE_SSE2) typedef __m128i bitset_word_t; #define BITSET_PAGE_DATA_ALIGNMENT 16 #elif defined(__x86_64__) typedef uint64_t bitset_word_t; #define BITSET_PAGE_DATA_ALIGNMENT 1 #else #define BITSET_PAGE_DATA_ALIGNMENT 1 typedef uint32_t bitset_word_t; #endif #if (defined(__GLIBC__) && (__WORDSIZE == 64) && \ ((__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))) || \ (defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)) || \ (defined(__APPLE__)) /** * @brief Defined if malloc is 16-byte aligned * @see http://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html * @see http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c * @see 
http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c * @see man malloc(2) on OS X */ #define MALLOC_ALIGNMENT 16 #else #define MALLOC_ALIGNMENT 8 #endif /* aligned malloc */ inline size_t bitset_page_alloc_size(void *(*realloc_arg)(void *ptr, size_t size)) { if (BITSET_PAGE_DATA_ALIGNMENT <= 1 || ( (MALLOC_ALIGNMENT % BITSET_PAGE_DATA_ALIGNMENT == 0) && (sizeof(struct bitset_page) % BITSET_PAGE_DATA_ALIGNMENT == 0) && (realloc_arg == realloc))) { /* Alignment is not needed */ return sizeof(struct bitset_page) + BITSET_PAGE_DATA_SIZE; } return sizeof(struct bitset_page) + BITSET_PAGE_DATA_SIZE + BITSET_PAGE_DATA_ALIGNMENT; } #undef MALLOC_ALIGNMENT inline void * bitset_page_data(struct bitset_page *page) { uintptr_t r = (uintptr_t) (page->data + BITSET_PAGE_DATA_ALIGNMENT - 1); return (void *) (r & ~((uintptr_t) BITSET_PAGE_DATA_ALIGNMENT - 1)); } inline void bitset_page_create(struct bitset_page *page) { size_t size = ((char *) bitset_page_data(page) - (char *) page) + BITSET_PAGE_DATA_SIZE; memset(page, 0, size); } inline void bitset_page_destroy(struct bitset_page *page) { (void) page; /* nothing */ } inline size_t bitset_page_first_pos(size_t pos) { return pos - (pos % (BITSET_PAGE_DATA_SIZE * CHAR_BIT)); } inline void bitset_page_set_zeros(struct bitset_page *page) { void *data = bitset_page_data(page); memset(data, 0, BITSET_PAGE_DATA_SIZE); } inline void bitset_page_set_ones(struct bitset_page *page) { void *data = bitset_page_data(page); memset(data, -1, BITSET_PAGE_DATA_SIZE); } inline void bitset_page_and(struct bitset_page *dst, struct bitset_page *src) { bitset_word_t *d = (bitset_word_t *) bitset_page_data(dst); bitset_word_t *s = (bitset_word_t *) bitset_page_data(src); assert(BITSET_PAGE_DATA_SIZE % sizeof(bitset_word_t) == 0); int cnt = BITSET_PAGE_DATA_SIZE / sizeof(bitset_word_t); for (int i = 0; i < cnt; i++) { *d++ &= *s++; } } inline void bitset_page_nand(struct bitset_page *dst, struct bitset_page *src) { bitset_word_t *d = 
(bitset_word_t *) bitset_page_data(dst); bitset_word_t *s = (bitset_word_t *) bitset_page_data(src); assert(BITSET_PAGE_DATA_SIZE % sizeof(bitset_word_t) == 0); int cnt = BITSET_PAGE_DATA_SIZE / sizeof(bitset_word_t); for (int i = 0; i < cnt; i++) { *d++ &= ~*s++; } } inline void bitset_page_or(struct bitset_page *dst, struct bitset_page *src) { bitset_word_t *d = (bitset_word_t *) bitset_page_data(dst); bitset_word_t *s = (bitset_word_t *) bitset_page_data(src); assert(BITSET_PAGE_DATA_SIZE % sizeof(bitset_word_t) == 0); int cnt = BITSET_PAGE_DATA_SIZE / sizeof(bitset_word_t); for (int i = 0; i < cnt; i++) { *d++ |= *s++; } } #if defined(DEBUG) void bitset_page_dump(struct bitset_page *page, FILE *stream); #endif /* defined(DEBUG) */ rb_proto(, bitset_pages_, bitset_pages_t, struct bitset_page) #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LIB_BITSET_PAGE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/page.c0000664000000000000000000000534613306560010020117 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "page.h" #include "bitset/bitset.h" extern inline size_t bitset_page_alloc_size(void *(*realloc_arg)(void *ptr, size_t size)); extern inline void * bitset_page_data(struct bitset_page *page); extern inline void bitset_page_create(struct bitset_page *page); extern inline void bitset_page_destroy(struct bitset_page *page); extern inline size_t bitset_page_first_pos(size_t pos); extern inline void bitset_page_set_zeros(struct bitset_page *page); extern inline void bitset_page_set_ones(struct bitset_page *page); extern inline void bitset_page_and(struct bitset_page *dst, struct bitset_page *src); extern inline void bitset_page_nand(struct bitset_page *dst, struct bitset_page *src); extern inline void bitset_page_or(struct bitset_page *dst, struct bitset_page *src); #if defined(DEBUG) void bitset_page_dump(struct bitset_page *page, FILE *stream) { fprintf(stream, "Page %zu:\n", page->first_pos); char *d = bitset_page_data(page); for (int i = 0; i < BITSET_PAGE_DATA_SIZE; i++) { fprintf(stream, "%x ", *d); d++; } fprintf(stream, "\n--\n"); } #endif /* defined(DEBUG) */ static inline int page_cmp(const struct bitset_page *a, const struct bitset_page *b) { if (a->first_pos < b->first_pos) { return -1; } else if (a->first_pos > b->first_pos) { return 1; } else { return 0; } } rb_gen(, bitset_pages_, bitset_pages_t, struct bitset_page, node, page_cmp) tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/index.h0000664000000000000000000002500513306560010020311 0ustar 
rootroot#ifndef TARANTOOL_LIB_BITSET_INDEX_H_INCLUDED #define TARANTOOL_LIB_BITSET_INDEX_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file * @brief bitset_index - a bit index based on @link bitset @endlink. * * @section Purpose * * bitset_index is an associative container that stores (key, * value) pairs in a way that is optimized for searching values * matching a logical expressions on bits of the key. The * organization structure of bitset_index makes it easy to respond * to queries like 'return all (key, value) pairs where the key * has bit i and bit j set'. The implementation supports * evaluation of arbitrary logical expressions represented in * Disjunctive Normal Form. 
* * To search over keys in a bitset_index, a logical expression * needs to be constructed. * logical expression. The expression can be constructed one time * and used for multiple queries. A search for an exact match for * a given key is not what bitset_index is designed for -- * a conventional TREE or HASH index suits this task better. * * @section Organization * * bitset_index is a compressed bit matrix with dimensions N+1xK, * where N corresponds to the bit count of the longest key present * in the index, and K is the maximal value present in the index. * Each column in the matrix stands for a single bit of the key * and is represented by a single bitset. * If there is value k, which corresponding key has bit i set, * then bitset i+1 will have bit k set. * For example, if a pair with (key, value) is inserted to the * index and its key, has 0, 2, 5, 6 bits set then bitsets #1, #3, * #6, #7 are set at position = pair.value (@link bitset_test * bitset_test(bitset, pair.value) @endlink is true) and bitsets * #2, #4, #7 , ... are unset at the position. * * bitset_index also uses a special bitset #0 that is set to true * for each position where a pair with value = position exists in * an index. This bitset is mostly needed for evaluation * expressions with binary NOTs. * * A consequence of to the above design, is that in a bitset_index * one can have multiple pairs with same key, but all values in an * index must be unique. * * @section Performance * * For a certain kind of tasks bitset_index is more efficient both * speed- and memory- wise than a binary search tree or a hash * table. * * The complexity of @link bitset_insert @endlink operation is * mostly equivalent to inserting one value into \a k balanced * binary search trees, each of size \a m, where \a k is the number of * set bits in the key and \ m is the number of pairs in the index * divided by bitset page size. 
* * The complexity of iteration is linear from the number of pairs * in which the search expression evaluates to true. The * complexity of an iterator expression does not affect * iteration performance directly, which is more dependent * on the number of matching values. * * The actual performance heavily depends on the distribution of * values. If the value space is dense, then internal bitsets are * also compact and better optimized for iteration. * * @section Limitations * * Key size is limited only by the available memory. * bitset_index automatically resizes on 'insert' if a key * contains more bits than in any key inserted thus far. * * Since values are used as a position in bitsets, the actual * range of values must be in [0..SIZE_MAX) range. * * @see bitset.h * @see expr.h * @see iterator.h */ #include "bitset/bitset.h" #include "bitset/iterator.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * @brief BitsetIndex */ struct bitset_index { /** @cond false **/ /* Used bitsets */ struct bitset **bitsets; /* Capacity of bitsets array */ size_t capacity; /* Memory allocator to use */ void *(*realloc)(void *ptr, size_t size); /* A buffer used for rollback changes in bitset_insert */ char *rollback_buf; /** @endcond **/ }; /** * @brief Construct \a index * @param index bitset index * @param realloc memory allocator to use */ void bitset_index_create(struct bitset_index *index, void *(*realloc)(void *ptr, size_t size)); /** * @brief Destruct \a index * @param index bitset index */ void bitset_index_destroy(struct bitset_index *index); /** * @brief Insert (\a key, \a value) pair into \a index. * \a value must be unique in the index. * This method is atomic, i.e. \a index will be in a consistent * state after a return even in case of error. 
* * @param index object * @param key key * @param key_size size of the key * @param value value * @retval 0 on success * @retval -1 on memory error */ int bitset_index_insert(struct bitset_index *index, const void *key, size_t key_size, size_t value); /** * @brief Remove a pair with \a value (*, \a value) from \a index. * @param index bitset index * @param value value */ void bitset_index_remove_value(struct bitset_index *index, size_t value); /** * @brief Initialize \a expr to iterate over a bitset index. * The \a expr can be then passed to @link bitset_index_init_iterator @endlink. * * 'All' algorithm. Matches all pairs in a index. * * @param expr bitset expression * @retval 0 on success * @retval -1 on memory error * @see @link bitset_index_init_iterator @endlink * @see expr.h */ int bitset_index_expr_all(struct bitset_expr *expr); /** * @brief Initialize \a expr to iterate over a bitset index. * The \a expr can be then passed to @link bitset_index_init_iterator @endlink. * * 'Equals' algorithm. Matches all pairs where \a key exactly equals to * pair.key (\a key == pair.key). * * @param expr bitset expression * @param key key * @param key_size of \a key (in char, as sizeof returns) * @retval 0 on success * @retval -1 on memory error * @see @link bitset_index_init_iterator @endlink * @see expr.h */ int bitset_index_expr_equals(struct bitset_expr *expr, const void *key, size_t key_size); /** * @brief Initialize \a expr to iterate over a bitset index. * The \a expr can be then passed to @link bitset_index_init_iterator @endlink. * * 'All-Bits-Set' algorithm. Matches all pairs where all bits from \a key * are set in pair.key ((\a key & pair.key) == \a key). * * @param expr bitset expression * @retval 0 on success * @retval -1 on memory error * @see @link bitset_index_init_iterator @endlink * @see expr.h */ int bitset_index_expr_all_set(struct bitset_expr *expr, const void *key, size_t key_size); /** * @brief Initialize \a expr to iterate over a bitset index. 
* The \a expr can then be passed to @link bitset_index_init_iterator @endlink. * * 'Any-Bits-Set' algorithm. Matches all pairs where at least one bit from * \a key is set in pair.key ((\a key & pair.key) != 0). * * @param expr bitset expression * @retval 0 on success * @retval -1 on memory error * @see @link bitset_index_init_iterator @endlink * @see expr.h */ int bitset_index_expr_any_set(struct bitset_expr *expr, const void *key, size_t key_size); /** * @brief Initialize \a expr to iterate over a bitset index. * The \a expr can be then passed to @link bitset_index_init_iterator @endlink. * * 'All-Bits-Not-Set' algorithm. Matches all pairs in the \a index, where all * bits from the \a key is not set in pair.key ((\a key & pair.key) == 0). * * @param expr bitset expression * @retval 0 on success * @retval -1 on memory error * @see @link bitset_index_init_iterator @endlink * @see expr.h */ int bitset_index_expr_all_not_set(struct bitset_expr *expr, const void *key, size_t key_size); /** * @brief Initialize \a it using \a expr and bitsets used in \a index. * * @param index bitset index * @param it bitset iterator * @param expr bitset expression * @retval 0 on success * @retval 1 on memory error */ int bitset_index_init_iterator(struct bitset_index *index, struct bitset_iterator *it, struct bitset_expr *expr); /** * @brief Checks if a (*, \a value) pair exists in \a index * @param index bitset index * @param value * @retval true if \a index contains pair with the \a value * @retval false otherwise */ bool bitset_index_contains_value(struct bitset_index *index, size_t value); /** * @brief Return the number of pairs in \a index. 
* @param index bitset index * @return number of pairs in \a index */ inline size_t bitset_index_size(const struct bitset_index *index) { return bitset_cardinality(index->bitsets[0]); } /** * @brief Returns the number of (key, value ) pairs where @a bit is set in key * @param index bitset index * @param bit bit * @retval the number of (key, value ) pairs where (@a bit & key) != 0 */ inline size_t bitset_index_count(const struct bitset_index *index, size_t bit) { if (bit + 1 >= index->capacity) return 0; return bitset_cardinality(index->bitsets[bit + 1]); } /** * @brief Return the number of bytes used by index. Only dynamically allocated * data are counted (i.e. sizeof(struct bitset_index) is not counted) * @param index bitset index * @return number of bytes used by index. */ size_t bitset_index_bsize(const struct bitset_index *index); #if defined(DEBUG) void bitset_index_dump(struct bitset_index *index, int verbose, FILE *stream); #endif /* defined(DEBUG) */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LIB_BITSET_INDEX_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/CMakeLists.txt0000664000000000000000000000031413306560010021565 0ustar rootrootset(lib_sources bitset.c page.c expr.c iterator.c index.c ) set_source_files_compile_flags(${lib_sources}) add_library(bitset STATIC ${lib_sources}) target_link_libraries(bitset bit) tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/iterator.c0000664000000000000000000002441513306560010021032 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "bitset/iterator.h" #include "bitset/expr.h" #include "page.h" #include const size_t ITERATOR_DEFAULT_CAPACITY = 2; const size_t ITERATOR_CONJ_DEFAULT_CAPACITY = 32; struct bitset_iterator_conj { size_t page_first_pos; size_t size; size_t capacity; struct bitset **bitsets; bool *pre_nots; struct bitset_page **pages; }; /** * @brief Construct iterator * @param it iterator * @param realloc memory allocator to use */ void bitset_iterator_create(struct bitset_iterator *it, void *(*realloc)(void *ptr, size_t size)) { memset(it, 0, sizeof(*it)); it->realloc = realloc; } /** * @brief Destroys the @a it object * @param it object * @see bitset_iterator_new */ void bitset_iterator_destroy(struct bitset_iterator *it) { for (size_t c = 0; c < it->size; c++) { if (it->conjs[c].capacity == 0) continue; it->realloc(it->conjs[c].bitsets, 0); it->realloc(it->conjs[c].pre_nots, 0); it->realloc(it->conjs[c].pages, 0); } if (it->capacity > 0) { it->realloc(it->conjs, 0); } if (it->page != NULL) { bitset_page_destroy(it->page); it->realloc(it->page, 0); } 
if (it->page_tmp != NULL) { bitset_page_destroy(it->page_tmp); it->realloc(it->page_tmp, 0); } memset(it, 0, sizeof(*it)); } static int bitset_iterator_reserve(struct bitset_iterator *it, size_t size) { if (size <= it->capacity) return 0; size_t capacity = (it->capacity > 0) ? it->capacity : ITERATOR_DEFAULT_CAPACITY; while (capacity <= size) { capacity *= 2; } struct bitset_iterator_conj *conjs = it->realloc(it->conjs, capacity * sizeof(*it->conjs)); if (conjs == NULL) return -1; memset(conjs + it->capacity, 0, (capacity - it->capacity) * sizeof(*it->conjs)); it->conjs = conjs; it->capacity = capacity; return 0; } static int bitset_iterator_conj_reserve(struct bitset_iterator *it, struct bitset_iterator_conj *conj, size_t size) { if (size <= conj->capacity) return 0; size_t capacity = (conj->capacity > 0) ? conj->capacity : ITERATOR_CONJ_DEFAULT_CAPACITY; while (capacity <= size) { capacity *= 2; } struct bitset **bitsets = it->realloc(conj->bitsets, capacity * sizeof(*conj->bitsets)); if (bitsets == NULL) goto error_1; bool *pre_nots = it->realloc(conj->pre_nots, capacity * sizeof(*conj->pre_nots)); if (pre_nots == NULL) goto error_2; struct bitset_page **pages = it->realloc(conj->pages, capacity * sizeof(*conj->pages)); if (pages == NULL) goto error_3; memset(bitsets + conj->capacity, 0, (capacity - conj->capacity) * sizeof(*conj->bitsets)); memset(pre_nots + conj->capacity, 0, (capacity - conj->capacity) * sizeof(*conj->pre_nots)); memset(pages + conj->capacity, 0, (capacity - conj->capacity) * sizeof(*conj->pages)); conj->bitsets = bitsets; conj->pre_nots = pre_nots; conj->pages = pages; conj->capacity = capacity; return 0; error_3: it->realloc(pre_nots, 0); error_2: it->realloc(bitsets, 0); error_1: return -1; } int bitset_iterator_init(struct bitset_iterator *it, struct bitset_expr *expr, struct bitset **p_bitsets, size_t bitsets_size) { assert(it != NULL); assert(expr != NULL); if (bitsets_size > 0) { assert(p_bitsets != NULL); } size_t page_alloc_size = 
bitset_page_alloc_size(it->realloc); if (it->page != NULL) { bitset_page_destroy(it->page); } else { it->page = it->realloc(NULL, page_alloc_size); } bitset_page_create(it->page); if (it->page_tmp != NULL) { bitset_page_destroy(it->page_tmp); } else { it->page_tmp = it->realloc(NULL, page_alloc_size); if (it->page_tmp == NULL) return -1; } bitset_page_create(it->page_tmp); if (bitset_iterator_reserve(it, expr->size) != 0) return -1; for (size_t c = 0; c < expr->size; c++) { struct bitset_expr_conj *exconj = &expr->conjs[c]; struct bitset_iterator_conj *itconj = &it->conjs[c]; itconj->page_first_pos = 0; if (bitset_iterator_conj_reserve(it, itconj, exconj->size) != 0) return -1; for (size_t b = 0; b < exconj->size; b++) { assert(exconj->bitset_ids[b] < bitsets_size); assert(p_bitsets[exconj->bitset_ids[b]] != NULL); itconj->bitsets[b] = p_bitsets[exconj->bitset_ids[b]]; itconj->pre_nots[b] = exconj->pre_nots[b]; itconj->pages[b] = NULL; } itconj->size = exconj->size; } it->size = expr->size; bitset_iterator_rewind(it); return 0; } static void bitset_iterator_conj_rewind(struct bitset_iterator_conj *conj, size_t pos) { assert(conj != NULL); assert(pos % (BITSET_PAGE_DATA_SIZE * CHAR_BIT) == 0); assert(conj->page_first_pos <= pos); if (conj->size == 0) { conj->page_first_pos = SIZE_MAX; return; } struct bitset_page key; key.first_pos = pos; restart: for (size_t b = 0; b < conj->size; b++) { conj->pages[b] = bitset_pages_nsearch(&conj->bitsets[b]->pages, &key); #if 0 if (conj->pages[b] != NULL) { fprintf(stderr, "rewind [%zu] => %zu (%p)\n", b, conj->pages[b]->first_pos, conj->pages[b]); } else { fprintf(stderr, "rewind [%zu] => NULL\n", b); } #endif if (conj->pre_nots[b]) continue; /* bitset b does not have more pages */ if (conj->pages[b] == NULL) { conj->page_first_pos = SIZE_MAX; return; } assert(conj->pages[b]->first_pos >= key.first_pos); /* bitset b have a next page, but it is beyond pos scope */ if (conj->pages[b]->first_pos > key.first_pos) { key.first_pos = 
conj->pages[b]->first_pos; goto restart; } } conj->page_first_pos = key.first_pos; } static int bitset_iterator_conj_cmp(const void *p1, const void *p2) { assert(p1 != NULL && p2 != NULL); struct bitset_iterator_conj *conj1 = (struct bitset_iterator_conj *) p1; struct bitset_iterator_conj *conj2 = (struct bitset_iterator_conj *) p2; if (conj1->page_first_pos < conj2->page_first_pos) { return -1; } else if (conj1->page_first_pos > conj2->page_first_pos) { return 1; } else { return 0; } } static void bitset_iterator_conj_prepare_page(struct bitset_iterator_conj *conj, struct bitset_page *dst) { assert(conj != NULL); assert(dst != NULL); assert(conj->size > 0); assert(conj->page_first_pos != SIZE_MAX); bitset_page_set_ones(dst); for (size_t b = 0; b < conj->size; b++) { if (!conj->pre_nots[b]) { /* conj->pages[b] is rewinded to conj->page_first_pos */ assert(conj->pages[b]->first_pos == conj->page_first_pos); bitset_page_and(dst, conj->pages[b]); } else { /* * If page is NULL or its position is not equal * to conj->page_first_pos then conj->bitset[b] * does not have page with the required position and * all bits in this page are considered to be zeros. * Since NAND(a, zeros) => a, we can simple skip this * bitset here. 
*/ if (conj->pages[b] == NULL || conj->pages[b]->first_pos != conj->page_first_pos) continue; bitset_page_nand(dst, conj->pages[b]); } } } static void bitset_iterator_prepare_page(struct bitset_iterator *it) { qsort(it->conjs, it->size, sizeof(*it->conjs), bitset_iterator_conj_cmp); bitset_page_set_zeros(it->page); if (it->size > 0) { it->page->first_pos = it->conjs[0].page_first_pos; } else { it->page->first_pos = SIZE_MAX; } /* There is no more conjunctions that can be ORed */ if (it->page->first_pos == SIZE_MAX) return; /* For each conj where conj->page_first_pos == pos */ for (size_t c = 0; c < it->size; c++) { if (it->conjs[c].page_first_pos > it->page->first_pos) break; /* Get result from conj */ bitset_iterator_conj_prepare_page(&it->conjs[c], it->page_tmp); /* OR page from conjunction with it->page */ bitset_page_or(it->page, it->page_tmp); } /* Init the bit iterator on it->page */ bit_iterator_init(&it->page_it, bitset_page_data(it->page), BITSET_PAGE_DATA_SIZE, true); } static void bitset_iterator_first_page(struct bitset_iterator *it) { assert(it != NULL); /* Rewind all conjunctions to first positions */ for (size_t c = 0; c < it->size; c++) { bitset_iterator_conj_rewind(&it->conjs[c], 0); } /* Prepare the result page */ bitset_iterator_prepare_page(it); } static void bitset_iterator_next_page(struct bitset_iterator *it) { assert(it != NULL); size_t PAGE_BIT = BITSET_PAGE_DATA_SIZE * CHAR_BIT; size_t pos = it->page->first_pos; /* Rewind all conjunctions that at the current position to the * next position */ for (size_t c = 0; c < it->size; c++) { if (it->conjs[c].page_first_pos > pos) break; bitset_iterator_conj_rewind(&it->conjs[c], pos + PAGE_BIT); assert(pos + PAGE_BIT <= it->conjs[c].page_first_pos); } /* Prepare the result page */ bitset_iterator_prepare_page(it); } void bitset_iterator_rewind(struct bitset_iterator *it) { assert(it != NULL); /* Prepare first page */ bitset_iterator_first_page(it); } size_t bitset_iterator_next(struct 
bitset_iterator *it) { assert(it != NULL); while (true) { if (it->page->first_pos == SIZE_MAX) return SIZE_MAX; size_t pos = bit_iterator_next(&it->page_it); if (pos != SIZE_MAX) { return it->page->first_pos + pos; } bitset_iterator_next_page(it); } } tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/bitset.h0000664000000000000000000001140213306560010020470 0ustar rootroot#ifndef TARANTOOL_LIB_BITSET_BITSET_H_INCLUDED #define TARANTOOL_LIB_BITSET_BITSET_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file * @brief Module to work with arrays of bits (bitsets) * * Bitset is an array of bits where each bit can be set or unset * independently. The bits of a @link bitset @endlink are indexed * by \a size_t position number. 
Initially all bits are set to * false. You can use any values in range [0,SIZE_MAX). The * container grows automatically. */ #include "bit/bit.h" #include #include #include #include #include /* ssize_t for rbtree */ #if defined(DEBUG) #include /* for dumping debug output to FILE */ #endif /* defined(DEBUG) */ /** @cond false */ #define RB_COMPACT 1 #include /** @endcond */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @cond false */ struct bitset_page { size_t first_pos; rb_node(struct bitset_page) node; size_t cardinality; uint8_t data[0]; }; typedef rb_tree(struct bitset_page) bitset_pages_t; /** @endcond */ /** * Bitset */ struct bitset { /** @cond false */ bitset_pages_t pages; size_t cardinality; void *(*realloc)(void *ptr, size_t size); /** @endcond */ }; /** * @brief Construct \a bitset * @param bitset bitset * @param realloc memory allocator to use */ void bitset_create(struct bitset *bitset, void *(*realloc)(void *ptr, size_t size)); /** * @brief Destruct \a bitset * @param bitset bitset */ void bitset_destroy(struct bitset *bitset); /** * @brief Test bit \a pos in \a bitset * @param bitset bitset * @param pos bit number * @retval true if \a pos is set in \a bitset * @retval false if \a pos is not set in \a bitset */ bool bitset_test(struct bitset *bitset, size_t pos); /** * @brief Set bit \a pos in \a bitset * @param bitset bitset * @param pos bit number * @retval 1 on success if previous value of \a pos was true * @retval 0 on success if previous value of \a pos was false * @retval -1 on memory error */ int bitset_set(struct bitset *bitset, size_t pos); /** * @brief Clear bit \a pos in \a bitset * @param bitset bitset * @param pos bit number * @retval 1 on success if previous value of \a pos was true * @retval 0 on success if previous value of \a pos was false * @retval -1 on memory error */ int bitset_clear(struct bitset *bitset, size_t pos); /** * @brief Return the number of bits set to \a true in \a bitset. 
* @param bitset bitset * @return returns the number of bits set to \a true in \a bitset. */ inline size_t bitset_cardinality(const struct bitset *bitset) { return bitset->cardinality; } /** * @brief Bitset Information structure * @see bitset_info */ struct bitset_info { /** Number of allocated pages */ size_t pages; /** Data (payload) size of one page (in bytes) */ size_t page_data_size; /** Full size of one page (in bytes, including padding and tree data) */ size_t page_total_size; /** A multiplier by which an address of page data is aligned **/ size_t page_data_alignment; }; /** * @brief Fill information about \a bitset * @param bitset bitset * @param stat structure to fill */ void bitset_info(struct bitset *bitset, struct bitset_info *info); #if defined(DEBUG) void bitset_dump(struct bitset *bitset, int verbose, FILE *stream); #endif /* defined(DEBUG) */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LIB_BITSET_BITSET_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/bitset.c0000664000000000000000000001615213306560010020472 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "bitset/bitset.h" #include "page.h" #include #include #include void bitset_create(struct bitset *bitset, void *(*realloc)(void *ptr, size_t size)) { memset(bitset, 0, sizeof(*bitset)); bitset->realloc = realloc; /* Initialize pages tree */ bitset_pages_new(&bitset->pages); } static struct bitset_page * bitset_destroy_iter_cb(bitset_pages_t *t, struct bitset_page *page, void *arg) { (void) t; struct bitset *bitset = (struct bitset *) arg; bitset_page_destroy(page); bitset->realloc(page, 0); return NULL; } void bitset_destroy(struct bitset *bitset) { bitset_pages_iter(&bitset->pages, NULL, bitset_destroy_iter_cb, bitset); memset(&bitset->pages, 0, sizeof(bitset->pages)); } bool bitset_test(struct bitset *bitset, size_t pos) { struct bitset_page key; key.first_pos = bitset_page_first_pos(pos); /* Find a page in pages tree */ struct bitset_page *page = bitset_pages_search(&bitset->pages, &key); if (page == NULL) return false; assert(page->first_pos <= pos && pos < page->first_pos + BITSET_PAGE_DATA_SIZE * CHAR_BIT); return bit_test(bitset_page_data(page), pos - page->first_pos); } int bitset_set(struct bitset *bitset, size_t pos) { struct bitset_page key; key.first_pos = bitset_page_first_pos(pos); /* Find a page in pages tree */ struct bitset_page *page = bitset_pages_search(&bitset->pages, &key); if (page == NULL) { /* Allocate a new page */ size_t size = bitset_page_alloc_size(bitset->realloc); page = bitset->realloc(NULL, size); if (page == NULL) 
return -1; bitset_page_create(page); page->first_pos = key.first_pos; /* Insert the page into pages tree */ bitset_pages_insert(&bitset->pages, page); } assert(page->first_pos <= pos && pos < page->first_pos + BITSET_PAGE_DATA_SIZE * CHAR_BIT); bool prev = bit_set(bitset_page_data(page), pos - page->first_pos); if (prev) { /* Value has not changed */ return 1; } bitset->cardinality++; page->cardinality++; return 0; } int bitset_clear(struct bitset *bitset, size_t pos) { struct bitset_page key; key.first_pos = bitset_page_first_pos(pos); /* Find a page in the pages tree */ struct bitset_page *page = bitset_pages_search(&bitset->pages, &key); if (page == NULL) return 0; assert(page->first_pos <= pos && pos < page->first_pos + BITSET_PAGE_DATA_SIZE * CHAR_BIT); bool prev = bit_clear(bitset_page_data(page), pos - page->first_pos); if (!prev) { return 0; } assert(bitset->cardinality > 0); assert(page->cardinality > 0); bitset->cardinality--; page->cardinality--; if (page->cardinality == 0) { /* Remove the page from the pages tree */ bitset_pages_remove(&bitset->pages, page); /* Free the page */ bitset_page_destroy(page); bitset->realloc(page, 0); } return 1; } extern inline size_t bitset_cardinality(const struct bitset *bitset); void bitset_info(struct bitset *bitset, struct bitset_info *info) { memset(info, 0, sizeof(*info)); info->page_data_size = BITSET_PAGE_DATA_SIZE; info->page_total_size = bitset_page_alloc_size(bitset->realloc); info->page_data_alignment = BITSET_PAGE_DATA_ALIGNMENT; size_t cardinality_check = 0; struct bitset_page *page = bitset_pages_first(&bitset->pages); while (page != NULL) { info->pages++; cardinality_check += page->cardinality; page = bitset_pages_next(&bitset->pages, page); } assert(bitset_cardinality(bitset) == cardinality_check); } #if defined(DEBUG) void bitset_dump(struct bitset *bitset, int verbose, FILE *stream) { struct bitset_info info; bitset_info(bitset, &info); size_t PAGE_BIT = (info.page_data_size * CHAR_BIT); fprintf(stream, 
"Bitset %p\n", bitset); fprintf(stream, "{\n"); fprintf(stream, " " "page_size = %zu/%zu /* (data / total) */\n", info.page_data_size, info.page_total_size); fprintf(stream, " " "page_bit = %zu\n", PAGE_BIT); fprintf(stream, " " "pages = %zu\n", info.pages); size_t cardinality = bitset_cardinality(bitset); size_t capacity = PAGE_BIT * info.pages; fprintf(stream, " " "cardinality = %zu\n", cardinality); fprintf(stream, " " "capacity = %zu\n", capacity); if (capacity > 0) { fprintf(stream, " " "utilization = %-8.4f%% (%zu / %zu)\n", (float) cardinality * 100.0 / (capacity), cardinality, capacity ); } else { fprintf(stream, " " "utilization = undefined\n"); } size_t mem_data = info.page_data_size * info.pages; size_t mem_total = info.page_total_size * info.pages; fprintf(stream, " " "mem_data = %zu bytes\n", mem_data); fprintf(stream, " " "mem_total = %zu bytes " "/* data + padding + tree */\n", mem_total); if (cardinality > 0) { fprintf(stream, " " "density = %-8.4f bytes per value\n", (float) mem_total / cardinality); } else { fprintf(stream, " " "density = undefined\n"); } if (verbose < 1) { goto exit; } fprintf(stream, " " "pages = {\n"); for (struct bitset_page *page = bitset_pages_first(&bitset->pages); page != NULL; page = bitset_pages_next(&bitset->pages, page)) { size_t page_last_pos = page->first_pos + BITSET_PAGE_DATA_SIZE * CHAR_BIT; fprintf(stream, " " "[%zu, %zu) ", page->first_pos, page_last_pos); fprintf(stream, "utilization = %8.4f%% (%zu/%zu)", (float) page->cardinality * 1e2 / PAGE_BIT, page->cardinality, PAGE_BIT); if (verbose < 2) { fprintf(stream, "\n"); continue; } fprintf(stream, " "); fprintf(stream, "vals = {"); size_t pos = 0; struct bit_iterator it; bit_iterator_init(&it, bitset_page_data(page), BITSET_PAGE_DATA_SIZE, true); while ( (pos = bit_iterator_next(&it)) != SIZE_MAX) { fprintf(stream, "%zu, ", page->first_pos + pos); } fprintf(stream, "}\n"); } fprintf(stream, " " "}\n"); exit: fprintf(stream, "}\n"); } #endif /* defined(DEBUG) */ 
tarantool_1.9.1.26.g63eb81e3c/src/lib/bitset/expr.h0000664000000000000000000001132013306560010020153 0ustar rootroot#ifndef TARANTOOL_LIB_BITSET_EXPR_H_INCLUDED #define TARANTOOL_LIB_BITSET_EXPR_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file * @brief Expressions on bitsets. * * This library provides full support for evaluation of logical expressions * on @link bitset bitsets @endlink. One can prepare an arbitrary logical * expression in Disjunctive normal form (DNF) using @link bitset_expr @endlink * methods and then evaluate the expression on the set of @link bitset @endlink * objects. Currently only @link bitset_iterator @endlink supports expressions. 
* It can be used for performing iteration over the expression result on the fly, * without producing temporary bitsets. * * @link bitset_expr @endlink holds any expression that can be represented * in DNF form. Since every propositional formula can be represented using DNF, * one can construct any such logical expression using methods from this module. * * A DNF example: (~b0 & b1 & ~b2) | (b2 & ~b3 & b4) | (b3 & b6) * where b[0-9] is an arbitrary bitset. * * @link bitset_expr @endlink does not operate directly on @link bitset @endlink * objects. Instead of this, one should use placeholders (identifiers) * which will be bound to the actual bitsets by the selected evaluator * (e.g. bitset_iterator). * * @link http://en.wikipedia.org/wiki/Disjunctive_normal_form @endlink * @note Reduce operations in both cases are left-associate. * * @see bitset_iterator_init */ #include "bitset.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @cond false **/ struct bitset_expr_conj { size_t size; size_t capacity; size_t *bitset_ids; bool *pre_nots; }; /** @endcond **/ /** * @brief Bitset Expression */ struct bitset_expr { /** @cond false **/ /** Size of \a conjs array **/ size_t size; /** Capacity of \a conjs array **/ size_t capacity; /** Array of conjunctions **/ struct bitset_expr_conj *conjs; /** Memory allocator **/ void *(*realloc)(void *ptr, size_t size); /** @endcond **/ }; /** * @brief Construct bitset expression \a expr * @param expr bitset expression * @param realloc memory allocator to use */ void bitset_expr_create(struct bitset_expr *expr, void *(*realloc)(void *ptr, size_t size)); /** * @brief Destruct bitset expression \a expr * @param expr bitset expression */ void bitset_expr_destroy(struct bitset_expr *expr); /** * @brief Clear @a expr (remove all conjunctions from it) * @param expr bitset expression * @note Allocated memory is not freed. One can continue using the object * after this operation. 
Use @link bitset_expr_destroy @endlink to destroy * the object completely. */ void bitset_expr_clear(struct bitset_expr *expr); /** * @brief Add a new conjunction to \a expr. * @param expr bitset expression * @retval 0 on success * @retval -1 on memory error */ int bitset_expr_add_conj(struct bitset_expr *expr); /** * @brief Add a new placeholder for a bitset to the current conjunction. * @param expr bitset expression * @param bitset_id identifier of a bitset (placeholder) * @param pre_not if set to true, then logical NOT will be performed on * the bitset during evaluation process. * @retval 0 on success * @retval -1 on memory error */ int bitset_expr_add_param(struct bitset_expr *expr, size_t bitset_id, bool pre_not); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LIB_BITSET_EXPR_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/0000775000000000000000000000000013306562360017220 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/hints.c0000644000000000000000000003451413306562360020516 0ustar rootroot/* * Copyright (c) 2013-2017 MsgPuck Authors * All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "msgpuck.h" /** * This lookup table used by mp_sizeof() to determine enum mp_type by the first * byte of MsgPack element. */ const enum mp_type mp_type_hint[256]= { /* {{{ MP_UINT (fixed) */ /* 0x00 */ MP_UINT, /* 0x01 */ MP_UINT, /* 0x02 */ MP_UINT, /* 0x03 */ MP_UINT, /* 0x04 */ MP_UINT, /* 0x05 */ MP_UINT, /* 0x06 */ MP_UINT, /* 0x07 */ MP_UINT, /* 0x08 */ MP_UINT, /* 0x09 */ MP_UINT, /* 0x0a */ MP_UINT, /* 0x0b */ MP_UINT, /* 0x0c */ MP_UINT, /* 0x0d */ MP_UINT, /* 0x0e */ MP_UINT, /* 0x0f */ MP_UINT, /* 0x10 */ MP_UINT, /* 0x11 */ MP_UINT, /* 0x12 */ MP_UINT, /* 0x13 */ MP_UINT, /* 0x14 */ MP_UINT, /* 0x15 */ MP_UINT, /* 0x16 */ MP_UINT, /* 0x17 */ MP_UINT, /* 0x18 */ MP_UINT, /* 0x19 */ MP_UINT, /* 0x1a */ MP_UINT, /* 0x1b */ MP_UINT, /* 0x1c */ MP_UINT, /* 0x1d */ MP_UINT, /* 0x1e */ MP_UINT, /* 0x1f */ MP_UINT, /* 0x20 */ MP_UINT, /* 0x21 */ MP_UINT, /* 0x22 */ MP_UINT, /* 0x23 */ MP_UINT, /* 0x24 */ MP_UINT, /* 0x25 */ MP_UINT, /* 0x26 */ MP_UINT, /* 0x27 */ MP_UINT, /* 0x28 */ MP_UINT, /* 0x29 */ MP_UINT, /* 0x2a */ MP_UINT, /* 0x2b */ MP_UINT, /* 0x2c */ MP_UINT, /* 0x2d */ MP_UINT, /* 0x2e */ MP_UINT, /* 0x2f */ MP_UINT, /* 0x30 */ MP_UINT, /* 0x31 */ MP_UINT, /* 0x32 */ MP_UINT, /* 0x33 */ MP_UINT, /* 0x34 */ MP_UINT, /* 0x35 */ MP_UINT, /* 0x36 */ MP_UINT, /* 0x37 */ MP_UINT, /* 0x38 */ MP_UINT, /* 0x39 */ MP_UINT, /* 0x3a */ MP_UINT, /* 0x3b */ MP_UINT, /* 0x3c */ MP_UINT, /* 0x3d */ MP_UINT, /* 0x3e */ MP_UINT, /* 0x3f */ MP_UINT, /* 
0x40 */ MP_UINT, /* 0x41 */ MP_UINT, /* 0x42 */ MP_UINT, /* 0x43 */ MP_UINT, /* 0x44 */ MP_UINT, /* 0x45 */ MP_UINT, /* 0x46 */ MP_UINT, /* 0x47 */ MP_UINT, /* 0x48 */ MP_UINT, /* 0x49 */ MP_UINT, /* 0x4a */ MP_UINT, /* 0x4b */ MP_UINT, /* 0x4c */ MP_UINT, /* 0x4d */ MP_UINT, /* 0x4e */ MP_UINT, /* 0x4f */ MP_UINT, /* 0x50 */ MP_UINT, /* 0x51 */ MP_UINT, /* 0x52 */ MP_UINT, /* 0x53 */ MP_UINT, /* 0x54 */ MP_UINT, /* 0x55 */ MP_UINT, /* 0x56 */ MP_UINT, /* 0x57 */ MP_UINT, /* 0x58 */ MP_UINT, /* 0x59 */ MP_UINT, /* 0x5a */ MP_UINT, /* 0x5b */ MP_UINT, /* 0x5c */ MP_UINT, /* 0x5d */ MP_UINT, /* 0x5e */ MP_UINT, /* 0x5f */ MP_UINT, /* 0x60 */ MP_UINT, /* 0x61 */ MP_UINT, /* 0x62 */ MP_UINT, /* 0x63 */ MP_UINT, /* 0x64 */ MP_UINT, /* 0x65 */ MP_UINT, /* 0x66 */ MP_UINT, /* 0x67 */ MP_UINT, /* 0x68 */ MP_UINT, /* 0x69 */ MP_UINT, /* 0x6a */ MP_UINT, /* 0x6b */ MP_UINT, /* 0x6c */ MP_UINT, /* 0x6d */ MP_UINT, /* 0x6e */ MP_UINT, /* 0x6f */ MP_UINT, /* 0x70 */ MP_UINT, /* 0x71 */ MP_UINT, /* 0x72 */ MP_UINT, /* 0x73 */ MP_UINT, /* 0x74 */ MP_UINT, /* 0x75 */ MP_UINT, /* 0x76 */ MP_UINT, /* 0x77 */ MP_UINT, /* 0x78 */ MP_UINT, /* 0x79 */ MP_UINT, /* 0x7a */ MP_UINT, /* 0x7b */ MP_UINT, /* 0x7c */ MP_UINT, /* 0x7d */ MP_UINT, /* 0x7e */ MP_UINT, /* 0x7f */ MP_UINT, /* }}} */ /* {{{ MP_MAP (fixed) */ /* 0x80 */ MP_MAP, /* 0x81 */ MP_MAP, /* 0x82 */ MP_MAP, /* 0x83 */ MP_MAP, /* 0x84 */ MP_MAP, /* 0x85 */ MP_MAP, /* 0x86 */ MP_MAP, /* 0x87 */ MP_MAP, /* 0x88 */ MP_MAP, /* 0x89 */ MP_MAP, /* 0x8a */ MP_MAP, /* 0x8b */ MP_MAP, /* 0x8c */ MP_MAP, /* 0x8d */ MP_MAP, /* 0x8e */ MP_MAP, /* 0x8f */ MP_MAP, /* }}} */ /* {{{ MP_ARRAY (fixed) */ /* 0x90 */ MP_ARRAY, /* 0x91 */ MP_ARRAY, /* 0x92 */ MP_ARRAY, /* 0x93 */ MP_ARRAY, /* 0x94 */ MP_ARRAY, /* 0x95 */ MP_ARRAY, /* 0x96 */ MP_ARRAY, /* 0x97 */ MP_ARRAY, /* 0x98 */ MP_ARRAY, /* 0x99 */ MP_ARRAY, /* 0x9a */ MP_ARRAY, /* 0x9b */ MP_ARRAY, /* 0x9c */ MP_ARRAY, /* 0x9d */ MP_ARRAY, /* 0x9e */ MP_ARRAY, /* 0x9f */ MP_ARRAY, /* }}} */ 
/* {{{ MP_STR (fixed) */ /* 0xa0 */ MP_STR, /* 0xa1 */ MP_STR, /* 0xa2 */ MP_STR, /* 0xa3 */ MP_STR, /* 0xa4 */ MP_STR, /* 0xa5 */ MP_STR, /* 0xa6 */ MP_STR, /* 0xa7 */ MP_STR, /* 0xa8 */ MP_STR, /* 0xa9 */ MP_STR, /* 0xaa */ MP_STR, /* 0xab */ MP_STR, /* 0xac */ MP_STR, /* 0xad */ MP_STR, /* 0xae */ MP_STR, /* 0xaf */ MP_STR, /* 0xb0 */ MP_STR, /* 0xb1 */ MP_STR, /* 0xb2 */ MP_STR, /* 0xb3 */ MP_STR, /* 0xb4 */ MP_STR, /* 0xb5 */ MP_STR, /* 0xb6 */ MP_STR, /* 0xb7 */ MP_STR, /* 0xb8 */ MP_STR, /* 0xb9 */ MP_STR, /* 0xba */ MP_STR, /* 0xbb */ MP_STR, /* 0xbc */ MP_STR, /* 0xbd */ MP_STR, /* 0xbe */ MP_STR, /* 0xbf */ MP_STR, /* }}} */ /* {{{ MP_NIL, MP_BOOL */ /* 0xc0 */ MP_NIL, /* 0xc1 */ MP_EXT, /* never used */ /* 0xc2 */ MP_BOOL, /* 0xc3 */ MP_BOOL, /* }}} */ /* {{{ MP_BIN */ /* 0xc4 */ MP_BIN, /* MP_BIN(8) */ /* 0xc5 */ MP_BIN, /* MP_BIN(16) */ /* 0xc6 */ MP_BIN, /* MP_BIN(32) */ /* }}} */ /* {{{ MP_EXT */ /* 0xc7 */ MP_EXT, /* 0xc8 */ MP_EXT, /* 0xc9 */ MP_EXT, /* }}} */ /* {{{ MP_FLOAT, MP_DOUBLE */ /* 0xca */ MP_FLOAT, /* 0xcb */ MP_DOUBLE, /* }}} */ /* {{{ MP_UINT */ /* 0xcc */ MP_UINT, /* 0xcd */ MP_UINT, /* 0xce */ MP_UINT, /* 0xcf */ MP_UINT, /* }}} */ /* {{{ MP_INT */ /* 0xd0 */ MP_INT, /* MP_INT (8) */ /* 0xd1 */ MP_INT, /* MP_INT (16) */ /* 0xd2 */ MP_INT, /* MP_INT (32) */ /* 0xd3 */ MP_INT, /* MP_INT (64) */ /* }}} */ /* {{{ MP_EXT */ /* 0xd4 */ MP_EXT, /* MP_INT (8) */ /* 0xd5 */ MP_EXT, /* MP_INT (16) */ /* 0xd6 */ MP_EXT, /* MP_INT (32) */ /* 0xd7 */ MP_EXT, /* MP_INT (64) */ /* 0xd8 */ MP_EXT, /* MP_INT (127) */ /* }}} */ /* {{{ MP_STR */ /* 0xd9 */ MP_STR, /* MP_STR(8) */ /* 0xda */ MP_STR, /* MP_STR(16) */ /* 0xdb */ MP_STR, /* MP_STR(32) */ /* }}} */ /* {{{ MP_ARRAY */ /* 0xdc */ MP_ARRAY, /* MP_ARRAY(16) */ /* 0xdd */ MP_ARRAY, /* MP_ARRAY(32) */ /* }}} */ /* {{{ MP_MAP */ /* 0xde */ MP_MAP, /* MP_MAP (16) */ /* 0xdf */ MP_MAP, /* MP_MAP (32) */ /* }}} */ /* {{{ MP_INT */ /* 0xe0 */ MP_INT, /* 0xe1 */ MP_INT, /* 0xe2 */ MP_INT, /* 0xe3 */ 
MP_INT, /* 0xe4 */ MP_INT, /* 0xe5 */ MP_INT, /* 0xe6 */ MP_INT, /* 0xe7 */ MP_INT, /* 0xe8 */ MP_INT, /* 0xe9 */ MP_INT, /* 0xea */ MP_INT, /* 0xeb */ MP_INT, /* 0xec */ MP_INT, /* 0xed */ MP_INT, /* 0xee */ MP_INT, /* 0xef */ MP_INT, /* 0xf0 */ MP_INT, /* 0xf1 */ MP_INT, /* 0xf2 */ MP_INT, /* 0xf3 */ MP_INT, /* 0xf4 */ MP_INT, /* 0xf5 */ MP_INT, /* 0xf6 */ MP_INT, /* 0xf7 */ MP_INT, /* 0xf8 */ MP_INT, /* 0xf9 */ MP_INT, /* 0xfa */ MP_INT, /* 0xfb */ MP_INT, /* 0xfc */ MP_INT, /* 0xfd */ MP_INT, /* 0xfe */ MP_INT, /* 0xff */ MP_INT /* }}} */ }; /** * This lookup table used by mp_next() and mp_check() to determine * size of MsgPack element by its first byte. * A positive value contains size of the element (excluding the first byte). * A negative value means the element is compound (e.g. array or map) * of size (-n). * MP_HINT_* values used for special cases handled by switch() statement. */ const int8_t mp_parser_hint[256] = { /* {{{ MP_UINT(fixed) **/ /* 0x00 */ 0, /* 0x01 */ 0, /* 0x02 */ 0, /* 0x03 */ 0, /* 0x04 */ 0, /* 0x05 */ 0, /* 0x06 */ 0, /* 0x07 */ 0, /* 0x08 */ 0, /* 0x09 */ 0, /* 0x0a */ 0, /* 0x0b */ 0, /* 0x0c */ 0, /* 0x0d */ 0, /* 0x0e */ 0, /* 0x0f */ 0, /* 0x10 */ 0, /* 0x11 */ 0, /* 0x12 */ 0, /* 0x13 */ 0, /* 0x14 */ 0, /* 0x15 */ 0, /* 0x16 */ 0, /* 0x17 */ 0, /* 0x18 */ 0, /* 0x19 */ 0, /* 0x1a */ 0, /* 0x1b */ 0, /* 0x1c */ 0, /* 0x1d */ 0, /* 0x1e */ 0, /* 0x1f */ 0, /* 0x20 */ 0, /* 0x21 */ 0, /* 0x22 */ 0, /* 0x23 */ 0, /* 0x24 */ 0, /* 0x25 */ 0, /* 0x26 */ 0, /* 0x27 */ 0, /* 0x28 */ 0, /* 0x29 */ 0, /* 0x2a */ 0, /* 0x2b */ 0, /* 0x2c */ 0, /* 0x2d */ 0, /* 0x2e */ 0, /* 0x2f */ 0, /* 0x30 */ 0, /* 0x31 */ 0, /* 0x32 */ 0, /* 0x33 */ 0, /* 0x34 */ 0, /* 0x35 */ 0, /* 0x36 */ 0, /* 0x37 */ 0, /* 0x38 */ 0, /* 0x39 */ 0, /* 0x3a */ 0, /* 0x3b */ 0, /* 0x3c */ 0, /* 0x3d */ 0, /* 0x3e */ 0, /* 0x3f */ 0, /* 0x40 */ 0, /* 0x41 */ 0, /* 0x42 */ 0, /* 0x43 */ 0, /* 0x44 */ 0, /* 0x45 */ 0, /* 0x46 */ 0, /* 0x47 */ 0, /* 0x48 */ 0, /* 0x49 */ 
0, /* 0x4a */ 0, /* 0x4b */ 0, /* 0x4c */ 0, /* 0x4d */ 0, /* 0x4e */ 0, /* 0x4f */ 0, /* 0x50 */ 0, /* 0x51 */ 0, /* 0x52 */ 0, /* 0x53 */ 0, /* 0x54 */ 0, /* 0x55 */ 0, /* 0x56 */ 0, /* 0x57 */ 0, /* 0x58 */ 0, /* 0x59 */ 0, /* 0x5a */ 0, /* 0x5b */ 0, /* 0x5c */ 0, /* 0x5d */ 0, /* 0x5e */ 0, /* 0x5f */ 0, /* 0x60 */ 0, /* 0x61 */ 0, /* 0x62 */ 0, /* 0x63 */ 0, /* 0x64 */ 0, /* 0x65 */ 0, /* 0x66 */ 0, /* 0x67 */ 0, /* 0x68 */ 0, /* 0x69 */ 0, /* 0x6a */ 0, /* 0x6b */ 0, /* 0x6c */ 0, /* 0x6d */ 0, /* 0x6e */ 0, /* 0x6f */ 0, /* 0x70 */ 0, /* 0x71 */ 0, /* 0x72 */ 0, /* 0x73 */ 0, /* 0x74 */ 0, /* 0x75 */ 0, /* 0x76 */ 0, /* 0x77 */ 0, /* 0x78 */ 0, /* 0x79 */ 0, /* 0x7a */ 0, /* 0x7b */ 0, /* 0x7c */ 0, /* 0x7d */ 0, /* 0x7e */ 0, /* 0x7f */ 0, /* }}} */ /* {{{ MP_MAP (fixed) */ /* 0x80 */ 0, /* empty map - just skip one byte */ /* 0x81 */ -2, /* 2 elements follow */ /* 0x82 */ -4, /* 0x83 */ -6, /* 0x84 */ -8, /* 0x85 */ -10, /* 0x86 */ -12, /* 0x87 */ -14, /* 0x88 */ -16, /* 0x89 */ -18, /* 0x8a */ -20, /* 0x8b */ -22, /* 0x8c */ -24, /* 0x8d */ -26, /* 0x8e */ -28, /* 0x8f */ -30, /* }}} */ /* {{{ MP_ARRAY (fixed) */ /* 0x90 */ 0, /* empty array - just skip one byte */ /* 0x91 */ -1, /* 1 element follows */ /* 0x92 */ -2, /* 0x93 */ -3, /* 0x94 */ -4, /* 0x95 */ -5, /* 0x96 */ -6, /* 0x97 */ -7, /* 0x98 */ -8, /* 0x99 */ -9, /* 0x9a */ -10, /* 0x9b */ -11, /* 0x9c */ -12, /* 0x9d */ -13, /* 0x9e */ -14, /* 0x9f */ -15, /* }}} */ /* {{{ MP_STR (fixed) */ /* 0xa0 */ 0, /* 0xa1 */ 1, /* 0xa2 */ 2, /* 0xa3 */ 3, /* 0xa4 */ 4, /* 0xa5 */ 5, /* 0xa6 */ 6, /* 0xa7 */ 7, /* 0xa8 */ 8, /* 0xa9 */ 9, /* 0xaa */ 10, /* 0xab */ 11, /* 0xac */ 12, /* 0xad */ 13, /* 0xae */ 14, /* 0xaf */ 15, /* 0xb0 */ 16, /* 0xb1 */ 17, /* 0xb2 */ 18, /* 0xb3 */ 19, /* 0xb4 */ 20, /* 0xb5 */ 21, /* 0xb6 */ 22, /* 0xb7 */ 23, /* 0xb8 */ 24, /* 0xb9 */ 25, /* 0xba */ 26, /* 0xbb */ 27, /* 0xbc */ 28, /* 0xbd */ 29, /* 0xbe */ 30, /* 0xbf */ 31, /* }}} */ /* {{{ MP_NIL, MP_BOOL */ /* 0xc0 
*/ 0, /* MP_NIL */ /* 0xc1 */ 0, /* never used */ /* 0xc2 */ 0, /* MP_BOOL*/ /* 0xc3 */ 0, /* MP_BOOL*/ /* }}} */ /* {{{ MP_BIN */ /* 0xc4 */ MP_HINT_STR_8, /* MP_BIN (8) */ /* 0xc5 */ MP_HINT_STR_16, /* MP_BIN (16) */ /* 0xc6 */ MP_HINT_STR_32, /* MP_BIN (32) */ /* }}} */ /* {{{ MP_EXT */ /* 0xc7 */ MP_HINT_EXT_8, /* MP_EXT (8) */ /* 0xc8 */ MP_HINT_EXT_16, /* MP_EXT (16) */ /* 0xc9 */ MP_HINT_EXT_32, /* MP_EXT (32) */ /* }}} */ /* {{{ MP_FLOAT, MP_DOUBLE */ /* 0xca */ sizeof(float), /* MP_FLOAT */ /* 0xcb */ sizeof(double), /* MP_DOUBLE */ /* }}} */ /* {{{ MP_UINT */ /* 0xcc */ sizeof(uint8_t), /* MP_UINT (8) */ /* 0xcd */ sizeof(uint16_t), /* MP_UINT (16) */ /* 0xce */ sizeof(uint32_t), /* MP_UINT (32) */ /* 0xcf */ sizeof(uint64_t), /* MP_UINT (64) */ /* }}} */ /* {{{ MP_INT */ /* 0xd0 */ sizeof(uint8_t), /* MP_INT (8) */ /* 0xd1 */ sizeof(uint16_t), /* MP_INT (8) */ /* 0xd2 */ sizeof(uint32_t), /* MP_INT (8) */ /* 0xd3 */ sizeof(uint64_t), /* MP_INT (8) */ /* }}} */ /* {{{ MP_EXT (fixext) */ /* 0xd4 */ 2, /* MP_EXT (fixext 8) */ /* 0xd5 */ 3, /* MP_EXT (fixext 16) */ /* 0xd6 */ 5, /* MP_EXT (fixext 32) */ /* 0xd7 */ 9, /* MP_EXT (fixext 64) */ /* 0xd8 */ 17, /* MP_EXT (fixext 128) */ /* }}} */ /* {{{ MP_STR */ /* 0xd9 */ MP_HINT_STR_8, /* MP_STR (8) */ /* 0xda */ MP_HINT_STR_16, /* MP_STR (16) */ /* 0xdb */ MP_HINT_STR_32, /* MP_STR (32) */ /* }}} */ /* {{{ MP_ARRAY */ /* 0xdc */ MP_HINT_ARRAY_16, /* MP_ARRAY (16) */ /* 0xdd */ MP_HINT_ARRAY_32, /* MP_ARRAY (32) */ /* }}} */ /* {{{ MP_MAP */ /* 0xde */ MP_HINT_MAP_16, /* MP_MAP (16) */ /* 0xdf */ MP_HINT_MAP_32, /* MP_MAP (32) */ /* }}} */ /* {{{ MP_INT (fixed) */ /* 0xe0 */ 0, /* 0xe1 */ 0, /* 0xe2 */ 0, /* 0xe3 */ 0, /* 0xe4 */ 0, /* 0xe5 */ 0, /* 0xe6 */ 0, /* 0xe7 */ 0, /* 0xe8 */ 0, /* 0xe9 */ 0, /* 0xea */ 0, /* 0xeb */ 0, /* 0xec */ 0, /* 0xed */ 0, /* 0xee */ 0, /* 0xef */ 0, /* 0xf0 */ 0, /* 0xf1 */ 0, /* 0xf2 */ 0, /* 0xf3 */ 0, /* 0xf4 */ 0, /* 0xf5 */ 0, /* 0xf6 */ 0, /* 0xf7 */ 0, /* 0xf8 */ 0, /* 
0xf9 */ 0, /* 0xfa */ 0, /* 0xfb */ 0, /* 0xfc */ 0, /* 0xfd */ 0, /* 0xfe */ 0, /* 0xff */ 0 /* }}} */ }; const char *mp_char2escape[128] = { "\\u0000", "\\u0001", "\\u0002", "\\u0003", "\\u0004", "\\u0005", "\\u0006", "\\u0007", "\\b", "\\t", "\\n", "\\u000b", "\\f", "\\r", "\\u000e", "\\u000f", "\\u0010", "\\u0011", "\\u0012", "\\u0013", "\\u0014", "\\u0015", "\\u0016", "\\u0017", "\\u0018", "\\u0019", "\\u001a", "\\u001b", "\\u001c", "\\u001d", "\\u001e", "\\u001f", NULL, NULL, "\\\"", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\/", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\\\", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\u007f" }; tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/test/0000755000000000000000000000000013306562360020175 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/test/test.c0000644000000000000000000000517113306562360021324 0ustar rootroot/* * Copyright (C) 2010-2016 Tarantool AUTHORS: * please see AUTHORS file in tarantool/tarantool repository. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "test.h" #include #include enum { MAX_LEVELS = 10 }; static int tests_done[MAX_LEVELS]; static int tests_failed[MAX_LEVELS]; static int plan_test[MAX_LEVELS]; static int level = -1; void __space(FILE *stream) { for (int i = 0 ; i < level; i++) { fprintf(stream, " "); } } void plan(int count) { ++level; plan_test[level] = count; tests_done[level] = 0; tests_failed[level] = 0; __space(stdout); printf("%d..%d\n", 1, plan_test[level]); } int check_plan(void) { int r = 0; if (tests_done[level] != plan_test[level]) { __space(stderr); fprintf(stderr, "# Looks like you planned %d tests but ran %d.\n", plan_test[level], tests_done[level]); r = -1; } if (tests_failed[level]) { __space(stderr); fprintf(stderr, "# Looks like you failed %d test of %d run.\n", tests_failed[level], tests_done[level]); r = tests_failed[level]; } --level; if (level >= 0) { is(r, 0, "subtests"); } return r; } int __ok(int condition, const char *fmt, ...) { va_list ap; __space(stdout); printf("%s %d - ", condition ? 
"ok" : "not ok", ++tests_done[level]); if (!condition) tests_failed[level]++; va_start(ap, fmt); vprintf(fmt, ap); printf("\n"); va_end(ap); return condition; } tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/test/test.h0000644000000000000000000000673113306562360021334 0ustar rootroot/* * Copyright (C) 2010-2016 Tarantool AUTHORS: * please see AUTHORS file in tarantool/tarantool repository. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef TEST_H_INCLUDED #define TEST_H_INCLUDED #include /** @brief example @code #include "test.h" int main(void) { plan(3); // count of test You planned to check ok(1, "Test name 1"); is(4, 2 * 2, "2 * 2 == 4"); isnt(5, 2 * 2, "2 * 2 != 5); return check_plan(); // print resume } @endcode */ /* private function, use ok(...) 
instead */ int __ok(int condition, const char *fmt, ...); /* private function, use note(...) or diag(...) instead */ void __space(FILE *stream); #define msg(stream, ...) ({ __space(stream); fprintf(stream, "# "); \ fprintf(stream, __VA_ARGS__); fprintf(stream, "\n"); }) #define note(...) msg(stdout, __VA_ARGS__) #define diag(...) msg(stderr, __VA_ARGS__) /** @brief set and print plan @param count Before anything else, you need a testing plan. This basically declares how many tests your program is going to run to protect against premature failure. */ void plan(int count); /** @brief check if plan is reached and print report */ int check_plan(void); #define ok(condition, fmt, args...) { \ int res = __ok(condition, fmt, ##args); \ if (!res) { \ __space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ __space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define is(a, b, fmt, args...) { \ int res = __ok((a) == (b), fmt, ##args); \ if (!res) { \ __space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ __space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define isnt(a, b, fmt, args...) { \ int res = __ok((a) != (b), fmt, ##args); \ if (!res) { \ __space(stderr); \ fprintf(stderr, "# Failed test '"); \ fprintf(stderr, fmt, ##args); \ fprintf(stderr, "'\n"); \ __space(stderr); \ fprintf(stderr, "# in %s at line %d\n", __FILE__, __LINE__); \ } \ } #define fail(fmt, args...) 
\ ok(0, fmt, ##args) #endif /* TEST_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/test/CMakeLists.txt0000644000000000000000000000115113306562360022733 0ustar rootrootif(POLICY CMP0037) cmake_policy(SET CMP0037 OLD) # don't blame custom target names endif(POLICY CMP0037) include_directories("../") #find_program(PROVE prove) if (PROVE) set(TEST_RUNNER prove) else() set(TEST_RUNNER) endif() set(alltests) foreach (test msgpuck) add_executable(${test}.test ${test}.c test.c) target_link_libraries(${test}.test msgpuck) list(APPEND alltests ${test}.test_run) add_custom_target(${test}.test_run DEPENDS ${test}.test COMMAND ${TEST_RUNNER} ${PROJECT_BINARY_DIR}/test/${test}.test) endforeach() add_custom_target(test DEPENDS ${alltests}) tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/test/.gitignore0000644000000000000000000000000713306562360022162 0ustar rootroot*.test tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/test/msgpuck.c0000644000000000000000000007306213306562360022022 0ustar rootroot/* * Copyright (c) 2013-2016 MsgPuck Authors * All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "msgpuck.h" #include "test.h" #define BUF_MAXLEN ((1L << 18) - 1) #define STRBIN_MAXLEN (BUF_MAXLEN - 10) static char buf[BUF_MAXLEN + 1]; static char str[STRBIN_MAXLEN]; static char *data = buf + 1; /* use unaligned address to fail early */ #define header() note("*** %s ***", __func__) #define footer() note("*** %s: done ***", __func__) #define SCALAR(x) x #define COMPLEX(x) #define DEFINE_TEST(_type, _complex, _v, _r, _rl) ({ \ const char *d1 = mp_encode_##_type(data, (_v)); \ const char *d2 = data; \ _complex(const char *d3 = data); \ _complex(const char *d4 = data); \ note(""#_type" "#_v""); \ is(mp_check_##_type(data, d1), 0, "mp_check_"#_type"("#_v") == 0"); \ is(mp_decode_##_type(&d2), (_v), "mp_decode(mp_encode("#_v")) == "#_v);\ _complex(mp_next(&d3)); \ _complex(ok(!mp_check(&d4, d3 + _rl), "mp_check("#_v")")); \ is((d1 - data), (_rl), "len(mp_encode_"#_type"("#_v")"); \ is(d1, d2, "len(mp_decode_"#_type"("#_v"))"); \ _complex(is(d1, d3, "len(mp_next_"#_type"("#_v"))")); \ _complex(is(d1, d4, "len(mp_check_"#_type"("#_v"))")); \ is(mp_sizeof_##_type(_v), _rl, "mp_sizeof_"#_type"("#_v")"); \ is(memcmp(data, (_r), (_rl)), 0, "mp_encode("#_v") == "#_r); \ }) #define DEFINE_TEST_STRBIN(_type, _vl) ({ \ note(""#_type" len="#_vl""); \ char *s1 = str; \ for (uint32_t i = 0; i < _vl; i++) { \ s1[i] = 'a' + i % 26; \ } \ const char *d1 = mp_encode_##_type(data, s1, _vl); \ const char *d2; \ uint32_t len2; \ d2 = 
data; \ const char *s2 = mp_decode_##_type(&d2, &len2); \ is(_vl, len2, "len(mp_decode_"#_type"(x, %u))", _vl); \ d2 = data; \ (void) mp_decode_strbin(&d2, &len2); \ is(_vl, len2, "len(mp_decode_strbin(x, %u))", _vl); \ const char *d3 = data; \ mp_next(&d3); \ const char *d4 = data; \ ok(!mp_check(&d4, d3 + _vl), \ "mp_check_"#_type"(mp_encode_"#_type"(x, "#_vl"))"); \ is(d1, d2, "len(mp_decode_"#_type"(x, "#_vl")"); \ is(d1, d3, "len(mp_next_"#_type"(x, "#_vl")"); \ is(d1, d4, "len(mp_check_"#_type"(x, "#_vl")"); \ is(mp_sizeof_##_type(_vl), (uint32_t) (d1 - data), \ "mp_sizeof_"#_type"("#_vl")"); \ is(memcmp(s1, s2, _vl), 0, "mp_encode_"#_type"(x, "#_vl") == x"); \ }) #define test_uint(...) DEFINE_TEST(uint, SCALAR, __VA_ARGS__) #define test_int(...) DEFINE_TEST(int, SCALAR, __VA_ARGS__) #define test_bool(...) DEFINE_TEST(bool, SCALAR, __VA_ARGS__) #define test_float(...) DEFINE_TEST(float, SCALAR, __VA_ARGS__) #define test_double(...) DEFINE_TEST(double, SCALAR, __VA_ARGS__) #define test_strl(...) DEFINE_TEST(strl, COMPLEX, __VA_ARGS__) #define test_binl(...) DEFINE_TEST(binl, COMPLEX, __VA_ARGS__) #define test_array(...) DEFINE_TEST(array, COMPLEX, __VA_ARGS__) #define test_map(...) DEFINE_TEST(map, COMPLEX, __VA_ARGS__) #define test_str(...) DEFINE_TEST_STRBIN(str, __VA_ARGS__) #define test_bin(...) 
DEFINE_TEST_STRBIN(bin, __VA_ARGS__) static int test_uints(void) { plan(135); header(); test_uint(0U, "\x00", 1); test_uint(1U, "\x01", 1); test_uint(0x7eU, "\x7e", 1); test_uint(0x7fU, "\x7f", 1); test_uint(0x80U, "\xcc\x80", 2); test_uint(0xfeU, "\xcc\xfe", 2); test_uint(0xffU, "\xcc\xff", 2); test_uint(0xfffeU, "\xcd\xff\xfe", 3); test_uint(0xffffU, "\xcd\xff\xff", 3); test_uint(0x10000U, "\xce\x00\x01\x00\x00", 5); test_uint(0xfffffffeU, "\xce\xff\xff\xff\xfe", 5); test_uint(0xffffffffU, "\xce\xff\xff\xff\xff", 5); test_uint(0x100000000ULL, "\xcf\x00\x00\x00\x01\x00\x00\x00\x00", 9); test_uint(0xfffffffffffffffeULL, "\xcf\xff\xff\xff\xff\xff\xff\xff\xfe", 9); test_uint(0xffffffffffffffffULL, "\xcf\xff\xff\xff\xff\xff\xff\xff\xff", 9); footer(); return check_plan(); } static int test_ints(void) { plan(153); header(); test_int(-0x01, "\xff", 1); test_int(-0x1e, "\xe2", 1); test_int(-0x1f, "\xe1", 1); test_int(-0x20, "\xe0", 1); test_int(-0x21, "\xd0\xdf", 2); test_int(-0x7f, "\xd0\x81", 2); test_int(-0x80, "\xd0\x80", 2); test_int(-0x81, "\xd1\xff\x7f", 3); test_int(-0x7fff, "\xd1\x80\x01", 3); test_int(-0x8000, "\xd1\x80\x00", 3); test_int(-0x8001, "\xd2\xff\xff\x7f\xff", 5); test_int(-0x7fffffff, "\xd2\x80\x00\x00\x01", 5); test_int(-0x80000000LL, "\xd2\x80\x00\x00\x00", 5); test_int(-0x80000001LL, "\xd3\xff\xff\xff\xff\x7f\xff\xff\xff", 9); test_int(-0x80000001LL, "\xd3\xff\xff\xff\xff\x7f\xff\xff\xff", 9); test_int(-0x7fffffffffffffffLL, "\xd3\x80\x00\x00\x00\x00\x00\x00\x01", 9); test_int((int64_t)-0x8000000000000000LL, "\xd3\x80\x00\x00\x00\x00\x00\x00\x00", 9); footer(); return check_plan(); } static int test_bools(void) { plan(18); header(); test_bool(true, "\xc3", 1); test_bool(false, "\xc2", 1); footer(); return check_plan(); } static int test_floats(void) { plan(27); header(); test_float((float) 1.0, "\xca\x3f\x80\x00\x00", 5); test_float((float) 3.141593, "\xca\x40\x49\x0f\xdc", 5); test_float((float) -1e38f, "\xca\xfe\x96\x76\x99", 5); footer(); 
return check_plan(); } static int test_doubles(void) { plan(27); header(); test_double((double) 1.0, "\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00", 9); test_double((double) 3.141592653589793, "\xcb\x40\x09\x21\xfb\x54\x44\x2d\x18", 9); test_double((double) -1e99, "\xcb\xd4\x7d\x42\xae\xa2\x87\x9f\x2e", 9); footer(); return check_plan(); } static int test_nils(void) { plan(6); header(); const char *d1 = mp_encode_nil(data); const char *d2 = data; const char *d3 = data; const char *d4 = data; note("nil"); mp_decode_nil(&d2); mp_next(&d3); ok(!mp_check(&d4, d3 + 1), "mp_check_nil()"); is((d1 - data), 1, "len(mp_encode_nil() == 1"); is(d1, d2, "len(mp_decode_nil()) == 1"); is(d1, d3, "len(mp_next_nil()) == 1"); is(d1, d4, "len(mp_check_nil()) == 1"); is(mp_sizeof_nil(), 1, "mp_sizeof_nil() == 1"); footer(); return check_plan(); } static int test_arrays(void) { plan(54); header(); test_array(0, "\x90", 1); test_array(1, "\x91", 1); test_array(15, "\x9f", 1); test_array(16, "\xdc\x00\x10", 3); test_array(0xfffe, "\xdc\xff\xfe", 3); test_array(0xffff, "\xdc\xff\xff", 3); test_array(0x10000, "\xdd\x00\x01\x00\x00", 5); test_array(0xfffffffeU, "\xdd\xff\xff\xff\xfe", 5); test_array(0xffffffffU, "\xdd\xff\xff\xff\xff", 5); footer(); return check_plan(); } static int test_maps(void) { plan(54); header(); test_map(0, "\x80", 1); test_map(1, "\x81", 1); test_map(15, "\x8f", 1); test_map(16, "\xde\x00\x10", 3); test_map(0xfffe, "\xde\xff\xfe", 3); test_map(0xffff, "\xde\xff\xff", 3); test_map(0x10000, "\xdf\x00\x01\x00\x00", 5); test_map(0xfffffffeU, "\xdf\xff\xff\xff\xfe", 5); test_map(0xffffffffU, "\xdf\xff\xff\xff\xff", 5); footer(); return check_plan(); } static int test_strls(void) { plan(78); header(); test_strl(0x00U, "\xa0", 1); test_strl(0x01U, "\xa1", 1); test_strl(0x1eU, "\xbe", 1); test_strl(0x1fU, "\xbf", 1); test_strl(0x20U, "\xd9\x20", 2); test_strl(0xfeU, "\xd9\xfe", 2); test_strl(0xffU, "\xd9\xff", 2); test_strl(0x0100U, "\xda\x01\x00", 3); test_strl(0xfffeU, 
"\xda\xff\xfe", 3); test_strl(0xffffU, "\xda\xff\xff", 3); test_strl(0x00010000U, "\xdb\x00\x01\x00\x00", 5); test_strl(0xfffffffeU, "\xdb\xff\xff\xff\xfe", 5); test_strl(0xffffffffU, "\xdb\xff\xff\xff\xff", 5); footer(); return check_plan(); } static int test_binls(void) { plan(78); header(); test_binl(0x00U, "\xc4\x00", 2); test_binl(0x01U, "\xc4\x01", 2); test_binl(0x1eU, "\xc4\x1e", 2); test_binl(0x1fU, "\xc4\x1f", 2); test_binl(0x20U, "\xc4\x20", 2); test_binl(0xfeU, "\xc4\xfe", 2); test_binl(0xffU, "\xc4\xff", 2); test_binl(0x0100U, "\xc5\x01\x00", 3); test_binl(0xfffeU, "\xc5\xff\xfe", 3); test_binl(0xffffU, "\xc5\xff\xff", 3); test_binl(0x00010000U, "\xc6\x00\x01\x00\x00", 5); test_binl(0xfffffffeU, "\xc6\xff\xff\xff\xfe", 5); test_binl(0xffffffffU, "\xc6\xff\xff\xff\xff", 5); footer(); return check_plan(); } static int test_strs(void) { plan(96); header(); test_str(0x01); test_str(0x1e); test_str(0x1f); test_str(0x20); test_str(0xfe); test_str(0xff); test_str(0x100); test_str(0x101); test_str(0xfffe); test_str(0xffff); test_str(0x10000); test_str(0x10001); footer(); return check_plan(); } static int test_bins(void) { plan(96); header(); test_bin(0x01); test_bin(0x1e); test_bin(0x1f); test_bin(0x20); test_bin(0xfe); test_bin(0xff); test_bin(0x100); test_bin(0x101); test_bin(0xfffe); test_bin(0xffff); test_bin(0x10000); test_bin(0x10001); footer(); return check_plan(); } static void test_next_on_array(uint32_t count) { note("next/check on array(%u)", count); char *d1 = data; d1 = mp_encode_array(d1, count); for (uint32_t i = 0; i < count; i++) { d1 = mp_encode_uint(d1, i % 0x7f); /* one byte */ } uint32_t len = count + mp_sizeof_array(count); const char *d2 = data; const char *d3 = data; ok(!mp_check(&d2, data + BUF_MAXLEN), "mp_check(array %u))", count); is((d1 - data), (ptrdiff_t)len, "len(array %u) == %u", count, len); is((d2 - data), (ptrdiff_t)len, "len(mp_check(array %u)) == %u", count, len); mp_next(&d3); is((d3 - data), (ptrdiff_t)len, 
"len(mp_next(array %u)) == %u", count, len); } static int test_next_on_arrays(void) { plan(52); header(); test_next_on_array(0x00); test_next_on_array(0x01); test_next_on_array(0x0f); test_next_on_array(0x10); test_next_on_array(0x11); test_next_on_array(0xfe); test_next_on_array(0xff); test_next_on_array(0x100); test_next_on_array(0x101); test_next_on_array(0xfffe); test_next_on_array(0xffff); test_next_on_array(0x10000); test_next_on_array(0x10001); footer(); return check_plan(); } static void test_next_on_map(uint32_t count) { note("next/check on map(%u)", count); char *d1 = data; d1 = mp_encode_map(d1, count); for (uint32_t i = 0; i < 2 * count; i++) { d1 = mp_encode_uint(d1, i % 0x7f); /* one byte */ } uint32_t len = 2 * count + mp_sizeof_map(count); const char *d2 = data; const char *d3 = data; ok(!mp_check(&d2, data + BUF_MAXLEN), "mp_check(map %u))", count); is((d1 - data), (ptrdiff_t)len, "len(map %u) == %u", count, len); is((d2 - data), (ptrdiff_t)len, "len(mp_check(map %u)) == %u", count, len); mp_next(&d3); is((d3 - data), (ptrdiff_t)len, "len(mp_next(map %u)) == %u", count, len); } static int test_next_on_maps(void) { plan(52); header(); test_next_on_map(0x00); test_next_on_map(0x01); test_next_on_map(0x0f); test_next_on_map(0x10); test_next_on_map(0x11); test_next_on_map(0xfe); test_next_on_map(0xff); test_next_on_map(0x100); test_next_on_map(0x101); test_next_on_map(0xfffe); test_next_on_map(0xffff); test_next_on_map(0x10000); test_next_on_map(0x10001); footer(); return check_plan(); } static void test_compare_uint(uint64_t a, uint64_t b) { char bufa[9]; char bufb[9]; mp_encode_uint(bufa, a); mp_encode_uint(bufb, b); int r = mp_compare_uint(bufa, bufb); if (a < b) { ok(r < 0, "mp_compare_uint(%"PRIu64", %" PRIu64 ") < 0", a, b); } else if (a > b) { ok(r > 0, "mp_compare_uint(%"PRIu64", %" PRIu64") > 0", a, b); } else { ok(r == 0, "mp_compare_uint(%"PRIu64", %"PRIu64") == 0", a, b); } } static int test_compare_uints(void) { plan(227); header(); 
test_compare_uint(0, 0); test_compare_uint(0, 0); uint64_t nums[] = { 0, 1, 0x7eU, 0x7fU, 0x80U, 0xfeU, 0xffU, 0xfffeU, 0xffffU, 0x10000U, 0xfffffffeU, 0xffffffffU, 0x100000000ULL, 0xfffffffffffffffeULL, 0xffffffffffffffffULL }; int count = sizeof(nums) / sizeof(*nums); for (int i = 0; i < count; i++) { for (int j = 0; j < count; j++) { test_compare_uint(nums[i], nums[j]); } } footer(); return check_plan(); } static bool fequal(float a, float b) { return a > b ? a - b < 1e-5f : b - a < 1e-5f; } static bool dequal(double a, double b) { return a > b ? a - b < 1e-10 : b - a < 1e-10; } static int test_format(void) { plan(282); header(); const size_t buf_size = 1024; char buf[buf_size]; size_t sz; const char *fmt; const char *p, *c, *e; uint32_t len = 0; fmt = "%d %u %i %ld %lu %li %lld %llu %lli" "%hd %hu %hi %hhd %hhu %hhi"; sz = mp_format(buf, buf_size, fmt, 1, 2, 3, (long)4, (long)5, (long)6, (long long)7, (long long)8, (long long)9, (short)10, (short)11, (short)12, (char)13, (char)14, (char)15); p = buf; for (unsigned i = 0; i < 15; i++) { ok(mp_typeof(*p) == MP_UINT, "Test type on step %d", i); ok(mp_decode_uint(&p) == i + 1, "Test value on step %d", i); } sz = mp_format(buf, buf_size, fmt, -1, -2, -3, (long)-4, (long)-5, (long)-6, (long long)-7, (long long)-8, (long long)-9, (short)-10, (unsigned short)-11, (short)-12, (signed char)-13, (unsigned char)-14, (signed char)-15); p = buf; for (int i = 0; i < 15; i++) { uint64_t expects[5] = { UINT_MAX - 1, ULONG_MAX - 4, ULLONG_MAX - 7, USHRT_MAX - 10, UCHAR_MAX - 13 }; if (i % 3 == 1) { ok(mp_typeof(*p) == MP_UINT, "Test type on step %d", i); ok(mp_decode_uint(&p) == expects[i / 3], "Test value on step %d", i); } else { ok(mp_typeof(*p) == MP_INT, "Test type on step %d", i); ok(mp_decode_int(&p) == - i - 1, "Test value on step %d", i); } } char data1[32]; char *data1_end = data1; data1_end = mp_encode_array(data1_end, 2); data1_end = mp_encode_str(data1_end, "ABC", 3); data1_end = mp_encode_uint(data1_end, 11); 
size_t data1_len = data1_end - data1; assert(data1_len <= sizeof(data1)); char data2[32]; char *data2_end = data2; data2_end = mp_encode_int(data2_end, -1234567890); data2_end = mp_encode_str(data2_end, "DEFGHIJKLMN", 11); data2_end = mp_encode_uint(data2_end, 321); size_t data2_len = data2_end - data2; assert(data2_len <= sizeof(data2)); fmt = "%d NIL [%d %b %b] this is test" "[%d %%%% [[ %d {%s %f %% %.*s %lf %.*s NIL}" "%p %d %.*p ]] %d%d%d]"; #define TEST_PARAMS 0, 1, true, false, -1, 2, \ "flt", 0.1, 6, "double#ignored", 0.2, 0, "ignore", \ data1, 3, data2_len, data2, 4, 5, 6 sz = mp_format(buf, buf_size, fmt, TEST_PARAMS); p = buf; e = buf + sz; c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 0, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_NIL, "type"); mp_decode_nil(&p); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_ARRAY, "type"); ok(mp_decode_array(&p) == 3, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 1, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_BOOL, "type"); ok(mp_decode_bool(&p) == true, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_BOOL, "type"); ok(mp_decode_bool(&p) == false, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_ARRAY, "type"); ok(mp_decode_array(&p) == 5, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_INT, "type"); ok(mp_decode_int(&p) == -1, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_ARRAY, "type"); ok(mp_decode_array(&p) == 1, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_ARRAY, "type"); ok(mp_decode_array(&p) == 5, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 2, "decode"); c = p; ok(mp_check(&c, e) == 0, 
"check"); ok(mp_typeof(*p) == MP_MAP, "type"); ok(mp_decode_map(&p) == 3, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_STR, "type"); c = mp_decode_str(&p, &len); ok(len == 3, "decode"); ok(memcmp(c, "flt", 3) == 0, "compare"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_FLOAT, "type"); ok(fequal(mp_decode_float(&p), 0.1), "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_STR, "type"); c = mp_decode_str(&p, &len); ok(len == 6, "decode"); ok(memcmp(c, "double", 6) == 0, "compare"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_DOUBLE, "type"); ok(dequal(mp_decode_double(&p), 0.2), "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_STR, "type"); c = mp_decode_str(&p, &len); ok(len == 0, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_NIL, "type"); mp_decode_nil(&p); c = p; ok(mp_check(&c, e) == 0, "check"); ok(((size_t)(c - p) == data1_len) && memcmp(p, data1, data1_len) == 0, "compare"); p = c; c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 3, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_INT, "type"); ok(mp_decode_int(&p) == -1234567890, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_STR, "type"); c = mp_decode_str(&p, &len); ok(len == 11, "decode"); ok(memcmp(c, "DEFGHIJKLMN", 11) == 0, "compare"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 321, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 4, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 5, "decode"); c = p; ok(mp_check(&c, e) == 0, "check"); ok(mp_typeof(*p) == MP_UINT, "type"); ok(mp_decode_uint(&p) == 6, "decode"); ok(p == e, "nothing more"); ok(sz < 
70, "no magic detected"); for (size_t lim = 0; lim <= 70; lim++) { memset(buf, 0, buf_size); size_t test_sz = mp_format(buf, lim, fmt, TEST_PARAMS); ok(test_sz == sz, "return value on step %d", (int)lim); bool all_zero = true; for(size_t z = lim; z < buf_size; z++) all_zero = all_zero && (buf[z] == 0); ok(all_zero, "buffer overflow on step %d", (int)lim); } #undef TEST_PARAMS footer(); return check_plan(); } int test_mp_print() { plan(10); header(); char msgpack[128]; char *d = msgpack; d = mp_encode_array(d, 6); d = mp_encode_int(d, -5); d = mp_encode_uint(d, 42); d = mp_encode_str(d, "kill bill", 9); d = mp_encode_map(d, 6); d = mp_encode_str(d, "bool true", 9); d = mp_encode_bool(d, true); d = mp_encode_str(d, "bool false", 10); d = mp_encode_bool(d, false); d = mp_encode_str(d, "null", 4); d = mp_encode_nil(d); d = mp_encode_str(d, "float", 5); d = mp_encode_float(d, 3.14); d = mp_encode_str(d, "double", 6); d = mp_encode_double(d, 3.14); d = mp_encode_uint(d, 100); d = mp_encode_uint(d, 500); *d++ = 0xd4; /* let's pack smallest fixed ext */ *d++ = 0; *d++ = 0; char bin[] = "\x12test\x34\b\t\n\"bla\\-bla\"\f\r"; d = mp_encode_bin(d, bin, sizeof(bin)); assert(d <= msgpack + sizeof(msgpack)); const char *expected = "[-5, 42, \"kill bill\", " "{\"bool true\": true, \"bool false\": false, \"null\": null, " "\"float\": 3.14, \"double\": 3.14, 100: 500}, undefined, " "\"\\u0012test4\\b\\t\\n\\\"bla\\\\-bla\\\"\\f\\r\\u0000\"]"; int esize = strlen(expected); char result[256]; int fsize = mp_snprint(result, sizeof(result), msgpack); ok(fsize == esize, "mp_snprint return value"); ok(strcmp(result, expected) == 0, "mp_snprint result"); fsize = mp_snprint(NULL, 0, msgpack); ok(fsize == esize, "mp_snprint limit = 0"); fsize = mp_snprint(result, 1, msgpack); ok(fsize == esize && result[0] == '\0', "mp_snprint limit = 1"); fsize = mp_snprint(result, 2, msgpack); ok(fsize == esize && result[1] == '\0', "mp_snprint limit = 2"); fsize = mp_snprint(result, esize, msgpack); 
ok(fsize == esize && result[esize - 1] == '\0', "mp_snprint limit = expected"); fsize = mp_snprint(result, esize + 1, msgpack); ok(fsize == esize && result[esize] == '\0', "mp_snprint limit = expected + 1"); FILE *tmpf = tmpfile(); if (tmpf != NULL) { int fsize = mp_fprint(tmpf, msgpack); ok(fsize == esize, "mp_fprint return value"); (void) rewind(tmpf); int rsize = fread(result, 1, sizeof(result), tmpf); ok(rsize == esize && memcmp(result, expected, esize) == 0, "mp_fprint result"); fclose(tmpf); } /* stdin is read-only */ int rc = mp_fprint(stdin, msgpack); is(rc, -1, "mp_fprint I/O error"); footer(); return check_plan(); } int test_mp_check() { plan(65); header(); #define invalid(data, fmt, ...) ({ \ const char *p = data; \ isnt(mp_check(&p, p + sizeof(data) - 1), 0, fmt, ## __VA_ARGS__); \ }); /* fixmap */ invalid("\x81", "invalid fixmap 1"); invalid("\x81\x01", "invalid fixmap 2"); invalid("\x8f\x01", "invalid fixmap 3"); /* fixarray */ invalid("\x91", "invalid fixarray 1"); invalid("\x92\x01", "invalid fixarray 2"); invalid("\x9f\x01", "invalid fixarray 3"); /* fixstr */ invalid("\xa1", "invalid fixstr 1"); invalid("\xa2\x00", "invalid fixstr 2"); invalid("\xbf\x00", "invalid fixstr 3"); /* bin8 */ invalid("\xc4", "invalid bin8 1"); invalid("\xc4\x01", "invalid bin8 2"); /* bin16 */ invalid("\xc5", "invalid bin16 1"); invalid("\xc5\x00\x01", "invalid bin16 2"); /* bin32 */ invalid("\xc6", "invalid bin32 1"); invalid("\xc6\x00\x00\x00\x01", "invalid bin32 2"); /* ext8 */ invalid("\xc7", "invalid ext8 1"); invalid("\xc7\x00", "invalid ext8 2"); invalid("\xc7\x01\xff", "invalid ext8 3"); invalid("\xc7\x02\xff\x00", "invalid ext8 4"); /* ext16 */ invalid("\xc8", "invalid ext16 1"); invalid("\xc8\x00\x00", "invalid ext16 2"); invalid("\xc8\x00\x01\xff", "invalid ext16 3"); invalid("\xc8\x00\x02\xff\x00", "invalid ext16 4"); /* ext32 */ invalid("\xc9", "invalid ext32 1"); invalid("\xc9\x00\x00\x00\x00", "invalid ext32 2"); invalid("\xc9\x00\x00\x00\x01\xff", 
"invalid ext32 3"); invalid("\xc9\x00\x00\x00\x02\xff\x00", "invalid ext32 4"); /* float32 */ invalid("\xca", "invalid float32 1"); invalid("\xca\x00\x00\x00", "invalid float32 2"); /* float64 */ invalid("\xcb", "invalid float64 1"); invalid("\xcb\x00\x00\x00\x00\x00\x00\x00", "invalid float64 2"); /* uint8 */ invalid("\xcc", "invalid uint8 1"); /* uint16 */ invalid("\xcd\x00", "invalid uint16 1"); /* uint32 */ invalid("\xce\x00\x00\x00", "invalid uint32 1"); /* uint64 */ invalid("\xcf\x00\x00\x00\x00\x00\x00\x00", "invalid uint64 1"); /* int8 */ invalid("\xd0", "invalid int8 1"); /* int16 */ invalid("\xd1\x00", "invalid int16 1"); /* int32 */ invalid("\xd2\x00\x00\x00", "invalid int32 1"); /* int64 */ invalid("\xd3\x00\x00\x00\x00\x00\x00\x00", "invalid int64 1"); /* fixext8 */ invalid("\xd4", "invalid fixext8 1"); invalid("\xd4\x05", "invalid fixext8 2"); /* fixext16 */ invalid("\xd5", "invalid fixext16 1"); invalid("\xd5\x05\x05", "invalid fixext16 2"); /* fixext32 */ invalid("\xd6", "invalid fixext32 1"); invalid("\xd6\x00\x00\x05\x05", "invalid fixext32 2"); /* fixext64 */ invalid("\xd7", "invalid fixext64 1"); invalid("\xd7\x00\x00\x00\x00\x00\x00\x05\x05", "invalid fixext64 2"); /* fixext128 */ invalid("\xd8", "invalid fixext128 1"); invalid("\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x05\x05", "invalid fixext128 2"); /* str8 */ invalid("\xd9", "invalid str8 1"); invalid("\xd9\x01", "invalid str8 2"); /* str16 */ invalid("\xda", "invalid str16 1"); invalid("\xda\x00\x01", "invalid str16 2"); /* str32 */ invalid("\xdb", "invalid str32 1"); invalid("\xdb\x00\x00\x00\x01", "invalid str32 2"); /* array16 */ invalid("\xdc", "invalid array16 1"); invalid("\xdc\x00\x01", "invalid array16 2"); /* array32 */ invalid("\xdd", "invalid array32 1"); invalid("\xdd\x00\x00\x00\x01", "invalid array32 2"); /* map16 */ invalid("\xde", "invalid map16 1"); invalid("\xde\x00\x01", "invalid map16 2"); invalid("\xde\x00\x01\x5", "invalid map16 2"); /* map32 */ 
invalid("\xdf", "invalid map32 1"); invalid("\xdf\x00\x00\x00\x01", "invalid map32 2"); invalid("\xdf\x00\x00\x00\x01\x5", "invalid map32 3"); footer(); return check_plan(); } #define test_read_number(_func, _type, _mp_type, _val, _success) do { \ const char *s = #_func "(mp_encode_" #_mp_type "(" #_val "))"; \ const char *d1 = data; \ const char *d2 = mp_encode_##_mp_type(data, _val); \ _type v; \ int ret = _func(&d1, &v); \ if (_success) { \ is(ret, 0, "%s check success", s); \ is(d1, d2, "%s check pos advanced", s); \ ok(v - _val == 0, "%s check result", s); \ } else { \ is(ret, -1, "%s check fail", s); \ is(d1, data, "%s check pos unchanged", s); \ } \ } while (0) #define test_read_int32(...) test_read_number(mp_read_int32, int32_t, __VA_ARGS__) #define test_read_int64(...) test_read_number(mp_read_int64, int64_t, __VA_ARGS__) #define test_read_double(...) test_read_number(mp_read_double, double, __VA_ARGS__) static int test_numbers() { plan(96); header(); test_read_int32(uint, 123, true); test_read_int32(uint, 12345, true); test_read_int32(uint, 2147483647, true); test_read_int32(uint, 2147483648, false); test_read_int32(int, -123, true); test_read_int32(int, -12345, true); test_read_int32(int, -2147483648, true); test_read_int32(int, -2147483649LL, false); test_read_int32(float, -1e2, false); test_read_int32(double, 1.2345, false); test_read_int32(map, 5, false); test_read_int64(uint, 123, true); test_read_int64(uint, 12345, true); test_read_int64(uint, 123456789, true); test_read_int64(uint, 9223372036854775807ULL, true); test_read_int64(uint, 9223372036854775808ULL, false); test_read_int64(int, -123, true); test_read_int64(int, -12345, true); test_read_int64(int, -123456789, true); test_read_int64(int, -9223372036854775807LL, true); test_read_int64(float, 100, false); test_read_int64(double, -5.4321, false); test_read_int64(array, 10, false); test_read_double(uint, 123, true); test_read_double(uint, 12345, true); test_read_double(uint, 123456789, true); 
test_read_double(uint, 1234567890000ULL, true); test_read_double(uint, 123456789123456789ULL, false); test_read_double(int, -123, true); test_read_double(int, -12345, true); test_read_double(int, -123456789, true); test_read_double(int, -1234567890000LL, true); test_read_double(int, -123456789123456789LL, false); test_read_double(float, 6.565e6, true); test_read_double(double, -5.555, true); test_read_double(strl, 100, false); footer(); return check_plan(); } static int test_overflow() { plan(4); header(); const char *chk; char *d; d = data; chk = data; d = mp_encode_array(d, 1); d = mp_encode_array(d, UINT32_MAX); is(mp_check(&chk, d), 1, "mp_check array overflow") d = data; chk = data; d = mp_encode_array(d, 1); d = mp_encode_map(d, UINT32_MAX); is(mp_check(&chk, d), 1, "mp_check map overflow") d = data; chk = data; d = mp_encode_array(d, 2); d = mp_encode_str(d, "", 0); d = mp_encode_strl(d, UINT32_MAX); is(mp_check(&chk, d), 1, "mp_check str overflow") d = data; chk = data; d = mp_encode_array(d, 2); d = mp_encode_bin(d, "", 0); d = mp_encode_binl(d, UINT32_MAX); is(mp_check(&chk, d), 1, "mp_check bin overflow") footer(); return check_plan(); } int main() { plan(20); test_uints(); test_ints(); test_bools(); test_floats(); test_doubles(); test_nils(); test_strls(); test_binls(); test_strs(); test_bins(); test_arrays(); test_maps(); test_next_on_arrays(); test_next_on_maps(); test_compare_uints(); test_format(); test_mp_print(); test_mp_check(); test_numbers(); test_overflow(); return check_plan(); } tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/AUTHORS0000644000000000000000000000006113306562360020263 0ustar rootrootRoman Tsisyk - initial author tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/rpm/0000755000000000000000000000000013306562360020014 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/rpm/msgpuck.spec0000644000000000000000000000662013306562360022345 0ustar rootrootName: msgpuck Version: 2.0.0 Release: 1%{?dist} Summary: MsgPack binary 
serialization library in a self-contained header Group: Development/Libraries License: BSD URL: https://github.com/rtsisyk/msgpuck Source0: https://github.com/rtsisyk/msgpuck/archive/%{version}/msgpuck-%{version}.tar.gz BuildRequires: gcc BuildRequires: coreutils BuildRequires: cmake >= 2.8 BuildRequires: doxygen >= 1.6.0 # https://fedoraproject.org/wiki/Packaging:Guidelines#Packaging_Header_Only_Libraries # Nothing to add to -debuginfo package - this library is header-only %global debug_package %{nil} %package devel Summary: Lightweight MessagePack library Provides: msgpuck-static = %{version}-%{release} %description MsgPuck is a compact and efficient MessagePack serialization library designed with zero-cost abstractions in mind. Almost all encoding/decoding functions can be fully inlined into your application by C/C++ compiler to reach the maximum performance. MessagePack is an efficient binary serialization format. It lets you exchange data among multiple languages like JSON. But it's faster and smaller. Small integers are encoded into a single byte, and typical short strings require only one extra byte in addition to the strings themselves. %description devel MsgPack is a binary-based efficient object serialization library. It enables to exchange structured objects between many languages like JSON. But unlike JSON, it is very fast and small. msgpuck is very lightweight header-only library designed to be embedded to your application by the C/C++ compiler. The library is fully documented and covered by unit tests. This package provides a self-contained header file and a static library. The static library contains generated code for inline functions and global tables needed by the some library functions. %prep %setup -q -n %{name}-%{version} %build %cmake . 
-DCMAKE_BUILD_TYPE=RelWithDebInfo make %{?_smp_mflags} make man %check make test %install %make_install mkdir -p %{buildroot}%{_mandir}/man3 install -Dpm 0644 doc/man/man3/msgpuck.h.3* %{buildroot}%{_mandir}/man3/ %files devel %{_libdir}/libmsgpuck.a %{_includedir}/msgpuck.h %{_mandir}/man3/msgpuck.h.3* %doc README.md %{!?_licensedir:%global license %doc} %license LICENSE AUTHORS %changelog * Tue Feb 07 2017 Roman Tsisyk 2.0.0-1 - Drop MP_SOURCE support and make libmsgpuck.a to be mandatory - Add helpers to decode any number to int64/double - Add -fPIC to libmsgpuck.a * Fri Dec 16 2016 Roman Tsisyk 1.1.3-1 - Add mp_snprint() function. - Change mp_fprint() to return the number of bytes printed instead of 0. - Fix CVE-2016-9036. * Tue Aug 09 2016 Roman Tsisyk 1.0.3-1 - Add mp_decode_strbin() and mp_decode_strbinl() - Add mp_fprint() for debug output * Tue Feb 02 2016 Roman Tsisyk 1.0.2-1 - Add coreutils and make to BuildRequires (#1295217) - Use `install -Dpm` instead of `cp -p` - Fix GCC 6.0 and Doxygen warnings * Mon Jan 25 2016 Roman Tsisyk 1.0.1-3 - Add `BuildRequires: gcc` (#1295217) * Sun Jan 24 2016 Roman Tsisyk 1.0.1-2 - Fix msgpuck-devel dependencies after removing empty msgpuck package * Fri Jan 22 2016 Roman Tsisyk 1.0.1-1 - Changes according to Fedora review #1295217 - Fix SIGBUS on processesor without HW support for unaligned access * Thu Jul 09 2015 Roman Tsisyk 1.0.0-1 - Initial version of the RPM spec tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/.build.mk0000644000000000000000000000273113306562360020727 0ustar rootroot# Copyright (c) 2015 MsgPuck Authors # All rights reserved. # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # 1. Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL # OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF # THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. test: rm -rf ./test_build mkdir ./test_build && cd ./test_build && cmake .. -DCMAKE_BUILD_TYPE=RelWithDebugInfo cd test_build && $(MAKE) test .PHONY: test tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/LICENSE0000644000000000000000000000241513306562360020225 0ustar rootrootCopyright (c) 2013-2016 MsgPuck Authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/README.md0000644000000000000000000000676713306562360020515 0ustar rootrootMsgPuck ======= [![Travis][travis-badge]][travis-url] [![License][license-badge]][license-url] [![RPM Packages][rpm-badge]][rpm-url] [![Debian Packages][deb-badge]][deb-url] [![Telegram][telegram-badge]][telegram-url] [![Maillist][groups-badge]][groups-url] MsgPuck is a compact and efficient [MessagePack] serialization library: * Zero-cost abstractions and zero overhead * Clean and readable C code ready to use * Easy to incorporate into your project * Fully documented and covered by unit tests * Liberal license (BSD-2) Status ------ MsgPuck is stable, which means it have been used in production without serious bugs for quite a while now. The library is fully documented and covered by unit tests. Please feel free to file a ticket if your have a problem or a question. 
Examples -------- **Encoding:** char buf[1024]; char *w = buf; w = mp_encode_array(w, 4) w = mp_encode_uint(w, 10); w = mp_encode_str(w, "hello world", strlen("hello world")); w = mp_encode_bool(w, true); w = mp_encode_double(w, 3.1415); **Validating:** const char *end = buf + xx; const char *b = buf; int rc = mp_check(&b, end); assert(rc == 0); assert(b == end); **Decoding:** uint32_t size; uint64_t ival; const char *sval; uint32_t sval_len; bool bval; double dval; const char *r = buf; size = mp_decode_array(&r); /* size is 4 */ ival = mp_decode_uint(&r); /* ival is 10; */ sval = mp_decode_str(&r, &sval_len); /* sval is "hello world", sval_len is strlen("hello world") */ bval = mp_decode_bool(&r); /* bval is true */ dval = mp_decode_double(&r); /* dval is 3.1415 */ assert(r == w); Usage ----- You need a C89+ or C++03+ compatible compiler to use msgpuck. Add this project as a submodule or just use libmsgpuck-dev package. MsgPuck is designed to be fully embedded to your application by a C/C++ compiler. However, some functions require auxiliary static tables which should be expanded somewhere in a compilation unit (`*.c` or `*.cc` file). Please link libmsgpuck.a to your binary to avoid problems with unresolved symbols. See Also -------- * [API Documentation](http://rtsisyk.github.io/msgpuck/) * [Specification](https://github.com/msgpack/msgpack/blob/master/spec.md) * [Tests](test) * [Packages](https://tarantool.org/download.html) * [Maillist](https://groups.google.com/forum/#!forum/tarantool) * [Facebook](http://facebook.com/TarantoolDatabase/) * [Telegram Chat][telegram-url] * [Maillist][groups-url] API documentation can be also generated using `make doc` (Doxygen is required). MsgPuck was written to use within [Tarantool](http://tarantool.org) - the world's first full-featured MsgPack-based database. 
[MessagePack]: https://msgpack.org/ [travis-badge]: https://api.travis-ci.org/rtsisyk/msgpuck.svg?branch=master [travis-url]: https://travis-ci.org/rtsisyk/msgpuck [license-badge]: https://img.shields.io/badge/License-BSD--2-lightgray.svg?style=flat [license-url]: LICENSE [deb-badge]: https://img.shields.io/badge/Packages-Debian-red.svg?style=flat [deb-url]: https://packagecloud.io/tarantool/1\_7?filter=debs [rpm-badge]: https://img.shields.io/badge/Packages-RPM-blue.svg?style=flat [rpm-url]: https://packagecloud.io/tarantool/1\_7?filter=rpms [telegram-badge]: https://img.shields.io/badge/Telegram-join%20chat-blue.svg [telegram-url]: http://telegram.me/tarantool [groups-badge]: https://img.shields.io/badge/Google-Groups-orange.svg [groups-url]: https://groups.google.com/forum/#!forum/tarantool tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/CMakeLists.txt0000644000000000000000000000522613306562360021763 0ustar rootrootproject(msgpuck) cmake_minimum_required(VERSION 2.8.5) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99 -fPIC -fstrict-aliasing") set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wall -Wextra -Werror") if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Debug CACHE STRING "Build type, options are: Debug Release." 
FORCE) endif() include(CheckCCompilerFlag) check_c_compiler_flag("-mno-unaligned-access" CC_HAS_MNO_UNALIGNED_ACCESS) add_library(msgpuck STATIC msgpuck.c hints.c) set_target_properties(msgpuck PROPERTIES VERSION 1.0 SOVERSION 1) set_target_properties(msgpuck PROPERTIES OUTPUT_NAME "msgpuck") if (NOT ${PROJECT_SOURCE_DIR} STREQUAL ${CMAKE_SOURCE_DIR}) # Embedded mode, skip tests, documentation and the install targets return() endif() option(ENABLE_GCOV "Enable integration with gcov, a code coverage program" OFF) if (ENABLE_GCOV) set(_flags "-fprofile-arcs -ftest-coverage") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_flags}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${_flags}") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${_flags}") endif() add_subdirectory(test) include(GNUInstallDirs) install(TARGETS msgpuck ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT library) install(FILES msgpuck.h DESTINATION include) find_package(Doxygen) if(NOT DOXYGEN_FOUND) return() endif() set(GENERATE_HTML "NO") set(GENERATE_MAN "YES") configure_file("${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in" "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.man") add_custom_command(OUTPUT doc/man/man3/msgpuck.h.3 COMMAND ${CMAKE_COMMAND} -E make_directory doc/man COMMAND ${DOXYGEN_EXECUTABLE} "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.man" WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" DEPENDS msgpuck.h COMMENT "Generating man pages" VERBATIM) add_custom_target(man DEPENDS doc/man/man3/msgpuck.h.3) set(GENERATE_HTML "YES") set(GENERATE_MAN "NO") configure_file("${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in" "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.html") add_custom_command(OUTPUT doc/html/index.html COMMAND ${CMAKE_COMMAND} -E make_directory doc/html COMMAND ${DOXYGEN_EXECUTABLE} "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.html" COMMAND ${CMAKE_COMMAND} -E rename doc/html/msgpuck_8h.html doc/html/index.html COMMAND sed s/msgpuck_8h\\.html/index\\.html/ -i 
doc/html/index.html WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" DEPENDS msgpuck.h COMMENT "Generating html documentation" VERBATIM) add_custom_target(html DEPENDS doc/html/index.html) add_custom_target(doc DEPENDS man html) tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/.travis.yml0000644000000000000000000000400413306562360021325 0ustar rootrootsudo: false language: C services: - docker cache: directories: - $HOME/.cache env: matrix: - OS=el DIST=6 - OS=el DIST=7 - OS=fedora DIST=24 - OS=fedora DIST=25 - OS=ubuntu DIST=precise - OS=ubuntu DIST=trusty - OS=ubuntu DIST=xenial - OS=ubuntu DIST=yakkety - OS=debian DIST=wheezy - OS=debian DIST=jessie - OS=debian DIST=stretch #matrix: # allow_failures: # - env: OS=el DIST=6 # - env: OS=el DIST=7 # - env: OS=fedora DIST=23 # - env: OS=fedora DIST=24 # - env: OS=fedora DIST=25 # - env: OS=ubuntu DIST=precise # - env: OS=ubuntu DIST=trusty # - env: OS=ubuntu DIST=xenial # - env: OS=ubuntu DIST=yakkety # - env: OS=debian DIST=wheezy # - env: OS=debian DIST=jessie # - env: OS=debian DIST=stretch script: - git describe --long - git clone https://github.com/packpack/packpack.git packpack - packpack/packpack before_deploy: - ls -l build/ deploy: # Deploy packages to PackageCloud - provider: packagecloud username: tarantool repository: "1_6" token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,dsc,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" - provider: packagecloud username: tarantool repository: "1_7" token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,dsc,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" - provider: packagecloud username: tarantool repository: "1_8" token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,dsc,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" 
notifications: email: recipients: - build@tarantool.org on_success: change on_failure: always tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/Doxyfile.in0000644000000000000000000022263613306562360021344 0ustar rootroot# Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "MsgPuck" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. 
The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = doc # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. 
BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. 
# If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. 
INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. 
Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. 
# Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. 
If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. 
EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. 
# If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. 
If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. 
STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. 
SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. 
CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. 
If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = "@CMAKE_CURRENT_SOURCE_DIR@/msgpuck.h" # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. 
EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. 
Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. 
SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = NO #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. 
Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = @GENERATE_HTML@ # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. 
Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. 
A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. 
Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). 
If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. 
For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.tarantool.msgpack # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = YES # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. 
# If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. 
FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want the formulas to look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. 
Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX # packages that should be included in the LaTeX output. 
EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". 
See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. 
RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = @GENERATE_MAN@ # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. 
XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. 
PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. 
PREDEFINED = "MP_PROTO = inline"\ "__attribute__(x)=" # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. 
ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = NO # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. 
When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. 
UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. 
CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. 
If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. 
GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/.gitignore0000644000000000000000000000033313306562360021205 0ustar rootroot*~ *.a *.o *.so* *.dynlib* *.user *.cbp *.log obj-*/ doc/ CMakeFiles/ CMakeCache.txt cmake_install.cmake install_manifest.txt Makefile Doxyfile.html Doxyfile.man debian/* VERSION ./build /build ./test_build /test_build tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/Jenkinsfile0000644000000000000000000000026113306562360021401 0ustar rootrootstage('Build'){ packpack = new org.tarantool.packpack() node { checkout scm packpack.prepareSources() } packpack.packpackBuildMatrix('result') } tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/msgpuck.h0000644000000000000000000014761313306562360021054 0ustar rootroot#ifndef MSGPUCK_H_INCLUDED #define MSGPUCK_H_INCLUDED /* * Copyright (c) 2013-2017 MsgPuck Authors * All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * \file msgpuck.h * MsgPuck * \brief MsgPuck is a simple and efficient MsgPack encoder/decoder * library in a single self-contained file. * * Usage example: * \code * // Encode * char buf[1024]; * char *w = buf; * w = mp_encode_array(w, 4) * w = mp_encode_uint(w, 10); * w = mp_encode_str(w, "hello world", strlen("hello world")); * w = mp_encode_bool(w, true); * w = mp_encode_double(w, 3.1415); * * // Validate * const char *b = buf; * int r = mp_check(&b, w); * assert(!r) * assert(b == w); * * // Decode * uint32_t size; * uint64_t ival; * const char *sval; * uint32_t sval_len; * bool bval; * double dval; * * const char *r = buf; * * size = mp_decode_array(&r); * // size is 4 * * ival = mp_decode_uint(&r); * // ival is 10; * * sval = mp_decode_str(&r, &sval_len); * // sval is "hello world", sval_len is strlen("hello world") * * bval = mp_decode_bool(&r); * // bval is true * * dval = mp_decode_double(&r); * // dval is 3.1415 * * assert(r == w); * \endcode * * \note Supported compilers. * The implementation requires a C99+ or C++03+ compatible compiler. * * \note Inline functions. * The implementation is compatible with both C99 and GNU inline functions. * Please link libmsgpuck.a static library for non-inlined versions of * functions and global tables. 
*/ #if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) #define __STDC_CONSTANT_MACROS 1 /* make С++ to be happy */ #endif #if defined(__cplusplus) && !defined(__STDC_LIMIT_MACROS) #define __STDC_LIMIT_MACROS 1 /* make С++ to be happy */ #endif #include #include #include #include #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /* * {{{ Platform-specific definitions */ /** \cond false **/ #if defined(__CC_ARM) /* set the alignment to 1 for armcc compiler */ #define MP_PACKED __packed #else #define MP_PACKED __attribute__((packed)) #endif #if defined(MP_SOURCE) #error MP_SOURCE is not supported anymore, please link libmsgpuck.a #endif #if defined(__GNUC__) && !defined(__GNUC_STDC_INLINE__) #if !defined(MP_LIBRARY) #define MP_PROTO extern inline #define MP_IMPL extern inline #else /* defined(MP_LIBRARY) */ #define MP_PROTO #define MP_IMPL #endif #define MP_ALWAYSINLINE #else /* C99 inline */ #if !defined(MP_LIBRARY) #define MP_PROTO inline #define MP_IMPL inline #else /* defined(MP_LIBRARY) */ #define MP_PROTO extern inline #define MP_IMPL inline #endif #define MP_ALWAYSINLINE __attribute__((always_inline)) #endif /* GNU inline or C99 inline */ #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || \ defined __SUNPRO_C || defined __SUNPRO_CC #define MP_GCC_VERSION(major, minor) 0 #else #define MP_GCC_VERSION(major, minor) (__GNUC__ > (major) || \ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) #endif #if !defined(__has_builtin) #define __has_builtin(x) 0 /* clang */ #endif #if MP_GCC_VERSION(2, 9) || __has_builtin(__builtin_expect) #define mp_likely(x) __builtin_expect((x), 1) #define mp_unlikely(x) __builtin_expect((x), 0) #else #define mp_likely(x) (x) #define mp_unlikely(x) (x) #endif #if MP_GCC_VERSION(4, 5) || __has_builtin(__builtin_unreachable) #define mp_unreachable() (assert(0), __builtin_unreachable()) #else MP_PROTO void mp_unreachable(void) __attribute__((noreturn)); MP_PROTO void 
mp_unreachable(void) { assert(0); abort(); } #define mp_unreachable() (assert(0)) #endif #define mp_identity(x) (x) /* just to simplify mp_load/mp_store macroses */ #if MP_GCC_VERSION(4, 8) || __has_builtin(__builtin_bswap16) #define mp_bswap_u16(x) __builtin_bswap16(x) #else /* !MP_GCC_VERSION(4, 8) */ #define mp_bswap_u16(x) ( \ (((x) << 8) & 0xff00) | \ (((x) >> 8) & 0x00ff) ) #endif #if MP_GCC_VERSION(4, 3) || __has_builtin(__builtin_bswap32) #define mp_bswap_u32(x) __builtin_bswap32(x) #else /* !MP_GCC_VERSION(4, 3) */ #define mp_bswap_u32(x) ( \ (((x) << 24) & UINT32_C(0xff000000)) | \ (((x) << 8) & UINT32_C(0x00ff0000)) | \ (((x) >> 8) & UINT32_C(0x0000ff00)) | \ (((x) >> 24) & UINT32_C(0x000000ff)) ) #endif #if MP_GCC_VERSION(4, 3) || __has_builtin(__builtin_bswap64) #define mp_bswap_u64(x) __builtin_bswap64(x) #else /* !MP_GCC_VERSION(4, 3) */ #define mp_bswap_u64(x) (\ (((x) << 56) & UINT64_C(0xff00000000000000)) | \ (((x) << 40) & UINT64_C(0x00ff000000000000)) | \ (((x) << 24) & UINT64_C(0x0000ff0000000000)) | \ (((x) << 8) & UINT64_C(0x000000ff00000000)) | \ (((x) >> 8) & UINT64_C(0x00000000ff000000)) | \ (((x) >> 24) & UINT64_C(0x0000000000ff0000)) | \ (((x) >> 40) & UINT64_C(0x000000000000ff00)) | \ (((x) >> 56) & UINT64_C(0x00000000000000ff)) ) #endif #define MP_LOAD_STORE(name, type, bswap) \ MP_PROTO type \ mp_load_##name(const char **data); \ MP_IMPL type \ mp_load_##name(const char **data) \ { \ struct MP_PACKED cast { type val; }; \ type val = bswap(((struct cast *) *data)->val); \ *data += sizeof(type); \ return val; \ } \ MP_PROTO char * \ mp_store_##name(char *data, type val); \ MP_IMPL char * \ mp_store_##name(char *data, type val) \ { \ struct MP_PACKED cast { type val; }; \ ((struct cast *) (data))->val = bswap(val); \ return data + sizeof(type); \ } MP_LOAD_STORE(u8, uint8_t, mp_identity); #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ MP_LOAD_STORE(u16, uint16_t, mp_bswap_u16); MP_LOAD_STORE(u32, uint32_t, mp_bswap_u32); 
MP_LOAD_STORE(u64, uint64_t, mp_bswap_u64); #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ MP_LOAD_STORE(u16, uint16_t, mp_identity); MP_LOAD_STORE(u32, uint32_t, mp_identity); MP_LOAD_STORE(u64, uint64_t, mp_identity); #else #error Unsupported __BYTE_ORDER__ #endif #if !defined(__FLOAT_WORD_ORDER__) #define __FLOAT_WORD_ORDER__ __BYTE_ORDER__ #endif /* defined(__FLOAT_WORD_ORDER__) */ #if __FLOAT_WORD_ORDER__ == __ORDER_LITTLE_ENDIAN__ /* * Idiots from msgpack.org byte-swaps even IEEE754 float/double types. * Some platforms (e.g. arm) cause SIGBUS on attempt to store * invalid float in registers, so code like flt = mp_bswap_float(flt) * can't be used here. */ union MP_PACKED mp_float_cast { uint32_t u32; float f; }; union MP_PACKED mp_double_cast { uint64_t u64; double d; }; MP_PROTO float mp_load_float(const char **data); MP_PROTO double mp_load_double(const char **data); MP_PROTO char * mp_store_float(char *data, float val); MP_PROTO char * mp_store_double(char *data, double val); MP_IMPL float mp_load_float(const char **data) { union mp_float_cast cast = *(union mp_float_cast *) *data; *data += sizeof(cast); cast.u32 = mp_bswap_u32(cast.u32); return cast.f; } MP_IMPL double mp_load_double(const char **data) { union mp_double_cast cast = *(union mp_double_cast *) *data; *data += sizeof(cast); cast.u64 = mp_bswap_u64(cast.u64); return cast.d; } MP_IMPL char * mp_store_float(char *data, float val) { union mp_float_cast cast; cast.f = val; cast.u32 = mp_bswap_u32(cast.u32); *(union mp_float_cast *) (data) = cast; return data + sizeof(cast); } MP_IMPL char * mp_store_double(char *data, double val) { union mp_double_cast cast; cast.d = val; cast.u64 = mp_bswap_u64(cast.u64); *(union mp_double_cast *) (data) = cast; return data + sizeof(cast); } #elif __FLOAT_WORD_ORDER__ == __ORDER_BIG_ENDIAN__ MP_LOAD_STORE(float, float, mp_identity); MP_LOAD_STORE(double, double, mp_identity); #else #error Unsupported __FLOAT_WORD_ORDER__ #endif #undef mp_identity #undef 
MP_LOAD_STORE /** \endcond */ /* * }}} */ /* * {{{ API definition */ /** * \brief MsgPack data types */ enum mp_type { MP_NIL = 0, MP_UINT, MP_INT, MP_STR, MP_BIN, MP_ARRAY, MP_MAP, MP_BOOL, MP_FLOAT, MP_DOUBLE, MP_EXT }; /** * \brief Determine MsgPack type by a first byte \a c of encoded data. * * Example usage: * \code * assert(MP_ARRAY == mp_typeof(0x90)); * \endcode * * \param c - a first byte of encoded data * \return MsgPack type */ MP_PROTO __attribute__((pure)) enum mp_type mp_typeof(const char c); /** * \brief Calculate exact buffer size needed to store an array header of * \a size elements. Maximum return value is 5. For performance reasons you * can preallocate buffer for maximum size without calling the function. * \param size - a number of elements * \return buffer size in bytes (max is 5) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_array(uint32_t size); /** * \brief Encode an array header of \a size elements. * * All array members must be encoded after the header. * * Example usage: * \code * // Encode * char buf[1024]; * char *w = buf; * w = mp_encode_array(w, 2) * w = mp_encode_uint(w, 10); * w = mp_encode_uint(w, 15); * * // Decode * const char *r = buf; * uint32_t size = mp_decode_array(&r); * for (uint32_t i = 0; i < size; i++) { * uint64_t val = mp_decode_uint(&r); * } * assert (r == w); * \endcode * It is your responsibility to ensure that \a data has enough space. 
* \param data - a buffer * \param size - a number of elements * \return \a data + \link mp_sizeof_array() mp_sizeof_array(size) \endlink * \sa mp_sizeof_array */ MP_PROTO char * mp_encode_array(char *data, uint32_t size); /** * \brief Check that \a cur buffer has enough bytes to decode an array header * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_ARRAY */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_array(const char *cur, const char *end); /** * \brief Decode an array header from MsgPack \a data. * * All array members must be decoded after the header. * \param data - the pointer to a buffer * \return the number of elements in an array * \post *data = *data + mp_sizeof_array(retval) * \sa \link mp_encode_array() An usage example \endlink */ MP_PROTO uint32_t mp_decode_array(const char **data); /** * \brief Calculate exact buffer size needed to store a map header of * \a size elements. Maximum return value is 5. For performance reasons you * can preallocate buffer for maximum size without calling the function. * \param size - a number of elements * \return buffer size in bytes (max is 5) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_map(uint32_t size); /** * \brief Encode a map header of \a size elements. * * All map key-value pairs must be encoded after the header. 
* * Example usage: * \code * char buf[1024]; * * // Encode * char *w = buf; * w = mp_encode_map(b, 2); * w = mp_encode_str(b, "key1", 4); * w = mp_encode_str(b, "value1", 6); * w = mp_encode_str(b, "key2", 4); * w = mp_encode_str(b, "value2", 6); * * // Decode * const char *r = buf; * uint32_t size = mp_decode_map(&r); * for (uint32_t i = 0; i < size; i++) { * // Use switch(mp_typeof(**r)) to support more types * uint32_t key_len, val_len; * const char *key = mp_decode_str(&r, key_len); * const char *val = mp_decode_str(&r, val_len); * } * assert (r == w); * \endcode * It is your responsibility to ensure that \a data has enough space. * \param data - a buffer * \param size - a number of key/value pairs * \return \a data + \link mp_sizeof_map() mp_sizeof_map(size)\endlink * \sa mp_sizeof_map */ MP_PROTO char * mp_encode_map(char *data, uint32_t size); /** * \brief Check that \a cur buffer has enough bytes to decode a map header * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_MAP */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_map(const char *cur, const char *end); /** * \brief Decode a map header from MsgPack \a data. * * All map key-value pairs must be decoded after the header. * \param data - the pointer to a buffer * \return the number of key/value pairs in a map * \post *data = *data + mp_sizeof_array(retval) * \sa \link mp_encode_map() An usage example \endlink */ MP_PROTO uint32_t mp_decode_map(const char **data); /** * \brief Calculate exact buffer size needed to store an integer \a num. * Maximum return value is 9. For performance reasons you can preallocate * buffer for maximum size without calling the function. 
* Example usage: * \code * char **data = ...; * char *end = *data; * my_buffer_ensure(mp_sizeof_uint(x), &end); * // my_buffer_ensure(9, &end); * mp_encode_uint(buffer, x); * \endcode * \param num - a number * \return buffer size in bytes (max is 9) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_uint(uint64_t num); /** * \brief Calculate exact buffer size needed to store an integer \a num. * Maximum return value is 9. For performance reasons you can preallocate * buffer for maximum size without calling the function. * \param num - a number * \return buffer size in bytes (max is 9) * \pre \a num < 0 */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_int(int64_t num); /** * \brief Encode an unsigned integer \a num. * It is your responsibility to ensure that \a data has enough space. * \param data - a buffer * \param num - a number * \return \a data + mp_sizeof_uint(\a num) * \sa \link mp_encode_array() An usage example \endlink * \sa mp_sizeof_uint() */ MP_PROTO char * mp_encode_uint(char *data, uint64_t num); /** * \brief Encode a signed integer \a num. * It is your responsibility to ensure that \a data has enough space. 
* \param data - a buffer * \param num - a number * \return \a data + mp_sizeof_int(\a num) * \sa \link mp_encode_array() An usage example \endlink * \sa mp_sizeof_int() * \pre \a num < 0 */ MP_PROTO char * mp_encode_int(char *data, int64_t num); /** * \brief Check that \a cur buffer has enough bytes to decode an uint * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_UINT */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_uint(const char *cur, const char *end); /** * \brief Check that \a cur buffer has enough bytes to decode an int * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_INT */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_int(const char *cur, const char *end); /** * \brief Decode an unsigned integer from MsgPack \a data * \param data - the pointer to a buffer * \return an unsigned number * \post *data = *data + mp_sizeof_uint(retval) */ MP_PROTO uint64_t mp_decode_uint(const char **data); /** * \brief Decode a signed integer from MsgPack \a data * \param data - the pointer to a buffer * \return an unsigned number * \post *data = *data + mp_sizeof_int(retval) */ MP_PROTO int64_t mp_decode_int(const char **data); /** * \brief Compare two packed unsigned integers. * * The function is faster than two mp_decode_uint() calls. * \param data_a unsigned int a * \param data_b unsigned int b * \retval < 0 when \a a < \a b * \retval 0 when \a a == \a b * \retval > 0 when \a a > \a b */ MP_PROTO __attribute__((pure)) int mp_compare_uint(const char *data_a, const char *data_b); /** * \brief Calculate exact buffer size needed to store a float \a num. * The return value is always 5. The function was added to provide integrity of * the library. 
* \param num - a float * \return buffer size in bytes (always 5) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_float(float num); /** * \brief Calculate exact buffer size needed to store a double \a num. * The return value is either 5 or 9. The function was added to provide * integrity of the library. For performance reasons you can preallocate buffer * for maximum size without calling the function. * \param num - a double * \return buffer size in bytes (5 or 9) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_double(double num); /** * \brief Encode a float \a num. * It is your responsibility to ensure that \a data has enough space. * \param data - a buffer * \param num - a float * \return \a data + mp_sizeof_float(\a num) * \sa mp_sizeof_float() * \sa \link mp_encode_array() An usage example \endlink */ MP_PROTO char * mp_encode_float(char *data, float num); /** * \brief Encode a double \a num. * It is your responsibility to ensure that \a data has enough space. * \param data - a buffer * \param num - a float * \return \a data + mp_sizeof_double(\a num) * \sa \link mp_encode_array() An usage example \endlink * \sa mp_sizeof_double() */ MP_PROTO char * mp_encode_double(char *data, double num); /** * \brief Check that \a cur buffer has enough bytes to decode a float * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_FLOAT */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_float(const char *cur, const char *end); /** * \brief Check that \a cur buffer has enough bytes to decode a double * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_DOUBLE */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_double(const char *cur, const char *end); /** * \brief Decode a float from MsgPack \a 
data * \param data - the pointer to a buffer * \return a float * \post *data = *data + mp_sizeof_float(retval) */ MP_PROTO float mp_decode_float(const char **data); /** * \brief Decode a double from MsgPack \a data * \param data - the pointer to a buffer * \return a double * \post *data = *data + mp_sizeof_double(retval) */ MP_PROTO double mp_decode_double(const char **data); /** * \brief Calculate exact buffer size needed to store a string header of * length \a num. Maximum return value is 5. For performance reasons you can * preallocate buffer for maximum size without calling the function. * \param len - a string length * \return size in chars (max is 5) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_strl(uint32_t len); /** * \brief Equivalent to mp_sizeof_strl(\a len) + \a len. * \param len - a string length * \return size in chars (max is 5 + \a len) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_str(uint32_t len); /** * \brief Calculate exact buffer size needed to store a binstring header of * length \a num. Maximum return value is 5. For performance reasons you can * preallocate buffer for maximum size without calling the function. * \param len - a string length * \return size in chars (max is 5) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_binl(uint32_t len); /** * \brief Equivalent to mp_sizeof_binl(\a len) + \a len. * \param len - a string length * \return size in chars (max is 5 + \a len) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_bin(uint32_t len); /** * \brief Encode a string header of length \a len. * * The function encodes MsgPack header (\em only header) for a string of * length \a len. You should append actual string data to the buffer manually * after encoding the header (exactly \a len bytes without trailing '\0'). * * This approach is very useful for cases when the total length of the string * is known in advance, but the string data is not stored in a single * continuous buffer (e.g. network packets). 
* * It is your responsibility to ensure that \a data has enough space. * Usage example: * \code * char buffer[1024]; * char *b = buffer; * b = mp_encode_strl(b, hdr.total_len); * char *s = b; * memcpy(b, pkt1.data, pkt1.len) * b += pkt1.len; * // get next packet * memcpy(b, pkt2.data, pkt2.len) * b += pkt2.len; * // get next packet * memcpy(b, pkt1.data, pkt3.len) * b += pkt3.len; * * // Check that all data was received * assert(hdr.total_len == (uint32_t) (b - s)) * \endcode * Hint: you can dynamically reallocate the buffer during the process. * \param data - a buffer * \param len - a string length * \return \a data + mp_sizeof_strl(len) * \sa mp_sizeof_strl() */ MP_PROTO char * mp_encode_strl(char *data, uint32_t len); /** * \brief Encode a string of length \a len. * The function is equivalent to mp_encode_strl() + memcpy. * \param data - a buffer * \param str - a pointer to string data * \param len - a string length * \return \a data + mp_sizeof_str(len) == * data + mp_sizeof_strl(len) + len * \sa mp_encode_strl */ MP_PROTO char * mp_encode_str(char *data, const char *str, uint32_t len); /** * \brief Encode a binstring header of length \a len. * See mp_encode_strl() for more details. * \param data - a bufer * \param len - a string length * \return data + mp_sizeof_binl(\a len) * \sa mp_encode_strl */ MP_PROTO char * mp_encode_binl(char *data, uint32_t len); /** * \brief Encode a binstring of length \a len. * The function is equivalent to mp_encode_binl() + memcpy. * \param data - a buffer * \param str - a pointer to binstring data * \param len - a binstring length * \return \a data + mp_sizeof_bin(\a len) == * data + mp_sizeof_binl(\a len) + \a len * \sa mp_encode_strl */ MP_PROTO char * mp_encode_bin(char *data, const char *str, uint32_t len); /** * \brief Encode a sequence of values according to format string. 
* Example: mp_format(buf, sz, "[%d {%d%s%d%s}]", 42, 0, "false", 1, "true"); * to get a msgpack array of two items: number 42 and map (0->"false, 2->"true") * Does not write items that don't fit to data_size argument. * * \param data - a buffer * \param data_size - a buffer size * \param format - zero-end string, containing structure of resulting * msgpack and types of next arguments. * Format can contain '[' and ']' pairs, defining arrays, * '{' and '}' pairs, defining maps, and format specifiers, described below: * %d, %i - int * %u - unsigned int * %ld, %li - long * %lu - unsigned long * %lld, %lli - long long * %llu - unsigned long long * %hd, %hi - short * %hu - unsigned short * %hhd, %hhi - char (as number) * %hhu - unsigned char (as number) * %f - float * %lf - double * %b - bool * %s - zero-end string * %.*s - string with specified length * %p - MsgPack data * %.*p - MsgPack data with specified length * %% is ignored * %smthelse assert and undefined behaviour * NIL - a nil value * all other symbols are ignored. * * \return the number of requred bytes. * \retval > data_size means that is not enough space * and whole msgpack was not encoded. */ size_t mp_format(char *data, size_t data_size, const char *format, ...); /** * \brief mp_format variation, taking variable argument list * Example: * va_list args; * va_start(args, fmt); * mp_vformat(data, data_size, fmt, args); * va_end(args); * \sa \link mp_format() mp_format() \endlink */ size_t mp_vformat(char *data, size_t data_size, const char *format, va_list args); /** * \brief print MsgPack data \a file using JSON-like format. * MP_EXT is printed as "undefined" * \param file - pointer to file (or NULL for stdout) * \param data - pointer to buffer containing msgpack object * \retval >=0 - the number of bytes printed * \retval -1 - error * \sa fprintf() */ int mp_fprint(FILE *file, const char *data); /** * \brief format MsgPack data to \a buf using JSON-like format. 
* \sa mp_fprint() * \param buf - buffer to use * \param size - buffer size. This function write at most size bytes * (including the terminating null byte ('\0'). * \param data - pointer to buffer containing msgpack object * \retval =size - the number of characters (excluding the null byte), * which would have been written to the final string if * enough space had been available. * \retval -1 - error * \sa snprintf() */ int mp_snprint(char *buf, int size, const char *data); /** * \brief Check that \a cur buffer has enough bytes to decode a string header * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_STR */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_strl(const char *cur, const char *end); /** * \brief Check that \a cur buffer has enough bytes to decode a binstring header * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_BIN */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_binl(const char *cur, const char *end); /** * \brief Decode a length of a string from MsgPack \a data * \param data - the pointer to a buffer * \return a length of astring * \post *data = *data + mp_sizeof_strl(retval) * \sa mp_encode_strl */ MP_PROTO uint32_t mp_decode_strl(const char **data); /** * \brief Decode a string from MsgPack \a data * \param data - the pointer to a buffer * \param len - the pointer to save a string length * \return a pointer to a decoded string * \post *data = *data + mp_sizeof_str(*len) * \sa mp_encode_binl */ MP_PROTO const char * mp_decode_str(const char **data, uint32_t *len); /** * \brief Decode a length of a binstring from MsgPack \a data * \param data - the pointer to a buffer * \return a length of a binstring * \post *data = *data + mp_sizeof_binl(retval) * \sa mp_encode_binl 
*/ MP_PROTO uint32_t mp_decode_binl(const char **data); /** * \brief Decode a binstring from MsgPack \a data * \param data - the pointer to a buffer * \param len - the pointer to save a binstring length * \return a pointer to a decoded binstring * \post *data = *data + mp_sizeof_str(*len) * \sa mp_encode_binl */ MP_PROTO const char * mp_decode_bin(const char **data, uint32_t *len); /** * \brief Decode a length of a string or binstring from MsgPack \a data * \param data - the pointer to a buffer * \return a length of a string * \post *data = *data + mp_sizeof_strbinl(retval) * \sa mp_encode_binl */ MP_PROTO uint32_t mp_decode_strbinl(const char **data); /** * \brief Decode a string or binstring from MsgPack \a data * \param data - the pointer to a buffer * \param len - the pointer to save a binstring length * \return a pointer to a decoded binstring * \post *data = *data + mp_sizeof_strbinl(*len) * \sa mp_encode_binl */ MP_PROTO const char * mp_decode_strbin(const char **data, uint32_t *len); /** * \brief Calculate exact buffer size needed to store the nil value. * The return value is always 1. The function was added to provide integrity of * the library. * \return buffer size in bytes (always 1) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_nil(void); /** * \brief Encode the nil value. * It is your responsibility to ensure that \a data has enough space. 
* \param data - a buffer * \return \a data + mp_sizeof_nil() * \sa \link mp_encode_array() An usage example \endlink * \sa mp_sizeof_nil() */ MP_PROTO char * mp_encode_nil(char *data); /** * \brief Check that \a cur buffer has enough bytes to decode nil * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_NIL */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_nil(const char *cur, const char *end); /** * \brief Decode the nil value from MsgPack \a data * \param data - the pointer to a buffer * \post *data = *data + mp_sizeof_nil() */ MP_PROTO void mp_decode_nil(const char **data); /** * \brief Calculate exact buffer size needed to store a boolean value. * The return value is always 1. The function was added to provide integrity of * the library. * \return buffer size in bytes (always 1) */ MP_PROTO __attribute__((const)) uint32_t mp_sizeof_bool(bool val); /** * \brief Encode a bool value \a val. * It is your responsibility to ensure that \a data has enough space. * \param data - a buffer * \param val - a bool * \return \a data + mp_sizeof_bool(val) * \sa \link mp_encode_array() An usage example \endlink * \sa mp_sizeof_bool() */ MP_PROTO char * mp_encode_bool(char *data, bool val); /** * \brief Check that \a cur buffer has enough bytes to decode a bool value * \param cur buffer * \param end end of the buffer * \retval 0 - buffer has enough bytes * \retval > 0 - the number of remaining bytes to read * \pre cur < end * \pre mp_typeof(*cur) == MP_BOOL */ MP_PROTO __attribute__((pure)) ptrdiff_t mp_check_bool(const char *cur, const char *end); /** * \brief Decode a bool value from MsgPack \a data * \param data - the pointer to a buffer * \return a decoded bool value * \post *data = *data + mp_sizeof_bool(retval) */ MP_PROTO bool mp_decode_bool(const char **data); /** * \brief Decode an integer value as int32_t from MsgPack \a data. 
* \param data - the pointer to a buffer * \param[out] ret - the pointer to save a result * \retval 0 on success * \retval -1 if underlying mp type is not MP_INT or MP_UINT * \retval -1 if the result can't be stored in int32_t */ MP_PROTO int mp_read_int32(const char **data, int32_t *ret); /** * \brief Decode an integer value as int64_t from MsgPack \a data. * \param data - the pointer to a buffer * \param[out] ret - the pointer to save a result * \retval 0 on success * \retval -1 if underlying mp type is not MP_INT or MP_UINT * \retval -1 if the result can't be stored in int64_t */ MP_PROTO int mp_read_int64(const char **data, int64_t *ret); /** * \brief Decode a floating point value as double from MsgPack \a data. * \param data - the pointer to a buffer * \param[out] ret - the pointer to save a result * \retval 0 on success * \retval -1 if underlying mp type is not MP_INT, MP_UINT, * MP_FLOAT, or MP_DOUBLE * \retval -1 if the result can't be stored in double */ MP_PROTO int mp_read_double(const char **data, double *ret); /** * \brief Skip one element in a packed \a data. * * The function is faster than mp_typeof + mp_decode_XXX() combination. * For arrays and maps the function also skips all members. * For strings and binstrings the function also skips the string data. 
* * Usage example: * \code * char buf[1024]; * * char *w = buf; * // First MsgPack object * w = mp_encode_uint(w, 10); * * // Second MsgPack object * w = mp_encode_array(w, 4); * w = mp_encode_array(w, 2); * // Begin of an inner array * w = mp_encode_str(w, "second inner 1", 14); * w = mp_encode_str(w, "second inner 2", 14); * // End of an inner array * w = mp_encode_str(w, "second", 6); * w = mp_encode_uint(w, 20); * w = mp_encode_bool(w, true); * * // Third MsgPack object * w = mp_encode_str(w, "third", 5); * // EOF * * const char *r = buf; * * // First MsgPack object * assert(mp_typeof(**r) == MP_UINT); * mp_next(&r); // skip the first object * * // Second MsgPack object * assert(mp_typeof(**r) == MP_ARRAY); * mp_decode_array(&r); * assert(mp_typeof(**r) == MP_ARRAY); // inner array * mp_next(&r); // -->> skip the entire inner array (with all members) * assert(mp_typeof(**r) == MP_STR); // second * mp_next(&r); * assert(mp_typeof(**r) == MP_UINT); // 20 * mp_next(&r); * assert(mp_typeof(**r) == MP_BOOL); // true * mp_next(&r); * * // Third MsgPack object * assert(mp_typeof(**r) == MP_STR); // third * mp_next(&r); * * assert(r == w); // EOF * * \endcode * \param data - the pointer to a buffer * \post *data = *data + mp_sizeof_TYPE() where TYPE is mp_typeof(**data) */ MP_PROTO void mp_next(const char **data); /** * \brief Equivalent to mp_next() but also validates MsgPack in \a data. * \param data - the pointer to a buffer * \param end - the end of a buffer * \retval 0 when MsgPack in \a data is valid. * \retval != 0 when MsgPack in \a data is not valid. 
* \post *data = *data + mp_sizeof_TYPE() where TYPE is mp_typeof(**data) * \post *data is not defined if MsgPack is not valid * \sa mp_next() */ MP_PROTO int mp_check(const char **data, const char *end); /* * }}} */ /* * {{{ Implementation */ /** \cond false */ extern const enum mp_type mp_type_hint[]; extern const int8_t mp_parser_hint[]; extern const char *mp_char2escape[]; MP_IMPL MP_ALWAYSINLINE enum mp_type mp_typeof(const char c) { return mp_type_hint[(uint8_t) c]; } MP_IMPL uint32_t mp_sizeof_array(uint32_t size) { if (size <= 15) { return 1; } else if (size <= UINT16_MAX) { return 1 + sizeof(uint16_t); } else { return 1 + sizeof(uint32_t); } } MP_IMPL char * mp_encode_array(char *data, uint32_t size) { if (size <= 15) { return mp_store_u8(data, 0x90 | size); } else if (size <= UINT16_MAX) { data = mp_store_u8(data, 0xdc); data = mp_store_u16(data, size); return data; } else { data = mp_store_u8(data, 0xdd); return mp_store_u32(data, size); } } MP_IMPL ptrdiff_t mp_check_array(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_ARRAY); uint8_t c = mp_load_u8(&cur); if (mp_likely(!(c & 0x40))) return cur - end; assert(c >= 0xdc && c <= 0xdd); /* must be checked above by mp_typeof */ uint32_t hsize = 2U << (c & 0x1); /* 0xdc->2, 0xdd->4 */ return hsize - (end - cur); } MP_PROTO uint32_t mp_decode_array_slowpath(uint8_t c, const char **data); MP_IMPL uint32_t mp_decode_array_slowpath(uint8_t c, const char **data) { uint32_t size; switch (c & 0x1) { case 0xdc & 0x1: size = mp_load_u16(data); return size; case 0xdd & 0x1: size = mp_load_u32(data); return size; default: mp_unreachable(); } } MP_IMPL MP_ALWAYSINLINE uint32_t mp_decode_array(const char **data) { uint8_t c = mp_load_u8(data); if (mp_likely(!(c & 0x40))) return (c & 0xf); return mp_decode_array_slowpath(c, data); } MP_IMPL uint32_t mp_sizeof_map(uint32_t size) { if (size <= 15) { return 1; } else if (size <= UINT16_MAX) { return 1 + sizeof(uint16_t); } else { return 1 + 
sizeof(uint32_t); } } MP_IMPL char * mp_encode_map(char *data, uint32_t size) { if (size <= 15) { return mp_store_u8(data, 0x80 | size); } else if (size <= UINT16_MAX) { data = mp_store_u8(data, 0xde); data = mp_store_u16(data, size); return data; } else { data = mp_store_u8(data, 0xdf); data = mp_store_u32(data, size); return data; } } MP_IMPL ptrdiff_t mp_check_map(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_MAP); uint8_t c = mp_load_u8(&cur); if (mp_likely((c & ~0xfU) == 0x80)) return cur - end; assert(c >= 0xde && c <= 0xdf); /* must be checked above by mp_typeof */ uint32_t hsize = 2U << (c & 0x1); /* 0xde->2, 0xdf->4 */ return hsize - (end - cur); } MP_IMPL uint32_t mp_decode_map(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xde: return mp_load_u16(data); case 0xdf: return mp_load_u32(data); default: if (mp_unlikely(c < 0x80 || c > 0x8f)) mp_unreachable(); return c & 0xf; } } MP_IMPL uint32_t mp_sizeof_uint(uint64_t num) { if (num <= 0x7f) { return 1; } else if (num <= UINT8_MAX) { return 1 + sizeof(uint8_t); } else if (num <= UINT16_MAX) { return 1 + sizeof(uint16_t); } else if (num <= UINT32_MAX) { return 1 + sizeof(uint32_t); } else { return 1 + sizeof(uint64_t); } } MP_IMPL uint32_t mp_sizeof_int(int64_t num) { assert(num < 0); if (num >= -0x20) { return 1; } else if (num >= INT8_MIN && num <= INT8_MAX) { return 1 + sizeof(int8_t); } else if (num >= INT16_MIN && num <= UINT16_MAX) { return 1 + sizeof(int16_t); } else if (num >= INT32_MIN && num <= UINT32_MAX) { return 1 + sizeof(int32_t); } else { return 1 + sizeof(int64_t); } } MP_IMPL ptrdiff_t mp_check_uint(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_UINT); uint8_t c = mp_load_u8(&cur); return mp_parser_hint[c] - (end - cur); } MP_IMPL ptrdiff_t mp_check_int(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_INT); uint8_t c = mp_load_u8(&cur); return mp_parser_hint[c] 
- (end - cur); } MP_IMPL char * mp_encode_uint(char *data, uint64_t num) { if (num <= 0x7f) { return mp_store_u8(data, num); } else if (num <= UINT8_MAX) { data = mp_store_u8(data, 0xcc); return mp_store_u8(data, num); } else if (num <= UINT16_MAX) { data = mp_store_u8(data, 0xcd); return mp_store_u16(data, num); } else if (num <= UINT32_MAX) { data = mp_store_u8(data, 0xce); return mp_store_u32(data, num); } else { data = mp_store_u8(data, 0xcf); return mp_store_u64(data, num); } } MP_IMPL char * mp_encode_int(char *data, int64_t num) { assert(num < 0); if (num >= -0x20) { return mp_store_u8(data, 0xe0 | num); } else if (num >= INT8_MIN) { data = mp_store_u8(data, 0xd0); return mp_store_u8(data, num); } else if (num >= INT16_MIN) { data = mp_store_u8(data, 0xd1); return mp_store_u16(data, num); } else if (num >= INT32_MIN) { data = mp_store_u8(data, 0xd2); return mp_store_u32(data, num); } else { data = mp_store_u8(data, 0xd3); return mp_store_u64(data, num); } } MP_IMPL uint64_t mp_decode_uint(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xcc: return mp_load_u8(data); case 0xcd: return mp_load_u16(data); case 0xce: return mp_load_u32(data); case 0xcf: return mp_load_u64(data); default: if (mp_unlikely(c > 0x7f)) mp_unreachable(); return c; } } MP_IMPL int mp_compare_uint(const char *data_a, const char *data_b) { uint8_t ca = mp_load_u8(&data_a); uint8_t cb = mp_load_u8(&data_b); int r = ca - cb; if (r != 0) return r; if (ca <= 0x7f) return 0; uint64_t a, b; switch (ca & 0x3) { case 0xcc & 0x3: a = mp_load_u8(&data_a); b = mp_load_u8(&data_b); break; case 0xcd & 0x3: a = mp_load_u16(&data_a); b = mp_load_u16(&data_b); break; case 0xce & 0x3: a = mp_load_u32(&data_a); b = mp_load_u32(&data_b); break; case 0xcf & 0x3: a = mp_load_u64(&data_a); b = mp_load_u64(&data_b); return a < b ? 
-1 : a > b; break; default: mp_unreachable(); } int64_t v = (a - b); return (v > 0) - (v < 0); } MP_IMPL int64_t mp_decode_int(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xd0: return (int8_t) mp_load_u8(data); case 0xd1: return (int16_t) mp_load_u16(data); case 0xd2: return (int32_t) mp_load_u32(data); case 0xd3: return (int64_t) mp_load_u64(data); default: if (mp_unlikely(c < 0xe0)) mp_unreachable(); return (int8_t) (c); } } MP_IMPL uint32_t mp_sizeof_float(float num) { (void) num; return 1 + sizeof(float); } MP_IMPL uint32_t mp_sizeof_double(double num) { (void) num; return 1 + sizeof(double); } MP_IMPL ptrdiff_t mp_check_float(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_FLOAT); return 1 + sizeof(float) - (end - cur); } MP_IMPL ptrdiff_t mp_check_double(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_DOUBLE); return 1 + sizeof(double) - (end - cur); } MP_IMPL char * mp_encode_float(char *data, float num) { data = mp_store_u8(data, 0xca); return mp_store_float(data, num); } MP_IMPL char * mp_encode_double(char *data, double num) { data = mp_store_u8(data, 0xcb); return mp_store_double(data, num); } MP_IMPL float mp_decode_float(const char **data) { uint8_t c = mp_load_u8(data); assert(c == 0xca); (void) c; return mp_load_float(data); } MP_IMPL double mp_decode_double(const char **data) { uint8_t c = mp_load_u8(data); assert(c == 0xcb); (void) c; return mp_load_double(data); } MP_IMPL uint32_t mp_sizeof_strl(uint32_t len) { if (len <= 31) { return 1; } else if (len <= UINT8_MAX) { return 1 + sizeof(uint8_t); } else if (len <= UINT16_MAX) { return 1 + sizeof(uint16_t); } else { return 1 + sizeof(uint32_t); } } MP_IMPL uint32_t mp_sizeof_str(uint32_t len) { return mp_sizeof_strl(len) + len; } MP_IMPL uint32_t mp_sizeof_binl(uint32_t len) { if (len <= UINT8_MAX) { return 1 + sizeof(uint8_t); } else if (len <= UINT16_MAX) { return 1 + sizeof(uint16_t); } else { return 
1 + sizeof(uint32_t); } } MP_IMPL uint32_t mp_sizeof_bin(uint32_t len) { return mp_sizeof_binl(len) + len; } MP_IMPL char * mp_encode_strl(char *data, uint32_t len) { if (len <= 31) { return mp_store_u8(data, 0xa0 | (uint8_t) len); } else if (len <= UINT8_MAX) { data = mp_store_u8(data, 0xd9); return mp_store_u8(data, len); } else if (len <= UINT16_MAX) { data = mp_store_u8(data, 0xda); return mp_store_u16(data, len); } else { data = mp_store_u8(data, 0xdb); return mp_store_u32(data, len); } } MP_IMPL char * mp_encode_str(char *data, const char *str, uint32_t len) { data = mp_encode_strl(data, len); memcpy(data, str, len); return data + len; } MP_IMPL char * mp_encode_binl(char *data, uint32_t len) { if (len <= UINT8_MAX) { data = mp_store_u8(data, 0xc4); return mp_store_u8(data, len); } else if (len <= UINT16_MAX) { data = mp_store_u8(data, 0xc5); return mp_store_u16(data, len); } else { data = mp_store_u8(data, 0xc6); return mp_store_u32(data, len); } } MP_IMPL char * mp_encode_bin(char *data, const char *str, uint32_t len) { data = mp_encode_binl(data, len); memcpy(data, str, len); return data + len; } MP_IMPL ptrdiff_t mp_check_strl(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_STR); uint8_t c = mp_load_u8(&cur); if (mp_likely(c & ~0x1f) == 0xa0) return cur - end; assert(c >= 0xd9 && c <= 0xdb); /* must be checked above by mp_typeof */ uint32_t hsize = 1U << (c & 0x3) >> 1; /* 0xd9->1, 0xda->2, 0xdb->4 */ return hsize - (end - cur); } MP_IMPL ptrdiff_t mp_check_binl(const char *cur, const char *end) { uint8_t c = mp_load_u8(&cur); assert(cur < end); assert(mp_typeof(c) == MP_BIN); assert(c >= 0xc4 && c <= 0xc6); /* must be checked above by mp_typeof */ uint32_t hsize = 1U << (c & 0x3); /* 0xc4->1, 0xc5->2, 0xc6->4 */ return hsize - (end - cur); } MP_IMPL uint32_t mp_decode_strl(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xd9: return mp_load_u8(data); case 0xda: return mp_load_u16(data); case 0xdb: 
return mp_load_u32(data); default: if (mp_unlikely(c < 0xa0 || c > 0xbf)) mp_unreachable(); return c & 0x1f; } } MP_IMPL const char * mp_decode_str(const char **data, uint32_t *len) { assert(len != NULL); *len = mp_decode_strl(data); const char *str = *data; *data += *len; return str; } MP_IMPL uint32_t mp_decode_binl(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xc4: return mp_load_u8(data); case 0xc5: return mp_load_u16(data); case 0xc6: return mp_load_u32(data); default: mp_unreachable(); } } MP_IMPL const char * mp_decode_bin(const char **data, uint32_t *len) { assert(len != NULL); *len = mp_decode_binl(data); const char *str = *data; *data += *len; return str; } MP_IMPL uint32_t mp_decode_strbinl(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xd9: return mp_load_u8(data); case 0xda: return mp_load_u16(data); case 0xdb: return mp_load_u32(data); case 0xc4: return mp_load_u8(data); case 0xc5: return mp_load_u16(data); case 0xc6: return mp_load_u32(data); default: if (mp_unlikely(c < 0xa0 || c > 0xbf)) mp_unreachable(); return c & 0x1f; } } MP_IMPL const char * mp_decode_strbin(const char **data, uint32_t *len) { assert(len != NULL); *len = mp_decode_strbinl(data); const char *str = *data; *data += *len; return str; } MP_IMPL uint32_t mp_sizeof_nil() { return 1; } MP_IMPL char * mp_encode_nil(char *data) { return mp_store_u8(data, 0xc0); } MP_IMPL ptrdiff_t mp_check_nil(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_NIL); return 1 - (end - cur); } MP_IMPL void mp_decode_nil(const char **data) { uint8_t c = mp_load_u8(data); assert(c == 0xc0); (void) c; } MP_IMPL uint32_t mp_sizeof_bool(bool val) { (void) val; return 1; } MP_IMPL char * mp_encode_bool(char *data, bool val) { return mp_store_u8(data, 0xc2 | (val & 1)); } MP_IMPL ptrdiff_t mp_check_bool(const char *cur, const char *end) { assert(cur < end); assert(mp_typeof(*cur) == MP_BOOL); return 1 - (end - cur); } MP_IMPL bool 
mp_decode_bool(const char **data) { uint8_t c = mp_load_u8(data); switch (c) { case 0xc3: return true; case 0xc2: return false; default: mp_unreachable(); } } MP_IMPL int mp_read_int32(const char **data, int32_t *ret) { uint32_t uval; const char *p = *data; uint8_t c = mp_load_u8(&p); switch (c) { case 0xd0: *ret = (int8_t) mp_load_u8(&p); break; case 0xd1: *ret = (int16_t) mp_load_u16(&p); break; case 0xd2: *ret = (int32_t) mp_load_u32(&p); break; case 0xcc: *ret = mp_load_u8(&p); break; case 0xcd: *ret = mp_load_u16(&p); break; case 0xce: uval = mp_load_u32(&p); if (mp_unlikely(uval > INT32_MAX)) return -1; *ret = uval; break; default: if (mp_unlikely(c < 0xe0 && c > 0x7f)) return -1; *ret = (int8_t) c; break; } *data = p; return 0; } MP_IMPL int mp_read_int64(const char **data, int64_t *ret) { uint64_t uval; const char *p = *data; uint8_t c = mp_load_u8(&p); switch (c) { case 0xd0: *ret = (int8_t) mp_load_u8(&p); break; case 0xd1: *ret = (int16_t) mp_load_u16(&p); break; case 0xd2: *ret = (int32_t) mp_load_u32(&p); break; case 0xd3: *ret = (int64_t) mp_load_u64(&p); break; case 0xcc: *ret = mp_load_u8(&p); break; case 0xcd: *ret = mp_load_u16(&p); break; case 0xce: *ret = mp_load_u32(&p); break; case 0xcf: uval = mp_load_u64(&p); if (uval > INT64_MAX) return -1; *ret = uval; break; default: if (mp_unlikely(c < 0xe0 && c > 0x7f)) return -1; *ret = (int8_t) c; break; } *data = p; return 0; } MP_IMPL int mp_read_double(const char **data, double *ret) { int64_t ival; uint64_t uval; double val; const char *p = *data; uint8_t c = mp_load_u8(&p); switch (c) { case 0xd0: *ret = (int8_t) mp_load_u8(&p); break; case 0xd1: *ret = (int16_t) mp_load_u16(&p); break; case 0xd2: *ret = (int32_t) mp_load_u32(&p); break; case 0xd3: val = ival = (int64_t) mp_load_u64(&p); if ((int64_t)val != ival) return -1; *ret = val; break; case 0xcc: *ret = mp_load_u8(&p); break; case 0xcd: *ret = mp_load_u16(&p); break; case 0xce: *ret = mp_load_u32(&p); break; case 0xcf: val = uval = 
mp_load_u64(&p); if ((uint64_t)val != uval) return -1; *ret = val; break; case 0xca: *ret = mp_load_float(&p); break; case 0xcb: *ret = mp_load_double(&p); break; default: if (mp_unlikely(c < 0xe0 && c > 0x7f)) return -1; *ret = (int8_t) c; break; } *data = p; return 0; } /** See mp_parser_hint */ enum { MP_HINT = -32, MP_HINT_STR_8 = MP_HINT, MP_HINT_STR_16 = MP_HINT - 1, MP_HINT_STR_32 = MP_HINT - 2, MP_HINT_ARRAY_16 = MP_HINT - 3, MP_HINT_ARRAY_32 = MP_HINT - 4, MP_HINT_MAP_16 = MP_HINT - 5, MP_HINT_MAP_32 = MP_HINT - 6, MP_HINT_EXT_8 = MP_HINT - 7, MP_HINT_EXT_16 = MP_HINT - 8, MP_HINT_EXT_32 = MP_HINT - 9 }; MP_PROTO void mp_next_slowpath(const char **data, int64_t k); MP_IMPL void mp_next_slowpath(const char **data, int64_t k) { for (; k > 0; k--) { uint8_t c = mp_load_u8(data); int l = mp_parser_hint[c]; if (mp_likely(l >= 0)) { *data += l; continue; } else if (mp_likely(l > MP_HINT)) { k -= l; continue; } uint32_t len; switch (l) { case MP_HINT_STR_8: /* MP_STR (8) */ len = mp_load_u8(data); *data += len; break; case MP_HINT_STR_16: /* MP_STR (16) */ len = mp_load_u16(data); *data += len; break; case MP_HINT_STR_32: /* MP_STR (32) */ len = mp_load_u32(data); *data += len; break; case MP_HINT_ARRAY_16: /* MP_ARRAY (16) */ k += mp_load_u16(data); break; case MP_HINT_ARRAY_32: /* MP_ARRAY (32) */ k += mp_load_u32(data); break; case MP_HINT_MAP_16: /* MP_MAP (16) */ k += 2 * mp_load_u16(data); break; case MP_HINT_MAP_32: /* MP_MAP (32) */ k += 2 * mp_load_u32(data); break; case MP_HINT_EXT_8: /* MP_EXT (8) */ len = mp_load_u8(data); mp_load_u8(data); *data += len; break; case MP_HINT_EXT_16: /* MP_EXT (16) */ len = mp_load_u16(data); mp_load_u8(data); *data += len; break; case MP_HINT_EXT_32: /* MP_EXT (32) */ len = mp_load_u32(data); mp_load_u8(data); *data += len; break; default: mp_unreachable(); } } } MP_IMPL void mp_next(const char **data) { int64_t k = 1; for (; k > 0; k--) { uint8_t c = mp_load_u8(data); int l = mp_parser_hint[c]; if (mp_likely(l >= 0)) 
{ *data += l; continue; } else if (mp_likely(c == 0xd9)){ /* MP_STR (8) */ uint8_t len = mp_load_u8(data); *data += len; continue; } else if (l > MP_HINT) { k -= l; continue; } else { *data -= sizeof(uint8_t); return mp_next_slowpath(data, k); } } } MP_IMPL int mp_check(const char **data, const char *end) { #define MP_CHECK_LEN(_l) \ if (mp_unlikely((size_t)(end - *data) < (size_t)(_l))) \ return 1; int64_t k; for (k = 1; k > 0; k--) { MP_CHECK_LEN(1); uint8_t c = mp_load_u8(data); int l = mp_parser_hint[c]; if (mp_likely(l >= 0)) { MP_CHECK_LEN(l); *data += l; continue; } else if (mp_likely(l > MP_HINT)) { k -= l; continue; } uint32_t len; switch (l) { case MP_HINT_STR_8: /* MP_STR (8) */ MP_CHECK_LEN(sizeof(uint8_t)); len = mp_load_u8(data); MP_CHECK_LEN(len); *data += len; break; case MP_HINT_STR_16: /* MP_STR (16) */ MP_CHECK_LEN(sizeof(uint16_t)); len = mp_load_u16(data); MP_CHECK_LEN(len); *data += len; break; case MP_HINT_STR_32: /* MP_STR (32) */ MP_CHECK_LEN(sizeof(uint32_t)) len = mp_load_u32(data); MP_CHECK_LEN(len); *data += len; break; case MP_HINT_ARRAY_16: /* MP_ARRAY (16) */ MP_CHECK_LEN(sizeof(uint16_t)); k += mp_load_u16(data); break; case MP_HINT_ARRAY_32: /* MP_ARRAY (32) */ MP_CHECK_LEN(sizeof(uint32_t)); k += mp_load_u32(data); break; case MP_HINT_MAP_16: /* MP_MAP (16) */ MP_CHECK_LEN(sizeof(uint16_t)); k += 2 * mp_load_u16(data); break; case MP_HINT_MAP_32: /* MP_MAP (32) */ MP_CHECK_LEN(sizeof(uint32_t)); k += 2 * mp_load_u32(data); break; case MP_HINT_EXT_8: /* MP_EXT (8) */ MP_CHECK_LEN(sizeof(uint8_t) + sizeof(uint8_t)); len = mp_load_u8(data); mp_load_u8(data); MP_CHECK_LEN(len); *data += len; break; case MP_HINT_EXT_16: /* MP_EXT (16) */ MP_CHECK_LEN(sizeof(uint16_t) + sizeof(uint8_t)); len = mp_load_u16(data); mp_load_u8(data); MP_CHECK_LEN(len); *data += len; break; case MP_HINT_EXT_32: /* MP_EXT (32) */ MP_CHECK_LEN(sizeof(uint32_t) + sizeof(uint8_t)); len = mp_load_u32(data); mp_load_u8(data); MP_CHECK_LEN(len); *data += len; 
break; default: mp_unreachable(); } } assert(*data <= end); #undef MP_CHECK_LEN return 0; } /** \endcond */ /* * }}} */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #undef MP_LIBRARY #undef MP_PROTO #undef MP_IMPL #undef MP_ALWAYSINLINE #undef MP_GCC_VERSION #endif /* MSGPUCK_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/msgpuck/msgpuck.c0000644000000000000000000002403513306562360021037 0ustar rootroot/* * Copyright (c) 2013-2017 MsgPuck Authors * All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #define MP_LIBRARY 1 #include "msgpuck.h" size_t mp_vformat(char *data, size_t data_size, const char *format, va_list vl) { size_t result = 0; const char *f = NULL; for (f = format; *f; f++) { if (f[0] == '[') { uint32_t size = 0; int level = 1; const char *e = NULL; for (e = f + 1; level && *e; e++) { if (*e == '[' || *e == '{') { if (level == 1) size++; level++; } else if (*e == ']' || *e == '}') { level--; /* opened '[' must be closed by ']' */ assert(level || *e == ']'); } else if (*e == '%') { if (e[1] == '%') e++; else if (level == 1) size++; } else if (*e == 'N' && e[1] == 'I' && e[2] == 'L' && level == 1) { size++; } } /* opened '[' must be closed */ assert(level == 0); result += mp_sizeof_array(size); if (result <= data_size) data = mp_encode_array(data, size); } else if (f[0] == '{') { uint32_t count = 0; int level = 1; const char *e = NULL; for (e = f + 1; level && *e; e++) { if (*e == '[' || *e == '{') { if (level == 1) count++; level++; } else if (*e == ']' || *e == '}') { level--; /* opened '{' must be closed by '}' */ assert(level || *e == '}'); } else if (*e == '%') { if (e[1] == '%') e++; else if (level == 1) count++; } else if (*e == 'N' && e[1] == 'I' && e[2] == 'L' && level == 1) { count++; } } /* opened '{' must be closed */ assert(level == 0); /* since map is a pair list, count must be even */ assert(count % 2 == 0); uint32_t size = count / 2; result += mp_sizeof_map(size); if (result <= data_size) data = mp_encode_map(data, size); } else if (f[0] == '%') { f++; assert(f[0]); int64_t int_value = 0; int int_status = 0; /* 1 - signed, 2 - unsigned */ if (f[0] == 'd' || f[0] == 'i') { int_value = va_arg(vl, int); int_status = 1; } else if (f[0] == 'u') { int_value = va_arg(vl, unsigned int); int_status = 2; } else if (f[0] == 's') { const char *str = va_arg(vl, const char *); uint32_t len = (uint32_t)strlen(str); result += mp_sizeof_str(len); if (result <= data_size) data = mp_encode_str(data, str, len); } else if (f[0] == '.' 
&& f[1] == '*' && f[2] == 's') { uint32_t len = va_arg(vl, uint32_t); const char *str = va_arg(vl, const char *); result += mp_sizeof_str(len); if (result <= data_size) data = mp_encode_str(data, str, len); f += 2; } else if (f[0] == 'p') { const char *p = va_arg(vl, const char *); const char *end = p; mp_next(&end); uint32_t len = end - p; result += len; if (result <= data_size) { memcpy(data, p, len); data += len; } } else if (f[0] == '.' && f[1] == '*' && f[2] == 'p') { uint32_t len = va_arg(vl, uint32_t); const char *p = va_arg(vl, const char *); assert(len > 0); result += len; if (result <= data_size) { memcpy(data, p, len); data += len; } f += 2; } else if(f[0] == 'f') { float v = (float)va_arg(vl, double); result += mp_sizeof_float(v); if (result <= data_size) data = mp_encode_float(data, v); } else if(f[0] == 'l' && f[1] == 'f') { double v = va_arg(vl, double); result += mp_sizeof_double(v); if (result <= data_size) data = mp_encode_double(data, v); f++; } else if(f[0] == 'b') { bool v = (bool)va_arg(vl, int); result += mp_sizeof_bool(v); if (result <= data_size) data = mp_encode_bool(data, v); } else if (f[0] == 'l' && (f[1] == 'd' || f[1] == 'i')) { int_value = va_arg(vl, long); int_status = 1; f++; } else if (f[0] == 'l' && f[1] == 'u') { int_value = va_arg(vl, unsigned long); int_status = 2; f++; } else if (f[0] == 'l' && f[1] == 'l' && (f[2] == 'd' || f[2] == 'i')) { int_value = va_arg(vl, long long); int_status = 1; f += 2; } else if (f[0] == 'l' && f[1] == 'l' && f[2] == 'u') { int_value = va_arg(vl, unsigned long long); int_status = 2; f += 2; } else if (f[0] == 'h' && (f[1] == 'd' || f[1] == 'i')) { int_value = va_arg(vl, int); int_status = 1; f++; } else if (f[0] == 'h' && f[1] == 'u') { int_value = va_arg(vl, unsigned int); int_status = 2; f++; } else if (f[0] == 'h' && f[1] == 'h' && (f[2] == 'd' || f[2] == 'i')) { int_value = va_arg(vl, int); int_status = 1; f += 2; } else if (f[0] == 'h' && f[1] == 'h' && f[2] == 'u') { int_value = va_arg(vl, 
unsigned int); int_status = 2; f += 2; } else if (f[0] != '%') { /* unexpected format specifier */ assert(false); } if (int_status == 1 && int_value < 0) { result += mp_sizeof_int(int_value); if (result <= data_size) data = mp_encode_int(data, int_value); } else if(int_status) { result += mp_sizeof_uint(int_value); if (result <= data_size) data = mp_encode_uint(data, int_value); } } else if (f[0] == 'N' && f[1] == 'I' && f[2] == 'L') { result += mp_sizeof_nil(); if (result <= data_size) data = mp_encode_nil(data); f += 2; } } return result; } size_t mp_format(char *data, size_t data_size, const char *format, ...) { va_list args; va_start(args, format); size_t res = mp_vformat(data, data_size, format, args); va_end(args); return res; } #define MP_PRINT(SELF, PRINTF) \ { \ switch (mp_typeof(**data)) { \ case MP_NIL: \ mp_decode_nil(data); \ PRINTF("null"); \ break; \ case MP_UINT: \ PRINTF("%llu", (unsigned long long) mp_decode_uint(data)); \ break; \ case MP_INT: \ PRINTF("%lld", (long long) mp_decode_int(data)); \ break; \ case MP_STR: \ case MP_BIN: \ { \ uint32_t len = mp_typeof(**data) == MP_STR ? \ mp_decode_strl(data) : mp_decode_binl(data); \ PRINTF("\""); \ const char *s; \ for (s = *data; s < *data + len; s++) { \ unsigned char c = (unsigned char ) *s; \ if (c < 128 && mp_char2escape[c] != NULL) { \ /* Escape character */ \ PRINTF("%s", mp_char2escape[c]); \ } else { \ PRINTF("%c", c); \ } \ } \ PRINTF("\""); \ *data += len; \ break; \ } \ case MP_ARRAY: \ { \ uint32_t count = mp_decode_array(data); \ PRINTF("["); \ uint32_t i; \ for (i = 0; i < count; i++) { \ if (i) \ PRINTF(", "); \ SELF(data); \ } \ PRINTF("]"); \ break; \ } \ case MP_MAP: \ { \ uint32_t count = mp_decode_map(data); \ PRINTF("{"); \ uint32_t i; \ for (i = 0; i < count; i++) { \ if (i) \ PRINTF(", "); \ SELF(data); \ PRINTF(": "); \ SELF(data); \ } \ PRINTF("}"); \ break; \ } \ case MP_BOOL: \ PRINTF(mp_decode_bool(data) ? 
"true" : "false"); \ break; \ case MP_FLOAT: \ PRINTF("%g", mp_decode_float(data)); \ break; \ case MP_DOUBLE: \ PRINTF("%lg", mp_decode_double(data)); \ break; \ case MP_EXT: \ mp_next(data); \ PRINTF("undefined"); \ break; \ default: \ mp_unreachable(); \ return -1; \ } \ } static inline int mp_fprint_internal(FILE *file, const char **data) { int total_bytes = 0; #define HANDLE(FUN, ...) do { \ int bytes = FUN(file, __VA_ARGS__); \ if (mp_unlikely(bytes < 0)) \ return -1; \ total_bytes += bytes; \ } while (0) #define PRINT(...) HANDLE(fprintf, __VA_ARGS__) #define SELF(...) HANDLE(mp_fprint_internal, __VA_ARGS__) MP_PRINT(SELF, PRINT) #undef HANDLE #undef SELF #undef PRINT return total_bytes; } int mp_fprint(FILE *file, const char *data) { if (!file) file = stdout; int res = mp_fprint_internal(file, &data); return res; } static inline int mp_snprint_internal(char *buf, int size, const char **data) { int total_bytes = 0; #define HANDLE(FUN, ...) do { \ int bytes = FUN(buf, size, __VA_ARGS__); \ if (mp_unlikely(bytes < 0)) \ return -1; \ total_bytes += bytes; \ if (bytes < size) { \ buf += bytes; \ size -= bytes; \ } else { \ /* Calculate the number of bytes needed */ \ buf = NULL; \ size = 0; \ } \ } while (0) #define PRINT(...) HANDLE(snprintf, __VA_ARGS__) #define SELF(...) HANDLE(mp_snprint_internal, __VA_ARGS__) MP_PRINT(SELF, PRINT) #undef HANDLE #undef SELF #undef PRINT return total_bytes; } #undef MP_PRINT int mp_snprint(char *buf, int size, const char *data) { return mp_snprint_internal(buf, size, &data); } tarantool_1.9.1.26.g63eb81e3c/src/lib/bit/0000775000000000000000000000000013306560010016313 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lib/bit/bit.h0000664000000000000000000003424013306560010017245 0ustar rootroot#ifndef TARANTOOL_LIB_BIT_BIT_H_INCLUDED #define TARANTOOL_LIB_BIT_BIT_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @file * @brief Bit manipulation library */ #include "trivia/util.h" #include #include #include #if defined(HAVE_FFSL) || defined(HAVE_FFSLL) #include #include #endif /* defined(HAVE_FFSL) || defined(HAVE_FFSLL) */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** @cond false **/ #define bit_likely(x) __builtin_expect((x),1) #define bit_unlikely(x) __builtin_expect((x),0) struct PACKED unaligned_mem { union { uint8_t u8; uint16_t u16; uint32_t u32; uint64_t u64; float f; double lf; bool b; }; }; /** @endcond **/ /** * @brief Unaligned load from memory. 
* @param p pointer * @return number */ inline uint8_t load_u8(const void *p) { return ((const struct unaligned_mem *)p)->u8; } /** @copydoc load_u8 */ inline uint16_t load_u16(const void *p) { return ((const struct unaligned_mem *)p)->u16; } /** @copydoc load_u8 */ inline uint32_t load_u32(const void *p) { return ((const struct unaligned_mem *)p)->u32; } /** @copydoc load_u8 */ inline uint64_t load_u64(const void *p) { return ((const struct unaligned_mem *)p)->u64; } /** @copydoc load_u8 */ inline float load_float(const void *p) { return ((const struct unaligned_mem *)p)->f; } /** @copydoc load_u8 */ inline double load_double(const void *p) { return ((const struct unaligned_mem *)p)->lf; } /** @copydoc load_u8 */ inline bool load_bool(const void *p) { return ((const struct unaligned_mem *)p)->b; } /** * @brief Unaligned store to memory. * @param p pointer * @param v number */ inline void store_u8(void *p, uint8_t v) { ((struct unaligned_mem *)p)->u8 = v; } /** @copydoc store_u8 */ inline void store_u16(void *p, uint16_t v) { ((struct unaligned_mem *)p)->u16 = v; } /** @copydoc store_u8 */ inline void store_u32(void *p, uint32_t v) { ((struct unaligned_mem *)p)->u32 = v; } /** @copydoc store_u8 */ inline void store_u64(void *p, uint64_t v) { ((struct unaligned_mem *)p)->u64 = v; } /** @copydoc store_u8 */ inline void store_float(void *p, float v) { ((struct unaligned_mem *)p)->f = v; } /** @copydoc store_u8 */ inline void store_double(void *p, double v) { ((struct unaligned_mem *)p)->lf = v; } /** @copydoc store_bool */ inline void store_bool(void *p, bool b) { ((struct unaligned_mem *)p)->b = b; } /** * @brief Test bit \a pos in memory chunk \a data * data is considered as a sequence of chars, * although data size must be sizeof(long) aligned * @param data memory chunk * @param pos bit number (zero-based) * @retval true bit \a pos is set in \a data * @retval false otherwise */ inline bool bit_test(const void *data, size_t pos) { size_t chunk = pos / (CHAR_BIT * 
sizeof(unsigned long)); size_t offset = pos % (CHAR_BIT * sizeof(unsigned long)); const unsigned long *ldata = (const unsigned long *) data; return (ldata[chunk] >> offset) & 0x1; } /** * @brief Set bit \a pos in a memory chunk \a data * data is considered as a sequence of chars, * although data size must be sizeof(long) aligned * @param data memory chunk * @param pos bit number (zero-based) * @return previous value * @see bit_test * @see bit_clear */ inline bool bit_set(void *data, size_t pos) { size_t chunk = pos / (CHAR_BIT * sizeof(unsigned long)); size_t offset = pos % (CHAR_BIT * sizeof(unsigned long)); unsigned long *ldata = (unsigned long *) data; bool prev = (ldata[chunk] >> offset) & 0x1; ldata[chunk] |= (1UL << offset); return prev; } /** * @brief Clear bit \a pos in memory chunk \a data * data is considered as a sequence of chars, * although data size must be sizeof(long) aligned * @param data memory chunk * @param pos bit number (zero-based) * @return previous value * @see bit_test * @see bit_set */ inline bool bit_clear(void *data, size_t pos) { size_t chunk = pos / (CHAR_BIT * sizeof(unsigned long)); size_t offset = pos % (CHAR_BIT * sizeof(unsigned long)); unsigned long *ldata = (unsigned long *) data; bool prev = (ldata[chunk] >> offset) & 0x1; ldata[chunk] &= ~(1UL << offset); return prev; } /** * @cond false * @brief Naive implementation of ctz. */ #define CTZ_NAIVE(x, bitsize) { \ if (x == 0) { \ return (bitsize); \ } \ \ int r = 0; \ for (; (x & 1) == 0; r++) { \ x >>= 1; \ } \ \ return r; \ } /** @endcond */ /** * @brief Count Trailing Zeros. * Returns the number of trailing 0-bits in @a x, starting at the least * significant bit position. If @a x is 0, the result is undefined. 
* @param x integer * @see __builtin_ctz() * @return the number trailing 0-bits */ inline int bit_ctz_u32(uint32_t x) { #if defined(HAVE_BUILTIN_CTZ) return __builtin_ctz(x); #elif defined(HAVE_FFSL) return ffsl(x) - 1; #else CTZ_NAIVE(x, sizeof(uint32_t) * CHAR_BIT); #endif } /** * @copydoc bit_ctz_u32 */ inline int bit_ctz_u64(uint64_t x) { #if defined(HAVE_BUILTIN_CTZLL) return __builtin_ctzll(x); #elif defined(HAVE_FFSLL) return ffsll(x) - 1; #else CTZ_NAIVE(x, sizeof(uint64_t) * CHAR_BIT); #endif } #undef CTZ_NAIVE /** * @cond false * @brief Naive implementation of clz. */ #define CLZ_NAIVE(x, bitsize) { \ if (x == 0) { \ return (bitsize); \ } \ \ int r = (bitsize); \ for (; x; r--) { \ x >>= 1; \ } \ \ return r; \ } /** @endcond */ /** * @brief Count Leading Zeros. * Returns the number of leading 0-bits in @a x, starting at the most * significant bit position. If @a x is 0, the result is undefined. * @param x integer * @see __builtin_clz() * @return the number of leading 0-bits */ inline int bit_clz_u32(uint32_t x) { #if defined(HAVE_BUILTIN_CLZ) return __builtin_clz(x); #else /* !defined(HAVE_BUILTIN_CLZ) */ CLZ_NAIVE(x, sizeof(uint32_t) * CHAR_BIT); #endif } /** * @copydoc bit_clz_u32 */ inline int bit_clz_u64(uint64_t x) { #if defined(HAVE_BUILTIN_CLZLL) return __builtin_clzll(x); #else /* !defined(HAVE_BUILTIN_CLZLL) */ CLZ_NAIVE(x, sizeof(uint64_t) * CHAR_BIT); #endif } #undef CLZ_NAIVE /** * @cond false * @brief Naive implementation of popcount. */ #define POPCOUNT_NAIVE(x, bitsize) { \ int r; \ for (r = 0; x; r++) { \ x &= (x-1); \ } \ \ return r; \ } /** @endcond */ /** * @brief Returns the number of 1-bits in @a x. 
* @param x integer * @see __builtin_popcount() * @return the number of 1-bits in @a x */ inline int bit_count_u32(uint32_t x) { #if defined(HAVE_BUILTIN_POPCOUNT) return __builtin_popcount(x); #else /* !defined(HAVE_BUILTIN_POPCOUNT) */ POPCOUNT_NAIVE(x, sizeof(uint32_t) * CHAR_BIT); #endif } /** * @copydoc bit_count_u32 */ inline int bit_count_u64(uint64_t x) { #if defined(HAVE_BUILTIN_POPCOUNTLL) return __builtin_popcountll(x); #else /* !defined(HAVE_BUILTIN_POPCOUNTLL) */ POPCOUNT_NAIVE(x, sizeof(uint64_t) * CHAR_BIT); #endif } #undef POPCOUNT_NAIVE /** * @brief Rotate @a x left by @a r bits * @param x integer * @param r number for bits to rotate * @return @a x rotated left by @a r bits */ inline uint32_t bit_rotl_u32(uint32_t x, int r) { /* gcc recognises this code and generates a rotate instruction */ return ((x << r) | (x >> (32 - r))); } /** * @copydoc bit_rotl_u32 */ inline uint64_t bit_rotl_u64(uint64_t x, int r) { /* gcc recognises this code and generates a rotate instruction */ return ((x << r) | (x >> (64 - r))); } /** * @copydoc bit_rotl_u32 */ __attribute__ ((const)) inline uintmax_t bit_rotl_umax(uintmax_t x, int r) { /* gcc recognises this code and generates a rotate instruction */ return ((x << r) | (x >> (sizeof(uintmax_t) * CHAR_BIT - r))); } /** * @brief Rotate @a x right by @a r bits * @param x integer * @param r number for bits to rotate * @return @a x rotated right by @a r bits * @todo Move this method to bit.h */ inline uint32_t bit_rotr_u32(uint32_t x, int r) { /* gcc recognises this code and generates a rotate instruction */ return ((x >> r) | (x << (32 - r))); } /** * @copydoc bit_rotr_u32 */ inline uint64_t bit_rotr_u64(uint64_t x, int r) { /* gcc recognises this code and generates a rotate instruction */ return ((x >> r) | (x << (64 - r))); } /** * @copydoc bswap_u32 */ inline uint16_t bswap_u16(uint16_t x) { #if defined(HAVE_BUILTIN_BSWAP16) return __builtin_bswap16(x); #else /* !defined(HAVE_BUILTIN_BSWAP16) */ return ((x << 8) & 
UINT16_C(0xff00)) | ((x >> 8) & UINT16_C(0x00ff)); #endif } /** * @brief Returns a byte order swapped integer @a x. * This function does not take into account host architecture * (as it done by htonl / ntohl functions) and always returns @a x * with byte order swapped (BE -> LE if @a x is in BE and vice versa). * @param x integer * @return @a x with swapped bytes */ inline uint32_t bswap_u32(uint32_t x) { #if defined(HAVE_BUILTIN_BSWAP32) return __builtin_bswap32(x); #else /* !defined(HAVE_BUILTIN_BSWAP32) */ return ((x << 24) & UINT32_C(0xff000000)) | ((x << 8) & UINT32_C(0x00ff0000)) | ((x >> 8) & UINT32_C(0x0000ff00)) | ((x >> 24) & UINT32_C(0x000000ff)); #endif } /** * @copydoc bswap_u32 */ inline uint64_t bswap_u64(uint64_t x) { #if defined(HAVE_BUILTIN_BSWAP64) return __builtin_bswap64(x); #else /* !defined(HAVE_BUILTIN_BSWAP64) */ return ( (x << 56) & UINT64_C(0xff00000000000000)) | ( (x << 40) & UINT64_C(0x00ff000000000000)) | ( (x << 24) & UINT64_C(0x0000ff0000000000)) | ( (x << 8) & UINT64_C(0x000000ff00000000)) | ( (x >> 8) & UINT64_C(0x00000000ff000000)) | ( (x >> 24) & UINT64_C(0x0000000000ff0000)) | ( (x >> 40) & UINT64_C(0x000000000000ff00)) | ( (x >> 56) & UINT64_C(0x00000000000000ff)); #endif } /** * @brief Index bits in the @a x, i.e. find all positions where bits are set. * This method fills @a indexes array with found positions in increasing order. * @a offset is added to each index before putting it into @a indexes. 
* @param x integer * @param indexes memory array where found indexes are stored * @param offset a number added to each index * @return pointer to last+1 element in indexes array */ int * bit_index_u32(uint32_t x, int *indexes, int offset); /** * @copydoc bit_index_u32 */ int * bit_index_u64(uint64_t x, int *indexes, int offset); /** @cond false **/ #if defined(__x86_64__) /* Use bigger words on x86_64 */ #define ITER_UINT uint64_t #define ITER_CTZ bit_ctz_u64 #else #define ITER_UINT uint32_t #define ITER_CTZ bit_ctz_u32 #endif /** @endcond **/ /** * @brief The Bit Iterator */ struct bit_iterator { /** @cond false **/ /** Current word to process using ctz **/ ITER_UINT word; /** A bitmask that XORed with word (for set = false iteration) **/ ITER_UINT word_xor; /** A base offset of the word in bits **/ size_t word_base; /** A pointer to the start of a memory chunk **/ const char *start; /** A pointer to the next part of a memory chunk */ const char *next; /** A pointer to the end of a memory chunk */ const char *end; /** @endcond **/ }; /** * @brief Initialize bit iterator \a it * @param it bit iterator * @param data memory chunk * @param size size of the memory chunk \a data * @param set true to iterate over set bits or false to iterate over clear bits */ inline void bit_iterator_init(struct bit_iterator *it, const void *data, size_t size, bool set) { it->start = (const char *) data; it->next = it->start; it->end = it->next + size; if (bit_unlikely(size == 0)) { it->word = 0; return; } it->word_xor = set ? 
0 : (ITER_UINT) -1; it->word_base = 0; /* Check if size is a multiple of sizeof(ITER_UINT) */ const char *e = it->next + size % sizeof(ITER_UINT); if (bit_likely(it->next == e)) { it->word = *(ITER_UINT *) it->next; it->next += sizeof(ITER_UINT); } else { it->word = it->word_xor; char *w = (char *) &it->word; while (it->next < e) *w++ = *it->next++; } it->word ^= it->word_xor; } /** * @brief Return a number of a next set bit in \a it or \a SIZE_MAX * if no bits are remain in \a it * @param it bit iterator * @retval a zero-based number of a next set bit in iterator \a it * @retval SIZE_MAX if \a it does not have more set bits */ inline size_t bit_iterator_next(struct bit_iterator *it) { while (bit_unlikely(it->word == 0)) { if (bit_unlikely(it->next >= it->end)) return SIZE_MAX; /* Extract the next word from memory */ it->word = *(ITER_UINT *) it->next; it->word ^= it->word_xor; it->word_base = (it->next - it->start) * CHAR_BIT; it->next += sizeof(ITER_UINT); } /* Find the position of a first trailing bit in the current word */ int bit = ITER_CTZ(it->word); /* Remove the first trailing bit from the current word */ it->word &= it->word - 1; /* Add start position if the current word to the found bit */ return it->word_base + bit; } #undef ITER_CTZ #undef ITER_UINT #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LIB_BIT_BIT_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lib/bit/CMakeLists.txt0000664000000000000000000000016313306560010021053 0ustar rootrootset(lib_sources bit.c ) set_source_files_compile_flags(${lib_sources}) add_library(bit STATIC ${lib_sources}) tarantool_1.9.1.26.g63eb81e3c/src/lib/bit/bit.c0000664000000000000000000001115313306560010017236 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "bit/bit.h" extern inline uint8_t load_u8(const void *p); extern inline uint16_t load_u16(const void *p); extern inline uint32_t load_u32(const void *p); extern inline uint64_t load_u64(const void *p); extern inline float load_float(const void *p); extern inline double load_double(const void *p); extern inline void store_u8(void *p, uint8_t v); extern inline void store_u16(void *p, uint16_t v); extern inline void store_u32(void *p, uint32_t v); extern inline void store_u64(void *p, uint64_t v); extern inline void store_float(void *p, float v); extern inline void store_double(void *p, double v); extern inline void store_bool(void *p, bool v); extern inline bool bit_test(const void *data, size_t pos); extern inline bool bit_set(void *data, size_t pos); extern inline bool bit_clear(void *data, size_t pos); extern inline int bit_ctz_u32(uint32_t x); extern inline int bit_ctz_u64(uint64_t x); extern inline int bit_clz_u32(uint32_t x); extern inline int bit_clz_u64(uint64_t x); extern inline int bit_count_u32(uint32_t x); extern inline int bit_count_u64(uint64_t x); extern inline uint32_t bit_rotl_u32(uint32_t x, int r); extern inline uint64_t bit_rotl_u64(uint64_t x, int r); extern inline uint32_t bit_rotr_u32(uint32_t x, int r); extern inline uint64_t bit_rotr_u64(uint64_t x, int r); extern inline uint16_t bswap_u16(uint16_t x); extern inline uint32_t bswap_u32(uint32_t x); extern inline uint64_t bswap_u64(uint64_t x); #define BITINDEX_NAIVE(type, x, bitsize) { \ /* naive generic implementation, worst case */ \ type bit = 1; \ int i = 0; \ for (unsigned k = 0; k < bitsize; k++) { \ if (x & bit) { \ indexes[i++] = offset + k + 1; \ } \ bit <<= 1; \ } \ \ indexes[i] = 0; \ return indexes + i; \ } int * bit_index_u32(uint32_t x, int *indexes, int offset) { #if defined(HAVE_BUILTIN_CTZ) int prev_pos = 0; int i = 0; #if defined(HAVE_BUILTIN_POPCOUNT) /* fast implementation using built-in popcount function */ const int count = bit_count_u32(x); while (i < count) 
{ #else /* sligtly slower implementation without using built-in popcount */ while(x) { #endif /* use ctz */ const int a = bit_ctz_u32(x); prev_pos += a + 1; x >>= a; x >>= 1; indexes[i++] = offset + prev_pos; } indexes[i] = 0; return indexes + i; #else /* !defined(HAVE_BUILTIN_CTZ) */ BITINDEX_NAIVE(uint32_t, x, sizeof(uint32_t) * CHAR_BIT); #endif } int * bit_index_u64(uint64_t x, int *indexes, int offset) { #if defined(HAVE_BUILTIN_CTZLL) int prev_pos = 0; int i = 0; #if defined(HAVE_BUILTIN_POPCOUNTLL) /* fast implementation using built-in popcount function */ const int count = bit_count_u64(x); while (i < count) { #else /* sligtly slower implementation without using built-in popcount */ while(x) { #endif /* use ctz */ const int a = bit_ctz_u64(x); prev_pos += a + 1; x >>= a; x >>= 1; indexes[i++] = offset + prev_pos; } indexes[i] = 0; return indexes + i; #else /* !defined(HAVE_CTZ) */ BITINDEX_NAIVE(uint64_t, x, sizeof(uint64_t) * CHAR_BIT); #endif } #undef BITINDEX_NAIVE extern inline void bit_iterator_init(struct bit_iterator *it, const void *data, size_t size, bool set); extern inline size_t bit_iterator_next(struct bit_iterator *it); tarantool_1.9.1.26.g63eb81e3c/src/lib/bit/int96.h0000664000000000000000000001075413306560010017444 0ustar rootroot#ifndef TARANTOOL_LIB_BIT_INT96_H_INCLUDED #define TARANTOOL_LIB_BIT_INT96_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include /** * 96-bit signed integer. * 1)Negative integer is stored as 96-bit two's complement * 2)Stores an integer modulo n, where n = 2**96 * Actually (1) == (2), as the wave-particle duality. * Designed for storing integers in range [INT64_MIN, UINT64_MAX], * and detecting overflow (out of range [INT64_MIN, UINT64_MAX]) * after operations (addition, subtraction) on them. * The base fact is when two uint64_t or int64_t values * are converted to int96, and then added or subtracted, the * int96 arithmetics cannot overflow. Actually you need at least * billions of adding UINT64_MAX or INT64_MIN to make it overflow. * Addition is implemented directly; * For subtraction use addition of inverted number. 
*/ /** * struct 96-bit signed integer */ struct int96_num { /* most significant 64 bits */ uint64_t high64; /* least significant order 32 bits */ /* (high dword - zero bits) */ uint64_t low32; }; /** * Assign to unsigned 64-bit */ static inline void int96_set_unsigned(struct int96_num *num, uint64_t val) { const uint64_t mask = 0xFFFFFFFFu; num->high64 = val >> 32; num->low32 = val & mask; } /** * Assign to signed 64-bit */ static inline void int96_set_signed(struct int96_num *num, int64_t val) { const uint64_t mask = 0xFFFFFFFFu; num->high64 = (uint64_t)(val >> 32); num->low32 = ((uint64_t)val) & mask; } /** * Invert number (assign a to -a) */ static inline void int96_invert(struct int96_num *num) { const uint64_t mask = 0xFFFFFFFFu; assert(!(num->low32 & ~mask)); num->high64 = ~num->high64; num->low32 = (~num->low32) & mask; num->low32++; num->high64 += num->low32 >> 32; num->low32 &= mask; } /** * Add to number 'to' another number 'what' */ static inline void int96_add(struct int96_num *to, const struct int96_num *what) { const uint64_t mask = 0xFFFFFFFFu; assert(!(to->low32 & ~mask)); assert(!(what->low32 & ~mask)); to->low32 += what->low32; to->high64 += to->low32 >> 32; to->high64 += what->high64; to->low32 &= mask; } /** * Get lowers 64 bit of a number (that is C cast to uint64_t) */ static inline uint64_t int96_get_low64bit(const struct int96_num *num) { return num->low32 | (num->high64 << 32); } /** * Returns true if a number fits [0, UINT64_MAX] range */ static inline bool int96_is_uint64(const struct int96_num *num) { return (num->high64 >> 32) == 0; } /** * Get number as uint64_t, * the number is expected to be valid range (assert) */ static inline uint64_t int96_extract_uint64(const struct int96_num *num) { assert(int96_is_uint64(num)); return int96_get_low64bit(num); } /** * Returns true if a number fits [INT64_MIN, 0) range */ static inline bool int96_is_neg_int64(const struct int96_num *num) { return (num->high64 >> 31) == 0x1FFFFFFFFull; } /** * 
Get number as negative int64_t, * the number is expected to be valid range (assert) */ static inline int64_t int96_extract_neg_int64(const struct int96_num *num) { assert(int96_is_neg_int64(num)); return (int64_t)int96_get_low64bit(num); } #endif /* #ifndef TARANTOOL_LIB_BIT_INT96_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/coio.cc0000664000000000000000000004472413306565107016256 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "coio.h" #include #include #include #include #include #include "sio.h" #include "scoped_guard.h" #include "coio_task.h" /* coio_resolve() */ struct CoioGuard { struct ev_io *ev_io; CoioGuard(struct ev_io *arg) :ev_io(arg) {} ~CoioGuard() { ev_io_stop(loop(), ev_io); } }; typedef void (*ev_stat_cb)(ev_loop *, ev_stat *, int); /** Note: this function does not throw */ void coio_create(struct ev_io *coio, int fd) { /* Prepare for ev events. */ coio->data = fiber(); ev_init(coio, (ev_io_cb) fiber_schedule_cb); coio->fd = fd; } static inline bool coio_fiber_yield_timeout(struct ev_io *coio, ev_tstamp delay) { coio->data = fiber(); bool is_timedout = fiber_yield_timeout(delay); coio->data = NULL; return is_timedout; } /** * Connect to a host with a specified timeout. * @retval -1 timeout * @retval 0 connected */ static int coio_connect_addr(struct ev_io *coio, struct sockaddr *addr, socklen_t len, ev_tstamp timeout) { ev_loop *loop = loop(); evio_socket(coio, addr->sa_family, SOCK_STREAM, 0); auto coio_guard = make_scoped_guard([=]{ evio_close(loop, coio); }); if (sio_connect(coio->fd, addr, len) == 0) { coio_guard.is_active = false; return 0; } assert(errno == EINPROGRESS); /* * Wait until socket is ready for writing or * timed out. 
*/ ev_io_set(coio, coio->fd, EV_WRITE); ev_io_start(loop, coio); bool is_timedout = coio_fiber_yield_timeout(coio, timeout); ev_io_stop(loop, coio); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); int error = EINPROGRESS; socklen_t sz = sizeof(error); sio_getsockopt(coio->fd, SOL_SOCKET, SO_ERROR, &error, &sz); if (error != 0) { errno = error; tnt_raise(SocketError, coio->fd, "connect"); } coio_guard.is_active = false; return 0; } void coio_fill_addrinfo(struct addrinfo *ai_local, const char *host, const char *service, int host_hint) { ai_local->ai_next = NULL; if (host_hint == 1) { // IPv4 ai_local->ai_addrlen = sizeof(sockaddr_in); ai_local->ai_addr = (sockaddr*)malloc(ai_local->ai_addrlen); memset(ai_local->ai_addr, 0, ai_local->ai_addrlen); ((sockaddr_in*)ai_local->ai_addr)->sin_family = AF_INET; ((sockaddr_in*)ai_local->ai_addr)->sin_port = htons((uint16_t)atoi(service)); inet_pton(AF_INET, host, &((sockaddr_in*)ai_local->ai_addr)->sin_addr); } else { // IPv6 ai_local->ai_addrlen = sizeof(sockaddr_in6); ai_local->ai_addr = (sockaddr*)malloc(ai_local->ai_addrlen); memset(ai_local->ai_addr, 0, ai_local->ai_addrlen); ((sockaddr_in6*)ai_local->ai_addr)->sin6_family = AF_INET6; ((sockaddr_in6*)ai_local->ai_addr)->sin6_port = htons((uint16_t)atoi(service)); inet_pton(AF_INET6, host, &((sockaddr_in6*)ai_local->ai_addr)->sin6_addr); } } /** * Resolve hostname:service from \a uri and connect to the first available * address with a specified timeout. * * If \a addr is not NULL the function provides resolved address on success. * In this case, \a addr_len is a value-result argument. It should be * initialized to the size of the buffer associated with \a addr. Upon return, * \a addr_len is updated to contain the actual size of the source address. * The returned address is truncated if the buffer provided is too small; * in this case, addrlen will return a value greater than was supplied to the * call. 
* * This function also supports UNIX domain sockets if uri->path is not NULL and * uri->service is NULL. * * @retval -1 timeout * @retval 0 connected */ int coio_connect_timeout(struct ev_io *coio, struct uri *uri, struct sockaddr *addr, socklen_t *addr_len, ev_tstamp timeout) { char host[URI_MAXHOST] = { '\0' }; if (uri->host) { snprintf(host, sizeof(host), "%.*s", (int) uri->host_len, uri->host); } char service[URI_MAXSERVICE]; snprintf(service, sizeof(service), "%.*s", (int) uri->service_len, uri->service); /* try to resolve a hostname */ struct ev_loop *loop = loop(); ev_tstamp start, delay; evio_timeout_init(loop, &start, &delay, timeout); if (strcmp(host, URI_HOST_UNIX) == 0) { /* UNIX socket */ struct sockaddr_un un; snprintf(un.sun_path, sizeof(un.sun_path), "%s", service); un.sun_family = AF_UNIX; if (coio_connect_addr(coio, (struct sockaddr *) &un, sizeof(un), delay) != 0) return -1; if (addr != NULL) { assert(addr_len != NULL); *addr_len = MIN(sizeof(un), *addr_len); memcpy(addr, &un, *addr_len); } return 0; } struct addrinfo *ai = NULL; struct addrinfo ai_local; if (uri->host_hint) { coio_fill_addrinfo(&ai_local, host, service, uri->host_hint); ai = &ai_local; } else { struct addrinfo hints; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_ADDRCONFIG|AI_NUMERICSERV|AI_PASSIVE; hints.ai_protocol = 0; int rc = coio_getaddrinfo(host, service, &hints, &ai, delay); if (rc != 0) { diag_raise(); panic("unspecified getaddrinfo error"); } } auto addrinfo_guard = make_scoped_guard([=] { if (!uri->host_hint) freeaddrinfo(ai); else free(ai_local.ai_addr); }); evio_timeout_update(loop(), start, &delay); coio_timeout_init(&start, &delay, timeout); assert(! 
evio_has_fd(coio)); while (ai) { try { if (coio_connect_addr(coio, ai->ai_addr, ai->ai_addrlen, delay)) return -1; if (addr != NULL) { assert(addr_len != NULL); *addr_len = MIN(ai->ai_addrlen, *addr_len); memcpy(addr, ai->ai_addr, *addr_len); } return 0; /* connected */ } catch (SocketError *e) { if (ai->ai_next == NULL) throw; /* ignore exception and try the next address */ } ai = ai->ai_next; ev_now_update(loop); coio_timeout_update(start, &delay); } tnt_raise(SocketError, coio->fd, "connection failed"); } /** * Wait a client connection on a server socket until * timedout. */ int coio_accept(struct ev_io *coio, struct sockaddr *addr, socklen_t addrlen, ev_tstamp timeout) { ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); CoioGuard coio_guard(coio); while (true) { /* Assume that there are waiting clients * available */ int fd = sio_accept(coio->fd, addr, &addrlen); if (fd >= 0) { evio_setsockopt_client(fd, addr->sa_family, SOCK_STREAM); return fd; } /* The socket is not ready, yield */ if (! ev_is_active(coio)) { ev_io_set(coio, coio->fd, EV_READ); ev_io_start(loop(), coio); } /* * Yield control to other fibers until the * timeout is reached. */ bool is_timedout = coio_fiber_yield_timeout(coio, delay); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); coio_timeout_update(start, &delay); } } /** * Read at least sz bytes from socket with readahead. * * In case of EOF returns the amount read until eof (possibly 0), * and sets errno to 0. * Can read up to bufsiz bytes. * * @retval the number of bytes read. */ ssize_t coio_read_ahead_timeout(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz, ev_tstamp timeout) { assert(sz <= bufsiz); ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); ssize_t to_read = (ssize_t) sz; CoioGuard coio_guard(coio); while (true) { /* * Sic: assume the socket is ready: since * the user called read(), some data must * be expected. 
*/ ssize_t nrd = sio_read(coio->fd, buf, bufsiz); if (nrd > 0) { to_read -= nrd; if (to_read <= 0) return sz - to_read; buf = (char *) buf + nrd; bufsiz -= nrd; } else if (nrd == 0) { errno = 0; return sz - to_read; } /* The socket is not ready, yield */ if (! ev_is_active(coio)) { ev_io_set(coio, coio->fd, EV_READ); ev_io_start(loop(), coio); } /* * Yield control to other fibers until the * timeout is being reached. */ bool is_timedout = coio_fiber_yield_timeout(coio, delay); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); coio_timeout_update(start, &delay); } } /** * Read at least sz bytes, with readahead. * * Treats EOF as an error, and throws an exception. * * @retval the number of bytes read, > 0. */ ssize_t coio_readn_ahead(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz) { ssize_t nrd = coio_read_ahead(coio, buf, sz, bufsiz); if (nrd < (ssize_t)sz) { errno = EPIPE; tnt_raise(SocketError, coio->fd, "unexpected EOF when reading " "from socket"); } return nrd; } /** * Read at least sz bytes, with readahead and timeout. * * Treats EOF as an error, and throws an exception. * * @retval the number of bytes read, > 0. */ ssize_t coio_readn_ahead_timeout(struct ev_io *coio, void *buf, size_t sz, size_t bufsiz, ev_tstamp timeout) { ssize_t nrd = coio_read_ahead_timeout(coio, buf, sz, bufsiz, timeout); if (nrd < (ssize_t)sz && errno == 0) { /* EOF. */ errno = EPIPE; tnt_raise(SocketError, coio->fd, "unexpected EOF when reading " "from socket"); } return nrd; } /** Write sz bytes to socket. * * Throws SocketError in case of write error. If * the socket is not ready, yields the current * fiber until the socket becomes ready, until * all data is written. * * @retval the number of bytes written. Can be less than * requested only in case of timeout. 
*/ ssize_t coio_write_timeout(struct ev_io *coio, const void *buf, size_t sz, ev_tstamp timeout) { ssize_t towrite = sz; ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); CoioGuard coio_guard(coio); while (true) { /* * Sic: write as much data as possible, * assuming the socket is ready. */ ssize_t nwr = sio_write(coio->fd, buf, towrite); if (nwr > 0) { /* Go past the data just written. */ if (nwr >= towrite) return sz; towrite -= nwr; buf = (char *) buf + nwr; } if (! ev_is_active(coio)) { ev_io_set(coio, coio->fd, EV_WRITE); ev_io_start(loop(), coio); } /* Yield control to other fibers. */ fiber_testcancel(); /* * Yield control to other fibers until the * timeout is reached or the socket is * ready. */ bool is_timedout = coio_fiber_yield_timeout(coio, delay); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); coio_timeout_update(start, &delay); } } /* * Write iov using sio API. * Put in an own function to workaround gcc bug with @finally */ static inline ssize_t coio_flush(int fd, struct iovec *iov, ssize_t offset, int iovcnt) { ssize_t nwr; try { sio_add_to_iov(iov, -offset); nwr = sio_writev(fd, iov, iovcnt); sio_add_to_iov(iov, offset); } catch (SocketError *e) { sio_add_to_iov(iov, offset); throw; } return nwr; } ssize_t coio_writev_timeout(struct ev_io *coio, struct iovec *iov, int iovcnt, size_t size_hint, ev_tstamp timeout) { size_t total = 0; size_t iov_len = 0; struct iovec *end = iov + iovcnt; ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); CoioGuard coio_guard(coio); /* Avoid a syscall in case of 0 iovcnt. */ while (iov < end) { /* Write as much data as possible. */ ssize_t nwr = coio_flush(coio->fd, iov, iov_len, end - iov); if (nwr >= 0) { total += nwr; /* * If there was a hint for the total size * of the vector, use it. */ if (size_hint > 0 && size_hint == total) break; iov += sio_move_iov(iov, nwr, &iov_len); if (iov == end) { assert(iov_len == 0); break; } } if (! 
ev_is_active(coio)) { ev_io_set(coio, coio->fd, EV_WRITE); ev_io_start(loop(), coio); } /* Yield control to other fibers. */ fiber_testcancel(); /* * Yield control to other fibers until the * timeout is reached or the socket is * ready. */ bool is_timedout = coio_fiber_yield_timeout(coio, delay); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); coio_timeout_update(start, &delay); } return total; } /** * Send up to sz bytes to a UDP socket. * Return the number of bytes sent. * * @retval n the number of bytes written */ ssize_t coio_sendto_timeout(struct ev_io *coio, const void *buf, size_t sz, int flags, const struct sockaddr *dest_addr, socklen_t addrlen, ev_tstamp timeout) { ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); CoioGuard coio_guard(coio); while (true) { /* * Sic: write as much data as possible, * assuming the socket is ready. */ ssize_t nwr = sio_sendto(coio->fd, buf, sz, flags, dest_addr, addrlen); if (nwr > 0) return nwr; if (! ev_is_active(coio)) { ev_io_set(coio, coio->fd, EV_WRITE); ev_io_start(loop(), coio); } /* * Yield control to other fibers until * timeout is reached or the socket is * ready. */ bool is_timedout = coio_fiber_yield_timeout(coio, delay); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); coio_timeout_update(start, &delay); } } /** * Read a datagram up to sz bytes from a socket, with a timeout. * * @retval 0, errno = 0 eof * @retvl n number of bytes read */ ssize_t coio_recvfrom_timeout(struct ev_io *coio, void *buf, size_t sz, int flags, struct sockaddr *src_addr, socklen_t addrlen, ev_tstamp timeout) { ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); CoioGuard coio_guard(coio); while (true) { /* * Read as much data as possible, * assuming the socket is ready. */ ssize_t nrd = sio_recvfrom(coio->fd, buf, sz, flags, src_addr, &addrlen); if (nrd >= 0) return nrd; if (! 
ev_is_active(coio)) { ev_io_set(coio, coio->fd, EV_READ); ev_io_start(loop(), coio); } /* * Yield control to other fibers until * timeout is reached or the socket is * ready. */ bool is_timedout = coio_fiber_yield_timeout(coio, delay); fiber_testcancel(); if (is_timedout) tnt_raise(TimedOut); coio_timeout_update(start, &delay); } } void coio_service_on_accept(struct evio_service *evio_service, int fd, struct sockaddr *addr, socklen_t addrlen) { struct coio_service *service = (struct coio_service *) evio_service->on_accept_param; struct ev_io coio; coio_create(&coio, fd); /* Set connection name. */ char fiber_name[SERVICE_NAME_MAXLEN]; snprintf(fiber_name, sizeof(fiber_name), "%s/%s", evio_service->name, sio_strfaddr(addr, addrlen)); /* Create the worker fiber. */ struct fiber *f; try { f = fiber_new_xc(fiber_name, service->handler); } catch (struct error *e) { error_log(e); say_error("can't create a handler fiber, dropping client connection"); evio_close(loop(), &coio); throw; } /* * The coio is passed into the created fiber, reset the * libev callback param to point at it. */ coio.data = f; /* * Start the created fiber. It becomes the coio object owner * and will have to close it and free before termination. 
*/ fiber_start(f, coio, addr, addrlen, service->handler_param); } void coio_service_init(struct coio_service *service, const char *name, fiber_func handler, void *handler_param) { evio_service_init(loop(), &service->evio_service, name, coio_service_on_accept, service); service->handler = handler; service->handler_param = handler_param; } void coio_service_start(struct evio_service *service, const char *uri) { evio_service_bind(service, uri); evio_service_listen(service); } void coio_stat_init(ev_stat *stat, const char *path) { ev_stat_init(stat, (ev_stat_cb) fiber_schedule_cb, path, 0.0); } void coio_stat_stat_timeout(ev_stat *stat, ev_tstamp timeout) { stat->data = fiber(); ev_stat_start(loop(), stat); ev_tstamp start, delay; coio_timeout_init(&start, &delay, timeout); fiber_yield_timeout(delay); ev_stat_stop(loop(), stat); fiber_testcancel(); } typedef void (*ev_child_cb)(ev_loop *, ev_child *, int); /** * Wait for a forked child to complete. * @return process return status */ int coio_waitpid(pid_t pid) { assert(cord_is_main()); ev_child cw; ev_init(&cw, (ev_child_cb) fiber_schedule_cb); ev_child_set(&cw, pid, 0); cw.data = fiber(); ev_child_start(loop(), &cw); /* * It's not safe to spuriously wakeup this fiber since * in this case the server will leave a zombie process * behind. 
*/ bool allow_cancel = fiber_set_cancellable(false); fiber_yield(); fiber_set_cancellable(allow_cancel); ev_child_stop(loop(), &cw); int status = cw.rstatus; fiber_testcancel(); return status; } /* Values of COIO_READ(WRITE) must equal to EV_READ(WRITE) */ static_assert(COIO_READ == (int) EV_READ, "TNT_IO_READ"); static_assert(COIO_WRITE == (int) EV_WRITE, "TNT_IO_WRITE"); struct coio_wdata { struct fiber *fiber; int revents; }; static void coio_wait_cb(struct ev_loop *loop, ev_io *watcher, int revents) { (void) loop; struct coio_wdata *wdata = (struct coio_wdata *) watcher->data; wdata->revents = revents; fiber_wakeup(wdata->fiber); } int coio_wait(int fd, int events, double timeout) { if (fiber_is_cancelled()) return 0; struct ev_io io; ev_io_init(&io, coio_wait_cb, fd, events); struct coio_wdata wdata = { /* .fiber = */ fiber(), /* .revents = */ 0 }; io.data = &wdata; /* A special hack to work with zero timeout */ ev_set_priority(&io, EV_MAXPRI); ev_io_start(loop(), &io); fiber_yield_timeout(timeout); ev_io_stop(loop(), &io); return wdata.revents & (EV_READ | EV_WRITE); } int coio_close(int fd) { ev_io_closing(loop(), fd, EV_CUSTOM); return close(fd); } tarantool_1.9.1.26.g63eb81e3c/src/sio.h0000664000000000000000000001300313306565107015743 0ustar rootroot#ifndef TARANTOOL_SIO_H_INCLUDED #define TARANTOOL_SIO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * Exception-aware wrappers around BSD sockets. * Provide better error logging and I/O statistics. */ #include #include #include #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ const char * sio_strfaddr(struct sockaddr *addr, socklen_t addrlen); int sio_getpeername(int fd, struct sockaddr *addr, socklen_t *addrlen); /** * Advance write position in the iovec array * based on its current value and the number of * bytes written. * * @param[in] iov the vector being written with writev(). * @param[in] nwr number of bytes written, @pre >= 0 * @param[in,out] iov_len offset in iov[0]; * * @return offset of iov[0] for the next write */ static inline int sio_move_iov(struct iovec *iov, size_t nwr, size_t *iov_len) { nwr += *iov_len; struct iovec *begin = iov; while (nwr > 0 && nwr >= iov->iov_len) { nwr -= iov->iov_len; iov++; } *iov_len = nwr; return iov - begin; } /** * Change values of iov->iov_len and iov->iov_base * to adjust to a partial write. 
*/ static inline void sio_add_to_iov(struct iovec *iov, size_t size) { iov->iov_len += size; iov->iov_base = (char *) iov->iov_base - size; } #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" enum { SERVICE_NAME_MAXLEN = 32 }; extern const struct type_info type_SocketError; class SocketError: public SystemError { public: SocketError(const char *file, unsigned line, int fd, const char *format, ...); virtual void raise() { throw this; } }; /** Close a file descriptor on exception or end of scope. */ struct FDGuard { int fd; explicit FDGuard(int fd_arg):fd(fd_arg) {} ~FDGuard() { if (fd >= 0) close(fd); } private: explicit FDGuard(const FDGuard&) = delete; FDGuard& operator=(const FDGuard&) = delete; }; const char *sio_socketname(int fd); int sio_socket(int domain, int type, int protocol); int sio_shutdown(int fd, int how); int sio_getfl(int fd); int sio_setfl(int fd, int flag, int on); void sio_setsockopt(int fd, int level, int optname, const void *optval, socklen_t optlen); void sio_getsockopt(int fd, int level, int optname, void *optval, socklen_t *optlen); int sio_connect(int fd, struct sockaddr *addr, socklen_t addrlen); int sio_bind(int fd, struct sockaddr *addr, socklen_t addrlen); int sio_listen(int fd); int sio_listen_backlog(); int sio_accept(int fd, struct sockaddr *addr, socklen_t *addrlen); ssize_t sio_read(int fd, void *buf, size_t count); ssize_t sio_write(int fd, const void *buf, size_t count); ssize_t sio_writev(int fd, const struct iovec *iov, int iovcnt); ssize_t sio_write_total(int fd, const void *buf, size_t count, size_t total); /** * Read at least count and up to buf_size bytes from fd. * Throw exception on error or disconnect. * * @return the number of of bytes actually read. */ ssize_t sio_readn_ahead(int fd, void *buf, size_t count, size_t buf_size); /** * Read count bytes from fd. * Throw an exception on error or disconnect. * * @return count of bytes actually read. 
*/ static inline ssize_t sio_readn(int fd, void *buf, size_t count) { return sio_readn_ahead(fd, buf, count, count); } /** * Write count bytes to fd. * Throw an exception on error or disconnect. * * @return count of bytes actually written. */ ssize_t sio_writen(int fd, const void *buf, size_t count); /* Only for blocked I/O */ ssize_t sio_writev_all(int fd, struct iovec *iov, int iovcnt); /** * A wrapper over sendfile. * Throw if send file failed. */ ssize_t sio_sendfile(int sock_fd, int file_fd, off_t *offset, size_t size); /** * Receive a file sent by sendfile * Throw if receiving failed */ ssize_t sio_recvfile(int sock_fd, int file_fd, off_t *offset, size_t size); ssize_t sio_sendto(int fd, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen); ssize_t sio_recvfrom(int fd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen); #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_SIO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/random.c0000664000000000000000000000501213306560010016411 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "random.h" #include #include #include #include #include #include #include "say.h" static int rfd; void random_init(void) { int seed; rfd = open("/dev/urandom", O_RDONLY); if (rfd == -1) rfd = open("/dev/random", O_RDONLY | O_NONBLOCK); if (rfd == -1) { struct timeval tv; gettimeofday(&tv, 0); seed = (getpid() << 16) ^ getuid() ^ tv.tv_sec ^ tv.tv_usec; goto srand; } int flags; if ( (flags = fcntl(rfd, F_GETFD)) < 0 || fcntl(rfd, F_SETFD, flags | FD_CLOEXEC) < 0) say_syserror("fcntl, fd=%i", rfd); ssize_t res = read(rfd, &seed, sizeof(seed)); (void) res; srand: srandom(seed); srand(seed); } void random_free(void) { if (rfd == -1) return; close(rfd); } void random_bytes(char *buf, size_t size) { size_t generated = 0; if (rfd == -1) goto rand; int attempt = 0; while (generated < size) { ssize_t n = read(rfd, buf + generated, size - generated); if (n <= 0) { if (attempt++ > 5) break; continue; } generated += n; attempt = 0; } rand: /* fill remaining bytes with PRNG */ while (generated < size) buf[generated++] = rand(); } tarantool_1.9.1.26.g63eb81e3c/src/http_parser.c0000664000000000000000000002112013306560010017462 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "httpc.h" #include "http_parser.h" #define LF (unsigned char) '\n' #define CR (unsigned char) '\r' #define CRLF "\r\n" /** * Following http parser functions were taken with slight * adaptation from nginx http parser module */ /** * Utility function used in headers parsing */ static int http_parse_status_line(struct http_parser *parser, char **bufp, const char *end_buf) { char ch; char *p = *bufp; enum { sw_start = 0, sw_H, sw_HT, sw_HTT, sw_HTTP, sw_first_major_digit, sw_major_digit, sw_first_minor_digit, sw_minor_digit, sw_status, sw_space_after_status, sw_status_text, sw_almost_done } state; state = sw_start; int status_count = 0; for (;p < end_buf; p++) { ch = *p; switch (state) { /* "HTTP/" */ case sw_start: if (ch == 'H') state = sw_H; else return HTTP_PARSE_INVALID; break; case sw_H: if (ch == 'T') state = sw_HT; else return HTTP_PARSE_INVALID; break; case sw_HT: if (ch == 'T') state = sw_HTT; else return HTTP_PARSE_INVALID; break; case sw_HTT: if (ch == 'P') state = sw_HTTP; else return HTTP_PARSE_INVALID; break; case sw_HTTP: if (ch == '/') state = sw_first_major_digit; else return HTTP_PARSE_INVALID; break; /* The first digit of major HTTP version */ case sw_first_major_digit: if (ch < '1' || ch > '9') { return HTTP_PARSE_INVALID; } parser->http_major = ch - '0'; state = sw_major_digit; break; /* The major HTTP version or dot */ case sw_major_digit: if (ch == '.') { state = sw_first_minor_digit; break; } if (ch < '0' || ch > '9') { return HTTP_PARSE_INVALID; } if (parser->http_major > 99) { return HTTP_PARSE_INVALID; } parser->http_major = parser->http_major * 10 + (ch - '0'); break; /* The first digit of minor HTTP version */ case sw_first_minor_digit: if (ch < '0' || ch > '9') { return HTTP_PARSE_INVALID; } parser->http_minor = ch - '0'; state = sw_minor_digit; break; /* * The minor HTTP version or * the end of the request line */ case sw_minor_digit: if (ch == ' ') { state = sw_status; break; } if (ch < '0' || ch > '9') { return 
HTTP_PARSE_INVALID; } if (parser->http_minor > 99) { return HTTP_PARSE_INVALID; } parser->http_minor = parser->http_minor * 10 + (ch - '0'); break; /* HTTP status code */ case sw_status: if (ch == ' ') { break; } if (ch < '0' || ch > '9') { return HTTP_PARSE_INVALID; } if (++status_count == 3) { state = sw_space_after_status; } break; /* Space or end of line */ case sw_space_after_status: switch (ch) { case ' ': state = sw_status_text; break; case '.': /* IIS may send 403.1, 403.2, etc */ state = sw_status_text; break; case CR: state = sw_almost_done; break; case LF: goto done; default: return HTTP_PARSE_INVALID; } break; /* Any text until end of line */ case sw_status_text: switch (ch) { case CR: state = sw_almost_done; break; case LF: goto done; } break; /* End of status line */ case sw_almost_done: switch (ch) { case LF: goto done; default: return HTTP_PARSE_INVALID; } } } done: *bufp = p + 1; return HTTP_PARSE_OK; } int http_parse_header_line(struct http_parser *parser, char **bufp, const char *end_buf) { char c, ch; char *p = *bufp; char *header_name_start = p; parser->header_name_idx = 0; enum { sw_start = 0, sw_name, sw_space_before_value, sw_value, sw_space_after_value, sw_almost_done, sw_header_almost_done } state = sw_start; /* * The last '\0' is not needed * because string is zero terminated */ static char lowcase[] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0-\0\0" "0123456789" "\0\0\0\0\0\0\0abcdefghijklmnopqrstuvwxyz\0\0\0\0_\0" "abcdefghijklmnopqrstuvwxyz\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "\0\0\0\0\0\0\0\0\0\0"; for (; p < end_buf; p++) { ch = *p; switch (state) { /* first char */ case sw_start: switch (ch) { case CR: parser->header_value_end = p; state = sw_header_almost_done; 
break; case LF: parser->header_value_end = p; goto header_done; default: state = sw_name; c = lowcase[ch]; if (c != 0) { parser->header_name[0] = c; parser->header_name_idx = 1; break; } if (ch == '\0') { return HTTP_PARSE_INVALID; } break; } break; /* http_header name */ case sw_name: c = lowcase[ch]; if (c != 0) { parser->header_name[parser->header_name_idx] = c; parser->header_name_idx++; parser->header_name_idx &= (HEADER_LEN - 1); break; } if (ch == ':') { state = sw_space_before_value; break; } if (ch == CR) { parser->header_value_start = p; parser->header_value_end = p; state = sw_almost_done; break; } if (ch == LF) { parser->header_value_start = p; parser->header_value_end = p; goto done; } /* handle "HTTP/1.1 ..." lines */ if (ch == '/' && p - header_name_start == 4 && strncmp(header_name_start, "HTTP", 4) == 0) { int rc = http_parse_status_line(parser, &header_name_start, end_buf); if (rc == HTTP_PARSE_INVALID) { parser->http_minor = -1; parser->http_major = -1; } state = sw_start; break; } if (ch == '\0') return HTTP_PARSE_INVALID; break; /* space* before http_header value */ case sw_space_before_value: switch (ch) { case ' ': break; case CR: parser->header_value_start = p; parser->header_value_end = p; state = sw_almost_done; break; case LF: parser->header_value_start = p; parser->header_value_end = p; goto done; case '\0': return HTTP_PARSE_INVALID; default: parser->header_value_start = p; state = sw_value; break; } break; /* http_header value */ case sw_value: switch (ch) { case ' ': parser->header_value_end = p; state = sw_space_after_value; break; case CR: parser->header_value_end = p; state = sw_almost_done; break; case LF: parser->header_value_end = p; goto done; case '\0': return HTTP_PARSE_INVALID; } break; /* space* before end of http_header line */ case sw_space_after_value: switch (ch) { case ' ': break; case CR: state = sw_almost_done; break; case LF: goto done; case '\0': return HTTP_PARSE_INVALID; default: state = sw_value; break; } break; 
/* end of http_header line */ case sw_almost_done: switch (ch) { case LF: goto done; case CR: break; default: return HTTP_PARSE_INVALID; } break; /* end of http_header */ case sw_header_almost_done: if (ch == LF) goto header_done; else return HTTP_PARSE_INVALID; } } done: *bufp = p + 1; return HTTP_PARSE_OK; header_done: *bufp = p + 1; return HTTP_PARSE_DONE; } tarantool_1.9.1.26.g63eb81e3c/src/rmean.c0000664000000000000000000000720013306560010016234 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "rmean.h" #include "fiber.h" void rmean_roll(int64_t *value, double dt) { value[0] /= dt; int j = RMEAN_WINDOW; /* in case when dt >= 2. 
we update not only last counter */ for (; j > (int)(dt + 0.1); j--) value[j] = value[j - 1]; for (; j > 0; j--) value[j] = value[0]; value[0] = 0; } int64_t rmean_mean(struct rmean *rmean, size_t name) { int64_t mean = 0; for (size_t j = 1; j <= RMEAN_WINDOW; j++) mean += rmean->stats[name].value[j]; /* value[0] not adds because second isn't over */ return mean / RMEAN_WINDOW; } void rmean_collect(struct rmean *rmean, size_t name, int64_t value) { assert(name < rmean->stats_n); rmean->stats[name].value[0] += value; rmean->stats[name].total += value; } int rmean_foreach(struct rmean *rmean, rmean_cb cb, void *cb_ctx) { for (size_t i = 0; i < rmean->stats_n; i++) { if (rmean->stats[i].name == NULL) continue; int res = cb(rmean->stats[i].name, rmean_mean(rmean, i), rmean_total(rmean, i), cb_ctx); if (res != 0) return res; } return 0; } static void rmean_age(ev_loop *loop, ev_timer *timer, int events) { (void) events; struct rmean *rmean = (struct rmean *) timer->data; double dt = rmean->prev_ts; rmean->prev_ts = ev_monotonic_now(loop); dt = rmean->prev_ts - dt; for (size_t i = 0; i < rmean->stats_n; i++) { if (rmean->stats[i].name == NULL) continue; rmean_roll(rmean->stats[i].value, dt); } ev_timer_again(loop, timer); } struct rmean * rmean_new(const char **name, size_t n) { struct rmean *rmean = (struct rmean *) malloc(sizeof(struct rmean) + sizeof(struct stats) * n); if (rmean == NULL) return NULL; memset(rmean, 0, sizeof(struct rmean) + sizeof(struct stats) * n); rmean->stats_n = n; rmean->timer.data = (void *)rmean; for (size_t i = 0; i < n; i++, name++) { rmean->stats[i].name = *name; } rmean->prev_ts = ev_monotonic_now(loop()); ev_timer_init(&rmean->timer, rmean_age, 0, 1.); ev_timer_again(loop(), &rmean->timer); return rmean; } void rmean_delete(struct rmean *rmean) { ev_timer_stop(loop(), &rmean->timer); free(rmean); rmean = 0; } void rmean_cleanup(struct rmean *rmean) { for (size_t i = 0; i < rmean->stats_n; i++) { for (size_t j = 0; j < RMEAN_WINDOW + 1; 
j++) rmean->stats[i].value[j] = 0; rmean->stats[i].total = 0; } } tarantool_1.9.1.26.g63eb81e3c/src/systemd.h0000664000000000000000000000576213306560010016642 0ustar rootroot#ifndef TARANTOOL_SYSTEMD_H_INCLUDED #define TARANTOOL_SYSTEMD_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "trivia/config.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #if defined(WITH_SYSTEMD) /** * Open connection with systemd daemon (using unix socket located in * "NOTIFY_SOCKET" environmnent variable) * * \return 1 on non-systemd plaformts * \return -1 on errors * \return 0 on sucess */ int systemd_init(); /** * Close connection with systemd daemon */ void systemd_free(); /** * Send message to systemd * * \param message message to send to systemd * * \return 0 on non-systemd platforms * \return -1 on errors (more information in errno) * \return >0 on ok */ int systemd_notify(const char *message); /** * send message and format it using va_list * * \param format format string * \param ap arguments for formatting * * \return 0 on non-systemd platforms * \return -1 on errors (more information in errno) * \return >0 on ok */ int systemd_vsnotify(const char *format, va_list ap); /** * Send message and format it using varargs * * \param format format string * \param ... arguments for formatting * * \return 0 on non-systemd platforms * \return -1 on errors (more information in errno) * \return >0 on ok */ int systemd_snotify(const char *format, ...); #else /* !defined(WITH_SYSTEMD) */ # define systemd_init() # define systemd_free() # define systemd_notify(...) # define systemd_vsnotify(...) # define systemd_snotify(...) #endif /* WITH_SYSTEMD */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_SYSTEMD_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/assoc.h0000664000000000000000000000664613306560010016264 0ustar rootroot/* * Copyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #if !MH_SOURCE #define MH_UNDEF #endif #include "third_party/PMurHash.h" /* * Map: (i32) => (void *) */ #define mh_name _i32ptr #define mh_key_t uint32_t struct mh_i32ptr_node_t { mh_key_t key; void *val; }; #define mh_node_t struct mh_i32ptr_node_t #define mh_arg_t void * #define mh_hash(a, arg) (a->key) #define mh_hash_key(a, arg) (a) #define mh_cmp(a, b, arg) ((a->key) != (b->key)) #define mh_cmp_key(a, b, arg) ((a) != (b->key)) #include "salad/mhash.h" /* * Map: (i64) => (void *) */ #define mh_name _i64ptr #define mh_key_t uint64_t struct mh_i64ptr_node_t { mh_key_t key; void *val; }; #define mh_node_t struct mh_i64ptr_node_t #define mh_arg_t void * #define mh_hash(a, arg) (a->key) #define mh_hash_key(a, arg) (a) #define mh_cmp(a, b, arg) ((a->key) != (b->key)) #define mh_cmp_key(a, b, arg) ((a) != (b->key)) #include "salad/mhash.h" /* * Map: (char * with length) => (void *) */ enum { MH_STRN_HASH_SEED = 13U }; static inline uint32_t mh_strn_hash(const char 
*str, size_t len) { uint32_t h = MH_STRN_HASH_SEED; uint32_t carry = 0; PMurHash32_Process(&h, &carry, str, len); return PMurHash32_Result(h, carry, len); } #define mh_name _strnptr struct mh_strnptr_key_t { const char *str; size_t len; uint32_t hash; }; #define mh_key_t struct mh_strnptr_key_t * struct mh_strnptr_node_t { const char *str; size_t len; uint32_t hash; void *val; }; #define mh_node_t struct mh_strnptr_node_t #define mh_arg_t void * #define mh_hash(a, arg) ((a)->hash) #define mh_hash_key(a, arg) ((a)->hash) #define mh_cmp(a, b, arg) ((a)->len != (b)->len || \ strncmp((a)->str, (b)->str, (a)->len)) #define mh_cmp_key(a, b, arg) mh_cmp(a, b, arg) #include "salad/mhash.h" static inline mh_int_t mh_strnptr_find_inp(struct mh_strnptr_t *h, const char *str, size_t len) { uint32_t hash = mh_strn_hash(str, len); struct mh_strnptr_key_t key = {str, len, hash}; return mh_strnptr_find(h, &key, NULL); }; #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ tarantool_1.9.1.26.g63eb81e3c/src/assoc.c0000664000000000000000000000260313306560010016244 0ustar rootroot/* * Copyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define MH_SOURCE 1 #include tarantool_1.9.1.26.g63eb81e3c/src/fiber.c0000664000000000000000000007442713306565107016254 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "fiber.h" #include #include #include #include #include #include #include #include "assoc.h" #include "memory.h" #include "trigger.h" #include "third_party/valgrind/memcheck.h" static int (*fiber_invoke)(fiber_func f, va_list ap); #if ENABLE_ASAN #include #define ASAN_START_SWITCH_FIBER(var_name, will_switch_back, bottom, size) \ void *var_name = NULL; \ __sanitizer_start_switch_fiber((will_switch_back) ? &var_name : NULL, \ (bottom), (size)) #define ASAN_FINISH_SWITCH_FIBER(var_name) \ __sanitizer_finish_switch_fiber(var_name); #else #define ASAN_START_SWITCH_FIBER(var_name, will_switch_back, bottom, size) #define ASAN_FINISH_SWITCH_FIBER(var_name) #endif /* * Defines a handler to be executed on exit from cord's thread func, * accessible via cord()->on_exit (normally NULL). It is used to * implement cord_cojoin. */ struct cord_on_exit { void (*callback)(void*); void *argument; }; /* * A special value distinct from any valid pointer to cord_on_exit * structure AND NULL. This value is stored in cord()->on_exit by the * thread function prior to thread termination. 
*/ static const struct cord_on_exit cord_on_exit_sentinel = { NULL, NULL }; #define CORD_ON_EXIT_WONT_RUN (&cord_on_exit_sentinel) static struct cord main_cord; __thread struct cord *cord_ptr = NULL; pthread_t main_thread_id; static size_t page_size; static int stack_direction; enum { /* The minimum allowable fiber stack size in bytes */ FIBER_STACK_SIZE_MINIMAL = 16384, /* Default fiber stack size in bytes */ FIBER_STACK_SIZE_DEFAULT = 65536 }; /** Default fiber attributes */ static const struct fiber_attr fiber_attr_default = { .stack_size = FIBER_STACK_SIZE_DEFAULT, .flags = FIBER_DEFAULT_FLAGS }; void fiber_attr_create(struct fiber_attr *fiber_attr) { *fiber_attr = fiber_attr_default; } struct fiber_attr * fiber_attr_new() { struct fiber_attr *fiber_attr = malloc(sizeof(*fiber_attr)); if (fiber_attr == NULL) { diag_set(OutOfMemory, sizeof(*fiber_attr), "runtime", "fiber attr"); return NULL; } fiber_attr_create(fiber_attr); return fiber_attr; } void fiber_attr_delete(struct fiber_attr *fiber_attr) { free(fiber_attr); } int fiber_attr_setstacksize(struct fiber_attr *fiber_attr, size_t stack_size) { if (stack_size < FIBER_STACK_SIZE_MINIMAL) { errno = EINVAL; diag_set(SystemError, "stack size is too small"); return -1; } fiber_attr->stack_size = stack_size; if (stack_size != FIBER_STACK_SIZE_DEFAULT) { fiber_attr->flags |= FIBER_CUSTOM_STACK; } else { fiber_attr->flags &= ~FIBER_CUSTOM_STACK; } return 0; } size_t fiber_attr_getstacksize(struct fiber_attr *fiber_attr) { return fiber_attr != NULL ? fiber_attr->stack_size : fiber_attr_default.stack_size; } static void fiber_recycle(struct fiber *fiber); static void fiber_destroy(struct cord *cord, struct fiber *f); /** * Transfer control to callee fiber. 
*/ static void fiber_call_impl(struct fiber *callee) { struct fiber *caller = fiber(); struct cord *cord = cord(); /* Ensure we aren't switching to a fiber parked in fiber_loop */ assert(callee->f != NULL && callee->fid != 0); assert(callee->flags & FIBER_IS_READY || callee == &cord->sched); assert(! (callee->flags & FIBER_IS_DEAD)); /* * Ensure the callee was removed from cord->ready list. * If it wasn't, the callee will observe a 'spurious' wakeup * later, due to a fiber_wakeup() performed in the past. */ assert(rlist_empty(&callee->state)); assert(caller); assert(caller != callee); cord->fiber = callee; callee->flags &= ~FIBER_IS_READY; callee->csw++; ASAN_START_SWITCH_FIBER(asan_state, 1, callee->stack, callee->stack_size); coro_transfer(&caller->ctx, &callee->ctx); ASAN_FINISH_SWITCH_FIBER(asan_state); } void fiber_call(struct fiber *callee) { callee->caller = fiber(); assert(! (callee->caller->flags & FIBER_IS_READY)); assert(rlist_empty(&callee->state)); assert(! (callee->flags & FIBER_IS_READY)); callee->flags |= FIBER_IS_READY; callee->caller->flags |= FIBER_IS_READY; fiber_call_impl(callee); } void fiber_start(struct fiber *callee, ...) { va_start(callee->f_data, callee); fiber_call(callee); va_end(callee->f_data); } bool fiber_checkstack() { return false; } /** * Interrupt a synchronous wait of a fiber inside the event loop. * We do so by keeping an "async" event in every fiber, solely * for this purpose, and raising this event here. * * @note: if this is sent to self, followed by a fiber_yield() * call, it simply reschedules the fiber after other ready * fibers in the same event loop iteration. */ void fiber_wakeup(struct fiber *f) { assert(! (f->flags & FIBER_IS_DEAD)); /** * Do nothing if the fiber is already in cord->ready * list *or* is in the call chain created by * fiber_schedule_list(). 
While it's harmless to re-add * a fiber to cord->ready, even if it's already there, * but the same game is deadly when the fiber is in * the callee list created by fiber_schedule_list(). * * To put it another way, fiber_wakeup() is a 'request' to * schedule the fiber for execution, and once it is executing * a wakeup request is considered complete and it must be * removed. * * A dead fiber can be lingering in the cord fiber list * if it is joinable. This makes it technically possible * to schedule it. We would never make such a mistake * in our own code, hence the assert above. But as long * as fiber.wakeup() is a part of public Lua API, an * external rock can mess things up. Ignore such attempts * as well. */ if (f->flags & (FIBER_IS_READY | FIBER_IS_DEAD)) return; struct cord *cord = cord(); if (rlist_empty(&cord->ready)) { /* * ev_feed_event(EV_CUSTOM) gets scheduled in the * same event loop iteration, and we rely on this * for quick scheduling. For a wakeup which * actually can invoke a poll() in libev, * use fiber_sleep(0) */ ev_feed_event(cord->loop, &cord->wakeup_event, EV_CUSTOM); } /** * Removes the fiber from whatever wait list it is on. * * It's critical that the newly scheduled fiber is * added to the tail of the list, to preserve correct * transaction commit order after a successful WAL write. * (see tx_schedule_commit()/tx_schedule_rollback() in * box/wal.cc) */ rlist_move_tail_entry(&cord->ready, f, state); f->flags |= FIBER_IS_READY; } /** Cancel the subject fiber. * * Note: cancelation is asynchronous. Use fiber_join() to wait for the * cancelation to complete. * * A fiber may opt to set FIBER_IS_CANCELLABLE to false, and never test * that it was cancelled. Such fiber can not ever be cancelled. * However, as long as most of the cooperative code calls * fiber_testcancel(), most of the fibers are cancellable. * * The fiber which is cancelled, has FiberIsCancelled raised * in it. 
For cancellation to work, this exception type should be * re-raised whenever (if) it is caught. */ void fiber_cancel(struct fiber *f) { assert(f->fid != 0); struct fiber *self = fiber(); /** * Do nothing if the fiber is dead, since cancelling * the fiber would clear the diagnostics area and * the cause of death would be lost. */ if (fiber_is_dead(f)) return; f->flags |= FIBER_IS_CANCELLED; /** * Don't wake self and zombies. */ if (f != self) { if (f->flags & FIBER_IS_CANCELLABLE) fiber_wakeup(f); } } /** * Change the current cancellation state of a fiber. This is not * a cancellation point. */ bool fiber_set_cancellable(bool yesno) { bool prev = fiber()->flags & FIBER_IS_CANCELLABLE; if (yesno == true) fiber()->flags |= FIBER_IS_CANCELLABLE; else fiber()->flags &= ~FIBER_IS_CANCELLABLE; return prev; } bool fiber_is_cancelled() { return fiber()->flags & FIBER_IS_CANCELLED; } void fiber_set_joinable(struct fiber *fiber, bool yesno) { if (yesno == true) fiber->flags |= FIBER_IS_JOINABLE; else fiber->flags &= ~FIBER_IS_JOINABLE; } /** Report libev time (cheap). */ double fiber_time(void) { return ev_now(loop()); } uint64_t fiber_time64(void) { return (uint64_t) ( ev_now(loop()) * 1000000 + 0.5 ); } double fiber_clock(void) { return ev_monotonic_now(loop()); } uint64_t fiber_clock64(void) { return (uint64_t) ( ev_monotonic_now(loop()) * 1000000 + 0.5 ); } /** * Move current fiber to the end of ready fibers list and switch to next */ void fiber_reschedule(void) { fiber_wakeup(fiber()); fiber_yield(); } int fiber_join(struct fiber *fiber) { assert(fiber->flags & FIBER_IS_JOINABLE); if (! fiber_is_dead(fiber)) { rlist_add_tail_entry(&fiber->wake, fiber(), state); fiber_yield(); } assert(fiber_is_dead(fiber)); /* Move exception to the caller */ int ret = fiber->f_ret; if (ret != 0) { assert(!diag_is_empty(&fiber->diag)); diag_move(&fiber->diag, &fiber()->diag); } /* The fiber is already dead. 
*/ fiber_recycle(fiber); return ret; } /** * @note: this is not a cancellation point (@sa fiber_testcancel()) * but it is considered good practice to call testcancel() * after each yield. */ void fiber_yield(void) { struct cord *cord = cord(); struct fiber *caller = cord->fiber; struct fiber *callee = caller->caller; caller->caller = &cord->sched; /** By convention, these triggers must not throw. */ if (! rlist_empty(&caller->on_yield)) trigger_run(&caller->on_yield, NULL); assert(callee->flags & FIBER_IS_READY || callee == &cord->sched); assert(! (callee->flags & FIBER_IS_DEAD)); cord->fiber = callee; callee->csw++; callee->flags &= ~FIBER_IS_READY; ASAN_START_SWITCH_FIBER(asan_state, (caller->flags & FIBER_IS_DEAD) == 0, callee->stack, callee->stack_size); coro_transfer(&caller->ctx, &callee->ctx); ASAN_FINISH_SWITCH_FIBER(asan_state); } struct fiber_watcher_data { struct fiber *f; bool timed_out; }; static void fiber_schedule_timeout(ev_loop *loop, ev_timer *watcher, int revents) { (void) loop; (void) revents; assert(fiber() == &cord()->sched); struct fiber_watcher_data *state = (struct fiber_watcher_data *) watcher->data; state->timed_out = true; fiber_wakeup(state->f); } /** * @brief yield & check timeout * @return true if timeout exceeded */ bool fiber_yield_timeout(ev_tstamp delay) { struct ev_timer timer; ev_timer_init(&timer, fiber_schedule_timeout, delay, 0); struct fiber_watcher_data state = { fiber(), false }; timer.data = &state; ev_timer_start(loop(), &timer); fiber_yield(); ev_timer_stop(loop(), &timer); return state.timed_out; } /** * Yield the current fiber to events in the event loop. */ void fiber_sleep(double delay) { /* * libev sleeps at least backend_mintime, which is 1 ms in * case of poll()/Linux, unless there are idle watchers. * So, to properly implement fiber_sleep(0), i.e. a sleep * with a zero timeout, we set up an idle watcher, and * it triggers libev to poll() with zero timeout. 
*/ if (delay == 0) { ev_idle_start(loop(), &cord()->idle_event); } /* * We don't use fiber_wakeup() here to ensure there is * no infinite wakeup loop in case of fiber_sleep(0). */ fiber_yield_timeout(delay); if (delay == 0) { ev_idle_stop(loop(), &cord()->idle_event); } } void fiber_schedule_cb(ev_loop *loop, ev_watcher *watcher, int revents) { (void) loop; (void) revents; struct fiber *fiber = watcher->data; assert(fiber() == &cord()->sched); fiber_wakeup(fiber); } static inline void fiber_schedule_list(struct rlist *list) { struct fiber *first; struct fiber *last; /* * Happens when a fiber exits and is removed from cord->ready * resulting in the empty list. */ if (rlist_empty(list)) return; first = last = rlist_shift_entry(list, struct fiber, state); assert(last->flags & FIBER_IS_READY); while (! rlist_empty(list)) { last->caller = rlist_shift_entry(list, struct fiber, state); last = last->caller; assert(last->flags & FIBER_IS_READY); } last->caller = fiber(); assert(fiber() == &cord()->sched); fiber_call_impl(first); } static void fiber_schedule_wakeup(ev_loop *loop, ev_async *watcher, int revents) { (void) loop; (void) watcher; (void) revents; struct cord *cord = cord(); fiber_schedule_list(&cord->ready); } static void fiber_schedule_idle(ev_loop *loop, ev_idle *watcher, int revents) { (void) loop; (void) watcher; (void) revents; } struct fiber * fiber_find(uint32_t fid) { struct mh_i32ptr_t *fiber_registry = cord()->fiber_registry; mh_int_t k = mh_i32ptr_find(fiber_registry, fid, NULL); if (k == mh_end(fiber_registry)) return NULL; return (struct fiber *) mh_i32ptr_node(fiber_registry, k)->val; } static void register_fid(struct fiber *fiber) { struct mh_i32ptr_node_t node = { fiber->fid, fiber }; mh_i32ptr_put(cord()->fiber_registry, &node, NULL, NULL); } static void unregister_fid(struct fiber *fiber) { struct mh_i32ptr_node_t node = { fiber->fid, NULL }; mh_i32ptr_remove(cord()->fiber_registry, &node, NULL); } struct fiber * fiber_self() { return fiber(); } 
void fiber_gc(void) { if (region_used(&fiber()->gc) < 128 * 1024) { region_reset(&fiber()->gc); return; } region_free(&fiber()->gc); } /** Common part of fiber_new() and fiber_recycle(). */ static void fiber_reset(struct fiber *fiber) { rlist_create(&fiber->on_yield); rlist_create(&fiber->on_stop); fiber->flags = FIBER_DEFAULT_FLAGS; } /** Destroy an active fiber and prepare it for reuse. */ static void fiber_recycle(struct fiber *fiber) { /* no exceptions are leaking */ assert(diag_is_empty(&fiber->diag)); /* no pending wakeup */ assert(rlist_empty(&fiber->state)); bool has_custom_stack = fiber->flags & FIBER_CUSTOM_STACK; fiber_reset(fiber); fiber->name[0] = '\0'; fiber->f = NULL; memset(fiber->fls, 0, sizeof(fiber->fls)); unregister_fid(fiber); fiber->fid = 0; region_free(&fiber->gc); if (!has_custom_stack) { rlist_move_entry(&cord()->dead, fiber, link); } else { fiber_destroy(cord(), fiber); } } static void fiber_loop(MAYBE_UNUSED void *data) { ASAN_FINISH_SWITCH_FIBER(NULL); for (;;) { struct fiber *fiber = fiber(); assert(fiber != NULL && fiber->f != NULL && fiber->fid != 0); fiber->f_ret = fiber_invoke(fiber->f, fiber->f_data); if (fiber->f_ret != 0) { struct error *e = diag_last_error(&fiber->diag); /* diag must not be empty on error */ assert(e != NULL || fiber->flags & FIBER_IS_CANCELLED); /* * For joinable fibers, it's the business * of the caller to deal with the error. */ if (!(fiber->flags & FIBER_IS_JOINABLE)) { if (!(fiber->flags & FIBER_IS_CANCELLED)) error_log(e); diag_clear(&fiber()->diag); } } else { /* * Make sure a leftover exception does not * propagate up to the joiner. */ diag_clear(&fiber()->diag); } fiber->flags |= FIBER_IS_DEAD; while (! rlist_empty(&fiber->wake)) { struct fiber *f; f = rlist_shift_entry(&fiber->wake, struct fiber, state); assert(f != fiber); fiber_wakeup(f); } if (! rlist_empty(&fiber->on_stop)) trigger_run(&fiber->on_stop, fiber); /* reset pending wakeups */ rlist_del(&fiber->state); if (! 
(fiber->flags & FIBER_IS_JOINABLE)) fiber_recycle(fiber); /* * Crash if spurious wakeup happens, don't call the old * function again, ap is garbage by now. */ fiber->f = NULL; fiber_yield(); /* give control back to scheduler */ } } void fiber_set_name(struct fiber *fiber, const char *name) { assert(name != NULL); snprintf(fiber->name, sizeof(fiber->name), "%s", name); } extern inline void fiber_set_key(struct fiber *fiber, enum fiber_key key, void *value); extern inline void * fiber_get_key(struct fiber *fiber, enum fiber_key key); static inline void * page_align_down(void *ptr) { return (void *)((intptr_t)ptr & ~(page_size - 1)); } static inline void * page_align_up(void *ptr) { return page_align_down(ptr + page_size - 1); } static int fiber_stack_create(struct fiber *fiber, size_t stack_size) { stack_size -= slab_sizeof(); fiber->stack_slab = slab_get(&cord()->slabc, stack_size); if (fiber->stack_slab == NULL) { diag_set(OutOfMemory, stack_size, "runtime arena", "fiber stack"); return -1; } void *guard; /* Adjust begin and size for stack memory chunk. */ if (stack_direction < 0) { /* * A stack grows down. First page after begin of a * stack memory chunk should be protected and memory * after protected page until end of memory chunk can be * used for coro stack usage. */ guard = page_align_up(slab_data(fiber->stack_slab)); fiber->stack = guard + page_size; fiber->stack_size = slab_data(fiber->stack_slab) + stack_size - fiber->stack; } else { /* * A stack grows up. 
Last page should be protected and * memory from begin of chunk until protected page can * be used for coro stack usage */ guard = page_align_down(fiber->stack_slab + stack_size) - page_size; fiber->stack = fiber->stack_slab + slab_sizeof(); fiber->stack_size = guard - fiber->stack; } fiber->stack_id = VALGRIND_STACK_REGISTER(fiber->stack, (char *)fiber->stack + fiber->stack_size); mprotect(guard, page_size, PROT_NONE); return 0; } static void fiber_stack_destroy(struct fiber *fiber, struct slab_cache *slabc) { if (fiber->stack != NULL) { VALGRIND_STACK_DEREGISTER(fiber->stack_id); #if ENABLE_ASAN ASAN_UNPOISON_MEMORY_REGION(fiber->stack, fiber->stack_size); #endif void *guard; if (stack_direction < 0) guard = page_align_down(fiber->stack - page_size); else guard = page_align_up(fiber->stack + fiber->stack_size); mprotect(guard, page_size, PROT_READ | PROT_WRITE); slab_put(slabc, fiber->stack_slab); } } struct fiber * fiber_new_ex(const char *name, const struct fiber_attr *fiber_attr, fiber_func f) { struct cord *cord = cord(); struct fiber *fiber = NULL; assert(fiber_attr != NULL); /* Now we can not reuse fiber if custom attribute was set */ if (!(fiber_attr->flags & FIBER_CUSTOM_STACK) && !rlist_empty(&cord->dead)) { fiber = rlist_first_entry(&cord->dead, struct fiber, link); rlist_move_entry(&cord->alive, fiber, link); } else { fiber = (struct fiber *) mempool_alloc(&cord->fiber_mempool); if (fiber == NULL) { diag_set(OutOfMemory, sizeof(struct fiber), "fiber pool", "fiber"); return NULL; } memset(fiber, 0, sizeof(struct fiber)); if (fiber_stack_create(fiber, fiber_attr->stack_size)) { mempool_free(&cord->fiber_mempool, fiber); return NULL; } memset(&fiber->ctx, 0, sizeof(fiber->ctx)); coro_create(&fiber->ctx, fiber_loop, NULL, fiber->stack, fiber->stack_size); region_create(&fiber->gc, &cord->slabc); rlist_create(&fiber->state); rlist_create(&fiber->wake); diag_create(&fiber->diag); fiber_reset(fiber); fiber->flags = fiber_attr->flags; 
rlist_add_entry(&cord->alive, fiber, link); } fiber->f = f; /* fids from 0 to 100 are reserved */ if (++cord->max_fid < 100) cord->max_fid = 101; fiber->fid = cord->max_fid; fiber_set_name(fiber, name); register_fid(fiber); return fiber; } /** * Create a new fiber. * * Takes a fiber from fiber cache, if it's not empty. * Can fail only if there is not enough memory for * the fiber structure or fiber stack. * * The created fiber automatically returns itself * to the fiber cache when its "main" function * completes. */ struct fiber * fiber_new(const char *name, fiber_func f) { return fiber_new_ex(name, &fiber_attr_default, f); } /** * Free as much memory as possible taken by the fiber. * * Sic: cord()->sched needs manual destruction in * cord_destroy(). */ static void fiber_destroy(struct cord *cord, struct fiber *f) { if (f == fiber()) { /** End of the application. */ assert(cord == &main_cord); return; } assert(f != &cord->sched); trigger_destroy(&f->on_yield); trigger_destroy(&f->on_stop); rlist_del(&f->state); rlist_del(&f->link); region_destroy(&f->gc); fiber_stack_destroy(f, &cord->slabc); diag_destroy(&f->diag); } void fiber_destroy_all(struct cord *cord) { while (!rlist_empty(&cord->alive)) fiber_destroy(cord, rlist_first_entry(&cord->alive, struct fiber, link)); while (!rlist_empty(&cord->dead)) fiber_destroy(cord, rlist_first_entry(&cord->dead, struct fiber, link)); } void cord_create(struct cord *cord, const char *name) { cord() = cord; slab_cache_set_thread(&cord()->slabc); cord->id = pthread_self(); cord->on_exit = NULL; slab_cache_create(&cord->slabc, &runtime); mempool_create(&cord->fiber_mempool, &cord->slabc, sizeof(struct fiber)); rlist_create(&cord->alive); rlist_create(&cord->ready); rlist_create(&cord->dead); cord->fiber_registry = mh_i32ptr_new(); /* sched fiber is not present in alive/ready/dead list. 
*/ cord->sched.fid = 1; fiber_reset(&cord->sched); diag_create(&cord->sched.diag); region_create(&cord->sched.gc, &cord->slabc); fiber_set_name(&cord->sched, "sched"); cord->fiber = &cord->sched; cord->max_fid = 100; /* * No need to start this event since it's only used for * ev_feed_event(). Saves a few cycles on every * event loop iteration. */ ev_async_init(&cord->wakeup_event, fiber_schedule_wakeup); ev_idle_init(&cord->idle_event, fiber_schedule_idle); cord_set_name(name); #if ENABLE_ASAN /* Record stack extents */ tt_pthread_attr_getstack(cord->id, &cord->sched.stack, &cord->sched.stack_size); #else cord->sched.stack = NULL; cord->sched.stack_size = 0; #endif } void cord_destroy(struct cord *cord) { slab_cache_set_thread(&cord->slabc); if (cord->loop) ev_loop_destroy(cord->loop); /* Only clean up if initialized. */ if (cord->fiber_registry) { fiber_destroy_all(cord); mh_i32ptr_delete(cord->fiber_registry); } region_destroy(&cord->sched.gc); diag_destroy(&cord->sched.diag); slab_cache_destroy(&cord->slabc); } struct cord_thread_arg { struct cord *cord; const char *name; void *(*f)(void *); void *arg; bool is_started; pthread_mutex_t start_mutex; pthread_cond_t start_cond; }; /** * Cord main thread function. It's not exception-safe, the * body function must catch all exceptions instead. */ void *cord_thread_func(void *p) { struct cord_thread_arg *ct_arg = (struct cord_thread_arg *) p; cord_create(ct_arg->cord, (ct_arg->name)); /** Can't possibly be the main thread */ assert(cord()->id != main_thread_id); tt_pthread_mutex_lock(&ct_arg->start_mutex); void *(*f)(void *) = ct_arg->f; void *arg = ct_arg->arg; ct_arg->is_started = true; tt_pthread_cond_signal(&ct_arg->start_cond); tt_pthread_mutex_unlock(&ct_arg->start_mutex); void *res = f(arg); /* * cord()->on_exit initially holds NULL. This field is * change-once. 
* Either handler installation succeeds (in cord_cojoin()) * or prior to thread exit the thread function discovers * that no handler was installed so far and it stores * CORD_ON_EXIT_WONT_RUN to prevent a future handler * installation (since a handler won't run anyway). */ const struct cord_on_exit *handler = NULL; /* expected value */ bool changed; changed = pm_atomic_compare_exchange_strong(&cord()->on_exit, &handler, CORD_ON_EXIT_WONT_RUN); if (!changed) handler->callback(handler->argument); return res; } int cord_start(struct cord *cord, const char *name, void *(*f)(void *), void *arg) { int res = -1; struct cord_thread_arg ct_arg = { cord, name, f, arg, false, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER }; tt_pthread_mutex_lock(&ct_arg.start_mutex); cord->loop = ev_loop_new(EVFLAG_AUTO | EVFLAG_ALLOCFD); if (cord->loop == NULL) { diag_set(OutOfMemory, 0, "ev_loop_new", "ev_loop"); goto end; } if (tt_pthread_create(&cord->id, NULL, cord_thread_func, &ct_arg) != 0) { diag_set(SystemError, "failed to create thread"); goto end; } res = 0; while (! ct_arg.is_started) tt_pthread_cond_wait(&ct_arg.start_cond, &ct_arg.start_mutex); end: if (res != 0) { if (cord->loop) { ev_loop_destroy(cord->loop); cord->loop = NULL; } } tt_pthread_mutex_unlock(&ct_arg.start_mutex); tt_pthread_mutex_destroy(&ct_arg.start_mutex); tt_pthread_cond_destroy(&ct_arg.start_cond); return res; } int cord_join(struct cord *cord) { assert(cord() != cord); /* Can't join self. */ void *retval = NULL; int res = tt_pthread_join(cord->id, &retval); if (res == 0) { struct fiber *f = cord->fiber; if (f->f_ret != 0) { assert(!diag_is_empty(&f->diag)); diag_move(&f->diag, diag_get()); res = -1; } } else { diag_set(SystemError, "failed to join with thread"); } cord_destroy(cord); return res; } /** The state of the waiter for a thread to complete. */ struct cord_cojoin_ctx { struct ev_loop *loop; /** Waiting fiber. 
*/ struct fiber *fiber; /* * This event is signalled when the subject thread is * about to die. */ struct ev_async async; bool task_complete; }; static void cord_cojoin_on_exit(void *arg) { struct cord_cojoin_ctx *ctx = (struct cord_cojoin_ctx *)arg; ev_async_send(ctx->loop, &ctx->async); } static void cord_cojoin_wakeup(struct ev_loop *loop, struct ev_async *ev, int revents) { (void)loop; (void)revents; struct cord_cojoin_ctx *ctx = (struct cord_cojoin_ctx *)ev->data; ctx->task_complete = true; fiber_wakeup(ctx->fiber); } int cord_cojoin(struct cord *cord) { assert(cord() != cord); /* Can't join self. */ struct cord_cojoin_ctx ctx; ctx.loop = loop(); ctx.fiber = fiber(); ctx.task_complete = false; ev_async_init(&ctx.async, cord_cojoin_wakeup); ctx.async.data = &ctx; ev_async_start(loop(), &ctx.async); struct cord_on_exit handler = { cord_cojoin_on_exit, &ctx }; /* * cord->on_exit initially holds a NULL value. This field is * change-once. */ const struct cord_on_exit *prev_handler = NULL; /* expected value */ bool changed = pm_atomic_compare_exchange_strong(&cord->on_exit, &prev_handler, &handler); /* * A handler installation fails either if the thread did exit or * if someone is already joining this cord (BUG). */ if (!changed) { /* Assume cord's thread already exited. */ assert(prev_handler == CORD_ON_EXIT_WONT_RUN); } else { /* * Wait until the thread exits. Prior to exit the * thread invokes cord_cojoin_on_exit, signaling * ev_async, making the event loop call * cord_cojoin_wakeup, waking up this fiber again. * * The fiber is non-cancellable during the wait to * avoid invalidating of the cord_cojoin_ctx * object declared on stack. */ bool cancellable = fiber_set_cancellable(false); fiber_yield(); /* Spurious wakeup indicates a severe BUG, fail early. 
*/ if (ctx.task_complete == 0) panic("Wrong fiber woken"); fiber_set_cancellable(cancellable); } ev_async_stop(loop(), &ctx.async); return cord_join(cord); } void break_ev_loop_f(struct trigger *trigger, void *event) { (void) trigger; (void) event; ev_break(loop(), EVBREAK_ALL); } struct costart_ctx { fiber_func run; void *arg; }; /** Replication acceptor fiber handler. */ static void * cord_costart_thread_func(void *arg) { struct costart_ctx ctx = *(struct costart_ctx *) arg; free(arg); struct fiber *f = fiber_new("main", ctx.run); if (f == NULL) return NULL; struct trigger break_ev_loop = { RLIST_LINK_INITIALIZER, break_ev_loop_f, NULL, NULL }; /* * Got to be in a trigger, to break the loop even * in case of an exception. */ trigger_add(&f->on_stop, &break_ev_loop); fiber_set_joinable(f, true); fiber_start(f, ctx.arg); if (!fiber_is_dead(f)) { /* The fiber hasn't died right away at start. */ ev_run(loop(), 0); } /* * Preserve the exception with which the main fiber * terminated, if any. */ assert(fiber_is_dead(f)); fiber()->f_ret = fiber_join(f); return NULL; } int cord_costart(struct cord *cord, const char *name, fiber_func f, void *arg) { /** Must be allocated to avoid races. */ struct costart_ctx *ctx = (struct costart_ctx *) malloc(sizeof(*ctx)); if (ctx == NULL) { diag_set(OutOfMemory, sizeof(struct costart_ctx), "malloc", "costart_ctx"); return -1; } ctx->run = f; ctx->arg = arg; if (cord_start(cord, name, cord_costart_thread_func, ctx) == -1) { free(ctx); return -1; } return 0; } void cord_set_name(const char *name) { snprintf(cord()->name, sizeof cord()->name, "%s", name); /* Main thread's name will replace process title in ps, skip it */ if (cord_is_main()) return; tt_pthread_setname(name); } bool cord_is_main() { return cord() == &main_cord; } struct slab_cache * cord_slab_cache(void) { return &cord()->slabc; } static NOINLINE int check_stack_direction(void *prev_stack_frame) { return __builtin_frame_address(0) < prev_stack_frame ? 
-1: 1; } void fiber_init(int (*invoke)(fiber_func f, va_list ap)) { page_size = sysconf(_SC_PAGESIZE); stack_direction = check_stack_direction(__builtin_frame_address(0)); fiber_invoke = invoke; main_thread_id = pthread_self(); main_cord.loop = ev_default_loop(EVFLAG_AUTO | EVFLAG_ALLOCFD); cord_create(&main_cord, "main"); } void fiber_free(void) { cord_destroy(&main_cord); } int fiber_stat(fiber_stat_cb cb, void *cb_ctx) { struct fiber *fiber; struct cord *cord = cord(); int res; rlist_foreach_entry(fiber, &cord->alive, link) { res = cb(fiber, cb_ctx); if (res != 0) return res; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/fiber_pool.c0000664000000000000000000001275213306565107017276 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "fiber_pool.h" /** * Main function of the fiber invoked to handle all outstanding * tasks in a queue. */ static int fiber_pool_f(va_list ap) { struct fiber_pool *pool = va_arg(ap, struct fiber_pool *); struct cord *cord = cord(); struct fiber *f = fiber(); struct ev_loop *loop = pool->consumer; struct stailq *output = &pool->output; struct cmsg *msg; ev_tstamp last_active_at = ev_monotonic_now(loop); pool->size++; restart: msg = NULL; while (! stailq_empty(output)) { msg = stailq_shift_entry(output, struct cmsg, fifo); if (f->caller == &cord->sched && ! stailq_empty(output) && ! rlist_empty(&pool->idle)) { /* * Activate a "backup" fiber for the next * message in the queue. */ f->caller = rlist_shift_entry(&pool->idle, struct fiber, state); f->caller->flags |= FIBER_IS_READY; assert(f->caller->caller == &cord->sched); } cmsg_deliver(msg); } /** Put the current fiber into a fiber cache. */ if (msg != NULL || ev_monotonic_now(loop) - last_active_at < pool->idle_timeout) { if (msg != NULL) last_active_at = ev_monotonic_now(loop); /* * Add the fiber to the front of the list, so that * it is most likely to get scheduled again. */ rlist_add_entry(&pool->idle, fiber(), state); fiber_yield(); goto restart; } pool->size--; fiber_cond_signal(&pool->worker_cond); return 0; } static void fiber_pool_idle_cb(ev_loop *loop, struct ev_timer *watcher, int events) { (void) events; struct fiber_pool *pool = (struct fiber_pool *) watcher->data; if (! 
rlist_empty(&pool->idle)) { struct fiber *f; /* * Schedule the fiber at the tail of the list, * it's the one most likely to have not been * scheduled lately. */ f = rlist_shift_tail_entry(&pool->idle, struct fiber, state); fiber_call(f); } ev_timer_again(loop, watcher); } /** Create fibers to handle all outstanding tasks. */ static void fiber_pool_cb(ev_loop *loop, struct ev_watcher *watcher, int events) { (void) loop; (void) events; struct fiber_pool *pool = (struct fiber_pool *) watcher->data; /** Fetch messages */ cbus_endpoint_fetch(&pool->endpoint, &pool->output); struct stailq *output = &pool->output; while (! stailq_empty(output)) { struct fiber *f; if (! rlist_empty(&pool->idle)) { f = rlist_shift_entry(&pool->idle, struct fiber, state); fiber_call(f); } else if (pool->size < pool->max_size) { f = fiber_new(cord_name(cord()), fiber_pool_f); if (f == NULL) { diag_log(); break; } fiber_start(f, pool); } else { /** * No worries that this watcher may not * get scheduled again - there are enough * worker fibers already, so just leave. 
*/ say_warn("fiber pool size %d reached on endpoint %s", pool->max_size, pool->endpoint.name); break; } } } void fiber_pool_create(struct fiber_pool *pool, const char *name, int max_pool_size, float idle_timeout) { pool->consumer = loop(); pool->idle_timeout = idle_timeout; rlist_create(&pool->idle); ev_timer_init(&pool->idle_timer, fiber_pool_idle_cb, 0, pool->idle_timeout); pool->idle_timer.data = pool; ev_timer_again(loop(), &pool->idle_timer); pool->size = 0; pool->max_size = max_pool_size; stailq_create(&pool->output); fiber_cond_create(&pool->worker_cond); /* Join fiber pool to cbus */ cbus_endpoint_create(&pool->endpoint, name, fiber_pool_cb, pool); } void fiber_pool_destroy(struct fiber_pool *pool) { /** Endpoint has connected pipes or unfetched messages */ cbus_endpoint_destroy(&pool->endpoint, NULL); /** * At this point all messages are started to execution because last * cbus poison message was fired (endpoint_destroy condition). * We won't to have new messages from cbus and can send wakeup * to each idle fiber. In this case idle fiber can not fetch any * new message and will exit. We adjust idle_timeout to. */ pool->idle_timeout = 0; struct fiber *idle_fiber; rlist_foreach_entry(idle_fiber, &pool->idle, state) fiber_wakeup(idle_fiber); /** * Just wait on fiber exit condition until all fibers are done */ while (pool->size > 0) fiber_cond_wait(&pool->worker_cond); fiber_cond_destroy(&pool->worker_cond); } tarantool_1.9.1.26.g63eb81e3c/src/reflection.c0000664000000000000000000000347513306560010017276 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "reflection.h" /* TODO: sorry, unimplemented: non-trivial designated initializers */ const struct method_info METHODS_SENTINEL = { .owner = NULL, .name = NULL, .rtype = CTYPE_VOID, .atype = {}, .nargs = 0, .isconst = false, ._spacer = {} }; extern inline bool type_assignable(const struct type_info *type, const struct type_info *object); extern inline const struct method_info * type_method_by_name(const struct type_info *type, const char *name); tarantool_1.9.1.26.g63eb81e3c/src/tt_uuid.h0000664000000000000000000001202413306565107016630 0ustar rootroot#ifndef TARANTOOL_UUID_H_INCLUDED #define TARANTOOL_UUID_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include /* snprintf */ #include #if defined(__cplusplus) extern "C" { #endif enum { UUID_LEN = 16, UUID_STR_LEN = 36 }; /** * \brief UUID structure struct */ struct tt_uuid { uint32_t time_low; uint16_t time_mid; uint16_t time_hi_and_version; uint8_t clock_seq_hi_and_reserved; uint8_t clock_seq_low; uint8_t node[6]; }; /** * \brief Generate new UUID * \param uu[out] UUID */ void tt_uuid_create(struct tt_uuid *uu); /** * \brief Parse UUID from string. 
* \param in string * \param uu[out] UUID * \return */ inline int tt_uuid_from_string(const char *in, struct tt_uuid *uu) { if (strlen(in) != UUID_STR_LEN || sscanf(in, "%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx", &uu->time_low, &uu->time_mid, &uu->time_hi_and_version, &uu->clock_seq_hi_and_reserved, &uu->clock_seq_low, &uu->node[0], &uu->node[1], &uu->node[2], &uu->node[3], &uu->node[4], &uu->node[5]) != 11) return 1; /* Check variant (NCS, RFC4122, MSFT) */ uint8_t n = uu->clock_seq_hi_and_reserved; if ((n & 0x80) != 0x00 && (n & 0xc0) != 0x80 && (n & 0xe0) != 0xc0) return 1; return 0; } /** * \brief Compare UUIDs lexicographically. * \param a UUID * \param b UUID * \retval comparison result, as in strcmp() */ inline int tt_uuid_compare(const struct tt_uuid *a, const struct tt_uuid *b) { #define cmp_tt_uuid_field(field) \ if (a->field > b->field) return 1; \ if (a->field < b->field) return -1; cmp_tt_uuid_field(time_low); cmp_tt_uuid_field(time_mid); cmp_tt_uuid_field(time_hi_and_version); cmp_tt_uuid_field(clock_seq_hi_and_reserved); cmp_tt_uuid_field(clock_seq_low); cmp_tt_uuid_field(node[0]); cmp_tt_uuid_field(node[1]); cmp_tt_uuid_field(node[2]); cmp_tt_uuid_field(node[3]); cmp_tt_uuid_field(node[4]); cmp_tt_uuid_field(node[5]); #undef cmp_tt_uuid_field return 0; } /** * \brief Format UUID to RFC 4122 string. 
* \param uu uuid * \param[out] out buffer, must be at least UUID_STR_LEN + 1 length */ inline void tt_uuid_to_string(const struct tt_uuid *uu, char *out) { sprintf(out, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", uu->time_low, uu->time_mid, uu->time_hi_and_version, uu->clock_seq_hi_and_reserved, uu->clock_seq_low, uu->node[0], uu->node[1], uu->node[2], uu->node[3], uu->node[4], uu->node[5]); } /** * \brief Return byte order swapped UUID (LE -> BE and vice versa) * \param uu */ inline void tt_uuid_bswap(struct tt_uuid *uu) { uu->time_low = bswap_u32(uu->time_low); uu->time_mid = bswap_u16(uu->time_mid); uu->time_hi_and_version = bswap_u16(uu->time_hi_and_version); } /** * \brief Test that uuid is nil * \param uu UUID * \retval true if all members of \a uu 0 * \retval false otherwise */ inline bool tt_uuid_is_nil(const struct tt_uuid *uu) { const uint64_t *p = (const uint64_t *) uu; return !p[0] && !p[1]; } /** * \brief Test that \a lhs equal \a rhs * \param lhs UUID * \param rhs UUID * \retval true if \a lhs equal \a rhs * \retval false otherwise */ inline bool tt_uuid_is_equal(const struct tt_uuid *lhs, const struct tt_uuid *rhs) { const uint64_t *lp = (const uint64_t *) lhs; const uint64_t *rp = (const uint64_t *) rhs; return lp[0] == rp[0] && lp[1] == rp[1]; } extern const struct tt_uuid uuid_nil; char * tt_uuid_str(const struct tt_uuid *uu); int tt_uuid_from_strl(const char *in, size_t len, struct tt_uuid *uu); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* TARANTOOL_UUID_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/diag.c0000664000000000000000000000431313306560010016040 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. 
* * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "diag.h" #include "fiber.h" /* Must be set by the library user */ struct error_factory *error_factory = NULL; void error_create(struct error *e, error_f destroy, error_f raise, error_f log, const struct type_info *type, const char *file, unsigned line) { e->destroy = destroy; e->raise = raise; e->log = log; e->type = type; e->refs = 0; if (file != NULL) { snprintf(e->file, sizeof(e->file), "%s", file); e->line = line; } else { e->file[0] = '\0'; e->line = 0; } e->errmsg[0] = '\0'; } struct diag * diag_get() { return &fiber()->diag; } void error_format_msg(struct error *e, const char *format, ...) 
{ va_list ap; va_start(ap, format); error_vformat_msg(e, format, ap); va_end(ap); } void error_vformat_msg(struct error *e, const char *format, va_list ap) { vsnprintf(e->errmsg, sizeof(e->errmsg), format, ap); } tarantool_1.9.1.26.g63eb81e3c/src/clock.h0000664000000000000000000000364713306560010016245 0ustar rootroot#ifndef TARANTOOL_CLOCK_H_INCLUDED #define TARANTOOL_CLOCK_H_INCLUDED /* * Copyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** \cond public */ double clock_realtime(void); double clock_monotonic(void); double clock_process(void); double clock_thread(void); uint64_t clock_realtime64(void); uint64_t clock_monotonic64(void); uint64_t clock_process64(void); uint64_t clock_thread64(void); /** \endcond public */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_clock_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/CMakeLists.txt0000664000000000000000000002261413306565107017550 0ustar rootroot# # Set compile flags for entire src/ directory # enable_tnt_compile_flags() include_directories(${LIBEV_INCLUDE_DIR}) include_directories(${LIBEIO_INCLUDE_DIR}) include_directories(${LIBCORO_INCLUDE_DIR}) include_directories(${LUAJIT_INCLUDE_DIRS}) include_directories(${READLINE_INCLUDE_DIRS}) include_directories(${LIBYAML_INCLUDE_DIRS}) include_directories(${MSGPUCK_INCLUDE_DIRS}) include_directories(${CURL_INCLUDE_DIRS}) include_directories(${ICU_INCLUDE_DIRS}) set(LIBUTIL_FREEBSD_SRC ${CMAKE_SOURCE_DIR}/third_party/libutil_freebsd) include_directories(${LIBUTIL_FREEBSD_SRC}) # Compile src/lua/*.lua files into src/lua/*.lua.c sources set(lua_sources) lua_source(lua_sources lua/init.lua) lua_source(lua_sources lua/string.lua) lua_source(lua_sources lua/fiber.lua) lua_source(lua_sources lua/buffer.lua) lua_source(lua_sources lua/uuid.lua) lua_source(lua_sources lua/crypto.lua) lua_source(lua_sources lua/digest.lua) lua_source(lua_sources lua/msgpackffi.lua) lua_source(lua_sources lua/uri.lua) lua_source(lua_sources lua/socket.lua) lua_source(lua_sources lua/errno.lua) lua_source(lua_sources lua/log.lua) lua_source(lua_sources lua/help.lua) lua_source(lua_sources lua/help_en_US.lua) lua_source(lua_sources lua/tap.lua) lua_source(lua_sources lua/fio.lua) lua_source(lua_sources lua/csv.lua) lua_source(lua_sources lua/strict.lua) lua_source(lua_sources lua/clock.lua) 
lua_source(lua_sources lua/title.lua) lua_source(lua_sources lua/argparse.lua) lua_source(lua_sources lua/env.lua) lua_source(lua_sources lua/pwd.lua) lua_source(lua_sources lua/trigger.lua) lua_source(lua_sources lua/table.lua) lua_source(lua_sources ../third_party/luafun/fun.lua) lua_source(lua_sources lua/httpc.lua) lua_source(lua_sources lua/iconv.lua) # LuaJIT jit.* library lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/bc.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/bcsave.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/dis_x86.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/dis_x64.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/dump.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/vmdef.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/v.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/p.lua") lua_source(lua_sources "${CMAKE_BINARY_DIR}/third_party/luajit/src/jit/zone.lua") add_custom_target(generate_lua_sources WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/src/box DEPENDS ${lua_sources}) set_property(DIRECTORY PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${lua_sources}) add_custom_target(ragel WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMAND ragel -G2 src/uri.rl -o src/uri.c) set (core_sources diag.c say.c memory.c clock.c fiber.c backtrace.cc cbus.c fiber_pool.c fiber_cond.c fiber_channel.c latch.c sio.cc evio.cc coio.cc coio_task.c coio_file.c coio_buf.cc fio.c cbus.c exception.cc errinj.c reflection.c assoc.c util.c random.c trigger.cc http_parser.c ) if (TARGET_OS_NETBSD) # A workaround for "undefined reference to `__gcc_personality_v0'" # on x86_64-rumprun-netbsd-gcc set_source_files_properties(util.c PROPERTIES COMPILE_FLAGS -fno-exceptions) endif () add_library(core STATIC ${core_sources}) target_link_libraries(core salad small pthread 
${LIBEV_LIBRARIES} ${LIBEIO_LIBRARIES} ${LIBCORO_LIBRARIES} ${MSGPUCK_LIBRARIES} ) add_library(stat STATIC rmean.c latency.c histogram.c) target_link_libraries(stat core) if (ENABLE_BACKTRACE AND NOT TARGET_OS_DARWIN) target_link_libraries(core gcc_s ${UNWIND_LIBRARIES}) endif() if (CC_HAS_WNO_IMPLICIT_FALLTHROUGH) # Suppress noise warnings for autogenerated Ragel code set_source_files_properties(uri.c PROPERTIES COMPILE_FLAGS -Wno-implicit-fallthrough) endif() add_library(uri STATIC uri.c) add_library(uuid STATIC tt_uuid.c) target_link_libraries(uuid core bit) add_library(scramble STATIC scramble.c) target_link_libraries(scramble core misc) add_library(crc32 STATIC crc32.c cpu_feature.c ${PROJECT_SOURCE_DIR}/third_party/crc32.c ) set (server_sources find_path.c curl.c httpc.c pickle.c cfg.c cpu_feature.c title.c proc_title.c path_lock.c systemd.c version.c lua/digest.c lua/init.c lua/fiber.c lua/fiber_cond.c lua/fiber_channel.c lua/trigger.c lua/msgpack.c lua/utils.c lua/errno.c lua/socket.c lua/pickle.c lua/fio.c lua/crypto.c lua/httpc.c ${lua_sources} ${PROJECT_SOURCE_DIR}/third_party/lua-yaml/lyaml.cc ${PROJECT_SOURCE_DIR}/third_party/lua-yaml/b64.c ${PROJECT_SOURCE_DIR}/third_party/lua-cjson/lua_cjson.c ${PROJECT_SOURCE_DIR}/third_party/lua-cjson/strbuf.c ) set(api_headers ${CMAKE_BINARY_DIR}/src/trivia/config.h ${CMAKE_SOURCE_DIR}/src/trivia/util.h ${CMAKE_SOURCE_DIR}/src/say.h ${CMAKE_SOURCE_DIR}/src/fiber.h ${CMAKE_SOURCE_DIR}/src/fiber_cond.h ${CMAKE_SOURCE_DIR}/src/coio.h ${CMAKE_SOURCE_DIR}/src/coio_task.h ${CMAKE_SOURCE_DIR}/src/lua/utils.h ${CMAKE_SOURCE_DIR}/src/box/txn.h ${CMAKE_SOURCE_DIR}/src/box/key_def.h ${CMAKE_SOURCE_DIR}/src/box/field_def.h ${CMAKE_SOURCE_DIR}/src/box/tuple.h ${CMAKE_SOURCE_DIR}/src/box/tuple_format.h ${CMAKE_SOURCE_DIR}/src/box/tuple_extract_key.h ${CMAKE_SOURCE_DIR}/src/box/schema_def.h ${CMAKE_SOURCE_DIR}/src/box/box.h ${CMAKE_SOURCE_DIR}/src/box/index.h ${CMAKE_SOURCE_DIR}/src/box/iterator_type.h 
${CMAKE_SOURCE_DIR}/src/box/error.h ${CMAKE_SOURCE_DIR}/src/box/lua/call.h ${CMAKE_SOURCE_DIR}/src/box/lua/tuple.h ${CMAKE_SOURCE_DIR}/src/latch.h ${CMAKE_SOURCE_DIR}/src/clock.h ) rebuild_module_api(${api_headers}) if (NOT TARGET_OS_DEBIAN_FREEBSD) if (TARGET_OS_FREEBSD) set_source_files_properties( ${PROJECT_SOURCE_DIR}/src/proc_title.c PROPERTIES COMPILE_FLAGS "-DHAVE_SETPROCTITLE") endif() endif() set_source_files_compile_flags(${server_sources}) add_library(server STATIC ${server_sources}) target_link_libraries(server core bit uri uuid) # Rule of thumb: if exporting a symbol from a static library, list the # library here. set (reexport_libraries server core misc bitset csv ${LUAJIT_LIBRARIES} ${MSGPUCK_LIBRARIES} ${ICU_LIBRARIES}) set (common_libraries ${reexport_libraries} ${LIBYAML_LIBRARIES} ${READLINE_LIBRARIES} ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ) if (TARGET_OS_LINUX OR TARGET_OS_DEBIAN_FREEBSD) set (common_libraries ${common_libraries} dl rt) endif() if (TARGET_OS_FREEBSD AND NOT TARGET_OS_DEBIAN_FREEBSD) find_library (INTL intl) if (NOT INTL) message(FATAL_ERROR "intl library not found") else() set (common_libraries ${common_libraries} ${INTL}) endif() find_library (ICONV iconv) if (NOT ICONV) message(FATAL_ERROR "iconv library not found") else() set (common_libraries ${common_libraries} ${ICONV}) endif() endif() set (common_libraries ${common_libraries} ${LIBUUID_LIBRARIES}) set (common_libraries ${common_libraries} PARENT_SCOPE) add_subdirectory(lib) add_subdirectory(box) # Save CMAKE_XXX_FLAGS from this directory for config.h (used in --version) set(TARANTOOL_C_FLAGS ${CMAKE_C_FLAGS} PARENT_SCOPE) set(TARANTOOL_CXX_FLAGS ${CMAKE_CXX_FLAGS} PARENT_SCOPE) # Exports syntax is toolchain-dependent, preprocessing is necessary set(exports_file ${CMAKE_BINARY_DIR}/extra/exports.${CMAKE_SYSTEM_NAME}) add_custom_target(preprocess_exports DEPENDS ${exports_file}) add_custom_command( OUTPUT ${exports_file} DEPENDS ${CMAKE_SOURCE_DIR}/extra/exports COMMAND 
${CMAKE_SOURCE_DIR}/extra/mkexports ${CMAKE_SOURCE_DIR}/extra/exports ${exports_file} ${CMAKE_SYSTEM_NAME}) add_executable( tarantool main.cc ${LIBUTIL_FREEBSD_SRC}/flopen.c ${LIBUTIL_FREEBSD_SRC}/pidfile.c) add_dependencies(tarantool build_bundled_libs preprocess_exports) # Re-link if exports changed set_target_properties(tarantool PROPERTIES LINK_DEPENDS ${exports_file}) # A note about linkers: # [GNU linker] When linking an *executable* visibility is ignored, and # either nothing is exported (default), or any non-static # symbol is exported (-rdynamic), or explicitly listed # symbols are exported (--dynamic-list). # # However, if a symbol listed lives in a static library it # won't be automatically pulled, hence --whole-archive # option. # # [Apple linker] One can provide an explicit export list; pulls symbols # from static libraries. # if (TARGET_OS_DARWIN) target_link_libraries(tarantool box ${common_libraries}) set_target_properties(tarantool PROPERTIES LINK_FLAGS "-Wl,-exported_symbols_list,${exports_file}") else () target_link_libraries(tarantool -Wl,--whole-archive box ${reexport_libraries} salad -Wl,--no-whole-archive ${common_libraries}) set_target_properties(tarantool PROPERTIES LINK_FLAGS "-Wl,--dynamic-list,${exports_file}") # get rid of -rdynamic set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "") endif() install (TARGETS tarantool DESTINATION bin) tarantool_1.9.1.26.g63eb81e3c/src/rmean.h0000664000000000000000000000505213306560010016244 0ustar rootroot#ifndef TARANTOOL_RMEAN_H_INCLUDED #define TARANTOOL_RMEAN_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "trivia/util.h" #include "third_party/tarantool_ev.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Rolling mean time window, in seconds. */ enum { RMEAN_WINDOW = 5 }; struct stats { const char *name; int64_t value[RMEAN_WINDOW + 1]; int64_t total; }; /** * Rolling average. 
*/ struct rmean { ev_timer timer; unsigned stats_n; double prev_ts; struct stats stats[0]; }; static inline int64_t rmean_total(struct rmean *rmean, size_t name) { return rmean->stats[name].total; } void rmean_roll(int64_t *value, double dt); int64_t rmean_mean(struct rmean *rmean, size_t name); struct rmean * rmean_new(const char **name, size_t n); void rmean_delete(struct rmean *rmean); void rmean_cleanup(struct rmean *rmean); void rmean_collect(struct rmean *rmean, size_t name, int64_t value); typedef int (*rmean_cb)(const char *name, int rps, int64_t total, void *cb_ctx); int rmean_foreach(struct rmean *rmean, rmean_cb cb, void *cb_ctx); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_RMEAN_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/latch.c0000664000000000000000000000354413306560010016234 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "latch.h" struct box_latch { struct latch l; }; box_latch_t* box_latch_new(void) { box_latch_t* bl = (box_latch_t*) malloc(sizeof(box_latch_t)); if (bl) latch_create(&bl->l); return bl; } void box_latch_delete(box_latch_t* bl) { if (bl) { latch_destroy(&bl->l); free(bl); } } void box_latch_lock(box_latch_t* bl) { latch_lock(&bl->l); } int box_latch_trylock(box_latch_t* bl) { return latch_trylock(&bl->l); } void box_latch_unlock(box_latch_t* bl) { latch_unlock(&bl->l); } tarantool_1.9.1.26.g63eb81e3c/src/scramble.h0000664000000000000000000000563413306560010016740 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SCRAMBLE_H #define INCLUDES_TARANTOOL_SCRAMBLE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /** * These are the core bits of the built-in Tarantool * authentication. They implement the same algorithm as * in MySQL 4.1 authentication: * * SERVER: seed = create_random_string() * send(seed) * * CLIENT: recv(seed) * hash1 = sha1("password") * hash2 = sha1(hash1) * reply = xor(hash1, sha1(seed, hash2)) * * ^^ these steps are done in scramble_prepare() * * send(reply) * * * SERVER: recv(reply) * * hash1 = xor(reply, sha1(seed, hash2)) * candidate_hash2 = sha1(hash1) * check(candidate_hash2 == hash2) * * ^^ these steps are done in scramble_check() */ enum { SCRAMBLE_SIZE = 20, SCRAMBLE_BASE64_SIZE = 28 }; /** * Prepare a scramble (cipher) to send over the wire * to the server for authentication. */ void scramble_prepare(void *out, const void *salt, const void *password, int password_len); /** * Verify a password. * * @retval 0 passwords do match * @retval !0 passwords do not match */ int scramble_check(const void *scramble, const void *salt, const void *hash2); /** * Prepare a password hash as is stored in the _user space. 
* @pre out must be at least SCRAMBLE_BASE64_SIZE * @post out contains base64_encode(sha1(sha1(password)), 0) */ void password_prepare(const char *password, int len, char *out, int out_len); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* INCLUDES_TARANTOOL_SCRAMBLE_H */ tarantool_1.9.1.26.g63eb81e3c/src/crc32.c0000664000000000000000000000340713306560010016053 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "crc32.h" #include #include #include /* * Pointer to an architecture-specific implementation of * CRC32 calculation method. */ crc32_func crc32_calc = NULL; void crc32_init() { #if defined(HAVE_CPUID) && (defined (__x86_64__) || defined (__i386__)) crc32_calc = sse42_enabled_cpu() ? 
&crc32c_hw : &crc32c; #else crc32_calc = &crc32c; #endif } tarantool_1.9.1.26.g63eb81e3c/src/fiber.h0000664000000000000000000003731313306565107016252 0ustar rootroot#ifndef TARANTOOL_FIBER_H_INCLUDED #define TARANTOOL_FIBER_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trivia/config.h" #include #include #include #include "tt_pthread.h" #include "third_party/tarantool_ev.h" #include "diag.h" #include "trivia/util.h" #include "small/mempool.h" #include "small/region.h" #include "small/rlist.h" #include "salad/stailq.h" #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { FIBER_NAME_MAX = 32 }; enum { /** * It's safe to resume (wakeup) this fiber * with a spurious wakeup if it is suspended, * e.g. to force it to check that it's been * cancelled. */ FIBER_IS_CANCELLABLE = 1 << 0, /** * Indicates that a fiber has been requested to end * prematurely. */ FIBER_IS_CANCELLED = 1 << 1, /** * The fiber will garbage collect automatically * when fiber function ends. The alternative * is that some other fiber will wait for * the end of this fiber and garbage collect it * with fiber_join(). */ FIBER_IS_JOINABLE = 1 << 2, /** * The fiber is in cord->ready list or in * a call chain created by fiber_schedule_list(). * The flag is set to help fiber_wakeup() avoid * double wakeup of an already scheduled fiber. */ FIBER_IS_READY = 1 << 3, /** * This flag is set when fiber function ends and before * the fiber is recycled. */ FIBER_IS_DEAD = 1 << 4, /** * This flag is set when fiber uses custom stack size. */ FIBER_CUSTOM_STACK = 1 << 5, FIBER_DEFAULT_FLAGS = FIBER_IS_CANCELLABLE }; /** * \brief Pre-defined key for fiber local storage */ enum fiber_key { /** box.session */ FIBER_KEY_SESSION = 0, /** Lua fiber.storage */ FIBER_KEY_LUA_STORAGE = 1, /** transaction */ FIBER_KEY_TXN = 2, /** User global privilege and authentication token */ FIBER_KEY_USER = 3, FIBER_KEY_MSG = 4, FIBER_KEY_MAX = 5 }; /** \cond public */ /** * Fiber attributes container */ struct fiber_attr; /** * Create a new fiber attribute container and initialize it * with default parameters. * Can be used for many fibers creation, corresponding fibers * will not take ownership. 
*/ API_EXPORT struct fiber_attr * fiber_attr_new(); /** * Delete the fiber_attr and free all allocated resources. * This is safe when fibers created with this attribute still exist. * *\param fiber_attr fiber attribute */ API_EXPORT void fiber_attr_delete(struct fiber_attr *fiber_attr); /** * Set stack size for the fiber attribute. * * \param fiber_attribute fiber attribute container * \param stacksize stack size for new fibers */ API_EXPORT int fiber_attr_setstacksize(struct fiber_attr *fiber_attr, size_t stack_size); /** * Get stack size from the fiber attribute. * * \param fiber_attribute fiber attribute container or NULL for default * \retval stack size */ API_EXPORT size_t fiber_attr_getstacksize(struct fiber_attr *fiber_attr); struct fiber; /** * Fiber - contains information about fiber */ typedef int (*fiber_func)(va_list); /** * Return the current fiber */ API_EXPORT struct fiber * fiber_self(); /** * Create a new fiber. * * Takes a fiber from fiber cache, if it's not empty. * Can fail only if there is not enough memory for * the fiber structure or fiber stack. * * The created fiber automatically returns itself * to the fiber cache when its "main" function * completes. * * \param name string with fiber name * \param fiber_func func for run inside fiber * * \sa fiber_start */ API_EXPORT struct fiber * fiber_new(const char *name, fiber_func f); /** * Create a new fiber with defined attributes. * * Can fail only if there is not enough memory for * the fiber structure or fiber stack. * * The created fiber automatically returns itself * to the fiber cache if has default stack size * when its "main" function completes. * * \param name string with fiber name * \param fiber_attr fiber attributes * \param fiber_func func for run inside fiber * * \sa fiber_start */ API_EXPORT struct fiber * fiber_new_ex(const char *name, const struct fiber_attr *fiber_attr, fiber_func f); /** * Return control to another fiber and wait until it'll be woken. 
* * \sa fiber_wakeup */ API_EXPORT void fiber_yield(void); /** * Start execution of created fiber. * * \param callee fiber to start * \param ... arguments to start the fiber with * * \sa fiber_new */ API_EXPORT void fiber_start(struct fiber *callee, ...); /** * Interrupt a synchronous wait of a fiber * * \param f fiber to be woken up */ API_EXPORT void fiber_wakeup(struct fiber *f); /** * Cancel the subject fiber. (set FIBER_IS_CANCELLED flag) * * If target fiber's flag FIBER_IS_CANCELLABLE set, then it would * be woken up (maybe prematurely). Then current fiber yields * until the target fiber is dead (or is woken up by * \sa fiber_wakeup). * * \param f fiber to be cancelled */ API_EXPORT void fiber_cancel(struct fiber *f); /** * Make it possible or not possible to wakeup the current * fiber immediately when it's cancelled. * * @param yesno status to set * @return previous state. */ API_EXPORT bool fiber_set_cancellable(bool yesno); /** * Set fiber to be joinable (false by default). * \param yesno status to set */ API_EXPORT void fiber_set_joinable(struct fiber *fiber, bool yesno); /** * Wait until the fiber is dead and then move its execution * status to the caller. * The fiber must not be detached (@sa fiber_set_joinable()). * @pre FIBER_IS_JOINABLE flag is set. * * \param f fiber to be woken up * \return fiber function ret code */ API_EXPORT int fiber_join(struct fiber *f); /** * Put the current fiber to sleep for at least 's' seconds. * * \param s time to sleep * * \note this is a cancellation point (\sa fiber_is_cancelled) */ API_EXPORT void fiber_sleep(double s); /** * Check current fiber for cancellation (it must be checked * manually). */ API_EXPORT bool fiber_is_cancelled(); /** * Report loop begin time as double (cheap). * Uses real time clock. */ API_EXPORT double fiber_time(void); /** * Report loop begin time as 64-bit int. * Uses real time clock. */ API_EXPORT uint64_t fiber_time64(void); /** * Report loop begin time as double (cheap). 
* Uses monotonic clock. */ API_EXPORT double fiber_clock(void); /** * Report loop begin time as 64-bit int. * Uses monotonic clock. */ API_EXPORT uint64_t fiber_clock64(void); /** * Reschedule fiber to end of event loop cycle. */ API_EXPORT void fiber_reschedule(void); /** * Return slab_cache suitable to use with tarantool/small library */ struct slab_cache; API_EXPORT struct slab_cache * cord_slab_cache(void); /** \endcond public */ /** * Fiber attribute container */ struct fiber_attr { /** Fiber stack size. */ size_t stack_size; /** Fiber flags. */ uint32_t flags; }; /** * Init fiber attr with default values */ void fiber_attr_create(struct fiber_attr *fiber_attr); struct fiber { coro_context ctx; /** Coro stack slab. */ struct slab *stack_slab; /** Coro stack addr. */ void *stack; /** Coro stack size. */ size_t stack_size; /** Valgrind stack id. */ unsigned int stack_id; /* A garbage-collected memory pool. */ struct region gc; /** * The fiber which should be scheduled when * this fiber yields. */ struct fiber *caller; /** Number of context switches. */ int csw; /** Fiber id. */ uint32_t fid; /** Fiber flags */ uint32_t flags; /** Link in cord->alive or cord->dead list. */ struct rlist link; /** Link in cord->ready list. */ struct rlist state; /** Triggers invoked before this fiber yields. Must not throw. */ struct rlist on_yield; /** Triggers invoked before this fiber stops. Must not throw. */ struct rlist on_stop; /** * The list of fibers awaiting for this fiber's timely * (or untimely) death. */ struct rlist wake; /** * This struct is considered as non-POD when compiling by g++. * You can safely ignore all offset_of-related warnings. * See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=31488 */ fiber_func f; va_list f_data; int f_ret; /** Fiber local storage */ void *fls[FIBER_KEY_MAX]; /** Exception which caused this fiber's death. 
*/ struct diag diag; char name[FIBER_NAME_MAX]; }; enum { FIBER_CALL_STACK = 16 }; struct cord_on_exit; /** * @brief An independent execution unit that can be managed by a separate OS * thread. Each cord consists of fibers to implement cooperative multitasking * model. */ struct cord { /** The fiber that is currently being executed. */ struct fiber *fiber; struct ev_loop *loop; /** * Every new fiber gets a new monotonic id. Ids 1-100 are * reserved. */ uint32_t max_fid; pthread_t id; const struct cord_on_exit *on_exit; /** A helper hash to map id -> fiber. */ struct mh_i32ptr_t *fiber_registry; /** All fibers */ struct rlist alive; /** Fibers, ready for execution */ struct rlist ready; /** A cache of dead fibers for reuse */ struct rlist dead; /** A watcher to have a single async event for all ready fibers. * This technique is necessary to be able to suspend * a single fiber on a few watchers (for example, * a timeout and an event from network, whichever comes * first). * */ ev_async wakeup_event; /** * libev sleeps at least backend_mintime, which is 1 ms in * case of poll()/Linux, unless there are idle watchers. * This is a special hack to speed up fiber_sleep(0), * i.e. a sleep with a zero timeout, to ensure that there * is no 1 ms delay in case of zero sleep timeout. */ ev_idle idle_event; /** A memory cache for (struct fiber) */ struct mempool fiber_mempool; /** A runtime slab cache for general use in this cord. */ struct slab_cache slabc; /** The "main" fiber of this cord, the scheduler. */ struct fiber sched; char name[FIBER_NAME_MAX]; }; extern __thread struct cord *cord_ptr; #define cord() cord_ptr #define fiber() cord()->fiber #define loop() (cord()->loop) void cord_create(struct cord *cord, const char *name); void cord_destroy(struct cord *cord); /** * Start a cord with the given thread function. * The return value of the function can be collected * with cord_join(). 
The function *must catch* all * exceptions and leave them in the diagnostics * area, cord_join() moves the exception from the * terminated cord to the caller of cord_join(). */ int cord_start(struct cord *cord, const char *name, void *(*f)(void *), void *arg); /** * Like cord_start(), but starts the event loop and * a fiber in the event loop. The event loop ends when the * fiber in main fiber dies/returns. The exception of the main * fiber is propagated to cord_cojoin(). */ int cord_costart(struct cord *cord, const char *name, fiber_func f, void *arg); /** * Yield until \a cord has terminated. * * On success: * * If \a cord has terminated with an uncaught exception * the exception is moved to the current fiber's diagnostics * area, otherwise the current fiber's diagnostics area is * cleared. * @param cord cord * @sa pthread_join() * * @return 0 on success, -1 if pthread_join failed or the * thread function terminated with an exception. */ int cord_cojoin(struct cord *cord); /** * Wait for \a cord to terminate. If \a cord has already * terminated, then returns immediately. * * @post If the subject cord terminated with an exception, * preserves the exception in the caller's cord. * * @param cord cord * @return 0 on success, -1 if pthread_join failed or the * thread function terminated with an exception. */ int cord_join(struct cord *cord); void cord_set_name(const char *name); static inline const char * cord_name(struct cord *cord) { return cord->name; } /** True if this cord represents the process main thread. */ bool cord_is_main(); void fiber_init(int (*fiber_invoke)(fiber_func f, va_list ap)); void fiber_free(void); /** * Set fiber name. * @param fiber Fiber to set name for. * @param name A new name of @a fiber. 
*/ void fiber_set_name(struct fiber *fiber, const char *name); static inline const char * fiber_name(struct fiber *f) { return f->name; } bool fiber_checkstack(); /** * @brief yield & check for timeout * @return true if timeout exceeded */ bool fiber_yield_timeout(ev_tstamp delay); void fiber_destroy_all(); void fiber_gc(void); void fiber_call(struct fiber *callee); struct fiber * fiber_find(uint32_t fid); void fiber_schedule_cb(ev_loop * /* loop */, ev_watcher *watcher, int revents); /** * \brief Associate \a value with \a key in fiber local storage * \param fiber fiber * \param key pre-defined key * \param value value to set */ inline void fiber_set_key(struct fiber *fiber, enum fiber_key key, void *value) { assert(key < FIBER_KEY_MAX); fiber->fls[key] = value; } static inline bool fiber_is_dead(struct fiber *f) { return f->flags & FIBER_IS_DEAD; } /** * \brief Retrieve value by \a key from fiber local storage * \param fiber fiber * \param key pre-defined key * \return value from from fiber local storage */ inline void * fiber_get_key(struct fiber *fiber, enum fiber_key key) { assert(key < FIBER_KEY_MAX); return fiber->fls[key]; } /** * Finalizer callback * \sa fiber_key_on_gc() */ typedef void (*fiber_key_gc_cb)(enum fiber_key key, void *arg); typedef int (*fiber_stat_cb)(struct fiber *f, void *ctx); int fiber_stat(fiber_stat_cb cb, void *cb_ctx); /** Useful for C unit tests */ static inline int fiber_c_invoke(fiber_func f, va_list ap) { return f(ap); } #if defined(__cplusplus) } /* extern "C" */ /* * Test if this fiber is in a cancellable state and was indeed * cancelled, and raise an exception (FiberIsCancelled) if * that's the case. */ static inline void fiber_testcancel(void) { /* * Fiber can catch FiberIsCancelled using try..catch * block in C or pcall()/xpcall() in Lua. However, * FIBER_IS_CANCELLED flag is still set and the subject * fiber will be killed by subsequent unprotected call of * this function. 
*/ if (fiber_is_cancelled()) tnt_raise(FiberIsCancelled); } static inline struct fiber * fiber_new_xc(const char *name, fiber_func func) { struct fiber *f = fiber_new(name, func); if (f == NULL) { diag_raise(); unreachable(); } return f; } static inline int fiber_cxx_invoke(fiber_func f, va_list ap) { try { return f(ap); } catch (struct error *e) { return -1; } } #endif /* defined(__cplusplus) */ static inline void * region_aligned_alloc_cb(void *ctx, size_t size) { void *ptr = region_aligned_alloc((struct region *) ctx, size, alignof(uint64_t)); if (ptr == NULL) diag_set(OutOfMemory, size, "region", "new slab"); return ptr; } #endif /* TARANTOOL_FIBER_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/cfg.c0000664000000000000000000000757113306565107015720 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "say.h" #include "cfg.h" #include "lua/utils.h" enum { MAX_OPT_NAME_LEN = 256, MAX_OPT_VAL_LEN = 256, MAX_STR_OPTS = 8 }; static void cfg_get(const char *param) { char buf[MAX_OPT_NAME_LEN]; snprintf(buf, sizeof(buf), "return box.cfg.%s", param); if (luaL_dostring(tarantool_L, buf) != 0) panic("cfg_get('%s')", param); } int cfg_geti(const char *param) { cfg_get(param); int val; if (lua_isboolean(tarantool_L, -1)) val = lua_toboolean(tarantool_L, -1); else val = lua_tointeger(tarantool_L, -1); lua_pop(tarantool_L, 1); return val; } int cfg_geti_default(const char *param, int default_val) { cfg_get(param); int ok; int val = lua_tointegerx(tarantool_L, -1, &ok); return ok ? 
val : default_val; } int64_t cfg_geti64(const char *param) { cfg_get(param); int64_t val = luaL_toint64(tarantool_L, -1); lua_pop(tarantool_L, 1); return val; } /* Support simultaneous cfg_gets("str1") and cfg_gets("str2") */ static const char * cfg_tostring(struct lua_State *L) { static __thread char values[MAX_STR_OPTS][MAX_OPT_VAL_LEN]; static __thread int i = 0; if (lua_isnil(L, -1)) return NULL; else { snprintf(values[i % MAX_STR_OPTS], MAX_OPT_VAL_LEN, "%s", lua_tostring(L, -1)); return values[i++ % MAX_STR_OPTS]; } } const char * cfg_gets(const char *param) { cfg_get(param); const char *val = cfg_tostring(tarantool_L); lua_pop(tarantool_L, 1); return val; } double cfg_getd(const char *param) { cfg_get(param); double val = lua_tonumber(tarantool_L, -1); lua_pop(tarantool_L, 1); return val; } double cfg_getd_default(const char *param, double default_val) { cfg_get(param); int ok; double val = lua_tonumberx(tarantool_L, -1, &ok); return ok ? val : default_val; } int cfg_getarr_size(const char *name) { cfg_get(name); if (lua_isnil(tarantool_L, -1)) { /* missing value is equal to empty array */ lua_pop(tarantool_L, 1); return 0; } else if (!lua_istable(tarantool_L, -1)) { /* scalars are handled like an array with one element */ lua_pop(tarantool_L, 1); return 1; } int result = lua_objlen(tarantool_L, -1); lua_pop(tarantool_L, 1); return result; } const char * cfg_getarr_elem(const char *name, int i) { cfg_get(name); if (!lua_istable(tarantool_L, -1)) { /* scalars are handled like an array with one element */ assert(i == 0 && !lua_isnil(tarantool_L, -1)); const char *val = cfg_tostring(tarantool_L); lua_pop(tarantool_L, 1); return val; } lua_rawgeti(tarantool_L, -1, i + 1); const char *val = cfg_tostring(tarantool_L); lua_pop(tarantool_L, 2); return val; } tarantool_1.9.1.26.g63eb81e3c/src/coio_file.h0000664000000000000000000000701113306560010017067 0ustar rootroot#ifndef INCLUDES_TARANTOOL_COIO_FILE_H #define INCLUDES_TARANTOOL_COIO_FILE_H /* * Copyright 
2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #include #include #include /** * Cooperative file I/O. * Unlike the rest of coio API, this implementation * doesn't support timeouts or cancellation. * * It follows the error reporting convention of the respective * system calls, i.e. it doesn't throw exceptions either. 
*/ int coio_file_open(const char *path, int flags, mode_t mode); int coio_file_close(int fd); ssize_t coio_pwrite(int fd, const void *buf, size_t count, off_t offset); ssize_t coio_pread(int fd, void *buf, size_t count, off_t offset); ssize_t coio_preadn(int fd, void *buf, size_t count, off_t offset); ssize_t coio_read(int fd, void *buf, size_t count); ssize_t coio_write(int fd, const void *buf, size_t count); off_t coio_lseek(int fd, off_t offset, int whence); int coio_stat(const char *pathname, struct stat *buf); int coio_lstat(const char *pathname, struct stat *buf); int coio_fstat(int fd, struct stat *buf); int coio_rename(const char *oldpath, const char *newpath); int coio_unlink(const char *pathname); int coio_mkdir(const char *pathname, mode_t mode); int coio_rmdir(const char *pathname); int coio_ftruncate(int fd, off_t length); int coio_truncate(const char *path, off_t length); int coio_glob(const char *pattern, int flags, int (*errfunc) (const char *epath, int eerrno), glob_t *pglob); int coio_chown(const char *path, uid_t owner, gid_t group); int coio_chmod(const char *path, mode_t mode); int coio_link(const char *oldpath, const char *newpath); int coio_symlink(const char *target, const char *linkpath); int coio_readlink(const char *pathname, char *buf, size_t bufsiz); int coio_sync(); int coio_fsync(int fd); int coio_fdatasync(int fd); int coio_tempdir(char *path, size_t path_len); int coio_readdir(const char *path, char **buf); int coio_copyfile(const char *source, const char *dest); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_COIO_FILE_H */ tarantool_1.9.1.26.g63eb81e3c/src/scramble.c0000664000000000000000000000616513306560010016733 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "scramble.h" #include "third_party/sha1.h" #include "third_party/base64.h" #include #include static void xor(unsigned char *to, unsigned const char *left, unsigned const char *right, uint32_t len) { const uint8_t *end = to + len; while (to < end) *to++= *left++ ^ *right++; } void scramble_prepare(void *out, const void *salt, const void *password, int password_len) { unsigned char hash1[SCRAMBLE_SIZE]; unsigned char hash2[SCRAMBLE_SIZE]; SHA1_CTX ctx; SHA1Init(&ctx); SHA1Update(&ctx, password, password_len); SHA1Final(hash1, &ctx); SHA1Init(&ctx); SHA1Update(&ctx, hash1, SCRAMBLE_SIZE); SHA1Final(hash2, &ctx); SHA1Init(&ctx); SHA1Update(&ctx, salt, SCRAMBLE_SIZE); SHA1Update(&ctx, hash2, SCRAMBLE_SIZE); SHA1Final(out, &ctx); xor(out, hash1, out, SCRAMBLE_SIZE); } int scramble_check(const void *scramble, const void *salt, const void *hash2) { SHA1_CTX ctx; unsigned char candidate_hash2[SCRAMBLE_SIZE]; SHA1Init(&ctx); SHA1Update(&ctx, salt, SCRAMBLE_SIZE); SHA1Update(&ctx, hash2, SCRAMBLE_SIZE); SHA1Final(candidate_hash2, &ctx); xor(candidate_hash2, candidate_hash2, scramble, SCRAMBLE_SIZE); /* * candidate_hash2 now supposedly contains hash1, turn it * into hash2 */ SHA1Init(&ctx); SHA1Update(&ctx, candidate_hash2, SCRAMBLE_SIZE); SHA1Final(candidate_hash2, &ctx); return memcmp(hash2, candidate_hash2, SCRAMBLE_SIZE); } void password_prepare(const char *password, int len, char *out, int out_len) { unsigned char hash2[SCRAMBLE_SIZE]; SHA1_CTX ctx; SHA1Init(&ctx); SHA1Update(&ctx, (const unsigned char *) password, len); SHA1Final(hash2, &ctx); SHA1Init(&ctx); SHA1Update(&ctx, hash2, SCRAMBLE_SIZE); SHA1Final(hash2, &ctx); base64_encode((char *) hash2, SCRAMBLE_SIZE, out, out_len, 0); } tarantool_1.9.1.26.g63eb81e3c/src/clock.c0000664000000000000000000000572113306560010016233 0ustar rootroot/* * Copyright 2010-2016 Tarantool AUTHORS: please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "clock.h" #include "trivia/util.h" double clock_realtime(void) { struct timespec ts; clock_gettime(CLOCK_REALTIME, &ts); return (double) ts.tv_sec + ts.tv_nsec / 1e9; } double clock_monotonic(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return (double) ts.tv_sec + ts.tv_nsec / 1e9; } double clock_process(void) { #if defined(CLOCK_PROCESS_CPUTIME_ID) struct timespec ts; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts); return (double) ts.tv_sec + ts.tv_nsec / 1e9; #else return (double) clock() / CLOCKS_PER_SEC; #endif } double clock_thread(void) { #if defined(CLOCK_THREAD_CPUTIME_ID) struct timespec ts; clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts); return (double) ts.tv_sec + ts.tv_nsec / 1e9; #else return (double) clock() / CLOCKS_PER_SEC; #endif } uint64_t clock_realtime64(void) { struct timespec ts; clock_gettime(CLOCK_REALTIME, &ts); return ((uint64_t)ts.tv_sec) * 1000000000 + ts.tv_nsec; } uint64_t clock_monotonic64(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ((uint64_t)ts.tv_sec) * 1000000000 + ts.tv_nsec; } uint64_t clock_process64(void) { #if defined(CLOCK_PROCESS_CPUTIME_ID) struct timespec ts; clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts); return ((uint64_t)ts.tv_sec) * 1000000000 + ts.tv_nsec; #else return (uint64_t) clock() * 1000000000 / CLOCKS_PER_SEC; #endif } uint64_t clock_thread64(void) { #if defined(CLOCK_THREAD_CPUTIME_ID) struct timespec ts; clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts); return ((uint64_t)ts.tv_sec) * 1000000000 + ts.tv_nsec; #else return (uint64_t) clock() * 1000000000 / CLOCKS_PER_SEC; #endif } tarantool_1.9.1.26.g63eb81e3c/src/random.h0000664000000000000000000000325513306560010016425 0ustar rootroot#ifndef INCLUDES_TARANTOOL_RANDOM_H #define INCLUDES_TARANTOOL_RANDOM_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif void random_init(void); void random_free(void); void random_bytes(char *buf, size_t size); #if defined(__cplusplus) } #endif /* extern "C" */ #endif /* INCLUDES_TARANTOOL_RANDOM_H */ tarantool_1.9.1.26.g63eb81e3c/src/path_lock.h0000664000000000000000000000425113306560010017106 0ustar rootroot#ifndef TARANTOOL_PATH_LOCK_H_INCLUDED #define TARANTOOL_PATH_LOCK_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Try to obtain an advisory lock on a path. * Opens a file descriptor and locks it. The descriptor * is returned as the lock handle. * * @param[in] path the file path to lock * @param[out] lock >= 0 if lock is acquired, -1 otherwise * * @retval 0 success, check out for lock status * @retval -1 the path does not exist or flock() failed, * lock is not modified. 
*/ int path_lock(const char *path, int *lock); /** * Release a lock returned by path_lock() * * @pre lock is acquired (>= 0) */ int path_unlock(int lock); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_PATH_LOCK_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/curl.h0000664000000000000000000000637113306560010016114 0ustar rootroot#ifndef TARANTOOL_CURL_H_INCLUDED #define TARANTOOL_CURL_H_INCLUDED 1 /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include "tarantool_ev.h" #include "diag.h" #include "fiber_cond.h" /** * CURL Statistics */ struct curl_stat { uint64_t sockets_added; uint64_t sockets_deleted; uint64_t active_requests; }; /** * CURL Environment */ struct curl_env { /** libcurl multi handler. */ CURLM *multi; /** Memory pool for sockets. */ struct mempool sock_pool; /** libev timer watcher. */ struct ev_timer timer_event; /** Statistics. */ struct curl_stat stat; }; /** * CURL Request */ struct curl_request { /** Internal libcurl status code. */ int code; /** Information associated with a specific easy handle. */ CURL *easy; /** * When request is given to curl-driver, client waits on this variable * until the handler (callback function) gives a signal within variable. * */ struct fiber_cond cond; }; /** * @brief Create a new CURL environment * @param env pointer to a structure to initialize * @param max_conn The maximum number of entries in connection cache * @retval 0 on success * @retval -1 on error, check diag */ int curl_env_create(struct curl_env *env, long max_conns); /** * Destroy HTTP client environment * @param env pointer to a structure to destroy */ void curl_env_destroy(struct curl_env *env); /** * Initialize a new CURL request * @param curl_request request * @retval 0 success * @retval -1 error, check diag */ int curl_request_create(struct curl_request *curl_request); /** * Cleanup CURL request * @param curl_request request */ void curl_request_destroy(struct curl_request *curl_request); /** * Execute CURL request * @param curl_request request * @param env environment * @param timeout - timeout of waiting for libcurl api */ CURLMcode curl_execute(struct curl_request *curl_request, struct curl_env *env, double timeout); #endif /* TARANTOOL_CURL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/cpu_feature.h0000664000000000000000000000412413306560010017443 0ustar rootroot#ifndef TARANTOOL_CPU_FEATURES_H #define TARANTOOL_CPU_FEATURES_H /* * Copyright 2010-2016, 
Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include /* Check whether CPU supports SSE 4.2 (needed to compute CRC32 in hardware). * * @param feature indetifier (see above) of the target feature * * @return true if feature is available, false if unavailable. */ bool sse42_enabled_cpu(); #if defined (__x86_64__) || defined (__i386__) /* Hardware-calculate CRC32 for the given data buffer. 
* * @param crc initial CRC * @param buf data buffer * @param len buffer length * * @pre true == cpu_has (cpuf_sse4_2) * @return CRC32 value */ uint32_t crc32c_hw(uint32_t crc, const char *buf, unsigned int len); #endif #endif /* TARANTOOL_CPU_FEATURES_H */ tarantool_1.9.1.26.g63eb81e3c/src/exception.h0000664000000000000000000001112513306565107017152 0ustar rootroot#ifndef TARANTOOL_EXCEPTION_H_INCLUDED #define TARANTOOL_EXCEPTION_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include "reflection.h" #include "diag.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ extern const struct type_info type_Exception; extern const struct type_info type_OutOfMemory; extern const struct type_info type_FiberIsCancelled; extern const struct type_info type_TimedOut; extern const struct type_info type_ChannelIsClosed; extern const struct type_info type_LuajitError; extern const struct type_info type_IllegalParams; extern const struct type_info type_SystemError; const char * exception_get_string(struct error *e, const struct method_info *method); int exception_get_int(struct error *e, const struct method_info *method); #if defined(__cplusplus) } /* extern "C" */ class Exception: public error { public: void *operator new(size_t size); void *operator new(size_t size, void *p) { (void) size; return p; } void operator delete(void*); const char *get_file() const { return file; } int get_line() const { return line; } const char *get_errmsg() const { return errmsg; } NORETURN virtual void raise() = 0; virtual void log() const; virtual ~Exception(); Exception(const Exception &) = delete; Exception& operator=(const Exception&) = delete; protected: Exception(const struct type_info *type, const char *file, unsigned line); }; class SystemError: public Exception { public: virtual void raise() { throw this; } int get_errno() const { return m_errno; } virtual void log() const; SystemError(const char *file, unsigned line, const char *format, ...); protected: SystemError(const struct type_info *type, const char *file, unsigned line); protected: /* system errno */ int m_errno; }; class OutOfMemory: public SystemError { public: OutOfMemory(const char *file, unsigned line, size_t amount, const char *allocator, const char *object); virtual void raise() { throw this; } }; class TimedOut: public SystemError { public: TimedOut(const char *file, unsigned line); virtual void raise() { throw this; } }; class ChannelIsClosed: 
public Exception { public: ChannelIsClosed(const char *file, unsigned line); virtual void raise() { throw this; } }; /** * This is thrown by fiber_* API calls when the fiber is * cancelled. */ class FiberIsCancelled: public Exception { public: FiberIsCancelled(const char *file, unsigned line); virtual void log() const; virtual void raise() { throw this; } }; class LuajitError: public Exception { public: LuajitError(const char *file, unsigned line, const char *msg); virtual void raise() { throw this; } }; class IllegalParams: public Exception { public: IllegalParams(const char *file, unsigned line, const char *format, ...); virtual void raise() { throw this; } }; /** * Initialize the exception subsystem. */ void exception_init(); #define tnt_error(class, ...) ({ \ say_debug("%s at %s:%i", #class, __FILE__, __LINE__); \ class *e = new class(__FILE__, __LINE__, ##__VA_ARGS__); \ diag_add_error(diag_get(), e); \ e; \ }) #define tnt_raise(...) do { \ throw tnt_error(__VA_ARGS__); \ } while (0) #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_EXCEPTION_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/uri.h0000664000000000000000000000454613306560010015750 0ustar rootroot#ifndef TARANTOOL_URI_H_INCLUDED #define TARANTOOL_URI_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include /* NI_MAXHOST, NI_MAXSERV */ #include /* _POSIX_PATH_MAX */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct uri { const char *scheme; size_t scheme_len; const char *login; size_t login_len; const char *password; size_t password_len; const char *host; size_t host_len; const char *service; size_t service_len; const char *path; size_t path_len; const char *query; size_t query_len; const char *fragment; size_t fragment_len; int host_hint; }; #define URI_HOST_UNIX "unix/" #define URI_MAXHOST NI_MAXHOST #define URI_MAXSERVICE _POSIX_PATH_MAX /* _POSIX_PATH_MAX always > NI_MAXSERV */ int uri_parse(struct uri *uri, const char *str); int uri_format(char *str, int len, const struct uri *uri, bool write_password); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_URI_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/path_lock.c0000664000000000000000000000400413306560010017075 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "path_lock.h" #include #include #include #include #include "diag.h" /** * Open file descriptor and lock it. */ int path_lock(const char *path, int *lock) { int fd = open(path, O_RDONLY); if (fd < 0) { diag_set(SystemError, "Can't open path: %s", path); return -1; } if (flock(fd, LOCK_EX | LOCK_NB) < 0) { if (errno != EWOULDBLOCK) { diag_set(SystemError, "Can't lock path: %s", path); close(fd); return -1; } close(fd); fd = -1; } *lock= fd; return 0; } /* * Release a lock represented by the file descriptor. */ int path_unlock(int lock) { return close(lock); } tarantool_1.9.1.26.g63eb81e3c/src/util.c0000664000000000000000000001727313306560010016122 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include #include #include #include #include #include #include #include #include #include #include /* mp_char2escape[] table */ #include "say.h" /** Find a string in an array of strings. * * @param haystack Array of strings. Either NULL * pointer terminated (for arrays of * unknown size) or of size hmax. * @param needle string to look for * @param hmax the index to use if nothing is found * also limits the size of the array * @return string index or hmax if the string is not found. */ uint32_t strindex(const char **haystack, const char *needle, uint32_t hmax) { for (unsigned index = 0; index != hmax && haystack[index]; index++) if (strcasecmp(haystack[index], needle) == 0) return index; return hmax; } /** * Same as strindex(), but with a specified length of @a needle. * Used, when @a needle is not 0 terminated. 
*/ uint32_t strnindex(const char **haystack, const char *needle, uint32_t len, uint32_t hmax) { if (len == 0) return hmax; for (unsigned index = 0; index != hmax && haystack[index]; index++) { if (strncasecmp(haystack[index], needle, len) == 0 && strlen(haystack[index]) == len) return index; } return hmax; } void close_all_xcpt(int fdc, ...) { unsigned keep[fdc]; va_list ap; struct rlimit nofile; va_start(ap, fdc); for (int j = 0; j < fdc; j++) { keep[j] = va_arg(ap, unsigned); } va_end(ap); if (getrlimit(RLIMIT_NOFILE, &nofile) != 0) nofile.rlim_cur = 10000; for (unsigned i = 3; i < nofile.rlim_cur; i++) { bool found = false; for (int j = 0; j < fdc; j++) { if (keep[j] == i) { found = true; break; } } if (!found) close(i); } } static int itoa(int val, char *buf) { char *p = buf; if (val < 0) { *p++ = '-'; val = -val; } /* Print full range if it is an unsigned number. */ unsigned uval = val; char *start = p; do { *p++ = '0' + uval % 10; uval /= 10; } while (uval > 0); int len = (int)(p - buf); *p-- = '\0'; /* Reverse the resulting string. */ do { char tmp = *p; *p = *start; *start = tmp; } while (++start < --p); return len; } /** * Async-signal-safe implementation of printf() into an fd, to be * able to write messages into the error log inside a signal * handler. Only supports %s and %d, %u, format specifiers. */ ssize_t fdprintf(int fd, const char *format, ...) 
{ ssize_t total = 0; char buf[22]; va_list args; va_start(args, format); while (*format) { const char *start = format; ssize_t len, res; if (*format++ != '%') { while (*format != '\0' && *format != '%') format++; len = format - start; goto out; } switch (*format++) { case '%': len = 1; break; case 's': start = va_arg(args, char *); if (start == NULL) start = "(null)"; len = strlen(start); break; case 'd': case 'u': start = buf; len = itoa(va_arg(args, int), buf); break; default: len = 2; break; } out: res = write(fd, start, len); if (res > 0) total += res; if (res != len) break; } va_end(args); return total; } /** Allocate and fill an absolute path to a file. */ char * abspath(const char *filename) { if (filename[0] == '/') return strdup(filename); char *abspath = (char *) malloc(PATH_MAX + 1); if (abspath == NULL) return NULL; if (getcwd(abspath, PATH_MAX - strlen(filename) - 1) == NULL) say_syserror("getcwd"); else { strcat(abspath, "/"); } strcat(abspath, filename); return abspath; } char * int2str(long long int val) { static __thread char buf[22]; snprintf(buf, sizeof(buf), "%lld", val); return buf; } int utf8_check_printable(const char *start, size_t length) { const unsigned char *end = (const unsigned char *) start + length; const unsigned char *pointer = (const unsigned char *) start; while (pointer < end) { unsigned char octet; unsigned int width; unsigned int value; size_t k; octet = pointer[0]; width = (octet & 0x80) == 0x00 ? 1 : (octet & 0xE0) == 0xC0 ? 2 : (octet & 0xF0) == 0xE0 ? 3 : (octet & 0xF8) == 0xF0 ? 4 : 0; value = (octet & 0x80) == 0x00 ? octet & 0x7F : (octet & 0xE0) == 0xC0 ? octet & 0x1F : (octet & 0xF0) == 0xE0 ? octet & 0x0F : (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; if (!width) return 0; if (pointer + width > end) return 0; for (k = 1; k < width; k++) { octet = pointer[k]; if ((octet & 0xC0) != 0x80) return 0; value = (value << 6) + (octet & 0x3F); } if (!((width == 1) || (width == 2 && value >= 0x80) || (width == 3 && value >= 0x800) || (width == 4 && value >= 0x10000))) return 0; /* * gh-354: yaml incorrectly escapes special characters in a string * Check that the string can be actually printed unescaped. */ if (*pointer > 0x7F && !((pointer[0] == 0x0A) || (pointer[0] >= 0x20 && pointer[0] <= 0x7E) || (pointer[0] == 0xC2 && pointer[1] >= 0xA0) || (pointer[0] > 0xC2 && pointer[0] < 0xED) || (pointer[0] == 0xED && pointer[1] < 0xA0) || (pointer[0] == 0xEE) || (pointer[0] == 0xEF && !(pointer[1] == 0xBB && pointer[2] == 0xBF) && !(pointer[1] == 0xBF && (pointer[2] == 0xBE || pointer[2] == 0xBF))) ) ) { return 0; } pointer += width; } return 1; } int json_escape(char *buf, int size, const char *data) { int total = 0; int data_len = strlen(data); for (int i = 0; i < data_len; i++) { unsigned char c = (unsigned char ) data[i]; if (c < 128 && mp_char2escape[c] != NULL) { /* Escape character */ SNPRINT(total, snprintf, buf, size, "%s", mp_char2escape[c]); } else { SNPRINT(total, snprintf, buf, size, "%c", c); } } return total; } const char *precision_fmts[] = { "%.0lg", "%.1lg", "%.2lg", "%.3lg", "%.4lg", "%.5lg", "%.6lg", "%.7lg", "%.8lg", "%.9lg", "%.10lg", "%.11lg", "%.12lg", "%.13lg", "%.14lg" }; void fpconv_check() { char buf[8]; snprintf(buf, sizeof(buf), "%g", 0.5); /* Failing this test might imply the platform has a buggy dtoa * implementation or wide characters */ assert(buf[0] == '0' && buf[2] == '5' && buf[3] == 0); /* * Currently Tarantool doesn't support user locales (see main()). * Just check that locale decimal point is '.'. 
*/ assert(buf[1] == '.'); } tarantool_1.9.1.26.g63eb81e3c/src/latency.h0000664000000000000000000000412613306565107016616 0ustar rootroot#ifndef TARANTOOL_LATENCY_H_INCLUDED #define TARANTOOL_LATENCY_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ struct histogram; /** * Latency counter. */ struct latency { /** * Histogram of all latency observations, * in microseconds. */ struct histogram *histogram; }; /** * Initialize a latency counter. * Return 0 on success, -1 on OOM. */ int latency_create(struct latency *latency); /** * Destroy a latency counter. */ void latency_destroy(struct latency *latency); /** * Update a latency counter with a new observation. * @value is the observed latency value, in seconds. 
*/ void latency_collect(struct latency *latency, double value); /** * Get accumulated latency value, in seconds. */ double latency_get(struct latency *latency); #endif /* TARANTOOL_LATENCY_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/find_path.c0000664000000000000000000000473313306560010017076 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #if defined(__APPLE__) #include #elif defined(__FreeBSD__) #include #endif const char * find_path(const char *argv0) { static char path[PATH_MAX] = {'\0'}; static bool found = false; if (found) return path; char buf[PATH_MAX]; size_t size = PATH_MAX - 1; if (argv0[0] == '/') snprintf(buf, size, "%s", argv0); else { int rc = -1; #if defined(__linux__) rc = readlink("/proc/self/exe", buf, size); if (rc >= 0) { /* readlink() does not add a trailing zero */ buf[rc] = '\0'; } #elif defined(__FreeBSD__) int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 }; rc = sysctl(mib, 4, buf, &size, NULL, 0); #elif defined(__sun) snprintf(buf, size, "%s", getexecname()); rc = 0; #elif defined(__APPLE__) uint32_t usize = size; rc = _NSGetExecutablePath(buf, &usize); #endif if (rc == -1) snprintf(buf, sizeof(buf) - 1, "%s", getenv("_")); } if (realpath(buf, path) == NULL) snprintf(path, sizeof(path), "%s", buf); found = true; return path; } tarantool_1.9.1.26.g63eb81e3c/src/systemd.c0000664000000000000000000001017413306560010016626 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "systemd.h" #if defined(WITH_SYSTEMD) #include #include #include #include #include #include #include #include #include #include #include "say.h" static int systemd_fd = -1; static const char *sd_unix_path = NULL; int systemd_init() { sd_unix_path = getenv("NOTIFY_SOCKET"); if (sd_unix_path == NULL) { say_info("systemd: NOTIFY_SOCKET variable is empty, skipping"); return -1; } if ((sd_unix_path[0] != '@' && sd_unix_path[0] != '/') || (sd_unix_path[1] == '\0')) { say_error("systemd: NOTIFY_SOCKET contains bad value"); goto error; } /* To be sure, that path to unix socket is OK */ struct sockaddr_un sa = { .sun_family = AF_UNIX, .sun_path = { '\0' } }; if (strlen(sd_unix_path) >= sizeof(sa.sun_path)) { say_error("systemd: NOTIFY_SOCKET is longer that MAX_UNIX_PATH"); goto error; } if ((systemd_fd = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0)) == -1) { say_syserror("systemd: failed to create unix socket"); goto error; } int sndbuf_sz = 8 * 1024 * 1024; if (setsockopt(systemd_fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_sz, sizeof(int)) < 0) { say_syserror("systemd: failed to set sndbuf size"); goto error; }; return 0; error: if (systemd_fd > 0) { close(systemd_fd); systemd_fd = -1; } sd_unix_path = NULL; return -1; } void systemd_free() { if (systemd_fd > 0) close(systemd_fd); } int systemd_notify(const char *message) { if (systemd_fd == -1 || sd_unix_path == NULL) return 0; struct sockaddr_un sa = { .sun_family = AF_UNIX, }; struct iovec vec = { .iov_base = 
(char *)message, .iov_len = (size_t )strlen(message) }; struct msghdr msg = { .msg_iov = &vec, .msg_iovlen = 1, .msg_name = &sa, }; msg.msg_namelen = sizeof(sa.sun_family) + strlen(sd_unix_path); strncpy(sa.sun_path, sd_unix_path, sizeof(sa.sun_path)); if (sa.sun_path[0] == '@') sa.sun_path[0] = '\0'; say_debug("systemd: sending message '%s'", message); ssize_t sent = sendmsg(systemd_fd, &msg, MSG_NOSIGNAL); if (sent == -1) { say_syserror("systemd: failed to send message"); return -1; } return sent; } int systemd_vsnotify(const char *format, va_list ap) { if (systemd_fd == -1 || sd_unix_path == NULL) return 0; char *buf = NULL; int rv = vasprintf(&buf, format, ap); if (rv < 0 || buf == NULL) { say_syserror("systemd: failed to format string '%s'", format); return -1; } rv = systemd_notify(buf); free(buf); return rv; } CFORMAT(printf, 1, 2) int systemd_snotify(const char *format, ...) { if (systemd_fd == -1 || sd_unix_path == NULL) return 0; va_list args; va_start(args, format); size_t res = systemd_vsnotify(format, args); va_end(args); return res; } #endif /* defined(WITH_SYSTEMD) */ tarantool_1.9.1.26.g63eb81e3c/src/coio_file.c0000664000000000000000000003204213306560010017064 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coio_file.h" #include "coio_task.h" #include "fiber.h" #include "say.h" #include "fio.h" #include #include #include /** * A context of libeio request for any * coio task. */ struct coio_file_task { ssize_t result; int errorno; struct fiber *fiber; bool done; union { struct { int fd; struct stat *buf; } fstat; struct { struct stat *buf; const char *pathname; } lstat; struct { const char *pattern; int flags; int (*errfunc) (const char *epath, int eerrno); glob_t *pglob; } glob; struct { int fd; off_t offset; int whence; } lseek; struct { int fd; const void *buf; size_t count; } write; struct { int fd; void *buf; size_t count; } read; struct { const char *pathname; char *buf; size_t bufsize; } readlink; struct { char *tpl; } tempdir; struct { char **bufp; const char *pathname; } readdir; struct { const char *source; const char *dest; } copyfile; }; }; #define INIT_COEIO_FILE(name) \ struct coio_file_task name; \ memset(&name, 0, sizeof(name)); \ name.fiber = fiber(); \ /** A callback invoked by eio when a task is complete. */ static int coio_complete(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; eio->errorno = req->errorno; eio->done = true; eio->result = req->result; fiber_wakeup(eio->fiber); return 0; } /** * Synchronously (from cooperative multitasking point of view) * wait for task completion. 
*/ static ssize_t coio_wait_done(eio_req *req, struct coio_file_task *eio) { if (!req) { errno = ENOMEM; return -1; } while (!eio->done) fiber_yield(); errno = eio->errorno; return eio->result; } int coio_file_open(const char *path, int flags, mode_t mode) { INIT_COEIO_FILE(eio); eio_req *req = eio_open(path, flags, mode, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_file_close(int fd) { INIT_COEIO_FILE(eio); eio_req *req = eio_close(fd, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } ssize_t coio_pwrite(int fd, const void *buf, size_t count, off_t offset) { INIT_COEIO_FILE(eio); eio_req *req = eio_write(fd, (void *) buf, count, offset, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } ssize_t coio_pread(int fd, void *buf, size_t count, off_t offset) { INIT_COEIO_FILE(eio); eio_req *req = eio_read(fd, buf, count, offset, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } ssize_t coio_preadn(int fd, void *buf, size_t count, off_t offset) { size_t n = 0; do { ssize_t r; do { r = coio_pread(fd, buf + n, count - n, offset + n); } while (r == -1 && errno == EINTR); if (r <= 0) return -1; n += r; } while (n < count); assert(n == count); return n; } static void coio_do_write(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = write(eio->write.fd, eio->write.buf, eio->write.count); eio->errorno = errno; } ssize_t coio_write(int fd, const void *buf, size_t count) { INIT_COEIO_FILE(eio); eio.write.buf = buf; eio.write.count = count; eio.write.fd = fd; eio_req *req = eio_custom(coio_do_write, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_read(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = read(eio->read.fd, eio->read.buf, eio->read.count); req->errorno = errno; } ssize_t coio_read(int fd, void *buf, size_t count) { INIT_COEIO_FILE(eio); eio.read.buf = buf; eio.read.count = count; eio.read.fd = 
fd; eio_req *req = eio_custom(coio_do_read, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_lseek(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = lseek(eio->lseek.fd, eio->lseek.offset, eio->lseek.whence); req->errorno = errno; } off_t coio_lseek(int fd, off_t offset, int whence) { INIT_COEIO_FILE(eio); eio.lseek.whence = whence; eio.lseek.offset = offset; eio.lseek.fd = fd; eio_req *req = eio_custom(coio_do_lseek, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_lstat(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = lstat(eio->lstat.pathname, eio->lstat.buf); req->errorno = errno; } int coio_lstat(const char *pathname, struct stat *buf) { INIT_COEIO_FILE(eio); eio.lstat.pathname = pathname; eio.lstat.buf = buf; eio_req *req = eio_custom(coio_do_lstat, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_stat(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = stat(eio->lstat.pathname, eio->lstat.buf); req->errorno = errno; } int coio_stat(const char *pathname, struct stat *buf) { INIT_COEIO_FILE(eio); eio.lstat.pathname = pathname; eio.lstat.buf = buf; eio_req *req = eio_custom(coio_do_stat, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_fstat(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = fstat(eio->fstat.fd, eio->fstat.buf); req->errorno = errno; } int coio_fstat(int fd, struct stat *stat) { INIT_COEIO_FILE(eio); eio.fstat.fd = fd; eio.fstat.buf = stat; eio_req *req = eio_custom(coio_do_fstat, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_rename(const char *oldpath, const char *newpath) { INIT_COEIO_FILE(eio); eio_req *req = eio_rename(oldpath, newpath, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int 
coio_unlink(const char *pathname) { INIT_COEIO_FILE(eio); eio_req *req = eio_unlink(pathname, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_ftruncate(int fd, off_t length) { INIT_COEIO_FILE(eio); eio_req *req = eio_ftruncate(fd, length, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_truncate(const char *path, off_t length) { INIT_COEIO_FILE(eio); eio_req *req = eio_truncate(path, length, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_glob(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = glob(eio->glob.pattern, eio->glob.flags, eio->glob.errfunc, eio->glob.pglob); req->errorno = errno; } int coio_glob(const char *pattern, int flags, int (*errfunc) (const char *epath, int eerrno), glob_t *pglob) { INIT_COEIO_FILE(eio); eio.glob.pattern = pattern; eio.glob.flags = flags; eio.glob.errfunc = errfunc; eio.glob.pglob = pglob; eio_req *req = eio_custom(coio_do_glob, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_chown(const char *path, uid_t owner, gid_t group) { INIT_COEIO_FILE(eio); eio_req *req = eio_chown(path, owner, group, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_chmod(const char *path, mode_t mode) { INIT_COEIO_FILE(eio); eio_req *req = eio_chmod(path, mode, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_mkdir(const char *pathname, mode_t mode) { INIT_COEIO_FILE(eio); eio_req *req = eio_mkdir(pathname, mode, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_rmdir(const char *pathname) { INIT_COEIO_FILE(eio); eio_req *req = eio_rmdir(pathname, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_link(const char *oldpath, const char *newpath) { INIT_COEIO_FILE(eio); eio_req *req = eio_link(oldpath, newpath, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_symlink(const char *target, const char *linkpath) { 
INIT_COEIO_FILE(eio); eio_req *req = eio_symlink(target, linkpath, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_readlink(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; req->result = readlink(eio->readlink.pathname, eio->readlink.buf, eio->readlink.bufsize); req->errorno = errno; } int coio_readlink(const char *pathname, char *buf, size_t bufsize) { INIT_COEIO_FILE(eio); eio.readlink.pathname = pathname; eio.readlink.buf = buf; eio.readlink.bufsize = bufsize; eio_req *req = eio_custom(coio_do_readlink, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_tempdir(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; char *res = mkdtemp(eio->tempdir.tpl); req->errorno = errno; if (res == NULL) { req->result = -1; } else { req->result = 0; } } int coio_tempdir(char *path, size_t path_len) { INIT_COEIO_FILE(eio); if (path_len < sizeof("/tmp/XXXXXX") + 1) { errno = ENOMEM; return -1; } snprintf(path, path_len, "/tmp/XXXXXX"); eio.tempdir.tpl = path; eio_req *req = eio_custom(coio_do_tempdir, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_sync() { INIT_COEIO_FILE(eio); eio_req *req = eio_sync(0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_fsync(int fd) { INIT_COEIO_FILE(eio); eio_req *req = eio_fsync(fd, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } int coio_fdatasync(int fd) { INIT_COEIO_FILE(eio); eio_req *req = eio_fdatasync(fd, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_readdir(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; DIR *dirp = opendir(eio->readdir.pathname); if (dirp == NULL) goto error; size_t capacity = 128; size_t len = 0; struct dirent *entry; char *buf = (char *) malloc(capacity); if (buf == NULL) goto mem_error; req->result = 0; do { entry = readdir(dirp); if (entry == NULL || strcmp(entry->d_name, 
".") == 0 || strcmp(entry->d_name, "..") == 0) continue; size_t namlen = strlen(entry->d_name); size_t needed = len + namlen + 1; if (needed > capacity) { if (needed <= capacity * 2) capacity *= 2; else capacity = needed * 2; char *new_buf = (char *) realloc(buf, capacity); if (new_buf == NULL) goto mem_error; buf = new_buf; } memcpy(&buf[len], entry->d_name, namlen); len += namlen; buf[len++] = '\n'; req->result++; } while(entry != NULL); if (len > 0) buf[len - 1] = 0; else buf[0] = 0; *eio->readdir.bufp = buf; closedir(dirp); return; mem_error: free(buf); closedir(dirp); error: req->result = -1; req->errorno = errno; } int coio_readdir(const char *dir_path, char **buf) { INIT_COEIO_FILE(eio) eio.readdir.bufp = buf; eio.readdir.pathname = dir_path; eio_req *req = eio_custom(coio_do_readdir, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } static void coio_do_copyfile(eio_req *req) { struct coio_file_task *eio = (struct coio_file_task *)req->data; struct stat st; if (stat(eio->copyfile.source, &st) < 0) { goto error; } int source_fd = open(eio->copyfile.source, O_RDONLY); if (source_fd < 0) { goto error; } int dest_fd = open(eio->copyfile.dest, O_WRONLY | O_CREAT, st.st_mode & 0777); if (dest_fd < 0) { goto error_dest; } enum { COPY_FILE_BUF_SIZE = 4096 }; char buf[COPY_FILE_BUF_SIZE]; while (true) { ssize_t nread = fio_read(source_fd, buf, sizeof(buf)); if (nread < 0) goto error_copy; if (nread == 0) break; /* eof */ ssize_t nwritten = fio_writen(dest_fd, buf, nread); if (nwritten < 0) goto error_copy; } req->result = 0; close(source_fd); close(dest_fd); return; error_copy: close(dest_fd); error_dest: close(source_fd); error: req->errorno = errno; req->result = -1; return; } int coio_copyfile(const char *source, const char *dest) { INIT_COEIO_FILE(eio) eio.copyfile.source = source; eio.copyfile.dest = dest; eio_req *req = eio_custom(coio_do_copyfile, 0, coio_complete, &eio); return coio_wait_done(req, &eio); } 
tarantool_1.9.1.26.g63eb81e3c/src/fio.c0000664000000000000000000001545313306560010015720 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "fio.h" #include #include #include #include #include #include #include #include #include #include const char * fio_filename(int fd) { #ifdef TARGET_OS_LINUX int save_errno = errno; char *proc_path = tt_static_buf(); snprintf(proc_path, TT_STATIC_BUF_LEN, "/proc/self/fd/%d", fd); char *filename_path = tt_static_buf(); ssize_t sz = readlink(proc_path, filename_path, TT_STATIC_BUF_LEN); errno = save_errno; if (sz >= 0) { sz = MIN(sz, TT_STATIC_BUF_LEN - 1); filename_path[sz] = '\0'; return filename_path; } #else /* TARGET_OS_LINUX */ (void) fd; #endif return ""; /* Not implemented. */ } ssize_t fio_read(int fd, void *buf, size_t count) { size_t n = 0; do { ssize_t nrd = read(fd, buf + n, count - n); if (nrd < 0) { if (errno == EINTR) { errno = 0; continue; } say_syserror("read, [%s]", fio_filename(fd)); return -1; /* XXX: file position is unspecified */ } else if (nrd == 0) { break; /* EOF */ } n += nrd; } while (n < count); assert(n <= count); return n; } ssize_t fio_pread(int fd, void *buf, size_t count, off_t offset) { size_t n = 0; do { ssize_t nrd = pread(fd, buf + n, count - n, offset + n); if (nrd < 0) { if (errno == EINTR) { errno = 0; continue; } say_syserror("pread, [%s]", fio_filename(fd)); return -1; } else if (nrd == 0) { break; /* EOF */ } n += nrd; } while (n < count); assert(n <= count); return n; } int fio_writen(int fd, const void *buf, size_t count) { ssize_t to_write = (ssize_t) count; while (to_write > 0) { ssize_t nwr = write(fd, buf, to_write); if (nwr < 0) { if (errno == EINTR) { errno = 0; continue; } say_syserror("write, [%s]", fio_filename(fd)); return -1; /* XXX: file position is unspecified */ } buf += nwr; to_write -= nwr; } assert(to_write == 0); return 0; } ssize_t fio_writev(int fd, struct iovec *iov, int iovcnt) { assert(iov && iovcnt >= 0); ssize_t nwr; restart: nwr = writev(fd, iov, iovcnt); if (nwr < 0) { if (errno == EINTR) { errno = 0; goto restart; } if (errno != EAGAIN && errno != EWOULDBLOCK) say_syserror("writev, 
[%s]", fio_filename(fd)); } return nwr; } ssize_t fio_writevn(int fd, struct iovec *iov, int iovcnt) { assert(iov && iovcnt >= 0); ssize_t nwr = 0; int iov_pos = 0; struct fio_batch *batch = fio_batch_new(); while (iov_pos < iovcnt) { int iov_to_batch = MIN(batch->max_iov, iovcnt - iov_pos); memmove(batch->iov + batch->iovcnt, iov + iov_pos, sizeof(struct iovec) * iov_to_batch); fio_batch_add(batch, iov_to_batch); iov_pos += iov_to_batch; while (batch->iovcnt) { ssize_t written = fio_batch_write(batch, fd); if (written < 0) { fio_batch_delete(batch); return -1; } nwr += written; } } fio_batch_delete(batch); return nwr; } off_t fio_lseek(int fd, off_t offset, int whence) { off_t effective_offset = lseek(fd, offset, whence); if (effective_offset == -1) { say_syserror("lseek, [%s]: offset=%jd, whence=%d", fio_filename(fd), (intmax_t) offset, whence); } else if (whence == SEEK_SET && effective_offset != offset) { say_error("lseek, [%s]: offset set to unexpected value: " "requested %jd effective %jd", fio_filename(fd), (intmax_t)offset, (intmax_t)effective_offset); } return effective_offset; } int fio_truncate(int fd, off_t offset) { int rc = ftruncate(fd, offset); if (rc) say_syserror("fio_truncate, [%s]: offset=%jd", fio_filename(fd), (intmax_t) offset); return rc; } struct fio_batch * fio_batch_new(void) { int max_iov = sysconf(_SC_IOV_MAX); if (max_iov < 1) max_iov = IOV_MAX; struct fio_batch *batch = (struct fio_batch *) malloc(sizeof(struct fio_batch) + sizeof(struct iovec) * max_iov); if (batch == NULL) return NULL; fio_batch_reset(batch); batch->max_iov = max_iov; return batch; } void fio_batch_delete(struct fio_batch *batch) { free(batch); } size_t fio_batch_add(struct fio_batch *batch, int count) { assert(batch->iovcnt + count <= batch->max_iov); size_t total_bytes = 0; struct iovec *iov = batch->iov + batch->iovcnt; struct iovec *end = iov + count; for (; iov != end; ++iov) { assert(iov->iov_base != NULL && iov->iov_len > 0); total_bytes += iov->iov_len; } 
batch->iovcnt += count; batch->bytes += total_bytes; return total_bytes; } /** * Rotate batch after partial write. */ static inline void fio_batch_rotate(struct fio_batch *batch, size_t bytes_written) { /* * writev(2) usually fully write all data on local filesystems. */ if (likely(bytes_written == batch->bytes)) { /* Full write */ fio_batch_reset(batch); return; } assert(bytes_written < batch->bytes); /* Partial write */ batch->bytes -= bytes_written; struct iovec *iov = batch->iov; struct iovec *iovend = iov + batch->iovcnt; for (; iov < iovend; ++iov) { if (iov->iov_len > bytes_written) { iov->iov_base = (char *) iov->iov_base + bytes_written; iov->iov_len -= bytes_written; break; } bytes_written -= iov->iov_len; } assert(iov < iovend); /* Partial write */ memmove(batch->iov, iov, (iovend - iov) * sizeof(struct iovec)); batch->iovcnt = iovend - iov; } ssize_t fio_batch_write(struct fio_batch *batch, int fd) { ssize_t bytes_written = fio_writev(fd, batch->iov, batch->iovcnt); if (unlikely(bytes_written <= 0)) return -1; /* Error */ fio_batch_rotate(batch, bytes_written); return bytes_written; } tarantool_1.9.1.26.g63eb81e3c/src/box/0000775000000000000000000000000013306565107015573 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/box/xlog.c0000664000000000000000000013765413306565107016730 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "xlog.h" #include #include #include #include "fiber.h" #include "exception.h" #include "crc32.h" #include "fio.h" #include "third_party/tarantool_eio.h" #include #include "coio_file.h" #include "error.h" #include "xrow.h" #include "iproto_constants.h" #include "errinj.h" /* * marker is MsgPack fixext2 * +--------+--------+--------+--------+ * | 0xd5 | type | data | * +--------+--------+--------+--------+ */ typedef uint32_t log_magic_t; static const log_magic_t row_marker = mp_bswap_u32(0xd5ba0bab); /* host byte order */ static const log_magic_t zrow_marker = mp_bswap_u32(0xd5ba0bba); /* host byte order */ static const log_magic_t eof_marker = mp_bswap_u32(0xd510aded); /* host byte order */ static const char inprogress_suffix[] = ".inprogress"; enum { /** * When the number of rows in xlog_tx write buffer * gets this big, don't delay flush any longer, and * issue a write. * This also acts as a default for slab size in the * slab cache so must be a power of 2. */ XLOG_TX_AUTOCOMMIT_THRESHOLD = 128 * 1024, /** * Compress output buffer before dumping it to * disk if it is at least this big. On smaller * sizes compression takes up CPU but doesn't * yield seizable gains. * Maybe this should be a configuration option. 
*/ XLOG_TX_COMPRESS_THRESHOLD = 2 * 1024, }; /* {{{ struct xlog_meta */ enum { /* * The maximum length of xlog meta * * @sa xlog_meta_parse() */ XLOG_META_LEN_MAX = 1024 + VCLOCK_STR_LEN_MAX }; #define INSTANCE_UUID_KEY "Instance" #define INSTANCE_UUID_KEY_V12 "Server" #define VCLOCK_KEY "VClock" #define VERSION_KEY "Version" static const char v13[] = "0.13"; static const char v12[] = "0.12"; /** * Format xlog metadata into @a buf of size @a size * * @param buf buffer to use. * @param size the size of buffer. This function write at most @a size bytes. * @retval < size the number of characters printed (excluding the null byte) * @retval >=size the number of characters (excluding the null byte), * which would have been written to the final string if * enough space had been available. * @retval -1 error, check diag * @sa snprintf() */ static int xlog_meta_format(const struct xlog_meta *meta, char *buf, int size) { char *vstr = vclock_to_string(&meta->vclock); if (vstr == NULL) return -1; char *instance_uuid = tt_uuid_str(&meta->instance_uuid); int total = snprintf(buf, size, "%s\n" "%s\n" VERSION_KEY ": %s\n" INSTANCE_UUID_KEY ": %s\n" VCLOCK_KEY ": %s\n\n", meta->filetype, v13, PACKAGE_VERSION, instance_uuid, vstr); assert(total > 0); free(vstr); return total; } /** * Parse xlog meta from buffer, update buffer read * position in case of success * * @retval 0 for success * @retval -1 for parse error * @retval 1 if buffer hasn't enough data */ static ssize_t xlog_meta_parse(struct xlog_meta *meta, const char **data, const char *data_end) { memset(meta, 0, sizeof(*meta)); const char *end = (const char *)memmem(*data, data_end - *data, "\n\n", 2); if (end == NULL) return 1; ++end; /* include the trailing \n to simplify the checks */ const char *pos = (const char *)*data; /* * Parse filetype, i.e "SNAP" or "XLOG" */ const char *eol = (const char *)memchr(pos, '\n', end - pos); if (eol == end || (eol - pos) >= (ptrdiff_t) sizeof(meta->filetype)) { diag_set(XlogError, 
"failed to parse xlog type string"); return -1; } memcpy(meta->filetype, pos, eol - pos); meta->filetype[eol - pos] = '\0'; pos = eol + 1; assert(pos <= end); /* * Parse version string, i.e. "0.12" or "0.13" */ char version[10]; eol = (const char *)memchr(pos, '\n', end - pos); if (eol == end || (eol - pos) >= (ptrdiff_t) sizeof(version)) { diag_set(XlogError, "failed to parse xlog version string"); return -1; } memcpy(version, pos, eol - pos); version[eol - pos] = '\0'; pos = eol + 1; assert(pos <= end); if (strncmp(version, v12, sizeof(v12)) != 0 && strncmp(version, v13, sizeof(v13)) != 0) { diag_set(XlogError, "unsupported file format version %s", version); return -1; } /* * Parse "key: value" pairs */ while (pos < end) { eol = (const char *)memchr(pos, '\n', end - pos); assert(eol <= end); const char *key = pos; const char *key_end = (const char *) memchr(key, ':', eol - key); if (key_end == NULL) { diag_set(XlogError, "can't extract meta value"); return -1; } const char *val = key_end + 1; /* Skip space after colon */ while (*val == ' ' || *val == '\t') ++val; const char *val_end = eol; assert(val <= val_end); pos = eol + 1; if (memcmp(key, INSTANCE_UUID_KEY, key_end - key) == 0 || memcmp(key, INSTANCE_UUID_KEY_V12, key_end - key) == 0) { /* * Instance: */ if (val_end - val != UUID_STR_LEN) { diag_set(XlogError, "can't parse instance UUID"); return -1; } char uuid[UUID_STR_LEN + 1]; memcpy(uuid, val, UUID_STR_LEN); uuid[UUID_STR_LEN] = '\0'; if (tt_uuid_from_string(uuid, &meta->instance_uuid) != 0) { diag_set(XlogError, "can't parse instance UUID"); return -1; } } else if (memcmp(key, VCLOCK_KEY, key_end - key) == 0){ /* * VClock: */ if (val_end - val > VCLOCK_STR_LEN_MAX) { diag_set(XlogError, "can't parse vclock"); return -1; } char vclock[VCLOCK_STR_LEN_MAX + 1]; memcpy(vclock, val, val_end - val); vclock[val_end - val] = '\0'; size_t off = vclock_from_string(&meta->vclock, vclock); ERROR_INJECT(ERRINJ_XLOG_META, { off = 1;}); if (off != 0) { 
diag_set(XlogError, "invalid vclock at " "offset %zd", off); return -1; } } else if (memcmp(key, VERSION_KEY, key_end - key) == 0) { /* Ignore Version: for now */ } else { /* * Unknown key */ say_warn("Unknown meta item: `%.*s'", key_end - key, key); } } *data = end + 1; /* skip the last trailing \n of \n\n sequence */ return 0; } /* struct xlog }}} */ /* {{{ struct xdir */ /* sync snapshot every 16MB */ #define SNAP_SYNC_INTERVAL (1 << 24) void xdir_create(struct xdir *dir, const char *dirname, enum xdir_type type, const struct tt_uuid *instance_uuid) { memset(dir, 0, sizeof(*dir)); vclockset_new(&dir->index); /* Default mode. */ dir->mode = 0660; dir->instance_uuid = instance_uuid; snprintf(dir->dirname, PATH_MAX, "%s", dirname); dir->open_wflags = 0; switch (type) { case SNAP: dir->filetype = "SNAP"; dir->filename_ext = ".snap"; dir->suffix = INPROGRESS; dir->sync_interval = SNAP_SYNC_INTERVAL; break; case XLOG: dir->sync_is_async = true; dir->filetype = "XLOG"; dir->filename_ext = ".xlog"; dir->suffix = NONE; dir->force_recovery = true; break; case VYLOG: dir->filetype = "VYLOG"; dir->filename_ext = ".vylog"; dir->suffix = INPROGRESS; break; default: unreachable(); } dir->type = type; } /** * Delete all members from the set of vector clocks. */ static void vclockset_reset(vclockset_t *set) { struct vclock *vclock = vclockset_first(set); while (vclock != NULL) { struct vclock *next = vclockset_next(set, vclock); vclockset_remove(set, vclock); free(vclock); vclock = next; } } /** * Destroy xdir object and free memory. */ void xdir_destroy(struct xdir *dir) { /** Free vclock objects allocated in xdir_scan(). */ vclockset_reset(&dir->index); } /** * Add a single log file to the index of all log files * in a given log directory. */ static inline int xdir_index_file(struct xdir *dir, int64_t signature) { /* * Open xlog and parse vclock in its text header. * The vclock stores the state of the log at the * time it is created. 
*/ struct xlog_cursor cursor; if (xdir_open_cursor(dir, signature, &cursor) < 0) return -1; struct xlog_meta *meta = &cursor.meta; /* * All log files in a directory must satisfy Lamport's * eventual order: events in each log file must be * separable with consistent cuts, for example: * * log1: {1, 1, 0, 1}, log2: {1, 2, 0, 2} -- good * log2: {1, 1, 0, 1}, log2: {2, 0, 2, 0} -- bad */ struct vclock *dup = vclockset_search(&dir->index, &meta->vclock); if (dup != NULL) { diag_set(XlogError, "%s: invalid xlog order", cursor.name); xlog_cursor_close(&cursor, false); return -1; } /* * Append the clock describing the file to the * directory index. */ struct vclock *vclock = (struct vclock *) malloc(sizeof(*vclock)); if (vclock == NULL) { diag_set(OutOfMemory, sizeof(*vclock), "malloc", "vclock"); xlog_cursor_close(&cursor, false); return -1; } vclock_copy(vclock, &meta->vclock); xlog_cursor_close(&cursor, false); vclockset_insert(&dir->index, vclock); return 0; } int xdir_open_cursor(struct xdir *dir, int64_t signature, struct xlog_cursor *cursor) { const char *filename = xdir_format_filename(dir, signature, NONE); int fd = open(filename, O_RDONLY); if (fd < 0) { diag_set(SystemError, "failed to open '%s' file", filename); return -1; } if (xlog_cursor_openfd(cursor, fd, filename) < 0) { close(fd); return -1; } struct xlog_meta *meta = &cursor->meta; if (strcmp(meta->filetype, dir->filetype) != 0) { xlog_cursor_close(cursor, false); diag_set(ClientError, ER_INVALID_XLOG_TYPE, dir->filetype, meta->filetype); return -1; } if (!tt_uuid_is_nil(dir->instance_uuid) && !tt_uuid_is_equal(dir->instance_uuid, &meta->instance_uuid)) { xlog_cursor_close(cursor, false); diag_set(XlogError, "%s: invalid instance UUID", filename); return -1; } /* * Check the match between log file name and contents: * the sum of vector clock coordinates must be the same * as the name of the file. 
*/ int64_t signature_check = vclock_sum(&meta->vclock); if (signature_check != signature) { xlog_cursor_close(cursor, false); diag_set(XlogError, "%s: signature check failed", filename); return -1; } return 0; } static int cmp_i64(const void *_a, const void *_b) { const int64_t *a = (const int64_t *) _a, *b = (const int64_t *) _b; if (*a == *b) return 0; return (*a > *b) ? 1 : -1; } /** * Scan (or rescan) a directory with snapshot or write ahead logs. * Read all files matching a pattern from the directory - * the filename pattern is \d+.xlog * The name of the file is based on its vclock signature, * which is the sum of all elements in the vector clock recorded * when the file was created. Elements in the vector * reflect log sequence numbers of replicas in the asynchronous * replication set (see also _cluster system space and vclock.h * comments). * * This function tries to avoid re-reading a file if * it is already in the set of files "known" to the log * dir object. This is done to speed up local hot standby and * recovery_follow_local(), which periodically rescan the * directory to discover newly created logs. * * On error, this function throws an exception. If * dir->force_recovery is true, *some* errors are not * propagated up but only logged in the error log file. * * The list of errors ignored in force_recovery = true mode * includes: * - a file can not be opened * - some of the files have incorrect metadata (such files are * skipped) * * The goal of force_recovery = true mode is partial recovery * from a damaged/incorrect data directory. It doesn't * silence conditions such as out of memory or lack of OS * resources. * * @return nothing. 
*/ int xdir_scan(struct xdir *dir) { DIR *dh = opendir(dir->dirname); /* log dir */ int64_t *signatures = NULL; /* log file names */ size_t s_count = 0, s_capacity = 0; if (dh == NULL) { diag_set(SystemError, "error reading directory '%s'", dir->dirname); return -1; } int rc = -1; struct vclock *vclock; struct dirent *dent; /* A note regarding thread safety, readdir vs. readdir_r: POSIX explicitly makes the following guarantee: "The pointer returned by readdir() points to data which may be overwritten by another call to readdir() on the same directory stream. This data is not overwritten by another call to readdir() on a different directory stream. In practice, you don't have a problem with readdir(3) because Android's bionic, Linux's glibc, and OS X and iOS' libc all allocate per-DIR* buffers, and return pointers into those; in Android's case, that buffer is currently about 8KiB. If future file systems mean that this becomes an actual limitation, we can fix the C library and all your applications will keep working. See also http://elliotth.blogspot.co.uk/2012/10/how-not-to-use-readdirr3.html */ while ((dent = readdir(dh)) != NULL) { char *ext = strchr(dent->d_name, '.'); if (ext == NULL) continue; /* * Compare the rest of the filename with * dir->filename_ext. */ if (strcmp(ext, dir->filename_ext) != 0) continue; char *dot; long long signature = strtoll(dent->d_name, &dot, 10); if (ext != dot || signature == LLONG_MAX || signature == LLONG_MIN) { say_warn("can't parse `%s', skipping", dent->d_name); continue; } if (s_count == s_capacity) { s_capacity = s_capacity > 0 ? 
2 * s_capacity : 16; size_t size = sizeof(*signatures) * s_capacity; signatures = (int64_t *) realloc(signatures, size); if (signatures == NULL) { diag_set(OutOfMemory, size, "realloc", "signatures array"); goto exit; } } signatures[s_count++] = signature; } /** Sort the list of files */ if (s_count > 0) qsort(signatures, s_count, sizeof(*signatures), cmp_i64); /** * Update the log dir index with the current state: * remove files which no longer exist, add files which * appeared since the last scan. */ vclock = vclockset_first(&dir->index); for (unsigned i = 0; i < s_count || vclock != NULL;) { int64_t s_old = vclock ? vclock_sum(vclock) : LLONG_MAX; int64_t s_new = i < s_count ? signatures[i] : LLONG_MAX; if (s_old < s_new) { /** Remove a deleted file from the index */ struct vclock *next = vclockset_next(&dir->index, vclock); vclockset_remove(&dir->index, vclock); free(vclock); vclock = next; } else if (s_old > s_new) { /** Add a new file. */ if (xdir_index_file(dir, s_new) != 0) { /* * force_recovery must not affect OOM */ struct error *e = diag_last_error(&fiber()->diag); if (!dir->force_recovery || type_assignable(&type_OutOfMemory, e->type)) goto exit; /** Skip a corrupted file */ error_log(e); } i++; } else { assert(s_old == s_new && i < s_count && vclock != NULL); vclock = vclockset_next(&dir->index, vclock); i++; } } rc = 0; exit: closedir(dh); free(signatures); return rc; } int xdir_check(struct xdir *dir) { DIR *dh = opendir(dir->dirname); /* log dir */ if (dh == NULL) { diag_set(SystemError, "error reading directory '%s'", dir->dirname); return -1; } closedir(dh); return 0; } char * xdir_format_filename(struct xdir *dir, int64_t signature, enum log_suffix suffix) { static __thread char filename[PATH_MAX + 1]; const char *suffix_str = (suffix == INPROGRESS ? 
inprogress_suffix : ""); snprintf(filename, PATH_MAX, "%s/%020lld%s%s", dir->dirname, (long long) signature, dir->filename_ext, suffix_str); return filename; } int xdir_collect_garbage(struct xdir *dir, int64_t signature, bool use_coio) { struct vclock *vclock; while ((vclock = vclockset_first(&dir->index)) != NULL && vclock_sum(vclock) < signature) { char *filename = xdir_format_filename(dir, vclock_sum(vclock), NONE); say_info("removing %s", filename); int rc; if (use_coio) rc = coio_unlink(filename); else rc = unlink(filename); if (rc < 0 && errno != ENOENT) { say_syserror("error while removing %s", filename); diag_set(SystemError, "failed to unlink file '%s'", filename); return -1; } vclockset_remove(&dir->index, vclock); free(vclock); } return 0; } /* }}} */ /* {{{ struct xlog */ int xlog_rename(struct xlog *l) { char *filename = l->filename; char new_filename[PATH_MAX]; char *suffix = strrchr(filename, '.'); assert(l->is_inprogress); assert(suffix); assert(strcmp(suffix, inprogress_suffix) == 0); /* Create a new filename without '.inprogress' suffix. 
*/ memcpy(new_filename, filename, suffix - filename); new_filename[suffix - filename] = '\0'; if (rename(filename, new_filename) != 0) { say_syserror("can't rename %s to %s", filename, new_filename); diag_set(SystemError, "failed to rename '%s' file", filename); return -1; } l->is_inprogress = false; return 0; } static int xlog_init(struct xlog *xlog) { memset(xlog, 0, sizeof(*xlog)); xlog->sync_interval = SNAP_SYNC_INTERVAL; xlog->sync_time = ev_monotonic_time(); xlog->is_autocommit = true; obuf_create(&xlog->obuf, &cord()->slabc, XLOG_TX_AUTOCOMMIT_THRESHOLD); obuf_create(&xlog->zbuf, &cord()->slabc, XLOG_TX_AUTOCOMMIT_THRESHOLD); xlog->zctx = ZSTD_createCCtx(); if (xlog->zctx == NULL) { diag_set(ClientError, ER_COMPRESSION, "failed to create context"); return -1; } return 0; } void xlog_clear(struct xlog *l) { memset(l, 0, sizeof(*l)); l->fd = -1; } static void xlog_destroy(struct xlog *xlog) { obuf_destroy(&xlog->obuf); obuf_destroy(&xlog->zbuf); ZSTD_freeCCtx(xlog->zctx); TRASH(xlog); xlog->fd = -1; } int xlog_create(struct xlog *xlog, const char *name, int flags, const struct xlog_meta *meta) { char meta_buf[XLOG_META_LEN_MAX]; int meta_len; /* * Check that the file without .inprogress suffix doesn't exist. */ if (access(name, F_OK) == 0) { errno = EEXIST; diag_set(SystemError, "file '%s' already exists", name); goto err; } if (xlog_init(xlog) != 0) goto err; xlog->meta = *meta; xlog->is_inprogress = true; snprintf(xlog->filename, PATH_MAX, "%s%s", name, inprogress_suffix); flags |= O_RDWR | O_CREAT | O_EXCL; /* * Open the ..inprogress file. * If it exists, open will fail. Always open/create * a file with .inprogress suffix: for snapshots, * the rename is done when the snapshot is complete. * Fox xlogs, we can rename only when we have written * the log file header, otherwise replication relay * may think that this is a corrupt file and stop * replication. 
*/ xlog->fd = open(xlog->filename, flags, 0644); if (xlog->fd < 0) { say_syserror("open, [%s]", name); diag_set(SystemError, "failed to create file '%s'", name); goto err_open; } /* Format metadata */ meta_len = xlog_meta_format(&xlog->meta, meta_buf, sizeof(meta_buf)); if (meta_len < 0) goto err_write; /* Formatted metadata must fit into meta_buf */ assert(meta_len < (int)sizeof(meta_buf)); /* Write metadata */ if (fio_writen(xlog->fd, meta_buf, meta_len) < 0) { diag_set(SystemError, "%s: failed to write xlog meta", name); goto err_write; } xlog->offset = meta_len; /* first log starts after meta */ return 0; err_write: close(xlog->fd); unlink(xlog->filename); /* try to remove incomplete file */ err_open: xlog_destroy(xlog); err: return -1; } int xlog_open(struct xlog *xlog, const char *name) { char magic[sizeof(log_magic_t)]; char meta_buf[XLOG_META_LEN_MAX]; const char *meta = meta_buf; int meta_len; int rc; if (xlog_init(xlog) != 0) goto err; strncpy(xlog->filename, name, PATH_MAX); xlog->fd = open(xlog->filename, O_RDWR); if (xlog->fd < 0) { say_syserror("open, [%s]", name); diag_set(SystemError, "failed to open file '%s'", name); goto err_open; } meta_len = fio_read(xlog->fd, meta_buf, sizeof(meta_buf)); if (meta_len < 0) { diag_set(SystemError, "failed to read file '%s'", xlog->filename); goto err_read; } rc = xlog_meta_parse(&xlog->meta, &meta, meta + meta_len); if (rc < 0) goto err_read; if (rc > 0) { diag_set(XlogError, "Unexpected end of file"); goto err_read; } /* * If the file has eof marker, reposition the file pointer so * that the next write will overwrite it. */ xlog->offset = fio_lseek(xlog->fd, -(off_t)sizeof(magic), SEEK_END); if (xlog->offset < 0) goto no_eof; /* Use pread() so as not to change file pointer. 
*/ rc = fio_pread(xlog->fd, magic, sizeof(magic), xlog->offset); if (rc < 0) { diag_set(SystemError, "failed to read file '%s'", xlog->filename); goto err_read; } if (rc != sizeof(magic) || load_u32(magic) != eof_marker) { no_eof: xlog->offset = fio_lseek(xlog->fd, 0, SEEK_END); if (xlog->offset < 0) { diag_set(SystemError, "failed to seek file '%s'", xlog->filename); goto err_read; } } return 0; err_read: close(xlog->fd); err_open: xlog_destroy(xlog); err: return -1; } int xdir_touch_xlog(struct xdir *dir, const struct vclock *vclock) { char *filename; int64_t signature = vclock_sum(vclock); filename = xdir_format_filename(dir, signature, NONE); if (dir->type != SNAP) { assert(false); diag_set(SystemError, "Can't touch xlog '%s'", filename); return -1; } if (utime(filename, NULL) != 0) { diag_set(SystemError, "Can't update xlog timestamp: '%s'", filename); return -1; } return 0; } /** * In case of error, writes a message to the error log * and sets errno. */ int xdir_create_xlog(struct xdir *dir, struct xlog *xlog, const struct vclock *vclock) { char *filename; int64_t signature = vclock_sum(vclock); struct xlog_meta meta; assert(signature >= 0); assert(!tt_uuid_is_nil(dir->instance_uuid)); /* * Check whether a file with this name already exists. * We don't overwrite existing files. */ filename = xdir_format_filename(dir, signature, NONE); /* Setup inherited values */ snprintf(meta.filetype, sizeof(meta.filetype), "%s", dir->filetype); meta.instance_uuid = *dir->instance_uuid; vclock_copy(&meta.vclock, vclock); if (xlog_create(xlog, filename, dir->open_wflags, &meta) != 0) return -1; /* set sync interval from xdir settings */ xlog->sync_interval = dir->sync_interval; /* free file cache if dir should be synced */ xlog->free_cache = dir->sync_interval != 0 ? 
true: false; xlog->rate_limit = 0; /* Rename xlog file */ if (dir->suffix != INPROGRESS && xlog_rename(xlog)) { int save_errno = errno; xlog_close(xlog, false); errno = save_errno; return -1; } return 0; } /** * Write a sequence of uncompressed xrow objects. * * @retval -1 error * @retval >= 0 the number of bytes written */ static off_t xlog_tx_write_plain(struct xlog *log) { /** * We created an obuf savepoint at start of xlog_tx, * now populate it with data. */ char *fixheader = (char *)log->obuf.iov[0].iov_base; *(log_magic_t *)fixheader = row_marker; char *data = fixheader + sizeof(log_magic_t); data = mp_encode_uint(data, obuf_size(&log->obuf) - XLOG_FIXHEADER_SIZE); /* Encode crc32 for previous row */ data = mp_encode_uint(data, 0); /* Encode crc32 for current row */ uint32_t crc32c = 0; struct iovec *iov; size_t offset = XLOG_FIXHEADER_SIZE; for (iov = log->obuf.iov; iov->iov_len; ++iov) { crc32c = crc32_calc(crc32c, (char *)iov->iov_base + offset, iov->iov_len - offset); offset = 0; } data = mp_encode_uint(data, crc32c); /* * Encode a padding, to ensure the resulting * fixheader always has the same size. */ ssize_t padding = XLOG_FIXHEADER_SIZE - (data - fixheader); if (padding > 0) { data = mp_encode_strl(data, padding - 1); if (padding > 1) { memset(data, 0, padding - 1); data += padding - 1; } } ERROR_INJECT(ERRINJ_WAL_WRITE_DISK, { diag_set(ClientError, ER_INJECTION, "xlog write injection"); return -1; }); ssize_t written = fio_writevn(log->fd, log->obuf.iov, log->obuf.pos + 1); if (written < 0) { diag_set(SystemError, "failed to write to '%s' file", log->filename); return -1; } return obuf_size(&log->obuf); } /** * Write a compressed block of xrow objects. * @retval -1 error * @retval >= 0 the number of bytes written */ static off_t xlog_tx_write_zstd(struct xlog *log) { char *fixheader = (char *)obuf_alloc(&log->zbuf, XLOG_FIXHEADER_SIZE); uint32_t crc32c = 0; struct iovec *iov; /* 3 is compression level. 
*/ ZSTD_compressBegin(log->zctx, 3); size_t offset = XLOG_FIXHEADER_SIZE; for (iov = log->obuf.iov; iov->iov_len; ++iov) { /* Estimate max output buffer size. */ size_t zmax_size = ZSTD_compressBound(iov->iov_len - offset); /* Allocate a destination buffer. */ void *zdst = obuf_reserve(&log->zbuf, zmax_size); if (!zdst) { diag_set(OutOfMemory, zmax_size, "runtime arena", "compression buffer"); goto error; } size_t (*fcompress)(ZSTD_CCtx *, void *, size_t, const void *, size_t); /* * If it's the last iov or the last * log has 0 bytes, end the stream. */ if (iov == log->obuf.iov + log->obuf.pos || !(iov + 1)->iov_len) { fcompress = ZSTD_compressEnd; } else { fcompress = ZSTD_compressContinue; } size_t zsize = fcompress(log->zctx, zdst, zmax_size, (char *)iov->iov_base + offset, iov->iov_len - offset); if (ZSTD_isError(zsize)) { diag_set(ClientError, ER_COMPRESSION, ZSTD_getErrorName(zsize)); goto error; } /* Advance output buffer to the end of compressed data. */ obuf_alloc(&log->zbuf, zsize); /* Update crc32c */ crc32c = crc32_calc(crc32c, (char *)zdst, zsize); /* Discount fixheader size for all iovs after first. 
*/ offset = 0; } *(log_magic_t *)fixheader = zrow_marker; char *data; data = fixheader + sizeof(log_magic_t); data = mp_encode_uint(data, obuf_size(&log->zbuf) - XLOG_FIXHEADER_SIZE); /* Encode crc32 for previous row */ data = mp_encode_uint(data, 0); /* Encode crc32 for current row */ data = mp_encode_uint(data, crc32c); /* Encode padding */ ssize_t padding; padding = XLOG_FIXHEADER_SIZE - (data - fixheader); if (padding > 0) { data = mp_encode_strl(data, padding - 1); if (padding > 1) { memset(data, 0, padding - 1); data += padding - 1; } } ERROR_INJECT(ERRINJ_WAL_WRITE_DISK, { diag_set(ClientError, ER_INJECTION, "xlog write injection"); obuf_reset(&log->zbuf); goto error; }); ssize_t written; written = fio_writevn(log->fd, log->zbuf.iov, log->zbuf.pos + 1); if (written < 0) { diag_set(SystemError, "failed to write to '%s' file", log->filename); goto error; } obuf_reset(&log->zbuf); return written; error: obuf_reset(&log->zbuf); return -1; } /* file syncing and posix_fadvise() should be rounded by a page boundary */ #define SYNC_MASK (4096 - 1) #define SYNC_ROUND_DOWN(size) ((size) & ~(4096 - 1)) #define SYNC_ROUND_UP(size) (SYNC_ROUND_DOWN(size + SYNC_MASK)) /** * Writes xlog batch to file */ static ssize_t xlog_tx_write(struct xlog *log) { if (obuf_size(&log->obuf) == XLOG_FIXHEADER_SIZE) return 0; ssize_t written; if (obuf_size(&log->obuf) >= XLOG_TX_COMPRESS_THRESHOLD) { written = xlog_tx_write_zstd(log); } else { written = xlog_tx_write_plain(log); } ERROR_INJECT(ERRINJ_WAL_WRITE, { diag_set(ClientError, ER_INJECTION, "xlog write injection"); written = -1; }); obuf_reset(&log->obuf); /* * Simplify recovery after a temporary write failure: * truncate the file to the best known good write * position. 
*/ if (written < 0) { if (lseek(log->fd, log->offset, SEEK_SET) < 0 || ftruncate(log->fd, log->offset) != 0) panic_syserror("failed to truncate xlog after write error"); return -1; } log->offset += written; log->rows += log->tx_rows; log->tx_rows = 0; if ((log->sync_interval && log->offset >= (off_t)(log->synced_size + log->sync_interval)) || (log->rate_limit && log->offset >= (off_t)(log->synced_size + log->rate_limit))) { off_t sync_from = SYNC_ROUND_DOWN(log->synced_size); size_t sync_len = SYNC_ROUND_UP(log->offset) - sync_from; if (log->rate_limit > 0) { double throttle_time; throttle_time = (double)sync_len / log->rate_limit - (ev_monotonic_time() - log->sync_time); if (throttle_time > 0) fiber_sleep(throttle_time); } /** sync data from cache to disk */ #ifdef HAVE_SYNC_FILE_RANGE sync_file_range(log->fd, sync_from, sync_len, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER); #else fdatasync(log->fd); #endif /* HAVE_SYNC_FILE_RANGE */ log->sync_time = ev_monotonic_time(); if (log->free_cache) { #ifdef HAVE_POSIX_FADVISE /** free page cache */ if (posix_fadvise(log->fd, sync_from, sync_len, POSIX_FADV_DONTNEED) != 0) { say_syserror("posix_fadvise, fd=%i", log->fd); } #else (void) sync_from; (void) sync_len; #endif /* HAVE_POSIX_FADVISE */ } log->synced_size = log->offset; } return written; } /* * Add a row to a log and possibly flush the log. * * @retval -1 error, check diag. * @retval >=0 the number of bytes written to buffer. */ ssize_t xlog_write_row(struct xlog *log, const struct xrow_header *packet) { /* * Automatically reserve space for a fixheader when adding * the first row in * a log. The fixheader is populated * at write. @sa xlog_tx_write(). 
*/ if (obuf_size(&log->obuf) == 0) { if (!obuf_alloc(&log->obuf, XLOG_FIXHEADER_SIZE)) { diag_set(OutOfMemory, XLOG_FIXHEADER_SIZE, "runtime arena", "xlog tx output buffer"); return -1; } } struct obuf_svp svp = obuf_create_svp(&log->obuf); size_t page_offset = obuf_size(&log->obuf); /** encode row into iovec */ struct iovec iov[XROW_IOVMAX]; /** don't write sync to the disk */ int iovcnt = xrow_header_encode(packet, 0, iov, 0); if (iovcnt < 0) { obuf_rollback_to_svp(&log->obuf, &svp); return -1; } for (int i = 0; i < iovcnt; ++i) { struct errinj *inj = errinj(ERRINJ_WAL_WRITE_PARTIAL, ERRINJ_INT); if (inj != NULL && inj->iparam >= 0 && obuf_size(&log->obuf) > (size_t)inj->iparam) { diag_set(ClientError, ER_INJECTION, "xlog write injection"); obuf_rollback_to_svp(&log->obuf, &svp); return -1; }; if (obuf_dup(&log->obuf, iov[i].iov_base, iov[i].iov_len) < iov[i].iov_len) { diag_set(OutOfMemory, XLOG_FIXHEADER_SIZE, "runtime arena", "xlog tx output buffer"); obuf_rollback_to_svp(&log->obuf, &svp); return -1; } } assert(iovcnt <= XROW_IOVMAX); log->tx_rows++; size_t row_size = obuf_size(&log->obuf) - page_offset; if (log->is_autocommit && obuf_size(&log->obuf) >= XLOG_TX_AUTOCOMMIT_THRESHOLD && xlog_tx_write(log) < 0) return -1; return row_size; } /** * Begin a multi-statement xlog transaction. All xrow objects * of a single transaction share the same header and checksum * and are normally written at once. */ void xlog_tx_begin(struct xlog *log) { log->is_autocommit = false; } /* * End a non-interruptible batch of rows, thus enable flushes of * a transaction at any time, on threshold. If the buffer is big * enough already, flush it at once here. 
* * @retval -1 error * @retval >= 0 the number of bytes written to disk */ ssize_t xlog_tx_commit(struct xlog *log) { log->is_autocommit = true; if (obuf_size(&log->obuf) >= XLOG_TX_AUTOCOMMIT_THRESHOLD) { return xlog_tx_write(log); } return 0; } /* * Rollback a batch of buffered rows without writing to file */ void xlog_tx_rollback(struct xlog *log) { log->is_autocommit = true; log->tx_rows = 0; obuf_reset(&log->obuf); } /** * Flush any outstanding xlog_tx transactions at the end of * a WAL write batch. */ ssize_t xlog_flush(struct xlog *log) { assert(log->is_autocommit); if (log->obuf.used == 0) return 0; return xlog_tx_write(log); } static int sync_cb(eio_req *req) { int fd = (intptr_t) req->data; if (req->result) { errno = req->errorno; say_syserror("%s: fsync() failed", fio_filename(fd)); errno = 0; } close(fd); return 0; } int xlog_sync(struct xlog *l) { if (l->sync_is_async) { int fd = dup(l->fd); if (fd == -1) { say_syserror("%s: dup() failed", l->filename); return -1; } eio_fsync(fd, 0, sync_cb, (void *) (intptr_t) fd); } else if (fsync(l->fd) < 0) { say_syserror("%s: fsync failed", l->filename); return -1; } return 0; } static int xlog_write_eof(struct xlog *l) { ERROR_INJECT(ERRINJ_WAL_WRITE_EOF, { diag_set(ClientError, ER_INJECTION, "xlog write injection"); return -1; }); if (fio_writen(l->fd, &eof_marker, sizeof(eof_marker)) < 0) { diag_set(SystemError, "write() failed"); return -1; } return 0; } int xlog_close(struct xlog *l, bool reuse_fd) { int rc = xlog_write_eof(l); if (rc < 0) say_error("%s: failed to write EOF marker: %s", l->filename, diag_last_error(diag_get())->errmsg); /* * Sync the file before closing, since * otherwise we can end up with a partially * written file in case of a crash. 
* We sync even if file open O_SYNC, simplify code for low cost */ xlog_sync(l); if (!reuse_fd) { rc = close(l->fd); if (rc < 0) say_syserror("%s: close() failed", l->filename); } xlog_destroy(l); return rc; } /** * Free xlog memory and destroy it cleanly, without side * effects (for use in the atfork handler). */ void xlog_atfork(struct xlog *xlog) { /* * Close the file descriptor STDIO buffer does not * make its way into the respective file in * fclose(). */ close(xlog->fd); xlog->fd = -1; } /* }}} */ /* {{{ struct xlog_cursor */ #define XLOG_READ_AHEAD (1 << 14) /** * Ensure that at least count bytes are in read buffer * * @retval 0 at least count bytes are in read buf * @retval 1 if eof * @retval -1 if error */ static int xlog_cursor_ensure(struct xlog_cursor *cursor, size_t count) { if (ibuf_used(&cursor->rbuf) >= count) return 0; /* in-memory mode */ if (cursor->fd < 0) return 1; size_t to_load = count - ibuf_used(&cursor->rbuf); to_load += XLOG_READ_AHEAD; void *dst = ibuf_reserve(&cursor->rbuf, to_load); if (dst == NULL) { diag_set(OutOfMemory, to_load, "runtime", "xlog cursor read buffer"); return -1; } ssize_t readen; readen = fio_pread(cursor->fd, dst, to_load, cursor->read_offset); struct errinj *inj = errinj(ERRINJ_XLOG_READ, ERRINJ_INT); if (inj != NULL && inj->iparam >= 0 && inj->iparam < cursor->read_offset) { readen = -1; errno = EIO; }; if (readen < 0) { diag_set(SystemError, "failed to read '%s' file", cursor->name); return -1; } /* ibuf_reserve() has been called above, ibuf_alloc() must not fail */ assert((size_t)readen <= to_load); ibuf_alloc(&cursor->rbuf, readen); cursor->read_offset += readen; return ibuf_used(&cursor->rbuf) >= count ? 
0: 1; } /** * Decompress zstd-compressed buf into cursor row block * * @retval -1 error, check diag * @retval 0 data fully decompressed * @retval 1 need more bytes in the output buffer */ static int xlog_cursor_decompress(char **rows, char *rows_end, const char **data, const char *data_end, ZSTD_DStream *zdctx) { ZSTD_inBuffer input = {*data, (size_t)(data_end - *data), 0}; ZSTD_outBuffer output = {*rows, (size_t)(rows_end - *rows), 0}; while (input.pos < input.size && output.pos < output.size) { size_t rc = ZSTD_decompressStream(zdctx, &output, &input); if (ZSTD_isError(rc)) { diag_set(ClientError, ER_DECOMPRESSION, ZSTD_getErrorName(rc)); return -1; } assert(output.pos <= (size_t)(rows_end - *rows)); *rows = (char *)output.dst + output.pos; *data = (char *)input.src + input.pos; } return input.pos == input.size ? 0: 1; } /** * xlog fixheader struct */ struct xlog_fixheader { /** * xlog tx magic, row_marker for plain xrows * or zrow_marker for compressed. */ log_magic_t magic; /** * crc32 for the previous xlog tx, not used now */ uint32_t crc32p; /** * crc32 for current xlog tx */ uint32_t crc32c; /** * xlog tx data length excluding fixheader */ uint32_t len; }; /** * Decode xlog tx header, set up magic, crc32c and len * * @retval 0 for success * @retval -1 for error * @retval count of bytes left to parse header */ static ssize_t xlog_fixheader_decode(struct xlog_fixheader *fixheader, const char **data, const char *data_end) { if (data_end - *data < (ptrdiff_t)XLOG_FIXHEADER_SIZE) return XLOG_FIXHEADER_SIZE - (data_end - *data); const char *pos = *data; const char *end = pos + XLOG_FIXHEADER_SIZE; /* Decode magic */ fixheader->magic = load_u32(pos); if (fixheader->magic != row_marker && fixheader->magic != zrow_marker) { diag_set(XlogError, "invalid magic: 0x%x", fixheader->magic); return -1; } pos += sizeof(fixheader->magic); /* Read length */ const char *val = pos; if (pos >= end || mp_check(&pos, end) != 0 || mp_typeof(*val) != MP_UINT) { diag_set(XlogError, 
"broken fixheader length"); return -1; } fixheader->len = mp_decode_uint(&val); assert(val == pos); if (fixheader->len > IPROTO_BODY_LEN_MAX) { diag_set(XlogError, "too large fixheader length"); return -1; } /* Read previous crc32 */ if (pos >= end || mp_check(&pos, end) != 0 || mp_typeof(*val) != MP_UINT) { diag_set(XlogError, "broken fixheader crc32p"); return -1; } fixheader->crc32p = mp_decode_uint(&val); assert(val == pos); /* Read current crc32 */ if (pos >= end || mp_check(&pos, end) != 0 || mp_typeof(*val) != MP_UINT) { diag_set(XlogError, "broken fixheader crc32c"); return -1; } fixheader->crc32c = mp_decode_uint(&val); assert(val == pos); /* Check and skip padding if any */ if (pos < end && (mp_check(&pos, end) != 0 || pos != end)) { diag_set(XlogError, "broken fixheader padding"); return -1; } assert(pos == end); *data = end; return 0; } int xlog_tx_decode(const char *data, const char *data_end, char *rows, char *rows_end, ZSTD_DStream *zdctx) { /* Decode fixheader */ struct xlog_fixheader fixheader; if (xlog_fixheader_decode(&fixheader, &data, data_end) != 0) return -1; /* Check that buffer has enough bytes */ if (data + fixheader.len != data_end) { diag_set(XlogError, "invalid compressed length: " "expected %zd, got %u", data_end - data, fixheader.len); return -1; } ERROR_INJECT(ERRINJ_XLOG_GARBAGE, { *((char *)data + fixheader.len / 2) = ~*((char *)data + fixheader.len / 2); }); /* Validate checksum */ if (crc32_calc(0, data, fixheader.len) != fixheader.crc32c) { diag_set(XlogError, "tx checksum mismatch"); return -1; } /* Copy uncompressed rows */ if (fixheader.magic == row_marker) { if (rows_end - rows != (ptrdiff_t)fixheader.len) { diag_set(XlogError, "invalid unpacked length: " "expected %zd, got %u", rows_end - data, fixheader.len); return -1; } memcpy(rows, data, fixheader.len); return 0; } /* Decompress zstd rows */ assert(fixheader.magic == zrow_marker); ZSTD_initDStream(zdctx); int rc = xlog_cursor_decompress(&rows, rows_end, &data, data_end, 
zdctx); if (rc < 0) { return -1; } else if (rc > 0) { diag_set(XlogError, "invalid decompressed length: " "expected %zd, got %zd", rows_end - data, rows_end - data + XLOG_TX_AUTOCOMMIT_THRESHOLD); return -1; } assert(data == data_end); return 0; } /** * @retval -1 error * @retval 0 success * @retval >0 how many bytes we will have for continue */ ssize_t xlog_tx_cursor_create(struct xlog_tx_cursor *tx_cursor, const char **data, const char *data_end, ZSTD_DStream *zdctx) { const char *rpos = *data; struct xlog_fixheader fixheader; ssize_t to_load; to_load = xlog_fixheader_decode(&fixheader, &rpos, data_end); if (to_load != 0) return to_load; /* Check that buffer has enough bytes */ if ((data_end - rpos) < (ptrdiff_t)fixheader.len) return fixheader.len - (data_end - rpos); ERROR_INJECT(ERRINJ_XLOG_GARBAGE, { *((char *)rpos + fixheader.len / 2) = ~*((char *)rpos + fixheader.len / 2); }); /* Validate checksum */ if (crc32_calc(0, rpos, fixheader.len) != fixheader.crc32c) { diag_set(XlogError, "tx checksum mismatch"); return -1; } data_end = rpos + fixheader.len; ibuf_create(&tx_cursor->rows, &cord()->slabc, XLOG_TX_AUTOCOMMIT_THRESHOLD); if (fixheader.magic == row_marker) { void *dst = ibuf_alloc(&tx_cursor->rows, fixheader.len); if (dst == NULL) { diag_set(OutOfMemory, fixheader.len, "runtime", "xlog rows buffer"); ibuf_destroy(&tx_cursor->rows); return -1; } memcpy(dst, rpos, fixheader.len); *data = (char *)rpos + fixheader.len; assert(*data <= data_end); tx_cursor->size = ibuf_used(&tx_cursor->rows); return 0; }; assert(fixheader.magic == zrow_marker); ZSTD_initDStream(zdctx); int rc; do { if (ibuf_reserve(&tx_cursor->rows, XLOG_TX_AUTOCOMMIT_THRESHOLD) == NULL) { diag_set(OutOfMemory, XLOG_TX_AUTOCOMMIT_THRESHOLD, "runtime", "xlog output buffer"); ibuf_destroy(&tx_cursor->rows); return -1; } } while ((rc = xlog_cursor_decompress(&tx_cursor->rows.wpos, tx_cursor->rows.end, &rpos, data_end, zdctx)) == 1); if (rc != 0) return -1; *data = rpos; assert(*data <= 
data_end); tx_cursor->size = ibuf_used(&tx_cursor->rows); return 0; } int xlog_tx_cursor_next_row(struct xlog_tx_cursor *tx_cursor, struct xrow_header *xrow) { if (ibuf_used(&tx_cursor->rows) == 0) return 1; /* Return row from xlog tx buffer */ int rc = xrow_header_decode(xrow, (const char **)&tx_cursor->rows.rpos, (const char *)tx_cursor->rows.wpos); if (rc != 0) { diag_set(XlogError, "can't parse row"); /* Discard remaining row data */ ibuf_reset(&tx_cursor->rows); return -1; } return 0; } int xlog_tx_cursor_destroy(struct xlog_tx_cursor *tx_cursor) { ibuf_destroy(&tx_cursor->rows); return 0; } /** * Find a next xlog tx magic */ int xlog_cursor_find_tx_magic(struct xlog_cursor *i) { assert(xlog_cursor_is_open(i)); log_magic_t magic; do { /* * Read one extra byte to start searching from the next * byte. */ int rc = xlog_cursor_ensure(i, sizeof(log_magic_t) + 1); if (rc < 0) return -1; if (rc == 1) return 1; ++i->rbuf.rpos; assert(i->rbuf.rpos + sizeof(log_magic_t) <= i->rbuf.wpos); magic = load_u32(i->rbuf.rpos); } while (magic != row_marker && magic != zrow_marker); return 0; } int xlog_cursor_next_tx(struct xlog_cursor *i) { int rc; assert(xlog_cursor_is_open(i)); /* load at least magic to check eof */ rc = xlog_cursor_ensure(i, sizeof(log_magic_t)); if (rc < 0) return -1; if (rc > 0) return 1; if (load_u32(i->rbuf.rpos) == eof_marker) { /* eof marker found */ goto eof_found; } ssize_t to_load; while ((to_load = xlog_tx_cursor_create(&i->tx_cursor, (const char **)&i->rbuf.rpos, i->rbuf.wpos, i->zdctx)) > 0) { /* not enough data in read buffer */ int rc = xlog_cursor_ensure(i, ibuf_used(&i->rbuf) + to_load); if (rc < 0) return -1; if (rc > 0) return 1; } if (to_load < 0) return -1; i->state = XLOG_CURSOR_TX; return 0; eof_found: /* * A eof marker is read, check that there is no * more data in the file. 
*/ rc = xlog_cursor_ensure(i, sizeof(log_magic_t) + sizeof(char)); if (rc < 0) return -1; if (rc == 0) { diag_set(XlogError, "%s: has some data after " "eof marker at %lld", i->name, xlog_cursor_pos(i)); return -1; } i->state = XLOG_CURSOR_EOF; return 1; } int xlog_cursor_next_row(struct xlog_cursor *cursor, struct xrow_header *xrow) { assert(xlog_cursor_is_open(cursor)); if (cursor->state != XLOG_CURSOR_TX) return 1; int rc = xlog_tx_cursor_next_row(&cursor->tx_cursor, xrow); if (rc != 0) { cursor->state = XLOG_CURSOR_ACTIVE; xlog_tx_cursor_destroy(&cursor->tx_cursor); } return rc; } int xlog_cursor_next(struct xlog_cursor *cursor, struct xrow_header *xrow, bool force_recovery) { assert(xlog_cursor_is_open(cursor)); while (true) { int rc; rc = xlog_cursor_next_row(cursor, xrow); if (rc == 0) break; if (rc < 0) { struct error *e = diag_last_error(diag_get()); if (!force_recovery || e->type != &type_XlogError) return -1; say_error("can't decode row: %s", e->errmsg); } while ((rc = xlog_cursor_next_tx(cursor)) < 0) { struct error *e = diag_last_error(diag_get()); if (!force_recovery || e->type != &type_XlogError) return -1; say_error("can't open tx: %s", e->errmsg); if ((rc = xlog_cursor_find_tx_magic(cursor)) < 0) return -1; if (rc > 0) break; } if (rc == 1) return 1; } return 0; } int xlog_cursor_openfd(struct xlog_cursor *i, int fd, const char *name) { memset(i, 0, sizeof(*i)); i->fd = fd; ibuf_create(&i->rbuf, &cord()->slabc, XLOG_TX_AUTOCOMMIT_THRESHOLD << 1); ssize_t rc; /* * we can have eof here, but this is no error, * because we don't know exact meta size */ rc = xlog_cursor_ensure(i, XLOG_META_LEN_MAX); if (rc == -1) goto error; rc = xlog_meta_parse(&i->meta, (const char **)&i->rbuf.rpos, (const char *)i->rbuf.wpos); if (rc == -1) goto error; if (rc > 0) { diag_set(XlogError, "Unexpected end of file, run with 'force_recovery = true'"); goto error; } snprintf(i->name, PATH_MAX, "%s", name); i->zdctx = ZSTD_createDStream(); if (i->zdctx == NULL) { 
diag_set(ClientError, ER_DECOMPRESSION, "failed to create context"); goto error; } i->state = XLOG_CURSOR_ACTIVE; return 0; error: ibuf_destroy(&i->rbuf); return -1; } int xlog_cursor_open(struct xlog_cursor *i, const char *name) { int fd = open(name, O_RDONLY); if (fd < 0) { diag_set(SystemError, "failed to open '%s' file", name); return -1; } int rc = xlog_cursor_openfd(i, fd, name); if (rc < 0) { close(fd); return -1; } return 0; } int xlog_cursor_openmem(struct xlog_cursor *i, const char *data, size_t size, const char *name) { memset(i, 0, sizeof(*i)); i->fd = -1; ibuf_create(&i->rbuf, &cord()->slabc, XLOG_TX_AUTOCOMMIT_THRESHOLD << 1); void *dst = ibuf_alloc(&i->rbuf, size); if (dst == NULL) { diag_set(OutOfMemory, size, "runtime", "xlog cursor read buffer"); goto error; } memcpy(dst, data, size); i->read_offset = size; int rc; rc = xlog_meta_parse(&i->meta, (const char **)&i->rbuf.rpos, (const char *)i->rbuf.wpos); if (rc < 0) goto error; if (rc > 0) { diag_set(XlogError, "Unexpected end of file"); goto error; } snprintf(i->name, PATH_MAX, "%s", name); i->zdctx = ZSTD_createDStream(); if (i->zdctx == NULL) { diag_set(ClientError, ER_DECOMPRESSION, "failed to create context"); goto error; } i->state = XLOG_CURSOR_ACTIVE; return 0; error: ibuf_destroy(&i->rbuf); return -1; } int xlog_cursor_reset(struct xlog_cursor *cursor) { assert(xlog_cursor_is_open(cursor)); cursor->rbuf.rpos = cursor->rbuf.buf; if (ibuf_used(&cursor->rbuf) != (size_t)cursor->read_offset) { cursor->rbuf.wpos = cursor->rbuf.buf; cursor->read_offset = 0; } if (cursor->state == XLOG_CURSOR_TX) xlog_tx_cursor_destroy(&cursor->tx_cursor); if (xlog_cursor_ensure(cursor, XLOG_META_LEN_MAX) == -1) return -1; int rc; rc = xlog_meta_parse(&cursor->meta, (const char **)&cursor->rbuf.rpos, (const char *)cursor->rbuf.wpos); if (rc == -1) return -1; if (rc > 0) { diag_set(XlogError, "Unexpected end of file"); return -1; } cursor->state = XLOG_CURSOR_ACTIVE; return 0; } void xlog_cursor_close(struct 
xlog_cursor *i, bool reuse_fd) { assert(xlog_cursor_is_open(i)); if (i->fd >= 0 && !reuse_fd) close(i->fd); ibuf_destroy(&i->rbuf); if (i->state == XLOG_CURSOR_TX) xlog_tx_cursor_destroy(&i->tx_cursor); ZSTD_freeDStream(i->zdctx); i->state = (i->state == XLOG_CURSOR_EOF ? XLOG_CURSOR_EOF_CLOSED : XLOG_CURSOR_CLOSED); /* * Do not trash the cursor object since the caller might * still want to access its state and/or meta information. */ } /* }}} */ tarantool_1.9.1.26.g63eb81e3c/src/box/func.c0000664000000000000000000003034313306560010016661 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "func.h" #include "trivia/config.h" #include "assoc.h" #include "lua/utils.h" #include "error.h" #include "diag.h" #include /** * Parsed symbol and package names. */ struct func_name { /** Null-terminated symbol name, e.g. "func" for "mod.submod.func" */ const char *sym; /** Package name, e.g. "mod.submod" for "mod.submod.func" */ const char *package; /** A pointer to the last character in ->package + 1 */ const char *package_end; }; /*** * Split function name to symbol and package names. * For example, str = foo.bar.baz => sym = baz, package = foo.bar * @param str function name, e.g. "module.submodule.function". * @param[out] name parsed symbol and package names. */ static void func_split_name(const char *str, struct func_name *name) { name->package = str; name->package_end = strrchr(str, '.'); if (name->package_end != NULL) { /* module.submodule.function => module.submodule, function */ name->sym = name->package_end + 1; /* skip '.' */ } else { /* package == function => function, function */ name->sym = name->package; name->package_end = str + strlen(str); } } /** * Arguments for luaT_module_find used by lua_cpcall() */ struct module_find_ctx { const char *package; const char *package_end; char *path; size_t path_len; }; /** * A cpcall() helper for module_find() */ static int luaT_module_find(lua_State *L) { struct module_find_ctx *ctx = (struct module_find_ctx *) lua_topointer(L, 1); /* * Call package.searchpath(name, package.cpath) and use * the path to the function in dlopen(). 
*/ lua_getglobal(L, "package"); lua_getfield(L, -1, "search"); /* Argument of search: name */ lua_pushlstring(L, ctx->package, ctx->package_end - ctx->package); lua_call(L, 1, 1); if (lua_isnil(L, -1)) return luaL_error(L, "module not found"); /* Convert path to absolute */ char resolved[PATH_MAX]; if (realpath(lua_tostring(L, -1), resolved) == NULL) { diag_set(SystemError, "realpath"); return luaT_error(L); } snprintf(ctx->path, ctx->path_len, "%s", resolved); return 0; } /** * Find path to module using Lua's package.cpath * @param package package name * @param package_end a pointer to the last byte in @a package + 1 * @param[out] path path to shared library * @param path_len size of @a path buffer * @retval 0 on success * @retval -1 on error, diag is set */ static int module_find(const char *package, const char *package_end, char *path, size_t path_len) { struct module_find_ctx ctx = { package, package_end, path, path_len }; lua_State *L = tarantool_L; int top = lua_gettop(L); if (luaT_cpcall(L, luaT_module_find, &ctx) != 0) { int package_len = (int) (package_end - package); diag_set(ClientError, ER_LOAD_MODULE, package_len, package, lua_tostring(L, -1)); lua_settop(L, top); return -1; } assert(top == lua_gettop(L)); /* cpcall discard results */ return 0; } static struct mh_strnptr_t *modules = NULL; static void module_gc(struct module *module); int module_init(void) { modules = mh_strnptr_new(); if (modules == NULL) { diag_set(OutOfMemory, sizeof(*modules), "malloc", "modules hash table"); return -1; } return 0; } void module_free(void) { while (mh_size(modules) > 0) { mh_int_t i = mh_first(modules); struct module *module = (struct module *) mh_strnptr_node(modules, i)->val; /* Can't delete modules if they have active calls */ module_gc(module); } mh_strnptr_delete(modules); } /** * Look up a module in the modules cache. 
*/ static struct module * module_cache_find(const char *name, const char *name_end) { mh_int_t i = mh_strnptr_find_inp(modules, name, name_end - name); if (i == mh_end(modules)) return NULL; return (struct module *)mh_strnptr_node(modules, i)->val; } /** * Save module to the module cache. */ static inline int module_cache_put(const char *name, const char *name_end, struct module *module) { size_t name_len = name_end - name; uint32_t name_hash = mh_strn_hash(name, name_len); const struct mh_strnptr_node_t strnode = { name, name_len, name_hash, module}; if (mh_strnptr_put(modules, &strnode, NULL, NULL) == mh_end(modules)) { diag_set(OutOfMemory, sizeof(strnode), "malloc", "modules"); return -1; } return 0; } /** * Delete a module from the module cache */ static void module_cache_del(const char *name, const char *name_end) { mh_int_t i = mh_strnptr_find_inp(modules, name, name_end - name); if (i == mh_end(modules)) return; mh_strnptr_del(modules, i, NULL); } /* * Load a dso. * Create a new symlink based on temporary directory and try to * load via this symink to load a dso twice for cases of a function * reload. */ static struct module * module_load(const char *package, const char *package_end) { char path[PATH_MAX]; if (module_find(package, package_end, path, sizeof(path)) != 0) return NULL; struct module *module = (struct module *) malloc(sizeof(*module)); if (module == NULL) { diag_set(OutOfMemory, sizeof(struct module), "malloc", "struct module"); return NULL; } rlist_create(&module->funcs); module->calls = 0; module->is_unloading = false; char dir_name[] = "/tmp/tntXXXXXX"; if (mkdtemp(dir_name) == NULL) { diag_set(SystemError, "failed to create unique dir name"); goto error; } char load_name[PATH_MAX + 1]; snprintf(load_name, sizeof(load_name), "%s/%.*s." 
TARANTOOL_LIBEXT, dir_name, (int)(package_end - package), package); if (symlink(path, load_name) < 0) { diag_set(SystemError, "failed to create dso link"); goto error; } module->handle = dlopen(load_name, RTLD_NOW | RTLD_LOCAL); if (unlink(load_name) != 0) say_warn("failed to unlink dso link %s", load_name); if (rmdir(dir_name) != 0) say_warn("failed to delete temporary dir %s", dir_name); if (module->handle == NULL) { int package_len = (int) (package_end - package_end); diag_set(ClientError, ER_LOAD_MODULE, package_len, package, dlerror()); goto error; } return module; error: free(module); return NULL; } static void module_delete(struct module *module) { dlclose(module->handle); TRASH(module); free(module); } /* * Check if a dso is unused and can be closed. */ static void module_gc(struct module *module) { if (!module->is_unloading || !rlist_empty(&module->funcs) || module->calls != 0) return; module_delete(module); } /* * Import a function from the module. */ static box_function_f module_sym(struct module *module, const char *name) { box_function_f f = (box_function_f)dlsym(module->handle, name); if (f == NULL) { diag_set(ClientError, ER_LOAD_FUNCTION, name, dlerror()); return NULL; } return f; } /* * Reload a dso. */ int module_reload(const char *package, const char *package_end, struct module **module) { struct module *old_module = module_cache_find(package, package_end); if (old_module == NULL) { /* Module wasn't loaded - do nothing. 
*/ *module = NULL; return 0; } struct module *new_module = module_load(package, package_end); if (new_module == NULL) return -1; struct func *func, *tmp_func; rlist_foreach_entry_safe(func, &old_module->funcs, item, tmp_func) { struct func_name name; func_split_name(func->def->name, &name); func->func = module_sym(new_module, name.sym); if (func->func == NULL) goto restore; func->module = new_module; rlist_move(&new_module->funcs, &func->item); } module_cache_del(package, package_end); if (module_cache_put(package, package_end, new_module) != 0) goto restore; old_module->is_unloading = true; module_gc(old_module); *module = new_module; return 0; restore: /* * Some old-dso func can't be load from new module, restore old * functions. */ do { struct func_name name; func_split_name(func->def->name, &name); func->func = module_sym(old_module, name.sym); if (func->func == NULL) { /* * Something strange was happen, an early loaden * function was not found in an old dso. */ panic("Can't restore module function, " "server state is inconsistent"); } func->module = old_module; rlist_move(&old_module->funcs, &func->item); } while (func != rlist_first_entry(&old_module->funcs, struct func, item)); assert(rlist_empty(&new_module->funcs)); module_delete(new_module); return -1; } struct func * func_new(struct func_def *def) { struct func *func = (struct func *) malloc(sizeof(struct func)); if (func == NULL) { diag_set(OutOfMemory, sizeof(*func), "malloc", "func"); return NULL; } func->def = def; /** Nobody has access to the function but the owner. */ memset(func->access, 0, sizeof(func->access)); /* * Do not initialize the privilege cache right away since * when loading up a function definition during recovery, * user cache may not be filled up yet (space _user is * recovered after space _func), so no user cache entry * may exist yet for such user. The cache will be filled * up on demand upon first access. 
 *
 * Later on consistency of the cache is ensured by DDL
 * checks (see user_has_data()).
 */
	func->owner_credentials.auth_token = BOX_USER_MAX; /* invalid value */
	func->func = NULL;
	func->module = NULL;
	return func;
}

/**
 * Unlink the function from its module. If this was the module's
 * last function, drop the module from the cache; the module
 * object itself is freed lazily by module_gc() once no calls
 * are in flight.
 */
static void
func_unload(struct func *func)
{
	if (func->module) {
		rlist_del(&func->item);
		if (rlist_empty(&func->module->funcs)) {
			struct func_name name;
			func_split_name(func->def->name, &name);
			module_cache_del(name.package, name.package_end);
		}
		module_gc(func->module);
	}
	func->module = NULL;
	func->func = NULL;
}

/**
 * Resolve func->func (find the respective DLL and fetch the
 * symbol from it).
 */
static int
func_load(struct func *func)
{
	assert(func->func == NULL);
	struct func_name name;
	func_split_name(func->def->name, &name);
	/* Reuse an already dlopen()-ed module when possible. */
	struct module *module = module_cache_find(name.package,
						  name.package_end);
	if (module == NULL) {
		/* Try to find loaded module in the cache */
		module = module_load(name.package, name.package_end);
		if (module == NULL)
			return -1;
		if (module_cache_put(name.package, name.package_end,
				     module)) {
			/* Cache insertion failed: do not leak the dso. */
			module_delete(module);
			return -1;
		}
	}
	func->func = module_sym(module, name.sym);
	if (func->func == NULL)
		return -1;
	func->module = module;
	rlist_add(&module->funcs, &func->item);
	return 0;
}

/**
 * Reload the dso behind this function (and every sibling func
 * from the same package, see module_reload()).
 *
 * NOTE(review): module_reload() leaves *module == NULL and
 * returns 0 when the package was never loaded; that case is
 * silently reported as success here -- confirm this is the
 * intended contract for callers.
 */
int
func_reload(struct func *func)
{
	struct func_name name;
	func_split_name(func->def->name, &name);
	struct module *module = NULL;
	if (module_reload(name.package, name.package_end, &module) != 0) {
		diag_log();
		return -1;
	}
	return 0;
}

int
func_call(struct func *func, box_function_ctx_t *ctx, const char *args,
	  const char *args_end)
{
	/* Resolve the C symbol lazily, on the first call. */
	if (func->func == NULL) {
		if (func_load(func) != 0)
			return -1;
	}
	/* Module can be changed after function reload.
*/ struct module *module = func->module; assert(module != NULL); ++module->calls; int rc = func->func(ctx, args, args_end); --module->calls; module_gc(module); return rc; } void func_update(struct func *func, struct func_def *def) { func_unload(func); free(func->def); func->def = def; } void func_delete(struct func *func) { func_unload(func); free(func->def); free(func); } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_mem.h0000664000000000000000000003000413306565107017235 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_MEM_H #define INCLUDES_TARANTOOL_BOX_VY_MEM_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include "fiber_cond.h" #include "iterator_type.h" #include "vy_stmt.h" /* for comparators */ #include "vy_stmt_stream.h" #include "vy_read_view.h" #include "vy_stat.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Vinyl memory environment. */ struct vy_mem_env { struct lsregion allocator; struct slab_arena arena; struct quota quota; /** Size of memory used for storing tree extents. */ size_t tree_extent_size; }; /** * Initialize a vinyl memory environment. * @param env[out] The environment to initialize. * @param memory The maximum number of in-memory bytes that vinyl uses. */ void vy_mem_env_create(struct vy_mem_env *env, size_t memory); /** * Destroy a vinyl memory environment. * @param env The environment to destroy. */ void vy_mem_env_destroy(struct vy_mem_env *env); /** @cond false */ struct tree_mem_key { const struct tuple *stmt; int64_t lsn; }; /** * Internal. Extracted to speed up BPS tree. */ static int vy_mem_tree_cmp(const struct tuple *a, const struct tuple *b, const struct key_def *cmp_def) { int res = vy_tuple_compare(a, b, cmp_def); if (res) return res; int64_t a_lsn = vy_stmt_lsn(a), b_lsn = vy_stmt_lsn(b); return a_lsn > b_lsn ? -1 : a_lsn < b_lsn; } /** * Internal. Extracted to speed up BPS tree. */ static int vy_mem_tree_cmp_key(const struct tuple *a, struct tree_mem_key *key, const struct key_def *cmp_def) { int res = vy_stmt_compare(a, key->stmt, cmp_def); if (res == 0) { if (key->lsn == INT64_MAX - 1) return 0; int64_t a_lsn = vy_stmt_lsn(a); res = a_lsn > key->lsn ? 
-1 : a_lsn < key->lsn; } return res; } #define VY_MEM_TREE_EXTENT_SIZE (16 * 1024) #define BPS_TREE_NAME vy_mem_tree #define BPS_TREE_BLOCK_SIZE 512 #define BPS_TREE_EXTENT_SIZE VY_MEM_TREE_EXTENT_SIZE #define BPS_TREE_COMPARE(a, b, cmp_def) vy_mem_tree_cmp(a, b, cmp_def) #define BPS_TREE_COMPARE_KEY(a, b, cmp_def) vy_mem_tree_cmp_key(a, b, cmp_def) #define bps_tree_elem_t const struct tuple * #define bps_tree_key_t struct tree_mem_key * #define bps_tree_arg_t const struct key_def * #define BPS_TREE_NO_DEBUG #include #undef BPS_TREE_NAME #undef BPS_TREE_BLOCK_SIZE #undef BPS_TREE_EXTENT_SIZE #undef BPS_TREE_COMPARE #undef BPS_TREE_COMPARE_KEY #undef bps_tree_elem_t #undef bps_tree_key_t #undef bps_tree_arg_t #undef BPS_TREE_NO_DEBUG /** @endcond false */ /** * vy_mem is an in-memory container for tuples in a single vinyl * range. * Internally it uses bps_tree to store tuples, which are ordered * by statement key and, for the same key, by lsn, in descending * order. * * For example, assume there are two statements with the same key, * but different LSN. These are duplicates of the same key, * maintained for the purpose of MVCC/consistent read view. * In Vinyl terms, they form a duplicate chain. * * vy_mem distinguishes between the first duplicate in the chain * and other keys in that chain. */ struct vy_mem { /** Vinyl memory environment. */ struct vy_mem_env *env; /** Link in range->sealed list. */ struct rlist in_sealed; /** BPS tree */ struct vy_mem_tree tree; /** Size of memory used for storing tree extents. */ size_t tree_extent_size; /** Number of statements. */ struct vy_stmt_counter count; /** The min and max values of stmt->lsn in this tree. */ int64_t min_lsn; int64_t max_lsn; /** * Key definition for this index, extended with primary * key parts. */ const struct key_def *cmp_def; /** version is initially 0 and is incremented on every write */ uint32_t version; /** Schema version at the time of creation. 
*/ uint32_t schema_version; /** * Generation of statements stored in the tree. * Used as lsregion allocator identifier. */ int64_t generation; /** * Format of vy_mem REPLACE and DELETE tuples without * column mask. */ struct tuple_format *format; /** Format of vy_mem tuples with column mask. */ struct tuple_format *format_with_colmask; /** Same as format, but for UPSERT tuples. */ struct tuple_format *upsert_format; /** * Number of active writers to this index. * * Incremented for modified in-memory trees when * preparing a transaction. Decremented after writing * to WAL or rollback. */ int pin_count; /** * Condition variable signaled by vy_mem_unpin() * if pin_count reaches 0. */ struct fiber_cond pin_cond; }; /** * Pin an in-memory index. * * A pinned in-memory index can't be dumped until it's unpinned. */ static inline void vy_mem_pin(struct vy_mem *mem) { mem->pin_count++; } /** * Unpin an in-memory index. * * This function reverts the effect of vy_mem_pin(). */ static inline void vy_mem_unpin(struct vy_mem *mem) { assert(mem->pin_count > 0); mem->pin_count--; if (mem->pin_count == 0) fiber_cond_broadcast(&mem->pin_cond); } /** * Wait until an in-memory index is unpinned. */ static inline void vy_mem_wait_pinned(struct vy_mem *mem) { while (mem->pin_count > 0) fiber_cond_wait(&mem->pin_cond); } /** * Instantiate a new in-memory level. * * @param env Vinyl memory environment. * @param generation Generation of statements stored in the tree. * @param key_def key definition. * @param format Format for REPLACE and DELETE tuples. * @param format_with_colmask Format for tuples, which have * column mask. * @param upsert_format Format for UPSERT tuples. * @param schema_version Schema version. * @retval new vy_mem instance on success. * @retval NULL on error, check diag. 
*/ struct vy_mem * vy_mem_new(struct vy_mem_env *env, int64_t generation, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *format_with_colmask, struct tuple_format *upsert_format, uint32_t schema_version); /** * Delete in-memory level. */ void vy_mem_delete(struct vy_mem *index); /* * Return the older statement for the given one. */ const struct tuple * vy_mem_older_lsn(struct vy_mem *mem, const struct tuple *stmt); /** * Insert a statement into the in-memory level. * @param mem vy_mem. * @param stmt Vinyl statement. * * @retval 0 Success. * @retval -1 Memory error. */ int vy_mem_insert(struct vy_mem *mem, const struct tuple *stmt); /** * Insert an upsert statement into the mem. * * @param mem Mem to insert to. * @param stmt Upsert statement to insert. * * @retval 0 Success. * @retval -1 Memory error. */ int vy_mem_insert_upsert(struct vy_mem *mem, const struct tuple *stmt); /** * Confirm insertion of a statement into the in-memory level. * @param mem vy_mem. * @param stmt Vinyl statement. */ void vy_mem_commit_stmt(struct vy_mem *mem, const struct tuple *stmt); /** * Remove a statement from the in-memory level. * @param mem vy_mem. * @param stmt Vinyl statement. */ void vy_mem_rollback_stmt(struct vy_mem *mem, const struct tuple *stmt); /** * Iterator for in-memory level. * * Return statements from vy_mem (in-memory index) based on * initial search key, iteration order and view lsn. * * All statements with lsn > vlsn are skipped. * The API allows to traverse over resulting statements within two * dimensions - key and lsn. next_key() switches to the youngest * statement of the next key, according to the iteration order, * and next_lsn() switches to an older statement for the same * key. 
*/ struct vy_mem_iterator { /** Usage statistics */ struct vy_mem_iterator_stat *stat; /* mem */ struct vy_mem *mem; /* Search options */ /** * Iterator type, that specifies direction, start position and stop * criteria if key == NULL: GT and EQ are changed to GE, LT to LE for * beauty. */ enum iterator_type iterator_type; /** Key to search. */ const struct tuple *key; /* LSN visibility, iterator shows values with lsn <= than that */ const struct vy_read_view **read_view; /* State of iterator */ /* Current position in tree */ struct vy_mem_tree_iterator curr_pos; /* * The pointer on a region allocated statement from vy_mem BPS tree. * There is no guarantee that curr_pos points on curr_stmt in the tree. * For example, cur_pos can be invalid but curr_stmt can point on a * valid statement. */ const struct tuple *curr_stmt; /* * Copy of the statement returned from one of public methods * (restore/next_lsn/next_key). Need to store the copy, because can't * return region allocated curr_stmt. */ struct tuple *last_stmt; /* data version from vy_mem */ uint32_t version; /* Is false until first .._next_.. method is called */ bool search_started; }; /** * Open an iterator over in-memory tree. */ void vy_mem_iterator_open(struct vy_mem_iterator *itr, struct vy_mem_iterator_stat *stat, struct vy_mem *mem, enum iterator_type iterator_type, const struct tuple *key, const struct vy_read_view **rv); /** * Advance a mem iterator to the newest statement for the next key. * The statement is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on memory allocation error. */ NODISCARD int vy_mem_iterator_next_key(struct vy_mem_iterator *itr, struct tuple **ret); /** * Advance a mem iterator to the older statement for the same key. * The statement is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on memory allocation error. 
*/ NODISCARD int vy_mem_iterator_next_lsn(struct vy_mem_iterator *itr, struct tuple **ret); /** * Advance a mem iterator to the newest statement for the first key * following @last_stmt. The statement is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on memory allocation error. */ NODISCARD int vy_mem_iterator_skip(struct vy_mem_iterator *itr, const struct tuple *last_stmt, struct tuple **ret); /** * Check if a mem iterator was invalidated and needs to be restored. * If it does, set the iterator position to the newest statement for * the key following @last_stmt and return 1, otherwise return 0. * Returns -1 on memory allocation error. */ NODISCARD int vy_mem_iterator_restore(struct vy_mem_iterator *itr, const struct tuple *last_stmt, struct tuple **ret); /** * Close a mem iterator. */ void vy_mem_iterator_close(struct vy_mem_iterator *itr); /** * Simple stream over a mem. @see vy_stmt_stream. */ struct vy_mem_stream { /** Parent class, must be the first member */ struct vy_stmt_stream base; /** Mem to stream */ struct vy_mem *mem; /** Current position */ struct vy_mem_tree_iterator curr_pos; }; /** * Open a mem stream. Use vy_stmt_stream api for further work. */ void vy_mem_stream_open(struct vy_mem_stream *stream, struct vy_mem *mem); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_MEM_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_quota.h0000664000000000000000000001160013306565107017611 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_QUOTA_H #define INCLUDES_TARANTOOL_BOX_VY_QUOTA_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "fiber.h" #include "fiber_cond.h" #include "say.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct vy_quota; typedef void (*vy_quota_exceeded_f)(struct vy_quota *quota); /** * Quota used for accounting and limiting memory consumption * in the vinyl engine. It is NOT multi-threading safe. */ struct vy_quota { /** * Memory limit. Once hit, new transactions are * throttled until memory is reclaimed. */ size_t limit; /** * Memory watermark. Exceeding it does not result in * throttling new transactions, but it does trigger * background memory reclaim. */ size_t watermark; /** Current memory consumption. */ size_t used; /** * If vy_quota_use() takes longer than the given * value, warn about it in the log. */ double too_long_threshold; /** * Condition variable used for throttling consumers when * there is no quota left. */ struct fiber_cond cond; /** * Called when quota is consumed if used >= watermark. * It is supposed to trigger memory reclaim. 
 */
	vy_quota_exceeded_f quota_exceeded_cb;
};

/**
 * Initialize a quota object: no limit (SIZE_MAX), nothing used,
 * and no "waited too long" warnings (TIMEOUT_INFINITY).
 */
static inline void
vy_quota_create(struct vy_quota *q, vy_quota_exceeded_f quota_exceeded_cb)
{
	q->limit = SIZE_MAX;
	q->watermark = SIZE_MAX;
	q->used = 0;
	q->too_long_threshold = TIMEOUT_INFINITY;
	q->quota_exceeded_cb = quota_exceeded_cb;
	fiber_cond_create(&q->cond);
}

/**
 * Destroy a quota object. Wakes up any fibers still blocked in
 * vy_quota_use() before destroying the condition variable.
 */
static inline void
vy_quota_destroy(struct vy_quota *q)
{
	fiber_cond_broadcast(&q->cond);
	fiber_cond_destroy(&q->cond);
}

/**
 * Set memory limit. If current memory usage exceeds
 * the new limit, invoke the callback.
 */
static inline void
vy_quota_set_limit(struct vy_quota *q, size_t limit)
{
	/* Setting the limit also collapses the watermark onto it. */
	q->limit = q->watermark = limit;
	if (q->used >= limit)
		q->quota_exceeded_cb(q);
}

/**
 * Set memory watermark. If current memory usage exceeds
 * the new watermark, invoke the callback.
 */
static inline void
vy_quota_set_watermark(struct vy_quota *q, size_t watermark)
{
	q->watermark = watermark;
	if (q->used >= watermark)
		q->quota_exceeded_cb(q);
}

/**
 * Consume @size bytes of memory. In contrast to vy_quota_use()
 * this function does not throttle the caller.
 */
static inline void
vy_quota_force_use(struct vy_quota *q, size_t size)
{
	q->used += size;
	if (q->used >= q->watermark)
		q->quota_exceeded_cb(q);
}

/**
 * Release @size bytes of memory.
 */
static inline void
vy_quota_release(struct vy_quota *q, size_t size)
{
	assert(q->used >= size);
	q->used -= size;
	/* Let throttled consumers retry now that quota was freed. */
	fiber_cond_broadcast(&q->cond);
}

/**
 * Try to consume @size bytes of memory, throttle the caller
 * if the limit is exceeded. @timeout specifies the maximal
 * time to wait. Return 0 on success, -1 on timeout.
*/ static inline int vy_quota_use(struct vy_quota *q, size_t size, double timeout) { double start_time = ev_monotonic_now(loop()); double deadline = start_time + timeout; while (q->used + size > q->limit && timeout > 0) { q->quota_exceeded_cb(q); if (fiber_cond_wait_deadline(&q->cond, deadline) != 0) break; /* timed out */ } double wait_time = ev_monotonic_now(loop()) - start_time; if (wait_time > q->too_long_threshold) { say_warn("waited for %zu bytes of vinyl memory quota " "for too long: %.3f sec", size, wait_time); } if (q->used + size > q->limit) return -1; q->used += size; if (q->used >= q->watermark) q->quota_exceeded_cb(q); return 0; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_QUOTA_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/sysview_index.h0000664000000000000000000000404013306560010020626 0ustar rootroot#ifndef TARANTOOL_BOX_SYSVIEW_INDEX_H_INCLUDED #define TARANTOOL_BOX_SYSVIEW_INDEX_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "index.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct space; struct tuple; struct sysview_engine; typedef bool (*sysview_filter_f)(struct space *source, struct tuple *); struct sysview_index { struct index base; uint32_t source_space_id; uint32_t source_index_id; sysview_filter_f filter; }; struct sysview_index * sysview_index_new(struct sysview_engine *sysview, struct index_def *def, const char *space_name); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_SYSVIEW_INDEX_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/iproto.cc0000664000000000000000000013261713306565107017430 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "iproto.h" #include #include #include #include #include #include #include #include "third_party/base64.h" #include "version.h" #include "fiber.h" #include "cbus.h" #include "say.h" #include "sio.h" #include "evio.h" #include "coio.h" #include "scoped_guard.h" #include "memory.h" #include "port.h" #include "box.h" #include "call.h" #include "tuple_convert.h" #include "session.h" #include "xrow.h" #include "schema.h" /* schema_version */ #include "replication.h" /* instance_uuid */ #include "iproto_constants.h" #include "rmean.h" #include "errinj.h" #include "applier.h" #include "cfg.h" /* The number of iproto messages in flight */ enum { IPROTO_MSG_MAX = 768 }; /** * Network readahead. A signed integer to avoid * automatic type coercion to an unsigned type. * We assign it without locks in txn thread and * use in iproto thread -- it's OK that * readahead has a stale value while until the thread * caches have synchronized, after all, it's used * in new connections only. * * Notice that the default is not a strict power of two. * slab metadata takes some space, and we want * allocation steps to be correlated to slab buddy * sizes, so when we ask slab cache for 16320 bytes, * we get a slab of size 16384, not 32768. 
*/ unsigned iproto_readahead = 16320; /** * How big is a buffer which needs to be shrunk before * it is put back into buffer cache. */ static inline unsigned iproto_max_input_size(void) { return 18 * iproto_readahead; } void iproto_reset_input(struct ibuf *ibuf) { /* * If we happen to have fully processed the input, * move the pos to the start of the input buffer. */ assert(ibuf_used(ibuf) == 0); if (ibuf_capacity(ibuf) < iproto_max_input_size()) { ibuf_reset(ibuf); } else { struct slab_cache *slabc = ibuf->slabc; ibuf_destroy(ibuf); ibuf_create(ibuf, slabc, iproto_readahead); } } /** * This structure represents a position in the output. * Since we use rotating buffers to recycle memory, * it includes not only a position in obuf, but also * a pointer to obuf the position is for. */ struct iproto_wpos { struct obuf *obuf; struct obuf_svp svp; }; static void iproto_wpos_create(struct iproto_wpos *wpos, struct obuf *out) { wpos->obuf = out; wpos->svp = obuf_create_svp(out); } /* {{{ iproto_msg - declaration */ /** * A single msg from io thread. All requests * from all connections are queued into a single queue * and processed in FIFO order. */ struct iproto_msg { struct cmsg base; struct iproto_connection *connection; /* --- Box msgs - actual requests for the transaction processor --- */ /* Request message code and sync. */ struct xrow_header header; union { /** Box request, if this is a DML */ struct request dml; /** Box request, if this is a call or eval. */ struct call_request call; /** Authentication request. */ struct auth_request auth; /** In case of iproto parse error, saved diagnostics. */ struct diag diag; }; /** * Input buffer which stores the request data. It can be * discarded only when the message returns to iproto thread. */ struct ibuf *p_ibuf; /** * How much space the request takes in the * input buffer (len, header and body - all of it) * This also works as a reference counter to * ibuf object. 
*/ size_t len; /** * Position in the connection output buffer. When sending a * message to the tx thread, iproto sets it to its current * flush position so that tx can reuse a buffer that has been * flushed. The tx thread, in turn, sets it to the end of the * data it has just written, to let iproto know that there is * more output to flush. */ struct iproto_wpos wpos; /** * Message sent by the tx thread to notify iproto that input has * been processed and can be discarded before request completion. * Used by long (yielding) CALL/EVAL requests. */ struct cmsg discard_input; /** * Used in "connect" msgs, true if connect trigger failed * and the connection must be closed. */ bool close_connection; }; static struct mempool iproto_msg_pool; static struct iproto_msg * iproto_msg_new(struct iproto_connection *con) { struct iproto_msg *msg = (struct iproto_msg *) mempool_alloc_xc(&iproto_msg_pool); msg->connection = con; return msg; } /** * Resume stopped connections, if any. */ static void iproto_resume(); static void iproto_msg_decode(struct iproto_msg *msg, const char **pos, const char *reqend, bool *stop_input); static inline void iproto_msg_delete(struct iproto_msg *msg) { mempool_free(&iproto_msg_pool, msg); iproto_resume(); } /** * A single global queue for all requests in all connections. All * requests from all connections are processed concurrently. * Is also used as a queue for just established connections and to * execute disconnect triggers. A few notes about these triggers: * - they need to be run in a fiber * - unlike an ordinary request failure, on_connect trigger * failure must lead to connection close. * - on_connect trigger must be processed before any other * request on this connection. */ static struct cpipe tx_pipe; static struct cpipe net_pipe; /** * Network thread. */ static struct cord net_cord; /** * Slab cache used for allocating memory for output network buffers * in the tx thread. 
*/ static struct slab_cache net_slabc; struct rmean *rmean_net; enum rmean_net_name { IPROTO_SENT, IPROTO_RECEIVED, IPROTO_LAST, }; const char *rmean_net_strings[IPROTO_LAST] = { "SENT", "RECEIVED" }; static void tx_process_disconnect(struct cmsg *m); static void net_finish_disconnect(struct cmsg *m); static const struct cmsg_hop disconnect_route[] = { { tx_process_disconnect, &net_pipe }, { net_finish_disconnect, NULL }, }; /* }}} */ /* {{{ iproto_connection - declaration and definition */ /** * Context of a single client connection. * Interaction scheme: * * Receive from the network. * | * +---|---------------------+ +------------+ * | | IPROTO thread | | TX thread | * | v | | | * | ibuf[0]- - - - - - - - -|- -|- - >+ | * | | | | | * | ibuf[1] | | | | * | | | | | * | obuf[0] <- - - - - - - -|- -|- - -+ | * | | | | | | * | | obuf[1] <- - -|- -|- - -+ | * +----|-----------|--------+ +------------+ * | v * | Send to * | network. * v * Send to network after obuf[1], i.e. older responses are sent first. * * ibuf structure: * rpos wpos end * +-------------------|----------------|-------------+ * \________/\________/ \________/\____/ * \ msg msg / msg parse * \______________/ size * response is sent, * messages are * discarded */ struct iproto_connection { /** * Two rotating buffers for input. Input is first read into * ibuf[0]. As soon as it buffer becomes full, the buffers are * rotated. When all input buffers are used up, the input * is suspended. The buffer becomes available for use * again when tx thread completes processing the messages * stored in the buffer. */ struct ibuf ibuf[2]; /** Pointer to the current buffer. */ struct ibuf *p_ibuf; /** * Two rotating buffers for output. The tx thread switches to * another buffer if it finds it to be empty (flushed out). * This guarantees that memory gets recycled as soon as output * is flushed by the iproto thread. 
*/ struct obuf obuf[2]; /** * Position in the output buffer that points to the beginning * of the data awaiting to be flushed. Advanced by the iproto * thread upon successfull flush. */ struct iproto_wpos wpos; /** * Position in the output buffer that points to the end of the * data awaiting to be flushed. Advanced by the iproto thread * upon receiving a message from the tx thread telling that more * output is available (see iproto_msg::wpos). */ struct iproto_wpos wend; /* * Size of readahead which is not parsed yet, i.e. size of * a piece of request which is not fully read. Is always * relative to ibuf.wpos. In other words, ibuf.wpos - * parse_size gives the start of the unparsed request. * A size rather than a pointer is used to be safe in case * ibuf.buf is reallocated. Being relative to ibuf.wpos, * rather than to ibuf.rpos is helpful to make sure * ibuf_reserve() or buffers rotation don't make the value * meaningless. */ size_t parse_size; /** * Nubmer of active long polling requests that have already * discarded their arguments in order not to stall other * connections. */ int long_poll_requests; struct ev_io input; struct ev_io output; /** Logical session. */ struct session *session; ev_loop *loop; /* Pre-allocated disconnect msg. */ struct cmsg disconnect; /** True if disconnect message is sent. Debug-only. */ bool is_disconnected; struct rlist in_stop_list; /** * The following fields are used exclusively by the tx thread. * Align them to prevent false-sharing. */ struct { alignas(CACHELINE_SIZE) /** Pointer to the current output buffer. */ struct obuf *p_obuf; } tx; }; static struct mempool iproto_connection_pool; static RLIST_HEAD(stopped_connections); /** * Return true if we have not enough spare messages * in the message pool. 
*/ static inline bool iproto_check_msg_max() { size_t request_count = mempool_count(&iproto_msg_pool); return request_count > IPROTO_MSG_MAX; } /** * Throttle the queue to the tx thread and ensure the fiber pool * in tx thread is not depleted by a flood of incoming requests: * resume a stopped connection only if there is a spare message * object in the message pool. */ static void iproto_resume() { /* * Most of the time we have nothing to do here: throttling * is not active. */ if (rlist_empty(&stopped_connections)) return; if (iproto_check_msg_max()) return; struct iproto_connection *con; con = rlist_first_entry(&stopped_connections, struct iproto_connection, in_stop_list); ev_feed_event(con->loop, &con->input, EV_READ); } /** * A connection is idle when the client is gone * and there are no outstanding msgs in the msg queue. * An idle connection can be safely garbage collected. * Note: a connection only becomes idle after iproto_connection_close(), * which closes the fd. This is why here the check is for * evio_has_fd(), not ev_is_active() (false if event is not * started). * * ibuf_size() provides an effective reference counter * on connection use in the tx request queue. Any request * in the request queue has a non-zero len, and ibuf_size() * is therefore non-zero as long as there is at least * one request in the tx queue. */ static inline bool iproto_connection_is_idle(struct iproto_connection *con) { return con->long_poll_requests == 0 && ibuf_used(&con->ibuf[0]) == 0 && ibuf_used(&con->ibuf[1]) == 0; } static inline void iproto_connection_stop(struct iproto_connection *con) { say_warn("net_msg_max limit reached, stopping input on connection %s", sio_socketname(con->input.fd)); assert(rlist_empty(&con->in_stop_list)); ev_io_stop(con->loop, &con->input); rlist_add_tail(&stopped_connections, &con->in_stop_list); } /** * Initiate a connection shutdown. This method may * be invoked many times, and does the internal * bookkeeping to only cleanup resources once. 
*/ static inline void iproto_connection_close(struct iproto_connection *con) { if (evio_has_fd(&con->input)) { /* Clears all pending events. */ ev_io_stop(con->loop, &con->input); ev_io_stop(con->loop, &con->output); int fd = con->input.fd; /* Make evio_has_fd() happy */ con->input.fd = con->output.fd = -1; close(fd); /* * Discard unparsed data, to recycle the * connection in net_send_msg() as soon as all * parsed data is processed. It's important this * is done only once. */ con->p_ibuf->wpos -= con->parse_size; } /* * If the connection has no outstanding requests in the * input buffer, then no one (e.g. tx thread) is referring * to it, so it must be destroyed at once. Queue a msg to * run on_disconnect() trigger and destroy the connection. * * Otherwise, it will be destroyed by the last request on * this connection that has finished processing. * * The check is mandatory to not destroy a connection * twice. */ if (iproto_connection_is_idle(con)) { assert(con->is_disconnected == false); con->is_disconnected = true; cpipe_push(&tx_pipe, &con->disconnect); } rlist_del(&con->in_stop_list); } static inline struct ibuf * iproto_connection_next_input(struct iproto_connection *con) { return &con->ibuf[con->p_ibuf == &con->ibuf[0]]; } /** * If there is no space for reading input, we can do one of the * following: * - try to get a new ibuf, so that it can fit the request. * Always getting a new input buffer when there is no space * makes the instance susceptible to input-flood attacks. * Therefore, at most 2 ibufs are used in a single connection, * one is "open", receiving input, and the other is closed, * waiting for flushing output from a corresponding obuf. * - stop input and wait until the client reads piled up output, * so the input buffer can be reused. This complements * the previous strategy. It is only safe to stop input if it * is known that there is output. In this case input event * flow will be resumed when all replies to previous requests * are sent. 
Since there are two buffers, the input is only * stopped when both of them are fully used up. * * To make this strategy work, each ibuf in use must fit at least * one request. Otherwise, both obufs may end up having no data to * flush, while current ibuf is too small to fit a big incoming * request. */ static struct ibuf * iproto_connection_input_buffer(struct iproto_connection *con) { struct ibuf *old_ibuf = con->p_ibuf; size_t to_read = 3; /* Smallest possible valid request. */ /* The type code is checked in iproto_enqueue_batch() */ if (con->parse_size) { const char *pos = old_ibuf->wpos - con->parse_size; if (mp_check_uint(pos, old_ibuf->wpos) <= 0) to_read = mp_decode_uint(&pos); } if (ibuf_unused(old_ibuf) >= to_read) return old_ibuf; /* * Reuse the buffer if all requests are processed * (in only has unparsed content). */ if (ibuf_used(old_ibuf) == con->parse_size) { ibuf_reserve_xc(old_ibuf, to_read); return old_ibuf; } struct ibuf *new_ibuf = iproto_connection_next_input(con); if (ibuf_used(new_ibuf) != 0) { /* * Wait until the second buffer is flushed * and becomes available for reuse. */ return NULL; } ibuf_reserve_xc(new_ibuf, to_read + con->parse_size); /* * Discard unparsed data in the old buffer, otherwise it * won't be recycled when all parsed requests are processed. */ old_ibuf->wpos -= con->parse_size; if (con->parse_size != 0) { /* Move the cached request prefix to the new buffer. */ memcpy(new_ibuf->rpos, old_ibuf->wpos, con->parse_size); new_ibuf->wpos += con->parse_size; /* * We made ibuf idle. If obuf was already idle it * makes the both ibuf and obuf idle, time to trim * them. */ if (ibuf_used(old_ibuf) == 0) iproto_reset_input(old_ibuf); } /* * Rotate buffers. Not strictly necessary, but * helps preserve response order. */ con->p_ibuf = new_ibuf; return new_ibuf; } /** Enqueue all requests which were read up. 
*/ static inline void iproto_enqueue_batch(struct iproto_connection *con, struct ibuf *in) { int n_requests = 0; bool stop_input = false; while (con->parse_size && stop_input == false) { const char *reqstart = in->wpos - con->parse_size; const char *pos = reqstart; /* Read request length. */ if (mp_typeof(*pos) != MP_UINT) { cpipe_flush_input(&tx_pipe); tnt_raise(ClientError, ER_INVALID_MSGPACK, "packet length"); } if (mp_check_uint(pos, in->wpos) >= 0) break; uint32_t len = mp_decode_uint(&pos); const char *reqend = pos + len; if (reqend > in->wpos) break; struct iproto_msg *msg = iproto_msg_new(con); msg->p_ibuf = con->p_ibuf; msg->wpos = con->wpos; msg->len = reqend - reqstart; /* total request length */ iproto_msg_decode(msg, &pos, reqend, &stop_input); /* * This can't throw, but should not be * done in case of exception. */ cpipe_push_input(&tx_pipe, &msg->base); n_requests++; /* Request is parsed */ assert(reqend > reqstart); assert(con->parse_size >= (size_t) (reqend - reqstart)); con->parse_size -= reqend - reqstart; } if (stop_input) { /** * Don't mess with the file descriptor * while join is running. ev_io_stop() * also clears any pending events, which * is good, since their invocation may * re-start the watcher, ruining our * efforts. */ ev_io_stop(con->loop, &con->output); ev_io_stop(con->loop, &con->input); } else if (n_requests != 1 || con->parse_size != 0) { assert(rlist_empty(&con->in_stop_list)); /* * Keep reading input, as long as the socket * supplies data, but don't waste CPU on an extra * read() if dealing with a blocking client, it * has nothing in the socket for us. * * We look at the amount of enqueued requests * and presence of a partial request in the * input buffer as hints to distinguish * blocking and non-blocking clients: * * For blocking clients, a request typically * is fully read and enqueued. * If there is unparsed data, or 0 queued * requests, keep reading input, if only to avoid * a deadlock on this connection. 
*/ ev_feed_event(con->loop, &con->input, EV_READ); } cpipe_flush_input(&tx_pipe); } static void iproto_connection_on_input(ev_loop *loop, struct ev_io *watcher, int /* revents */) { struct iproto_connection *con = (struct iproto_connection *) watcher->data; int fd = con->input.fd; assert(fd >= 0); if (! rlist_empty(&con->in_stop_list)) { /* Resumed stopped connection. */ rlist_del(&con->in_stop_list); /* * This connection may have no input, so * resume one more connection which might have * input. */ iproto_resume(); } /* * Throttle if there are too many pending requests, * otherwise we might deplete the fiber pool in tx * thread and deadlock. */ if (iproto_check_msg_max()) { iproto_connection_stop(con); return; } try { /* Ensure we have sufficient space for the next round. */ struct ibuf *in = iproto_connection_input_buffer(con); if (in == NULL) { say_warn("readahead limit reached, stopping input on connection %s", sio_socketname(con->input.fd)); ev_io_stop(loop, &con->input); return; } /* Read input. */ int nrd = sio_read(fd, in->wpos, ibuf_unused(in)); if (nrd < 0) { /* Socket is not ready. */ ev_io_start(loop, &con->input); return; } if (nrd == 0) { /* EOF */ iproto_connection_close(con); return; } /* Count statistics */ rmean_collect(rmean_net, IPROTO_RECEIVED, nrd); /* Update the read position and connection state. */ in->wpos += nrd; con->parse_size += nrd; /* Enqueue all requests which are fully read up. */ iproto_enqueue_batch(con, in); } catch (Exception *e) { /* Best effort at sending the error message to the client. */ iproto_write_error(fd, e, ::schema_version, 0); e->log(); iproto_connection_close(con); } } /** writev() to the socket and handle the result. 
*/ static int iproto_flush(struct iproto_connection *con) { int fd = con->output.fd; struct obuf *obuf = con->wpos.obuf; struct obuf_svp obuf_end = obuf_create_svp(obuf); struct obuf_svp *begin = &con->wpos.svp; struct obuf_svp *end = &con->wend.svp; if (con->wend.obuf != obuf) { /* * Flush the current buffer before * advancing to the next one. */ if (begin->used == obuf_end.used) { obuf = con->wpos.obuf = con->wend.obuf; obuf_svp_reset(begin); } else { end = &obuf_end; } } if (begin->used == end->used) { /* Nothing to do. */ return 1; } assert(begin->used < end->used); struct iovec iov[SMALL_OBUF_IOV_MAX+1]; struct iovec *src = obuf->iov; int iovcnt = end->pos - begin->pos + 1; /* * iov[i].iov_len may be concurrently modified in tx thread, * but only for the last position. */ memcpy(iov, src + begin->pos, iovcnt * sizeof(struct iovec)); sio_add_to_iov(iov, -begin->iov_len); /* *Overwrite* iov_len of the last pos as it may be garbage. */ iov[iovcnt-1].iov_len = end->iov_len - begin->iov_len * (iovcnt == 1); ssize_t nwr = sio_writev(fd, iov, iovcnt); /* Count statistics */ rmean_collect(rmean_net, IPROTO_SENT, nwr); if (nwr > 0) { if (begin->used + nwr == end->used) { *begin = *end; return 0; } size_t offset = 0; int advance = 0; advance = sio_move_iov(iov, nwr, &offset); begin->used += nwr; /* advance write position */ begin->iov_len = advance == 0 ? begin->iov_len + offset: offset; begin->pos += advance; assert(begin->pos <= end->pos); } return -1; } static void iproto_connection_on_output(ev_loop *loop, struct ev_io *watcher, int /* revents */) { struct iproto_connection *con = (struct iproto_connection *) watcher->data; try { int rc; while ((rc = iproto_flush(con)) <= 0) { if (rc != 0) { ev_io_start(loop, &con->output); return; } if (! 
ev_is_active(&con->input) && rlist_empty(&con->in_stop_list)) { ev_feed_event(loop, &con->input, EV_READ); } } if (ev_is_active(&con->output)) ev_io_stop(con->loop, &con->output); } catch (Exception *e) { e->log(); iproto_connection_close(con); } } static struct iproto_connection * iproto_connection_new(int fd) { struct iproto_connection *con = (struct iproto_connection *) mempool_alloc_xc(&iproto_connection_pool); con->input.data = con->output.data = con; con->loop = loop(); ev_io_init(&con->input, iproto_connection_on_input, fd, EV_READ); ev_io_init(&con->output, iproto_connection_on_output, fd, EV_WRITE); ibuf_create(&con->ibuf[0], cord_slab_cache(), iproto_readahead); ibuf_create(&con->ibuf[1], cord_slab_cache(), iproto_readahead); obuf_create(&con->obuf[0], &net_slabc, iproto_readahead); obuf_create(&con->obuf[1], &net_slabc, iproto_readahead); con->p_ibuf = &con->ibuf[0]; con->tx.p_obuf = &con->obuf[0]; iproto_wpos_create(&con->wpos, con->tx.p_obuf); iproto_wpos_create(&con->wend, con->tx.p_obuf); con->parse_size = 0; con->long_poll_requests = 0; con->session = NULL; rlist_create(&con->in_stop_list); /* It may be very awkward to allocate at close. */ cmsg_init(&con->disconnect, disconnect_route); con->is_disconnected = false; return con; } /** Recycle a connection. Never throws. */ static inline void iproto_connection_delete(struct iproto_connection *con) { assert(iproto_connection_is_idle(con)); assert(!evio_has_fd(&con->output)); assert(!evio_has_fd(&con->input)); assert(con->session == NULL); /* * The output buffers must have been deleted * in tx thread. 
*/ ibuf_destroy(&con->ibuf[0]); ibuf_destroy(&con->ibuf[1]); assert(con->obuf[0].pos == 0 && con->obuf[0].iov[0].iov_base == NULL); assert(con->obuf[1].pos == 0 && con->obuf[1].iov[0].iov_base == NULL); mempool_free(&iproto_connection_pool, con); } /* }}} iproto_connection */ /* {{{ iproto_msg - methods and routes */ static void tx_process_misc(struct cmsg *msg); static void tx_process_call(struct cmsg *msg); static void tx_process1(struct cmsg *msg); static void tx_process_select(struct cmsg *msg); static void tx_reply_error(struct iproto_msg *msg); static void tx_reply_iproto_error(struct cmsg *m); static void net_send_msg(struct cmsg *msg); static void net_send_error(struct cmsg *msg); static void tx_process_join_subscribe(struct cmsg *msg); static void net_end_join(struct cmsg *msg); static void net_end_subscribe(struct cmsg *msg); static const struct cmsg_hop misc_route[] = { { tx_process_misc, &net_pipe }, { net_send_msg, NULL }, }; static const struct cmsg_hop call_route[] = { { tx_process_call, &net_pipe }, { net_send_msg, NULL }, }; static const struct cmsg_hop select_route[] = { { tx_process_select, &net_pipe }, { net_send_msg, NULL }, }; static const struct cmsg_hop process1_route[] = { { tx_process1, &net_pipe }, { net_send_msg, NULL }, }; static const struct cmsg_hop *dml_route[IPROTO_TYPE_STAT_MAX] = { NULL, /* IPROTO_OK */ select_route, /* IPROTO_SELECT */ process1_route, /* IPROTO_INSERT */ process1_route, /* IPROTO_REPLACE */ process1_route, /* IPROTO_UPDATE */ process1_route, /* IPROTO_DELETE */ call_route, /* IPROTO_CALL_16 */ misc_route, /* IPROTO_AUTH */ call_route, /* IPROTO_EVAL */ process1_route, /* IPROTO_UPSERT */ call_route, /* IPROTO_CALL */ NULL, /* reserved */ NULL, /* IPROTO_NOP */ }; static const struct cmsg_hop join_route[] = { { tx_process_join_subscribe, &net_pipe }, { net_end_join, NULL }, }; static const struct cmsg_hop subscribe_route[] = { { tx_process_join_subscribe, &net_pipe }, { net_end_subscribe, NULL }, }; static const 
struct cmsg_hop error_route[] = { { tx_reply_iproto_error, &net_pipe }, { net_send_error, NULL }, }; static void iproto_msg_decode(struct iproto_msg *msg, const char **pos, const char *reqend, bool *stop_input) { uint8_t type; if (xrow_header_decode(&msg->header, pos, reqend)) goto error; assert(*pos == reqend); type = msg->header.type; /* * Parse request before putting it into the queue * to save tx some CPU. More complicated requests are * parsed in tx thread into request type-specific objects. */ switch (type) { case IPROTO_SELECT: case IPROTO_INSERT: case IPROTO_REPLACE: case IPROTO_UPDATE: case IPROTO_DELETE: case IPROTO_UPSERT: if (xrow_decode_dml(&msg->header, &msg->dml, dml_request_key_map(type))) goto error; assert(type < sizeof(dml_route)/sizeof(*dml_route)); cmsg_init(&msg->base, dml_route[type]); break; case IPROTO_CALL_16: case IPROTO_CALL: case IPROTO_EVAL: if (xrow_decode_call(&msg->header, &msg->call)) goto error; cmsg_init(&msg->base, call_route); break; case IPROTO_PING: cmsg_init(&msg->base, misc_route); break; case IPROTO_JOIN: cmsg_init(&msg->base, join_route); *stop_input = true; break; case IPROTO_SUBSCRIBE: cmsg_init(&msg->base, subscribe_route); *stop_input = true; break; case IPROTO_REQUEST_VOTE: cmsg_init(&msg->base, misc_route); break; case IPROTO_AUTH: if (xrow_decode_auth(&msg->header, &msg->auth)) goto error; cmsg_init(&msg->base, misc_route); break; default: diag_set(ClientError, ER_UNKNOWN_REQUEST_TYPE, (uint32_t) type); goto error; } return; error: /** Log and send the error. */ diag_log(); diag_create(&msg->diag); diag_move(&fiber()->diag, &msg->diag); cmsg_init(&msg->base, error_route); } static void tx_fiber_init(struct session *session, uint64_t sync) { session->sync = sync; /* * We do not cleanup fiber keys at the end of each request. 
* This does not lead to privilege escalation as long as * fibers used to serve iproto requests never mingle with * fibers used to serve background tasks without going * through the purification of fiber_recycle(), which * resets the fiber local storage. Fibers, used to run * background tasks clean up their session in on_stop * trigger as well. */ fiber_set_session(fiber(), session); fiber_set_user(fiber(), &session->credentials); } /** * Fire on_disconnect triggers in the tx * thread and destroy the session object, * as well as output buffers of the connection. */ static void tx_process_disconnect(struct cmsg *m) { struct iproto_connection *con = container_of(m, struct iproto_connection, disconnect); if (con->session) { tx_fiber_init(con->session, 0); /* * The socket is already closed in iproto thread, * prevent box.session.peer() from using it. */ con->session->fd = -1; if (! rlist_empty(&session_on_disconnect)) session_run_on_disconnect_triggers(con->session); session_destroy(con->session); con->session = NULL; /* safety */ } /* * Got to be done in iproto thread since * that's where the memory is allocated. */ obuf_destroy(&con->obuf[0]); obuf_destroy(&con->obuf[1]); } /** * Cleanup the net thread resources of a connection * and close the connection. */ static void net_finish_disconnect(struct cmsg *m) { struct iproto_connection *con = container_of(m, struct iproto_connection, disconnect); /* Runs the trigger, which may yield. */ iproto_connection_delete(con); } static int tx_check_schema(uint32_t new_schema_version) { if (new_schema_version && new_schema_version != schema_version) { diag_set(ClientError, ER_WRONG_SCHEMA_VERSION, new_schema_version, schema_version); return -1; } return 0; } static void net_discard_input(struct cmsg *m) { struct iproto_msg *msg = container_of(m, struct iproto_msg, discard_input); struct iproto_connection *con = msg->connection; msg->p_ibuf->rpos += msg->len; msg->len = 0; con->long_poll_requests++; if (! 
ev_is_active(&con->input) && rlist_empty(&con->in_stop_list)) ev_feed_event(con->loop, &con->input, EV_READ); } static void tx_discard_input(struct iproto_msg *msg) { static const struct cmsg_hop discard_input_route[] = { { net_discard_input, NULL }, }; cmsg_init(&msg->discard_input, discard_input_route); cpipe_push(&net_pipe, &msg->discard_input); } /** * The goal of this function is to maintain the state of * two rotating connection output buffers in tx thread. * * The function enforces the following rules: * - if both out buffers are empty, any one is selected; * - if one of the buffers is empty, and the other is * not, the empty buffer is selected. * - if neither of the buffers are empty, the function * does not rotate the buffer. */ static struct iproto_msg * tx_accept_msg(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; struct obuf *prev = &con->obuf[con->tx.p_obuf == con->obuf]; if (msg->wpos.obuf == con->tx.p_obuf) { /* * We got a message advancing the buffer which * is being appended to. The previous buffer is * guaranteed to have been flushed first, since * buffers are never flushed out of order. */ if (obuf_size(prev) != 0) obuf_reset(prev); } if (obuf_size(con->tx.p_obuf) != 0 && obuf_size(prev) == 0) { /* * If the current buffer is not empty, and the * previous buffer has been flushed, rotate * the current buffer. */ con->tx.p_obuf = prev; } return msg; } /** * Write error message to the output buffer and advance * write position. Doesn't throw. */ static void tx_reply_error(struct iproto_msg *msg) { struct obuf *out = msg->connection->tx.p_obuf; iproto_reply_error(out, diag_last_error(&fiber()->diag), msg->header.sync, ::schema_version); iproto_wpos_create(&msg->wpos, out); } /** * Write error from iproto thread to the output buffer and advance * write position. Doesn't throw. 
*/ static void tx_reply_iproto_error(struct cmsg *m) { struct iproto_msg *msg = tx_accept_msg(m); struct obuf *out = msg->connection->tx.p_obuf; iproto_reply_error(out, diag_last_error(&msg->diag), msg->header.sync, ::schema_version); iproto_wpos_create(&msg->wpos, out); } /** Inject a short delay on tx request processing for testing. */ static inline void tx_inject_delay() { ERROR_INJECT(ERRINJ_IPROTO_TX_DELAY, { if (rand() % 100 < 10) fiber_sleep(0.001); }); } static void tx_process1(struct cmsg *m) { struct iproto_msg *msg = tx_accept_msg(m); tx_fiber_init(msg->connection->session, msg->header.sync); if (tx_check_schema(msg->header.schema_version)) goto error; struct tuple *tuple; struct obuf_svp svp; struct obuf *out; tx_inject_delay(); if (box_process1(&msg->dml, &tuple) != 0) goto error; out = msg->connection->tx.p_obuf; if (iproto_prepare_select(out, &svp) != 0) goto error; if (tuple && tuple_to_obuf(tuple, out)) goto error; iproto_reply_select(out, &svp, msg->header.sync, ::schema_version, tuple != 0); iproto_wpos_create(&msg->wpos, out); return; error: tx_reply_error(msg); } static void tx_process_select(struct cmsg *m) { struct iproto_msg *msg = tx_accept_msg(m); struct obuf *out; struct obuf_svp svp; struct port port; int count; int rc; struct request *req = &msg->dml; tx_fiber_init(msg->connection->session, msg->header.sync); if (tx_check_schema(msg->header.schema_version)) goto error; tx_inject_delay(); rc = box_select(req->space_id, req->index_id, req->iterator, req->offset, req->limit, req->key, req->key_end, &port); if (rc < 0) goto error; out = msg->connection->tx.p_obuf; if (iproto_prepare_select(out, &svp) != 0) { port_destroy(&port); goto error; } /* * SELECT output format has not changed since Tarantool 1.6 */ count = port_dump_16(&port, out); port_destroy(&port); if (count < 0) { /* Discard the prepared select. 
*/ obuf_rollback_to_svp(out, &svp); goto error; } iproto_reply_select(out, &svp, msg->header.sync, ::schema_version, count); iproto_wpos_create(&msg->wpos, out); return; error: tx_reply_error(msg); } static void tx_process_call_on_yield(struct trigger *trigger, void *event) { (void)event; struct iproto_msg *msg = (struct iproto_msg *)trigger->data; TRASH(&msg->call); tx_discard_input(msg); trigger_clear(trigger); } static void tx_process_call(struct cmsg *m) { struct iproto_msg *msg = tx_accept_msg(m); tx_fiber_init(msg->connection->session, msg->header.sync); if (tx_check_schema(msg->header.schema_version)) goto error; /* * CALL/EVAL should copy its arguments so we can discard * input on yield to avoid stalling other connections by * a long polling request. */ struct trigger fiber_on_yield; trigger_create(&fiber_on_yield, tx_process_call_on_yield, msg, NULL); trigger_add(&fiber()->on_yield, &fiber_on_yield); int rc; struct port port; switch (msg->header.type) { case IPROTO_CALL: case IPROTO_CALL_16: rc = box_process_call(&msg->call, &port); break; case IPROTO_EVAL: rc = box_process_eval(&msg->call, &port); break; default: unreachable(); } trigger_clear(&fiber_on_yield); if (rc != 0) goto error; /* * Add all elements returned by the function to iproto. * * To allow clients to understand a complex return from * a procedure, we are compatible with SELECT protocol, * and return the number of return values first, and * then each return value as a tuple. * * (!) Please note that a save point for output buffer * must be taken only after finishing executing of Lua * function because Lua can yield and leave the * buffer in inconsistent state (a parallel request * from the same connection will break the protocol). 
*/ int count; struct obuf *out; struct obuf_svp svp; out = msg->connection->tx.p_obuf; if (iproto_prepare_select(out, &svp) != 0) { port_destroy(&port); goto error; } if (msg->header.type == IPROTO_CALL_16) count = port_dump_16(&port, out); else count = port_dump(&port, out); port_destroy(&port); if (count < 0) { obuf_rollback_to_svp(out, &svp); goto error; } iproto_reply_select(out, &svp, msg->header.sync, ::schema_version, count); iproto_wpos_create(&msg->wpos, out); return; error: tx_reply_error(msg); } static void tx_process_misc(struct cmsg *m) { struct iproto_msg *msg = tx_accept_msg(m); struct obuf *out = msg->connection->tx.p_obuf; tx_fiber_init(msg->connection->session, msg->header.sync); if (tx_check_schema(msg->header.schema_version)) goto error; try { switch (msg->header.type) { case IPROTO_AUTH: box_process_auth(&msg->auth); iproto_reply_ok_xc(out, msg->header.sync, ::schema_version); break; case IPROTO_PING: iproto_reply_ok_xc(out, msg->header.sync, ::schema_version); break; case IPROTO_REQUEST_VOTE: iproto_reply_request_vote_xc(out, msg->header.sync, ::schema_version, &replicaset.vclock, cfg_geti("read_only")); break; default: unreachable(); } iproto_wpos_create(&msg->wpos, out); } catch (Exception *e) { tx_reply_error(msg); } return; error: tx_reply_error(msg); } static void tx_process_join_subscribe(struct cmsg *m) { struct iproto_msg *msg = tx_accept_msg(m); struct iproto_connection *con = msg->connection; tx_fiber_init(con->session, msg->header.sync); try { switch (msg->header.type) { case IPROTO_JOIN: /* * As soon as box_process_subscribe() returns * the lambda in the beginning of the block * will re-activate the watchers for us. */ box_process_join(&con->input, &msg->header); break; case IPROTO_SUBSCRIBE: /* * Subscribe never returns - unless there * is an error/exception. In that case * the write watcher will be re-activated * the same way as for JOIN. 
*/ box_process_subscribe(&con->input, &msg->header); break; default: unreachable(); } } catch (SocketError *e) { throw; /* don't write error response to prevent SIGPIPE */ } catch (Exception *e) { iproto_write_error(con->input.fd, e, ::schema_version, msg->header.sync); } } static void net_send_msg(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; if (msg->len != 0) { /* Discard request (see iproto_enqueue_batch()). */ msg->p_ibuf->rpos += msg->len; } else { /* Already discarded by net_discard_input(). */ assert(con->long_poll_requests > 0); con->long_poll_requests--; } con->wend = msg->wpos; if (evio_has_fd(&con->output)) { if (! ev_is_active(&con->output)) ev_feed_event(con->loop, &con->output, EV_WRITE); } else if (iproto_connection_is_idle(con)) { iproto_connection_close(con); } iproto_msg_delete(msg); } /** * Complete sending an iproto error: * recycle the error object and flush output. */ static void net_send_error(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; /* Recycle the exception. */ diag_move(&msg->diag, &fiber()->diag); net_send_msg(m); } static void net_end_join(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; msg->p_ibuf->rpos += msg->len; iproto_msg_delete(msg); assert(! ev_is_active(&con->input)); /* * Enqueue any messages if they are in the readahead * queue. Will simply start input otherwise. */ iproto_enqueue_batch(con, msg->p_ibuf); } static void net_end_subscribe(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; msg->p_ibuf->rpos += msg->len; iproto_msg_delete(msg); assert(! ev_is_active(&con->input)); iproto_connection_close(con); } /** * Handshake a connection: invoke the on-connect trigger * and possibly authenticate. Try to send the client an error * upon a failure. 
*/ static void tx_process_connect(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; struct obuf *out = msg->connection->tx.p_obuf; try { /* connect. */ con->session = session_create(con->input.fd, SESSION_TYPE_BINARY); if (con->session == NULL) diag_raise(); tx_fiber_init(con->session, 0); static __thread char greeting[IPROTO_GREETING_SIZE]; /* TODO: dirty read from tx thread */ struct tt_uuid uuid = INSTANCE_UUID; greeting_encode(greeting, tarantool_version_id(), &uuid, con->session->salt, SESSION_SEED_SIZE); obuf_dup_xc(out, greeting, IPROTO_GREETING_SIZE); if (! rlist_empty(&session_on_connect)) { if (session_run_on_connect_triggers(con->session) != 0) diag_raise(); } iproto_wpos_create(&msg->wpos, out); } catch (Exception *e) { tx_reply_error(msg); msg->close_connection = true; } } /** * Send a response to connect to the client or close the * connection in case on_connect trigger failed. */ static void net_send_greeting(struct cmsg *m) { struct iproto_msg *msg = (struct iproto_msg *) m; struct iproto_connection *con = msg->connection; if (msg->close_connection) { struct obuf *out = msg->wpos.obuf; try { int64_t nwr = sio_writev(con->output.fd, out->iov, obuf_iovcnt(out)); /* Count statistics */ rmean_collect(rmean_net, IPROTO_SENT, nwr); } catch (Exception *e) { e->log(); } assert(iproto_connection_is_idle(con)); iproto_connection_close(con); iproto_msg_delete(msg); return; } con->wend = msg->wpos; /* * Connect is synchronous, so no one could have been * messing up with the connection while it was in * progress. */ assert(evio_has_fd(&con->output)); /* Handshake OK, start reading input. */ ev_feed_event(con->loop, &con->output, EV_WRITE); iproto_msg_delete(msg); } static const struct cmsg_hop connect_route[] = { { tx_process_connect, &net_pipe }, { net_send_greeting, NULL }, }; /** }}} */ /** * Create a connection and start input. 
*/ static void iproto_on_accept(struct evio_service * /* service */, int fd, struct sockaddr *addr, socklen_t addrlen) { (void) addr; (void) addrlen; struct iproto_connection *con; con = iproto_connection_new(fd); /* * Ignore msg allocation failure - the queue size is * fixed so there is a limited number of msgs in * use, all stored in just a few blocks of the memory pool. */ struct iproto_msg *msg = iproto_msg_new(con); cmsg_init(&msg->base, connect_route); msg->p_ibuf = con->p_ibuf; msg->wpos = con->wpos; msg->close_connection = false; cpipe_push(&tx_pipe, &msg->base); } static struct evio_service binary; /* iproto binary listener */ /** * The network io thread main function: * begin serving the message bus. */ static int net_cord_f(va_list /* ap */) { mempool_create(&iproto_msg_pool, &cord()->slabc, sizeof(struct iproto_msg)); mempool_create(&iproto_connection_pool, &cord()->slabc, sizeof(struct iproto_connection)); evio_service_init(loop(), &binary, "binary", iproto_on_accept, NULL); /* Init statistics counter */ rmean_net = rmean_new(rmean_net_strings, IPROTO_LAST); if (rmean_net == NULL) { tnt_raise(OutOfMemory, sizeof(struct rmean), "rmean", "struct rmean"); } struct cbus_endpoint endpoint; /* Create "net" endpoint. */ cbus_endpoint_create(&endpoint, "net", fiber_schedule_cb, fiber()); /* Create a pipe to "tx" thread. */ cpipe_create(&tx_pipe, "tx"); cpipe_set_max_input(&tx_pipe, IPROTO_MSG_MAX/2); /* Process incomming messages. */ cbus_loop(&endpoint); cpipe_destroy(&tx_pipe); /* * Nothing to do in the fiber so far, the service * will take care of creating events for incoming * connections. */ if (evio_service_is_active(&binary)) evio_service_stop(&binary); rmean_delete(rmean_net); return 0; } /** Initialize the iproto subsystem and start network io thread */ void iproto_init() { slab_cache_create(&net_slabc, &runtime); if (cord_costart(&net_cord, "iproto", net_cord_f, NULL)) panic("failed to initialize iproto thread"); /* Create a pipe to "net" thread. 
 */
	cpipe_create(&net_pipe, "net");
	cpipe_set_max_input(&net_pipe, IPROTO_MSG_MAX/2);
}

/**
 * Since there is no way to "synchronously" change the
 * state of the io thread, to change the listen port
 * we need to bounce a couple of messages to and
 * from this thread.
 */
struct iproto_bind_msg: public cbus_call_msg
{
	/* URI to bind to; when NULL only the active service is stopped. */
	const char *uri;
};

/*
 * cbus callback executed on the "net" endpoint: stop the active
 * binary service (if any) and rebind it to m->uri unless the URI
 * is NULL. Returns 0 on success, -1 on failure.
 */
static int
iproto_do_bind(struct cbus_call_msg *m)
{
	const char *uri = ((struct iproto_bind_msg *) m)->uri;
	try {
		if (evio_service_is_active(&binary))
			evio_service_stop(&binary);
		if (uri != NULL)
			evio_service_bind(&binary, uri);
	} catch (Exception *e) {
		return -1;
	}
	return 0;
}

/*
 * cbus callback executed on the "net" endpoint: start accepting
 * connections on the already-bound binary service.
 */
static int
iproto_do_listen(struct cbus_call_msg *m)
{
	(void) m;
	try {
		if (evio_service_is_active(&binary))
			evio_service_listen(&binary);
	} catch (Exception *e) {
		return -1;
	}
	return 0;
}

/*
 * Ask the network thread, via a cbus round-trip on net_pipe, to
 * rebind the binary port; re-raises the diag error on failure.
 */
void
iproto_bind(const char *uri)
{
	static struct iproto_bind_msg m;
	m.uri = uri;
	if (cbus_call(&net_pipe, &tx_pipe, &m, iproto_do_bind, NULL,
		      TIMEOUT_INFINITY))
		diag_raise();
}

/* Same round-trip as iproto_bind(), but to start listening. */
void
iproto_listen()
{
	/* Declare static to avoid stack corruption on fiber cancel. */
	static struct cbus_call_msg m;
	if (cbus_call(&net_pipe, &tx_pipe, &m, iproto_do_listen, NULL,
		      TIMEOUT_INFINITY))
		diag_raise();
}

/* Sum of the net cord's slab cache and the net slab cache usage. */
size_t
iproto_mem_used(void)
{
	return slab_cache_used(&net_cord.slabc) + slab_cache_used(&net_slabc);
}
tarantool_1.9.1.26.g63eb81e3c/src/box/vy_read_iterator.h0000664000000000000000000001147113306565107021312 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_READ_ITERATOR_H
#define INCLUDES_TARANTOOL_BOX_VY_READ_ITERATOR_H
/*
 * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * 2.
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "iterator_type.h" #include "trivia/util.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Vinyl read iterator. * * Used for executing a SELECT request over a Vinyl index. */ struct vy_read_iterator { /** Index to iterate over. */ struct vy_index *index; /** Active transaction or NULL. */ struct vy_tx *tx; /** Iterator type. */ enum iterator_type iterator_type; /** Search key. */ struct tuple *key; /** Read view the iterator lives in. */ const struct vy_read_view **read_view; /** * Set if the resulting statement needs to be * checked to match the search key. */ bool need_check_eq; /** Set on the first call to vy_read_iterator_next(). */ bool search_started; /** Last statement returned by vy_read_iterator_next(). */ struct tuple *last_stmt; /** * Copy of index->range_tree_version. * Used for detecting range tree changes. */ uint32_t range_tree_version; /** * Copy of index->mem_list_version. * Used for detecting memory level changes. 
*/ uint32_t mem_list_version; /** * Copy of curr_range->version. * Used for detecting changes in the current range. */ uint32_t range_version; /** Range the iterator is currently positioned at. */ struct vy_range *curr_range; /** * Array of merge sources. Sources are sorted by age. * In particular, this means that all mutable sources * come first while all sources that may yield (runs) * go last. */ struct vy_read_src *src; /** Number of elements in the src array. */ uint32_t src_count; /** Maximal capacity of the src array. */ uint32_t src_capacity; /** Index of the current merge source. */ uint32_t curr_src; /** Statement returned by the current merge source. */ struct tuple *curr_stmt; /** Offset of the transaction write set source. */ uint32_t txw_src; /** Offset of the cache source. */ uint32_t cache_src; /** Offset of the first memory source. */ uint32_t mem_src; /** Offset of the first disk source. */ uint32_t disk_src; /** Offset of the first skipped source. */ uint32_t skipped_src; /** * front_id of the current source and all sources * that are on the same key. */ uint32_t front_id; /** * front_id from the previous iteration. */ uint32_t prev_front_id; }; /** * Open the read iterator. * @param itr Read iterator. * @param index Vinyl index to iterate. * @param tx Current transaction, if exists. * @param iterator_type Type of the iterator that determines order * of the iteration. * @param key Key for the iteration. * @param rv Read view. */ void vy_read_iterator_open(struct vy_read_iterator *itr, struct vy_index *index, struct vy_tx *tx, enum iterator_type iterator_type, struct tuple *key, const struct vy_read_view **rv); /** * Get the next statement with another key, or start the iterator, * if it wasn't started. * @param itr Read iterator. * @param[out] result Found statement is stored here. * * @retval 0 Success. * @retval -1 Read error. 
*/ NODISCARD int vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result); /** * Close the iterator and free resources. */ void vy_read_iterator_close(struct vy_read_iterator *itr); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_READ_ITERATOR_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/authentication.h0000664000000000000000000000346113306565107020767 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_AUTHENTICATION_H #define INCLUDES_TARANTOOL_BOX_AUTHENTICATION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /** * State passed to authentication trigger. */ struct on_auth_trigger_ctx { /** Authenticated user name. 
*/ const char *username; /* true if authentication was successful */ bool is_authenticated; }; void authenticate(const char *user_name, uint32_t len, const char *tuple); #endif /* INCLUDES_TARANTOOL_BOX_AUTHENTICATION_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/iproto.h0000664000000000000000000000347313306565107017267 0ustar rootroot#ifndef TARANTOOL_IPROTO_H_INCLUDED #define TARANTOOL_IPROTO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ extern unsigned iproto_readahead; /** * Return size of memory used for storing network buffers. 
*/ size_t iproto_mem_used(void); #if defined(__cplusplus) } /* extern "C" */ void iproto_init(); void iproto_bind(const char *uri); void iproto_listen(); #endif /* defined(__cplusplus) */ #endif tarantool_1.9.1.26.g63eb81e3c/src/box/coll.c0000664000000000000000000001770113306565107016676 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coll.h" #include "third_party/PMurHash.h" #include "error.h" #include "diag.h" #include #include enum { MAX_HASH_BUFFER = 1024, MAX_LOCALE = 1024, }; /** * Compare two string using ICU collation. 
 */
static int
coll_icu_cmp(const char *s, size_t slen, const char *t, size_t tlen,
	     const struct coll *coll)
{
	assert(coll->icu.collator != NULL);
	UErrorCode status = U_ZERO_ERROR;
#ifdef HAVE_ICU_STRCOLLUTF8
	/* Compare the UTF-8 strings directly when ICU supports it. */
	UCollationResult result = ucol_strcollUTF8(coll->icu.collator, s,
						   slen, t, tlen, &status);
#else
	/* Older ICU: go through character iterators instead. */
	UCharIterator s_iter, t_iter;
	uiter_setUTF8(&s_iter, s, slen);
	uiter_setUTF8(&t_iter, t, tlen);
	UCollationResult result = ucol_strcollIter(coll->icu.collator,
						   &s_iter, &t_iter, &status);
#endif
	assert(!U_FAILURE(status));
	return (int)result;
}

/**
 * Get a hash of a string using ICU collation.
 * Streams successive sort-key chunks into the PMurHash32 state
 * (*ph / *pcarry) and returns the total number of sort-key bytes
 * that were hashed.
 */
static uint32_t
coll_icu_hash(const char *s, size_t s_len, uint32_t *ph, uint32_t *pcarry,
	      struct coll *coll)
{
	uint32_t total_size = 0;
	UCharIterator itr;
	uiter_setUTF8(&itr, s, s_len);
	uint8_t buf[MAX_HASH_BUFFER];
	/* ucol_nextSortKeyPart() iteration state, zeroed for the start. */
	uint32_t state[2] = {0, 0};
	UErrorCode status = U_ZERO_ERROR;
	while (true) {
		int32_t got = ucol_nextSortKeyPart(coll->icu.collator, &itr,
						   state, buf,
						   MAX_HASH_BUFFER, &status);
		/*
		 * NOTE(review): status is never inspected here; on an
		 * ICU failure `got` is expected to fall below the
		 * buffer size and end the loop — confirm this is the
		 * intended error handling.
		 */
		PMurHash32_Process(ph, pcarry, buf, got);
		total_size += got;
		/* A short chunk means the sort key is exhausted. */
		if (got < MAX_HASH_BUFFER)
			break;
	}
	return total_size;
}

/**
 * Set up ICU collator and init cmp and hash members of collation.
 * @param coll - collation to set up.
 * @param def - collation definition.
 * @return 0 on success, -1 on error.
*/ static int coll_icu_init_cmp(struct coll *coll, const struct coll_def *def) { if (coll->icu.collator != NULL) { ucol_close(coll->icu.collator); coll->icu.collator = NULL; } if (def->locale_len >= MAX_LOCALE) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "too long locale"); return -1; } char locale[MAX_LOCALE]; memcpy(locale, def->locale, def->locale_len); locale[def->locale_len] = '\0'; UErrorCode status = U_ZERO_ERROR; struct UCollator *collator = ucol_open(locale, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, u_errorName(status)); return -1; } coll->icu.collator = collator; if (def->icu.french_collation != COLL_ICU_DEFAULT) { enum coll_icu_on_off w = def->icu.french_collation; UColAttributeValue v = w == COLL_ICU_ON ? UCOL_ON : w == COLL_ICU_OFF ? UCOL_OFF : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_FRENCH_COLLATION, v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set french_collation"); return -1; } } if (def->icu.alternate_handling != COLL_ICU_AH_DEFAULT) { enum coll_icu_alternate_handling w = def->icu.alternate_handling; UColAttributeValue v = w == COLL_ICU_AH_NON_IGNORABLE ? UCOL_NON_IGNORABLE : w == COLL_ICU_AH_SHIFTED ? UCOL_SHIFTED : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_ALTERNATE_HANDLING, v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set alternate_handling"); return -1; } } if (def->icu.case_first != COLL_ICU_CF_DEFAULT) { enum coll_icu_case_first w = def->icu.case_first; UColAttributeValue v = w == COLL_ICU_CF_OFF ? UCOL_OFF : w == COLL_ICU_CF_UPPER_FIRST ? UCOL_UPPER_FIRST : w == COLL_ICU_CF_LOWER_FIRST ? 
UCOL_LOWER_FIRST : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_CASE_FIRST, v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set case_first"); return -1; } } if (def->icu.case_level != COLL_ICU_DEFAULT) { enum coll_icu_on_off w = def->icu.case_level; UColAttributeValue v = w == COLL_ICU_ON ? UCOL_ON : w == COLL_ICU_OFF ? UCOL_OFF : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_CASE_LEVEL , v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set case_level"); return -1; } } if (def->icu.normalization_mode != COLL_ICU_DEFAULT) { enum coll_icu_on_off w = def->icu.normalization_mode; UColAttributeValue v = w == COLL_ICU_ON ? UCOL_ON : w == COLL_ICU_OFF ? UCOL_OFF : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_NORMALIZATION_MODE, v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set normalization_mode"); return -1; } } if (def->icu.strength != COLL_ICU_STRENGTH_DEFAULT) { enum coll_icu_strength w = def->icu.strength; UColAttributeValue v = w == COLL_ICU_STRENGTH_PRIMARY ? UCOL_PRIMARY : w == COLL_ICU_STRENGTH_SECONDARY ? UCOL_SECONDARY : w == COLL_ICU_STRENGTH_TERTIARY ? UCOL_TERTIARY : w == COLL_ICU_STRENGTH_QUATERNARY ? UCOL_QUATERNARY : w == COLL_ICU_STRENGTH_IDENTICAL ? UCOL_IDENTICAL : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_STRENGTH, v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set strength"); return -1; } } if (def->icu.numeric_collation != COLL_ICU_DEFAULT) { enum coll_icu_on_off w = def->icu.numeric_collation; UColAttributeValue v = w == COLL_ICU_ON ? UCOL_ON : w == COLL_ICU_OFF ? 
UCOL_OFF : UCOL_DEFAULT; ucol_setAttribute(collator, UCOL_NUMERIC_COLLATION, v, &status); if (U_FAILURE(status)) { diag_set(ClientError, ER_CANT_CREATE_COLLATION, "failed to set numeric_collation"); return -1; } } coll->cmp = coll_icu_cmp; coll->hash = coll_icu_hash; return 0; } /** * Destroy ICU collation. */ static void coll_icu_destroy(struct coll *coll) { if (coll->icu.collator != NULL) ucol_close(coll->icu.collator); } /** * Create a collation by definition. * @param def - collation definition. * @return - the collation OR NULL on memory error (diag is set). */ struct coll * coll_new(const struct coll_def *def) { assert(def->type == COLL_TYPE_ICU); /* no more types are implemented yet */ size_t total_len = sizeof(struct coll) + def->name_len + 1; struct coll *coll = (struct coll *)calloc(1, total_len); if (coll == NULL) { diag_set(OutOfMemory, total_len, "malloc", "struct coll"); return NULL; } coll->id = def->id; coll->owner_id = def->owner_id; coll->type = def->type; coll->name_len = def->name_len; memcpy(coll->name, def->name, def->name_len); coll->name[coll->name_len] = 0; if (coll_icu_init_cmp(coll, def) != 0) { free(coll); return NULL; } return coll; } /** * Delete a collation. * @param cool - collation to delete. */ void coll_delete(struct coll *coll) { assert(coll->type == COLL_TYPE_ICU); /* no more types are implemented yet */ coll_icu_destroy(coll); free(coll); } tarantool_1.9.1.26.g63eb81e3c/src/box/sysview_engine.c0000664000000000000000000002326313306565107021003 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "sysview_engine.h" #include "sysview_index.h" #include "schema.h" #include "space.h" static void sysview_space_destroy(struct space *space) { free(space); } static size_t sysview_space_bsize(struct space *space) { (void)space; return 0; } static int sysview_space_apply_initial_join_row(struct space *space, struct request *request) { (void)space; (void)request; unreachable(); return 0; } static int sysview_space_execute_replace(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { (void)txn; (void)request; (void)result; diag_set(ClientError, ER_VIEW_IS_RO, space->def->name); return -1; } static int sysview_space_execute_delete(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { (void)txn; (void)request; (void)result; diag_set(ClientError, ER_VIEW_IS_RO, space->def->name); return -1; } static int sysview_space_execute_update(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { (void)txn; (void)request; (void)result; 
diag_set(ClientError, ER_VIEW_IS_RO, space->def->name); return -1; } static int sysview_space_execute_upsert(struct space *space, struct txn *txn, struct request *request) { (void)txn; (void)request; diag_set(ClientError, ER_VIEW_IS_RO, space->def->name); return -1; } static void sysview_init_system_space(struct space *space) { (void)space; unreachable(); } static int sysview_space_check_index_def(struct space *space, struct index_def *index_def) { (void)space; (void)index_def; return 0; } static struct index * sysview_space_create_index(struct space *space, struct index_def *index_def) { struct sysview_engine *sysview = (struct sysview_engine *)space->engine; return (struct index *)sysview_index_new(sysview, index_def, space_name(space)); } static int sysview_space_add_primary_key(struct space *space) { (void)space; return 0; } static void sysview_space_drop_primary_key(struct space *space) { (void)space; } static int sysview_space_build_secondary_key(struct space *old_space, struct space *new_space, struct index *new_index) { (void)old_space; (void)new_space; (void)new_index; return 0; } static int sysview_space_prepare_truncate(struct space *old_space, struct space *new_space) { (void)old_space; (void)new_space; return 0; } static void sysview_space_commit_truncate(struct space *old_space, struct space *new_space) { (void)old_space; (void)new_space; } static int sysview_space_prepare_alter(struct space *old_space, struct space *new_space) { (void)old_space; (void)new_space; return 0; } static void sysview_space_commit_alter(struct space *old_space, struct space *new_space) { (void)old_space; (void)new_space; } static int sysview_space_check_format(struct space *new_space, struct space *old_space) { (void)old_space; (void)new_space; unreachable(); return 0; } static const struct space_vtab sysview_space_vtab = { /* .destroy = */ sysview_space_destroy, /* .bsize = */ sysview_space_bsize, /* .apply_initial_join_row = */ sysview_space_apply_initial_join_row, /* 
.execute_replace = */ sysview_space_execute_replace, /* .execute_delete = */ sysview_space_execute_delete, /* .execute_update = */ sysview_space_execute_update, /* .execute_upsert = */ sysview_space_execute_upsert, /* .init_system_space = */ sysview_init_system_space, /* .check_index_def = */ sysview_space_check_index_def, /* .create_index = */ sysview_space_create_index, /* .add_primary_key = */ sysview_space_add_primary_key, /* .drop_primary_key = */ sysview_space_drop_primary_key, /* .check_format = */ sysview_space_check_format, /* .build_secondary_key = */ sysview_space_build_secondary_key, /* .prepare_truncate = */ sysview_space_prepare_truncate, /* .commit_truncate = */ sysview_space_commit_truncate, /* .prepare_alter = */ sysview_space_prepare_alter, /* .commit_alter = */ sysview_space_commit_alter, }; static void sysview_engine_shutdown(struct engine *engine) { struct sysview_engine *sysview = (struct sysview_engine *)engine; if (mempool_is_initialized(&sysview->iterator_pool)) mempool_destroy(&sysview->iterator_pool); free(engine); } static struct space * sysview_engine_create_space(struct engine *engine, struct space_def *def, struct rlist *key_list) { struct space *space = (struct space *)calloc(1, sizeof(*space)); if (space == NULL) { diag_set(OutOfMemory, sizeof(*space), "malloc", "struct space"); return NULL; } if (space_create(space, engine, &sysview_space_vtab, def, key_list, NULL) != 0) { free(space); return NULL; } return space; } static int sysview_engine_begin(struct engine *engine, struct txn *txn) { (void)engine; (void)txn; return 0; } static int sysview_engine_begin_statement(struct engine *engine, struct txn *txn) { (void)engine; (void)txn; return 0; } static int sysview_engine_prepare(struct engine *engine, struct txn *txn) { (void)engine; (void)txn; return 0; } static void sysview_engine_commit(struct engine *engine, struct txn *txn) { (void)engine; (void)txn; } static void sysview_engine_rollback(struct engine *engine, struct txn *txn) { 
(void)engine; (void)txn; } static void sysview_engine_rollback_statement(struct engine *engine, struct txn *txn, struct txn_stmt *stmt) { (void)engine; (void)txn; (void)stmt; } static int sysview_engine_bootstrap(struct engine *engine) { (void)engine; return 0; } static int sysview_engine_begin_initial_recovery(struct engine *engine, const struct vclock *vclock) { (void)engine; (void)vclock; return 0; } static int sysview_engine_begin_final_recovery(struct engine *engine) { (void)engine; return 0; } static int sysview_engine_end_recovery(struct engine *engine) { (void)engine; return 0; } static int sysview_engine_join(struct engine *engine, struct vclock *vclock, struct xstream *stream) { (void)engine; (void)vclock; (void)stream; return 0; } static int sysview_engine_begin_checkpoint(struct engine *engine) { (void)engine; return 0; } static int sysview_engine_wait_checkpoint(struct engine *engine, struct vclock *vclock) { (void)engine; (void)vclock; return 0; } static void sysview_engine_commit_checkpoint(struct engine *engine, struct vclock *vclock) { (void)engine; (void)vclock; } static void sysview_engine_abort_checkpoint(struct engine *engine) { (void)engine; } static int sysview_engine_collect_garbage(struct engine *engine, int64_t lsn) { (void)engine; (void)lsn; return 0; } static int sysview_engine_backup(struct engine *engine, struct vclock *vclock, engine_backup_cb cb, void *cb_arg) { (void)engine; (void)vclock; (void)cb; (void)cb_arg; return 0; } static void sysview_engine_memory_stat(struct engine *engine, struct engine_memory_stat *stat) { (void)engine; (void)stat; } static int sysview_engine_check_space_def(struct space_def *def) { (void)def; return 0; } static const struct engine_vtab sysview_engine_vtab = { /* .shutdown = */ sysview_engine_shutdown, /* .create_space = */ sysview_engine_create_space, /* .join = */ sysview_engine_join, /* .begin = */ sysview_engine_begin, /* .begin_statement = */ sysview_engine_begin_statement, /* .prepare = */ 
sysview_engine_prepare, /* .commit = */ sysview_engine_commit, /* .rollback_statement = */ sysview_engine_rollback_statement, /* .rollback = */ sysview_engine_rollback, /* .bootstrap = */ sysview_engine_bootstrap, /* .begin_initial_recovery = */ sysview_engine_begin_initial_recovery, /* .begin_final_recovery = */ sysview_engine_begin_final_recovery, /* .end_recovery = */ sysview_engine_end_recovery, /* .begin_checkpoint = */ sysview_engine_begin_checkpoint, /* .wait_checkpoint = */ sysview_engine_wait_checkpoint, /* .commit_checkpoint = */ sysview_engine_commit_checkpoint, /* .abort_checkpoint = */ sysview_engine_abort_checkpoint, /* .collect_garbage = */ sysview_engine_collect_garbage, /* .backup = */ sysview_engine_backup, /* .memory_stat = */ sysview_engine_memory_stat, /* .check_space_def = */ sysview_engine_check_space_def, }; struct sysview_engine * sysview_engine_new(void) { struct sysview_engine *sysview = calloc(1, sizeof(*sysview)); if (sysview == NULL) { diag_set(OutOfMemory, sizeof(*sysview), "malloc", "struct sysview_engine"); return NULL; } sysview->base.vtab = &sysview_engine_vtab; sysview->base.name = "sysview"; return sysview; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_stat.h0000664000000000000000000001562013306565107017441 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_STAT_H #define INCLUDES_TARANTOOL_BOX_VY_STAT_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "latency.h" #include "tuple.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Used for accounting statements stored in memory. */ struct vy_stmt_counter { /** Number of statements. */ int64_t rows; /** Size, in bytes. */ int64_t bytes; }; /** Used for accounting statements stored on disk. */ struct vy_disk_stmt_counter { /** Number of statements. */ int64_t rows; /** Size when uncompressed, in bytes. */ int64_t bytes; /** Size when compressed, in bytes */ int64_t bytes_compressed; /** Number of pages. */ int64_t pages; }; /** Memory iterator statistics. */ struct vy_mem_iterator_stat { /** Number of lookups in the memory tree. */ int64_t lookup; /** Number of statements returned by the iterator. */ struct vy_stmt_counter get; }; /** Run iterator statistics. */ struct vy_run_iterator_stat { /** Number of lookups in the page index. */ int64_t lookup; /** Number of statements returned by the iterator. */ struct vy_stmt_counter get; /** * Number of times the bloom filter allowed to * avoid a disk read. */ int64_t bloom_hit; /** * Number of times the bloom filter failed to * prevent a disk read. */ int64_t bloom_miss; /** * Number of statements actually read from the disk. 
* It may be greater than the number of statements * returned by the iterator, because of page granularity * of disk reads. */ struct vy_disk_stmt_counter read; }; /** TX write set iterator statistics. */ struct vy_txw_iterator_stat { /** Number of lookups in the write set. */ int64_t lookup; /** Number of statements returned by the iterator. */ struct vy_stmt_counter get; }; /** Dump/compaction statistics. */ struct vy_compact_stat { int32_t count; /** Number of input statements. */ struct vy_stmt_counter in; /** Number of output statements. */ struct vy_stmt_counter out; }; /** Vinyl index statistics. */ struct vy_index_stat { /** Number of lookups in the index. */ int64_t lookup; /** Number of statements read from this index. */ struct vy_stmt_counter get; /** Number of statements written to this index. */ struct vy_stmt_counter put; /** Read latency. */ struct latency latency; /** Upsert statistics. */ struct { /** How many upsert chains have been squashed. */ int64_t squashed; /** How many upserts have been applied on read. */ int64_t applied; } upsert; /** Memory related statistics. */ struct { /** Number of statements stored in memory. */ struct vy_stmt_counter count; /** Memory iterator statistics. */ struct vy_mem_iterator_stat iterator; } memory; /** Disk related statistics. */ struct { /** Number of statements stored on disk. */ struct vy_disk_stmt_counter count; /** Run iterator statistics. */ struct vy_run_iterator_stat iterator; /** Dump statistics. */ struct vy_compact_stat dump; /** Compaction statistics. */ struct vy_compact_stat compact; } disk; /** TX write set statistics. */ struct { /** Number of statements in the write set. */ struct vy_stmt_counter count; /** TX write set iterator statistics. */ struct vy_txw_iterator_stat iterator; } txw; }; /** Tuple cache statistics. */ struct vy_cache_stat { /** Number of statements in the cache. */ struct vy_stmt_counter count; /** Number of lookups in the cache. 
*/ int64_t lookup; /** Number of reads from the cache. */ struct vy_stmt_counter get; /** Number of writes to the cache. */ struct vy_stmt_counter put; /** * Number of statements removed from the cache * due to overwrite. */ struct vy_stmt_counter invalidate; /** * Number of statements removed from the cache * due to memory shortage. */ struct vy_stmt_counter evict; }; /** Transaction statistics. */ struct vy_tx_stat { /** Number of committed transactions. */ int64_t commit; /** Number of rolled back transactions. */ int64_t rollback; /** Number of transactions aborted on conflict. */ int64_t conflict; }; static inline int vy_index_stat_create(struct vy_index_stat *stat) { return latency_create(&stat->latency); } static inline void vy_index_stat_destroy(struct vy_index_stat *stat) { latency_destroy(&stat->latency); } static inline void vy_stmt_counter_acct_tuple(struct vy_stmt_counter *c, const struct tuple *tuple) { c->rows++; c->bytes += tuple_size(tuple); } static inline void vy_stmt_counter_unacct_tuple(struct vy_stmt_counter *c, const struct tuple *tuple) { c->rows--; c->bytes -= tuple_size(tuple); } static inline void vy_stmt_counter_add(struct vy_stmt_counter *c1, const struct vy_stmt_counter *c2) { c1->rows += c2->rows; c1->bytes += c2->bytes; } static inline void vy_stmt_counter_sub(struct vy_stmt_counter *c1, const struct vy_stmt_counter *c2) { c1->rows -= c2->rows; c1->bytes -= c2->bytes; } static inline void vy_stmt_counter_add_disk(struct vy_stmt_counter *c1, const struct vy_disk_stmt_counter *c2) { c1->rows += c2->rows; c1->bytes += c2->bytes; } static inline void vy_disk_stmt_counter_add(struct vy_disk_stmt_counter *c1, const struct vy_disk_stmt_counter *c2) { c1->rows += c2->rows; c1->bytes += c2->bytes; c1->bytes_compressed += c2->bytes_compressed; c1->pages += c2->pages; } static inline void vy_disk_stmt_counter_sub(struct vy_disk_stmt_counter *c1, const struct vy_disk_stmt_counter *c2) { c1->rows -= c2->rows; c1->bytes -= c2->bytes; 
c1->bytes_compressed -= c2->bytes_compressed; c1->pages -= c2->pages; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_STAT_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_tuple.h0000664000000000000000000000552013306565107020311 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_MEMTX_TUPLE_H #define INCLUDES_TARANTOOL_BOX_MEMTX_TUPLE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "diag.h" #include "tuple_format.h" #include "tuple.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Memtx tuple allocator, available to statistics. 
*/ extern struct small_alloc memtx_alloc; /** * Initialize memtx_tuple library */ void memtx_tuple_init(uint64_t tuple_arena_max_size, uint32_t objsize_min, float alloc_factor); /** * Cleanup memtx_tuple library */ void memtx_tuple_free(void); /** Create a tuple in the memtx engine format. @sa tuple_new(). */ struct tuple * memtx_tuple_new(struct tuple_format *format, const char *data, const char *end); /** * Free the tuple of a memtx space. * @pre tuple->refs == 0 */ void memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple); /** Maximal allowed tuple size (box.cfg.memtx_max_tuple_size) */ extern size_t memtx_max_tuple_size; /** tuple format vtab for memtx engine. */ extern struct tuple_format_vtab memtx_tuple_format_vtab; void memtx_tuple_begin_snapshot(); void memtx_tuple_end_snapshot(); #if defined(__cplusplus) } /** * Create a tuple in the memtx engine format. Throw an exception * if an error occured. @sa memtx_tuple_new(). */ static inline struct tuple * memtx_tuple_new_xc(struct tuple_format *format, const char *data, const char *end) { struct tuple *res = memtx_tuple_new(format, data, end); if (res == NULL) diag_raise(); return res; } #endif /* defined(__cplusplus) */ #endif tarantool_1.9.1.26.g63eb81e3c/src/box/identifier.h0000664000000000000000000000450013306565107020065 0ustar rootroot#ifndef TARANTOOL_BOX_IDENTIFIER_H_INCLUDED #define TARANTOOL_BOX_IDENTIFIER_H_INCLUDED /* * Copyright 2010-2018, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include #include "error.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Check object identifier for invalid symbols. * The function checks the str for containing * printable characters only. * * @retval 0 success * @retval -1 error, diagnostics area is set */ int identifier_check(const char *str, size_t str_len); /** * Init identifier check mechanism. * This function allocates necessary for icu structures. */ void identifier_init(); /** * Clean icu structures. */ void identifier_destroy(); #if defined(__cplusplus) } /* extern "C" */ /** * Throw an error if identifier is not valid. */ static inline void identifier_check_xc(const char *str, size_t str_len) { if (identifier_check(str, str_len)) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_IDENTIFIER_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_stmt_stream.h0000664000000000000000000000475513306560010021023 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_STMT_STREAM_H #define INCLUDES_TARANTOOL_BOX_VY_STMT_STREAM_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct tuple; /** * The stream is a very simple iterator (generally over a mem or a run) * that output all the tuples on increasing order. */ struct vy_stmt_stream; /** * Start streaming */ typedef NODISCARD int (*vy_stream_start_f)(struct vy_stmt_stream *virt_stream); /** * Get next tuple from a stream. */ typedef NODISCARD int (*vy_stream_next_f)(struct vy_stmt_stream *virt_stream, struct tuple **ret); /** * Close the stream. */ typedef void (*vy_stream_close_f)(struct vy_stmt_stream *virt_stream); /** * The interface description for streams over run and mem. 
*/ struct vy_stmt_stream_iface { vy_stream_start_f start; vy_stream_next_f next; vy_stream_close_f stop; vy_stream_close_f close; }; /** * Common interface for streams over run and mem. */ struct vy_stmt_stream { const struct vy_stmt_stream_iface *iface; }; #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_STMT_STREAM_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/iterator_type.h0000664000000000000000000000776313306560010020637 0ustar rootroot#ifndef TARANTOOL_BOX_ITERATOR_TYPE_H_INCLUDED #define TARANTOOL_BOX_ITERATOR_TYPE_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** \cond public */ /** * Controls how to iterate over tuples in an index. * Different index types support different iterator types. * For example, one can start iteration from a particular value * (request key) and then retrieve all tuples where keys are * greater or equal (= GE) to this key. * * If iterator type is not supported by the selected index type, * iterator constructor must fail with ER_UNSUPPORTED. To be * selectable for primary key, an index must support at least * ITER_EQ and ITER_GE types. * * NULL value of request key corresponds to the first or last * key in the index, depending on iteration direction. * (first key for GE and GT types, and last key for LE and LT). * Therefore, to iterate over all tuples in an index, one can * use ITER_GE or ITER_LE iteration types with start key equal * to NULL. * For ITER_EQ, the key must not be NULL. */ enum iterator_type { /* ITER_EQ must be the first member for request_create */ ITER_EQ = 0, /* key == x ASC order */ ITER_REQ = 1, /* key == x DESC order */ ITER_ALL = 2, /* all tuples */ ITER_LT = 3, /* key < x */ ITER_LE = 4, /* key <= x */ ITER_GE = 5, /* key >= x */ ITER_GT = 6, /* key > x */ ITER_BITS_ALL_SET = 7, /* all bits from x are set in key */ ITER_BITS_ANY_SET = 8, /* at least one x's bit is set */ ITER_BITS_ALL_NOT_SET = 9, /* all bits are not set */ ITER_OVERLAPS = 10, /* key overlaps x */ ITER_NEIGHBOR = 11, /* tuples in distance ascending order from specified point */ iterator_type_MAX }; /** \endcond public */ extern const char *iterator_type_strs[]; /** * Determine a direction of the given iterator type. * That is -1 for REQ, LT and LE and +1 for all others. */ static inline int iterator_direction(enum iterator_type type) { const unsigned reverse = (1u << ITER_REQ) | (1u << ITER_LT) | (1u << ITER_LE); return (reverse & (1u << type)) ? 
-1 : 1; } static inline bool iterator_type_is_reverse(enum iterator_type type) { return type == ITER_REQ || type == ITER_LT || type == ITER_LE; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_ITERATOR_TYPE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_space.h0000664000000000000000000000562213306565107020256 0ustar rootroot#ifndef TARANTOOL_BOX_MEMTX_SPACE_H_INCLUDED #define TARANTOOL_BOX_MEMTX_SPACE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "space.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct memtx_engine; struct memtx_space { struct space base; /* Number of bytes used in memory by tuples in the space. 
*/ size_t bsize; /** * A pointer to replace function, set to different values * at different stages of recovery. */ int (*replace)(struct space *, struct txn_stmt *, enum dup_replace_mode); }; /** * Change binary size of a space subtracting old tuple's size and * adding new tuple's size. Used also for rollback by swaping old * and new tuple. * * @param space Instance of memtx space. * @param old_tuple Old tuple (replaced or deleted). * @param new_tuple New tuple (inserted). */ void memtx_space_update_bsize(struct space *space, const struct tuple *old_tuple, const struct tuple *new_tuple); int memtx_space_replace_no_keys(struct space *, struct txn_stmt *, enum dup_replace_mode); int memtx_space_replace_build_next(struct space *, struct txn_stmt *, enum dup_replace_mode); int memtx_space_replace_primary_key(struct space *, struct txn_stmt *, enum dup_replace_mode); int memtx_space_replace_all_keys(struct space *, struct txn_stmt *, enum dup_replace_mode); struct space * memtx_space_new(struct memtx_engine *memtx, struct space_def *def, struct rlist *key_list); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_MEMTX_SPACE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/user_def.c0000664000000000000000000000345313306560010017524 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "user_def.h" const char * priv_name(user_access_t access) { static const char *priv_name_strs[] = { "Read", "Write", "Execute", "Session", "Usage", "Create", "Drop", "Alter", "Reference", "Trigger", "Insert", "Update", "Delete", "Grant", "Revoke", }; int bit_no = __builtin_ffs((int) access); if (bit_no > 0 && bit_no <= (int) lengthof(priv_name_strs)) return priv_name_strs[bit_no - 1]; return "Any"; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_cache.c0000664000000000000000000006361313306565107017531 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_cache.h" #include "diag.h" #include "fiber.h" #include "schema_def.h" #ifndef CT_ASSERT_G #define CT_ASSERT_G(e) typedef char CONCAT(__ct_assert_, __LINE__)[(e) ? 1 :-1] #endif CT_ASSERT_G(BOX_INDEX_PART_MAX <= UINT8_MAX); enum { /* Flag in cache entry that means that there are no values in DB * that less than the current and greater than the previous */ VY_CACHE_LEFT_LINKED = 1, /* Flag in cache entry that means that there are no values in DB * that greater than the current and less than the previous */ VY_CACHE_RIGHT_LINKED = 2, /* Max number of deletes that are made by cleanup action per one * cache operation */ VY_CACHE_CLEANUP_MAX_STEPS = 10, }; void vy_cache_env_create(struct vy_cache_env *e, struct slab_cache *slab_cache) { rlist_create(&e->cache_lru); e->mem_used = 0; e->mem_quota = 0; mempool_create(&e->cache_entry_mempool, slab_cache, sizeof(struct vy_cache_entry)); } void vy_cache_env_destroy(struct vy_cache_env *e) { mempool_destroy(&e->cache_entry_mempool); } static inline size_t vy_cache_entry_size(const struct vy_cache_entry *entry) { return sizeof(*entry) + tuple_size(entry->stmt); } static struct vy_cache_entry * vy_cache_entry_new(struct vy_cache_env *env, struct vy_cache *cache, struct tuple *stmt) { 
struct vy_cache_entry *entry = (struct vy_cache_entry *) mempool_alloc(&env->cache_entry_mempool); if (entry == NULL) return NULL; tuple_ref(stmt); entry->cache = cache; entry->stmt = stmt; entry->flags = 0; entry->left_boundary_level = cache->cmp_def->part_count; entry->right_boundary_level = cache->cmp_def->part_count; rlist_add(&env->cache_lru, &entry->in_lru); env->mem_used += vy_cache_entry_size(entry); vy_stmt_counter_acct_tuple(&cache->stat.count, stmt); return entry; } static void vy_cache_entry_delete(struct vy_cache_env *env, struct vy_cache_entry *entry) { vy_stmt_counter_unacct_tuple(&entry->cache->stat.count, entry->stmt); assert(env->mem_used >= vy_cache_entry_size(entry)); env->mem_used -= vy_cache_entry_size(entry); tuple_unref(entry->stmt); rlist_del(&entry->in_lru); TRASH(entry); mempool_free(&env->cache_entry_mempool, entry); } static void * vy_cache_tree_page_alloc(void *ctx) { struct vy_env *env = (struct vy_env *)ctx; (void)env; void *ret = malloc(VY_CACHE_TREE_EXTENT_SIZE); if (ret == NULL) diag_set(OutOfMemory, VY_CACHE_TREE_EXTENT_SIZE, "malloc", "ret"); return ret; } static void vy_cache_tree_page_free(void *ctx, void *p) { struct vy_env *env = (struct vy_env *)ctx; (void)env; free(p); } void vy_cache_create(struct vy_cache *cache, struct vy_cache_env *env, struct key_def *cmp_def) { cache->env = env; cache->cmp_def = cmp_def; cache->version = 1; vy_cache_tree_create(&cache->cache_tree, cmp_def, vy_cache_tree_page_alloc, vy_cache_tree_page_free, env); } void vy_cache_destroy(struct vy_cache *cache) { struct vy_cache_tree_iterator itr = vy_cache_tree_iterator_first(&cache->cache_tree); while (!vy_cache_tree_iterator_is_invalid(&itr)) { struct vy_cache_entry **entry = vy_cache_tree_iterator_get_elem(&cache->cache_tree, &itr); assert(entry != NULL && *entry != NULL); vy_cache_entry_delete(cache->env, *entry); vy_cache_tree_iterator_next(&cache->cache_tree, &itr); } vy_cache_tree_destroy(&cache->cache_tree); } static void 
vy_cache_gc_step(struct vy_cache_env *env) { struct rlist *lru = &env->cache_lru; struct vy_cache_entry *entry = rlist_last_entry(lru, struct vy_cache_entry, in_lru); struct vy_cache *cache = entry->cache; struct vy_cache_tree *tree = &cache->cache_tree; if (entry->flags & (VY_CACHE_LEFT_LINKED | VY_CACHE_RIGHT_LINKED)) { bool exact; struct vy_cache_tree_iterator itr = vy_cache_tree_lower_bound(tree, entry->stmt, &exact); assert(exact); if (entry->flags & VY_CACHE_LEFT_LINKED) { struct vy_cache_tree_iterator prev = itr; vy_cache_tree_iterator_prev(tree, &prev); struct vy_cache_entry **prev_entry = vy_cache_tree_iterator_get_elem(tree, &prev); assert((*prev_entry)->flags & VY_CACHE_RIGHT_LINKED); (*prev_entry)->flags &= ~VY_CACHE_RIGHT_LINKED; } if (entry->flags & VY_CACHE_RIGHT_LINKED) { struct vy_cache_tree_iterator next = itr; vy_cache_tree_iterator_next(&cache->cache_tree, &next); struct vy_cache_entry **next_entry = vy_cache_tree_iterator_get_elem(tree, &next); assert((*next_entry)->flags & VY_CACHE_LEFT_LINKED); (*next_entry)->flags &= ~VY_CACHE_LEFT_LINKED; } } cache->version++; vy_stmt_counter_acct_tuple(&cache->stat.evict, entry->stmt); vy_cache_tree_delete(&cache->cache_tree, entry); vy_cache_entry_delete(cache->env, entry); } static void vy_cache_gc(struct vy_cache_env *env) { for (uint32_t i = 0; env->mem_used > env->mem_quota && i < VY_CACHE_CLEANUP_MAX_STEPS; i++) { vy_cache_gc_step(env); } } void vy_cache_env_set_quota(struct vy_cache_env *env, size_t quota) { env->mem_quota = quota; while (env->mem_used > env->mem_quota) { vy_cache_gc(env); /* * Make sure we don't block other tx fibers * for too long. */ fiber_sleep(0); } } void vy_cache_add(struct vy_cache *cache, struct tuple *stmt, struct tuple *prev_stmt, const struct tuple *key, enum iterator_type order) { if (cache->env->mem_quota == 0) { /* Cache is disabled. 
*/ return; } /* Delete some entries if quota overused */ vy_cache_gc(cache->env); if (stmt != NULL && vy_stmt_lsn(stmt) == INT64_MAX) { /* Do not store a statement from write set of a tx */ return; } /* The case of the first or the last result in key+order query */ bool is_boundary = (stmt != NULL) != (prev_stmt != NULL); if (prev_stmt != NULL && vy_stmt_lsn(prev_stmt) == INT64_MAX) { /* Previous statement is from tx write set, can't store it */ prev_stmt = NULL; } if (prev_stmt == NULL && stmt == NULL) { /* Do not store empty ranges */ return; } int direction = iterator_direction(order); /** * Let's determine boundary_level (left/right) of the new record * in cache to be inserted. */ uint8_t boundary_level = cache->cmp_def->part_count; if (stmt != NULL) { if (is_boundary) { /** * That means that the stmt is the first in a result. * Regardless of order, the statement is the first in * sequence of statements that is equal to the key. */ boundary_level = tuple_field_count(key); } } else { assert(prev_stmt != NULL); if (order == ITER_EQ || order == ITER_REQ) { /* that is the last statement that is equal to key */ boundary_level = tuple_field_count(key); } else { /* that is the last statement */ boundary_level = 0; } /** * That means that the search was ended, and prev_stmt was * the last statement of the result. It is equivalent to * first found statement with a reverse order. Let's transform * to the equivalent case in order of further simplification. 
*/ direction = -direction; stmt = prev_stmt; prev_stmt = NULL; } TRASH(&order); assert(vy_stmt_type(stmt) == IPROTO_INSERT || vy_stmt_type(stmt) == IPROTO_REPLACE); assert(prev_stmt == NULL || vy_stmt_type(prev_stmt) == IPROTO_INSERT || vy_stmt_type(prev_stmt) == IPROTO_REPLACE); cache->version++; /* Insert/replace new entry to the tree */ struct vy_cache_entry *entry = vy_cache_entry_new(cache->env, cache, stmt); if (entry == NULL) { /* memory error, let's live without a cache */ return; } struct vy_cache_entry *replaced = NULL; struct vy_cache_tree_iterator inserted; if (vy_cache_tree_insert_get_iterator(&cache->cache_tree, entry, &replaced, &inserted) != 0) { /* memory error, let's live without a cache */ vy_cache_entry_delete(cache->env, entry); return; } assert(!vy_cache_tree_iterator_is_invalid(&inserted)); if (replaced != NULL) { entry->flags = replaced->flags; entry->left_boundary_level = replaced->left_boundary_level; entry->right_boundary_level = replaced->right_boundary_level; vy_cache_entry_delete(cache->env, replaced); } if (direction > 0 && boundary_level < entry->left_boundary_level) entry->left_boundary_level = boundary_level; else if (direction < 0 && boundary_level < entry->right_boundary_level) entry->right_boundary_level = boundary_level; vy_stmt_counter_acct_tuple(&cache->stat.put, stmt); /* Done if it's not a chain */ if (prev_stmt == NULL) return; /* The flag that must be set in the inserted chain entry */ uint32_t flag = direction > 0 ? VY_CACHE_LEFT_LINKED : VY_CACHE_RIGHT_LINKED; #ifndef NDEBUG /** * Usually prev_stmt is already in the cache but there are cases * when it's not (see below). * There must be no entries between (prev_stmt, stmt) interval in * any case. (1) * Farther, if the stmt entry is already linked (in certain direction), * it must be linked with prev_stmt (in that direction). (2) * Let't check (1) and (2) for debug reasons. 
* * There are two cases in which prev_stmt statement is absent * in the cache: * 1) The statement was in prepared state and then it was * committed or rollbacked. * 2) The entry was popped out by vy_cache_gc. * * Note that case when the prev_stmt is owerwritten by other TX * is impossible because this TX would be sent to read view and * wouldn't be able to add anything to the cache. */ if (direction > 0) vy_cache_tree_iterator_prev(&cache->cache_tree, &inserted); else vy_cache_tree_iterator_next(&cache->cache_tree, &inserted); if (!vy_cache_tree_iterator_is_invalid(&inserted)) { struct vy_cache_entry **prev_check_entry = vy_cache_tree_iterator_get_elem(&cache->cache_tree, &inserted); assert(*prev_check_entry != NULL); struct tuple *prev_check_stmt = (*prev_check_entry)->stmt; int cmp = vy_tuple_compare(prev_stmt, prev_check_stmt, cache->cmp_def); if (entry->flags & flag) { /* The found entry must be exactly prev_stmt. (2) */ assert(cmp == 0); } else { /* * The found entry must be exactly prev_stmt or lay * farther than prev_stmt. 
(1) */ assert(cmp * direction >= 0); } } else { /* Cannot be in chain (2) */ assert(!(entry->flags & flag)); } #endif if (entry->flags & flag) return; /* Insert/replace entry with previous statement */ struct vy_cache_entry *prev_entry = vy_cache_entry_new(cache->env, cache, prev_stmt); if (prev_entry == NULL) { /* memory error, let's live without a chain */ return; } replaced = NULL; if (vy_cache_tree_insert(&cache->cache_tree, prev_entry, &replaced)) { /* memory error, let's live without a chain */ vy_cache_entry_delete(cache->env, prev_entry); return; } if (replaced != NULL) { prev_entry->flags = replaced->flags; prev_entry->left_boundary_level = replaced->left_boundary_level; prev_entry->right_boundary_level = replaced->right_boundary_level; vy_cache_entry_delete(cache->env, replaced); } /* Set proper flags */ entry->flags |= flag; /* Set inverted flag in the previous entry */ prev_entry->flags |= (VY_CACHE_LEFT_LINKED | VY_CACHE_RIGHT_LINKED) ^ flag; } struct tuple * vy_cache_get(struct vy_cache *cache, const struct tuple *key) { struct vy_cache_entry **entry = vy_cache_tree_find(&cache->cache_tree, key); if (entry == NULL) return NULL; return (*entry)->stmt; } void vy_cache_on_write(struct vy_cache *cache, const struct tuple *stmt, struct tuple **deleted) { vy_cache_gc(cache->env); bool exact = false; struct vy_cache_tree_iterator itr; itr = vy_cache_tree_lower_bound(&cache->cache_tree, stmt, &exact); struct vy_cache_entry **entry = vy_cache_tree_iterator_get_elem(&cache->cache_tree, &itr); assert(!exact || entry != NULL); /* * There are three cases possible * (1) there's a value in cache that is equal to stmt. * ('exact' == true, 'entry' points the equal value in cache) * (2) there's no value in cache that is equal to stmt, and lower_bound * returned the next record. * ('exact' == false, 'entry' points to the equal value in cache) * (3) there's no value in cache that is equal to stmt, and lower_bound * returned invalid iterator, so there's no bigger value. 
* ('exact' == false, 'entry' == NULL) */ if (vy_stmt_type(stmt) == IPROTO_DELETE && !exact) { /* there was nothing and there is nothing now */ return; } struct vy_cache_tree_iterator prev = itr; vy_cache_tree_iterator_prev(&cache->cache_tree, &prev); struct vy_cache_entry **prev_entry = vy_cache_tree_iterator_get_elem(&cache->cache_tree, &prev); if (entry != NULL && ((*entry)->flags & VY_CACHE_LEFT_LINKED)) { cache->version++; (*entry)->flags &= ~VY_CACHE_LEFT_LINKED; assert((*prev_entry)->flags & VY_CACHE_RIGHT_LINKED); (*prev_entry)->flags &= ~VY_CACHE_RIGHT_LINKED; } if (prev_entry != NULL) { cache->version++; (*prev_entry)->right_boundary_level = cache->cmp_def->part_count; } struct vy_cache_tree_iterator next = itr; vy_cache_tree_iterator_next(&cache->cache_tree, &next); struct vy_cache_entry **next_entry = vy_cache_tree_iterator_get_elem(&cache->cache_tree, &next); if (exact && ((*entry)->flags & VY_CACHE_RIGHT_LINKED)) { cache->version++; (*entry)->flags &= ~VY_CACHE_RIGHT_LINKED; assert((*next_entry)->flags & VY_CACHE_LEFT_LINKED); (*next_entry)->flags &= ~VY_CACHE_LEFT_LINKED; } if (entry && !exact) { cache->version++; (*entry)->left_boundary_level = cache->cmp_def->part_count; } if (exact) { assert(entry != NULL); cache->version++; struct vy_cache_entry *to_delete = *entry; assert(vy_stmt_type(to_delete->stmt) == IPROTO_INSERT || vy_stmt_type(to_delete->stmt) == IPROTO_REPLACE); if (deleted != NULL) { *deleted = to_delete->stmt; tuple_ref(to_delete->stmt); } vy_stmt_counter_acct_tuple(&cache->stat.invalidate, to_delete->stmt); vy_cache_tree_delete(&cache->cache_tree, to_delete); vy_cache_entry_delete(cache->env, to_delete); } } /** * Get a stmt by current position */ static struct tuple * vy_cache_iterator_curr_stmt(struct vy_cache_iterator *itr) { struct vy_cache_tree *tree = &itr->cache->cache_tree; struct vy_cache_entry **entry = vy_cache_tree_iterator_get_elem(tree, &itr->curr_pos); return entry ? 
(*entry)->stmt : NULL; } /** * Determine whether the merge iterator must be stopped or not. * That is made by examining flags of a cache record. * * @param itr - the iterator * @param entry - current record of the cache */ static inline bool vy_cache_iterator_is_stop(struct vy_cache_iterator *itr, struct vy_cache_entry *entry) { uint8_t key_level = tuple_field_count(itr->key); /* select{} is actually an EQ iterator with part_count == 0 */ bool iter_is_eq = itr->iterator_type == ITER_EQ || key_level == 0; if (iterator_direction(itr->iterator_type) > 0) { if (entry->flags & VY_CACHE_LEFT_LINKED) return true; if (iter_is_eq && entry->left_boundary_level <= key_level) return true; } else { if (entry->flags & VY_CACHE_RIGHT_LINKED) return true; if (iter_is_eq && entry->right_boundary_level <= key_level) return true; } return false; } /** * Determine whether the merge iterator must be stopped or not in case when * there are no more values in the cache for given key. * That is made by examining flags of the previous cache record. * * @param itr - the iterator * @param last_entry - the last record from previous step of the iterator */ static inline bool vy_cache_iterator_is_end_stop(struct vy_cache_iterator *itr, struct vy_cache_entry *last_entry) { uint8_t key_level = tuple_field_count(itr->key); /* select{} is actually an EQ iterator with part_count == 0 */ bool iter_is_eq = itr->iterator_type == ITER_EQ || key_level == 0; if (iterator_direction(itr->iterator_type) > 0) { if (last_entry->flags & VY_CACHE_RIGHT_LINKED) return true; if (iter_is_eq && last_entry->right_boundary_level <= key_level) return true; } else { if (last_entry->flags & VY_CACHE_LEFT_LINKED) return true; if (iter_is_eq && last_entry->left_boundary_level <= key_level) return true; } return false; } /** * Make one tree's iterator step from the current position. * Direction of the step depends on the iterator type. * @param itr Iterator to make step. * @param[out] ret Result tuple. 
* * @retval Must a merge_iterator stop on @a ret? * The function is implicitly used by merge_iterator_next_key and * return value is used to determine if the merge_iterator can * return @a ret to a read_iterator immediately, without lookups * in mems and runs. It is possible, when @a ret is a part of * continuous cached tuples chain. In such a case mems or runs can * not contain more suitable tuples. */ static inline bool vy_cache_iterator_step(struct vy_cache_iterator *itr, struct tuple **ret) { *ret = NULL; struct vy_cache_tree *tree = &itr->cache->cache_tree; struct vy_cache_entry *prev_entry = *vy_cache_tree_iterator_get_elem(tree, &itr->curr_pos); if (iterator_direction(itr->iterator_type) > 0) vy_cache_tree_iterator_next(tree, &itr->curr_pos); else vy_cache_tree_iterator_prev(tree, &itr->curr_pos); if (vy_cache_tree_iterator_is_invalid(&itr->curr_pos)) return vy_cache_iterator_is_end_stop(itr, prev_entry); struct vy_cache_entry *entry = *vy_cache_tree_iterator_get_elem(tree, &itr->curr_pos); if (itr->iterator_type == ITER_EQ && vy_stmt_compare(itr->key, entry->stmt, itr->cache->cmp_def)) { return vy_cache_iterator_is_end_stop(itr, prev_entry); } *ret = entry->stmt; return vy_cache_iterator_is_stop(itr, entry); } /** * Skip all statements that are invisible in the read view * associated with the iterator. */ static void vy_cache_iterator_skip_to_read_view(struct vy_cache_iterator *itr, bool *stop) { while (itr->curr_stmt != NULL && vy_stmt_lsn(itr->curr_stmt) > (**itr->read_view).vlsn) { /* * The cache stores the latest tuple of the key, * but there could be older tuples in runs. */ *stop = false; vy_cache_iterator_step(itr, &itr->curr_stmt); } } /** * Position the iterator to the first cache entry satisfying * the search criteria for a given key and direction. 
*/ static void vy_cache_iterator_seek(struct vy_cache_iterator *itr, enum iterator_type iterator_type, const struct tuple *key, struct vy_cache_entry **entry) { struct vy_cache_tree *tree = &itr->cache->cache_tree; *entry = NULL; itr->cache->stat.lookup++; if (tuple_field_count(key) > 0) { bool exact; itr->curr_pos = iterator_type == ITER_EQ || iterator_type == ITER_GE || iterator_type == ITER_LT ? vy_cache_tree_lower_bound(tree, key, &exact) : vy_cache_tree_upper_bound(tree, key, &exact); if (iterator_type == ITER_EQ && !exact) return; } else if (iterator_type == ITER_LE) { itr->curr_pos = vy_cache_tree_invalid_iterator(); } else { assert(iterator_type == ITER_GE); itr->curr_pos = vy_cache_tree_iterator_first(tree); } if (iterator_type == ITER_LT || iterator_type == ITER_LE) vy_cache_tree_iterator_prev(tree, &itr->curr_pos); if (vy_cache_tree_iterator_is_invalid(&itr->curr_pos)) return; *entry = *vy_cache_tree_iterator_get_elem(tree, &itr->curr_pos); } void vy_cache_iterator_next(struct vy_cache_iterator *itr, struct tuple **ret, bool *stop) { *ret = NULL; *stop = false; if (!itr->search_started) { assert(itr->curr_stmt == NULL); itr->search_started = true; itr->version = itr->cache->version; struct vy_cache_entry *entry; vy_cache_iterator_seek(itr, itr->iterator_type, itr->key, &entry); if (entry == NULL) return; itr->curr_stmt = entry->stmt; *stop = vy_cache_iterator_is_stop(itr, entry); } else { assert(itr->version == itr->cache->version); if (itr->curr_stmt == NULL) return; tuple_unref(itr->curr_stmt); *stop = vy_cache_iterator_step(itr, &itr->curr_stmt); } vy_cache_iterator_skip_to_read_view(itr, stop); if (itr->curr_stmt != NULL) { *ret = itr->curr_stmt; tuple_ref(itr->curr_stmt); vy_stmt_counter_acct_tuple(&itr->cache->stat.get, itr->curr_stmt); } } void vy_cache_iterator_skip(struct vy_cache_iterator *itr, const struct tuple *last_stmt, struct tuple **ret, bool *stop) { *ret = NULL; *stop = false; assert(!itr->search_started || itr->version == 
itr->cache->version); /* * Check if the iterator is already positioned * at the statement following last_stmt. */ if (itr->search_started && (itr->curr_stmt == NULL || last_stmt == NULL || iterator_direction(itr->iterator_type) * vy_tuple_compare(itr->curr_stmt, last_stmt, itr->cache->cmp_def) > 0)) { if (itr->curr_stmt == NULL) return; struct vy_cache_tree *tree = &itr->cache->cache_tree; struct vy_cache_entry *entry = *vy_cache_tree_iterator_get_elem(tree, &itr->curr_pos); *ret = itr->curr_stmt; *stop = vy_cache_iterator_is_stop(itr, entry); return; } itr->search_started = true; itr->version = itr->cache->version; if (itr->curr_stmt != NULL) tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = last_stmt; iterator_type = iterator_direction(iterator_type) > 0 ? ITER_GT : ITER_LT; } struct vy_cache_entry *entry; vy_cache_iterator_seek(itr, iterator_type, key, &entry); if (itr->iterator_type == ITER_EQ && last_stmt != NULL && entry != NULL && vy_stmt_compare(itr->key, entry->stmt, itr->cache->cmp_def) != 0) entry = NULL; if (entry != NULL) { *stop = vy_cache_iterator_is_stop(itr, entry); itr->curr_stmt = entry->stmt; } vy_cache_iterator_skip_to_read_view(itr, stop); if (itr->curr_stmt != NULL) { *ret = itr->curr_stmt; tuple_ref(itr->curr_stmt); vy_stmt_counter_acct_tuple(&itr->cache->stat.get, itr->curr_stmt); } } int vy_cache_iterator_restore(struct vy_cache_iterator *itr, const struct tuple *last_stmt, struct tuple **ret, bool *stop) { struct key_def *def = itr->cache->cmp_def; int dir = iterator_direction(itr->iterator_type); if (!itr->search_started || itr->version == itr->cache->version) return 0; itr->version = itr->cache->version; struct tuple *prev_stmt = itr->curr_stmt; if (prev_stmt != NULL) tuple_unref(prev_stmt); const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = 
last_stmt; iterator_type = dir > 0 ? ITER_GT : ITER_LT; } if ((prev_stmt == NULL && itr->iterator_type == ITER_EQ) || (prev_stmt != NULL && prev_stmt != vy_cache_iterator_curr_stmt(itr))) { /* * EQ search ended or the iterator was invalidated. * In either case the best we can do is restart the * search. */ struct vy_cache_entry *entry; vy_cache_iterator_seek(itr, iterator_type, key, &entry); itr->curr_stmt = NULL; if (entry != NULL && itr->iterator_type == ITER_EQ && vy_stmt_compare(itr->key, entry->stmt, def) != 0) entry = NULL; if (entry != NULL) { *stop = vy_cache_iterator_is_stop(itr, entry); itr->curr_stmt = entry->stmt; } vy_cache_iterator_skip_to_read_view(itr, stop); } else { /* * The iterator position is still valid, but new * statements may have appeared between last_stmt * and the current statement. Reposition to the * statement closiest to last_stmt. */ struct vy_cache_tree *tree = &itr->cache->cache_tree; struct vy_cache_tree_iterator pos = itr->curr_pos; bool key_belongs = (iterator_type == ITER_EQ || iterator_type == ITER_GE || iterator_type == ITER_LE); if (prev_stmt == NULL) pos = vy_cache_tree_invalid_iterator(); while (true) { if (dir > 0) vy_cache_tree_iterator_prev(tree, &pos); else vy_cache_tree_iterator_next(tree, &pos); if (vy_cache_tree_iterator_is_invalid(&pos)) break; struct vy_cache_entry *entry = *vy_cache_tree_iterator_get_elem(tree, &pos); int cmp = dir * vy_stmt_compare(entry->stmt, key, def); if (cmp < 0 || (cmp == 0 && !key_belongs)) break; if (vy_stmt_lsn(entry->stmt) <= (**itr->read_view).vlsn) { itr->curr_pos = pos; itr->curr_stmt = entry->stmt; *stop = vy_cache_iterator_is_stop(itr, entry); } if (cmp == 0) break; } } *ret = itr->curr_stmt; if (itr->curr_stmt != NULL) { tuple_ref(itr->curr_stmt); vy_stmt_counter_acct_tuple(&itr->cache->stat.get, itr->curr_stmt); return prev_stmt != itr->curr_stmt; } return 0; } void vy_cache_iterator_close(struct vy_cache_iterator *itr) { if (itr->curr_stmt != NULL) { 
tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; } TRASH(itr); } void vy_cache_iterator_open(struct vy_cache_iterator *itr, struct vy_cache *cache, enum iterator_type iterator_type, const struct tuple *key, const struct vy_read_view **rv) { itr->cache = cache; itr->iterator_type = iterator_type; itr->key = key; itr->read_view = rv; itr->curr_stmt = NULL; itr->curr_pos = vy_cache_tree_invalid_iterator(); itr->version = 0; itr->search_started = false; } tarantool_1.9.1.26.g63eb81e3c/src/box/alter.h0000664000000000000000000000422013306565107017051 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_ALTER_H #define INCLUDES_TARANTOOL_BOX_ALTER_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trigger.h" extern struct trigger alter_space_on_replace_space; extern struct trigger alter_space_on_replace_index; extern struct trigger on_replace_truncate; extern struct trigger on_replace_schema; extern struct trigger on_replace_user; extern struct trigger on_replace_func; extern struct trigger on_replace_collation; extern struct trigger on_replace_priv; extern struct trigger on_replace_cluster; extern struct trigger on_replace_sequence; extern struct trigger on_replace_sequence_data; extern struct trigger on_replace_space_sequence; extern struct trigger on_stmt_begin_space; extern struct trigger on_stmt_begin_index; extern struct trigger on_stmt_begin_truncate; #endif /* INCLUDES_TARANTOOL_BOX_ALTER_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/box.h0000664000000000000000000002553313306565107016544 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_H #define INCLUDES_TARANTOOL_BOX_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /* * Box - data storage (spaces, indexes) and query * processor (INSERT, UPDATE, DELETE, SELECT, Lua) * subsystem of Tarantool. */ struct port; struct request; struct xrow_header; struct obuf; struct ev_io; struct auth_request; /* * Initialize box library * @throws C++ exception */ void box_init(void); /** * Cleanup box library */ void box_free(void); /** * Load configuration for box library. * Panics on error. */ void box_cfg(void); /** * Return true if box has been configured, i.e. box_cfg() was called. */ bool box_is_configured(void); /** * A pthread_atfork() callback for box */ void box_atfork(void); void box_set_ro(bool ro); bool box_is_ro(void); /** * Wait until the instance switches to a desired mode. * \param ro wait read-only if set or read-write if unset * \param timeout max time to wait * \retval -1 timeout or fiber is cancelled * \retval 0 success */ int box_wait_ro(bool ro, double timeout); /** * Switch this instance from 'orphan' to 'running' state. * Called on initial configuration as soon as this instance * synchronizes with enough replicas to form a quorum. */ void box_clear_orphan(void); /** True if snapshot is in progress. */ extern bool box_checkpoint_is_in_progress; /** Incremented with each next snapshot. */ extern uint32_t snapshot_version; /** * Iterate over all spaces and save them to the * snapshot file. 
*/ int box_checkpoint(void); typedef int (*box_backup_cb)(const char *path, void *arg); /** * Start a backup. This function calls @cb for each file that * needs to be backed up to recover from the last checkpoint. * The caller is supposed to call box_backup_stop() after he's * done copying the files. */ int box_backup_start(box_backup_cb cb, void *cb_arg); /** * Finish backup started with box_backup_start(). */ void box_backup_stop(void); /** * Spit out some basic module status (master/slave, etc. */ const char *box_status(void); #if defined(__cplusplus) } /* extern "C" */ void box_process_auth(struct auth_request *request); void box_process_join(struct ev_io *io, struct xrow_header *header); void box_process_subscribe(struct ev_io *io, struct xrow_header *header); /** * Check Lua configuration before initialization or * in case of a configuration change. */ void box_check_config(); void box_bind(void); void box_listen(void); void box_set_replication(void); void box_set_log_level(void); void box_set_log_format(void); void box_set_io_collect_interval(void); void box_set_snap_io_rate_limit(void); void box_set_too_long_threshold(void); void box_set_readahead(void); void box_set_checkpoint_count(void); void box_set_memtx_max_tuple_size(void); void box_set_vinyl_max_tuple_size(void); void box_set_vinyl_cache(void); void box_set_vinyl_timeout(void); void box_set_replication_timeout(void); void box_set_replication_connect_timeout(void); void box_set_replication_connect_quorum(void); extern "C" { #endif /* defined(__cplusplus) */ typedef struct tuple box_tuple_t; /* box_select is private and used only by FFI */ API_EXPORT int box_select(uint32_t space_id, uint32_t index_id, int iterator, uint32_t offset, uint32_t limit, const char *key, const char *key_end, struct port *port); /** \cond public */ /* * Opaque structure passed to the stored C procedure */ typedef struct box_function_ctx box_function_ctx_t; /** * Return a tuple from stored C procedure. 
* * Returned tuple is automatically reference counted by Tarantool. * * \param ctx an opaque structure passed to the stored C procedure by * Tarantool * \param tuple a tuple to return * \retval -1 on error (perhaps, out of memory; check box_error_last()) * \retval 0 otherwise */ API_EXPORT int box_return_tuple(box_function_ctx_t *ctx, box_tuple_t *tuple); /** * Find space id by name. * * This function performs SELECT request to _vspace system space. * \param name space name * \param len length of \a name * \retval BOX_ID_NIL on error or if not found (check box_error_last()) * \retval space_id otherwise * \sa box_index_id_by_name */ API_EXPORT uint32_t box_space_id_by_name(const char *name, uint32_t len); /** * Find index id by name. * * This function performs SELECT request to _vindex system space. * \param space_id space identifier * \param name index name * \param len length of \a name * \retval BOX_ID_NIL on error or if not found (check box_error_last()) * \retval index_id otherwise * \sa box_space_id_by_name */ API_EXPORT uint32_t box_index_id_by_name(uint32_t space_id, const char *name, uint32_t len); /** * Execute an INSERT request. * * \param space_id space identifier * \param tuple encoded tuple in MsgPack Array format ([ field1, field2, ...]) * \param tuple_end end of @a tuple * \param[out] result a new tuple. Can be set to NULL to discard result. * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id]:insert(tuple) \endcode */ API_EXPORT int box_insert(uint32_t space_id, const char *tuple, const char *tuple_end, box_tuple_t **result); /** * Execute an REPLACE request. * * \param space_id space identifier * \param tuple encoded tuple in MsgPack Array format ([ field1, field2, ...]) * \param tuple_end end of @a tuple * \param[out] result a new tuple. Can be set to NULL to discard result. 
* \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id]:replace(tuple) \endcode */ API_EXPORT int box_replace(uint32_t space_id, const char *tuple, const char *tuple_end, box_tuple_t **result); /** * Execute an DELETE request. * * \param space_id space identifier * \param index_id index identifier * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key. * \param[out] result an old tuple. Can be set to NULL to discard result. * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id].index[index_id]:delete(key) \endcode */ API_EXPORT int box_delete(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); /** * Execute an UPDATE request. * * \param space_id space identifier * \param index_id index identifier * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key. * \param ops encoded operations in MsgPack Arrat format, e.g. * [ [ '=', fieldno, value ], ['!', 2, 'xxx'] ] * \param ops_end the end of encoded \a ops * \param index_base 0 if fieldnos in update operations are zero-based * indexed (like C) or 1 if for one-based indexed field ids (like Lua). * \param[out] result a new tuple. Can be set to NULL to discard result. * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id].index[index_id]:update(key, ops) \endcode * \sa box_upsert() */ API_EXPORT int box_update(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, const char *ops, const char *ops_end, int index_base, box_tuple_t **result); /** * Execute an UPSERT request. * * \param space_id space identifier * \param index_id index identifier * \param ops encoded operations in MsgPack Arrat format, e.g. 
* [ [ '=', fieldno, value ], ['!', 2, 'xxx'] ] * \param ops_end the end of encoded \a ops * \param tuple encoded tuple in MsgPack Array format ([ field1, field2, ...]) * \param tuple_end end of @a tuple * \param index_base 0 if fieldnos in update operations are zero-based * indexed (like C) or 1 if for one-based indexed field ids (like Lua). * \param[out] result a new tuple. Can be set to NULL to discard result. * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id].index[index_id]:update(key, ops) \endcode * \sa box_update() */ API_EXPORT int box_upsert(uint32_t space_id, uint32_t index_id, const char *tuple, const char *tuple_end, const char *ops, const char *ops_end, int index_base, box_tuple_t **result); /** * Truncate space. * * \param space_id space identifier */ API_EXPORT int box_truncate(uint32_t space_id); /** * Advance a sequence. * * \param seq_id sequence identifier * \param[out] result pointer to a variable where the next sequence * value will be stored on success * \retval -1 on error (check box_error_last()) * \retval 0 on success */ API_EXPORT int box_sequence_next(uint32_t seq_id, int64_t *result); /** * Set a sequence value. * * \param seq_id sequence identifier * \param value new sequence value; on success the next call to * box_sequence_next() will return the value following \a value * \retval -1 on error (check box_error_last()) * \retval 0 on success */ API_EXPORT int box_sequence_set(uint32_t seq_id, int64_t value); /** * Reset a sequence. * * \param seq_id sequence identifier * \retval -1 on error (check box_error_last()) * \retval 0 on success */ API_EXPORT int box_sequence_reset(uint32_t seq_id); /** \endcond public */ /** * The main entry point to the * Box: callbacks into the request processor. * These are function pointers since they can * change when entering/leaving read-only mode * (master->slave propagation). 
*/ int box_process1(struct request *request, box_tuple_t **result); int boxk(int type, uint32_t space_id, const char *format, ...); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_scheduler.c0000664000000000000000000013504013306565107020436 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "vy_scheduler.h" #include #include #include #include #include #include #include #include #include #include "checkpoint.h" #include "diag.h" #include "errcode.h" #include "errinj.h" #include "fiber.h" #include "fiber_cond.h" #include "salad/stailq.h" #include "say.h" #include "vy_index.h" #include "vy_log.h" #include "vy_mem.h" #include "vy_range.h" #include "vy_run.h" #include "vy_write_iterator.h" #include "trivia/util.h" #include "tt_pthread.h" /** * Yield after iterating over this many objects (e.g. ranges). * Yield more often in debug mode. */ #if defined(NDEBUG) enum { VY_YIELD_LOOPS = 128 }; #else enum { VY_YIELD_LOOPS = 2 }; #endif /* Min and max values for vy_scheduler::timeout. */ #define VY_SCHEDULER_TIMEOUT_MIN 1 #define VY_SCHEDULER_TIMEOUT_MAX 60 static void *vy_worker_f(void *); static int vy_scheduler_f(va_list); struct vy_task; struct vy_task_ops { /** * This function is called from a worker. It is supposed to do work * which is too heavy for the tx thread (like IO or compression). * Returns 0 on success. On failure returns -1 and sets diag. */ int (*execute)(struct vy_scheduler *scheduler, struct vy_task *task); /** * This function is called by the scheduler upon task completion. * It may be used to finish the task from the tx thread context. * * Returns 0 on success. On failure returns -1 and sets diag. */ int (*complete)(struct vy_scheduler *scheduler, struct vy_task *task); /** * This function is called by the scheduler if either ->execute * or ->complete failed. It may be used to undo changes done to * the index when preparing the task. * * If @in_shutdown is set, the callback is invoked from the * engine destructor. */ void (*abort)(struct vy_scheduler *scheduler, struct vy_task *task, bool in_shutdown); }; struct vy_task { const struct vy_task_ops *ops; /** Return code of ->execute. */ int status; /** If ->execute fails, the error is stored here. */ struct diag diag; /** Index this task is for. 
*/ struct vy_index *index; /** * Copies of index->key/cmp_def to protect from * multithread read/write on alter. */ struct key_def *cmp_def; struct key_def *key_def; /** Range to compact. */ struct vy_range *range; /** Run written by this task. */ struct vy_run *new_run; /** Write iterator producing statements for the new run. */ struct vy_stmt_stream *wi; /** * First (newest) and last (oldest) slices to compact. * * While a compaction task is in progress, a new slice * can be added to a range by concurrent dump, so we * need to remember the slices we are compacting. */ struct vy_slice *first_slice, *last_slice; /** * Link in the list of pending or processed tasks. * See vy_scheduler::input_queue, output_queue. */ struct stailq_entry link; /** * An estimate of the maximal number of statements that * can be written by the task. Used to create a bloom * filter of the perfect size. */ size_t max_output_count; /** * Index options may be modified while a task is in * progress so we save them here to safely access them * from another thread. */ double bloom_fpr; int64_t page_size; }; /** * Allocate a new task to be executed by a worker thread. * When preparing an asynchronous task, this function must * be called before yielding the current fiber in order to * pin the index the task is for so that a concurrent fiber * does not free it from under us. 
*/
static struct vy_task *
vy_task_new(struct mempool *pool, struct vy_index *index,
	    const struct vy_task_ops *ops)
{
	struct vy_task *task = mempool_alloc(pool);
	if (task == NULL) {
		diag_set(OutOfMemory, sizeof(*task),
			 "mempool", "struct vy_task");
		return NULL;
	}
	memset(task, 0, sizeof(*task));
	task->ops = ops;
	task->index = index;
	/*
	 * Copy the key definitions so a worker thread can use them
	 * without racing with a concurrent ALTER in the tx thread.
	 */
	task->cmp_def = key_def_dup(index->cmp_def);
	if (task->cmp_def == NULL) {
		mempool_free(pool, task);
		return NULL;
	}
	task->key_def = key_def_dup(index->key_def);
	if (task->key_def == NULL) {
		key_def_delete(task->cmp_def);
		mempool_free(pool, task);
		return NULL;
	}
	/* Pin the index so it can't be freed while the task runs. */
	vy_index_ref(index);
	diag_create(&task->diag);
	return task;
}

/** Free a task allocated with vy_task_new(). */
static void
vy_task_delete(struct mempool *pool, struct vy_task *task)
{
	key_def_delete(task->cmp_def);
	key_def_delete(task->key_def);
	vy_index_unref(task->index);
	diag_destroy(&task->diag);
	/* Poison the memory to catch use-after-free. */
	TRASH(task);
	mempool_free(pool, task);
}

/**
 * Comparator for the dump heap: returns true if index @a should
 * be dumped before index @b.
 */
static bool
vy_dump_heap_less(struct heap_node *a, struct heap_node *b)
{
	struct vy_index *i1 = container_of(a, struct vy_index, in_dump);
	struct vy_index *i2 = container_of(b, struct vy_index, in_dump);
	/*
	 * Indexes that are currently being dumped or can't be scheduled
	 * for dump right now are moved off the top of the heap.
	 */
	if (i1->is_dumping != i2->is_dumping)
		return i1->is_dumping < i2->is_dumping;
	if (i1->pin_count != i2->pin_count)
		return i1->pin_count < i2->pin_count;
	/* Older indexes are dumped first. */
	int64_t i1_generation = vy_index_generation(i1);
	int64_t i2_generation = vy_index_generation(i2);
	if (i1_generation != i2_generation)
		return i1_generation < i2_generation;
	/*
	 * If a space has more than one index, appending a statement
	 * to it requires reading the primary index to get the old
	 * tuple and delete it from secondary indexes. This means that
	 * on local recovery from WAL, the primary index must not be
	 * ahead of secondary indexes of the same space, i.e. it must
	 * be dumped last.
*/ return i1->id > i2->id; } #define HEAP_NAME vy_dump_heap #define HEAP_LESS(h, l, r) vy_dump_heap_less(l, r) #include "salad/heap.h" #undef HEAP_LESS #undef HEAP_NAME static bool vy_compact_heap_less(struct heap_node *a, struct heap_node *b) { struct vy_index *i1 = container_of(a, struct vy_index, in_compact); struct vy_index *i2 = container_of(b, struct vy_index, in_compact); /* * Prefer indexes whose read amplification will be reduced * most as a result of compaction. */ return vy_index_compact_priority(i1) > vy_index_compact_priority(i2); } #define HEAP_NAME vy_compact_heap #define HEAP_LESS(h, l, r) vy_compact_heap_less(l, r) #include "salad/heap.h" #undef HEAP_LESS #undef HEAP_NAME static void vy_scheduler_async_cb(ev_loop *loop, struct ev_async *watcher, int events) { (void)loop; (void)events; struct vy_scheduler *scheduler = container_of(watcher, struct vy_scheduler, scheduler_async); fiber_cond_signal(&scheduler->scheduler_cond); } static void vy_scheduler_start_workers(struct vy_scheduler *scheduler) { assert(!scheduler->is_worker_pool_running); /* One thread is reserved for dumps, see vy_schedule(). 
*/ assert(scheduler->worker_pool_size >= 2); scheduler->is_worker_pool_running = true; scheduler->workers_available = scheduler->worker_pool_size; scheduler->worker_pool = calloc(scheduler->worker_pool_size, sizeof(struct cord)); if (scheduler->worker_pool == NULL) panic("failed to allocate vinyl worker pool"); ev_async_start(scheduler->scheduler_loop, &scheduler->scheduler_async); for (int i = 0; i < scheduler->worker_pool_size; i++) { char name[FIBER_NAME_MAX]; snprintf(name, sizeof(name), "vinyl.writer.%d", i); if (cord_start(&scheduler->worker_pool[i], name, vy_worker_f, scheduler) != 0) panic("failed to start vinyl worker thread"); } } static void vy_scheduler_stop_workers(struct vy_scheduler *scheduler) { struct stailq task_queue; stailq_create(&task_queue); assert(scheduler->is_worker_pool_running); scheduler->is_worker_pool_running = false; /* Clear the input queue and wake up worker threads. */ tt_pthread_mutex_lock(&scheduler->mutex); stailq_concat(&task_queue, &scheduler->input_queue); pthread_cond_broadcast(&scheduler->worker_cond); tt_pthread_mutex_unlock(&scheduler->mutex); /* Wait for worker threads to exit. */ for (int i = 0; i < scheduler->worker_pool_size; i++) cord_join(&scheduler->worker_pool[i]); ev_async_stop(scheduler->scheduler_loop, &scheduler->scheduler_async); free(scheduler->worker_pool); scheduler->worker_pool = NULL; /* Abort all pending tasks. 
*/ struct vy_task *task, *next; stailq_concat(&task_queue, &scheduler->output_queue); stailq_foreach_entry_safe(task, next, &task_queue, link) { if (task->ops->abort != NULL) task->ops->abort(scheduler, task, true); vy_task_delete(&scheduler->task_pool, task); } } void vy_scheduler_create(struct vy_scheduler *scheduler, int write_threads, vy_scheduler_dump_complete_f dump_complete_cb, struct vy_run_env *run_env, struct rlist *read_views) { memset(scheduler, 0, sizeof(*scheduler)); scheduler->dump_complete_cb = dump_complete_cb; scheduler->read_views = read_views; scheduler->run_env = run_env; scheduler->scheduler_fiber = fiber_new("vinyl.scheduler", vy_scheduler_f); if (scheduler->scheduler_fiber == NULL) panic("failed to allocate vinyl scheduler fiber"); scheduler->scheduler_loop = loop(); fiber_cond_create(&scheduler->scheduler_cond); ev_async_init(&scheduler->scheduler_async, vy_scheduler_async_cb); scheduler->worker_pool_size = write_threads; mempool_create(&scheduler->task_pool, cord_slab_cache(), sizeof(struct vy_task)); stailq_create(&scheduler->input_queue); stailq_create(&scheduler->output_queue); tt_pthread_cond_init(&scheduler->worker_cond, NULL); tt_pthread_mutex_init(&scheduler->mutex, NULL); vy_dump_heap_create(&scheduler->dump_heap); vy_compact_heap_create(&scheduler->compact_heap); diag_create(&scheduler->diag); fiber_cond_create(&scheduler->dump_cond); fiber_start(scheduler->scheduler_fiber, scheduler); } void vy_scheduler_destroy(struct vy_scheduler *scheduler) { /* Stop scheduler fiber. */ scheduler->scheduler_fiber = NULL; /* Sic: fiber_cancel() can't be used here. 
*/
	fiber_cond_signal(&scheduler->dump_cond);
	fiber_cond_signal(&scheduler->scheduler_cond);
	if (scheduler->is_worker_pool_running)
		vy_scheduler_stop_workers(scheduler);
	tt_pthread_cond_destroy(&scheduler->worker_cond);
	tt_pthread_mutex_destroy(&scheduler->mutex);
	diag_destroy(&scheduler->diag);
	mempool_destroy(&scheduler->task_pool);
	fiber_cond_destroy(&scheduler->dump_cond);
	fiber_cond_destroy(&scheduler->scheduler_cond);
	vy_dump_heap_destroy(&scheduler->dump_heap);
	vy_compact_heap_destroy(&scheduler->compact_heap);
	/* Poison freed memory to catch use-after-destroy. */
	TRASH(scheduler);
}

/**
 * Register an index with the scheduler: insert it into both the
 * dump and the compaction heaps so it becomes eligible for
 * background dump/compaction.
 */
void
vy_scheduler_add_index(struct vy_scheduler *scheduler, struct vy_index *index)
{
	/* The index must not be in the heaps yet. */
	assert(index->in_dump.pos == UINT32_MAX);
	assert(index->in_compact.pos == UINT32_MAX);
	vy_dump_heap_insert(&scheduler->dump_heap, &index->in_dump);
	vy_compact_heap_insert(&scheduler->compact_heap, &index->in_compact);
}

/**
 * Unregister an index from the scheduler: remove it from both
 * heaps and mark its heap nodes as detached (pos == UINT32_MAX).
 */
void
vy_scheduler_remove_index(struct vy_scheduler *scheduler,
			  struct vy_index *index)
{
	assert(index->in_dump.pos != UINT32_MAX);
	assert(index->in_compact.pos != UINT32_MAX);
	vy_dump_heap_delete(&scheduler->dump_heap, &index->in_dump);
	vy_compact_heap_delete(&scheduler->compact_heap, &index->in_compact);
	/* UINT32_MAX position means "not in a heap". */
	index->in_dump.pos = UINT32_MAX;
	index->in_compact.pos = UINT32_MAX;
}

/**
 * Reposition an index in the dump and compaction heaps after
 * its scheduling priority may have changed.
 */
static void
vy_scheduler_update_index(struct vy_scheduler *scheduler,
			  struct vy_index *index)
{
	if (index->is_dropped) {
		/* Dropped indexes are exempted from scheduling. */
		assert(index->in_dump.pos == UINT32_MAX);
		assert(index->in_compact.pos == UINT32_MAX);
		return;
	}
	assert(index->in_dump.pos != UINT32_MAX);
	assert(index->in_compact.pos != UINT32_MAX);
	vy_dump_heap_update(&scheduler->dump_heap, &index->in_dump);
	vy_compact_heap_update(&scheduler->compact_heap, &index->in_compact);
}

/**
 * Pin an index: while pin_count > 0 the dump heap comparator
 * moves the index off the top, so it won't be picked for dump.
 * Used to keep the primary index from being dumped before its
 * secondary indexes.
 */
static void
vy_scheduler_pin_index(struct vy_scheduler *scheduler, struct vy_index *index)
{
	assert(!index->is_dumping);
	if (index->pin_count++ == 0)
		vy_scheduler_update_index(scheduler, index);
}

/** Undo vy_scheduler_pin_index(). */
static void
vy_scheduler_unpin_index(struct vy_scheduler *scheduler, struct vy_index *index)
{
	assert(!index->is_dumping);
	assert(index->pin_count > 0);
	if (--index->pin_count == 0)
		vy_scheduler_update_index(scheduler, index);
}

/**
 * Trigger a new dump round by bumping the scheduler generation
 * and waking up the scheduler fiber. If a dump round is already
 * in progress, this is a no-op; if a checkpoint is in progress,
 * the dump is postponed until the checkpoint completes.
 */
void
vy_scheduler_trigger_dump(struct vy_scheduler *scheduler)
{
	assert(scheduler->dump_generation <= scheduler->generation);
	if (scheduler->dump_generation < scheduler->generation) {
		/* Dump is already in progress, nothing to do. */
		return;
	}
	if (scheduler->checkpoint_in_progress) {
		/*
		 * Do not trigger another dump until checkpoint
		 * is complete so as to make sure no statements
		 * inserted after WAL rotation are written to
		 * the snapshot.
		 */
		scheduler->dump_pending = true;
		return;
	}
	scheduler->dump_start = ev_monotonic_now(loop());
	scheduler->generation++;
	scheduler->dump_pending = false;
	fiber_cond_signal(&scheduler->scheduler_cond);
}

/**
 * Check whether the current dump round is complete.
 * If it is, free memory and proceed to the next dump round.
 */
static void
vy_scheduler_complete_dump(struct vy_scheduler *scheduler)
{
	assert(scheduler->dump_generation < scheduler->generation);
	if (scheduler->dump_task_count > 0) {
		/*
		 * There are still dump tasks in progress,
		 * the dump round can't be over yet.
*/
		return;
	}
	/*
	 * The dump round is over when the oldest index (the top of
	 * the dump heap) is newer than @dump_generation.
	 */
	int64_t min_generation = scheduler->generation;
	struct heap_node *pn = vy_dump_heap_top(&scheduler->dump_heap);
	if (pn != NULL) {
		struct vy_index *index;
		index = container_of(pn, struct vy_index, in_dump);
		min_generation = vy_index_generation(index);
	}
	if (min_generation == scheduler->dump_generation) {
		/*
		 * There are still indexes that must be dumped
		 * during the current dump round.
		 */
		return;
	}
	/*
	 * The oldest index data is newer than @dump_generation,
	 * so the current dump round has been finished. Notify
	 * about dump completion.
	 */
	double now = ev_monotonic_now(loop());
	double dump_duration = now - scheduler->dump_start;
	scheduler->dump_start = now;
	scheduler->dump_generation = min_generation;
	scheduler->dump_complete_cb(scheduler,
				    min_generation - 1, dump_duration);
	/* Wake up fibers waiting on the dump in wait_checkpoint(). */
	fiber_cond_signal(&scheduler->dump_cond);
}

/**
 * Begin a vinyl checkpoint: bump the scheduler generation so all
 * current in-memory trees get dumped, and mark the checkpoint as
 * in progress (which postpones any further dump triggers).
 * @retval 0 on success
 * @retval -1 if the scheduler is throttled by a previous error
 */
int
vy_scheduler_begin_checkpoint(struct vy_scheduler *scheduler)
{
	assert(!scheduler->checkpoint_in_progress);
	/*
	 * If the scheduler is throttled due to errors, do not wait
	 * until it wakes up as it may take quite a while. Instead
	 * fail checkpoint immediately with the last error seen by
	 * the scheduler.
	 */
	if (scheduler->is_throttled) {
		struct error *e = diag_last_error(&scheduler->diag);
		diag_add_error(diag_get(), e);
		say_error("cannot checkpoint vinyl, "
			  "scheduler is throttled with: %s", e->errmsg);
		return -1;
	}
	assert(scheduler->dump_generation <= scheduler->generation);
	if (scheduler->generation == scheduler->dump_generation) {
		/*
		 * We are about to start a new dump round.
		 * Remember the current time so that we can update
		 * dump bandwidth when the dump round is complete
		 * (see vy_scheduler_complete_dump()).
*/
		scheduler->dump_start = ev_monotonic_now(loop());
	}
	scheduler->generation++;
	scheduler->checkpoint_in_progress = true;
	fiber_cond_signal(&scheduler->scheduler_cond);
	say_info("vinyl checkpoint started");
	return 0;
}

/**
 * Block the caller until the dump round started by
 * vy_scheduler_begin_checkpoint() completes.
 * @retval 0 on success (or if no checkpoint is in progress)
 * @retval -1 if a dump error occurred and the scheduler was
 *            throttled; the error is added to the fiber's diag
 */
int
vy_scheduler_wait_checkpoint(struct vy_scheduler *scheduler)
{
	if (!scheduler->checkpoint_in_progress)
		return 0;
	/*
	 * Wait until all in-memory trees created before
	 * checkpoint started have been dumped.
	 */
	while (scheduler->dump_generation < scheduler->generation) {
		if (scheduler->is_throttled) {
			/* A dump error occurred, abort checkpoint. */
			struct error *e = diag_last_error(&scheduler->diag);
			diag_add_error(diag_get(), e);
			say_error("vinyl checkpoint failed: %s", e->errmsg);
			return -1;
		}
		fiber_cond_wait(&scheduler->dump_cond);
	}
	say_info("vinyl checkpoint completed");
	return 0;
}

/**
 * Finish a checkpoint: clear the in-progress flag and, if a dump
 * was requested while the checkpoint was running, schedule it now.
 */
void
vy_scheduler_end_checkpoint(struct vy_scheduler *scheduler)
{
	if (!scheduler->checkpoint_in_progress)
		return;
	scheduler->checkpoint_in_progress = false;
	if (scheduler->dump_pending) {
		/*
		 * Dump was triggered while checkpoint was
		 * in progress and hence it was postponed.
		 * Schedule it now.
		 */
		vy_scheduler_trigger_dump(scheduler);
	}
}

/**
 * Allocate a new run for an index and write the information
 * about it to the metadata log so that we could still find
 * and delete it in case a write error occurred. This function
 * is called from dump/compaction task constructor.
 */
static struct vy_run *
vy_run_prepare(struct vy_run_env *run_env, struct vy_index *index)
{
	struct vy_run *run = vy_run_new(run_env, vy_log_next_id());
	if (run == NULL)
		return NULL;
	/* Log the run as "prepared" so crash recovery can GC it. */
	vy_log_tx_begin();
	vy_log_prepare_run(index->commit_lsn, run->id);
	if (vy_log_tx_commit() < 0) {
		vy_run_unref(run);
		return NULL;
	}
	return run;
}

/**
 * Free an incomplete run and write a record to the metadata
 * log indicating that the run is not needed any more.
 * This function is called on dump/compaction task abort.
*/ static void vy_run_discard(struct vy_run *run) { int64_t run_id = run->id; vy_run_unref(run); ERROR_INJECT(ERRINJ_VY_RUN_DISCARD, {say_error("error injection: run %lld not discarded", (long long)run_id); return;}); vy_log_tx_begin(); /* * The run hasn't been used and can be deleted right away * so set gc_lsn to minimal possible (0). */ vy_log_drop_run(run_id, 0); /* * Leave the record in the vylog buffer on disk error. * If we fail to flush it before restart, we will delete * the run file upon recovery completion. */ vy_log_tx_try_commit(); } static int vy_task_write_run(struct vy_scheduler *scheduler, struct vy_task *task) { struct vy_index *index = task->index; struct vy_stmt_stream *wi = task->wi; ERROR_INJECT(ERRINJ_VY_RUN_WRITE, {diag_set(ClientError, ER_INJECTION, "vinyl dump"); return -1;}); struct errinj *inj = errinj(ERRINJ_VY_RUN_WRITE_TIMEOUT, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam > 0) usleep(inj->dparam * 1000000); struct vy_run_writer writer; if (vy_run_writer_create(&writer, task->new_run, index->env->path, index->space_id, index->id, task->cmp_def, task->key_def, task->page_size, task->bloom_fpr, task->max_output_count) != 0) goto fail; if (wi->iface->start(wi) != 0) goto fail_abort_writer; int rc; struct tuple *stmt = NULL; while ((rc = wi->iface->next(wi, &stmt)) == 0 && stmt != NULL) { inj = errinj(ERRINJ_VY_RUN_WRITE_STMT_TIMEOUT, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam > 0) usleep(inj->dparam * 1000000); rc = vy_run_writer_append_stmt(&writer, stmt); if (rc != 0) break; if (!scheduler->is_worker_pool_running) { diag_set(FiberIsCancelled); rc = -1; break; } } wi->iface->stop(wi); if (rc == 0) rc = vy_run_writer_commit(&writer); if (rc != 0) goto fail_abort_writer; return 0; fail_abort_writer: vy_run_writer_abort(&writer); fail: return -1; } static int vy_task_dump_execute(struct vy_scheduler *scheduler, struct vy_task *task) { return vy_task_write_run(scheduler, task); } static int vy_task_dump_complete(struct vy_scheduler 
*scheduler, struct vy_task *task) { struct vy_index *index = task->index; struct vy_run *new_run = task->new_run; int64_t dump_lsn = new_run->dump_lsn; struct tuple_format *key_format = index->env->key_format; struct vy_mem *mem, *next_mem; struct vy_slice **new_slices, *slice; struct vy_range *range, *begin_range, *end_range; struct tuple *min_key, *max_key; int i, loops = 0; assert(index->is_dumping); if (vy_run_is_empty(new_run)) { /* * In case the run is empty, we can discard the run * and delete dumped in-memory trees right away w/o * inserting slices into ranges. However, we need * to log index dump anyway. */ vy_log_tx_begin(); vy_log_dump_index(index->commit_lsn, dump_lsn); if (vy_log_tx_commit() < 0) goto fail; vy_run_discard(new_run); goto delete_mems; } assert(new_run->info.min_lsn > index->dump_lsn); assert(new_run->info.max_lsn <= dump_lsn); /* * Figure out which ranges intersect the new run. * @begin_range is the first range intersecting the run. * @end_range is the range following the last range * intersecting the run or NULL if the run itersects all * ranges. */ min_key = vy_key_from_msgpack(key_format, new_run->info.min_key); if (min_key == NULL) goto fail; max_key = vy_key_from_msgpack(key_format, new_run->info.max_key); if (max_key == NULL) { tuple_unref(min_key); goto fail; } begin_range = vy_range_tree_psearch(index->tree, min_key); end_range = vy_range_tree_psearch(index->tree, max_key); /* * If min_key == max_key, the slice has to span over at * least one range. */ end_range = vy_range_tree_next(index->tree, end_range); tuple_unref(min_key); tuple_unref(max_key); /* * For each intersected range allocate a slice of the new run. 
*/ new_slices = calloc(index->range_count, sizeof(*new_slices)); if (new_slices == NULL) { diag_set(OutOfMemory, index->range_count * sizeof(*new_slices), "malloc", "struct vy_slice *"); goto fail; } for (range = begin_range, i = 0; range != end_range; range = vy_range_tree_next(index->tree, range), i++) { slice = vy_slice_new(vy_log_next_id(), new_run, range->begin, range->end, index->cmp_def); if (slice == NULL) goto fail_free_slices; assert(i < index->range_count); new_slices[i] = slice; /* * It's OK to yield here for the range tree can only * be changed from the scheduler fiber. */ if (++loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); } /* * Log change in metadata. */ vy_log_tx_begin(); vy_log_create_run(index->commit_lsn, new_run->id, dump_lsn); for (range = begin_range, i = 0; range != end_range; range = vy_range_tree_next(index->tree, range), i++) { assert(i < index->range_count); slice = new_slices[i]; vy_log_insert_slice(range->id, new_run->id, slice->id, tuple_data_or_null(slice->begin), tuple_data_or_null(slice->end)); if (++loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); /* see comment above */ } vy_log_dump_index(index->commit_lsn, dump_lsn); if (vy_log_tx_commit() < 0) goto fail_free_slices; /* * Account the new run. */ vy_index_add_run(index, new_run); vy_stmt_counter_add_disk(&index->stat.disk.dump.out, &new_run->count); /* Drop the reference held by the task. */ vy_run_unref(new_run); /* * Add new slices to ranges. 
*/ for (range = begin_range, i = 0; range != end_range; range = vy_range_tree_next(index->tree, range), i++) { assert(i < index->range_count); slice = new_slices[i]; vy_index_unacct_range(index, range); vy_range_add_slice(range, slice); vy_index_acct_range(index, range); vy_range_update_compact_priority(range, &index->opts); if (!vy_range_is_scheduled(range)) vy_range_heap_update(&index->range_heap, &range->heap_node); range->version++; /* * If we yield here, a concurrent fiber will see * a range with a run slice containing statements * present in the in-memory trees of the index. * This is OK, because read iterator won't use the * new run slice until index->dump_lsn is bumped, * which is only done after in-memory trees are * removed (see vy_read_iterator_add_disk()). */ if (++loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); } free(new_slices); delete_mems: /* * Delete dumped in-memory trees. */ rlist_foreach_entry_safe(mem, &index->sealed, in_sealed, next_mem) { if (mem->generation > scheduler->dump_generation) continue; vy_stmt_counter_add(&index->stat.disk.dump.in, &mem->count); vy_index_delete_mem(index, mem); } index->dump_lsn = dump_lsn; index->stat.disk.dump.count++; /* The iterator has been cleaned up in a worker thread. 
*/ task->wi->iface->close(task->wi); index->is_dumping = false; vy_scheduler_update_index(scheduler, index); if (index->id != 0) vy_scheduler_unpin_index(scheduler, index->pk); assert(scheduler->dump_task_count > 0); scheduler->dump_task_count--; say_info("%s: dump completed", vy_index_name(index)); vy_scheduler_complete_dump(scheduler); return 0; fail_free_slices: for (i = 0; i < index->range_count; i++) { slice = new_slices[i]; if (slice != NULL) vy_slice_delete(slice); if (++loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); } free(new_slices); fail: return -1; } static void vy_task_dump_abort(struct vy_scheduler *scheduler, struct vy_task *task, bool in_shutdown) { struct vy_index *index = task->index; assert(index->is_dumping); /* The iterator has been cleaned up in a worker thread. */ task->wi->iface->close(task->wi); /* * It's no use alerting the user if the server is * shutting down or the index was dropped. */ if (!in_shutdown && !index->is_dropped) { struct error *e = diag_last_error(&task->diag); error_log(e); say_error("%s: dump failed", vy_index_name(index)); } /* The metadata log is unavailable on shutdown. */ if (!in_shutdown) vy_run_discard(task->new_run); else vy_run_unref(task->new_run); index->is_dumping = false; vy_scheduler_update_index(scheduler, index); if (index->id != 0) vy_scheduler_unpin_index(scheduler, index->pk); assert(scheduler->dump_task_count > 0); scheduler->dump_task_count--; /* * If the index was dropped during dump, we abort the * dump task, but we should still poke the scheduler * to check if the current dump round is complete. * If we don't and this index happens to be the last * one of the current generation, the scheduler will * never be notified about dump completion and hence * memory will never be released. */ if (index->is_dropped) vy_scheduler_complete_dump(scheduler); } /** * Create a task to dump an index. * * On success the task is supposed to dump all in-memory * trees created at @scheduler->dump_generation. 
*/ static int vy_task_dump_new(struct vy_scheduler *scheduler, struct vy_index *index, struct vy_task **p_task) { static struct vy_task_ops dump_ops = { .execute = vy_task_dump_execute, .complete = vy_task_dump_complete, .abort = vy_task_dump_abort, }; assert(!index->is_dropped); assert(!index->is_dumping); assert(index->pin_count == 0); assert(vy_index_generation(index) == scheduler->dump_generation); assert(scheduler->dump_generation < scheduler->generation); struct errinj *inj = errinj(ERRINJ_VY_INDEX_DUMP, ERRINJ_INT); if (inj != NULL && inj->iparam == (int)index->id) { diag_set(ClientError, ER_INJECTION, "vinyl index dump"); goto err; } /* Rotate the active tree if it needs to be dumped. */ if (index->mem->generation == scheduler->dump_generation && vy_index_rotate_mem(index) != 0) goto err; /* * Wait until all active writes to in-memory trees * eligible for dump are over. */ int64_t dump_lsn = -1; size_t max_output_count = 0; struct vy_mem *mem, *next_mem; rlist_foreach_entry_safe(mem, &index->sealed, in_sealed, next_mem) { if (mem->generation > scheduler->dump_generation) continue; vy_mem_wait_pinned(mem); if (mem->tree.size == 0) { /* * The tree is empty so we can delete it * right away, without involving a worker. */ vy_index_delete_mem(index, mem); continue; } dump_lsn = MAX(dump_lsn, mem->max_lsn); max_output_count += mem->tree.size; } if (max_output_count == 0) { /* Nothing to do, pick another index. 
*/ vy_scheduler_update_index(scheduler, index); vy_scheduler_complete_dump(scheduler); return 0; } struct vy_task *task = vy_task_new(&scheduler->task_pool, index, &dump_ops); if (task == NULL) goto err; struct vy_run *new_run = vy_run_prepare(scheduler->run_env, index); if (new_run == NULL) goto err_run; assert(dump_lsn >= 0); new_run->dump_lsn = dump_lsn; struct vy_stmt_stream *wi; bool is_last_level = (index->run_count == 0); wi = vy_write_iterator_new(task->cmp_def, index->disk_format, index->upsert_format, index->id == 0, is_last_level, scheduler->read_views); if (wi == NULL) goto err_wi; rlist_foreach_entry(mem, &index->sealed, in_sealed) { if (mem->generation > scheduler->dump_generation) continue; if (vy_write_iterator_new_mem(wi, mem) != 0) goto err_wi_sub; } task->new_run = new_run; task->wi = wi; task->max_output_count = max_output_count; task->bloom_fpr = index->opts.bloom_fpr; task->page_size = index->opts.page_size; index->is_dumping = true; vy_scheduler_update_index(scheduler, index); if (index->id != 0) { /* * The primary index must be dumped after all * secondary indexes of the same space - see * vy_dump_heap_less(). To make sure it isn't * picked by the scheduler while all secondary * indexes are being dumped, temporarily remove * it from the dump heap. 
*/ vy_scheduler_pin_index(scheduler, index->pk); } scheduler->dump_task_count++; say_info("%s: dump started", vy_index_name(index)); *p_task = task; return 0; err_wi_sub: task->wi->iface->close(wi); err_wi: vy_run_discard(new_run); err_run: vy_task_delete(&scheduler->task_pool, task); err: diag_log(); say_error("%s: could not start dump", vy_index_name(index)); return -1; } static int vy_task_compact_execute(struct vy_scheduler *scheduler, struct vy_task *task) { return vy_task_write_run(scheduler, task); } static int vy_task_compact_complete(struct vy_scheduler *scheduler, struct vy_task *task) { struct vy_index *index = task->index; struct vy_range *range = task->range; struct vy_run *new_run = task->new_run; struct vy_slice *first_slice = task->first_slice; struct vy_slice *last_slice = task->last_slice; struct vy_slice *slice, *next_slice, *new_slice = NULL; struct vy_run *run; /* * Allocate a slice of the new run. * * If the run is empty, we don't need to allocate a new slice * and insert it into the range, but we still need to delete * compacted runs. */ if (!vy_run_is_empty(new_run)) { new_slice = vy_slice_new(vy_log_next_id(), new_run, NULL, NULL, index->cmp_def); if (new_slice == NULL) return -1; } /* * Build the list of runs that became unused * as a result of compaction. */ RLIST_HEAD(unused_runs); for (slice = first_slice; ; slice = rlist_next_entry(slice, in_range)) { slice->run->compacted_slice_count++; if (slice == last_slice) break; } for (slice = first_slice; ; slice = rlist_next_entry(slice, in_range)) { run = slice->run; if (run->compacted_slice_count == run->slice_count) rlist_add_entry(&unused_runs, run, in_unused); slice->run->compacted_slice_count = 0; if (slice == last_slice) break; } /* * Log change in metadata. 
*/ vy_log_tx_begin(); for (slice = first_slice; ; slice = rlist_next_entry(slice, in_range)) { vy_log_delete_slice(slice->id); if (slice == last_slice) break; } int64_t gc_lsn = vy_log_signature(); rlist_foreach_entry(run, &unused_runs, in_unused) vy_log_drop_run(run->id, gc_lsn); if (new_slice != NULL) { vy_log_create_run(index->commit_lsn, new_run->id, new_run->dump_lsn); vy_log_insert_slice(range->id, new_run->id, new_slice->id, tuple_data_or_null(new_slice->begin), tuple_data_or_null(new_slice->end)); } if (vy_log_tx_commit() < 0) { if (new_slice != NULL) vy_slice_delete(new_slice); return -1; } /* * Remove compacted run files that were created after * the last checkpoint (and hence are not referenced * by any checkpoint) immediately to save disk space. */ vy_log_tx_begin(); rlist_foreach_entry(run, &unused_runs, in_unused) { if (run->dump_lsn > gc_lsn && vy_run_remove_files(index->env->path, index->space_id, index->id, run->id) == 0) { vy_log_forget_run(run->id); } } vy_log_tx_try_commit(); /* * Account the new run if it is not empty, * otherwise discard it. */ if (new_slice != NULL) { vy_index_add_run(index, new_run); vy_stmt_counter_add_disk(&index->stat.disk.compact.out, &new_run->count); /* Drop the reference held by the task. */ vy_run_unref(new_run); } else vy_run_discard(new_run); /* * Replace compacted slices with the resulting slice. * * Note, since a slice might have been added to the range * by a concurrent dump while compaction was in progress, * we must insert the new slice at the same position where * the compacted slices were. 
*/ RLIST_HEAD(compacted_slices); vy_index_unacct_range(index, range); if (new_slice != NULL) vy_range_add_slice_before(range, new_slice, first_slice); for (slice = first_slice; ; slice = next_slice) { next_slice = rlist_next_entry(slice, in_range); vy_range_remove_slice(range, slice); rlist_add_entry(&compacted_slices, slice, in_range); vy_stmt_counter_add_disk(&index->stat.disk.compact.in, &slice->count); if (slice == last_slice) break; } range->n_compactions++; range->version++; vy_index_acct_range(index, range); vy_range_update_compact_priority(range, &index->opts); index->stat.disk.compact.count++; /* * Unaccount unused runs and delete compacted slices. */ rlist_foreach_entry(run, &unused_runs, in_unused) vy_index_remove_run(index, run); rlist_foreach_entry_safe(slice, &compacted_slices, in_range, next_slice) { vy_slice_wait_pinned(slice); vy_slice_delete(slice); } /* The iterator has been cleaned up in worker. */ task->wi->iface->close(task->wi); assert(range->heap_node.pos == UINT32_MAX); vy_range_heap_insert(&index->range_heap, &range->heap_node); vy_scheduler_update_index(scheduler, index); say_info("%s: completed compacting range %s", vy_index_name(index), vy_range_str(range)); return 0; } static void vy_task_compact_abort(struct vy_scheduler *scheduler, struct vy_task *task, bool in_shutdown) { struct vy_index *index = task->index; struct vy_range *range = task->range; /* The iterator has been cleaned up in worker. */ task->wi->iface->close(task->wi); /* * It's no use alerting the user if the server is * shutting down or the index was dropped. */ if (!in_shutdown && !index->is_dropped) { struct error *e = diag_last_error(&task->diag); error_log(e); say_error("%s: failed to compact range %s", vy_index_name(index), vy_range_str(range)); } /* The metadata log is unavailable on shutdown. 
*/ if (!in_shutdown) vy_run_discard(task->new_run); else vy_run_unref(task->new_run); assert(range->heap_node.pos == UINT32_MAX); vy_range_heap_insert(&index->range_heap, &range->heap_node); vy_scheduler_update_index(scheduler, index); } static int vy_task_compact_new(struct vy_scheduler *scheduler, struct vy_index *index, struct vy_task **p_task) { static struct vy_task_ops compact_ops = { .execute = vy_task_compact_execute, .complete = vy_task_compact_complete, .abort = vy_task_compact_abort, }; struct heap_node *range_node; struct vy_range *range; assert(!index->is_dropped); range_node = vy_range_heap_top(&index->range_heap); assert(range_node != NULL); range = container_of(range_node, struct vy_range, heap_node); assert(range->compact_priority > 1); if (vy_index_split_range(index, range) || vy_index_coalesce_range(index, range)) { vy_scheduler_update_index(scheduler, index); return 0; } struct vy_task *task = vy_task_new(&scheduler->task_pool, index, &compact_ops); if (task == NULL) goto err_task; struct vy_run *new_run = vy_run_prepare(scheduler->run_env, index); if (new_run == NULL) goto err_run; struct vy_stmt_stream *wi; bool is_last_level = (range->compact_priority == range->slice_count); wi = vy_write_iterator_new(task->cmp_def, index->disk_format, index->upsert_format, index->id == 0, is_last_level, scheduler->read_views); if (wi == NULL) goto err_wi; struct vy_slice *slice; int n = range->compact_priority; rlist_foreach_entry(slice, &range->slices, in_range) { if (vy_write_iterator_new_slice(wi, slice) != 0) goto err_wi_sub; task->max_output_count += slice->count.rows; new_run->dump_lsn = MAX(new_run->dump_lsn, slice->run->dump_lsn); /* Remember the slices we are compacting. 
*/ if (task->first_slice == NULL) task->first_slice = slice; task->last_slice = slice; if (--n == 0) break; } assert(n == 0); assert(new_run->dump_lsn >= 0); task->range = range; task->new_run = new_run; task->wi = wi; task->bloom_fpr = index->opts.bloom_fpr; task->page_size = index->opts.page_size; /* * Remove the range we are going to compact from the heap * so that it doesn't get selected again. */ vy_range_heap_delete(&index->range_heap, range_node); range_node->pos = UINT32_MAX; vy_scheduler_update_index(scheduler, index); say_info("%s: started compacting range %s, runs %d/%d", vy_index_name(index), vy_range_str(range), range->compact_priority, range->slice_count); *p_task = task; return 0; err_wi_sub: task->wi->iface->close(wi); err_wi: vy_run_discard(new_run); err_run: vy_task_delete(&scheduler->task_pool, task); err_task: diag_log(); say_error("%s: could not start compacting range %s: %s", vy_index_name(index), vy_range_str(range)); return -1; } /** * Create a task for dumping an index. The new task is returned * in @ptask. If there's no index that needs to be dumped @ptask * is set to NULL. * * We only dump an index if it needs to be snapshotted or the quota * on memory usage is exceeded. In either case, the oldest index * is selected, because dumping it will free the maximal amount of * memory due to log structured design of the memory allocator. * * Returns 0 on success, -1 on failure. */ static int vy_scheduler_peek_dump(struct vy_scheduler *scheduler, struct vy_task **ptask) { retry: *ptask = NULL; assert(scheduler->dump_generation <= scheduler->generation); if (scheduler->dump_generation == scheduler->generation) { /* * All memory trees of past generations have * been dumped, nothing to do. */ return 0; } /* * Look up the oldest index eligible for dump. */ struct heap_node *pn = vy_dump_heap_top(&scheduler->dump_heap); if (pn == NULL) { /* * There is no vinyl index and so no task to schedule. * Complete the current dump round. 
*/ vy_scheduler_complete_dump(scheduler); return 0; } struct vy_index *index = container_of(pn, struct vy_index, in_dump); if (!index->is_dumping && index->pin_count == 0 && vy_index_generation(index) == scheduler->dump_generation) { /* * Dump is in progress and there is an index that * contains data that must be dumped at the current * round. Try to create a task for it. */ if (vy_task_dump_new(scheduler, index, ptask) != 0) return -1; if (*ptask != NULL) return 0; /* new task */ /* * All in-memory trees eligible for dump were empty * and so were deleted without involving a worker * thread. Check another index. */ goto retry; } /* * Dump is in progress, but all eligible indexes are * already being dumped. Wait until the current round * is complete. */ assert(scheduler->dump_task_count > 0); return 0; } /** * Create a task for compacting a range. The new task is returned * in @ptask. If there's no range that needs to be compacted @ptask * is set to NULL. * * We compact ranges that have more runs in a level than specified * by run_count_per_level configuration option. Among those runs we * give preference to those ranges whose compaction will reduce * read amplification most. * * Returns 0 on success, -1 on failure. 
*/
static int
vy_scheduler_peek_compact(struct vy_scheduler *scheduler,
			  struct vy_task **ptask)
{
retry:
	*ptask = NULL;
	/* The heap top is the range with the highest compact priority. */
	struct heap_node *pn = vy_compact_heap_top(&scheduler->compact_heap);
	if (pn == NULL)
		return 0; /* nothing to do */
	struct vy_index *index = container_of(pn, struct vy_index, in_compact);
	if (vy_index_compact_priority(index) <= 1)
		return 0; /* nothing to do */
	if (vy_task_compact_new(scheduler, index, ptask) != 0)
		return -1;
	if (*ptask == NULL)
		goto retry; /* index dropped or range split/coalesced */
	return 0; /* new task */
}

/**
 * Pick the next task to run. Dump tasks take priority over
 * compaction tasks, and one worker is always kept in reserve
 * for dumps. On success returns 0 and sets *ptask (possibly to
 * NULL when there is nothing to do); on failure returns -1 after
 * moving the error to scheduler->diag.
 */
static int
vy_schedule(struct vy_scheduler *scheduler, struct vy_task **ptask)
{
	*ptask = NULL;

	if (vy_scheduler_peek_dump(scheduler, ptask) != 0)
		goto fail;
	if (*ptask != NULL)
		return 0;

	if (scheduler->workers_available <= 1) {
		/*
		 * If all worker threads are busy doing compaction
		 * when we run out of quota, ongoing transactions will
		 * hang until one of the threads has finished, which
		 * may take quite a while. To avoid unpredictably long
		 * stalls, always keep one worker thread reserved for
		 * dumps.
*/
		return 0;
	}

	if (vy_scheduler_peek_compact(scheduler, ptask) != 0)
		goto fail;
	if (*ptask != NULL)
		return 0;

	/* no task to run */
	return 0;
fail:
	assert(!diag_is_empty(diag_get()));
	diag_move(diag_get(), &scheduler->diag);
	return -1;
}

/**
 * Finalize a task processed by a worker thread: invoke its
 * ->complete callback, or ->abort on failure / if the index was
 * dropped while the task was in flight. On failure the error is
 * moved to scheduler->diag and -1 is returned so the scheduler
 * loop can throttle.
 */
static int
vy_scheduler_complete_task(struct vy_scheduler *scheduler,
			   struct vy_task *task)
{
	if (task->index->is_dropped) {
		/* The index is gone; just let the task clean up. */
		if (task->ops->abort)
			task->ops->abort(scheduler, task, false);
		return 0;
	}

	struct diag *diag = &task->diag;
	if (task->status != 0) {
		assert(!diag_is_empty(diag));
		goto fail; /* ->execute failed */
	}
	ERROR_INJECT(ERRINJ_VY_TASK_COMPLETE, {
			diag_set(ClientError, ER_INJECTION,
				 "vinyl task completion");
			diag_move(diag_get(), diag);
			goto fail; });
	if (task->ops->complete &&
	    task->ops->complete(scheduler, task) != 0) {
		assert(!diag_is_empty(diag_get()));
		diag_move(diag_get(), diag);
		goto fail;
	}
	return 0;
fail:
	if (task->ops->abort)
		task->ops->abort(scheduler, task, false);
	diag_move(diag, &scheduler->diag);
	return -1;
}

/**
 * Scheduler fiber: drains completed tasks from the output queue,
 * picks new tasks via vy_schedule() and feeds them to the worker
 * pool, throttling with exponential backoff after failures.
 */
static int
vy_scheduler_f(va_list va)
{
	struct vy_scheduler *scheduler = va_arg(va, struct vy_scheduler *);

	/*
	 * Yield immediately, until the quota watermark is reached
	 * for the first time or a checkpoint is made.
	 * Then start the worker threads: we know they will be
	 * needed. If quota watermark is never reached, workers
	 * are not started and the scheduler is idle until
	 * shutdown or checkpoint.
	 */
	fiber_cond_wait(&scheduler->scheduler_cond);
	if (scheduler->scheduler_fiber == NULL)
		return 0; /* destroyed */
	vy_scheduler_start_workers(scheduler);

	while (scheduler->scheduler_fiber != NULL) {
		struct stailq output_queue;
		struct vy_task *task, *next;
		int tasks_failed = 0, tasks_done = 0;
		bool was_empty;

		/* Get the list of processed tasks. */
		stailq_create(&output_queue);
		tt_pthread_mutex_lock(&scheduler->mutex);
		stailq_concat(&output_queue, &scheduler->output_queue);
		tt_pthread_mutex_unlock(&scheduler->mutex);

		/* Complete and delete all processed tasks.
*/ stailq_foreach_entry_safe(task, next, &output_queue, link) { if (vy_scheduler_complete_task(scheduler, task) != 0) tasks_failed++; else tasks_done++; vy_task_delete(&scheduler->task_pool, task); scheduler->workers_available++; assert(scheduler->workers_available <= scheduler->worker_pool_size); } /* * Reset the timeout if we managed to successfully * complete at least one task. */ if (tasks_done > 0) { scheduler->timeout = 0; /* * Task completion callback may yield, which * opens a time window for a worker to submit * a processed task and wake up the scheduler * (via scheduler_async). Hence we should go * and recheck the output_queue in order not * to lose a wakeup event and hang for good. */ continue; } /* Throttle for a while if a task failed. */ if (tasks_failed > 0) goto error; /* All worker threads are busy. */ if (scheduler->workers_available == 0) goto wait; /* Get a task to schedule. */ if (vy_schedule(scheduler, &task) != 0) goto error; /* Nothing to do. */ if (task == NULL) goto wait; /* Queue the task and notify workers if necessary. */ tt_pthread_mutex_lock(&scheduler->mutex); was_empty = stailq_empty(&scheduler->input_queue); stailq_add_tail_entry(&scheduler->input_queue, task, link); if (was_empty) tt_pthread_cond_signal(&scheduler->worker_cond); tt_pthread_mutex_unlock(&scheduler->mutex); scheduler->workers_available--; fiber_reschedule(); continue; error: /* Abort pending checkpoint. */ fiber_cond_signal(&scheduler->dump_cond); /* * A task can fail either due to lack of memory or IO * error. In either case it is pointless to schedule * another task right away, because it is likely to fail * too. So we throttle the scheduler for a while after * each failure. 
*/ scheduler->timeout *= 2; if (scheduler->timeout < VY_SCHEDULER_TIMEOUT_MIN) scheduler->timeout = VY_SCHEDULER_TIMEOUT_MIN; if (scheduler->timeout > VY_SCHEDULER_TIMEOUT_MAX) scheduler->timeout = VY_SCHEDULER_TIMEOUT_MAX; struct errinj *inj; inj = errinj(ERRINJ_VY_SCHED_TIMEOUT, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam != 0) scheduler->timeout = inj->dparam; say_warn("throttling scheduler for %.0f second(s)", scheduler->timeout); scheduler->is_throttled = true; fiber_sleep(scheduler->timeout); scheduler->is_throttled = false; continue; wait: /* Wait for changes */ fiber_cond_wait(&scheduler->scheduler_cond); } return 0; } static void * vy_worker_f(void *arg) { struct vy_scheduler *scheduler = arg; struct vy_task *task = NULL; tt_pthread_mutex_lock(&scheduler->mutex); while (scheduler->is_worker_pool_running) { /* Wait for a task */ if (stailq_empty(&scheduler->input_queue)) { /* Wake scheduler up if there are no more tasks */ ev_async_send(scheduler->scheduler_loop, &scheduler->scheduler_async); tt_pthread_cond_wait(&scheduler->worker_cond, &scheduler->mutex); continue; } task = stailq_shift_entry(&scheduler->input_queue, struct vy_task, link); tt_pthread_mutex_unlock(&scheduler->mutex); assert(task != NULL); /* Execute task */ task->status = task->ops->execute(scheduler, task); if (task->status != 0) { struct diag *diag = diag_get(); assert(!diag_is_empty(diag)); diag_move(diag, &task->diag); } /* Return processed task to scheduler */ tt_pthread_mutex_lock(&scheduler->mutex); stailq_add_tail_entry(&scheduler->output_queue, task, link); } tt_pthread_mutex_unlock(&scheduler->mutex); return NULL; } tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_dictionary.h0000664000000000000000000000651313306560010021313 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_DICTIONARY_H_INCLUDED #define TARANTOOL_BOX_TUPLE_DICTIONARY_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include "field_def.h" #ifdef __cplusplus extern "C" { #endif struct mh_strnu32_t; typedef uint32_t (*field_name_hash_f)(const char *str, uint32_t len); extern field_name_hash_f field_name_hash; /** * Shared tuple field names hash. It is referenced by tuple format * and space definition. */ struct tuple_dictionary { /** Field names hash. Key - name, value - field number. */ struct mh_strnu32_t *hash; /** * Array of names. All of them are stored in monolit * memory area. */ char **names; /** Length of a names array. */ uint32_t name_count; /** Reference counter. */ int refs; }; /** * Create a new tuple dictionary. * @param fields Array of space fields. * @param field_count Length of @a fields. * * @retval NULL Memory error. 
* @retval not NULL Tuple dictionary with one ref. */ struct tuple_dictionary * tuple_dictionary_new(const struct field_def *fields, uint32_t field_count); /** * Swap content of two dictionaries. Reference counters are not * swaped. */ void tuple_dictionary_swap(struct tuple_dictionary *a, struct tuple_dictionary *b); /** * Decrement reference counter. If a new reference counter value * is 0, then the dictionary is deleted. */ void tuple_dictionary_unref(struct tuple_dictionary *dict); /** Increment reference counter. */ void tuple_dictionary_ref(struct tuple_dictionary *dict); /** * Get field number by a name. * @param dict Tuple dictionary. * @param name Name to search. * @param name_len Length of @a name. * @param name_hash Hash of @a name. * @param[out] fieldno Field number, if it is found. * * @retval 0 Field is found. * @retval -1 No such field. */ int tuple_fieldno_by_name(struct tuple_dictionary *dict, const char *name, uint32_t name_len, uint32_t name_hash, uint32_t *fieldno); #ifdef __cplusplus } /* extern "C" */ #endif #endif /*TARANTOOL_BOX_TUPLE_DICTIONARY_H_INCLUDED*/ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_compare.h0000664000000000000000000000410113306565107020577 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_COMPARE_H_INCLUDED #define TARANTOOL_BOX_TUPLE_COMPARE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "key_def.h" #include "tuple.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Create a comparison function for the key_def * * @param key_def key_definition * @returns a comparision function */ tuple_compare_t tuple_compare_create(const struct key_def *key_def); /** * @copydoc tuple_compare_create() */ tuple_compare_with_key_t tuple_compare_with_key_create(const struct key_def *key_def); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TUPLE_COMPARE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_mem.c0000664000000000000000000004533413306565107017244 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_mem.h" #include #include #include #include #include #include "diag.h" #include "tuple.h" /** {{{ vy_mem_env */ enum { /** Slab size for tuple arena. */ SLAB_SIZE = 16 * 1024 * 1024 }; void vy_mem_env_create(struct vy_mem_env *env, size_t memory) { /* Vinyl memory is limited by vy_quota. 
*/ quota_init(&env->quota, QUOTA_MAX); tuple_arena_create(&env->arena, &env->quota, memory, SLAB_SIZE, "vinyl"); lsregion_create(&env->allocator, &env->arena); env->tree_extent_size = 0; } void vy_mem_env_destroy(struct vy_mem_env *env) { lsregion_destroy(&env->allocator); tuple_arena_destroy(&env->arena); } /* }}} vy_mem_env */ /** {{{ vy_mem */ static void * vy_mem_tree_extent_alloc(void *ctx) { struct vy_mem *mem = (struct vy_mem *) ctx; struct vy_mem_env *env = mem->env; void *ret = lsregion_alloc(&env->allocator, VY_MEM_TREE_EXTENT_SIZE, mem->generation); if (ret == NULL) { diag_set(OutOfMemory, VY_MEM_TREE_EXTENT_SIZE, "lsregion_alloc", "ret"); return NULL; } mem->tree_extent_size += VY_MEM_TREE_EXTENT_SIZE; env->tree_extent_size += VY_MEM_TREE_EXTENT_SIZE; return ret; } static void vy_mem_tree_extent_free(void *ctx, void *p) { /* Can't free part of region allocated memory. */ (void)ctx; (void)p; } struct vy_mem * vy_mem_new(struct vy_mem_env *env, int64_t generation, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *format_with_colmask, struct tuple_format *upsert_format, uint32_t schema_version) { struct vy_mem *index = calloc(1, sizeof(*index)); if (!index) { diag_set(OutOfMemory, sizeof(*index), "malloc", "struct vy_mem"); return NULL; } index->env = env; index->min_lsn = INT64_MAX; index->max_lsn = -1; index->cmp_def = cmp_def; index->generation = generation; index->schema_version = schema_version; index->format = format; tuple_format_ref(format); index->format_with_colmask = format_with_colmask; tuple_format_ref(format_with_colmask); index->upsert_format = upsert_format; tuple_format_ref(upsert_format); vy_mem_tree_create(&index->tree, cmp_def, vy_mem_tree_extent_alloc, vy_mem_tree_extent_free, index); rlist_create(&index->in_sealed); fiber_cond_create(&index->pin_cond); return index; } void vy_mem_update_formats(struct vy_mem *mem, struct tuple_format *new_format, struct tuple_format *new_format_with_colmask, struct 
tuple_format *new_upsert_format) { assert(mem->count.rows == 0); tuple_format_unref(mem->format); tuple_format_unref(mem->format_with_colmask); tuple_format_unref(mem->upsert_format); mem->format = new_format; mem->format_with_colmask = new_format_with_colmask; mem->upsert_format = new_upsert_format; tuple_format_ref(mem->format); tuple_format_ref(mem->format_with_colmask); tuple_format_ref(mem->upsert_format); } void vy_mem_delete(struct vy_mem *index) { index->env->tree_extent_size -= index->tree_extent_size; tuple_format_unref(index->format); tuple_format_unref(index->format_with_colmask); tuple_format_unref(index->upsert_format); fiber_cond_destroy(&index->pin_cond); TRASH(index); free(index); } const struct tuple * vy_mem_older_lsn(struct vy_mem *mem, const struct tuple *stmt) { struct tree_mem_key tree_key; tree_key.stmt = stmt; tree_key.lsn = vy_stmt_lsn(stmt) - 1; bool exact = false; struct vy_mem_tree_iterator itr = vy_mem_tree_lower_bound(&mem->tree, &tree_key, &exact); if (vy_mem_tree_iterator_is_invalid(&itr)) return NULL; const struct tuple *result; result = *vy_mem_tree_iterator_get_elem(&mem->tree, &itr); if (vy_tuple_compare(result, stmt, mem->cmp_def) != 0) return NULL; return result; } int vy_mem_insert_upsert(struct vy_mem *mem, const struct tuple *stmt) { assert(vy_stmt_type(stmt) == IPROTO_UPSERT); /* Check if the statement can be inserted in the vy_mem. */ assert(stmt->format_id == tuple_format_id(mem->upsert_format)); /* The statement must be from a lsregion. */ assert(!vy_stmt_is_refable(stmt)); size_t size = tuple_size(stmt); const struct tuple *replaced_stmt = NULL; struct vy_mem_tree_iterator inserted; if (vy_mem_tree_insert_get_iterator(&mem->tree, stmt, &replaced_stmt, &inserted) != 0) return -1; assert(! 
vy_mem_tree_iterator_is_invalid(&inserted)); assert(*vy_mem_tree_iterator_get_elem(&mem->tree, &inserted) == stmt); if (replaced_stmt == NULL) mem->count.rows++; mem->count.bytes += size; /* * All iterators begin to see the new statement, and * will be aborted in case of rollback. */ mem->version++; /* * Update n_upserts if needed. Get the previous statement * from the inserted one and if it has the same key, then * increment n_upserts of the new statement until the * predefined limit: * * UPSERT, n = 0 * UPSERT, n = 1, * ... * UPSERT, n = threshold, * UPSERT, n = threshold + 1, * UPSERT, n = threshold + 1, all following ones have * ... threshold + 1. * These values are used by vy_index_commit to squash * UPSERTs subsequence. */ vy_mem_tree_iterator_next(&mem->tree, &inserted); const struct tuple **older = vy_mem_tree_iterator_get_elem(&mem->tree, &inserted); if (older == NULL || vy_stmt_type(*older) != IPROTO_UPSERT || vy_tuple_compare(stmt, *older, mem->cmp_def) != 0) return 0; uint8_t n_upserts = vy_stmt_n_upserts(*older); /* * Stop increment if the threshold is reached to avoid * creation of multiple squashing tasks. */ if (n_upserts <= VY_UPSERT_THRESHOLD) n_upserts++; else assert(n_upserts == VY_UPSERT_INF); vy_stmt_set_n_upserts((struct tuple *)stmt, n_upserts); return 0; } int vy_mem_insert(struct vy_mem *mem, const struct tuple *stmt) { assert(vy_stmt_type(stmt) != IPROTO_UPSERT); /* Check if the statement can be inserted in the vy_mem. */ assert(stmt->format_id == tuple_format_id(mem->format_with_colmask) || stmt->format_id == tuple_format_id(mem->format)); /* The statement must be from a lsregion. */ assert(!vy_stmt_is_refable(stmt)); size_t size = tuple_size(stmt); const struct tuple *replaced_stmt = NULL; if (vy_mem_tree_insert(&mem->tree, stmt, &replaced_stmt)) return -1; if (replaced_stmt == NULL) mem->count.rows++; mem->count.bytes += size; /* * All iterators begin to see the new statement, and * will be aborted in case of rollback. 
*/
	mem->version++;
	return 0;
}

void
vy_mem_commit_stmt(struct vy_mem *mem, const struct tuple *stmt)
{
	/* The statement must be from a lsregion. */
	assert(!vy_stmt_is_refable(stmt));
	int64_t lsn = vy_stmt_lsn(stmt);
	/* Track the [min_lsn, max_lsn] range of committed statements. */
	if (mem->min_lsn == INT64_MAX)
		mem->min_lsn = lsn;
	assert(mem->min_lsn <= lsn);
	if (mem->max_lsn < lsn)
		mem->max_lsn = lsn;
}

void
vy_mem_rollback_stmt(struct vy_mem *mem, const struct tuple *stmt)
{
	/* This is the statement we've inserted before. */
	assert(!vy_stmt_is_refable(stmt));
	int rc = vy_mem_tree_delete(&mem->tree, stmt);
	assert(rc == 0);
	(void) rc;
	/* We can't free memory in case of rollback. */
	mem->count.rows--;
	/* Bump the version so open iterators notice the removal. */
	mem->version++;
}

/* }}} vy_mem */

/* {{{ vy_mem_iterator support functions */

/**
 * Copy current statement into the out parameter. It is necessary
 * because vy_mem stores its tuples in the lsregion allocated
 * area, and lsregion tuples can't be referenced or unreferenced.
 */
static int
vy_mem_iterator_copy_to(struct vy_mem_iterator *itr, struct tuple **ret)
{
	assert(itr->curr_stmt != NULL);
	/* Drop the reference to the previously returned copy. */
	if (itr->last_stmt)
		tuple_unref(itr->last_stmt);
	itr->last_stmt = vy_stmt_dup(itr->curr_stmt,
				     tuple_format(itr->curr_stmt));
	*ret = itr->last_stmt;
	if (itr->last_stmt != NULL) {
		vy_stmt_counter_acct_tuple(&itr->stat->get, *ret);
		return 0;
	}
	return -1;
}

/**
 * Get a stmt by current position.
 */
static const struct tuple *
vy_mem_iterator_curr_stmt(struct vy_mem_iterator *itr)
{
	return *vy_mem_tree_iterator_get_elem(&itr->mem->tree, &itr->curr_pos);
}

/**
 * Make a step in directions defined by @iterator_type.
* @retval 0 success * @retval 1 EOF */ static int vy_mem_iterator_step(struct vy_mem_iterator *itr, enum iterator_type iterator_type) { if (iterator_type == ITER_LE || iterator_type == ITER_LT) vy_mem_tree_iterator_prev(&itr->mem->tree, &itr->curr_pos); else vy_mem_tree_iterator_next(&itr->mem->tree, &itr->curr_pos); if (vy_mem_tree_iterator_is_invalid(&itr->curr_pos)) return 1; itr->curr_stmt = vy_mem_iterator_curr_stmt(itr); return 0; } /** * Find next record with lsn <= itr->lsn record. * Current position must be at the beginning of serie of records with the * same key it terms of direction of iterator (i.e. left for GE, right for LE) * * @retval 0 Found * @retval 1 Not found */ static int vy_mem_iterator_find_lsn(struct vy_mem_iterator *itr, enum iterator_type iterator_type, const struct tuple *key) { assert(!vy_mem_tree_iterator_is_invalid(&itr->curr_pos)); assert(itr->curr_stmt == vy_mem_iterator_curr_stmt(itr)); const struct key_def *cmp_def = itr->mem->cmp_def; while (vy_stmt_lsn(itr->curr_stmt) > (**itr->read_view).vlsn) { if (vy_mem_iterator_step(itr, iterator_type) != 0 || (iterator_type == ITER_EQ && vy_stmt_compare(key, itr->curr_stmt, cmp_def))) { itr->curr_stmt = NULL; return 1; } } if (iterator_type == ITER_LE || iterator_type == ITER_LT) { struct vy_mem_tree_iterator prev_pos = itr->curr_pos; vy_mem_tree_iterator_prev(&itr->mem->tree, &prev_pos); while (!vy_mem_tree_iterator_is_invalid(&prev_pos)) { const struct tuple *prev_stmt = *vy_mem_tree_iterator_get_elem(&itr->mem->tree, &prev_pos); if (vy_stmt_lsn(prev_stmt) > (**itr->read_view).vlsn || vy_tuple_compare(itr->curr_stmt, prev_stmt, cmp_def) != 0) break; itr->curr_pos = prev_pos; itr->curr_stmt = prev_stmt; vy_mem_tree_iterator_prev(&itr->mem->tree, &prev_pos); } } assert(itr->curr_stmt != NULL); return 0; } /** * Position the iterator to the first entry in the memory tree * satisfying the search criteria for a given key and direction. 
* * @retval 0 Found * @retval 1 Not found */ static int vy_mem_iterator_seek(struct vy_mem_iterator *itr, enum iterator_type iterator_type, const struct tuple *key) { itr->stat->lookup++; itr->version = itr->mem->version; itr->curr_stmt = NULL; struct tree_mem_key tree_key; tree_key.stmt = key; /* (lsn == INT64_MAX - 1) means that lsn is ignored in comparison */ tree_key.lsn = INT64_MAX - 1; if (tuple_field_count(key) > 0) { if (iterator_type == ITER_EQ) { bool exact; itr->curr_pos = vy_mem_tree_lower_bound(&itr->mem->tree, &tree_key, &exact); if (!exact) return 1; } else if (iterator_type == ITER_LE || iterator_type == ITER_GT) { itr->curr_pos = vy_mem_tree_upper_bound(&itr->mem->tree, &tree_key, NULL); } else { assert(iterator_type == ITER_GE || iterator_type == ITER_LT); itr->curr_pos = vy_mem_tree_lower_bound(&itr->mem->tree, &tree_key, NULL); } } else if (iterator_type == ITER_LE) { itr->curr_pos = vy_mem_tree_invalid_iterator(); } else { assert(iterator_type == ITER_GE); itr->curr_pos = vy_mem_tree_iterator_first(&itr->mem->tree); } if (iterator_type == ITER_LT || iterator_type == ITER_LE) vy_mem_tree_iterator_prev(&itr->mem->tree, &itr->curr_pos); if (vy_mem_tree_iterator_is_invalid(&itr->curr_pos)) return 1; itr->curr_stmt = vy_mem_iterator_curr_stmt(itr); return vy_mem_iterator_find_lsn(itr, iterator_type, key); } /** * Start iteration. 
* * @retval 0 Found * @retval 1 Not found */ static int vy_mem_iterator_start(struct vy_mem_iterator *itr) { assert(!itr->search_started); itr->search_started = true; return vy_mem_iterator_seek(itr, itr->iterator_type, itr->key); } /* }}} vy_mem_iterator support functions */ /* {{{ vy_mem_iterator API implementation */ void vy_mem_iterator_open(struct vy_mem_iterator *itr, struct vy_mem_iterator_stat *stat, struct vy_mem *mem, enum iterator_type iterator_type, const struct tuple *key, const struct vy_read_view **rv) { itr->stat = stat; assert(key != NULL); itr->mem = mem; itr->iterator_type = iterator_type; itr->key = key; itr->read_view = rv; itr->curr_pos = vy_mem_tree_invalid_iterator(); itr->curr_stmt = NULL; itr->last_stmt = NULL; itr->search_started = false; } /* * Find the next record with different key as current and visible lsn. * @retval 0 Found * @retval 1 Not found */ static NODISCARD int vy_mem_iterator_next_key_impl(struct vy_mem_iterator *itr) { if (!itr->search_started) return vy_mem_iterator_start(itr); if (!itr->curr_stmt) /* End of search. 
*/ return 1; assert(itr->mem->version == itr->version); assert(!vy_mem_tree_iterator_is_invalid(&itr->curr_pos)); assert(itr->curr_stmt == vy_mem_iterator_curr_stmt(itr)); const struct key_def *cmp_def = itr->mem->cmp_def; const struct tuple *prev_stmt = itr->curr_stmt; do { if (vy_mem_iterator_step(itr, itr->iterator_type) != 0) { itr->curr_stmt = NULL; return 1; } } while (vy_tuple_compare(prev_stmt, itr->curr_stmt, cmp_def) == 0); if (itr->iterator_type == ITER_EQ && vy_stmt_compare(itr->key, itr->curr_stmt, cmp_def) != 0) { itr->curr_stmt = NULL; return 1; } return vy_mem_iterator_find_lsn(itr, itr->iterator_type, itr->key); } NODISCARD int vy_mem_iterator_next_key(struct vy_mem_iterator *itr, struct tuple **ret) { *ret = NULL; if (vy_mem_iterator_next_key_impl(itr) == 0) return vy_mem_iterator_copy_to(itr, ret); return 0; } /* * Find next (lower, older) record with the same key as current * @retval 0 Found * @retval 1 Not found */ static NODISCARD int vy_mem_iterator_next_lsn_impl(struct vy_mem_iterator *itr) { assert(itr->search_started); if (!itr->curr_stmt) /* End of search. 
*/ return 1; assert(itr->mem->version == itr->version); assert(!vy_mem_tree_iterator_is_invalid(&itr->curr_pos)); assert(itr->curr_stmt == vy_mem_iterator_curr_stmt(itr)); const struct key_def *cmp_def = itr->mem->cmp_def; struct vy_mem_tree_iterator next_pos = itr->curr_pos; vy_mem_tree_iterator_next(&itr->mem->tree, &next_pos); if (vy_mem_tree_iterator_is_invalid(&next_pos)) return 1; /* EOF */ const struct tuple *next_stmt; next_stmt = *vy_mem_tree_iterator_get_elem(&itr->mem->tree, &next_pos); if (vy_tuple_compare(itr->curr_stmt, next_stmt, cmp_def) == 0) { itr->curr_pos = next_pos; itr->curr_stmt = next_stmt; return 0; } return 1; } NODISCARD int vy_mem_iterator_next_lsn(struct vy_mem_iterator *itr, struct tuple **ret) { *ret = NULL; if (vy_mem_iterator_next_lsn_impl(itr) == 0) return vy_mem_iterator_copy_to(itr, ret); return 0; } NODISCARD int vy_mem_iterator_skip(struct vy_mem_iterator *itr, const struct tuple *last_stmt, struct tuple **ret) { *ret = NULL; assert(!itr->search_started || itr->version == itr->mem->version); /* * Check if the iterator is already positioned * at the statement following last_stmt. */ if (itr->search_started && (itr->curr_stmt == NULL || last_stmt == NULL || iterator_direction(itr->iterator_type) * vy_tuple_compare(itr->curr_stmt, last_stmt, itr->mem->cmp_def) > 0)) { if (itr->curr_stmt != NULL) *ret = itr->last_stmt; return 0; } const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = last_stmt; iterator_type = iterator_direction(iterator_type) > 0 ? 
ITER_GT : ITER_LT; } itr->search_started = true; vy_mem_iterator_seek(itr, iterator_type, key); if (itr->iterator_type == ITER_EQ && last_stmt != NULL && itr->curr_stmt != NULL && vy_stmt_compare(itr->key, itr->curr_stmt, itr->mem->cmp_def) != 0) itr->curr_stmt = NULL; if (itr->curr_stmt != NULL) return vy_mem_iterator_copy_to(itr, ret); return 0; } NODISCARD int vy_mem_iterator_restore(struct vy_mem_iterator *itr, const struct tuple *last_stmt, struct tuple **ret) { if (!itr->search_started || itr->version == itr->mem->version) return 0; const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = last_stmt; iterator_type = iterator_direction(iterator_type) > 0 ? ITER_GT : ITER_LT; } const struct tuple *prev_stmt = itr->curr_stmt; vy_mem_iterator_seek(itr, iterator_type, key); if (itr->iterator_type == ITER_EQ && itr->curr_stmt != NULL && vy_stmt_compare(itr->key, itr->curr_stmt, itr->mem->cmp_def) != 0) itr->curr_stmt = NULL; if (prev_stmt == itr->curr_stmt) return 0; *ret = NULL; if (itr->curr_stmt != NULL && vy_mem_iterator_copy_to(itr, ret) < 0) return -1; return 1; } void vy_mem_iterator_close(struct vy_mem_iterator *itr) { if (itr->last_stmt != NULL) tuple_unref(itr->last_stmt); TRASH(itr); } static NODISCARD int vy_mem_stream_next(struct vy_stmt_stream *virt_stream, struct tuple **ret) { assert(virt_stream->iface->next == vy_mem_stream_next); struct vy_mem_stream *stream = (struct vy_mem_stream *)virt_stream; struct tuple **res = (struct tuple **) vy_mem_tree_iterator_get_elem(&stream->mem->tree, &stream->curr_pos); if (res == NULL) { *ret = NULL; } else { *ret = *res; vy_mem_tree_iterator_next(&stream->mem->tree, &stream->curr_pos); } return 0; } static const struct vy_stmt_stream_iface vy_mem_stream_iface = { .start = NULL, .next = vy_mem_stream_next, .stop = NULL, .close = NULL }; void vy_mem_stream_open(struct vy_mem_stream *stream, struct vy_mem *mem) { stream->base.iface = 
&vy_mem_stream_iface; stream->mem = mem; stream->curr_pos = vy_mem_tree_iterator_first(&mem->tree); } /* }}} vy_mem_iterator API implementation */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_read_set.h0000664000000000000000000001502513306565107020253 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_READ_SET_H #define INCLUDES_TARANTOOL_BOX_VY_READ_SET_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #define RB_COMPACT 1 #include #include "salad/stailq.h" #include "trivia/util.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct tuple; struct vy_tx; struct vy_index; /** * A tuple interval read by a transaction. */ struct vy_read_interval { /** Transaction. 
*/ struct vy_tx *tx; /** Index that the transaction read from. */ struct vy_index *index; /** Left boundary of the interval. */ struct tuple *left; /** Right boundary of the interval. */ struct tuple *right; /** Set if the left boundary belongs to the interval. */ bool left_belongs; /** Set if the right boundary belongs to the interval. */ bool right_belongs; /** * The interval with the max right boundary over * all nodes in the subtree rooted at this node. */ const struct vy_read_interval *subtree_last; /** Link in vy_tx->read_set. */ rb_node(struct vy_read_interval) in_tx; /** Link in vy_index->read_set. */ rb_node(struct vy_read_interval) in_index; /** * Auxiliary list node. Used by vy_tx_track() to * link intervals to be merged. */ struct stailq_entry in_merge; }; /** * Compare left boundaries of two intervals. * * Let 'A' and 'B' be the intervals of keys from the left boundary * of 'a' and 'b' to plus infinity, respectively. Assume that * * - a > b iff A is spanned by B * - a = b iff A equals B * - a < b iff A spans B */ int vy_read_interval_cmpl(const struct vy_read_interval *a, const struct vy_read_interval *b); /** * Compare right boundaries of two intervals. * * Let 'A' and 'B' be the intervals of keys from minus infinity to * the right boundary of 'a' and 'b', respectively. Assume that * * - a > b iff A spans B * - a = b iff A equals B * - a < b iff A is spanned by B */ int vy_read_interval_cmpr(const struct vy_read_interval *a, const struct vy_read_interval *b); /** * Return true if two intervals should be merged. * Interval 'l' must start before interval 'r'. * Note, if this function returns true, it does not * necessarily mean that the intervals intersect - * they might complement each other, e.g. * * (10, 12] and (12, 20] */ bool vy_read_interval_should_merge(const struct vy_read_interval *l, const struct vy_read_interval *r); /** * Tree that contains tuple intervals read by a transactions. * Linked by vy_read_interval->in_tx. 
Sorted by vy_index, then * by vy_read_interval->left. Intervals stored in this tree * must not intersect. */ typedef rb_tree(struct vy_read_interval) vy_tx_read_set_t; static inline int vy_tx_read_set_cmp(const struct vy_read_interval *a, const struct vy_read_interval *b) { assert(a->tx == b->tx); int rc = a->index < b->index ? -1 : a->index > b->index; if (rc == 0) rc = vy_read_interval_cmpl(a, b); return rc; } rb_gen(MAYBE_UNUSED static inline, vy_tx_read_set_, vy_tx_read_set_t, struct vy_read_interval, in_tx, vy_tx_read_set_cmp); /** * Interval tree used for tracking reads done from an index by * all active transactions. Linked by vy_read_interval->in_index. * Sorted by vy_read_interval->left, then by vy_tx. Intervals that * belong to different transactions may intersect. */ typedef rb_tree(struct vy_read_interval) vy_index_read_set_t; static inline int vy_index_read_set_cmp(const struct vy_read_interval *a, const struct vy_read_interval *b) { assert(a->index == b->index); int rc = vy_read_interval_cmpl(a, b); if (rc == 0) rc = a->tx < b->tx ? -1 : a->tx > b->tx; return rc; } static inline void vy_index_read_set_aug(struct vy_read_interval *node, const struct vy_read_interval *left, const struct vy_read_interval *right) { node->subtree_last = node; if (left != NULL && vy_read_interval_cmpr(left->subtree_last, node->subtree_last) > 0) node->subtree_last = left->subtree_last; if (right != NULL && vy_read_interval_cmpr(right->subtree_last, node->subtree_last) > 0) node->subtree_last = right->subtree_last; } rb_gen_aug(MAYBE_UNUSED static inline, vy_index_read_set_, vy_index_read_set_t, struct vy_read_interval, in_index, vy_index_read_set_cmp, vy_index_read_set_aug); /** * Iterator over transactions that conflict with a statement. */ struct vy_tx_conflict_iterator { /** The statement. */ const struct tuple *stmt; /** * Iterator over the index interval tree checked * for intersections with the statement. 
*/ struct vy_index_read_set_walk tree_walk; /** * Direction of tree traversal to be used on the * next iteration. */ int tree_dir; }; static inline void vy_tx_conflict_iterator_init(struct vy_tx_conflict_iterator *it, vy_index_read_set_t *read_set, const struct tuple *stmt) { vy_index_read_set_walk_init(&it->tree_walk, read_set); it->tree_dir = 0; it->stmt = stmt; } /** * Return the next conflicting transaction or NULL. * Note, the same transaction may be returned more than once. */ struct vy_tx * vy_tx_conflict_iterator_next(struct vy_tx_conflict_iterator *it); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_READ_SET_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/user.cc0000664000000000000000000004255213306560010017054 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "user.h" #include "assoc.h" #include "schema.h" #include "space.h" #include "func.h" #include "index.h" #include "bit/bit.h" #include "session.h" #include "scoped_guard.h" #include "sequence.h" struct universe universe; static struct user users[BOX_USER_MAX]; struct user *guest_user = users; struct user *admin_user = users + 1; static struct user_map user_map_nil; struct mh_i32ptr_t *user_registry; /* {{{ user_map */ static inline int user_map_calc_idx(uint8_t auth_token, uint8_t *bit_no) { *bit_no = auth_token & (UMAP_INT_BITS - 1); return auth_token / UMAP_INT_BITS; } /** Set a bit in the user map - add a user. */ static inline void user_map_set(struct user_map *map, uint8_t auth_token) { uint8_t bit_no; int idx = user_map_calc_idx(auth_token, &bit_no); map->m[idx] |= ((umap_int_t) 1) << bit_no; } /** Clear a bit in the user map - remove a user. */ static inline void user_map_clear(struct user_map *map, uint8_t auth_token) { uint8_t bit_no; int idx = user_map_calc_idx(auth_token, &bit_no); map->m[idx] &= ~(((umap_int_t) 1) << bit_no); } /* Check if a bit is set in the user map. */ static inline bool user_map_is_set(struct user_map *map, uint8_t auth_token) { uint8_t bit_no; int idx = user_map_calc_idx(auth_token, &bit_no); return map->m[idx] & (((umap_int_t) 1) << bit_no); } /** * Merge two sets of users: add all users from right argument * to the left one. 
*/ static void user_map_union(struct user_map *lhs, struct user_map *rhs) { for (int i = 0; i < USER_MAP_SIZE; i++) lhs->m[i] |= rhs->m[i]; } /** * Remove all users present in rhs from lhs */ static void user_map_minus(struct user_map *lhs, struct user_map *rhs) { for (int i = 0; i < USER_MAP_SIZE; i++) lhs->m[i] &= ~rhs->m[i]; } /** Iterate over users in the set of users. */ struct user_map_iterator { struct bit_iterator it; }; static void user_map_iterator_init(struct user_map_iterator *it, struct user_map *map) { bit_iterator_init(&it->it, map->m, USER_MAP_SIZE * sizeof(umap_int_t), true); } static struct user * user_map_iterator_next(struct user_map_iterator *it) { size_t auth_token = bit_iterator_next(&it->it); if (auth_token != SIZE_MAX) return users + auth_token; return NULL; } /* }}} */ /* {{{ privset_t - set of effective privileges of a user */ extern "C" { static int priv_def_compare(const struct priv_def *lhs, const struct priv_def *rhs) { if (lhs->object_type != rhs->object_type) return lhs->object_type > rhs->object_type ? 1 : -1; if (lhs->object_id != rhs->object_id) return lhs->object_id > rhs->object_id ? 1 : -1; return 0; } } /* extern "C" */ rb_gen(, privset_, privset_t, struct priv_def, link, priv_def_compare); /* }}} */ /** {{{ user */ static void user_create(struct user *user, uint8_t auth_token) { assert(user->auth_token == 0); user->auth_token = auth_token; privset_new(&user->privs); region_create(&user->pool, &cord()->slabc); } static void user_destroy(struct user *user) { /* * Sic: we don't have to remove a deleted * user from users set of roles, since * to drop a user, one has to revoke * all privileges from them first. */ region_destroy(&user->pool); free(user->def); memset(user, 0, sizeof(*user)); } /** * Add a privilege definition to the list * of effective privileges of a user. 
*/ void user_grant_priv(struct user *user, struct priv_def *def) { struct priv_def *old = privset_search(&user->privs, def); if (old == NULL) { old = (struct priv_def *) region_alloc_xc(&user->pool, sizeof(struct priv_def)); *old = *def; privset_insert(&user->privs, old); } else { old->access |= def->access; } } /** * Find the corresponding access structure * given object type and object id. */ struct access * access_find(struct priv_def *priv) { struct access *access = NULL; switch (priv->object_type) { case SC_UNIVERSE: { access = universe.access; break; } case SC_SPACE: { struct space *space = space_by_id(priv->object_id); if (space) access = space->access; break; } case SC_FUNCTION: { struct func *func = func_by_id(priv->object_id); if (func) access = func->access; break; } case SC_SEQUENCE: { struct sequence *seq = sequence_by_id(priv->object_id); if (seq) access = seq->access; break; } default: break; } return access; } /** * Reset effective access of the user in the * corresponding objects. */ static void user_set_effective_access(struct user *user) { struct credentials *cr = effective_user(); struct privset_iterator it; privset_ifirst(&user->privs, &it); struct priv_def *priv; while ((priv = privset_inext(&it)) != NULL) { struct access *object = access_find(priv); /* Protect against a concurrent drop. */ if (object == NULL) continue; struct access *access = &object[user->auth_token]; access->effective = access->granted | priv->access; /** Update global access in the current session. */ if (priv->object_type == SC_UNIVERSE && user->def->uid == cr->uid) { cr->universal_access = access->effective; } } } /** * Reload user privileges and re-grant them. */ static void user_reload_privs(struct user *user) { if (user->is_dirty == false) return; struct priv_def *priv; /** * Reset effective access of the user in the * corresponding objects to have * only the stuff that it's granted directly. 
*/ struct privset_iterator it; privset_ifirst(&user->privs, &it); while ((priv = privset_inext(&it)) != NULL) { priv->access = 0; } user_set_effective_access(user); region_free(&user->pool); privset_new(&user->privs); /* Load granted privs from _priv space. */ { struct space *space = space_cache_find_xc(BOX_PRIV_ID); char key[6]; /** Primary key - by user id */ struct index *index = index_find_system_xc(space, 0); mp_encode_uint(key, user->def->uid); struct iterator *it = index_create_iterator_xc(index, ITER_EQ, key, 1); IteratorGuard iter_guard(it); struct tuple *tuple; while ((tuple = iterator_next_xc(it)) != NULL) { struct priv_def priv; priv_def_create_from_tuple(&priv, tuple); /** * Skip role grants, we're only * interested in real objects. */ if (priv.object_type != SC_ROLE) user_grant_priv(user, &priv); } } { /* Take into account privs granted through roles. */ struct user_map_iterator it; user_map_iterator_init(&it, &user->roles); struct user *role; while ((role = user_map_iterator_next(&it))) { struct privset_iterator it; privset_ifirst(&role->privs, &it); struct priv_def *def; while ((def = privset_inext(&it))) { user_grant_priv(user, def); } } } user_set_effective_access(user); user->is_dirty = false; } /** }}} */ /* {{{ authentication tokens */ /** A map to quickly look up free slots in users[] array. */ static umap_int_t tokens[USER_MAP_SIZE]; /** * Index of the minimal element of the tokens array which * has an unused token. */ static int min_token_idx = 0; /** * Find and return a spare authentication token. * Raise an exception when the maximal number of users * is reached (and we're out of tokens). */ uint8_t auth_token_get() { uint8_t bit_no = 0; while (min_token_idx < USER_MAP_SIZE) { bit_no = __builtin_ffs(tokens[min_token_idx]); if (bit_no) break; min_token_idx++; } if (bit_no == 0 || bit_no > BOX_USER_MAX) { /* A cap on the number of users was reached. * Check for BOX_USER_MAX to cover case when * USER_MAP_BITS > BOX_USER_MAX. 
*/ tnt_raise(LoggedError, ER_USER_MAX, BOX_USER_MAX); } /* * find-first-set returns bit index starting from 1, * or 0 if no bit is set. Rebase the index to offset 0. */ bit_no--; tokens[min_token_idx] ^= ((umap_int_t) 1) << bit_no; int auth_token = min_token_idx * UMAP_INT_BITS + bit_no; assert(auth_token < UINT8_MAX); return auth_token; } /** * Return an authentication token to the set of unused * tokens. */ void auth_token_put(uint8_t auth_token) { uint8_t bit_no; int idx = user_map_calc_idx(auth_token, &bit_no); tokens[idx] |= ((umap_int_t) 1) << bit_no; if (idx < min_token_idx) min_token_idx = idx; } /* }}} */ /* {{{ user cache */ struct user * user_cache_replace(struct user_def *def) { struct user *user = user_by_id(def->uid); if (user == NULL) { uint8_t auth_token = auth_token_get(); user = users + auth_token; user_create(user, auth_token); struct mh_i32ptr_node_t node = { def->uid, user }; mh_i32ptr_put(user_registry, &node, NULL, NULL); } else { free(user->def); } user->def = def; return user; } void user_cache_delete(uint32_t uid) { mh_int_t k = mh_i32ptr_find(user_registry, uid, NULL); if (k != mh_end(user_registry)) { struct user *user = (struct user *) mh_i32ptr_node(user_registry, k)->val; assert(user->auth_token > ADMIN); auth_token_put(user->auth_token); assert(user_map_is_empty(&user->roles)); assert(user_map_is_empty(&user->users)); user_destroy(user); /* * Sic: we don't have to remove a deleted * user from users hash of roles, since * to drop a user, one has to revoke * all privileges from them first. */ mh_i32ptr_del(user_registry, k, NULL); } } /** Find user by id. 
*/ struct user * user_by_id(uint32_t uid) { mh_int_t k = mh_i32ptr_find(user_registry, uid, NULL); if (k == mh_end(user_registry)) return NULL; return (struct user *) mh_i32ptr_node(user_registry, k)->val; } struct user * user_find(uint32_t uid) { struct user *user = user_by_id(uid); if (user == NULL) diag_set(ClientError, ER_NO_SUCH_USER, int2str(uid)); return user; } /** Find user by name. */ struct user * user_find_by_name(const char *name, uint32_t len) { uint32_t uid = schema_find_id(BOX_USER_ID, 2, name, len); struct user *user = user_by_id(uid); if (user == NULL || user->def->type != SC_USER) { diag_set(ClientError, ER_NO_SUCH_USER, tt_cstr(name, len)); return NULL; } return user; } void user_cache_init() { /** Mark all tokens as unused. */ memset(tokens, 0xFF, sizeof(tokens)); user_registry = mh_i32ptr_new(); /* * Solve a chicken-egg problem: * we need a functional user cache entry for superuser to * perform recovery, but the superuser credentials are * stored in the snapshot. So, pre-create cache entries * for 'guest' and 'admin' users here, they will be * updated with snapshot contents during recovery. */ size_t name_len = strlen("guest"); size_t sz = user_def_sizeof(name_len); struct user_def *def = (struct user_def *) calloc(1, sz); if (def == NULL) tnt_raise(OutOfMemory, sz, "malloc", "def"); /* Free def in a case of exception. */ auto guest_def_guard = make_scoped_guard([=] { free(def); }); memcpy(def->name, "guest", name_len); def->owner = ADMIN; def->type = SC_USER; struct user *user = user_cache_replace(def); /* Now the user cache owns the def. */ guest_def_guard.is_active = false; /* 0 is the auth token and user id by default. 
*/ assert(user->def->uid == GUEST && user->auth_token == GUEST); (void) user; name_len = strlen("admin"); sz = user_def_sizeof(name_len); def = (struct user_def *) calloc(1, sz); if (def == NULL) tnt_raise(OutOfMemory, sz, "malloc", "def"); auto admin_def_guard = make_scoped_guard([=] { free(def); }); memcpy(def->name, "admin", name_len); def->uid = def->owner = ADMIN; def->type = SC_USER; user = user_cache_replace(def); admin_def_guard.is_active = false; /* ADMIN is both the auth token and user id for 'admin' user. */ assert(user->def->uid == ADMIN && user->auth_token == ADMIN); } void user_cache_free() { if (user_registry) mh_i32ptr_delete(user_registry); } /* }}} user cache */ /** {{{ roles */ void role_check(struct user *grantee, struct user *role) { /* * Check that there is no loop from grantee to role: * if grantee is a role, build up a closure of all * immediate and indirect users of grantee, and ensure * the granted role is not in this set. */ struct user_map transitive_closure = user_map_nil; user_map_set(&transitive_closure, grantee->auth_token); struct user_map current_layer = transitive_closure; while (! user_map_is_empty(¤t_layer)) { /* * As long as we're traversing a directed * acyclic graph, we're bound to end at some * point in a layer with no incoming edges. */ struct user_map next_layer = user_map_nil; struct user_map_iterator it; user_map_iterator_init(&it, ¤t_layer); struct user *user; while ((user = user_map_iterator_next(&it))) user_map_union(&next_layer, &user->users); user_map_union(&transitive_closure, &next_layer); current_layer = next_layer; } /* * Check if the role is in the list of roles to which the * grantee is granted. */ if (user_map_is_set(&transitive_closure, role->auth_token)) { tnt_raise(ClientError, ER_ROLE_LOOP, role->def->name, grantee->def->name); } } /** * Re-calculate effective grants of the linked subgraph * this user/role is a part of. 
*/ void rebuild_effective_grants(struct user *grantee) { /* * Recurse over all roles to which grantee is granted * and mark them as dirty - in need for rebuild. */ struct user_map_iterator it; struct user *user; struct user_map current_layer = user_map_nil; user_map_set(¤t_layer, grantee->auth_token); while (!user_map_is_empty(¤t_layer)) { struct user_map next_layer = user_map_nil; user_map_iterator_init(&it, ¤t_layer); while ((user = user_map_iterator_next(&it))) { user->is_dirty = true; user_map_union(&next_layer, &user->users); } /* * Switch to the nodes which are not in the set * yet. */ current_layer = next_layer; } /* * First, construct a subset of the transitive * closure consisting from the nodes with no * incoming edges (roles which have no granted * roles). Build their list of effective grants * from their actual grants. * * Propagate the effective grants through the * outgoing edges of the nodes, avoiding the nodes * with incoming edges from not-yet-evaluated nodes. * Eventually this process will end with a set of * nodes with no outgoing edges. */ struct user_map transitive_closure = user_map_nil; current_layer = user_map_nil; user_map_set(¤t_layer, grantee->auth_token); /* * Propagate effective privileges from the nodes * with no incoming edges to the remaining nodes. */ while (! user_map_is_empty(¤t_layer)) { struct user_map postponed = user_map_nil; struct user_map next_layer = user_map_nil; user_map_iterator_init(&it, ¤t_layer); while ((user = user_map_iterator_next(&it))) { struct user_map indirect_edges = user->roles; user_map_minus(&indirect_edges, &transitive_closure); if (user_map_is_empty(&indirect_edges)) { user_reload_privs(user); user_map_union(&next_layer, &user->users); } else { /* * The user has roles whose * effective grants have not been * calculated yet. Postpone * evaluation of effective grants * of this user till these roles' * effective grants have been * built. 
*/ user_map_union(&next_layer, &indirect_edges); user_map_set(&postponed, user->auth_token); user_map_set(&next_layer, user->auth_token); } } user_map_minus(¤t_layer, &postponed); user_map_union(&transitive_closure, ¤t_layer); current_layer = next_layer; } } /** * Update verges in the graph of dependencies. * Grant all effective privileges of the role to whoever * this role was granted to. */ void role_grant(struct user *grantee, struct user *role) { user_map_set(&role->users, grantee->auth_token); user_map_set(&grantee->roles, role->auth_token); rebuild_effective_grants(grantee); } /** * Update the role dependencies graph. * Rebuild effective privileges of the grantee. */ void role_revoke(struct user *grantee, struct user *role) { user_map_clear(&role->users, grantee->auth_token); user_map_clear(&grantee->roles, role->auth_token); rebuild_effective_grants(grantee); } void priv_grant(struct user *grantee, struct priv_def *priv) { struct access *object = access_find(priv); if (object == NULL) return; struct access *access = &object[grantee->auth_token]; assert(privset_search(&grantee->privs, priv) || access->granted == 0); access->granted = priv->access; rebuild_effective_grants(grantee); } /** }}} */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_index.c0000664000000000000000000007775313306565107017607 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_index.h" #include "trivia/util.h" #include #include #include #include #include "assoc.h" #include "diag.h" #include "errcode.h" #include "histogram.h" #include "index_def.h" #include "say.h" #include "schema.h" #include "tuple.h" #include "vy_log.h" #include "vy_mem.h" #include "vy_range.h" #include "vy_run.h" #include "vy_stat.h" #include "vy_stmt.h" #include "vy_upsert.h" #include "vy_read_set.h" void vy_index_validate_formats(const struct vy_index *index) { (void) index; assert(index->disk_format != NULL); assert(index->mem_format != NULL); assert(index->mem_format_with_colmask != NULL); assert(index->upsert_format != NULL); uint32_t index_field_count = index->mem_format->index_field_count; (void) index_field_count; if (index->id == 0) { assert(index->disk_format == index->mem_format); assert(index->disk_format->index_field_count == index_field_count); assert(index->mem_format_with_colmask->index_field_count == index_field_count); } else { assert(index->disk_format != index->mem_format); assert(index->disk_format->index_field_count <= index_field_count); } assert(index->upsert_format->index_field_count == index_field_count); assert(index->mem_format_with_colmask->index_field_count == index_field_count); } int 
vy_index_env_create(struct vy_index_env *env, const char *path, int64_t *p_generation, vy_upsert_thresh_cb upsert_thresh_cb, void *upsert_thresh_arg) { env->key_format = tuple_format_new(&vy_tuple_format_vtab, NULL, 0, 0, NULL, 0, NULL); if (env->key_format == NULL) return -1; tuple_format_ref(env->key_format); env->empty_key = vy_stmt_new_select(env->key_format, NULL, 0); if (env->empty_key == NULL) { tuple_format_unref(env->key_format); return -1; } env->path = path; env->p_generation = p_generation; env->upsert_thresh_cb = upsert_thresh_cb; env->upsert_thresh_arg = upsert_thresh_arg; env->too_long_threshold = TIMEOUT_INFINITY; env->index_count = 0; return 0; } void vy_index_env_destroy(struct vy_index_env *env) { tuple_unref(env->empty_key); tuple_format_unref(env->key_format); } const char * vy_index_name(struct vy_index *index) { char *buf = tt_static_buf(); snprintf(buf, TT_STATIC_BUF_LEN, "%u/%u", (unsigned)index->space_id, (unsigned)index->id); return buf; } size_t vy_index_mem_tree_size(struct vy_index *index) { struct vy_mem *mem; size_t size = index->mem->tree_extent_size; rlist_foreach_entry(mem, &index->sealed, in_sealed) size += mem->tree_extent_size; return size; } struct vy_index * vy_index_new(struct vy_index_env *index_env, struct vy_cache_env *cache_env, struct vy_mem_env *mem_env, struct index_def *index_def, struct tuple_format *format, struct vy_index *pk) { static int64_t run_buckets[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 50, 100, }; assert(index_def->key_def->part_count > 0); assert(index_def->iid == 0 || pk != NULL); struct vy_index *index = calloc(1, sizeof(struct vy_index)); if (index == NULL) { diag_set(OutOfMemory, sizeof(struct vy_index), "calloc", "struct vy_index"); goto fail; } index->env = index_env; index->tree = malloc(sizeof(*index->tree)); if (index->tree == NULL) { diag_set(OutOfMemory, sizeof(*index->tree), "malloc", "vy_range_tree_t"); goto fail_tree; } struct key_def *key_def = key_def_dup(index_def->key_def); 
if (key_def == NULL) goto fail_key_def; struct key_def *cmp_def = key_def_dup(index_def->cmp_def); if (cmp_def == NULL) goto fail_cmp_def; index->cmp_def = cmp_def; index->key_def = key_def; if (index_def->iid == 0) { /* * Disk tuples can be returned to an user from a * primary key. And they must have field * definitions as well as space->format tuples. */ index->disk_format = format; tuple_format_ref(format); } else { index->disk_format = tuple_format_new(&vy_tuple_format_vtab, &cmp_def, 1, 0, NULL, 0, NULL); if (index->disk_format == NULL) goto fail_format; for (uint32_t i = 0; i < cmp_def->part_count; ++i) { uint32_t fieldno = cmp_def->parts[i].fieldno; index->disk_format->fields[fieldno].is_nullable = format->fields[fieldno].is_nullable; } } tuple_format_ref(index->disk_format); if (index_def->iid == 0) { index->upsert_format = vy_tuple_format_new_upsert(format); if (index->upsert_format == NULL) goto fail_upsert_format; tuple_format_ref(index->upsert_format); index->mem_format_with_colmask = vy_tuple_format_new_with_colmask(format); if (index->mem_format_with_colmask == NULL) goto fail_mem_format_with_colmask; tuple_format_ref(index->mem_format_with_colmask); } else { index->mem_format_with_colmask = pk->mem_format_with_colmask; index->upsert_format = pk->upsert_format; tuple_format_ref(index->mem_format_with_colmask); tuple_format_ref(index->upsert_format); } if (vy_index_stat_create(&index->stat) != 0) goto fail_stat; index->run_hist = histogram_new(run_buckets, lengthof(run_buckets)); if (index->run_hist == NULL) goto fail_run_hist; index->mem = vy_mem_new(mem_env, *index->env->p_generation, cmp_def, format, index->mem_format_with_colmask, index->upsert_format, schema_version); if (index->mem == NULL) goto fail_mem; index->refs = 1; index->commit_lsn = -1; index->dump_lsn = -1; vy_cache_create(&index->cache, cache_env, cmp_def); rlist_create(&index->sealed); vy_range_tree_new(index->tree); vy_range_heap_create(&index->range_heap); 
rlist_create(&index->runs); index->pk = pk; if (pk != NULL) vy_index_ref(pk); index->mem_format = format; tuple_format_ref(index->mem_format); index->in_dump.pos = UINT32_MAX; index->in_compact.pos = UINT32_MAX; index->space_id = index_def->space_id; index->id = index_def->iid; index->opts = index_def->opts; index->check_is_unique = index->opts.is_unique; vy_index_read_set_new(&index->read_set); index_env->index_count++; vy_index_validate_formats(index); return index; fail_mem: histogram_delete(index->run_hist); fail_run_hist: vy_index_stat_destroy(&index->stat); fail_stat: tuple_format_unref(index->mem_format_with_colmask); fail_mem_format_with_colmask: tuple_format_unref(index->upsert_format); fail_upsert_format: tuple_format_unref(index->disk_format); fail_format: key_def_delete(cmp_def); fail_cmp_def: key_def_delete(key_def); fail_key_def: free(index->tree); fail_tree: free(index); fail: return NULL; } static struct vy_range * vy_range_tree_free_cb(vy_range_tree_t *t, struct vy_range *range, void *arg) { (void)t; (void)arg; struct vy_slice *slice; rlist_foreach_entry(slice, &range->slices, in_range) vy_slice_wait_pinned(slice); vy_range_delete(range); return NULL; } void vy_index_delete(struct vy_index *index) { assert(index->refs == 0); assert(index->in_dump.pos == UINT32_MAX); assert(index->in_compact.pos == UINT32_MAX); assert(vy_index_read_set_empty(&index->read_set)); assert(index->env->index_count > 0); index->env->index_count--; if (index->pk != NULL) vy_index_unref(index->pk); struct vy_mem *mem, *next_mem; rlist_foreach_entry_safe(mem, &index->sealed, in_sealed, next_mem) vy_mem_delete(mem); vy_mem_delete(index->mem); struct vy_run *run, *next_run; rlist_foreach_entry_safe(run, &index->runs, in_index, next_run) vy_index_remove_run(index, run); vy_range_tree_iter(index->tree, NULL, vy_range_tree_free_cb, NULL); vy_range_heap_destroy(&index->range_heap); tuple_format_unref(index->disk_format); tuple_format_unref(index->mem_format_with_colmask); 
tuple_format_unref(index->upsert_format); key_def_delete(index->cmp_def); key_def_delete(index->key_def); histogram_delete(index->run_hist); vy_index_stat_destroy(&index->stat); vy_cache_destroy(&index->cache); tuple_format_unref(index->mem_format); free(index->tree); TRASH(index); free(index); } void vy_index_swap(struct vy_index *old_index, struct vy_index *new_index) { assert(old_index->stat.memory.count.rows == 0); assert(new_index->stat.memory.count.rows == 0); SWAP(old_index->dump_lsn, new_index->dump_lsn); SWAP(old_index->range_count, new_index->range_count); SWAP(old_index->run_count, new_index->run_count); SWAP(old_index->stat, new_index->stat); SWAP(old_index->run_hist, new_index->run_hist); SWAP(old_index->tree, new_index->tree); SWAP(old_index->range_heap, new_index->range_heap); rlist_swap(&old_index->runs, &new_index->runs); } int vy_index_init_range_tree(struct vy_index *index) { struct vy_range *range = vy_range_new(vy_log_next_id(), NULL, NULL, index->cmp_def); if (range == NULL) return -1; assert(index->range_count == 0); vy_index_add_range(index, range); vy_index_acct_range(index, range); return 0; } int vy_index_create(struct vy_index *index) { /* Make index directory. */ int rc; char path[PATH_MAX]; vy_index_snprint_path(path, sizeof(path), index->env->path, index->space_id, index->id); char *path_sep = path; while (*path_sep == '/') { /* Don't create root */ ++path_sep; } while ((path_sep = strchr(path_sep, '/'))) { /* Recursively create path hierarchy */ *path_sep = '\0'; rc = mkdir(path, 0777); if (rc == -1 && errno != EEXIST) { diag_set(SystemError, "failed to create directory '%s'", path); *path_sep = '/'; return -1; } *path_sep = '/'; ++path_sep; } rc = mkdir(path, 0777); if (rc == -1 && errno != EEXIST) { diag_set(SystemError, "failed to create directory '%s'", path); return -1; } /* Allocate initial range. */ return vy_index_init_range_tree(index); } /** vy_index_recovery_cb() argument. 
*/ struct vy_index_recovery_cb_arg { /** Index being recovered. */ struct vy_index *index; /** Last recovered range. */ struct vy_range *range; /** Vinyl run environment. */ struct vy_run_env *run_env; /** * All recovered runs hashed by ID. * It is needed in order not to load the same * run each time a slice is created for it. */ struct mh_i64ptr_t *run_hash; /** * True if force_recovery mode is enabled. */ bool force_recovery; }; /** Index recovery callback, passed to vy_recovery_load_index(). */ static int vy_index_recovery_cb(const struct vy_log_record *record, void *cb_arg) { struct vy_index_recovery_cb_arg *arg = cb_arg; struct vy_index *index = arg->index; struct vy_range *range = arg->range; struct vy_run_env *run_env = arg->run_env; struct mh_i64ptr_t *run_hash = arg->run_hash; bool force_recovery = arg->force_recovery; struct tuple_format *key_format = index->env->key_format; struct tuple *begin = NULL, *end = NULL; struct vy_run *run; struct vy_slice *slice; bool success = false; assert(record->type == VY_LOG_CREATE_INDEX || index->commit_lsn >= 0); if (record->type == VY_LOG_INSERT_RANGE || record->type == VY_LOG_INSERT_SLICE) { if (record->begin != NULL) { begin = vy_key_from_msgpack(key_format, record->begin); if (begin == NULL) goto out; } if (record->end != NULL) { end = vy_key_from_msgpack(key_format, record->end); if (end == NULL) goto out; } } switch (record->type) { case VY_LOG_CREATE_INDEX: assert(record->index_id == index->id); assert(record->space_id == index->space_id); assert(index->commit_lsn < 0); assert(record->index_lsn >= 0); index->commit_lsn = record->index_lsn; break; case VY_LOG_DUMP_INDEX: assert(record->index_lsn == index->commit_lsn); index->dump_lsn = record->dump_lsn; break; case VY_LOG_TRUNCATE_INDEX: assert(record->index_lsn == index->commit_lsn); index->truncate_count = record->truncate_count; break; case VY_LOG_DROP_INDEX: assert(record->index_lsn == index->commit_lsn); index->is_dropped = true; /* * If the index was 
dropped, we don't need to replay * truncate (see vy_prepare_truncate_space()). */ index->truncate_count = UINT64_MAX; break; case VY_LOG_PREPARE_RUN: break; case VY_LOG_CREATE_RUN: if (record->is_dropped) break; assert(record->index_lsn == index->commit_lsn); run = vy_run_new(run_env, record->run_id); if (run == NULL) goto out; run->dump_lsn = record->dump_lsn; if (vy_run_recover(run, index->env->path, index->space_id, index->id) != 0 && (!force_recovery || vy_run_rebuild_index(run, index->env->path, index->space_id, index->id, index->cmp_def, index->key_def, index->mem_format, index->upsert_format, &index->opts) != 0)) { vy_run_unref(run); goto out; } struct mh_i64ptr_node_t node = { run->id, run }; if (mh_i64ptr_put(run_hash, &node, NULL, NULL) == mh_end(run_hash)) { diag_set(OutOfMemory, 0, "mh_i64ptr_put", "mh_i64ptr_node_t"); vy_run_unref(run); goto out; } break; case VY_LOG_DROP_RUN: break; case VY_LOG_INSERT_RANGE: assert(record->index_lsn == index->commit_lsn); range = vy_range_new(record->range_id, begin, end, index->cmp_def); if (range == NULL) goto out; if (range->begin != NULL && range->end != NULL && vy_key_compare(range->begin, range->end, index->cmp_def) >= 0) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("begin >= end for range id %lld", (long long)range->id)); vy_range_delete(range); goto out; } vy_index_add_range(index, range); arg->range = range; break; case VY_LOG_INSERT_SLICE: assert(range != NULL); assert(range->id == record->range_id); mh_int_t k = mh_i64ptr_find(run_hash, record->run_id, NULL); assert(k != mh_end(run_hash)); run = mh_i64ptr_node(run_hash, k)->val; slice = vy_slice_new(record->slice_id, run, begin, end, index->cmp_def); if (slice == NULL) goto out; vy_range_add_slice(range, slice); break; default: unreachable(); } success = true; out: if (begin != NULL) tuple_unref(begin); if (end != NULL) tuple_unref(end); return success ? 
0 : -1; } int vy_index_recover(struct vy_index *index, struct vy_recovery *recovery, struct vy_run_env *run_env, int64_t lsn, bool is_checkpoint_recovery, bool force_recovery) { assert(index->range_count == 0); struct vy_index_recovery_cb_arg arg = { .index = index, .range = NULL, .run_env = run_env, .run_hash = NULL, .force_recovery = force_recovery, }; arg.run_hash = mh_i64ptr_new(); if (arg.run_hash == NULL) { diag_set(OutOfMemory, 0, "mh_i64ptr_new", "mh_i64ptr_t"); return -1; } /* * Backward compatibility fixup: historically, we used * box.info.signature for LSN of index creation, which * lags behind the LSN of the record that created the * index by 1. So for legacy indexes use the LSN from * index options. */ if (index->opts.lsn != 0) lsn = index->opts.lsn; int rc = vy_recovery_load_index(recovery, index->space_id, index->id, lsn, is_checkpoint_recovery, vy_index_recovery_cb, &arg); mh_int_t k; mh_foreach(arg.run_hash, k) { struct vy_run *run = mh_i64ptr_node(arg.run_hash, k)->val; if (run->refs > 1) vy_index_add_run(index, run); if (run->refs == 1 && rc == 0) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Unused run %lld in index %lld", (long long)run->id, (long long)index->commit_lsn)); rc = -1; /* * Continue the loop to unreference * all runs in the hash. */ } /* Drop the reference held by the hash. */ vy_run_unref(run); } mh_i64ptr_delete(arg.run_hash); if (rc != 0) { /* Recovery callback failed. */ return -1; } if (index->commit_lsn < 0) { /* Index was not found in the metadata log. */ if (is_checkpoint_recovery) { /* * All indexes created from snapshot rows must * be present in vylog, because snapshot can * only succeed if vylog has been successfully * flushed. */ diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Index %lld not found", (long long)index->commit_lsn)); return -1; } /* * If we failed to log index creation before restart, * we won't find it in the log on recovery. 
This is * OK as the index doesn't have any runs in this case. * We will retry to log index in vy_index_commit_create(). * For now, just create the initial range. */ return vy_index_init_range_tree(index); } if (index->is_dropped) { /* * Initial range is not stored in the metadata log * for dropped indexes, but we need it for recovery. */ return vy_index_init_range_tree(index); } /* * Account ranges to the index and check that the range tree * does not have holes or overlaps. */ struct vy_range *range, *prev = NULL; for (range = vy_range_tree_first(index->tree); range != NULL; prev = range, range = vy_range_tree_next(index->tree, range)) { if (prev == NULL && range->begin != NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Range %lld is leftmost but " "starts with a finite key", (long long)range->id)); return -1; } int cmp = 0; if (prev != NULL && (prev->end == NULL || range->begin == NULL || (cmp = vy_key_compare(prev->end, range->begin, index->cmp_def)) != 0)) { const char *errmsg = cmp > 0 ? "Nearby ranges %lld and %lld overlap" : "Keys between ranges %lld and %lld not spanned"; diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf(errmsg, (long long)prev->id, (long long)range->id)); return -1; } vy_index_acct_range(index, range); } if (prev == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Index %lld has empty range tree", (long long)index->commit_lsn)); return -1; } if (prev->end != NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Range %lld is rightmost but " "ends with a finite key", (long long)prev->id)); return -1; } return 0; } int64_t vy_index_generation(struct vy_index *index) { struct vy_mem *oldest = rlist_empty(&index->sealed) ? 
index->mem : rlist_last_entry(&index->sealed, struct vy_mem, in_sealed); return oldest->generation; } int vy_index_compact_priority(struct vy_index *index) { struct heap_node *n = vy_range_heap_top(&index->range_heap); if (n == NULL) return 0; struct vy_range *range = container_of(n, struct vy_range, heap_node); return range->compact_priority; } void vy_index_add_run(struct vy_index *index, struct vy_run *run) { assert(rlist_empty(&run->in_index)); rlist_add_entry(&index->runs, run, in_index); index->run_count++; vy_disk_stmt_counter_add(&index->stat.disk.count, &run->count); index->bloom_size += vy_run_bloom_size(run); index->page_index_size += run->page_index_size; index->env->bloom_size += vy_run_bloom_size(run); index->env->page_index_size += run->page_index_size; } void vy_index_remove_run(struct vy_index *index, struct vy_run *run) { assert(index->run_count > 0); assert(!rlist_empty(&run->in_index)); rlist_del_entry(run, in_index); index->run_count--; vy_disk_stmt_counter_sub(&index->stat.disk.count, &run->count); index->bloom_size -= vy_run_bloom_size(run); index->page_index_size -= run->page_index_size; index->env->bloom_size -= vy_run_bloom_size(run); index->env->page_index_size -= run->page_index_size; } void vy_index_add_range(struct vy_index *index, struct vy_range *range) { assert(range->heap_node.pos == UINT32_MAX); vy_range_heap_insert(&index->range_heap, &range->heap_node); vy_range_tree_insert(index->tree, range); index->range_count++; } void vy_index_remove_range(struct vy_index *index, struct vy_range *range) { assert(range->heap_node.pos != UINT32_MAX); vy_range_heap_delete(&index->range_heap, &range->heap_node); vy_range_tree_remove(index->tree, range); index->range_count--; } void vy_index_acct_range(struct vy_index *index, struct vy_range *range) { histogram_collect(index->run_hist, range->slice_count); } void vy_index_unacct_range(struct vy_index *index, struct vy_range *range) { histogram_discard(index->run_hist, range->slice_count); } int 
vy_index_rotate_mem(struct vy_index *index) { struct vy_mem *mem; assert(index->mem != NULL); mem = vy_mem_new(index->mem->env, *index->env->p_generation, index->cmp_def, index->mem_format, index->mem_format_with_colmask, index->upsert_format, schema_version); if (mem == NULL) return -1; rlist_add_entry(&index->sealed, index->mem, in_sealed); index->mem = mem; index->mem_list_version++; return 0; } void vy_index_delete_mem(struct vy_index *index, struct vy_mem *mem) { assert(!rlist_empty(&mem->in_sealed)); rlist_del_entry(mem, in_sealed); vy_stmt_counter_sub(&index->stat.memory.count, &mem->count); vy_mem_delete(mem); index->mem_list_version++; } int vy_index_set(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt, const struct tuple **region_stmt) { assert(vy_stmt_is_refable(stmt)); assert(*region_stmt == NULL || !vy_stmt_is_refable(*region_stmt)); /* Allocate region_stmt on demand. */ if (*region_stmt == NULL) { *region_stmt = vy_stmt_dup_lsregion(stmt, &mem->env->allocator, mem->generation); if (*region_stmt == NULL) return -1; } /* We can't free region_stmt below, so let's add it to the stats */ index->stat.memory.count.bytes += tuple_size(stmt); uint32_t format_id = stmt->format_id; if (vy_stmt_type(*region_stmt) != IPROTO_UPSERT) { /* Abort transaction if format was changed by DDL */ if (format_id != tuple_format_id(mem->format_with_colmask) && format_id != tuple_format_id(mem->format)) { diag_set(ClientError, ER_TRANSACTION_CONFLICT); return -1; } return vy_mem_insert(mem, *region_stmt); } else { /* Abort transaction if format was changed by DDL */ if (format_id != tuple_format_id(mem->upsert_format)) { diag_set(ClientError, ER_TRANSACTION_CONFLICT); return -1; } return vy_mem_insert_upsert(mem, *region_stmt); } } /** * Calculate and record the number of sequential upserts, squash * immediately or schedule upsert process if needed. * Additional handler used in vy_index_commit_stmt() for UPSERT * statements. 
* * @param index Index the statement was committed to. * @param mem In-memory tree where the statement was saved. * @param stmt UPSERT statement to squash. */ static void vy_index_commit_upsert(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt) { assert(vy_stmt_type(stmt) == IPROTO_UPSERT); assert(vy_stmt_lsn(stmt) < MAX_LSN); /* * UPSERT is enabled only for the spaces with the single * index. */ assert(index->id == 0); const struct tuple *older; int64_t lsn = vy_stmt_lsn(stmt); uint8_t n_upserts = vy_stmt_n_upserts(stmt); /* * If there are a lot of successive upserts for the same key, * select might take too long to squash them all. So once the * number of upserts exceeds a certain threshold, we schedule * a fiber to merge them and insert the resulting statement * after the latest upsert. */ if (n_upserts == VY_UPSERT_INF) { /* * If UPSERT has n_upserts > VY_UPSERT_THRESHOLD, * it means the mem has older UPSERTs for the same * key which already are beeing processed in the * squashing task. At the end, the squashing task * will merge its result with this UPSERT * automatically. */ return; } if (n_upserts == VY_UPSERT_THRESHOLD) { /* * Start single squashing task per one-mem and * one-key continous UPSERTs sequence. */ #ifndef NDEBUG older = vy_mem_older_lsn(mem, stmt); assert(older != NULL && vy_stmt_type(older) == IPROTO_UPSERT && vy_stmt_n_upserts(older) == VY_UPSERT_THRESHOLD - 1); #endif if (index->env->upsert_thresh_cb == NULL) { /* Squash callback is not installed. */ return; } struct tuple *dup = vy_stmt_dup(stmt, index->upsert_format); if (dup != NULL) { index->env->upsert_thresh_cb(index, dup, index->env->upsert_thresh_arg); tuple_unref(dup); } /* * Ignore dup == NULL, because the optimization is * good, but is not necessary. */ return; } /* * If there are no other mems and runs and n_upserts == 0, * then we can turn the UPSERT into the REPLACE. 
*/ if (n_upserts == 0 && index->stat.memory.count.rows == index->mem->count.rows && index->run_count == 0) { older = vy_mem_older_lsn(mem, stmt); assert(older == NULL || vy_stmt_type(older) != IPROTO_UPSERT); struct tuple *upserted = vy_apply_upsert(stmt, older, index->cmp_def, index->mem_format, index->upsert_format, false); index->stat.upsert.applied++; if (upserted == NULL) { /* OOM */ diag_clear(diag_get()); return; } int64_t upserted_lsn = vy_stmt_lsn(upserted); if (upserted_lsn != lsn) { /** * This could only happen if the upsert completely * failed and the old tuple was returned. * In this case we shouldn't insert the same replace * again. */ assert(older == NULL || upserted_lsn == vy_stmt_lsn(older)); tuple_unref(upserted); return; } assert(older == NULL || upserted_lsn != vy_stmt_lsn(older)); assert(vy_stmt_type(upserted) == IPROTO_REPLACE); const struct tuple *region_stmt = vy_stmt_dup_lsregion(upserted, &mem->env->allocator, mem->generation); if (region_stmt == NULL) { /* OOM */ tuple_unref(upserted); diag_clear(diag_get()); return; } int rc = vy_index_set(index, mem, upserted, ®ion_stmt); /** * Since we have already allocated mem statement and * now we replacing one statement with another, the * vy_index_set() cannot fail. */ assert(rc == 0); (void)rc; tuple_unref(upserted); vy_mem_commit_stmt(mem, region_stmt); index->stat.upsert.squashed++; } } void vy_index_commit_stmt(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt) { vy_mem_commit_stmt(mem, stmt); index->stat.memory.count.rows++; if (vy_stmt_type(stmt) == IPROTO_UPSERT) vy_index_commit_upsert(index, mem, stmt); vy_stmt_counter_acct_tuple(&index->stat.put, stmt); /* Invalidate cache element. */ vy_cache_on_write(&index->cache, stmt, NULL); } void vy_index_rollback_stmt(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt) { vy_mem_rollback_stmt(mem, stmt); /* Invalidate cache element. 
*/ vy_cache_on_write(&index->cache, stmt, NULL); } bool vy_index_split_range(struct vy_index *index, struct vy_range *range) { struct tuple_format *key_format = index->env->key_format; const char *split_key_raw; if (!vy_range_needs_split(range, &index->opts, &split_key_raw)) return false; /* Split a range in two parts. */ const int n_parts = 2; /* * Determine new ranges' boundaries. */ struct tuple *split_key = vy_key_from_msgpack(key_format, split_key_raw); if (split_key == NULL) goto fail; struct tuple *keys[3]; keys[0] = range->begin; keys[1] = split_key; keys[2] = range->end; /* * Allocate new ranges and create slices of * the old range's runs for them. */ struct vy_slice *slice, *new_slice; struct vy_range *part, *parts[2] = {NULL, }; for (int i = 0; i < n_parts; i++) { part = vy_range_new(vy_log_next_id(), keys[i], keys[i + 1], index->cmp_def); if (part == NULL) goto fail; parts[i] = part; /* * vy_range_add_slice() adds a slice to the list head, * so to preserve the order of the slices list, we have * to iterate backward. */ rlist_foreach_entry_reverse(slice, &range->slices, in_range) { if (vy_slice_cut(slice, vy_log_next_id(), part->begin, part->end, index->cmp_def, &new_slice) != 0) goto fail; if (new_slice != NULL) vy_range_add_slice(part, new_slice); } part->compact_priority = range->compact_priority; } /* * Log change in metadata. */ vy_log_tx_begin(); rlist_foreach_entry(slice, &range->slices, in_range) vy_log_delete_slice(slice->id); vy_log_delete_range(range->id); for (int i = 0; i < n_parts; i++) { part = parts[i]; vy_log_insert_range(index->commit_lsn, part->id, tuple_data_or_null(part->begin), tuple_data_or_null(part->end)); rlist_foreach_entry(slice, &part->slices, in_range) vy_log_insert_slice(part->id, slice->run->id, slice->id, tuple_data_or_null(slice->begin), tuple_data_or_null(slice->end)); } if (vy_log_tx_commit() < 0) goto fail; /* * Replace the old range in the index. 
*/ vy_index_unacct_range(index, range); vy_index_remove_range(index, range); for (int i = 0; i < n_parts; i++) { part = parts[i]; vy_index_add_range(index, part); vy_index_acct_range(index, part); } index->range_tree_version++; say_info("%s: split range %s by key %s", vy_index_name(index), vy_range_str(range), tuple_str(split_key)); rlist_foreach_entry(slice, &range->slices, in_range) vy_slice_wait_pinned(slice); vy_range_delete(range); tuple_unref(split_key); return true; fail: for (int i = 0; i < n_parts; i++) { if (parts[i] != NULL) vy_range_delete(parts[i]); } if (split_key != NULL) tuple_unref(split_key); diag_log(); say_error("%s: failed to split range %s", vy_index_name(index), vy_range_str(range)); return false; } bool vy_index_coalesce_range(struct vy_index *index, struct vy_range *range) { struct vy_range *first, *last; if (!vy_range_needs_coalesce(range, index->tree, &index->opts, &first, &last)) return false; struct vy_range *result = vy_range_new(vy_log_next_id(), first->begin, last->end, index->cmp_def); if (result == NULL) goto fail_range; struct vy_range *it; struct vy_range *end = vy_range_tree_next(index->tree, last); /* * Log change in metadata. */ vy_log_tx_begin(); vy_log_insert_range(index->commit_lsn, result->id, tuple_data_or_null(result->begin), tuple_data_or_null(result->end)); for (it = first; it != end; it = vy_range_tree_next(index->tree, it)) { struct vy_slice *slice; rlist_foreach_entry(slice, &it->slices, in_range) vy_log_delete_slice(slice->id); vy_log_delete_range(it->id); rlist_foreach_entry(slice, &it->slices, in_range) { vy_log_insert_slice(result->id, slice->run->id, slice->id, tuple_data_or_null(slice->begin), tuple_data_or_null(slice->end)); } } if (vy_log_tx_commit() < 0) goto fail_commit; /* * Move run slices of the coalesced ranges to the * resulting range and delete the former. 
*/ it = first; while (it != end) { struct vy_range *next = vy_range_tree_next(index->tree, it); vy_index_unacct_range(index, it); vy_index_remove_range(index, it); rlist_splice(&result->slices, &it->slices); result->slice_count += it->slice_count; vy_disk_stmt_counter_add(&result->count, &it->count); vy_range_delete(it); it = next; } /* * Coalescing increases read amplification and breaks the log * structured layout of the run list, so, although we could * leave the resulting range as it is, we'd better compact it * as soon as we can. */ result->compact_priority = result->slice_count; vy_index_acct_range(index, result); vy_index_add_range(index, result); index->range_tree_version++; say_info("%s: coalesced ranges %s", vy_index_name(index), vy_range_str(result)); return true; fail_commit: vy_range_delete(result); fail_range: diag_log(); say_error("%s: failed to coalesce range %s", vy_index_name(index), vy_range_str(range)); return false; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_log.h0000664000000000000000000004141113306565107017244 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_LOG_H #define INCLUDES_TARANTOOL_BOX_VY_LOG_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "salad/stailq.h" /* * Data stored in vinyl is organized in ranges and runs. * Runs correspond to data files written to disk, while * ranges are used to group runs together. Sometimes, we * need to manipulate several ranges or runs atomically, * e.g. on compaction several runs are replaced with a * single one. To reflect events like this on disk and be * able to recover to a consistent state after restart, we * need to log all metadata changes. This module implements * the infrastructure necessary for such logging as well * as recovery. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct xlog; struct vclock; struct key_def; struct key_part_def; struct vy_recovery; /** Type of a metadata log record. */ enum vy_log_record_type { /** * Create a new vinyl index. * Requires vy_log_record::index_lsn, index_id, space_id, * key_def (with primary key parts). */ VY_LOG_CREATE_INDEX = 0, /** * Drop an index. * Requires vy_log_record::index_lsn. */ VY_LOG_DROP_INDEX = 1, /** * Insert a new range into a vinyl index. * Requires vy_log_record::index_lsn, range_id, begin, end. */ VY_LOG_INSERT_RANGE = 2, /** * Delete a vinyl range and all its runs. * Requires vy_log_record::range_id. */ VY_LOG_DELETE_RANGE = 3, /** * Prepare a vinyl run file. * Requires vy_log_record::index_lsn, run_id. * * Record of this type is written before creating a run file. 
* It is needed to keep track of unfinished due to errors run * files so that we could remove them after recovery. */ VY_LOG_PREPARE_RUN = 4, /** * Commit a vinyl run file creation. * Requires vy_log_record::index_lsn, run_id, dump_lsn. * * Written after a run file was successfully created. */ VY_LOG_CREATE_RUN = 5, /** * Drop a vinyl run. * Requires vy_log_record::run_id, gc_lsn. * * A record of this type indicates that the run is not in use * any more and its files can be safely removed. When the log * is recovered from, this only marks the run as deleted, * because we still need it for garbage collection. A run is * actually freed by VY_LOG_FORGET_RUN. Runs that were * deleted, but not "forgotten" are not expunged from the log * on rotation. */ VY_LOG_DROP_RUN = 6, /** * Forget a vinyl run. * Requires vy_log_record::run_id. * * A record of this type is written after all files left from * an unused run have been successfully removed. On recovery, * this results in freeing all structures associated with the * run. Information about "forgotten" runs is not included in * the new log on rotation. */ VY_LOG_FORGET_RUN = 7, /** * Insert a run slice into a range. * Requires vy_log_record::range_id, run_id, slice_id, begin, end. */ VY_LOG_INSERT_SLICE = 8, /** * Delete a run slice. * Requires vy_log_record::slice_id. */ VY_LOG_DELETE_SLICE = 9, /** * Update LSN of the last index dump. * Requires vy_log_record::index_lsn, dump_lsn. */ VY_LOG_DUMP_INDEX = 10, /** * We don't split vylog into snapshot and log - all records * are written to the same file. Since we need to load a * consistent view from a given checkpoint (for replication * and backup), we write a marker after the last record * corresponding to the snapshot. The following record * represents the marker. * * See also: @only_checkpoint argument of vy_recovery_new(). */ VY_LOG_SNAPSHOT = 11, /** * Update truncate count of a vinyl index. * Requires vy_log_record::index_lsn, truncate_count. 
*/ VY_LOG_TRUNCATE_INDEX = 12, vy_log_record_type_MAX }; /** Record in the metadata log. */ struct vy_log_record { /** Type of the record. */ enum vy_log_record_type type; /** * LSN from the time of index creation. * Used to identify indexes in vylog. */ int64_t index_lsn; /** Unique ID of the vinyl range. */ int64_t range_id; /** Unique ID of the vinyl run. */ int64_t run_id; /** Unique ID of the run slice. */ int64_t slice_id; /** * For VY_LOG_CREATE_RUN record: hint that the run * is dropped, i.e. there is a VY_LOG_DROP_RUN record * following this one. */ bool is_dropped; /** * Msgpack key for start of the range/slice. * NULL if the range/slice starts from -inf. */ const char *begin; /** * Msgpack key for end of the range/slice. * NULL if the range/slice ends with +inf. */ const char *end; /** Ordinal index number in the space. */ uint32_t index_id; /** Space ID. */ uint32_t space_id; /** Index key definition, as defined by the user. */ const struct key_def *key_def; /** Array of key part definitions. */ struct key_part_def *key_parts; /** Number of key parts. */ uint32_t key_part_count; /** Max LSN stored on disk. */ int64_t dump_lsn; /** * For deleted runs: LSN of the last checkpoint * that uses this run. */ int64_t gc_lsn; /** Index truncate count. */ int64_t truncate_count; /** Link in vy_log::tx. */ struct stailq_entry in_tx; }; /** * Initialize the metadata log. * @dir is the directory where log files are stored. */ void vy_log_init(const char *dir); /** * Destroy the metadata log. */ void vy_log_free(void); /** * Open current vy_log file. */ int vy_log_open(struct xlog *xlog); /** * Rotate the metadata log. This function creates a new * xlog file in the log directory having vclock @vclock * and writes records required to recover active indexes. * The goal of log rotation is to compact the log file by * discarding records cancelling each other and records left * from dropped indexes. * * Returns 0 on success, -1 on failure. 
*/ int vy_log_rotate(const struct vclock *vclock); /** * Remove metadata log files that are not needed to recover * from the snapshot with the given signature or newer. */ void vy_log_collect_garbage(int64_t signature); /** * Return the signature of the newest vylog to the time. */ int64_t vy_log_signature(void); /** * Return the path to the log file that needs to be backed up * in order to recover to checkpoint @vclock. */ const char * vy_log_backup_path(struct vclock *vclock); /** Allocate a unique ID for a vinyl object. */ int64_t vy_log_next_id(void); /** * Begin a transaction in the metadata log. * * To commit the transaction, call vy_log_tx_commit() or * vy_log_tx_try_commit(). */ void vy_log_tx_begin(void); /** * Commit a transaction started with vy_log_tx_begin(). * * This function flushes all buffered records to disk. If it fails, * all records of the current transaction are discarded. * * See also vy_log_tx_try_commit(). * * Returns 0 on success, -1 on failure. */ int vy_log_tx_commit(void); /** * Try to commit a transaction started with vy_log_tx_begin(). * * Similarly to vy_log_tx_commit(), this function tries to write all * buffered records to disk, but in case of failure pending records * are not expunged from the buffer, so that the next transaction * will retry to flush them. */ void vy_log_tx_try_commit(void); /** * Write a record to the metadata log. * * This function simply appends the record to the internal buffer. * It must be called inside a vy_log_tx_begin/commit block, and it * is up to vy_log_tx_commit() to actually write the record to disk. * * Returns 0 on success, -1 on failure. */ void vy_log_write(const struct vy_log_record *record); /** * Bootstrap vy_log. */ int vy_log_bootstrap(void); /** * Prepare the metadata log for recovery from the file having * vclock @vclock and return the recovery context. * * After this function is called, vinyl indexes may be recovered from * the log using vy_recovery methods. 
When recovery is complete, * one must call vy_log_end_recovery(). After that the recovery context * may be deleted with vy_recovery_delete(). * * Returns NULL on failure. */ struct vy_recovery * vy_log_begin_recovery(const struct vclock *vclock); /** * Finish recovery from the metadata log. * * This function destroys the recovery context that was created by * vy_log_begin_recovery(), opens the log file for appending, and * flushes all records written to the log buffer during recovery. * * Return 0 on success, -1 on failure. */ int vy_log_end_recovery(void); /** * Create a recovery context from the metadata log created * by checkpoint with the given signature. * * If @only_checkpoint is set, do not load records appended to * the log after checkpoint (i.e. get a consistent view of * Vinyl at the time of the checkpoint). * * Returns NULL on failure. */ struct vy_recovery * vy_recovery_new(int64_t signature, bool only_checkpoint); /** * Free a recovery context created by vy_recovery_new(). */ void vy_recovery_delete(struct vy_recovery *recovery); typedef int (*vy_recovery_cb)(const struct vy_log_record *record, void *arg); /** * Iterate over all objects stored in a recovery context. * * This function invokes callback @cb for each object (index, run, etc) * stored in the given recovery context. The callback is passed a record * used to log the object and optional argument @cb_arg. If the callback * returns a value different from 0, iteration stops and -1 is returned, * otherwise the function returns 0. * * To ease the work done by the callback, records corresponding to * slices of a range always go right after the range, in the * chronological order, while an index's runs go after the index * and before its ranges. */ int vy_recovery_iterate(struct vy_recovery *recovery, vy_recovery_cb cb, void *cb_arg); /** * Load an index from a recovery context. * * Call @cb for each object related to the index. 
Break the loop and * return -1 if @cb returned a non-zero value, otherwise return 0. * Objects are loaded in the same order as by vy_recovery_iterate(). * * Note, this function returns 0 if there's no index with the requested * id in the recovery context. In this case, @cb isn't called at all. * * The @is_checkpoint_recovery flag indicates that the row that created * the index was loaded from a snapshot, in which case @index_lsn is * the snapshot signature. Otherwise @index_lsn is the LSN of the WAL * row that created the index. * * The index is looked up by @space_id and @index_id while @index_lsn * is used to discern different incarnations of the same index as * follows. Let @record denote the vylog record corresponding to the * last incarnation of the index. Then * * - If @is_checkpoint_recovery is set and @index_lsn >= @record->index_lsn, * the last index incarnation was created before the snapshot and we * need to load it right now. * * - If @is_checkpoint_recovery is set and @index_lsn < @record->index_lsn, * the last index incarnation was created after the snapshot, i.e. * the index loaded now is going to be dropped so load a dummy. * * - If @is_checkpoint_recovery is unset and @index_lsn < @record->index_lsn, * the last index incarnation is created further in WAL, load a dummy. * * - If @is_checkpoint_recovery is unset and @index_lsn == @record->index_lsn, * load the last index incarnation. * * - If @is_checkpoint_recovery is unset and @index_lsn > @record->index_lsn, * it seems we failed to log index creation before restart. In this * case don't do anything. The caller is supposed to retry logging. */ int vy_recovery_load_index(struct vy_recovery *recovery, uint32_t space_id, uint32_t index_id, int64_t index_lsn, bool is_checkpoint_recovery, vy_recovery_cb cb, void *cb_arg); /** * Initialize a log record with default values. * Note, a key is only written to the log file * if its value is different from default. 
*/ static inline void vy_log_record_init(struct vy_log_record *record) { memset(record, 0, sizeof(*record)); } /** Helper to log a vinyl index creation. */ static inline void vy_log_create_index(int64_t index_lsn, uint32_t index_id, uint32_t space_id, const struct key_def *key_def) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_CREATE_INDEX; record.index_lsn = index_lsn; record.index_id = index_id; record.space_id = space_id; record.key_def = key_def; vy_log_write(&record); } /** Helper to log a vinyl index drop. */ static inline void vy_log_drop_index(int64_t index_lsn) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_DROP_INDEX; record.index_lsn = index_lsn; vy_log_write(&record); } /** Helper to log a vinyl range insertion. */ static inline void vy_log_insert_range(int64_t index_lsn, int64_t range_id, const char *begin, const char *end) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_INSERT_RANGE; record.index_lsn = index_lsn; record.range_id = range_id; record.begin = begin; record.end = end; vy_log_write(&record); } /** Helper to log a vinyl range deletion. */ static inline void vy_log_delete_range(int64_t range_id) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_DELETE_RANGE; record.range_id = range_id; vy_log_write(&record); } /** Helper to log a vinyl run file creation. */ static inline void vy_log_prepare_run(int64_t index_lsn, int64_t run_id) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_PREPARE_RUN; record.index_lsn = index_lsn; record.run_id = run_id; vy_log_write(&record); } /** Helper to log a vinyl run creation. 
*/ static inline void vy_log_create_run(int64_t index_lsn, int64_t run_id, int64_t dump_lsn) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_CREATE_RUN; record.index_lsn = index_lsn; record.run_id = run_id; record.dump_lsn = dump_lsn; vy_log_write(&record); } /** Helper to log a run deletion. */ static inline void vy_log_drop_run(int64_t run_id, int64_t gc_lsn) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_DROP_RUN; record.run_id = run_id; record.gc_lsn = gc_lsn; vy_log_write(&record); } /** Helper to log a run cleanup. */ static inline void vy_log_forget_run(int64_t run_id) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_FORGET_RUN; record.run_id = run_id; vy_log_write(&record); } /** Helper to log creation of a run slice. */ static inline void vy_log_insert_slice(int64_t range_id, int64_t run_id, int64_t slice_id, const char *begin, const char *end) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_INSERT_SLICE; record.range_id = range_id; record.run_id = run_id; record.slice_id = slice_id; record.begin = begin; record.end = end; vy_log_write(&record); } /** Helper to log deletion of a run slice. */ static inline void vy_log_delete_slice(int64_t slice_id) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_DELETE_SLICE; record.slice_id = slice_id; vy_log_write(&record); } /** Helper to log index dump. */ static inline void vy_log_dump_index(int64_t index_lsn, int64_t dump_lsn) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_DUMP_INDEX; record.index_lsn = index_lsn; record.dump_lsn = dump_lsn; vy_log_write(&record); } /** Helper to log index truncation. 
*/ static inline void vy_log_truncate_index(int64_t index_lsn, int64_t truncate_count) { struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_TRUNCATE_INDEX; record.index_lsn = index_lsn; record.truncate_count = truncate_count; vy_log_write(&record); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_LOG_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/space.c0000664000000000000000000003261213306565107017036 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "space.h" #include #include #include #include "tuple_format.h" #include "trigger.h" #include "user.h" #include "session.h" #include "txn.h" #include "tuple.h" #include "tuple_update.h" #include "request.h" #include "xrow.h" #include "iproto_constants.h" int access_check_space(struct space *space, user_access_t access) { struct credentials *cr = effective_user(); /* Any space access also requires global USAGE privilege. */ access |= PRIV_U; /* * If a user has a global permission, clear the respective * privilege from the list of privileges required * to execute the request. * No special check for ADMIN user is necessary * since ADMIN has universal access. */ user_access_t space_access = access & ~cr->universal_access; if (space_access && /* Check for missing Usage access, ignore owner rights. */ (space_access & PRIV_U || /* Check for missing specific access, respect owner rights. */ (space->def->uid != cr->uid && space_access & ~space->access[cr->auth_token].effective))) { /* * Report access violation. Throw "no such user" * error if there is no user with this id. * It is possible that the user was dropped * from a different connection. 
*/ struct user *user = user_find(cr->uid); if (user != NULL) { if (!(cr->universal_access & PRIV_U)) { diag_set(AccessDeniedError, priv_name(PRIV_U), schema_object_name(SC_UNIVERSE), "", user->def->name); } else { diag_set(AccessDeniedError, priv_name(access), schema_object_name(SC_SPACE), space->def->name, user->def->name); } } return -1; } return 0; } void space_fill_index_map(struct space *space) { uint32_t index_count = 0; for (uint32_t j = 0; j <= space->index_id_max; j++) { struct index *index = space->index_map[j]; if (index) { assert(index_count < space->index_count); space->index[index_count++] = index; } } } int space_create(struct space *space, struct engine *engine, const struct space_vtab *vtab, struct space_def *def, struct rlist *key_list, struct tuple_format *format) { if (!rlist_empty(key_list)) { /* Primary key must go first. */ struct index_def *pk = rlist_first_entry(key_list, struct index_def, link); assert(pk->iid == 0); (void)pk; } uint32_t index_id_max = 0; uint32_t index_count = 0; struct index_def *index_def; rlist_foreach_entry(index_def, key_list, link) { index_count++; index_id_max = MAX(index_id_max, index_def->iid); } memset(space, 0, sizeof(*space)); space->vtab = vtab; space->engine = engine; space->index_count = index_count; space->index_id_max = index_id_max; rlist_create(&space->before_replace); rlist_create(&space->on_replace); rlist_create(&space->on_stmt_begin); space->run_triggers = true; space->format = format; if (format != NULL) tuple_format_ref(format); space->def = space_def_dup(def); if (space->def == NULL) goto fail; /* Create indexes and fill the index map. 
*/ space->index_map = (struct index **) calloc(index_count + index_id_max + 1, sizeof(struct index *)); if (space->index_map == NULL) { diag_set(OutOfMemory, (index_count + index_id_max + 1) * sizeof(struct index *), "malloc", "index_map"); goto fail; } space->index = space->index_map + index_id_max + 1; rlist_foreach_entry(index_def, key_list, link) { struct index *index = space_create_index(space, index_def); if (index == NULL) goto fail_free_indexes; space->index_map[index_def->iid] = index; } space_fill_index_map(space); return 0; fail_free_indexes: for (uint32_t i = 0; i <= index_id_max; i++) { struct index *index = space->index_map[i]; if (index != NULL) index_delete(index); } fail: free(space->index_map); if (space->def != NULL) space_def_delete(space->def); if (space->format != NULL) tuple_format_unref(space->format); return -1; } struct space * space_new(struct space_def *def, struct rlist *key_list) { struct engine *engine = engine_find(def->engine_name); if (engine == NULL) return NULL; return engine_create_space(engine, def, key_list); } void space_delete(struct space *space) { for (uint32_t j = 0; j <= space->index_id_max; j++) { struct index *index = space->index_map[j]; if (index != NULL) index_delete(index); } free(space->index_map); if (space->format != NULL) tuple_format_unref(space->format); trigger_destroy(&space->before_replace); trigger_destroy(&space->on_replace); trigger_destroy(&space->on_stmt_begin); space_def_delete(space->def); space->vtab->destroy(space); } /** Do nothing if the space is already recovered. */ void space_noop(struct space *space) { (void)space; } void space_dump_def(const struct space *space, struct rlist *key_list) { rlist_create(key_list); /** Ensure the primary key is added first. 
*/ for (unsigned j = 0; j < space->index_count; j++) rlist_add_tail_entry(key_list, space->index[j]->def, link); } struct key_def * space_index_key_def(struct space *space, uint32_t id) { if (id <= space->index_id_max && space->index_map[id]) return space->index_map[id]->def->key_def; return NULL; } void space_swap_index(struct space *lhs, struct space *rhs, uint32_t lhs_id, uint32_t rhs_id) { struct index *tmp = lhs->index_map[lhs_id]; lhs->index_map[lhs_id] = rhs->index_map[rhs_id]; rhs->index_map[rhs_id] = tmp; } void space_run_triggers(struct space *space, bool yesno) { space->run_triggers = yesno; } size_t space_bsize(struct space *space) { return space->vtab->bsize(space); } struct index_def * space_index_def(struct space *space, int n) { return space->index[n]->def; } const char * index_name_by_id(struct space *space, uint32_t id) { struct index *index = space_index(space, id); if (index != NULL) return index->def->name; return NULL; } /** * Run BEFORE triggers registered for a space. If a trigger * changes the current statement, this function updates the * request accordingly. */ static int space_before_replace(struct space *space, struct txn *txn, struct request *request) { if (space->index_count == 0) { /* Empty space, nothing to do. */ return 0; } struct region *gc = &fiber()->gc; enum iproto_type type = request->type; struct index *pk = space->index[0]; const char *key; uint32_t part_count; struct index *index; /* * Lookup the old tuple. 
*/ if (type == IPROTO_UPDATE || type == IPROTO_DELETE) { index = index_find_unique(space, request->index_id); if (index == NULL) return -1; key = request->key; part_count = mp_decode_array(&key); if (exact_key_validate(index->def->key_def, key, part_count) != 0) return -1; } else if (type == IPROTO_INSERT || type == IPROTO_REPLACE || type == IPROTO_UPSERT) { index = pk; key = tuple_extract_key_raw(request->tuple, request->tuple_end, index->def->key_def, NULL); if (key == NULL) return -1; part_count = mp_decode_array(&key); } else { /* Unknown request type, nothing to do. */ return 0; } struct tuple *old_tuple; if (index_get(index, key, part_count, &old_tuple) != 0) return -1; /* * Create the new tuple. */ uint32_t new_size, old_size; const char *new_data, *new_data_end; const char *old_data, *old_data_end; switch (request->type) { case IPROTO_INSERT: case IPROTO_REPLACE: new_data = request->tuple; new_data_end = request->tuple_end; break; case IPROTO_UPDATE: if (old_tuple == NULL) { /* Nothing to update. */ return 0; } old_data = tuple_data_range(old_tuple, &old_size); old_data_end = old_data + old_size; new_data = tuple_update_execute(region_aligned_alloc_cb, gc, request->tuple, request->tuple_end, old_data, old_data_end, &new_size, request->index_base, NULL); if (new_data == NULL) return -1; new_data_end = new_data + new_size; break; case IPROTO_DELETE: if (old_tuple == NULL) { /* Nothing to delete. */ return 0; } new_data = new_data_end = NULL; break; case IPROTO_UPSERT: if (old_tuple == NULL) { /* * Turn UPSERT into INSERT, but still check * provided operations. 
*/ new_data = request->tuple; new_data_end = request->tuple_end; if (tuple_update_check_ops(region_aligned_alloc_cb, gc, request->ops, request->ops_end, request->index_base) != 0) return -1; break; } old_data = tuple_data_range(old_tuple, &old_size); old_data_end = old_data + old_size; new_data = tuple_upsert_execute(region_aligned_alloc_cb, gc, request->ops, request->ops_end, old_data, old_data_end, &new_size, request->index_base, false, NULL); new_data_end = new_data + new_size; break; default: unreachable(); } struct tuple *new_tuple = NULL; if (new_data != NULL) { new_tuple = tuple_new(tuple_format_runtime, new_data, new_data_end); if (new_tuple == NULL) return -1; tuple_ref(new_tuple); } assert(old_tuple != NULL || new_tuple != NULL); /* * Execute all registered BEFORE triggers. * * We pass the old and new tuples to the triggers in * txn_current_stmt(), which should be empty, because * the engine method (execute_replace or similar) has * not been called yet. Triggers may update new_tuple * in place so the next trigger sees the result of the * previous one. After we are done, we clear old_tuple * and new_tuple in txn_current_stmt() to be set by * the engine. */ struct txn_stmt *stmt = txn_current_stmt(txn); assert(stmt->old_tuple == NULL && stmt->new_tuple == NULL); stmt->old_tuple = old_tuple; stmt->new_tuple = new_tuple; int rc = trigger_run(&space->before_replace, txn); /* * BEFORE riggers cannot change the old tuple, * but they may replace the new tuple. */ bool request_changed = (stmt->new_tuple != new_tuple); new_tuple = stmt->new_tuple; assert(stmt->old_tuple == old_tuple); stmt->old_tuple = NULL; stmt->new_tuple = NULL; if (rc != 0) goto out; /* * We don't allow to change the value of the primary key * in the same statement. 
*/ if (request_changed && old_tuple != NULL && new_tuple != NULL && tuple_compare(old_tuple, new_tuple, pk->def->key_def) != 0) { diag_set(ClientError, ER_CANT_UPDATE_PRIMARY_KEY, pk->def->name, space->def->name); rc = -1; goto out; } /* * BEFORE triggers changed the resulting tuple. * Fix the request to conform. */ if (request_changed) rc = request_create_from_tuple(request, space, old_tuple, new_tuple); out: if (new_tuple != NULL) tuple_unref(new_tuple); return rc; } int space_execute_dml(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { if (unlikely(space->sequence != NULL) && (request->type == IPROTO_INSERT || request->type == IPROTO_REPLACE)) { /* * The space has a sequence associated with it. * If the tuple has 'nil' for the primary key, * we should replace it with the next sequence * value. */ if (request_handle_sequence(request, space) != 0) return -1; } if (unlikely(!rlist_empty(&space->before_replace) && space->run_triggers)) { /* * Call BEFORE triggers if any before dispatching * the request. Note, it may change the request * type and arguments. */ if (space_before_replace(space, txn, request) != 0) return -1; } switch (request->type) { case IPROTO_INSERT: case IPROTO_REPLACE: if (space->vtab->execute_replace(space, txn, request, result) != 0) return -1; break; case IPROTO_UPDATE: if (space->vtab->execute_update(space, txn, request, result) != 0) return -1; if (*result != NULL && request->index_id != 0) { /* * XXX: this is going to break with sync replication * for cases when tuple is NULL, since the leader * will be unable to certify such updates correctly. 
*/ request_rebind_to_primary_key(request, space, *result); } break; case IPROTO_DELETE: if (space->vtab->execute_delete(space, txn, request, result) != 0) return -1; if (*result != NULL && request->index_id != 0) request_rebind_to_primary_key(request, space, *result); break; case IPROTO_UPSERT: *result = NULL; if (space->vtab->execute_upsert(space, txn, request) != 0) return -1; break; default: *result = NULL; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_tx.h0000664000000000000000000003044713306565107017125 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_TX_H #define INCLUDES_TARANTOOL_BOX_VY_TX_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #define RB_COMPACT 1 #include #include #include "iterator_type.h" #include "salad/stailq.h" #include "trivia/util.h" #include "vy_index.h" #include "vy_stat.h" #include "vy_read_set.h" #include "vy_read_view.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct tuple; struct tx_manager; struct vy_mem; struct vy_tx; /** Transaction state. */ enum tx_state { /** Initial state. */ VINYL_TX_READY, /** * Transaction is finished and validated in the engine. * It may still be rolled back if there is an error * writing the WAL. */ VINYL_TX_COMMIT, /** Transaction is aborted by a conflict. */ VINYL_TX_ABORT, }; /** * A single write operation performed by a transaction. */ struct txv { /** Transaction. */ struct vy_tx *tx; /** Index this operation is for. */ struct vy_index *index; /** In-memory tree to insert the statement into. */ struct vy_mem *mem; /** Statement of this operation. */ struct tuple *stmt; /** Statement allocated on vy_mem->allocator. */ const struct tuple *region_stmt; /** Next in the transaction log. */ struct stailq_entry next_in_log; /** Member the transaction write set. */ rb_node(struct txv) in_set; /** * True if there is no tuple committed to the database * matching the key this operation is for, i.e. either * there is no statements for this key at all or the * last committed statement is DELETE. */ bool is_first_insert; /** * True if the txv was overwritten by another txv of * the same transaction. */ bool is_overwritten; /** txv that was overwritten by the current txv. */ struct txv *overwritten; }; /** * Index of all modifications made by a transaction. * Ordered by index, then by key in the index. 
*/ struct write_set_key { struct vy_index *index; const struct tuple *stmt; }; int write_set_cmp(struct txv *a, struct txv *b); int write_set_key_cmp(struct write_set_key *a, struct txv *b); typedef rb_tree(struct txv) write_set_t; rb_gen_ext_key(MAYBE_UNUSED static inline, write_set_, write_set_t, struct txv, in_set, write_set_cmp, struct write_set_key *, write_set_key_cmp); static inline struct txv * write_set_search_key(write_set_t *tree, struct vy_index *index, const struct tuple *stmt) { struct write_set_key key = { .index = index, .stmt = stmt }; return write_set_search(tree, &key); } /** Transaction object. */ struct vy_tx { /** Transaction manager. */ struct tx_manager *xm; /** * In memory transaction log. Contains both reads * and writes. */ struct stailq log; /** * Writes of the transaction segregated by the changed * vy_index object. */ write_set_t write_set; /** * Version of write_set state; if the state changes * (insert/remove), the version is incremented. */ uint32_t write_set_version; /** * Total size of memory occupied by statements of * the write set. */ size_t write_size; /** Current state of the transaction.*/ enum tx_state state; /** * The read view of this transaction. When a transaction * is started, it is set to the "read committed" state, * or actually, "read prepared" state, in other words, * all changes of all prepared transactions are visible * to this transaction. Upon a conflict, the transaction's * read view is changed: it begins to point to the * last state of the database before the conflicting * change. */ struct vy_read_view *read_view; /** * Tree of all intervals read by this transaction. Linked * by vy_tx_interval->in_tx. Used to merge intersecting * intervals. */ vy_tx_read_set_t read_set; /** * Prepare sequence number or -1 if the transaction * is not prepared. */ int64_t psn; /* List of triggers invoked when this transaction ends. 
*/ struct rlist on_destroy; }; static inline const struct vy_read_view ** vy_tx_read_view(struct vy_tx *tx) { return (const struct vy_read_view **)&tx->read_view; } /** Transaction manager object. */ struct tx_manager { /** * The last committed log sequence number known to * vinyl. Updated in vy_commit(). */ int64_t lsn; /** * A global transaction prepare counter: a transaction * is assigned an id at the time of vy_prepare(). Is used * to order statements of prepared but not yet committed * transactions in vy_mem. */ int64_t psn; /** * The last prepared (but not committed) transaction, * or NULL if there are no prepared transactions. */ struct vy_tx *last_prepared_tx; /** * The list of TXs with a read view in order of vlsn. */ struct rlist read_views; /** * Global read view - all prepared transactions are * visible in this view. The global read view * LSN is always INT64_MAX and it never changes. */ const struct vy_read_view global_read_view; /** * It is possible to create a cursor without an active * transaction, e.g. a write iterator; * this pointer represents a skeleton * transaction to use in such places. */ const struct vy_read_view *p_global_read_view; /** * Committed read view - all committed transactions are * visible in this view. The global read view * LSN is always (MAX_LSN - 1) and it never changes. */ const struct vy_read_view committed_read_view; /** * It is possible to create a cursor without an active * transaction, e.g. when squashing upserts; * this pointer represents a skeleton * transaction to use in such places. */ const struct vy_read_view *p_committed_read_view; /** Transaction statistics. */ struct vy_tx_stat stat; /** Sum size of statements pinned by the write set. */ size_t write_set_size; /** Sum size of statements pinned by the read set. */ size_t read_set_size; /** Memory pool for struct vy_tx allocations. */ struct mempool tx_mempool; /** Memory pool for struct txv allocations. 
*/ struct mempool txv_mempool; /** Memory pool for struct vy_read_interval allocations. */ struct mempool read_interval_mempool; /** Memory pool for struct vy_read_view allocations. */ struct mempool read_view_mempool; }; /** Allocate a tx manager object. */ struct tx_manager * tx_manager_new(void); /** Delete a tx manager object. */ void tx_manager_delete(struct tx_manager *xm); /* * Determine the lowest possible vlsn, i.e. the level below * which the history could be compacted. * * If there are active read views, it is the first's vlsn. * If there is no active read view, a read view could be * created at any moment with vlsn = m->lsn, so m->lsn must * be chosen. */ int64_t tx_manager_vlsn(struct tx_manager *xm); /** Initialize a tx object. */ void vy_tx_create(struct tx_manager *xm, struct vy_tx *tx); /** Destroy a tx object. */ void vy_tx_destroy(struct vy_tx *tx); /** Begin a new transaction. */ struct vy_tx * vy_tx_begin(struct tx_manager *xm); /** Prepare a transaction to be committed. */ int vy_tx_prepare(struct vy_tx *tx); /** * Commit a transaction with a given LSN and destroy * the tx object. */ void vy_tx_commit(struct vy_tx *tx, int64_t lsn); /** * Rollback a transaction and destroy the tx object. */ void vy_tx_rollback(struct vy_tx *tx); /** * Return the save point corresponding to the current * transaction state. The transaction can be rolled back * to a save point with vy_tx_rollback_to_savepoint(). */ static inline void * vy_tx_savepoint(struct vy_tx *tx) { assert(tx->state == VINYL_TX_READY); return stailq_last(&tx->log); } /** Rollback a transaction to a given save point. */ void vy_tx_rollback_to_savepoint(struct vy_tx *tx, void *svp); /** * Remember a read interval in the conflict manager index. * On success, this function guarantees that if another * transaction successfully commits a statement within a * tracked interval, the transaction the interval belongs * to will be aborted. * * @param tx Transaction that invoked the read. 
* @param index Index that was read from. * @param left Left boundary of the read interval. * @param left_belongs Set if the left boundary belongs to * the interval. * @param right Right boundary of the read interval. * @param right_belongs Set if the right boundary belongs to * the interval. * * @retval 0 Success. * @retval -1 Memory error. */ int vy_tx_track(struct vy_tx *tx, struct vy_index *index, struct tuple *left, bool left_belongs, struct tuple *right, bool right_belongs); /** * Remember a point read in the conflict manager index. * * @param tx Transaction that invoked the read. * @param index Index that was read from. * @param stmt Key that was read. * * @retval 0 Success. * @retval -1 Memory error. * * Note, this function isn't just a shortcut to vy_tx_track(). * Before adding the key to the conflict manager index, it checks * if the key was overwritten by the transaction itself. If this * is the case, there is no point in tracking the key, because the * transaction read it from its own write set. */ int vy_tx_track_point(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt); /** Add a statement to a transaction. */ int vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt); /** * Iterator over the write set of a transaction. */ struct vy_txw_iterator { /** Iterator statistics. */ struct vy_txw_iterator_stat *stat; /** Transaction whose write set is iterated. */ struct vy_tx *tx; /** Index of interest. */ struct vy_index *index; /** * Iterator type. * * Note if key is NULL, GT and EQ are changed to GE * while LT is changed to LE. */ enum iterator_type iterator_type; /** Search key. */ const struct tuple *key; /* Last seen value of the write set version. */ uint32_t version; /* Current position in the write set. */ struct txv *curr_txv; /* Is false until first .._get ot .._next_.. method is called */ bool search_started; }; /** * Initialize a txw iterator. 
*/ void vy_txw_iterator_open(struct vy_txw_iterator *itr, struct vy_txw_iterator_stat *stat, struct vy_tx *tx, struct vy_index *index, enum iterator_type iterator_type, const struct tuple *key); /** * Advance a txw iterator to the next statement. * The next statement is returned in @ret (NULL if EOF). */ void vy_txw_iterator_next(struct vy_txw_iterator *itr, struct tuple **ret); /** * Advance a txw iterator to the statement following @last_stmt. * The statement is returned in @ret (NULL if EOF). */ void vy_txw_iterator_skip(struct vy_txw_iterator *itr, const struct tuple *last_stmt, struct tuple **ret); /** * Check if a txw iterator was invalidated and needs to be restored. * If it does, set the iterator position to the statement following * @last_stmt and return 1, otherwise return 0. */ int vy_txw_iterator_restore(struct vy_txw_iterator *itr, const struct tuple *last_stmt, struct tuple **ret); /** * Close a txw iterator. */ void vy_txw_iterator_close(struct vy_txw_iterator *itr); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_TX_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/schema_def.c0000664000000000000000000000443013306560010020002 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "schema_def.h" static const char *object_type_strs[] = { /* [SC_UKNNOWN] = */ "unknown", /* [SC_UNIVERSE] = */ "universe", /* [SC_SPACE] = */ "space", /* [SC_FUNCTION] = */ "function", /* [SC_USER] = */ "user", /* [SC_ROLE] = */ "role", /* [SC_SEQUENCE] = */ "sequence", /* [SC_COLLATION] = */ "collation", }; enum schema_object_type schema_object_type(const char *name) { /** * There may be other places in which we look object type by * name, and they are case-sensitive, so be case-sensitive * here too. */ int n_strs = sizeof(object_type_strs)/sizeof(*object_type_strs); int index = strindex(object_type_strs, name, n_strs); return (enum schema_object_type) (index == n_strs ? 0 : index); } const char * schema_object_name(enum schema_object_type type) { assert((int) type < (int) schema_object_type_MAX); return object_type_strs[type]; } tarantool_1.9.1.26.g63eb81e3c/src/box/coll.h0000664000000000000000000000521613306565107016701 0ustar rootroot#ifndef TARANTOOL_BOX_COLL_H_INCLUDED #define TARANTOOL_BOX_COLL_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coll_def.h" #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct coll; typedef int (*coll_cmp_f)(const char *s, size_t s_len, const char *t, size_t t_len, const struct coll *coll); typedef uint32_t (*coll_hash_f)(const char *s, size_t s_len, uint32_t *ph, uint32_t *pcarry, struct coll *coll); /** * ICU collation specific data. */ struct UCollator; struct coll_icu { struct UCollator *collator; }; /** * A collation. */ struct coll { /** Personal ID */ uint32_t id; /** Owner ID */ uint32_t owner_id; /** Collation type. */ enum coll_type type; /** Type specific data. */ struct coll_icu icu; /** String comparator. */ coll_cmp_f cmp; coll_hash_f hash; /** Collation name. */ size_t name_len; char name[0]; }; /** * Create a collation by definition. * @param def - collation definition. * @return - the collation OR NULL on memory error (diag is set). 
*/ struct coll * coll_new(const struct coll_def *def); /** * Delete a collation. * @param cool - collation to delete. */ void coll_delete(struct coll *coll); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_COLL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/journal.c0000664000000000000000000000435713306560010017406 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "journal.h" #include #include #include /** * Used to load from a memtx snapshot. LSN is not used, * but txn_commit() must work. 
*/ static int64_t dummy_journal_write(struct journal *journal, struct journal_entry *entry) { (void) journal; (void) entry; return 0; } static struct journal dummy_journal = { dummy_journal_write, NULL, }; struct journal *current_journal = &dummy_journal; struct journal_entry * journal_entry_new(size_t n_rows) { struct journal_entry *entry; size_t size = (sizeof(struct journal_entry) + sizeof(entry->rows[0]) * n_rows); entry = region_aligned_alloc(&fiber()->gc, size, alignof(struct journal_entry)); if (entry == NULL) { diag_set(OutOfMemory, size, "region", "struct journal_entry"); return NULL; } entry->n_rows = n_rows; entry->res = -1; entry->fiber = fiber(); return entry; } tarantool_1.9.1.26.g63eb81e3c/src/box/schema.h0000664000000000000000000001377213306565107017216 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_SCHEMA_H #define INCLUDES_TARANTOOL_BOX_SCHEMA_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include /* snprintf */ #include "error.h" #include "space.h" #include "latch.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ extern uint32_t schema_version; /** * Persistent version of the schema, stored in _schema["version"]. */ extern uint32_t dd_version_id; /** * Lock of schema modification */ extern struct latch schema_lock; /** * Try to look up a space by space number in the space cache. * FFI-friendly no-exception-thrown space lookup function. * * @return NULL if space not found, otherwise space object. */ struct space * space_by_id(uint32_t id); uint32_t box_schema_version(); static inline struct space * space_cache_find(uint32_t id) { static uint32_t prev_schema_version = 0; static struct space *space = NULL; if (prev_schema_version != schema_version) space = NULL; if (space && space->def->id == id) return space; if ((space = space_by_id(id))) { prev_schema_version = schema_version; return space; } diag_set(ClientError, ER_NO_SUCH_SPACE, int2str(id)); return NULL; } struct func * func_by_name(const char *name, uint32_t name_len); /** Call a visitor function on every space in the space cache. */ int space_foreach(int (*func)(struct space *sp, void *udata), void *udata); /** * Try to look up object name by id and type of object. * * @return NULL if object of type not found, otherwise name of object. 
*/ const char * schema_find_name(enum schema_object_type type, uint32_t object_id); #if defined(__cplusplus) } /* extern "C" */ static inline struct space * space_cache_find_xc(uint32_t id) { struct space *space = space_cache_find(id); if (space == NULL) diag_raise(); return space; } /** * Update contents of the space cache. Typically the new space is * an altered version of the original space. * Returns the old space, if any. */ struct space * space_cache_replace(struct space *space); /** Delete a space from the space cache. */ struct space * space_cache_delete(uint32_t id); bool space_is_system(struct space *space); void schema_init(); void schema_free(); struct space *schema_space(uint32_t id); /* * Find object id by object name. */ uint32_t schema_find_id(uint32_t system_space_id, uint32_t index_id, const char *name, uint32_t len); /** * Insert a new function or update the old one. * * @param def Function definition. In a case of success the ownership * of @a def is transfered to the data dictionary, thus the caller * must not delete it. */ void func_cache_replace(struct func_def *def); void func_cache_delete(uint32_t fid); struct func; struct func * func_by_id(uint32_t fid); static inline struct func * func_cache_find(uint32_t fid) { struct func *func = func_by_id(fid); if (func == NULL) tnt_raise(ClientError, ER_NO_SUCH_FUNCTION, int2str(fid)); return func; } /** * Check whether or not an object has grants on it (restrict * constraint in drop object). * _priv space to look up by space id * @retval true object has grants * @retval false object has no grants */ bool schema_find_grants(const char *type, uint32_t id); /** * Find a sequence by id. Return NULL if the sequence was * not found. */ struct sequence * sequence_by_id(uint32_t id); /** * A wrapper around sequence_by_id() that raises an exception * if the sequence was not found in the cache. 
*/ struct sequence * sequence_cache_find(uint32_t id); /** * Insert a new sequence object into the cache or update * an existing one if there's already a sequence with * the given id in the cache. */ void sequence_cache_replace(struct sequence_def *def); /** Delete a sequence from the sequence cache. */ void sequence_cache_delete(uint32_t id); #endif /* defined(__cplusplus) */ /** * Triggers fired after committing a change in space definition. * The space is passed to the trigger callback in the event * argument. It is the new space in case of create/update or * the old space in case of drop. */ extern struct rlist on_alter_space; /** * Triggers fired after committing a change in sequence definition. * It is passed the txn statement that altered the sequence. */ extern struct rlist on_alter_sequence; /** * Triggers fired after access denied error is created. */ extern struct rlist on_access_denied; /** * Context passed to on_access_denied trigger. */ struct on_access_denied_ctx { /** Type of declined access */ const char *access_type; /** Type of object the required access was denied to */ const char *object_type; /** Name of object the required access was denied to */ const char *object_name; }; #endif /* INCLUDES_TARANTOOL_BOX_SCHEMA_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_extract_key.cc0000664000000000000000000002313513306560010021625 0ustar rootroot#include "tuple_extract_key.h" #include "tuple.h" #include "fiber.h" enum { MSGPACK_NULL = 0xc0 }; /** True, if a key con contain two or more parts in sequence. 
*/ static bool key_def_contains_sequential_parts(const struct key_def *def) { for (uint32_t i = 0; i < def->part_count - 1; ++i) { if (def->parts[i].fieldno + 1 == def->parts[i + 1].fieldno) return true; } return false; } /** * Optimized version of tuple_extract_key_raw() for sequential key defs * @copydoc tuple_extract_key_raw() */ template static char * tuple_extract_key_sequential_raw(const char *data, const char *data_end, const struct key_def *key_def, uint32_t *key_size) { assert(!has_optional_parts || key_def->is_nullable); assert(key_def_is_sequential(key_def)); assert(has_optional_parts == key_def->has_optional_parts); assert(data_end != NULL); assert(mp_sizeof_nil() == 1); const char *field_start = data; uint32_t bsize = mp_sizeof_array(key_def->part_count); uint32_t field_count = mp_decode_array(&field_start); const char *field_end = field_start; uint32_t null_count; if (!has_optional_parts || field_count > key_def->part_count) { for (uint32_t i = 0; i < key_def->part_count; i++) mp_next(&field_end); null_count = 0; } else { assert(key_def->is_nullable); null_count = key_def->part_count - field_count; field_end = data_end; bsize += null_count * mp_sizeof_nil(); } assert(field_end - field_start <= data_end - data); bsize += field_end - field_start; char *key = (char *) region_alloc(&fiber()->gc, bsize); if (key == NULL) { diag_set(OutOfMemory, bsize, "region", "tuple_extract_key_raw_sequential"); return NULL; } char *key_buf = mp_encode_array(key, key_def->part_count); memcpy(key_buf, field_start, field_end - field_start); if (has_optional_parts && null_count > 0) { key_buf += field_end - field_start; memset(key_buf, MSGPACK_NULL, null_count); } if (key_size != NULL) *key_size = bsize; return key; } /** * Optimized version of tuple_extract_key() for sequential key defs * @copydoc tuple_extract_key() */ template static inline char * tuple_extract_key_sequential(const struct tuple *tuple, const struct key_def *key_def, uint32_t *key_size) { 
assert(key_def_is_sequential(key_def)); assert(!has_optional_parts || key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); const char *data = tuple_data(tuple); const char *data_end = data + tuple->bsize; return tuple_extract_key_sequential_raw(data, data_end, key_def, key_size); } /** * General-purpose implementation of tuple_extract_key() * @copydoc tuple_extract_key() */ template static char * tuple_extract_key_slowpath(const struct tuple *tuple, const struct key_def *key_def, uint32_t *key_size) { assert(!has_optional_parts || key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); assert(contains_sequential_parts == key_def_contains_sequential_parts(key_def)); assert(mp_sizeof_nil() == 1); const char *data = tuple_data(tuple); uint32_t part_count = key_def->part_count; uint32_t bsize = mp_sizeof_array(part_count); const struct tuple_format *format = tuple_format(tuple); const uint32_t *field_map = tuple_field_map(tuple); const char *tuple_end = data + tuple->bsize; /* Calculate the key size. */ for (uint32_t i = 0; i < part_count; ++i) { const char *field = tuple_field_raw(format, data, field_map, key_def->parts[i].fieldno); if (has_optional_parts && field == NULL) { bsize += mp_sizeof_nil(); continue; } assert(field != NULL); const char *end = field; if (contains_sequential_parts) { /* * Skip sequential part in order to * minimize tuple_field_raw() calls. */ for (; i < part_count - 1; i++) { if (key_def->parts[i].fieldno + 1 != key_def->parts[i + 1].fieldno) { /* * End of sequential part. 
*/ break; } if (!has_optional_parts || end < tuple_end) mp_next(&end); else bsize += mp_sizeof_nil(); } } if (!has_optional_parts || end < tuple_end) mp_next(&end); else bsize += mp_sizeof_nil(); bsize += end - field; } char *key = (char *) region_alloc(&fiber()->gc, bsize); if (key == NULL) { diag_set(OutOfMemory, bsize, "region", "tuple_extract_key"); return NULL; } char *key_buf = mp_encode_array(key, part_count); for (uint32_t i = 0; i < part_count; ++i) { const char *field = tuple_field_raw(format, data, field_map, key_def->parts[i].fieldno); if (has_optional_parts && field == NULL) { key_buf = mp_encode_nil(key_buf); continue; } const char *end = field; uint32_t null_count = 0; if (contains_sequential_parts) { /* * Skip sequential part in order to * minimize tuple_field_raw() calls. */ for (; i < part_count - 1; i++) { if (key_def->parts[i].fieldno + 1 != key_def->parts[i + 1].fieldno) { /* * End of sequential part. */ break; } if (!has_optional_parts || end < tuple_end) mp_next(&end); else ++null_count; } } if (!has_optional_parts || end < tuple_end) mp_next(&end); else ++null_count; bsize = end - field; memcpy(key_buf, field, bsize); key_buf += bsize; if (has_optional_parts && null_count != 0) { memset(key_buf, MSGPACK_NULL, null_count); key_buf += null_count * mp_sizeof_nil(); } } if (key_size != NULL) *key_size = key_buf - key; return key; } /** * General-purpose version of tuple_extract_key_raw() * @copydoc tuple_extract_key_raw() */ template static char * tuple_extract_key_slowpath_raw(const char *data, const char *data_end, const struct key_def *key_def, uint32_t *key_size) { assert(!has_optional_parts || key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); assert(mp_sizeof_nil() == 1); /* allocate buffer with maximal possible size */ char *key = (char *) region_alloc(&fiber()->gc, data_end - data); if (key == NULL) { diag_set(OutOfMemory, data_end - data, "region", "tuple_extract_key_raw"); return NULL; } char *key_buf = 
mp_encode_array(key, key_def->part_count); const char *field0 = data; uint32_t field_count = mp_decode_array(&field0); /* * A tuple can not be empty - at least a pk always exists. */ assert(field_count > 0); (void) field_count; const char *field0_end = field0; mp_next(&field0_end); const char *field = field0; const char *field_end = field0_end; uint32_t current_fieldno = 0; for (uint32_t i = 0; i < key_def->part_count; i++) { uint32_t fieldno = key_def->parts[i].fieldno; uint32_t null_count = 0; for (; i < key_def->part_count - 1; i++) { if (key_def->parts[i].fieldno + 1 != key_def->parts[i + 1].fieldno) break; } uint32_t end_fieldno = key_def->parts[i].fieldno; if (fieldno < current_fieldno) { /* Rewind. */ field = field0; field_end = field0_end; current_fieldno = 0; } /* * First fieldno in a key columns can be out of * tuple size for nullable indexes because of * absense of indexed fields. Treat such fields * as NULLs. */ if (has_optional_parts && fieldno >= field_count) { /* Nullify entire columns range. */ null_count = fieldno - end_fieldno + 1; memset(key_buf, MSGPACK_NULL, null_count); key_buf += null_count * mp_sizeof_nil(); continue; } while (current_fieldno < fieldno) { /* search first field of key in tuple raw data */ field = field_end; mp_next(&field_end); current_fieldno++; } /* * If the last fieldno is out of tuple size, then * fill rest of columns with NULLs. 
*/ if (has_optional_parts && end_fieldno >= field_count) { null_count = end_fieldno - field_count + 1; field_end = data_end; } else { while (current_fieldno < end_fieldno) { mp_next(&field_end); current_fieldno++; } } memcpy(key_buf, field, field_end - field); key_buf += field_end - field; if (has_optional_parts && null_count != 0) { memset(key_buf, MSGPACK_NULL, null_count); key_buf += null_count * mp_sizeof_nil(); } else { assert(key_buf - key <= data_end - data); } } if (key_size != NULL) *key_size = (uint32_t)(key_buf - key); return key; } /** * Initialize tuple_extract_key() and tuple_extract_key_raw() */ void tuple_extract_key_set(struct key_def *key_def) { if (key_def_is_sequential(key_def)) { if (key_def->has_optional_parts) { assert(key_def->is_nullable); key_def->tuple_extract_key = tuple_extract_key_sequential; key_def->tuple_extract_key_raw = tuple_extract_key_sequential_raw; } else { key_def->tuple_extract_key = tuple_extract_key_sequential; key_def->tuple_extract_key_raw = tuple_extract_key_sequential_raw; } } else { if (key_def->has_optional_parts) { assert(key_def->is_nullable); if (key_def_contains_sequential_parts(key_def)) { key_def->tuple_extract_key = tuple_extract_key_slowpath; } else { key_def->tuple_extract_key = tuple_extract_key_slowpath; } } else { if (key_def_contains_sequential_parts(key_def)) { key_def->tuple_extract_key = tuple_extract_key_slowpath; } else { key_def->tuple_extract_key = tuple_extract_key_slowpath; } } } if (key_def->has_optional_parts) { assert(key_def->is_nullable); key_def->tuple_extract_key_raw = tuple_extract_key_slowpath_raw; } else { key_def->tuple_extract_key_raw = tuple_extract_key_slowpath_raw; } } tarantool_1.9.1.26.g63eb81e3c/src/box/checkpoint.h0000664000000000000000000000572013306560010020063 0ustar rootroot#ifndef TARANTOOL_BOX_CHECKPOINT_H_INCLUDED #define TARANTOOL_BOX_CHECKPOINT_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include /** * This module implements a simple API for working with checkpoints. * As checkpoints are, in fact, memtx snapshots, functions exported * by this module are C wrappers around corresponding memtx_engine * methods. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct vclock; /** * Return LSN and vclock (unless @vclock is NULL) of the most * recent checkpoint or -1 if there is no checkpoint. */ int64_t checkpoint_last(struct vclock *vclock); /** Iterator over all existing checkpoints. */ struct checkpoint_iterator { const struct vclock *curr; }; /** * Init a checkpoint iterator. The iterator is valid as long * as the caller doesn't yield. 
*/ static inline void checkpoint_iterator_init(struct checkpoint_iterator *it) { it->curr = NULL; } /** * Iterate to the next checkpoint. Return NULL if the current * checkpoint is the most recent one. * * If called on the last iteration, this function positions * the iterator to the oldest checkpoint. */ const struct vclock * checkpoint_iterator_next(struct checkpoint_iterator *it); /** * Iterate to the previous checkpoint. Return NULL if the current * checkpoint is the oldest one. * * If called on the first iteration, this function positions * the iterator to the newest checkpoint. */ const struct vclock * checkpoint_iterator_prev(struct checkpoint_iterator *it); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_CHECKPOINT_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/txn.h0000664000000000000000000002343313306565107016562 0ustar rootroot#ifndef TARANTOOL_BOX_TXN_H_INCLUDED #define TARANTOOL_BOX_TXN_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "salad/stailq.h" #include "trigger.h" #include "fiber.h" #include "space.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** box statistics */ extern struct rmean *rmean_box; struct engine; struct space; struct tuple; struct xrow_header; enum { /** * Maximum recursion depth for on_replace triggers. * Large numbers may corrupt C stack. */ TXN_SUB_STMT_MAX = 3 }; /** * A single statement of a multi-statement * transaction: undo and redo info. */ struct txn_stmt { /* (!) Please update txn_stmt_new() after changing members */ /** A linked list of all statements. */ struct stailq_entry next; /** Undo info. */ struct space *space; struct tuple *old_tuple; struct tuple *new_tuple; /** Engine savepoint for the start of this statement. */ void *engine_savepoint; /** Redo info: the binary log row */ struct xrow_header *row; }; /** * Transaction savepoint object. Allocated on a transaction * region and becames invalid after the transaction's end. * Allows to rollback a transaction partially. */ struct txn_savepoint { /** * Saved substatement level at the time of a savepoint * creation. */ int in_sub_stmt; /** * Statement, on which a savepoint is created. On rollback * to this savepoint all newer statements are rolled back. * Initialized to NULL in case a savepoint is created in * an empty transaction. 
*/ struct stailq_entry *stmt; }; extern double too_long_threshold; struct txn { /** * A sequentially growing transaction id, assigned when * a transaction is initiated. Used to identify * a transaction after it has possibly been destroyed. */ int64_t id; /** List of statements in a transaction. */ struct stailq stmts; /** Total number of WAL rows in this txn. */ int n_rows; /** * True if this transaction is running in autocommit mode * (statement end causes an automatic transaction commit). */ bool is_autocommit; /** True if on_commit and on_rollback lists are non-empty. */ bool has_triggers; /** The number of active nested statement-level transactions. */ int in_sub_stmt; /** * First statement at each statement-level. * Needed to rollback sub statements. */ struct stailq_entry *sub_stmt_begin[TXN_SUB_STMT_MAX]; /** LSN of this transaction when written to WAL. */ int64_t signature; /** Engine involved in multi-statement transaction. */ struct engine *engine; /** Engine-specific transaction data */ void *engine_tx; /** * Triggers on fiber yield and stop to abort transaction * for in-memory engine. */ struct trigger fiber_on_yield, fiber_on_stop; /** Commit and rollback triggers */ struct rlist on_commit, on_rollback; }; /* Pointer to the current transaction (if any) */ static inline struct txn * in_txn() { return (struct txn *) fiber_get_key(fiber(), FIBER_KEY_TXN); } /** * Start a transaction explicitly. * @pre no transaction is active */ struct txn * txn_begin(bool is_autocommit); /** * Commit a transaction. * @pre txn == in_txn() * * Return 0 on success. On error, rollback * the transaction and return -1. */ int txn_commit(struct txn *txn); /** Rollback a transaction, if any. */ void txn_rollback(); /** * Most txns don't have triggers, and txn objects * are created on every access to data, so txns * are partially initialized. 
*/ static inline void txn_init_triggers(struct txn *txn) { if (txn->has_triggers == false) { rlist_create(&txn->on_commit); rlist_create(&txn->on_rollback); txn->has_triggers = true; } } static inline void txn_on_commit(struct txn *txn, struct trigger *trigger) { txn_init_triggers(txn); trigger_add(&txn->on_commit, trigger); } static inline void txn_on_rollback(struct txn *txn, struct trigger *trigger) { txn_init_triggers(txn); trigger_add(&txn->on_rollback, trigger); } /** * Start a new statement. If no current transaction, * start a new transaction with autocommit = true. */ struct txn * txn_begin_stmt(struct space *space); int txn_begin_in_engine(struct engine *engine, struct txn *txn); /** * This is an optimization, which exists to speed up selects * in autocommit mode. For such selects, we only need to * manage fiber garbage heap. If autocommit mode is * off, however, we must start engine transaction with the first * select. */ static inline int txn_begin_ro_stmt(struct space *space, struct txn **txn) { *txn = in_txn(); if (*txn != NULL) { struct engine *engine = space->engine; return txn_begin_in_engine(engine, *txn); } return 0; } static inline void txn_commit_ro_stmt(struct txn *txn) { assert(txn == in_txn()); if (txn) { assert(txn->engine); /* nothing to do */ } else { fiber_gc(); } } /** * End a statement. In autocommit mode, end * the current transaction as well. * * Return 0 on success. On error, rollback * the statement and return -1. */ int txn_commit_stmt(struct txn *txn, struct request *request); /** * Rollback a statement. In autocommit mode, * rolls back the entire transaction. */ void txn_rollback_stmt(); /** * Raise an error if this is a multi-statement * transaction: DDL can not be part of a multi-statement * transaction and must be run in autocommit mode. */ int txn_check_singlestatement(struct txn *txn, const char *where); /** The current statement of the transaction. 
*/ static inline struct txn_stmt * txn_current_stmt(struct txn *txn) { if (txn->in_sub_stmt == 0) return NULL; struct stailq_entry *stmt = txn->sub_stmt_begin[txn->in_sub_stmt - 1]; stmt = stmt != NULL ? stailq_next(stmt) : stailq_first(&txn->stmts); return stailq_entry(stmt, struct txn_stmt, next); } /** The last statement of the transaction. */ static inline struct txn_stmt * txn_last_stmt(struct txn *txn) { return stailq_last_entry(&txn->stmts, struct txn_stmt, next); } /** * FFI bindings: do not throw exceptions, do not accept extra * arguments */ /** \cond public */ /** * Transaction id - a non-persistent unique identifier * of the current transaction. -1 if there is no current * transaction. */ API_EXPORT int64_t box_txn_id(void); /** * Return true if there is an active transaction. */ API_EXPORT bool box_txn(void); /** * Begin a transaction in the current fiber. * * A transaction is attached to caller fiber, therefore one fiber can have * only one active transaction. * * @retval 0 - success * @retval -1 - failed, perhaps a transaction has already been * started */ API_EXPORT int box_txn_begin(void); /** * Commit the current transaction. * @retval 0 - success * @retval -1 - failed, perhaps a disk write failure. * started */ API_EXPORT int box_txn_commit(void); /** * Rollback the current transaction. * May fail if called from a nested * statement. */ API_EXPORT int box_txn_rollback(void); /** * Allocate memory on txn memory pool. * The memory is automatically deallocated when the transaction * is committed or rolled back. * * @retval NULL out of memory */ API_EXPORT void * box_txn_alloc(size_t size); /** \endcond public */ typedef struct txn_savepoint box_txn_savepoint_t; /** * Create a new savepoint. * @retval not NULL Savepoint object. * @retval NULL Client or memory error. */ API_EXPORT box_txn_savepoint_t * box_txn_savepoint(void); /** * Rollback to @a savepoint. Rollback all statements newer than a * saved statement. 
@A savepoint can be rolled back multiple * times. All existing savepoints, newer than @a savepoint, are * deleted and can not be used. * @A savepoint must be from a current transaction, else the * rollback crashes. To validate savepoints store transaction id * together with @a savepoint. * @retval 0 Success. * @retval -1 Client error. */ API_EXPORT int box_txn_rollback_to_savepoint(box_txn_savepoint_t *savepoint); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline struct txn * txn_begin_stmt_xc(struct space *space) { struct txn *txn = txn_begin_stmt(space); if (txn == NULL) diag_raise(); return txn; } static inline struct txn * txn_begin_ro_stmt_xc(struct space *space) { struct txn *txn; if (txn_begin_ro_stmt(space, &txn) != 0) diag_raise(); return txn; } static inline void txn_commit_stmt_xc(struct txn *txn, struct request *request) { if (txn_commit_stmt(txn, request) != 0) diag_raise(); } static inline void txn_check_singlestatement_xc(struct txn *txn, const char *where) { if (txn_check_singlestatement(txn, where) != 0) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TXN_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_run.c0000664000000000000000000022054313306565107017267 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_run.h" #include #include "fiber.h" #include "fiber_cond.h" #include "fio.h" #include "cbus.h" #include "memory.h" #include "coio_file.h" #include "replication.h" #include "tuple_hash.h" /* for bloom filter */ #include "xlog.h" #include "xrow.h" static const uint64_t vy_page_info_key_map = (1 << VY_PAGE_INFO_OFFSET) | (1 << VY_PAGE_INFO_SIZE) | (1 << VY_PAGE_INFO_UNPACKED_SIZE) | (1 << VY_PAGE_INFO_ROW_COUNT) | (1 << VY_PAGE_INFO_MIN_KEY) | (1 << VY_PAGE_INFO_ROW_INDEX_OFFSET); static const uint64_t vy_run_info_key_map = (1 << VY_RUN_INFO_MIN_KEY) | (1 << VY_RUN_INFO_MAX_KEY) | (1 << VY_RUN_INFO_MIN_LSN) | (1 << VY_RUN_INFO_MAX_LSN) | (1 << VY_RUN_INFO_PAGE_COUNT); enum { VY_BLOOM_VERSION = 0 }; /** xlog meta type for .run files */ #define XLOG_META_TYPE_RUN "RUN" /** xlog meta type for .index files */ #define XLOG_META_TYPE_INDEX "INDEX" const char *vy_file_suffix[] = { "index", /* VY_FILE_INDEX */ "run", /* VY_FILE_RUN */ }; /** * We read runs in background threads so as not to stall tx. * This structure represents such a thread. */ struct vy_run_reader { /** Thread that processes read requests. */ struct cord cord; /** Pipe from tx to the reader thread. */ struct cpipe reader_pipe; /** Pipe from the reader thread to tx. 
*/ struct cpipe tx_pipe; }; /** Cbus task for vinyl page read. */ struct vy_page_read_task { /** parent */ struct cbus_call_msg base; /** vinyl page metadata */ struct vy_page_info page_info; /** vy_run with fd - ref. counted */ struct vy_run *run; /** [out] resulting vinyl page */ struct vy_page *page; }; /** Destructor for env->zdctx_key thread-local variable */ static void vy_free_zdctx(void *arg) { assert(arg != NULL); ZSTD_freeDStream(arg); } /** Run reader thread function. */ static int vy_run_reader_f(va_list ap) { struct vy_run_reader *reader = va_arg(ap, struct vy_run_reader *); struct cbus_endpoint endpoint; cpipe_create(&reader->tx_pipe, "tx_prio"); cbus_endpoint_create(&endpoint, cord_name(cord()), fiber_schedule_cb, fiber()); cbus_loop(&endpoint); cbus_endpoint_destroy(&endpoint, cbus_process); cpipe_destroy(&reader->tx_pipe); return 0; } /** Start run reader threads. */ static void vy_run_env_start_readers(struct vy_run_env *env, int threads) { assert(threads > 0); assert(env->reader_pool == NULL); env->reader_pool_size = threads; env->reader_pool = calloc(env->reader_pool_size, sizeof(*env->reader_pool)); if (env->reader_pool == NULL) panic("failed to allocate vinyl reader thread pool"); for (int i = 0; i < env->reader_pool_size; i++) { struct vy_run_reader *reader = &env->reader_pool[i]; char name[FIBER_NAME_MAX]; snprintf(name, sizeof(name), "vinyl.reader.%d", i); if (cord_costart(&reader->cord, name, vy_run_reader_f, reader) != 0) panic("failed to start vinyl reader thread"); cpipe_create(&reader->reader_pipe, name); } env->next_reader = 0; } /** Join run reader threads. 
*/ static void vy_run_env_stop_readers(struct vy_run_env *env) { for (int i = 0; i < env->reader_pool_size; i++) { struct vy_run_reader *reader = &env->reader_pool[i]; cbus_stop_loop(&reader->reader_pipe); cpipe_destroy(&reader->reader_pipe); if (cord_join(&reader->cord) != 0) panic("failed to join vinyl reader thread"); } free(env->reader_pool); } /** * Initialize vinyl run environment */ void vy_run_env_create(struct vy_run_env *env) { memset(env, 0, sizeof(*env)); tt_pthread_key_create(&env->zdctx_key, vy_free_zdctx); mempool_create(&env->read_task_pool, cord_slab_cache(), sizeof(struct vy_page_read_task)); } /** * Destroy vinyl run environment */ void vy_run_env_destroy(struct vy_run_env *env) { if (env->reader_pool != NULL) vy_run_env_stop_readers(env); mempool_destroy(&env->read_task_pool); tt_pthread_key_delete(env->zdctx_key); } /** * Enable coio reads for a vinyl run environment. */ void vy_run_env_enable_coio(struct vy_run_env *env, int threads) { if (env->reader_pool != NULL) return; /* already enabled */ vy_run_env_start_readers(env, threads); } /** * Initialize page info struct * * @retval 0 for Success * @retval -1 for error */ static int vy_page_info_create(struct vy_page_info *page_info, uint64_t offset, const char *min_key) { memset(page_info, 0, sizeof(*page_info)); page_info->offset = offset; page_info->unpacked_size = 0; page_info->min_key = vy_key_dup(min_key); return page_info->min_key == NULL ? 
-1 : 0; } /** * Destroy page info struct */ static void vy_page_info_destroy(struct vy_page_info *page_info) { if (page_info->min_key != NULL) free(page_info->min_key); } struct vy_run * vy_run_new(struct vy_run_env *env, int64_t id) { struct vy_run *run = calloc(1, sizeof(struct vy_run)); if (unlikely(run == NULL)) { diag_set(OutOfMemory, sizeof(struct vy_run), "malloc", "struct vy_run"); return NULL; } run->env = env; run->id = id; run->dump_lsn = -1; run->fd = -1; run->refs = 1; rlist_create(&run->in_index); rlist_create(&run->in_unused); TRASH(&run->info.bloom); return run; } static void vy_run_clear(struct vy_run *run) { if (run->page_info != NULL) { uint32_t page_no; for (page_no = 0; page_no < run->info.page_count; ++page_no) vy_page_info_destroy(run->page_info + page_no); free(run->page_info); } run->page_info = NULL; run->page_index_size = 0; run->info.page_count = 0; if (run->info.has_bloom) bloom_destroy(&run->info.bloom, runtime.quota); run->info.has_bloom = false; free(run->info.min_key); run->info.min_key = NULL; free(run->info.max_key); run->info.max_key = NULL; } void vy_run_delete(struct vy_run *run) { assert(run->refs == 0); if (run->fd >= 0 && close(run->fd) < 0) say_syserror("close failed"); vy_run_clear(run); TRASH(run); free(run); } /** * Find a page from which the iteration of a given key must be started. * LE and LT: the found page definitely contains the position * for iteration start. * GE, GT, EQ: Since page search uses only min_key of pages, * it may happen that the found page doesn't contain the position * for iteration start. In this case it is certain that the iteration * must be started from the beginning of the next page. * * @param run - run * @param key - key to find * @param key_def - key_def for comparison * @param itype - iterator type (see above) * @param equal_key: *equal_key is set to true if there is a page * with min_key equal to the given key. 
* @return offset of the page in page index OR run->info.page_count if * there no pages fulfilling the conditions. */ static uint32_t vy_page_index_find_page(struct vy_run *run, const struct tuple *key, const struct key_def *cmp_def, enum iterator_type itype, bool *equal_key) { if (itype == ITER_EQ) itype = ITER_GE; /* One day it'll become obsolete */ assert(itype == ITER_GE || itype == ITER_GT || itype == ITER_LE || itype == ITER_LT); int dir = iterator_direction(itype); *equal_key = false; /** * Binary search in page index. Depends on given iterator_type: * ITER_GE: lowest page with min_key >= given key. * ITER_GT: lowest page with min_key > given key. * ITER_LE: highest page with min_key <= given key. * ITER_LT: highest page with min_key < given key. * * Example: we are searching for a value 2 in the run of 10 pages: * min_key: [1 1 2 2 2 2 2 3 3 3] * we want to find: [ LT GE LE GT ] * For LT and GE it's a classical lower_bound search. * Let's set up a range with left page's min_key < key and * right page's min >= key; binary cut the range until it * becomes of length 1 and then LT pos = left bound of the range * and GE pos = right bound of the range. * For LE and GT it's a classical upper_bound search. * Let's set up a range with left page's min_key <= key and * right page's min > key; binary cut the range until it * becomes of length 1 and then LE pos = left bound of the range * and GT pos = right bound of the range. 
*/ bool is_lower_bound = itype == ITER_LT || itype == ITER_GE; assert(run->info.page_count > 0); /* Initially the range is set with virtual positions */ int32_t range[2] = { -1, run->info.page_count }; assert(run->info.page_count > 0); do { int32_t mid = range[0] + (range[1] - range[0]) / 2; struct vy_page_info *info = vy_run_page_info(run, mid); int cmp = vy_stmt_compare_with_raw_key(key, info->min_key, cmp_def); if (is_lower_bound) range[cmp <= 0] = mid; else range[cmp < 0] = mid; *equal_key = *equal_key || cmp == 0; } while (range[1] - range[0] > 1); if (range[0] < 0) range[0] = run->info.page_count; uint32_t page = range[dir > 0]; /** * Since page search uses only min_key of pages, * for GE, GT and EQ the previous page can contain * the point where iteration must be started. */ if (page > 0 && dir > 0) return page - 1; return page; } struct vy_slice * vy_slice_new(int64_t id, struct vy_run *run, struct tuple *begin, struct tuple *end, const struct key_def *cmp_def) { struct vy_slice *slice = malloc(sizeof(*slice)); if (slice == NULL) { diag_set(OutOfMemory, sizeof(*slice), "malloc", "struct vy_slice"); return NULL; } memset(slice, 0, sizeof(*slice)); slice->id = id; slice->run = run; vy_run_ref(run); run->slice_count++; if (begin != NULL) tuple_ref(begin); slice->begin = begin; if (end != NULL) tuple_ref(end); slice->end = end; rlist_create(&slice->in_range); fiber_cond_create(&slice->pin_cond); if (run->info.page_count == 0) { /* The run is empty hence the slice is empty too. */ return slice; } /** Lookup the first and the last pages spanned by the slice. 
*/ bool unused; if (slice->begin == NULL) { slice->first_page_no = 0; } else { slice->first_page_no = vy_page_index_find_page(run, slice->begin, cmp_def, ITER_GE, &unused); assert(slice->first_page_no < run->info.page_count); } if (slice->end == NULL) { slice->last_page_no = run->info.page_count - 1; } else { slice->last_page_no = vy_page_index_find_page(run, slice->end, cmp_def, ITER_LT, &unused); if (slice->last_page_no == run->info.page_count) { /* It's an empty slice */ slice->first_page_no = 0; slice->last_page_no = 0; return slice; } } assert(slice->last_page_no >= slice->first_page_no); /** Estimate the number of statements in the slice. */ uint32_t run_pages = run->info.page_count; uint32_t slice_pages = slice->last_page_no - slice->first_page_no + 1; slice->count.pages = slice_pages; slice->count.rows = DIV_ROUND_UP(run->count.rows * slice_pages, run_pages); slice->count.bytes = DIV_ROUND_UP(run->count.bytes * slice_pages, run_pages); slice->count.bytes_compressed = DIV_ROUND_UP( run->count.bytes_compressed * slice_pages, run_pages); return slice; } void vy_slice_delete(struct vy_slice *slice) { assert(slice->pin_count == 0); assert(slice->run->slice_count > 0); slice->run->slice_count--; vy_run_unref(slice->run); if (slice->begin != NULL) tuple_unref(slice->begin); if (slice->end != NULL) tuple_unref(slice->end); fiber_cond_destroy(&slice->pin_cond); TRASH(slice); free(slice); } int vy_slice_cut(struct vy_slice *slice, int64_t id, struct tuple *begin, struct tuple *end, const struct key_def *cmp_def, struct vy_slice **result) { *result = NULL; if (begin != NULL && slice->end != NULL && vy_key_compare(begin, slice->end, cmp_def) >= 0) return 0; /* no intersection: begin >= slice->end */ if (end != NULL && slice->begin != NULL && vy_key_compare(end, slice->begin, cmp_def) <= 0) return 0; /* no intersection: end <= slice->end */ /* begin = MAX(begin, slice->begin) */ if (slice->begin != NULL && (begin == NULL || vy_key_compare(begin, slice->begin, cmp_def) < 
0)) begin = slice->begin; /* end = MIN(end, slice->end) */ if (slice->end != NULL && (end == NULL || vy_key_compare(end, slice->end, cmp_def) > 0)) end = slice->end; *result = vy_slice_new(id, slice->run, begin, end, cmp_def); if (*result == NULL) return -1; /* OOM */ return 0; } /** * Decode page information from xrow. * * @param[out] page Page information. * @param xrow Xrow to decode. * @param filename Filename for error reporting. * * @retval 0 Success. * @retval -1 Error. */ static int vy_page_info_decode(struct vy_page_info *page, const struct xrow_header *xrow, const char *filename) { assert(xrow->type == VY_INDEX_PAGE_INFO); const char *pos = xrow->body->iov_base; memset(page, 0, sizeof(*page)); uint64_t key_map = vy_page_info_key_map; uint32_t map_size = mp_decode_map(&pos); uint32_t map_item; const char *key_beg; for (map_item = 0; map_item < map_size; ++map_item) { uint32_t key = mp_decode_uint(&pos); key_map &= ~(1ULL << key); switch (key) { case VY_PAGE_INFO_OFFSET: page->offset = mp_decode_uint(&pos); break; case VY_PAGE_INFO_SIZE: page->size = mp_decode_uint(&pos); break; case VY_PAGE_INFO_ROW_COUNT: page->row_count = mp_decode_uint(&pos); break; case VY_PAGE_INFO_MIN_KEY: key_beg = pos; mp_next(&pos); page->min_key = vy_key_dup(key_beg); if (page->min_key == NULL) return -1; break; case VY_PAGE_INFO_UNPACKED_SIZE: page->unpacked_size = mp_decode_uint(&pos); break; case VY_PAGE_INFO_ROW_INDEX_OFFSET: page->row_index_offset = mp_decode_uint(&pos); break; default: diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, tt_sprintf("Can't decode page info: " "unknown key %u", (unsigned)key)); return -1; } } if (key_map) { enum vy_page_info_key key = bit_ctz_u64(key_map); diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, tt_sprintf("Can't decode page info: " "missing mandatory key %s", vy_page_info_key_name(key))); return -1; } return 0; } /** * Read bloom filter from given buffer. * @param bloom - a bloom filter to read. 
* @param buffer[in/out] - a buffer to read from. * The pointer is incremented on the number of bytes read. * @param filename Filename for error reporting. * @return - 0 on success or -1 on format/memory error */ static int vy_run_bloom_decode(struct bloom *bloom, const char **buffer, const char *filename) { const char **pos = buffer; memset(bloom, 0, sizeof(*bloom)); uint32_t array_size = mp_decode_array(pos); if (array_size != 4) { diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, tt_sprintf("Can't decode bloom meta: " "wrong array size (expected %d, got %u)", 4, (unsigned)array_size)); return -1; } uint64_t version = mp_decode_uint(pos); if (version != VY_BLOOM_VERSION) { diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, tt_sprintf("Can't decode bloom meta: " "wrong version (expected %d, got %u)", VY_BLOOM_VERSION, (unsigned)version)); } bloom->table_size = mp_decode_uint(pos); bloom->hash_count = mp_decode_uint(pos); size_t table_size = mp_decode_binl(pos); if (table_size != bloom_store_size(bloom)) { diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, tt_sprintf("Can't decode bloom meta: " "wrong table size (expected %zu, got %zu)", bloom_store_size(bloom), table_size)); return -1; } if (bloom_load_table(bloom, *pos, runtime.quota) != 0) { diag_set(OutOfMemory, bloom_store_size(bloom), "mmap", "bloom"); return -1; } *pos += table_size; return 0; } /** * Decode the run metadata from xrow. * * @param xrow xrow to decode * @param[out] run_info the run information * @param filename File name for error reporting. 
* * @retval 0 success * @retval -1 error (check diag) */ int vy_run_info_decode(struct vy_run_info *run_info, const struct xrow_header *xrow, const char *filename) { assert(xrow->type == VY_INDEX_RUN_INFO); /* decode run */ const char *pos = xrow->body->iov_base; memset(run_info, 0, sizeof(*run_info)); uint64_t key_map = vy_run_info_key_map; uint32_t map_size = mp_decode_map(&pos); uint32_t map_item; const char *tmp; /* decode run values */ for (map_item = 0; map_item < map_size; ++map_item) { uint32_t key = mp_decode_uint(&pos); key_map &= ~(1ULL << key); switch (key) { case VY_RUN_INFO_MIN_KEY: tmp = pos; mp_next(&pos); run_info->min_key = vy_key_dup(tmp); if (run_info->min_key == NULL) return -1; break; case VY_RUN_INFO_MAX_KEY: tmp = pos; mp_next(&pos); run_info->max_key = vy_key_dup(tmp); if (run_info->max_key == NULL) return -1; break; case VY_RUN_INFO_MIN_LSN: run_info->min_lsn = mp_decode_uint(&pos); break; case VY_RUN_INFO_MAX_LSN: run_info->max_lsn = mp_decode_uint(&pos); break; case VY_RUN_INFO_PAGE_COUNT: run_info->page_count = mp_decode_uint(&pos); break; case VY_RUN_INFO_BLOOM: if (vy_run_bloom_decode(&run_info->bloom, &pos, filename) == 0) run_info->has_bloom = true; else return -1; break; default: diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, "Can't decode run info: unknown key %u", (unsigned)key); return -1; } } if (key_map) { enum vy_run_info_key key = bit_ctz_u64(key_map); diag_set(ClientError, ER_INVALID_INDEX_FILE, filename, tt_sprintf("Can't decode run info: " "missing mandatory key %s", vy_run_info_key_name(key))); return -1; } return 0; } static struct vy_page * vy_page_new(const struct vy_page_info *page_info) { struct vy_page *page = malloc(sizeof(*page)); if (page == NULL) { diag_set(OutOfMemory, sizeof(*page), "load_page", "page cache"); return NULL; } page->unpacked_size = page_info->unpacked_size; page->row_count = page_info->row_count; page->row_index = calloc(page_info->row_count, sizeof(uint32_t)); if (page->row_index == 
NULL) { diag_set(OutOfMemory, page_info->row_count * sizeof(uint32_t), "malloc", "page->row_index"); free(page); return NULL; } page->data = (char *)malloc(page_info->unpacked_size); if (page->data == NULL) { diag_set(OutOfMemory, page_info->unpacked_size, "malloc", "page->data"); free(page->row_index); free(page); return NULL; } return page; } static void vy_page_delete(struct vy_page *page) { uint32_t *row_index = page->row_index; char *data = page->data; #if !defined(NDEBUG) memset(row_index, '#', sizeof(uint32_t) * page->row_count); memset(data, '#', page->unpacked_size); memset(page, '#', sizeof(*page)); #endif /* !defined(NDEBUG) */ free(row_index); free(data); free(page); } static int vy_page_xrow(struct vy_page *page, uint32_t stmt_no, struct xrow_header *xrow) { assert(stmt_no < page->row_count); const char *data = page->data + page->row_index[stmt_no]; const char *data_end = stmt_no + 1 < page->row_count ? page->data + page->row_index[stmt_no + 1] : page->data + page->unpacked_size; return xrow_header_decode(xrow, &data, data_end); } /* {{{ vy_run_iterator vy_run_iterator support functions */ /** * Read raw stmt data from the page * @param page Page. * @param stmt_no Statement position in the page. * @param cmp_def Key definition of an index, including * primary key parts. * @param format Format for REPLACE/DELETE tuples. * @param upsert_format Format for UPSERT tuples. * @param is_primary True if the index is primary. * * @retval not NULL Statement read from page. * @retval NULL Memory error. */ static struct tuple * vy_page_stmt(struct vy_page *page, uint32_t stmt_no, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary) { struct xrow_header xrow; if (vy_page_xrow(page, stmt_no, &xrow) != 0) return NULL; return vy_stmt_decode(&xrow, cmp_def, format, upsert_format, is_primary); } /** * End iteration and free cached data. 
*/ static void vy_run_iterator_stop(struct vy_run_iterator *itr) { if (itr->curr_stmt != NULL) { tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; } if (itr->curr_page != NULL) { vy_page_delete(itr->curr_page); if (itr->prev_page != NULL) vy_page_delete(itr->prev_page); itr->curr_page = itr->prev_page = NULL; } itr->search_ended = true; } static int vy_row_index_decode(uint32_t *row_index, uint32_t row_count, struct xrow_header *xrow) { assert(xrow->type == VY_RUN_ROW_INDEX); const char *pos = xrow->body->iov_base; uint32_t map_size = mp_decode_map(&pos); uint32_t map_item; uint32_t size = 0; for (map_item = 0; map_item < map_size; ++map_item) { uint32_t key = mp_decode_uint(&pos); switch (key) { case VY_ROW_INDEX_DATA: size = mp_decode_binl(&pos); break; } } if (size != sizeof(uint32_t) * row_count) { diag_set(ClientError, ER_INVALID_RUN_FILE, tt_sprintf("Wrong row index size " "(expected %zu, got %u", sizeof(uint32_t) * row_count, (unsigned)size)); return -1; } for (uint32_t i = 0; i < row_count; ++i) { row_index[i] = mp_load_u32(&pos); } assert(pos == xrow->body->iov_base + xrow->body->iov_len); return 0; } /** Return the name of a run data file. */ static inline const char * vy_run_filename(struct vy_run *run) { char *buf = tt_static_buf(); vy_run_snprint_filename(buf, TT_STATIC_BUF_LEN, run->id, VY_FILE_RUN); return buf; } /** * Read a page requests from vinyl xlog data file. 
* * @retval 0 on success * @retval -1 on error, check diag */ static int vy_page_read(struct vy_page *page, const struct vy_page_info *page_info, struct vy_run *run, ZSTD_DStream *zdctx) { /* read xlog tx from xlog file */ size_t region_svp = region_used(&fiber()->gc); char *data = (char *)region_alloc(&fiber()->gc, page_info->size); if (data == NULL) { diag_set(OutOfMemory, page_info->size, "region gc", "page"); return -1; } ssize_t readen = fio_pread(run->fd, data, page_info->size, page_info->offset); ERROR_INJECT(ERRINJ_VYRUN_DATA_READ, { readen = -1; errno = EIO;}); if (readen < 0) { diag_set(SystemError, "failed to read from file"); goto error; } if (readen != (ssize_t)page_info->size) { diag_set(ClientError, ER_INVALID_RUN_FILE, "Unexpected end of file"); goto error; } struct errinj *inj = errinj(ERRINJ_VY_READ_PAGE_TIMEOUT, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam > 0) usleep(inj->dparam * 1000000); /* decode xlog tx */ const char *data_pos = data; const char *data_end = data + readen; char *rows = page->data; char *rows_end = rows + page_info->unpacked_size; if (xlog_tx_decode(data, data_end, rows, rows_end, zdctx) != 0) goto error; struct xrow_header xrow; data_pos = page->data + page_info->row_index_offset; data_end = page->data + page_info->unpacked_size; if (xrow_header_decode(&xrow, &data_pos, data_end) == -1) goto error; if (xrow.type != VY_RUN_ROW_INDEX) { diag_set(ClientError, ER_INVALID_RUN_FILE, tt_sprintf("Wrong row index type " "(expected %d, got %u)", VY_RUN_ROW_INDEX, (unsigned)xrow.type)); goto error; } if (vy_row_index_decode(page->row_index, page->row_count, &xrow) != 0) goto error; region_truncate(&fiber()->gc, region_svp); ERROR_INJECT(ERRINJ_VY_READ_PAGE, { diag_set(ClientError, ER_INJECTION, "vinyl page read"); return -1;}); return 0; error: region_truncate(&fiber()->gc, region_svp); diag_log(); say_error("error reading %s@%llu:%u", vy_run_filename(run), (unsigned long long)page_info->offset, (unsigned)page_info->size); return -1; 
}

/**
 * Get the thread-local zstd decompression context, creating it
 * lazily on first use. The context is stored under
 * env->zdctx_key and reused for all subsequent page reads on
 * this thread.
 *
 * @retval context pointer on success
 * @retval NULL on allocation failure (diag is set)
 */
static ZSTD_DStream *
vy_env_get_zdctx(struct vy_run_env *env)
{
	ZSTD_DStream *zdctx = tt_pthread_getspecific(env->zdctx_key);
	if (zdctx == NULL) {
		zdctx = ZSTD_createDStream();
		if (zdctx == NULL) {
			diag_set(OutOfMemory, sizeof(zdctx), "malloc",
				 "zstd context");
			return NULL;
		}
		tt_pthread_setspecific(env->zdctx_key, zdctx);
	}
	return zdctx;
}

/**
 * Vinyl read task callback: executed in a reader thread,
 * performs the actual disk read and decompression of one page.
 */
static int
vy_page_read_cb(struct cbus_call_msg *base)
{
	struct vy_page_read_task *task = (struct vy_page_read_task *)base;
	ZSTD_DStream *zdctx = vy_env_get_zdctx(task->run->env);
	if (zdctx == NULL)
		return -1;
	return vy_page_read(task->page, &task->page_info, task->run, zdctx);
}

/**
 * Vinyl read task cleanup callback: releases everything the
 * task owns (page, run reference, the task itself). Invoked
 * when the call was abandoned by the caller (see the timeout
 * path in vy_run_iterator_load_page).
 */
static int
vy_page_read_cb_free(struct cbus_call_msg *base)
{
	struct vy_page_read_task *task = (struct vy_page_read_task *)base;
	struct vy_run_env *env = task->run->env;
	vy_page_delete(task->page);
	vy_run_unref(task->run);
	mempool_free(&env->read_task_pool, task);
	return 0;
}

/**
 * Read a page from disk given its number.
 * The function caches two most recently read pages
 * (itr->curr_page and itr->prev_page).
 *
 * @retval 0 success, *result points to the loaded page
 * @retval -1 critical error
 */
static NODISCARD int
vy_run_iterator_load_page(struct vy_run_iterator *itr, uint32_t page_no,
			  struct vy_page **result)
{
	struct vy_slice *slice = itr->slice;
	struct vy_run_env *env = slice->run->env;

	/* Check cache: hit on the current page, then on the
	 * previous one (swapped to become current). */
	if (itr->curr_page != NULL) {
		if (itr->curr_page->page_no == page_no) {
			*result = itr->curr_page;
			return 0;
		}
		if (itr->prev_page != NULL &&
		    itr->prev_page->page_no == page_no) {
			SWAP(itr->prev_page, itr->curr_page);
			*result = itr->curr_page;
			return 0;
		}
	}

	/* Allocate buffers */
	struct vy_page_info *page_info = vy_run_page_info(slice->run, page_no);
	struct vy_page *page = vy_page_new(page_info);
	if (page == NULL)
		return -1;

	/* Read page data from the disk */
	int rc;
	if (env->reader_pool != NULL) {
		/* Allocate a cbus task. */
		struct vy_page_read_task *task;
		task = mempool_alloc(&env->read_task_pool);
		if (task == NULL) {
			diag_set(OutOfMemory, sizeof(*task), "mempool",
				 "vy_page_read_task");
			vy_page_delete(page);
			return -1;
		}
		/* Pick a reader thread (simple round-robin). */
		struct vy_run_reader *reader;
		reader = &env->reader_pool[env->next_reader++];
		env->next_reader %= env->reader_pool_size;

		task->run = slice->run;
		task->page_info = *page_info;
		task->page = page;
		vy_run_ref(task->run);

		/* Post task to the reader thread. */
		rc = cbus_call(&reader->reader_pipe, &reader->tx_pipe,
			       &task->base, vy_page_read_cb,
			       vy_page_read_cb_free, TIMEOUT_INFINITY);
		/*
		 * On timeout/cancel the task (page, run ref) is
		 * released later by vy_page_read_cb_free, so do
		 * not free anything here.
		 */
		if (!task->base.complete)
			return -1; /* timed out or cancelled */

		vy_run_unref(task->run);
		mempool_free(&env->read_task_pool, task);

		if (rc != 0) {
			/* posted, but failed */
			vy_page_delete(page);
			return -1;
		}
	} else {
		/*
		 * Optimization: use blocked I/O for non-TX threads or
		 * during WAL recovery (env->status != VINYL_ONLINE).
		 */
		ZSTD_DStream *zdctx = vy_env_get_zdctx(env);
		if (zdctx == NULL) {
			vy_page_delete(page);
			return -1;
		}
		if (vy_page_read(page, page_info, slice->run, zdctx) != 0) {
			vy_page_delete(page);
			return -1;
		}
	}

	/* Update cache: evict the oldest of the two slots. */
	if (itr->prev_page != NULL)
		vy_page_delete(itr->prev_page);
	itr->prev_page = itr->curr_page;
	itr->curr_page = page;
	page->page_no = page_no;

	/* Update read statistics. */
	itr->stat->read.rows += page_info->row_count;
	itr->stat->read.bytes += page_info->unpacked_size;
	itr->stat->read.bytes_compressed += page_info->size;
	itr->stat->read.pages++;

	*result = page;
	return 0;
}

/**
 * Read key and lsn by a given wide position.
 * For the first record in a page reads the result from the page
 * index instead of fetching it from disk.
 *
 * @retval 0 success
 * @retval -1 read error or out of memory.
 */
static NODISCARD int
vy_run_iterator_read(struct vy_run_iterator *itr,
		     struct vy_run_iterator_pos pos,
		     struct tuple **stmt)
{
	struct vy_page *page;
	/* Load the page (cache-aware) and decode the statement. */
	int rc = vy_run_iterator_load_page(itr, pos.page_no, &page);
	if (rc != 0)
		return rc;
	*stmt = vy_page_stmt(page, pos.pos_in_page, itr->cmp_def, itr->format,
			     itr->upsert_format, itr->is_primary);
	if (*stmt == NULL)
		return -1;
	return 0;
}

/**
 * Binary search in page.
 * In terms of STL, makes lower_bound for EQ,GE,LT and upper_bound for GT,LE.
 * Additionally *equal_key argument is set to true if the found value is
 * equal to the given key (untouched otherwise).
 * @retval position in the page
 */
static uint32_t
vy_run_iterator_search_in_page(struct vy_run_iterator *itr,
			       enum iterator_type iterator_type,
			       const struct tuple *key,
			       struct vy_page *page, bool *equal_key)
{
	uint32_t beg = 0;
	uint32_t end = page->row_count;
	/*
	 * For upper bound we change zero comparison result to -1:
	 * an equal element then compares as "less", so the search
	 * converges past the last equal entry.
	 */
	int zero_cmp = (iterator_type == ITER_GT ||
			iterator_type == ITER_LE ? -1 : 0);
	while (beg != end) {
		uint32_t mid = beg + (end - beg) / 2;
		struct tuple *fnd_key = vy_page_stmt(page, mid, itr->cmp_def,
					itr->format, itr->upsert_format,
					itr->is_primary);
		if (fnd_key == NULL)
			return end;
		int cmp = vy_stmt_compare(fnd_key, key, itr->cmp_def);
		cmp = cmp ? cmp : zero_cmp;
		*equal_key = *equal_key || cmp == 0;
		if (cmp < 0)
			beg = mid + 1;
		else
			end = mid;
		tuple_unref(fnd_key);
	}
	return end;
}

/**
 * Binary search in a run for the given key.
 * In terms of STL, makes lower_bound for EQ,GE,LT and upper_bound for GT,LE.
 * The resulting wide position is stored in the *pos argument.
 * Additionally *equal_key argument is set to true if the found value is
 * equal to the given key (untouched otherwise).
 *
 * @retval 0 success
 * @retval -1 read or memory error
 */
static NODISCARD int
vy_run_iterator_search(struct vy_run_iterator *itr,
		       enum iterator_type iterator_type,
		       const struct tuple *key,
		       struct vy_run_iterator_pos *pos, bool *equal_key)
{
	/* First narrow the search down to a single page... */
	pos->page_no = vy_page_index_find_page(itr->slice->run, key,
					       itr->cmp_def, iterator_type,
					       equal_key);
	if (pos->page_no == itr->slice->run->info.page_count) {
		itr->search_ended = true;
		return 0;
	}
	/* ...then binary-search inside that page. */
	struct vy_page *page;
	int rc = vy_run_iterator_load_page(itr, pos->page_no, &page);
	if (rc != 0)
		return rc;
	bool equal_in_page = false;
	pos->pos_in_page = vy_run_iterator_search_in_page(itr, iterator_type,
							  key, page,
							  &equal_in_page);
	if (pos->pos_in_page == page->row_count) {
		/* Past the end of the page: move to the start
		 * of the next one. */
		pos->page_no++;
		pos->pos_in_page = 0;
	} else {
		*equal_key = equal_in_page;
	}
	return 0;
}

/**
 * Increment (or decrement, depending on the order) the current
 * wide position.
 * @retval 0 success, set *pos to new value
 * @retval 1 EOF
 * Affects: curr_loaded_page
 */
static NODISCARD int
vy_run_iterator_next_pos(struct vy_run_iterator *itr,
			 enum iterator_type iterator_type,
			 struct vy_run_iterator_pos *pos)
{
	struct vy_run *run = itr->slice->run;
	*pos = itr->curr_pos;
	if (iterator_type == ITER_LE || iterator_type == ITER_LT) {
		/*
		 * Backward iteration: step left within the page,
		 * or to the last row of the previous page.
		 */
		assert(pos->page_no <= run->info.page_count);
		if (pos->pos_in_page > 0) {
			pos->pos_in_page--;
		} else {
			if (pos->page_no == 0)
				return 1;
			pos->page_no--;
			struct vy_page_info *page_info =
				vy_run_page_info(run, pos->page_no);
			assert(page_info->row_count > 0);
			pos->pos_in_page = page_info->row_count - 1;
		}
	} else {
		/*
		 * Forward iteration: step right within the page,
		 * or to the first row of the next page.
		 */
		assert(iterator_type == ITER_GE || iterator_type == ITER_GT ||
		       iterator_type == ITER_EQ);
		assert(pos->page_no < run->info.page_count);
		struct vy_page_info *page_info =
			vy_run_page_info(run, pos->page_no);
		assert(page_info->row_count > 0);
		pos->pos_in_page++;
		if (pos->pos_in_page >= page_info->row_count) {
			pos->page_no++;
			pos->pos_in_page = 0;
			if (pos->page_no == run->info.page_count)
				return 1;
		}
	}
	return 0;
}

/**
 * Find the next record with lsn <= itr->lsn record.
 * The current position must be at the beginning of a series of
 * records with the same key in terms of direction of iterator
 * (i.e. left for GE, right for LE).
* @retval 0 success or EOF (*ret == NULL) * @retval -1 read or memory error * Affects: curr_loaded_page, curr_pos, search_ended */ static NODISCARD int vy_run_iterator_find_lsn(struct vy_run_iterator *itr, enum iterator_type iterator_type, const struct tuple *key, struct tuple **ret) { struct vy_slice *slice = itr->slice; const struct key_def *cmp_def = itr->cmp_def; *ret = NULL; assert(itr->search_started); assert(!itr->search_ended); assert(itr->curr_stmt != NULL); assert(itr->curr_pos.page_no < slice->run->info.page_count); while (vy_stmt_lsn(itr->curr_stmt) > (**itr->read_view).vlsn) { if (vy_run_iterator_next_pos(itr, iterator_type, &itr->curr_pos) != 0) { vy_run_iterator_stop(itr); return 0; } tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; if (vy_run_iterator_read(itr, itr->curr_pos, &itr->curr_stmt) != 0) return -1; if (iterator_type == ITER_EQ && vy_stmt_compare(itr->curr_stmt, key, cmp_def) != 0) { vy_run_iterator_stop(itr); return 0; } } if (iterator_type == ITER_LE || iterator_type == ITER_LT) { struct vy_run_iterator_pos test_pos; while (vy_run_iterator_next_pos(itr, iterator_type, &test_pos) == 0) { struct tuple *test_stmt; if (vy_run_iterator_read(itr, test_pos, &test_stmt) != 0) return -1; if (vy_stmt_lsn(test_stmt) > (**itr->read_view).vlsn || vy_tuple_compare(itr->curr_stmt, test_stmt, cmp_def) != 0) { tuple_unref(test_stmt); break; } tuple_unref(itr->curr_stmt); itr->curr_stmt = test_stmt; itr->curr_pos = test_pos; } } /* Check if the result is within the slice boundaries. 
*/ if (iterator_type == ITER_LE || iterator_type == ITER_LT) { if (slice->begin != NULL && vy_tuple_compare_with_key(itr->curr_stmt, slice->begin, cmp_def) < 0) { vy_run_iterator_stop(itr); return 0; } } else { assert(iterator_type == ITER_GE || iterator_type == ITER_GT || iterator_type == ITER_EQ); if (slice->end != NULL && vy_tuple_compare_with_key(itr->curr_stmt, slice->end, cmp_def) >= 0) { vy_run_iterator_stop(itr); return 0; } } vy_stmt_counter_acct_tuple(&itr->stat->get, itr->curr_stmt); *ret = itr->curr_stmt; return 0; } static NODISCARD int vy_run_iterator_do_seek(struct vy_run_iterator *itr, enum iterator_type iterator_type, const struct tuple *key, struct tuple **ret) { struct vy_run *run = itr->slice->run; *ret = NULL; const struct key_def *key_def = itr->key_def; bool is_full_key = (tuple_field_count(key) >= key_def->part_count); if (run->info.has_bloom && iterator_type == ITER_EQ && is_full_key) { uint32_t hash; if (vy_stmt_type(key) == IPROTO_SELECT) { const char *data = tuple_data(key); mp_decode_array(&data); hash = key_hash(data, key_def); } else { hash = tuple_hash(key, key_def); } if (!bloom_possible_has(&run->info.bloom, hash)) { itr->search_ended = true; itr->stat->bloom_hit++; return 0; } } itr->stat->lookup++; struct vy_run_iterator_pos end_pos = {run->info.page_count, 0}; bool equal_found = false; int rc; if (tuple_field_count(key) > 0) { rc = vy_run_iterator_search(itr, iterator_type, key, &itr->curr_pos, &equal_found); if (rc != 0 || itr->search_ended) return rc; } else if (iterator_type == ITER_LE) { itr->curr_pos = end_pos; } else { assert(iterator_type == ITER_GE); itr->curr_pos.page_no = 0; itr->curr_pos.pos_in_page = 0; } if (iterator_type == ITER_EQ && !equal_found) { vy_run_iterator_stop(itr); if (run->info.has_bloom && is_full_key) itr->stat->bloom_miss++; return 0; } if ((iterator_type == ITER_GE || iterator_type == ITER_GT) && itr->curr_pos.page_no == end_pos.page_no) { vy_run_iterator_stop(itr); return 0; } if (iterator_type == 
ITER_LT || iterator_type == ITER_LE) { /** * 1) in case of ITER_LT we now positioned on the value >= than * given, so we need to make a step on previous key * 2) in case if ITER_LE we now positioned on the value > than * given (special branch of code in vy_run_iterator_search), * so we need to make a step on previous key */ if (vy_run_iterator_next_pos(itr, iterator_type, &itr->curr_pos) > 0) { vy_run_iterator_stop(itr); return 0; } } else { assert(iterator_type == ITER_GE || iterator_type == ITER_GT || iterator_type == ITER_EQ); /** * 1) in case of ITER_GT we now positioned on the value > than * given (special branch of code in vy_run_iterator_search), * so we need just to find proper lsn * 2) in case if ITER_GE or ITER_EQ we now positioned on the * value >= given, so we need just to find proper lsn */ } if (itr->curr_stmt != NULL) { tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; } if (vy_run_iterator_read(itr, itr->curr_pos, &itr->curr_stmt) != 0) return -1; return vy_run_iterator_find_lsn(itr, iterator_type, key, ret); } /** * Position the iterator to the first statement satisfying * the search criteria for a given key and direction. 
*/ static NODISCARD int vy_run_iterator_seek(struct vy_run_iterator *itr, enum iterator_type iterator_type, const struct tuple *key, struct tuple **ret) { const struct key_def *cmp_def = itr->cmp_def; struct vy_slice *slice = itr->slice; const struct tuple *check_eq_key = NULL; int cmp; if (slice->begin != NULL && (iterator_type == ITER_GT || iterator_type == ITER_GE || iterator_type == ITER_EQ)) { /* * original | start * --------------+-------+-----+ * KEY | DIR | KEY | DIR | * --------+-----+-------+-----+ * > begin | * | key | * | * = begin | gt | key | gt | * | ge | begin | ge | * | eq | begin | ge | * < begin | gt | begin | ge | * | ge | begin | ge | * | eq | stop | */ cmp = vy_stmt_compare_with_key(key, slice->begin, cmp_def); if (cmp < 0 && iterator_type == ITER_EQ) { vy_run_iterator_stop(itr); return 0; } if (cmp < 0 || (cmp == 0 && iterator_type != ITER_GT)) { if (iterator_type == ITER_EQ) check_eq_key = key; iterator_type = ITER_GE; key = slice->begin; } } if (slice->end != NULL && (iterator_type == ITER_LT || iterator_type == ITER_LE)) { /* * original | start * --------------+-------+-----+ * KEY | DIR | KEY | DIR | * --------+-----+-------+-----+ * < end | * | key | * | * = end | lt | key | lt | * | le | end | lt | * > end | lt | end | lt | * | le | end | lt | */ cmp = vy_stmt_compare_with_key(key, slice->end, cmp_def); if (cmp > 0 || (cmp == 0 && iterator_type != ITER_LT)) { iterator_type = ITER_LT; key = slice->end; } } if (vy_run_iterator_do_seek(itr, iterator_type, key, ret) != 0) return -1; if (check_eq_key != NULL && *ret != NULL && vy_stmt_compare(check_eq_key, *ret, cmp_def) != 0) { vy_run_iterator_stop(itr); *ret = NULL; } return 0; } /* }}} vy_run_iterator vy_run_iterator support functions */ /* {{{ vy_run_iterator API implementation */ void vy_run_iterator_open(struct vy_run_iterator *itr, struct vy_run_iterator_stat *stat, struct vy_slice *slice, enum iterator_type iterator_type, const struct tuple *key, const struct vy_read_view **rv, const 
struct key_def *cmp_def, const struct key_def *key_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary) { itr->stat = stat; itr->cmp_def = cmp_def; itr->key_def = key_def; itr->format = format; itr->upsert_format = upsert_format; itr->is_primary = is_primary; itr->slice = slice; itr->iterator_type = iterator_type; itr->key = key; itr->read_view = rv; itr->curr_stmt = NULL; itr->curr_pos.page_no = slice->run->info.page_count; itr->curr_page = NULL; itr->prev_page = NULL; itr->search_started = false; itr->search_ended = false; } NODISCARD int vy_run_iterator_next_key(struct vy_run_iterator *itr, struct tuple **ret) { *ret = NULL; if (itr->search_ended) return 0; if (!itr->search_started) { itr->search_started = true; return vy_run_iterator_seek(itr, itr->iterator_type, itr->key, ret); } assert(itr->curr_stmt != NULL); assert(itr->curr_pos.page_no < itr->slice->run->info.page_count); struct tuple *next_key = NULL; do { if (next_key != NULL) tuple_unref(next_key); if (vy_run_iterator_next_pos(itr, itr->iterator_type, &itr->curr_pos) != 0) { vy_run_iterator_stop(itr); return 0; } if (vy_run_iterator_read(itr, itr->curr_pos, &next_key) != 0) return -1; } while (vy_tuple_compare(itr->curr_stmt, next_key, itr->cmp_def) == 0); tuple_unref(itr->curr_stmt); itr->curr_stmt = next_key; if (itr->iterator_type == ITER_EQ && vy_stmt_compare(next_key, itr->key, itr->cmp_def) != 0) { vy_run_iterator_stop(itr); return 0; } return vy_run_iterator_find_lsn(itr, itr->iterator_type, itr->key, ret); } NODISCARD int vy_run_iterator_next_lsn(struct vy_run_iterator *itr, struct tuple **ret) { *ret = NULL; assert(itr->search_started); if (itr->search_ended) return 0; assert(itr->curr_stmt != NULL); assert(itr->curr_pos.page_no < itr->slice->run->info.page_count); struct vy_run_iterator_pos next_pos; if (vy_run_iterator_next_pos(itr, ITER_GE, &next_pos) != 0) { vy_run_iterator_stop(itr); return 0; } struct tuple *next_key; if (vy_run_iterator_read(itr, 
next_pos, &next_key) != 0) return -1; if (vy_tuple_compare(itr->curr_stmt, next_key, itr->cmp_def) != 0) { tuple_unref(next_key); return 0; } tuple_unref(itr->curr_stmt); itr->curr_stmt = next_key; itr->curr_pos = next_pos; vy_stmt_counter_acct_tuple(&itr->stat->get, itr->curr_stmt); *ret = itr->curr_stmt; return 0; } NODISCARD int vy_run_iterator_skip(struct vy_run_iterator *itr, const struct tuple *last_stmt, struct tuple **ret) { *ret = NULL; if (itr->search_ended) return 0; /* * Check if the iterator is already positioned * at the statement following last_stmt. */ if (itr->search_started && (itr->curr_stmt == NULL || last_stmt == NULL || iterator_direction(itr->iterator_type) * vy_tuple_compare(itr->curr_stmt, last_stmt, itr->cmp_def) > 0)) { *ret = itr->curr_stmt; return 0; } const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = last_stmt; iterator_type = iterator_direction(iterator_type) > 0 ? ITER_GT : ITER_LT; } itr->search_started = true; if (vy_run_iterator_seek(itr, iterator_type, key, ret) != 0) return -1; if (itr->iterator_type == ITER_EQ && last_stmt != NULL && *ret != NULL && vy_stmt_compare(itr->key, *ret, itr->cmp_def) != 0) { vy_run_iterator_stop(itr); *ret = NULL; } return 0; } void vy_run_iterator_close(struct vy_run_iterator *itr) { vy_run_iterator_stop(itr); TRASH(itr); } /* }}} vy_run_iterator API implementation */ /** Account a page to run statistics. 
*/ static void vy_run_acct_page(struct vy_run *run, struct vy_page_info *page) { const char *min_key_end = page->min_key; mp_next(&min_key_end); run->page_index_size += sizeof(struct vy_page_info); run->page_index_size += min_key_end - page->min_key; run->count.rows += page->row_count; run->count.bytes += page->unpacked_size; run->count.bytes_compressed += page->size; run->count.pages++; } int vy_run_recover(struct vy_run *run, const char *dir, uint32_t space_id, uint32_t iid) { char path[PATH_MAX]; vy_run_snprint_path(path, sizeof(path), dir, space_id, iid, run->id, VY_FILE_INDEX); struct xlog_cursor cursor; if (xlog_cursor_open(&cursor, path)) goto fail; struct xlog_meta *meta = &cursor.meta; if (strcmp(meta->filetype, XLOG_META_TYPE_INDEX) != 0) { diag_set(ClientError, ER_INVALID_XLOG_TYPE, XLOG_META_TYPE_INDEX, meta->filetype); goto fail_close; } /* Read run header. */ struct xrow_header xrow; ERROR_INJECT(ERRINJ_VYRUN_INDEX_GARBAGE, { errinj(ERRINJ_XLOG_GARBAGE, ERRINJ_BOOL)->bparam = true; }); /* all rows should be in one tx */ int rc = xlog_cursor_next_tx(&cursor); ERROR_INJECT(ERRINJ_VYRUN_INDEX_GARBAGE, { errinj(ERRINJ_XLOG_GARBAGE, ERRINJ_BOOL)->bparam = false; }); if (rc != 0) { if (rc > 0) diag_set(ClientError, ER_INVALID_INDEX_FILE, path, "Unexpected end of file"); goto fail_close; } rc = xlog_cursor_next_row(&cursor, &xrow); if (rc != 0) { if (rc > 0) diag_set(ClientError, ER_INVALID_INDEX_FILE, path, "Unexpected end of file"); goto fail_close; } if (xrow.type != VY_INDEX_RUN_INFO) { diag_set(ClientError, ER_INVALID_INDEX_FILE, path, tt_sprintf("Wrong xrow type (expected %d, got %u)", VY_INDEX_RUN_INFO, (unsigned)xrow.type)); goto fail_close; } if (vy_run_info_decode(&run->info, &xrow, path) != 0) goto fail_close; /* Allocate buffer for page info. 
*/ run->page_info = calloc(run->info.page_count, sizeof(struct vy_page_info)); if (run->page_info == NULL) { diag_set(OutOfMemory, run->info.page_count * sizeof(struct vy_page_info), "malloc", "struct vy_page_info"); goto fail_close; } for (uint32_t page_no = 0; page_no < run->info.page_count; page_no++) { int rc = xlog_cursor_next_row(&cursor, &xrow); if (rc != 0) { if (rc > 0) { /** To few pages in file */ diag_set(ClientError, ER_INVALID_INDEX_FILE, path, "Unexpected end of file"); } /* * Limit the count of pages to * successfully created pages. */ run->info.page_count = page_no; goto fail_close; } if (xrow.type != VY_INDEX_PAGE_INFO) { diag_set(ClientError, ER_INVALID_INDEX_FILE, tt_sprintf("Wrong xrow type " "(expected %d, got %u)", VY_INDEX_PAGE_INFO, (unsigned)xrow.type)); goto fail_close; } struct vy_page_info *page = run->page_info + page_no; if (vy_page_info_decode(page, &xrow, path) < 0) { /** * Limit the count of pages to successfully * created pages */ run->info.page_count = page_no; goto fail_close; } vy_run_acct_page(run, page); } /* We don't need to keep metadata file open any longer. */ xlog_cursor_close(&cursor, false); /* Prepare data file for reading. */ vy_run_snprint_path(path, sizeof(path), dir, space_id, iid, run->id, VY_FILE_RUN); if (xlog_cursor_open(&cursor, path)) goto fail; meta = &cursor.meta; if (strcmp(meta->filetype, XLOG_META_TYPE_RUN) != 0) { diag_set(ClientError, ER_INVALID_XLOG_TYPE, XLOG_META_TYPE_RUN, meta->filetype); goto fail_close; } run->fd = cursor.fd; xlog_cursor_close(&cursor, true); return 0; fail_close: xlog_cursor_close(&cursor, false); fail: vy_run_clear(run); diag_log(); say_error("failed to load `%s'", path); return -1; } /* dump statement to the run page buffers (stmt header and data) */ static int vy_run_dump_stmt(const struct tuple *value, struct xlog *data_xlog, struct vy_page_info *info, const struct key_def *key_def, bool is_primary) { struct xrow_header xrow; int rc = (is_primary ? 
	  vy_stmt_encode_primary(value, key_def, 0, &xrow) :
	  vy_stmt_encode_secondary(value, key_def, &xrow));
	if (rc != 0)
		return -1;

	ssize_t row_size;
	if ((row_size = xlog_write_row(data_xlog, &xrow)) < 0)
		return -1;

	/* Account the row in the page being built. */
	info->unpacked_size += row_size;
	info->row_count++;
	return 0;
}

/**
 * Encode uint32_t array of row offsets (row index) as xrow.
 *
 * @param row_index row index
 * @param row_count size of row index
 * @param[out] xrow xrow to fill.
 * @retval 0 for success
 * @retval -1 for error
 */
static int
vy_row_index_encode(const uint32_t *row_index, uint32_t row_count,
		    struct xrow_header *xrow)
{
	memset(xrow, 0, sizeof(*xrow));
	xrow->type = VY_RUN_ROW_INDEX;

	/* One-entry map: VY_ROW_INDEX_DATA -> bin of offsets. */
	size_t size = mp_sizeof_map(1) +
		      mp_sizeof_uint(VY_ROW_INDEX_DATA) +
		      mp_sizeof_bin(sizeof(uint32_t) * row_count);
	char *pos = region_alloc(&fiber()->gc, size);
	if (pos == NULL) {
		diag_set(OutOfMemory, size, "region", "row index");
		return -1;
	}
	xrow->body->iov_base = pos;
	pos = mp_encode_map(pos, 1);
	pos = mp_encode_uint(pos, VY_ROW_INDEX_DATA);
	pos = mp_encode_binl(pos, sizeof(uint32_t) * row_count);
	for (uint32_t i = 0; i < row_count; ++i)
		pos = mp_store_u32(pos, row_index[i]);
	xrow->body->iov_len = (void *)pos - xrow->body->iov_base;
	assert(xrow->body->iov_len == size);
	xrow->bodycnt = 1;
	return 0;
}

/**
 * Helper to extend run page info array.
 * Doubles the capacity (starting at 16) and updates
 * *page_info_capacity on success.
 */
static inline int
vy_run_alloc_page_info(struct vy_run *run, uint32_t *page_info_capacity)
{
	uint32_t cap = *page_info_capacity > 0 ?
		       *page_info_capacity * 2 : 16;
	struct vy_page_info *page_info = realloc(run->page_info,
						 cap * sizeof(*page_info));
	if (page_info == NULL) {
		diag_set(OutOfMemory, cap * sizeof(*page_info),
			 "realloc", "struct vy_page_info");
		return -1;
	}
	run->page_info = page_info;
	*page_info_capacity = cap;
	return 0;
}

/** {{{ vy_page_info */

/**
 * Encode vy_page_info as xrow.
 * Allocates using region_alloc.
 *
 * @param page_info page information to encode
 * @param[out] xrow xrow to fill
 *
 * @retval 0 success
 * @retval -1 error, check diag
 */
static int
vy_page_info_encode(const struct vy_page_info *page_info,
		    struct xrow_header *xrow)
{
	struct region *region = &fiber()->gc;

	/* min_key is MsgPack; measure its encoded size. */
	uint32_t min_key_size;
	const char *tmp = page_info->min_key;
	assert(mp_typeof(*tmp) == MP_ARRAY);
	mp_next(&tmp);
	min_key_size = tmp - page_info->min_key;

	/* calc tuple size */
	uint32_t size;
	/*
	 * 6-entry map: offset, size, row count, min key,
	 * unpacked size, row index offset (see encoding below).
	 */
	size = mp_sizeof_map(6) +
	       mp_sizeof_uint(VY_PAGE_INFO_OFFSET) +
	       mp_sizeof_uint(page_info->offset) +
	       mp_sizeof_uint(VY_PAGE_INFO_SIZE) +
	       mp_sizeof_uint(page_info->size) +
	       mp_sizeof_uint(VY_PAGE_INFO_ROW_COUNT) +
	       mp_sizeof_uint(page_info->row_count) +
	       mp_sizeof_uint(VY_PAGE_INFO_MIN_KEY) +
	       min_key_size +
	       mp_sizeof_uint(VY_PAGE_INFO_UNPACKED_SIZE) +
	       mp_sizeof_uint(page_info->unpacked_size) +
	       mp_sizeof_uint(VY_PAGE_INFO_ROW_INDEX_OFFSET) +
	       mp_sizeof_uint(page_info->row_index_offset);

	char *pos = region_alloc(region, size);
	if (pos == NULL) {
		diag_set(OutOfMemory, size, "region", "page encode");
		return -1;
	}

	memset(xrow, 0, sizeof(*xrow));
	/* encode page */
	xrow->body->iov_base = pos;
	pos = mp_encode_map(pos, 6);
	pos = mp_encode_uint(pos, VY_PAGE_INFO_OFFSET);
	pos = mp_encode_uint(pos, page_info->offset);
	pos = mp_encode_uint(pos, VY_PAGE_INFO_SIZE);
	pos = mp_encode_uint(pos, page_info->size);
	pos = mp_encode_uint(pos, VY_PAGE_INFO_ROW_COUNT);
	pos = mp_encode_uint(pos, page_info->row_count);
	pos = mp_encode_uint(pos, VY_PAGE_INFO_MIN_KEY);
	memcpy(pos, page_info->min_key, min_key_size);
	pos += min_key_size;
	pos = mp_encode_uint(pos, VY_PAGE_INFO_UNPACKED_SIZE);
	pos = mp_encode_uint(pos, page_info->unpacked_size);
	pos = mp_encode_uint(pos, VY_PAGE_INFO_ROW_INDEX_OFFSET);
	pos = mp_encode_uint(pos, page_info->row_index_offset);
	xrow->body->iov_len = (void *)pos - xrow->body->iov_base;
	xrow->bodycnt = 1;
	xrow->type = VY_INDEX_PAGE_INFO;

	return 0;
}

/** vy_page_info }}} */

/** {{{
 vy_run_info */

/**
 * Calculate the size on disk that is needed to store the given
 * bloom filter.
 * @param bloom - the bloom filter to be stored.
 * @return - calculated size.
 */
static size_t
vy_run_bloom_encode_size(const struct bloom *bloom)
{
	size_t size = mp_sizeof_array(4);
	size += mp_sizeof_uint(VY_BLOOM_VERSION); /* version */
	size += mp_sizeof_uint(bloom->table_size);
	size += mp_sizeof_uint(bloom->hash_count);
	size += mp_sizeof_bin(bloom_store_size(bloom));
	return size;
}

/**
 * Write bloom filter to given buffer.
 * The buffer must have at least vy_run_bloom_encode_size() bytes.
 * @param bloom - a bloom filter to write.
 * @param buffer - a buffer to write to.
 * @return - buffer + number of bytes written.
 */
char *
vy_run_bloom_encode(const struct bloom *bloom, char *buffer)
{
	char *pos = buffer;
	/* Array of 4: version, table size, hash count, bitmap. */
	pos = mp_encode_array(pos, 4);
	pos = mp_encode_uint(pos, VY_BLOOM_VERSION);
	pos = mp_encode_uint(pos, bloom->table_size);
	pos = mp_encode_uint(pos, bloom->hash_count);
	pos = mp_encode_binl(pos, bloom_store_size(bloom));
	pos = bloom_store(bloom, pos);
	return pos;
}

/**
 * Encode vy_run_info as xrow.
 * Allocates using region alloc.
 *
 * @param run_info the run information
 * @param xrow xrow to fill.
 *
 * @retval 0 success
 * @retval -1 on error, check diag
 */
static int
vy_run_info_encode(const struct vy_run_info *run_info,
		   struct xrow_header *xrow)
{
	/* Measure the MsgPack-encoded min/max keys. */
	const char *tmp;
	tmp = run_info->min_key;
	mp_next(&tmp);
	size_t min_key_size = tmp - run_info->min_key;
	tmp = run_info->max_key;
	mp_next(&tmp);
	size_t max_key_size = tmp - run_info->max_key;

	/* 5 fixed entries plus an optional bloom filter. */
	uint32_t key_count = 5;
	if (run_info->has_bloom)
		key_count++;

	size_t size = mp_sizeof_map(key_count);
	size += mp_sizeof_uint(VY_RUN_INFO_MIN_KEY) + min_key_size;
	size += mp_sizeof_uint(VY_RUN_INFO_MAX_KEY) + max_key_size;
	size += mp_sizeof_uint(VY_RUN_INFO_MIN_LSN) +
		mp_sizeof_uint(run_info->min_lsn);
	size += mp_sizeof_uint(VY_RUN_INFO_MAX_LSN) +
		mp_sizeof_uint(run_info->max_lsn);
	size += mp_sizeof_uint(VY_RUN_INFO_PAGE_COUNT) +
		mp_sizeof_uint(run_info->page_count);
	if (run_info->has_bloom)
		size += mp_sizeof_uint(VY_RUN_INFO_BLOOM) +
			vy_run_bloom_encode_size(&run_info->bloom);

	char *pos = region_alloc(&fiber()->gc, size);
	if (pos == NULL) {
		diag_set(OutOfMemory, size, "region", "run encode");
		return -1;
	}
	memset(xrow, 0, sizeof(*xrow));
	xrow->body->iov_base = pos;
	/* encode values */
	pos = mp_encode_map(pos, key_count);
	pos = mp_encode_uint(pos, VY_RUN_INFO_MIN_KEY);
	memcpy(pos, run_info->min_key, min_key_size);
	pos += min_key_size;
	pos = mp_encode_uint(pos, VY_RUN_INFO_MAX_KEY);
	memcpy(pos, run_info->max_key, max_key_size);
	pos += max_key_size;
	pos = mp_encode_uint(pos, VY_RUN_INFO_MIN_LSN);
	pos = mp_encode_uint(pos, run_info->min_lsn);
	pos = mp_encode_uint(pos, VY_RUN_INFO_MAX_LSN);
	pos = mp_encode_uint(pos, run_info->max_lsn);
	pos = mp_encode_uint(pos, VY_RUN_INFO_PAGE_COUNT);
	pos = mp_encode_uint(pos, run_info->page_count);
	if (run_info->has_bloom) {
		pos = mp_encode_uint(pos, VY_RUN_INFO_BLOOM);
		pos = vy_run_bloom_encode(&run_info->bloom, pos);
	}
	xrow->body->iov_len = (void *)pos - xrow->body->iov_base;
	xrow->bodycnt = 1;
	xrow->type = VY_INDEX_RUN_INFO;
	return 0;
}

/* vy_run_info }}} */

/**
 * Write run index to file.
*/ static int vy_run_write_index(struct vy_run *run, const char *dirpath, uint32_t space_id, uint32_t iid) { struct region *region = &fiber()->gc; size_t mem_used = region_used(region); char path[PATH_MAX]; vy_run_snprint_path(path, sizeof(path), dirpath, space_id, iid, run->id, VY_FILE_INDEX); say_info("writing `%s'", path); struct xlog index_xlog; struct xlog_meta meta = { .filetype = XLOG_META_TYPE_INDEX, .instance_uuid = INSTANCE_UUID, }; if (xlog_create(&index_xlog, path, 0, &meta) < 0) return -1; xlog_tx_begin(&index_xlog); struct xrow_header xrow; if (vy_run_info_encode(&run->info, &xrow) != 0 || xlog_write_row(&index_xlog, &xrow) < 0) goto fail; for (uint32_t page_no = 0; page_no < run->info.page_count; ++page_no) { struct vy_page_info *page_info = vy_run_page_info(run, page_no); if (vy_page_info_encode(page_info, &xrow) < 0) { goto fail; } if (xlog_write_row(&index_xlog, &xrow) < 0) goto fail; } if (xlog_tx_commit(&index_xlog) < 0 || xlog_flush(&index_xlog) < 0 || xlog_rename(&index_xlog) < 0) goto fail; xlog_close(&index_xlog, false); region_truncate(region, mem_used); return 0; fail: region_truncate(region, mem_used); xlog_tx_rollback(&index_xlog); xlog_close(&index_xlog, false); unlink(path); return -1; } int vy_run_writer_create(struct vy_run_writer *writer, struct vy_run *run, const char *dirpath, uint32_t space_id, uint32_t iid, const struct key_def *cmp_def, const struct key_def *key_def, uint64_t page_size, double bloom_fpr, size_t max_output_count) { memset(writer, 0, sizeof(*writer)); writer->run = run; writer->dirpath = dirpath; writer->space_id = space_id; writer->iid = iid; writer->cmp_def = cmp_def; writer->key_def = key_def; writer->page_size = page_size; writer->has_bloom = (max_output_count > 0 && bloom_fpr < 1); if (writer->has_bloom && bloom_spectrum_create(&writer->bloom, max_output_count, bloom_fpr, runtime.quota) != 0) { diag_set(OutOfMemory, 0, "bloom_spectrum_create", "bloom_spectrum"); return -1; } xlog_clear(&writer->data_xlog); 
ibuf_create(&writer->row_index_buf, &cord()->slabc, 4096 * sizeof(uint32_t)); run->info.min_lsn = INT64_MAX; run->info.max_lsn = -1; assert(run->page_info == NULL); return 0; } /** * Create an xlog to write run. * @param writer Run writer. * @retval -1 Memory or IO error. * @retval 0 Success. */ static int vy_run_writer_create_xlog(struct vy_run_writer *writer) { assert(!xlog_is_open(&writer->data_xlog)); char path[PATH_MAX]; vy_run_snprint_path(path, sizeof(path), writer->dirpath, writer->space_id, writer->iid, writer->run->id, VY_FILE_RUN); say_info("writing `%s'", path); const struct xlog_meta meta = { .filetype = XLOG_META_TYPE_RUN, .instance_uuid = INSTANCE_UUID, }; return xlog_create(&writer->data_xlog, path, 0, &meta); } /** * Start a new page with a min_key stored in @a first_stmt. * @param writer Run writer. * @param first_stmt First statement of a page. * * @retval -1 Memory error. * @retval 0 Success. */ static int vy_run_writer_start_page(struct vy_run_writer *writer, const struct tuple *first_stmt) { struct vy_run *run = writer->run; if (run->info.page_count >= writer->page_info_capacity && vy_run_alloc_page_info(run, &writer->page_info_capacity) != 0) return -1; const char *key = tuple_extract_key(first_stmt, writer->cmp_def, NULL); if (key == NULL) return -1; if (run->info.page_count == 0) { assert(run->info.min_key == NULL); run->info.min_key = vy_key_dup(key); if (run->info.min_key == NULL) return -1; } struct vy_page_info *page = run->page_info + run->info.page_count; if (vy_page_info_create(page, writer->data_xlog.offset, key) != 0) return -1; xlog_tx_begin(&writer->data_xlog); return 0; } /** * Write @a stmt into a current page. * @param writer Run writer. * @param stmt Statement to write. * * @retval -1 Memory or IO error. * @retval 0 Success. 
*/ static int vy_run_writer_write_to_page(struct vy_run_writer *writer, struct tuple *stmt) { if (writer->last_stmt != NULL) vy_stmt_unref_if_possible(writer->last_stmt); writer->last_stmt = stmt; vy_stmt_ref_if_possible(stmt); struct vy_run *run = writer->run; struct vy_page_info *page = run->page_info + run->info.page_count; uint32_t *offset = (uint32_t *)ibuf_alloc(&writer->row_index_buf, sizeof(uint32_t)); if (offset == NULL) { diag_set(OutOfMemory, sizeof(uint32_t), "ibuf", "row index"); return -1; } *offset = page->unpacked_size; if (vy_run_dump_stmt(stmt, &writer->data_xlog, page, writer->cmp_def, writer->iid == 0) != 0) return -1; if (writer->has_bloom) { bloom_spectrum_add(&writer->bloom, tuple_hash(stmt, writer->key_def)); } int64_t lsn = vy_stmt_lsn(stmt); run->info.min_lsn = MIN(run->info.min_lsn, lsn); run->info.max_lsn = MAX(run->info.max_lsn, lsn); return 0; } /** * Finish a current page. * @param writer Run writer. * @retval -1 Memory or IO error. * @retval 0 Success. */ static int vy_run_writer_end_page(struct vy_run_writer *writer) { struct vy_run *run = writer->run; struct vy_page_info *page = run->page_info + run->info.page_count; assert(page->row_count > 0); assert(ibuf_used(&writer->row_index_buf) == sizeof(uint32_t) * page->row_count); struct xrow_header xrow; uint32_t *row_index = (uint32_t *)writer->row_index_buf.rpos; if (vy_row_index_encode(row_index, page->row_count, &xrow) < 0) return -1; ssize_t written = xlog_write_row(&writer->data_xlog, &xrow); if (written < 0) return -1; page->row_index_offset = page->unpacked_size; page->unpacked_size += written; written = xlog_tx_commit(&writer->data_xlog); if (written == 0) written = xlog_flush(&writer->data_xlog); if (written < 0) return -1; page->size = written; run->info.page_count++; vy_run_acct_page(run, page); ibuf_reset(&writer->row_index_buf); return 0; } int vy_run_writer_append_stmt(struct vy_run_writer *writer, struct tuple *stmt) { int rc = -1; size_t region_svp = 
region_used(&fiber()->gc); if (!xlog_is_open(&writer->data_xlog) && vy_run_writer_create_xlog(writer) != 0) goto out; if (ibuf_used(&writer->row_index_buf) == 0 && vy_run_writer_start_page(writer, stmt) != 0) goto out; if (vy_run_writer_write_to_page(writer, stmt) != 0) goto out; if (obuf_size(&writer->data_xlog.obuf) >= writer->page_size && vy_run_writer_end_page(writer) != 0) goto out; rc = 0; out: region_truncate(&fiber()->gc, region_svp); return rc; } /** * Destroy a run writer. * @param writer Writer to destroy. * @param reuse_fd True in a case of success run write. And else * false. */ static void vy_run_writer_destroy(struct vy_run_writer *writer, bool reuse_fd) { if (writer->last_stmt != NULL) vy_stmt_unref_if_possible(writer->last_stmt); if (xlog_is_open(&writer->data_xlog)) xlog_close(&writer->data_xlog, reuse_fd); if (writer->has_bloom) bloom_spectrum_destroy(&writer->bloom, runtime.quota); ibuf_destroy(&writer->row_index_buf); } int vy_run_writer_commit(struct vy_run_writer *writer) { int rc = -1; size_t region_svp = region_used(&fiber()->gc); if (ibuf_used(&writer->row_index_buf) != 0 && vy_run_writer_end_page(writer) != 0) goto out; struct vy_run *run = writer->run; if (vy_run_is_empty(run)) { vy_run_writer_destroy(writer, false); rc = 0; goto out; } assert(writer->last_stmt != NULL); const char *key = tuple_extract_key(writer->last_stmt, writer->cmp_def, NULL); if (key == NULL) goto out; assert(run->info.max_key == NULL); run->info.max_key = vy_key_dup(key); if (run->info.max_key == NULL) goto out; /* Sync data and link the file to the final name. 
*/ if (xlog_sync(&writer->data_xlog) < 0 || xlog_rename(&writer->data_xlog) < 0) goto out; if (writer->has_bloom) { bloom_spectrum_choose(&writer->bloom, &run->info.bloom); run->info.has_bloom = true; } if (vy_run_write_index(run, writer->dirpath, writer->space_id, writer->iid) != 0) goto out; run->fd = writer->data_xlog.fd; vy_run_writer_destroy(writer, true); rc = 0; out: region_truncate(&fiber()->gc, region_svp); return rc; } void vy_run_writer_abort(struct vy_run_writer *writer) { vy_run_writer_destroy(writer, false); } int vy_run_rebuild_index(struct vy_run *run, const char *dir, uint32_t space_id, uint32_t iid, const struct key_def *cmp_def, const struct key_def *key_def, struct tuple_format *mem_format, struct tuple_format *upsert_format, const struct index_opts *opts) { assert(run->info.has_bloom == false); assert(run->page_info == NULL); struct region *region = &fiber()->gc; size_t mem_used = region_used(region); struct xlog_cursor cursor; char path[PATH_MAX]; vy_run_snprint_path(path, sizeof(path), dir, space_id, iid, run->id, VY_FILE_RUN); say_info("rebuilding index for `%s'", path); if (xlog_cursor_open(&cursor, path)) return -1; int rc = 0; uint32_t page_info_capacity = 0; uint32_t run_row_count = 0; const char *key = NULL; int64_t max_lsn = 0; int64_t min_lsn = INT64_MAX; off_t page_offset, next_page_offset = xlog_cursor_pos(&cursor); while ((rc = xlog_cursor_next_tx(&cursor)) == 0) { page_offset = next_page_offset; next_page_offset = xlog_cursor_pos(&cursor); if (run->info.page_count == page_info_capacity && vy_run_alloc_page_info(run, &page_info_capacity) != 0) goto close_err; const char *page_min_key = NULL; uint32_t page_row_count = 0; uint64_t page_row_index_offset = 0; uint64_t row_offset = xlog_cursor_tx_pos(&cursor); struct xrow_header xrow; while ((rc = xlog_cursor_next_row(&cursor, &xrow)) == 0) { if (xrow.type == VY_RUN_ROW_INDEX) { page_row_index_offset = row_offset; row_offset = xlog_cursor_tx_pos(&cursor); continue; } ++page_row_count; 
struct tuple *tuple = vy_stmt_decode(&xrow, cmp_def, mem_format, upsert_format, iid == 0); if (tuple == NULL) goto close_err; key = tuple_extract_key(tuple, cmp_def, NULL); tuple_unref(tuple); if (key == NULL) goto close_err; if (run->info.min_key == NULL) { run->info.min_key = vy_key_dup(key); if (run->info.min_key == NULL) goto close_err; } if (page_min_key == NULL) page_min_key = key; if (xrow.lsn > max_lsn) max_lsn = xrow.lsn; if (xrow.lsn < min_lsn) min_lsn = xrow.lsn; row_offset = xlog_cursor_tx_pos(&cursor); } struct vy_page_info *info; info = run->page_info + run->info.page_count; if (vy_page_info_create(info, page_offset, page_min_key) != 0) goto close_err; info->row_count = page_row_count; info->size = next_page_offset - page_offset; info->unpacked_size = xlog_cursor_tx_pos(&cursor); info->row_index_offset = page_row_index_offset; ++run->info.page_count; run_row_count += page_row_count; vy_run_acct_page(run, info); region_truncate(region, mem_used); } if (key != NULL) { run->info.max_key = vy_key_dup(key); if (run->info.max_key == NULL) goto close_err; } run->info.max_lsn = max_lsn; run->info.min_lsn = min_lsn; if (opts->bloom_fpr >= 1) goto done; if (xlog_cursor_reset(&cursor) != 0) goto close_err; if (bloom_create(&run->info.bloom, run_row_count, opts->bloom_fpr, runtime.quota) != 0) { diag_set(OutOfMemory, 0, "bloom_create", "bloom"); goto close_err; } struct xrow_header xrow; while ((rc = xlog_cursor_next(&cursor, &xrow, false)) == 0) { if (xrow.type == VY_RUN_ROW_INDEX) continue; struct tuple *tuple = vy_stmt_decode(&xrow, cmp_def, mem_format, upsert_format, iid == 0); if (tuple == NULL) goto close_err; bloom_add(&run->info.bloom, tuple_hash(tuple, key_def)); } run->info.has_bloom = true; done: region_truncate(region, mem_used); run->fd = cursor.fd; xlog_cursor_close(&cursor, true); /* New run index is ready for write, unlink old file if exists */ vy_run_snprint_path(path, sizeof(path), dir, space_id, iid, run->id, VY_FILE_INDEX); if (unlink(path) < 
0 && errno != ENOENT) { diag_set(SystemError, "failed to unlink file '%s'", path); goto close_err; } if (vy_run_write_index(run, dir, space_id, iid) != 0) goto close_err; return 0; close_err: vy_run_clear(run); region_truncate(region, mem_used); xlog_cursor_close(&cursor, false); return -1; } int vy_run_remove_files(const char *dir, uint32_t space_id, uint32_t iid, int64_t run_id) { ERROR_INJECT(ERRINJ_VY_GC, {say_error("error injection: vinyl run %lld not deleted", (long long)run_id); return -1;}); int ret = 0; char path[PATH_MAX]; for (int type = 0; type < vy_file_MAX; type++) { vy_run_snprint_path(path, sizeof(path), dir, space_id, iid, run_id, type); say_info("removing %s", path); if (coio_unlink(path) < 0 && errno != ENOENT) { say_syserror("error while removing %s", path); ret = -1; } } return ret; } /** * Read a page with stream->page_no from the run and save it in stream->page. * Support function of slice stream. * @param stream - the stream. * @return 0 on success, -1 of memory or read error (diag is set). */ static NODISCARD int vy_slice_stream_read_page(struct vy_slice_stream *stream) { struct vy_run *run = stream->slice->run; assert(stream->page == NULL); ZSTD_DStream *zdctx = vy_env_get_zdctx(run->env); if (zdctx == NULL) return -1; struct vy_page_info *page_info = vy_run_page_info(run, stream->page_no); stream->page = vy_page_new(page_info); if (stream->page == NULL) return -1; if (vy_page_read(stream->page, page_info, run, zdctx) != 0) { vy_page_delete(stream->page); stream->page = NULL; return -1; } return 0; } /** * Binary search in a run for the given key. 
Find the first position with * a tuple greater or equal to slice * @retval 0 success * @retval -1 read or memory error */ static NODISCARD int vy_slice_stream_search(struct vy_stmt_stream *virt_stream) { assert(virt_stream->iface->start == vy_slice_stream_search); struct vy_slice_stream *stream = (struct vy_slice_stream *)virt_stream; assert(stream->page == NULL); if (stream->slice->begin == NULL) { /* Already at the beginning */ assert(stream->page_no == 0); assert(stream->pos_in_page == 0); return 0; } if (vy_slice_stream_read_page(stream) != 0) return -1; /** * Binary search in page. Find the first position in page with * tuple >= stream->slice->begin. */ uint32_t beg = 0; uint32_t end = stream->page->row_count; while (beg != end) { uint32_t mid = beg + (end - beg) / 2; struct tuple *fnd_key = vy_page_stmt(stream->page, mid, stream->cmp_def, stream->format, stream->upsert_format, stream->is_primary); if (fnd_key == NULL) return -1; int cmp = vy_tuple_compare_with_key(fnd_key, stream->slice->begin, stream->cmp_def); if (cmp < 0) beg = mid + 1; else end = mid; tuple_unref(fnd_key); } stream->pos_in_page = end; if (stream->pos_in_page == stream->page->row_count) { /* The first tuple is in the beginning of the next page */ vy_page_delete(stream->page); stream->page = NULL; stream->page_no++; stream->pos_in_page = 0; } return 0; } /** * Get the value from the stream and move to the next position. * Set *ret to the value or NULL if EOF. * @param virt_stream - virtual stream. * @param ret - pointer to pointer to the result. * @return 0 on success, -1 on memory or read error. 
*/ static NODISCARD int vy_slice_stream_next(struct vy_stmt_stream *virt_stream, struct tuple **ret) { assert(virt_stream->iface->next == vy_slice_stream_next); struct vy_slice_stream *stream = (struct vy_slice_stream *)virt_stream; *ret = NULL; /* If the slice is ended, return EOF */ if (stream->page_no > stream->slice->last_page_no) return 0; /* If current page is not already read, read it */ if (stream->page == NULL && vy_slice_stream_read_page(stream) != 0) return -1; /* Read current tuple from the page */ struct tuple *tuple = vy_page_stmt(stream->page, stream->pos_in_page, stream->cmp_def, stream->format, stream->upsert_format, stream->is_primary); if (tuple == NULL) /* Read or memory error */ return -1; /* Check that the tuple is not out of slice bounds = */ if (stream->slice->end != NULL && stream->page_no >= stream->slice->last_page_no && vy_tuple_compare_with_key(tuple, stream->slice->end, stream->cmp_def) >= 0) return 0; /* We definitely has the next non-null tuple. Save it in stream */ if (stream->tuple != NULL) tuple_unref(stream->tuple); stream->tuple = tuple; *ret = tuple; /* Increment position */ stream->pos_in_page++; /* Check whether the position is out of page */ struct vy_page_info *page_info = vy_run_page_info(stream->slice->run, stream->page_no); if (stream->pos_in_page >= page_info->row_count) { /** * Out of page. Free page, move the position to the next page * and * nullify page pointer to read it on the next iteration. */ vy_page_delete(stream->page); stream->page = NULL; stream->page_no++; stream->pos_in_page = 0; } return 0; } /** * Free resources. 
*/ static void vy_slice_stream_close(struct vy_stmt_stream *virt_stream) { assert(virt_stream->iface->close == vy_slice_stream_close); struct vy_slice_stream *stream = (struct vy_slice_stream *)virt_stream; if (stream->page != NULL) { vy_page_delete(stream->page); stream->page = NULL; } if (stream->tuple != NULL) { tuple_unref(stream->tuple); stream->tuple = NULL; } } static const struct vy_stmt_stream_iface vy_slice_stream_iface = { .start = vy_slice_stream_search, .next = vy_slice_stream_next, .stop = NULL, .close = vy_slice_stream_close }; void vy_slice_stream_open(struct vy_slice_stream *stream, struct vy_slice *slice, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary) { stream->base.iface = &vy_slice_stream_iface; stream->page_no = slice->first_page_no; stream->pos_in_page = 0; /* We'll find it later */ stream->page = NULL; stream->tuple = NULL; stream->slice = slice; stream->cmp_def = cmp_def; stream->format = format; stream->upsert_format = upsert_format; stream->is_primary = is_primary; } tarantool_1.9.1.26.g63eb81e3c/src/box/authentication.cc0000664000000000000000000000661313306565107021127 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "authentication.h" #include "user.h" #include "session.h" #include "msgpuck.h" #include "error.h" static char zero_hash[SCRAMBLE_SIZE]; void authenticate(const char *user_name, uint32_t len, const char *tuple) { struct user *user = user_find_by_name_xc(user_name, len); struct session *session = current_session(); uint32_t part_count; uint32_t scramble_len; const char *scramble; struct on_auth_trigger_ctx auth_res = { user->def->name, true }; /* * Allow authenticating back to GUEST user without * checking a password. This is useful for connection * pooling. */ part_count = mp_decode_array(&tuple); if (part_count == 0 && user->def->uid == GUEST && memcmp(user->def->hash2, zero_hash, SCRAMBLE_SIZE) == 0) { /* No password is set for GUEST, OK. */ goto ok; } access_check_session_xc(user); if (part_count < 2) { /* Expected at least: authentication mechanism and data. */ tnt_raise(ClientError, ER_INVALID_MSGPACK, "authentication request body"); } mp_next(&tuple); /* Skip authentication mechanism. */ if (mp_typeof(*tuple) == MP_STR) { scramble = mp_decode_str(&tuple, &scramble_len); } else if (mp_typeof(*tuple) == MP_BIN) { /* * scramble is not a character stream, so some * codecs automatically pack it as MP_BIN */ scramble = mp_decode_bin(&tuple, &scramble_len); } else { tnt_raise(ClientError, ER_INVALID_MSGPACK, "authentication scramble"); } if (scramble_len != SCRAMBLE_SIZE) { /* Authentication mechanism, data. 
*/ tnt_raise(ClientError, ER_INVALID_MSGPACK, "invalid scramble size"); } if (scramble_check(scramble, session->salt, user->def->hash2)) { auth_res.is_authenticated = false; if (session_run_on_auth_triggers(&auth_res) != 0) diag_raise(); tnt_raise(ClientError, ER_PASSWORD_MISMATCH, user->def->name); } /* check and run auth triggers on success */ if (! rlist_empty(&session_on_auth) && session_run_on_auth_triggers(&auth_res) != 0) diag_raise(); ok: credentials_init(&session->credentials, user->auth_token, user->def->uid); } tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_rtree.c0000664000000000000000000002502413306565107020275 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "memtx_rtree.h" #include #include #include "errinj.h" #include "fiber.h" #include "trivia/util.h" #include "tuple.h" #include "space.h" #include "memtx_engine.h" /* {{{ Utilities. *************************************************/ static inline int mp_decode_num(const char **data, uint32_t fieldno, double *ret) { if (mp_read_double(data, ret) != 0) { diag_set(ClientError, ER_FIELD_TYPE, fieldno + TUPLE_INDEX_BASE, field_type_strs[FIELD_TYPE_NUMBER]); return -1; } return 0; } /** * Extract coordinates of rectangle from message packed string. * There must be or numbers in that string. */ static inline int mp_decode_rect(struct rtree_rect *rect, unsigned dimension, const char *mp, unsigned count, const char *what) { coord_t c; if (count == dimension) { /* point */ for (unsigned i = 0; i < dimension; i++) { if (mp_decode_num(&mp, i, &c) < 0) return -1; rect->coords[i * 2] = c; rect->coords[i * 2 + 1] = c; } } else if (count == dimension * 2) { /* box */ for (unsigned i = 0; i < dimension; i++) { if (mp_decode_num(&mp, i, &c) < 0) return -1; rect->coords[i * 2] = c; } for (unsigned i = 0; i < dimension; i++) { if (mp_decode_num(&mp, i + dimension, &c) < 0) return -1; rect->coords[i * 2 + 1] = c; } } else { diag_set(ClientError, ER_RTREE_RECT, what, dimension, dimension * 2); return -1; } rtree_rect_normalize(rect, dimension); return 0; } /** * Extract rectangle from message packed key. 
* Due to historical issues, * in key a rectangle could be written in two variants: * a)array with appropriate number of coordinates * b)array with on element - array with appropriate number of coordinates */ static inline int mp_decode_rect_from_key(struct rtree_rect *rect, unsigned dimension, const char *mp, uint32_t part_count) { if (part_count == 1) part_count = mp_decode_array(&mp); return mp_decode_rect(rect, dimension, mp, part_count, "Key"); } static inline int extract_rectangle(struct rtree_rect *rect, const struct tuple *tuple, struct index_def *index_def) { assert(index_def->key_def->part_count == 1); const char *elems = tuple_field(tuple, index_def->key_def->parts[0].fieldno); unsigned dimension = index_def->opts.dimension; uint32_t count = mp_decode_array(&elems); return mp_decode_rect(rect, dimension, elems, count, "Field"); } /* {{{ MemtxRTree Iterators ****************************************/ struct index_rtree_iterator { struct iterator base; struct rtree_iterator impl; /** Memory pool the iterator was allocated from. 
*/ struct mempool *pool; }; static void index_rtree_iterator_free(struct iterator *i) { struct index_rtree_iterator *itr = (struct index_rtree_iterator *)i; rtree_iterator_destroy(&itr->impl); mempool_free(itr->pool, itr); } static int index_rtree_iterator_next(struct iterator *i, struct tuple **ret) { struct index_rtree_iterator *itr = (struct index_rtree_iterator *)i; *ret = (struct tuple *)rtree_iterator_next(&itr->impl); return 0; } /* }}} */ /* {{{ MemtxRTree **********************************************************/ static void memtx_rtree_index_destroy(struct index *base) { struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; rtree_destroy(&index->tree); free(index); } static ssize_t memtx_rtree_index_size(struct index *base) { struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; return rtree_number_of_records(&index->tree); } static ssize_t memtx_rtree_index_bsize(struct index *base) { struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; return rtree_used_size(&index->tree); } static ssize_t memtx_rtree_index_count(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { if (type == ITER_ALL) return memtx_rtree_index_size(base); /* optimization */ return generic_index_count(base, type, key, part_count); } static int memtx_rtree_index_get(struct index *base, const char *key, uint32_t part_count, struct tuple **result) { struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; struct rtree_iterator iterator; rtree_iterator_init(&iterator); struct rtree_rect rect; if (mp_decode_rect_from_key(&rect, index->dimension, key, part_count)) unreachable(); *result = NULL; if (rtree_search(&index->tree, &rect, SOP_OVERLAPS, &iterator)) *result = (struct tuple *)rtree_iterator_next(&iterator); rtree_iterator_destroy(&iterator); return 0; } static int memtx_rtree_index_replace(struct index *base, struct tuple *old_tuple, struct tuple *new_tuple, enum dup_replace_mode mode, struct tuple 
**result) { (void)mode; struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; struct rtree_rect rect; if (new_tuple) { if (extract_rectangle(&rect, new_tuple, base->def) != 0) return -1; rtree_insert(&index->tree, &rect, new_tuple); } if (old_tuple) { if (extract_rectangle(&rect, old_tuple, base->def) != 0) return -1; if (!rtree_remove(&index->tree, &rect, old_tuple)) old_tuple = NULL; } *result = old_tuple; return 0; } static struct iterator * memtx_rtree_index_create_iterator(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; struct memtx_engine *memtx = (struct memtx_engine *)base->engine; struct rtree_rect rect; if (part_count == 0) { if (type != ITER_ALL) { diag_set(UnsupportedIndexFeature, base->def, "empty keys for requested iterator type"); return NULL; } } else if (mp_decode_rect_from_key(&rect, index->dimension, key, part_count)) { return NULL; } enum spatial_search_op op; switch (type) { case ITER_ALL: op = SOP_ALL; break; case ITER_EQ: op = SOP_EQUALS; break; case ITER_GT: op = SOP_STRICT_CONTAINS; break; case ITER_GE: op = SOP_CONTAINS; break; case ITER_LT: op = SOP_STRICT_BELONGS; break; case ITER_LE: op = SOP_BELONGS; break; case ITER_OVERLAPS: op = SOP_OVERLAPS; break; case ITER_NEIGHBOR: op = SOP_NEIGHBOR; break; default: diag_set(UnsupportedIndexFeature, base->def, "requested iterator type"); return NULL; } struct index_rtree_iterator *it = mempool_alloc(&memtx->rtree_iterator_pool); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct index_rtree_iterator), "memtx_rtree_index", "iterator"); return NULL; } iterator_create(&it->base, base); it->pool = &memtx->rtree_iterator_pool; it->base.next = index_rtree_iterator_next; it->base.free = index_rtree_iterator_free; rtree_iterator_init(&it->impl); rtree_search(&index->tree, &rect, op, &it->impl); return (struct iterator *)it; } static void memtx_rtree_index_begin_build(struct index 
*base) { struct memtx_rtree_index *index = (struct memtx_rtree_index *)base; rtree_purge(&index->tree); } static const struct index_vtab memtx_rtree_index_vtab = { /* .destroy = */ memtx_rtree_index_destroy, /* .commit_create = */ generic_index_commit_create, /* .commit_drop = */ generic_index_commit_drop, /* .update_def = */ generic_index_update_def, /* .size = */ memtx_rtree_index_size, /* .bsize = */ memtx_rtree_index_bsize, /* .min = */ generic_index_min, /* .max = */ generic_index_max, /* .random = */ generic_index_random, /* .count = */ memtx_rtree_index_count, /* .get = */ memtx_rtree_index_get, /* .replace = */ memtx_rtree_index_replace, /* .create_iterator = */ memtx_rtree_index_create_iterator, /* .create_snapshot_iterator = */ generic_index_create_snapshot_iterator, /* .info = */ generic_index_info, /* .begin_build = */ memtx_rtree_index_begin_build, /* .reserve = */ generic_index_reserve, /* .build_next = */ generic_index_build_next, /* .end_build = */ generic_index_end_build, }; struct memtx_rtree_index * memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def) { assert(def->key_def->part_count == 1); assert(def->key_def->parts[0].type == FIELD_TYPE_ARRAY); assert(def->opts.is_unique == false); if (def->opts.dimension < 1 || def->opts.dimension > RTREE_MAX_DIMENSION) { diag_set(UnsupportedIndexFeature, def, tt_sprintf("dimension (%lld): must belong to " "range [%u, %u]", def->opts.dimension, 1, RTREE_MAX_DIMENSION)); return NULL; } assert((int)RTREE_EUCLID == (int)RTREE_INDEX_DISTANCE_TYPE_EUCLID); assert((int)RTREE_MANHATTAN == (int)RTREE_INDEX_DISTANCE_TYPE_MANHATTAN); enum rtree_distance_type distance_type = (enum rtree_distance_type)def->opts.distance; memtx_index_arena_init(); if (!mempool_is_initialized(&memtx->rtree_iterator_pool)) { mempool_create(&memtx->rtree_iterator_pool, cord_slab_cache(), sizeof(struct index_rtree_iterator)); } struct memtx_rtree_index *index = (struct memtx_rtree_index *)calloc(1, sizeof(*index)); if 
(index == NULL) { diag_set(OutOfMemory, sizeof(*index), "malloc", "struct memtx_rtree_index"); return NULL; } if (index_create(&index->base, (struct engine *)memtx, &memtx_rtree_index_vtab, def) != 0) { free(index); return NULL; } index->dimension = def->opts.dimension; rtree_init(&index->tree, index->dimension, MEMTX_EXTENT_SIZE, memtx_index_extent_alloc, memtx_index_extent_free, NULL, distance_type); return index; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_cache.h0000664000000000000000000002135613306565107017534 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_CACHE_H #define INCLUDES_TARANTOOL_BOX_VY_CACHE_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include "iterator_type.h" #include "vy_stmt.h" /* for comparators */ #include "vy_read_view.h" #include "vy_stat.h" #include "small/mempool.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * A record in tuple cache */ struct vy_cache_entry { /* Cache */ struct vy_cache *cache; /* Statement in cache */ struct tuple *stmt; /* Link in LRU list */ struct rlist in_lru; /* VY_CACHE_LEFT_LINKED and/or VY_CACHE_RIGHT_LINKED, see * description of them for more information */ uint32_t flags; /* Number of parts in key when the value was the first in EQ search */ uint8_t left_boundary_level; /* Number of parts in key when the value was the last in EQ search */ uint8_t right_boundary_level; }; /** * Internal comparator (1) for BPS tree. */ static inline int vy_cache_tree_cmp(struct vy_cache_entry *a, struct vy_cache_entry *b, struct key_def *cmp_def) { return vy_tuple_compare(a->stmt, b->stmt, cmp_def); } /** * Internal comparator (2) for BPS tree. */ static inline int vy_cache_tree_key_cmp(struct vy_cache_entry *a, const struct tuple *b, struct key_def *cmp_def) { return vy_stmt_compare(a->stmt, b, cmp_def); } #define VY_CACHE_TREE_EXTENT_SIZE (16 * 1024) #define BPS_TREE_NAME vy_cache_tree #define BPS_TREE_BLOCK_SIZE 512 #define BPS_TREE_EXTENT_SIZE VY_CACHE_TREE_EXTENT_SIZE #define BPS_TREE_COMPARE(a, b, cmp_def) vy_cache_tree_cmp(a, b, cmp_def) #define BPS_TREE_COMPARE_KEY(a, b, cmp_def) vy_cache_tree_key_cmp(a, b, cmp_def) #define bps_tree_elem_t struct vy_cache_entry * #define bps_tree_key_t const struct tuple * #define bps_tree_arg_t struct key_def * #define BPS_TREE_NO_DEBUG #include "salad/bps_tree.h" #undef BPS_TREE_NAME #undef BPS_TREE_BLOCK_SIZE #undef BPS_TREE_EXTENT_SIZE #undef BPS_TREE_COMPARE #undef BPS_TREE_COMPARE_KEY #undef bps_tree_elem_t #undef bps_tree_key_t #undef bps_tree_arg_t #undef BPS_TREE_NO_DEBUG /** * Environment of the cache */ struct vy_cache_env { /** Common LRU list of read cache. 
The first element is the newest */ struct rlist cache_lru; /** Common mempool for vy_cache_entry struct */ struct mempool cache_entry_mempool; /** Size of memory occupied by cached tuples */ size_t mem_used; /** Max memory size that can be used for cache */ size_t mem_quota; }; /** * Initialize common cache environment. * @param e - the environment. * @param slab_cache - source of memory. */ void vy_cache_env_create(struct vy_cache_env *env, struct slab_cache *slab_cache); /** * Destroy and free resources of cache environment. * @param e - the environment. */ void vy_cache_env_destroy(struct vy_cache_env *e); /** * Set memory limit for the cache. * @param e - the environment. * @param quota - memory limit for the cache. * * This function blocks until it manages to free enough memory * to fit in the new limit. */ void vy_cache_env_set_quota(struct vy_cache_env *e, size_t quota); /** * Tuple cache (of one particular index) */ struct vy_cache { /** * Key definition for tuple comparison, includes primary * key parts */ struct key_def *cmp_def; /* Tree of cache entries */ struct vy_cache_tree cache_tree; /* The vesrion of state of cache_tree. Increments on every change */ uint32_t version; /* Saved pointer to common cache environment */ struct vy_cache_env *env; /* Cache statistics. */ struct vy_cache_stat stat; }; /** * Allocate and initialize tuple cache. * @param env - pointer to common cache environment. * @param cmp_def - key definition for tuple comparison. */ void vy_cache_create(struct vy_cache *cache, struct vy_cache_env *env, struct key_def *cmp_def); /** * Destroy and deallocate tuple cache. * @param cache - pointer to tuple cache to destroy. */ void vy_cache_destroy(struct vy_cache *cache); /** * Add a value to the cache. Can be used only if the reader read the latest * data (vlsn = INT64_MAX). * @param cache - pointer to tuple cache. * @param stmt - statement that was recently read and should be added to the * cache. 
* @param prev_stmt - previous statement that was read by the reader in one * sequence (by one iterator). * @param direction - direction in which the reader (iterator) observes data, * +1 - forward, -1 - backward. */ void vy_cache_add(struct vy_cache *cache, struct tuple *stmt, struct tuple *prev_stmt, const struct tuple *key, enum iterator_type order); /** * Find value in cache. * @return A tuple equal to key or NULL if not found. */ struct tuple * vy_cache_get(struct vy_cache *cache, const struct tuple *key); /** * Invalidate possibly cached value due to its overwriting * @param cache - pointer to tuple cache. * @param stmt - overwritten statement. * @param[out] deleted - If not NULL, then is set to deleted * statement. */ void vy_cache_on_write(struct vy_cache *cache, const struct tuple *stmt, struct tuple **deleted); /** * Cache iterator */ struct vy_cache_iterator { /* The cache */ struct vy_cache *cache; /** * Iterator type, that specifies direction, start position and stop * criteria if the key is not specified, GT and EQ are changed to * GE, LT to LE for beauty. */ enum iterator_type iterator_type; /* Search key data in terms of vinyl, vy_stmt_compare argument */ const struct tuple *key; /* LSN visibility, iterator shows values with lsn <= vlsn */ const struct vy_read_view **read_view; /* State of iterator */ /* Current position in tree */ struct vy_cache_tree_iterator curr_pos; /* stmt in current position in tree */ struct tuple *curr_stmt; /* Last version of cache */ uint32_t version; /* Is false until first .._get or .._next_.. method is called */ bool search_started; }; /** * Open an iterator over cache. * @param itr - iterator to open. * @param cache - the cache. 
* @param iterator_type - iterator type (EQ, GT, GE, LT, LE or ALL) * @param key - search key data in terms of vinyl, vy_stmt_compare argument * @param vlsn - LSN visibility, iterator shows values with lsn <= vlsn */ void vy_cache_iterator_open(struct vy_cache_iterator *itr, struct vy_cache *cache, enum iterator_type iterator_type, const struct tuple *key, const struct vy_read_view **rv); /** * Advance a cache iterator to the next statement. * The next statement is returned in @ret (NULL if EOF). * @stop flag is set if a chain was found in the cache * and so there shouldn't be statements preceding the * returned statement in memory or on disk. */ void vy_cache_iterator_next(struct vy_cache_iterator *itr, struct tuple **ret, bool *stop); /** * Advance a cache iterator to the statement following @last_stmt. * The statement is returned in @ret (NULL if EOF). */ void vy_cache_iterator_skip(struct vy_cache_iterator *itr, const struct tuple *last_stmt, struct tuple **ret, bool *stop); /** * Check if a cache iterator was invalidated and needs to be restored. * If it does, set the iterator position to the statement following * @last_stmt and return 1, otherwise return 0. */ int vy_cache_iterator_restore(struct vy_cache_iterator *itr, const struct tuple *last_stmt, struct tuple **ret, bool *stop); /** * Close a cache iterator. */ void vy_cache_iterator_close(struct vy_cache_iterator *itr); #if defined(__cplusplus) } /* extern "C" { */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_CACHE_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_rtree.h0000664000000000000000000000367313306560010020274 0ustar rootroot#ifndef TARANTOOL_BOX_MEMTX_RTREE_H_INCLUDED #define TARANTOOL_BOX_MEMTX_RTREE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "index.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct memtx_engine; struct memtx_rtree_index { struct index base; unsigned dimension; struct rtree tree; }; struct memtx_rtree_index * memtx_rtree_index_new(struct memtx_engine *memtx, struct index_def *def); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_MEMTX_RTREE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/func_def.h0000664000000000000000000000513713306560010017507 0ustar rootroot#ifndef TARANTOOL_BOX_FUNC_DEF_H_INCLUDED #define TARANTOOL_BOX_FUNC_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include /** * The supported language of the stored function. */ enum func_language { FUNC_LANGUAGE_LUA, FUNC_LANGUAGE_C, func_language_MAX, }; extern const char *func_language_strs[]; /** * Definition of a function. Function body is not stored * or replicated (yet). */ struct func_def { /** Function id. */ uint32_t fid; /** Owner of the function. */ uint32_t uid; /** * True if the function requires change of user id before * invocation. */ bool setuid; /** * The language of the stored function. */ enum func_language language; /** Function name. */ char name[0]; }; /** * @param name_len length of func_def->name * @returns size in bytes needed to allocate for struct func_def * for a function of length @a a name_len. */ static inline size_t func_def_sizeof(uint32_t name_len) { /* +1 for '\0' name terminating. 
*/ return sizeof(struct func_def) + name_len + 1; } /** * API of C stored function. */ typedef struct box_function_ctx box_function_ctx_t; typedef int (*box_function_f)(box_function_ctx_t *ctx, const char *args, const char *args_end); #endif /* TARANTOOL_BOX_FUNC_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_read_view.h0000664000000000000000000000541413306560010020417 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_READ_VIEW_H #define INCLUDES_TARANTOOL_BOX_VY_READ_VIEW_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** The state of the database the cursor should be looking at. 
*/ struct vy_read_view { /** * Consistent read view LSN. Originally read-only transactions * receive a read view lsn upon creation and do not see further * changes. * Other transactions are expected to be read-write and * have vlsn == INT64_MAX to read newest data. Once a value read * by such a transaction (T) is overwritten by another * commiting transaction, T permanently goes to read view that does * not see this change. * If T does not have any write statements by the commit time it will * be committed successfully, or aborted as conflicted otherwise. */ int64_t vlsn; /** The link in read_views of the TX manager */ struct rlist in_read_views; /** * The number of references to this read view. The global * read view has zero refs, we don't do reference * count it as it is missing from read_views list. */ int refs; /** * Is set to true when the read view which includes * a prepared but not committed transaction, is * compromised by a cascading rollback. */ bool is_aborted; }; #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_READ_VIEW_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/func.h0000664000000000000000000000577013306560010016674 0ustar rootroot#ifndef TARANTOOL_BOX_FUNC_H_INCLUDED #define TARANTOOL_BOX_FUNC_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include "small/rlist.h" #include "func_def.h" #include "user_def.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Dynamic shared module. */ struct module { /** Module dlhandle. */ void *handle; /** List of imported functions. */ struct rlist funcs; /** Count of active calls. */ size_t calls; /** True if module is being unloaded. */ bool is_unloading; }; /** * Stored function. */ struct func { struct func_def *def; /** * Anchor for module membership. */ struct rlist item; /** * For C functions, the body of the function. */ box_function_f func; /** * Each stored function keeps a handle to the * dynamic library for the C callback. */ struct module *module; /** * Authentication id of the owner of the function, * used for set-user-id functions. */ struct credentials owner_credentials; /** * Cached runtime access information. */ struct access access[BOX_USER_MAX]; }; /** * Initialize modules subsystem. */ int module_init(void); /** * Cleanup modules subsystem. */ void module_free(void); struct func * func_new(struct func_def *def); void func_update(struct func *func, struct func_def *def); void func_delete(struct func *func); /** * Call stored C function using @a args. 
*/ int func_call(struct func *func, box_function_ctx_t *ctx, const char *args, const char *args_end); int func_reload(struct func *func); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_FUNC_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/space.h0000664000000000000000000003474613306565107017055 0ustar rootroot#ifndef TARANTOOL_BOX_SPACE_H_INCLUDED #define TARANTOOL_BOX_SPACE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "user_def.h" #include "space_def.h" #include "small/rlist.h" #include "engine.h" #include "index.h" #include "error.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct space; struct engine; struct sequence; struct txn; struct request; struct port; struct tuple; struct space_vtab { /** Free a space instance. */ void (*destroy)(struct space *); /** Return binary size of a space. */ size_t (*bsize)(struct space *); int (*apply_initial_join_row)(struct space *, struct request *); int (*execute_replace)(struct space *, struct txn *, struct request *, struct tuple **result); int (*execute_delete)(struct space *, struct txn *, struct request *, struct tuple **result); int (*execute_update)(struct space *, struct txn *, struct request *, struct tuple **result); int (*execute_upsert)(struct space *, struct txn *, struct request *); void (*init_system_space)(struct space *); /** * Check an index definition for violation of * various limits. */ int (*check_index_def)(struct space *, struct index_def *); /** * Create an instance of space index. Used in alter * space before commit to WAL. The created index is * deleted with delete operator. */ struct index *(*create_index)(struct space *, struct index_def *); /** * Called by alter when a primary key is added, * after create_index is invoked for the new * key and before the write to WAL. */ int (*add_primary_key)(struct space *); /** * Called by alter when the primary key is dropped. * Do whatever is necessary with the space object, * to not crash in DML. */ void (*drop_primary_key)(struct space *); /** * Check that new fields of a space format are * compatible with existing tuples. */ int (*check_format)(struct space *new_space, struct space *old_space); /** * Called with the new empty secondary index. * Fill the new index with data from the primary * key of the space. 
*/ int (*build_secondary_key)(struct space *old_space, struct space *new_space, struct index *new_index); /** * Notify the enigne about upcoming space truncation * so that it can prepare new_space object. */ int (*prepare_truncate)(struct space *old_space, struct space *new_space); /** * Commit space truncation. Called after space truncate * record was written to WAL hence must not fail. * * The old_space is the space that was replaced with the * new_space as a result of truncation. The callback is * supposed to release resources associated with the * old_space and commit the new_space. */ void (*commit_truncate)(struct space *old_space, struct space *new_space); /** * Notify the engine about the changed space, * before it's done, to prepare 'new_space' object. */ int (*prepare_alter)(struct space *old_space, struct space *new_space); /** * Notify the engine engine after altering a space and * replacing old_space with new_space in the space cache, * to, e.g., update all references to struct space * and replace old_space with new_space. */ void (*commit_alter)(struct space *old_space, struct space *new_space); }; struct space { /** Virtual function table. */ const struct space_vtab *vtab; /** Cached runtime access information. */ struct access access[BOX_USER_MAX]; /** Engine used by this space. */ struct engine *engine; /** Triggers fired before executing a request. */ struct rlist before_replace; /** Triggers fired after space_replace() -- see txn_commit_stmt(). */ struct rlist on_replace; /** Triggers fired before space statement */ struct rlist on_stmt_begin; /** * The number of *enabled* indexes in the space. * * After all indexes are built, it is equal to the number * of non-nil members of the index[] array. */ uint32_t index_count; /** * There may be gaps index ids, i.e. index 0 and 2 may exist, * while index 1 is not defined. This member stores the * max id of a defined index in the space. It defines the * size of index_map array. 
*/ uint32_t index_id_max; /** Space meta. */ struct space_def *def; /** Sequence attached to this space or NULL. */ struct sequence *sequence; /** * Number of times the space has been truncated. * Updating this counter via _truncate space triggers * space truncation. */ uint64_t truncate_count; /** Enable/disable triggers. */ bool run_triggers; /** * Space format or NULL if space does not have format * (sysview engine, for example). */ struct tuple_format *format; /** * Sparse array of indexes defined on the space, indexed * by id. Used to quickly find index by id (for SELECTs). */ struct index **index_map; /** * Dense array of indexes defined on the space, in order * of index id. */ struct index **index; }; /** Initialize a base space instance. */ int space_create(struct space *space, struct engine *engine, const struct space_vtab *vtab, struct space_def *def, struct rlist *key_list, struct tuple_format *format); /** Get space ordinal number. */ static inline uint32_t space_id(struct space *space) { return space->def->id; } /** Get space name. */ static inline const char * space_name(const struct space *space) { return space->def->name; } /** Return true if space is temporary. */ static inline bool space_is_temporary(struct space *space) { return space->def->opts.temporary; } void space_run_triggers(struct space *space, bool yesno); /** * Get index by index id. * @return NULL if the index is not found. */ static inline struct index * space_index(struct space *space, uint32_t id) { if (id <= space->index_id_max) return space->index_map[id]; return NULL; } /** * Return key_def of the index identified by id or NULL * if there is no such index. */ struct key_def * space_index_key_def(struct space *space, uint32_t id); /** * Look up the index by id. 
*/ static inline struct index * index_find(struct space *space, uint32_t index_id) { struct index *index = space_index(space, index_id); if (index == NULL) { diag_set(ClientError, ER_NO_SUCH_INDEX, index_id, space_name(space)); diag_log(); } return index; } /** * Wrapper around index_find() which checks that * the found index is unique. */ static inline struct index * index_find_unique(struct space *space, uint32_t index_id) { struct index *index = index_find(space, index_id); if (index != NULL && !index->def->opts.is_unique) { diag_set(ClientError, ER_MORE_THAN_ONE_TUPLE); return NULL; } return index; } /** * Returns number of bytes used in memory by tuples in the space. */ size_t space_bsize(struct space *space); /** Get definition of the n-th index of the space. */ struct index_def * space_index_def(struct space *space, int n); /** * Get name of the index by its identifier and parent space. * * @param space Parent space. * @param id Index identifier. * * @retval not NULL Index name. * @retval NULL No index with the specified identifier. */ const char * index_name_by_id(struct space *space, uint32_t id); /** * Check whether or not the current user can be granted * the requested access to the space. */ int access_check_space(struct space *space, user_access_t access); static inline int space_apply_initial_join_row(struct space *space, struct request *request) { return space->vtab->apply_initial_join_row(space, request); } /** * Execute a DML request on the given space. 
*/ int space_execute_dml(struct space *space, struct txn *txn, struct request *request, struct tuple **result); static inline void init_system_space(struct space *space) { space->vtab->init_system_space(space); } static inline int space_check_index_def(struct space *space, struct index_def *index_def) { return space->vtab->check_index_def(space, index_def); } static inline struct index * space_create_index(struct space *space, struct index_def *index_def) { return space->vtab->create_index(space, index_def); } static inline int space_add_primary_key(struct space *space) { return space->vtab->add_primary_key(space); } static inline int space_check_format(struct space *new_space, struct space *old_space) { assert(old_space->vtab == new_space->vtab); return new_space->vtab->check_format(new_space, old_space); } static inline void space_drop_primary_key(struct space *space) { space->vtab->drop_primary_key(space); } static inline int space_build_secondary_key(struct space *old_space, struct space *new_space, struct index *new_index) { assert(old_space->vtab == new_space->vtab); return new_space->vtab->build_secondary_key(old_space, new_space, new_index); } static inline int space_prepare_truncate(struct space *old_space, struct space *new_space) { assert(old_space->vtab == new_space->vtab); return new_space->vtab->prepare_truncate(old_space, new_space); } static inline void space_commit_truncate(struct space *old_space, struct space *new_space) { assert(old_space->vtab == new_space->vtab); new_space->vtab->commit_truncate(old_space, new_space); } static inline int space_prepare_alter(struct space *old_space, struct space *new_space) { assert(old_space->vtab == new_space->vtab); return new_space->vtab->prepare_alter(old_space, new_space); } static inline void space_commit_alter(struct space *old_space, struct space *new_space) { assert(old_space->vtab == new_space->vtab); new_space->vtab->commit_alter(old_space, new_space); } static inline bool space_is_memtx(struct 
space *space) { return space->engine->id == 0; } /** Return true if space is run under vinyl engine. */ static inline bool space_is_vinyl(struct space *space) { return strcmp(space->engine->name, "vinyl") == 0; } void space_noop(struct space *space); struct field_def; /** * Allocate and initialize a space. * @param space_def Space definition. * @param key_list List of index_defs. * @retval Space object. */ struct space * space_new(struct space_def *space_def, struct rlist *key_list); /** Destroy and free a space. */ void space_delete(struct space *space); /** * Dump space definition (key definitions, key count) * for ALTER. */ void space_dump_def(const struct space *space, struct rlist *key_list); /** * Exchange two index objects in two spaces. Used * to update a space with a newly built index, while * making sure the old index doesn't leak. */ void space_swap_index(struct space *lhs, struct space *rhs, uint32_t lhs_id, uint32_t rhs_id); /** Rebuild index map in a space after a series of swap index. */ void space_fill_index_map(struct space *space); #if defined(__cplusplus) } /* extern "C" */ static inline struct space * space_new_xc(struct space_def *space_def, struct rlist *key_list) { struct space *space = space_new(space_def, key_list); if (space == NULL) diag_raise(); return space; } static inline void access_check_space_xc(struct space *space, user_access_t access) { if (access_check_space(space, access) != 0) diag_raise(); } /** * Look up the index by id, and throw an exception if not found. */ static inline struct index * index_find_xc(struct space *space, uint32_t index_id) { struct index *index = index_find(space, index_id); if (index == NULL) diag_raise(); return index; } static inline struct index * index_find_unique_xc(struct space *space, uint32_t index_id) { struct index *index = index_find_unique(space, index_id); if (index == NULL) diag_raise(); return index; } /** * Find an index in a system space. 
Throw an error * if we somehow deal with a non-memtx space (it can't * be used for system spaces. */ static inline struct index * index_find_system_xc(struct space *space, uint32_t index_id) { if (! space_is_memtx(space)) { tnt_raise(ClientError, ER_UNSUPPORTED, space->engine->name, "system data"); } return index_find_xc(space, index_id); } static inline void space_apply_initial_join_row_xc(struct space *space, struct request *request) { if (space_apply_initial_join_row(space, request) != 0) diag_raise(); } static inline void space_check_index_def_xc(struct space *space, struct index_def *index_def) { if (space_check_index_def(space, index_def) != 0) diag_raise(); } static inline struct index * space_create_index_xc(struct space *space, struct index_def *index_def) { struct index *index = space_create_index(space, index_def); if (index == NULL) diag_raise(); return index; } static inline void space_add_primary_key_xc(struct space *space) { if (space_add_primary_key(space) != 0) diag_raise(); } static inline void space_check_format_xc(struct space *new_space, struct space *old_space) { if (space_check_format(new_space, old_space) != 0) diag_raise(); } static inline void space_build_secondary_key_xc(struct space *old_space, struct space *new_space, struct index *new_index) { if (space_build_secondary_key(old_space, new_space, new_index) != 0) diag_raise(); } static inline void space_prepare_truncate_xc(struct space *old_space, struct space *new_space) { if (space_prepare_truncate(old_space, new_space) != 0) diag_raise(); } static inline void space_prepare_alter_xc(struct space *old_space, struct space *new_space) { if (space_prepare_alter(old_space, new_space) != 0) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_SPACE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/func_def.c0000664000000000000000000000011013306560010017464 0ustar rootroot#include "func_def.h" const char *func_language_strs[] = {"LUA", "C"}; 
tarantool_1.9.1.26.g63eb81e3c/src/box/index.cc0000664000000000000000000004036613306565107017222 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "index.h" #include "tuple.h" #include "say.h" #include "schema.h" #include "user_def.h" #include "space.h" #include "iproto_constants.h" #include "txn.h" #include "rmean.h" #include "info.h" /* {{{ Utilities. 
**********************************************/ UnsupportedIndexFeature::UnsupportedIndexFeature(const char *file, unsigned line, struct index_def *index_def, const char *what) : ClientError(file, line, ER_UNKNOWN) { struct space *space = space_cache_find_xc(index_def->space_id); m_errcode = ER_UNSUPPORTED_INDEX_FEATURE; error_format_msg(this, tnt_errcode_desc(m_errcode), index_def->name, index_type_strs[index_def->type], space->def->name, space->def->engine_name, what); } struct error * BuildUnsupportedIndexFeature(const char *file, unsigned line, struct index_def *index_def, const char *what) { try { return new UnsupportedIndexFeature(file, line, index_def, what); } catch (OutOfMemory *e) { return e; } } int key_validate(const struct index_def *index_def, enum iterator_type type, const char *key, uint32_t part_count) { assert(key != NULL || part_count == 0); if (part_count == 0) { /* * Zero key parts are allowed: * - for TREE index, all iterator types, * - ITER_ALL iterator type, all index types * - ITER_GT iterator in HASH index (legacy) */ if (index_def->type == TREE || type == ITER_ALL || (index_def->type == HASH && type == ITER_GT)) return 0; /* Fall through. 
*/ } if (index_def->type == RTREE) { unsigned d = index_def->opts.dimension; if (part_count != 1 && part_count != d && part_count != d * 2) { diag_set(ClientError, ER_KEY_PART_COUNT, d * 2, part_count); return -1; } if (part_count == 1) { enum mp_type mp_type = mp_typeof(*key); if (key_mp_type_validate(FIELD_TYPE_ARRAY, mp_type, ER_KEY_PART_TYPE, 0, false)) return -1; uint32_t array_size = mp_decode_array(&key); if (array_size != d && array_size != d * 2) { diag_set(ClientError, ER_RTREE_RECT, "Key", d, d * 2); return -1; } for (uint32_t part = 0; part < array_size; part++) { enum mp_type mp_type = mp_typeof(*key); mp_next(&key); if (key_mp_type_validate(FIELD_TYPE_NUMBER, mp_type, ER_KEY_PART_TYPE, 0, false)) return -1; } } else { for (uint32_t part = 0; part < part_count; part++) { enum mp_type mp_type = mp_typeof(*key); mp_next(&key); if (key_mp_type_validate(FIELD_TYPE_NUMBER, mp_type, ER_KEY_PART_TYPE, part, false)) return -1; } } } else { if (part_count > index_def->key_def->part_count) { diag_set(ClientError, ER_KEY_PART_COUNT, index_def->key_def->part_count, part_count); return -1; } /* Partial keys are allowed only for TREE index type. 
*/ if (index_def->type != TREE && part_count < index_def->key_def->part_count) { diag_set(ClientError, ER_PARTIAL_KEY, index_type_strs[index_def->type], index_def->key_def->part_count, part_count); return -1; } if (key_validate_parts(index_def->key_def, key, part_count, true) != 0) return -1; } return 0; } int exact_key_validate(struct key_def *key_def, const char *key, uint32_t part_count) { assert(key != NULL || part_count == 0); if (key_def->part_count != part_count) { diag_set(ClientError, ER_EXACT_MATCH, key_def->part_count, part_count); return -1; } return key_validate_parts(key_def, key, part_count, false); } char * box_tuple_extract_key(const box_tuple_t *tuple, uint32_t space_id, uint32_t index_id, uint32_t *key_size) { struct space *space = space_cache_find(space_id); if (space == NULL) return NULL; struct index *index = index_find(space, index_id); if (index == NULL) return NULL; return tuple_extract_key(tuple, index->def->key_def, key_size); } static inline int check_index(uint32_t space_id, uint32_t index_id, struct space **space, struct index **index) { *space = space_cache_find(space_id); if (*space == NULL) return -1; if (access_check_space(*space, PRIV_R) != 0) return -1; *index = index_find(*space, index_id); if (*index == NULL) return -1; return 0; } /* }}} */ /* {{{ Public API */ ssize_t box_index_len(uint32_t space_id, uint32_t index_id) { struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; /* No tx management, len() doesn't work in vinyl anyway. */ return index_size(index); } ssize_t box_index_bsize(uint32_t space_id, uint32_t index_id) { struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; /* No tx management for statistics. 
*/ return index_bsize(index); } int box_index_random(uint32_t space_id, uint32_t index_id, uint32_t rnd, box_tuple_t **result) { assert(result != NULL); struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; /* No tx management, random() is for approximation anyway. */ if (index_random(index, rnd, result) != 0) return -1; if (*result != NULL && tuple_bless(*result) == NULL) return -1; return 0; } int box_index_get(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result) { assert(key != NULL && key_end != NULL && result != NULL); mp_tuple_assert(key, key_end); struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; if (!index->def->opts.is_unique) { diag_set(ClientError, ER_MORE_THAN_ONE_TUPLE); return -1; } uint32_t part_count = mp_decode_array(&key); if (exact_key_validate(index->def->key_def, key, part_count)) return -1; /* Start transaction in the engine. */ struct txn *txn; if (txn_begin_ro_stmt(space, &txn) != 0) return -1; if (index_get(index, key, part_count, result) != 0) { txn_rollback_stmt(); return -1; } txn_commit_ro_stmt(txn); /* Count statistics. */ rmean_collect(rmean_box, IPROTO_SELECT, 1); if (*result != NULL && tuple_bless(*result) == NULL) return -1; return 0; } int box_index_min(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result) { assert(key != NULL && key_end != NULL && result != NULL); mp_tuple_assert(key, key_end); struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; if (index->def->type != TREE) { /* Show nice error messages in Lua. */ diag_set(UnsupportedIndexFeature, index->def, "min()"); return -1; } uint32_t part_count = mp_decode_array(&key); if (key_validate(index->def, ITER_GE, key, part_count)) return -1; /* Start transaction in the engine. 
*/ struct txn *txn; if (txn_begin_ro_stmt(space, &txn) != 0) return -1; if (index_min(index, key, part_count, result) != 0) { txn_rollback_stmt(); return -1; } txn_commit_ro_stmt(txn); if (*result != NULL && tuple_bless(*result) == NULL) return -1; return 0; } int box_index_max(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result) { mp_tuple_assert(key, key_end); assert(result != NULL); struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; if (index->def->type != TREE) { /* Show nice error messages in Lua. */ diag_set(UnsupportedIndexFeature, index->def, "max()"); return -1; } uint32_t part_count = mp_decode_array(&key); if (key_validate(index->def, ITER_LE, key, part_count)) return -1; /* Start transaction in the engine. */ struct txn *txn; if (txn_begin_ro_stmt(space, &txn) != 0) return -1; if (index_max(index, key, part_count, result) != 0) { txn_rollback_stmt(); return -1; } txn_commit_ro_stmt(txn); if (*result != NULL && tuple_bless(*result) == NULL) return -1; return 0; } ssize_t box_index_count(uint32_t space_id, uint32_t index_id, int type, const char *key, const char *key_end) { assert(key != NULL && key_end != NULL); mp_tuple_assert(key, key_end); if (type < 0 || type >= iterator_type_MAX) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "Invalid iterator type"); return -1; } enum iterator_type itype = (enum iterator_type) type; struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; uint32_t part_count = mp_decode_array(&key); if (key_validate(index->def, itype, key, part_count)) return -1; /* Start transaction in the engine. 
*/ struct txn *txn; if (txn_begin_ro_stmt(space, &txn) != 0) return -1; ssize_t count = index_count(index, itype, key, part_count); if (count < 0) { txn_rollback_stmt(); return -1; } txn_commit_ro_stmt(txn); return count; } /* }}} */ /* {{{ Iterators ************************************************/ box_iterator_t * box_index_iterator(uint32_t space_id, uint32_t index_id, int type, const char *key, const char *key_end) { assert(key != NULL && key_end != NULL); mp_tuple_assert(key, key_end); if (type < 0 || type >= iterator_type_MAX) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "Invalid iterator type"); return NULL; } enum iterator_type itype = (enum iterator_type) type; struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return NULL; assert(mp_typeof(*key) == MP_ARRAY); /* checked by Lua */ uint32_t part_count = mp_decode_array(&key); if (key_validate(index->def, itype, key, part_count)) return NULL; struct txn *txn; if (txn_begin_ro_stmt(space, &txn) != 0) return NULL; struct iterator *it = index_create_iterator(index, itype, key, part_count); if (it == NULL) { txn_rollback_stmt(); return NULL; } txn_commit_ro_stmt(txn); return it; } int box_iterator_next(box_iterator_t *itr, box_tuple_t **result) { assert(result != NULL); if (iterator_next(itr, result) != 0) return -1; if (*result != NULL && tuple_bless(*result) == NULL) return -1; return 0; } void box_iterator_free(box_iterator_t *it) { iterator_delete(it); } /* }}} */ /* {{{ Introspection */ int box_index_info(uint32_t space_id, uint32_t index_id, struct info_handler *info) { struct space *space; struct index *index; if (check_index(space_id, index_id, &space, &index) != 0) return -1; index_info(index, info); return 0; } /* }}} */ /* {{{ Internal API */ void iterator_create(struct iterator *it, struct index *index) { it->next = NULL; it->free = NULL; it->schema_version = schema_version; it->space_id = index->def->space_id; it->index_id = index->def->iid; it->index = 
index; } int iterator_next(struct iterator *it, struct tuple **ret) { assert(it->next != NULL); if (unlikely(it->schema_version != schema_version)) { struct space *space = space_by_id(it->space_id); if (space == NULL) goto invalidate; struct index *index = space_index(space, it->index_id); if (index != it->index || index->schema_version > it->schema_version) goto invalidate; it->schema_version = schema_version; } return it->next(it, ret); invalidate: *ret = NULL; return 0; } void iterator_delete(struct iterator *it) { assert(it->free != NULL); it->free(it); } int index_create(struct index *index, struct engine *engine, const struct index_vtab *vtab, struct index_def *def) { def = index_def_dup(def); if (def == NULL) return -1; index->vtab = vtab; index->engine = engine; index->def = def; index->schema_version = schema_version; return 0; } void index_delete(struct index *index) { index_def_delete(index->def); index->vtab->destroy(index); } int index_build(struct index *index, struct index *pk) { ssize_t n_tuples = index_size(pk); if (n_tuples < 0) return -1; uint32_t estimated_tuples = n_tuples * 1.2; index_begin_build(index); if (index_reserve(index, estimated_tuples) < 0) return -1; if (n_tuples > 0) { say_info("Adding %zd keys to %s index '%s' ...", n_tuples, index_type_strs[index->def->type], index->def->name); } struct iterator *it = index_create_iterator(pk, ITER_ALL, NULL, 0); if (it == NULL) return -1; int rc = 0; while (true) { struct tuple *tuple; rc = iterator_next(it, &tuple); if (rc != 0) break; if (tuple == NULL) break; rc = index_build_next(index, tuple); if (rc != 0) break; } iterator_delete(it); if (rc != 0) return -1; index_end_build(index); return 0; } /* }}} */ /* {{{ Virtual method stubs */ void generic_index_commit_create(struct index *, int64_t) { } void generic_index_commit_drop(struct index *) { } void generic_index_update_def(struct index *) { } ssize_t generic_index_size(struct index *index) { diag_set(UnsupportedIndexFeature, index->def, 
"size()"); return -1; } int generic_index_min(struct index *index, const char *key, uint32_t part_count, struct tuple **result) { struct iterator *it = index_create_iterator(index, ITER_GE, key, part_count); if (it == NULL) return -1; int rc = iterator_next(it, result); iterator_delete(it); return rc; } int generic_index_max(struct index *index, const char *key, uint32_t part_count, struct tuple **result) { struct iterator *it = index_create_iterator(index, ITER_LE, key, part_count); if (it == NULL) return -1; int rc = iterator_next(it, result); iterator_delete(it); return rc; } int generic_index_random(struct index *index, uint32_t rnd, struct tuple **result) { (void)rnd; (void)result; diag_set(UnsupportedIndexFeature, index->def, "random()"); return -1; } ssize_t generic_index_count(struct index *index, enum iterator_type type, const char *key, uint32_t part_count) { struct iterator *it = index_create_iterator(index, type, key, part_count); if (it == NULL) return -1; int rc = 0; size_t count = 0; struct tuple *tuple = NULL; while ((rc = iterator_next(it, &tuple)) == 0 && tuple != NULL) ++count; iterator_delete(it); if (rc < 0) return rc; return count; } int generic_index_get(struct index *index, const char *key, uint32_t part_count, struct tuple **result) { (void)key; (void)part_count; (void)result; diag_set(UnsupportedIndexFeature, index->def, "get()"); return -1; } int generic_index_replace(struct index *index, struct tuple *old_tuple, struct tuple *new_tuple, enum dup_replace_mode mode, struct tuple **result) { (void)old_tuple; (void)new_tuple; (void)mode; (void)result; diag_set(UnsupportedIndexFeature, index->def, "replace()"); return -1; } struct snapshot_iterator * generic_index_create_snapshot_iterator(struct index *index) { diag_set(UnsupportedIndexFeature, index->def, "consistent read view"); return NULL; } void generic_index_info(struct index *index, struct info_handler *handler) { (void)index; info_begin(handler); info_end(handler); } void 
generic_index_begin_build(struct index *) { } int generic_index_reserve(struct index *, uint32_t) { return 0; } int generic_index_build_next(struct index *index, struct tuple *tuple) { struct tuple *unused; return index_replace(index, NULL, tuple, DUP_INSERT, &unused); } void generic_index_end_build(struct index *) { } /* }}} */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_tx.c0000664000000000000000000007134013306565107017115 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "vy_tx.h" #include #include #include #include #include #include #include #include "diag.h" #include "errcode.h" #include "fiber.h" #include "iproto_constants.h" #include "iterator_type.h" #include "salad/stailq.h" #include "schema.h" /* schema_version */ #include "trigger.h" #include "trivia/util.h" #include "tuple.h" #include "vy_cache.h" #include "vy_index.h" #include "vy_mem.h" #include "vy_stat.h" #include "vy_stmt.h" #include "vy_upsert.h" #include "vy_read_set.h" #include "vy_read_view.h" int write_set_cmp(struct txv *a, struct txv *b) { int rc = a->index < b->index ? -1 : a->index > b->index; if (rc == 0) return vy_tuple_compare(a->stmt, b->stmt, a->index->cmp_def); return rc; } int write_set_key_cmp(struct write_set_key *a, struct txv *b) { int rc = a->index < b->index ? -1 : a->index > b->index; if (rc == 0) return vy_stmt_compare(a->stmt, b->stmt, a->index->cmp_def); return rc; } /** * Initialize an instance of a global read view. * To be used exclusively by the transaction manager. */ static void vy_global_read_view_create(struct vy_read_view *rv, int64_t lsn) { rlist_create(&rv->in_read_views); /* * By default, the transaction is assumed to be * read-write, and it reads the latest changes of all * prepared transactions. This makes it possible to * use the tuple cache in it. 
*/ rv->vlsn = lsn; rv->refs = 0; rv->is_aborted = false; } struct tx_manager * tx_manager_new(void) { struct tx_manager *xm = calloc(1, sizeof(*xm)); if (xm == NULL) { diag_set(OutOfMemory, sizeof(*xm), "malloc", "struct tx_manager"); return NULL; } rlist_create(&xm->read_views); vy_global_read_view_create((struct vy_read_view *)&xm->global_read_view, INT64_MAX); xm->p_global_read_view = &xm->global_read_view; vy_global_read_view_create((struct vy_read_view *)&xm->committed_read_view, MAX_LSN - 1); xm->p_committed_read_view = &xm->committed_read_view; struct slab_cache *slab_cache = cord_slab_cache(); mempool_create(&xm->tx_mempool, slab_cache, sizeof(struct vy_tx)); mempool_create(&xm->txv_mempool, slab_cache, sizeof(struct txv)); mempool_create(&xm->read_interval_mempool, slab_cache, sizeof(struct vy_read_interval)); mempool_create(&xm->read_view_mempool, slab_cache, sizeof(struct vy_read_view)); return xm; } void tx_manager_delete(struct tx_manager *xm) { mempool_destroy(&xm->read_view_mempool); mempool_destroy(&xm->read_interval_mempool); mempool_destroy(&xm->txv_mempool); mempool_destroy(&xm->tx_mempool); free(xm); } /** Create or reuse an instance of a read view. */ static struct vy_read_view * tx_manager_read_view(struct tx_manager *xm) { struct vy_read_view *rv; /* * Check if the last read view can be reused. Reference * and return it if it's the case. 
*/ if (!rlist_empty(&xm->read_views)) { rv = rlist_last_entry(&xm->read_views, struct vy_read_view, in_read_views); /** Reuse an existing read view */ if ((xm->last_prepared_tx == NULL && rv->vlsn == xm->lsn) || (xm->last_prepared_tx != NULL && rv->vlsn == MAX_LSN + xm->last_prepared_tx->psn)) { rv->refs++; return rv; } } rv = mempool_alloc(&xm->read_view_mempool); if (rv == NULL) { diag_set(OutOfMemory, sizeof(*rv), "mempool", "read view"); return NULL; } rv->is_aborted = false; if (xm->last_prepared_tx != NULL) { rv->vlsn = MAX_LSN + xm->last_prepared_tx->psn; xm->last_prepared_tx->read_view = rv; rv->refs = 2; } else { rv->vlsn = xm->lsn; rv->refs = 1; } /* * Add to the tail of the list, so that tx_manager_vlsn() * works correctly. */ rlist_add_tail_entry(&xm->read_views, rv, in_read_views); return rv; } /** Dereference and possibly destroy a read view. */ static void tx_manager_destroy_read_view(struct tx_manager *xm, const struct vy_read_view *read_view) { struct vy_read_view *rv = (struct vy_read_view *) read_view; if (rv == xm->p_global_read_view) return; assert(rv->refs); if (--rv->refs == 0) { rlist_del_entry(rv, in_read_views); mempool_free(&xm->read_view_mempool, rv); } } int64_t tx_manager_vlsn(struct tx_manager *xm) { if (rlist_empty(&xm->read_views)) return xm->lsn; struct vy_read_view *oldest = rlist_first_entry(&xm->read_views, struct vy_read_view, in_read_views); return oldest->vlsn; } static struct txv * txv_new(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt) { struct tx_manager *xm = tx->xm; struct txv *v = mempool_alloc(&xm->txv_mempool); if (v == NULL) { diag_set(OutOfMemory, sizeof(*v), "mempool", "struct txv"); return NULL; } v->index = index; vy_index_ref(v->index); v->mem = NULL; v->stmt = stmt; tuple_ref(stmt); v->region_stmt = NULL; v->tx = tx; v->is_first_insert = false; v->is_overwritten = false; v->overwritten = NULL; xm->write_set_size += tuple_size(stmt); return v; } static void txv_delete(struct txv *v) { struct 
tx_manager *xm = v->tx->xm; xm->write_set_size -= tuple_size(v->stmt); tuple_unref(v->stmt); vy_index_unref(v->index); mempool_free(&xm->txv_mempool, v); } static struct vy_read_interval * vy_read_interval_new(struct vy_tx *tx, struct vy_index *index, struct tuple *left, bool left_belongs, struct tuple *right, bool right_belongs) { struct tx_manager *xm = tx->xm; struct vy_read_interval *interval; interval = mempool_alloc(&xm->read_interval_mempool); if (interval == NULL) { diag_set(OutOfMemory, sizeof(*interval), "mempool", "struct vy_read_interval"); return NULL; } interval->tx = tx; vy_index_ref(index); interval->index = index; tuple_ref(left); interval->left = left; interval->left_belongs = left_belongs; tuple_ref(right); interval->right = right; interval->right_belongs = right_belongs; interval->subtree_last = NULL; xm->read_set_size += tuple_size(left); if (left != right) xm->read_set_size += tuple_size(right); return interval; } static void vy_read_interval_delete(struct vy_read_interval *interval) { struct tx_manager *xm = interval->tx->xm; xm->read_set_size -= tuple_size(interval->left); if (interval->left != interval->right) xm->read_set_size -= tuple_size(interval->right); vy_index_unref(interval->index); tuple_unref(interval->left); tuple_unref(interval->right); mempool_free(&xm->read_interval_mempool, interval); } static struct vy_read_interval * vy_tx_read_set_free_cb(vy_tx_read_set_t *read_set, struct vy_read_interval *interval, void *arg) { (void)arg; (void)read_set; vy_index_read_set_remove(&interval->index->read_set, interval); vy_read_interval_delete(interval); return NULL; } void vy_tx_create(struct tx_manager *xm, struct vy_tx *tx) { stailq_create(&tx->log); write_set_new(&tx->write_set); tx->write_set_version = 0; tx->write_size = 0; tx->xm = xm; tx->state = VINYL_TX_READY; tx->read_view = (struct vy_read_view *)xm->p_global_read_view; vy_tx_read_set_new(&tx->read_set); tx->psn = 0; rlist_create(&tx->on_destroy); } void vy_tx_destroy(struct 
vy_tx *tx) { trigger_run(&tx->on_destroy, NULL); trigger_destroy(&tx->on_destroy); tx_manager_destroy_read_view(tx->xm, tx->read_view); struct txv *v, *tmp; stailq_foreach_entry_safe(v, tmp, &tx->log, next_in_log) { vy_stmt_counter_unacct_tuple(&v->index->stat.txw.count, v->stmt); txv_delete(v); } vy_tx_read_set_iter(&tx->read_set, NULL, vy_tx_read_set_free_cb, NULL); } /** Return true if the transaction is read-only. */ static bool vy_tx_is_ro(struct vy_tx *tx) { return write_set_empty(&tx->write_set); } /** Return true if the transaction is in read view. */ static bool vy_tx_is_in_read_view(struct vy_tx *tx) { return tx->read_view->vlsn != INT64_MAX; } /** * Send to read view all transactions that are reading key @v * modified by transaction @tx. */ static int vy_tx_send_to_read_view(struct vy_tx *tx, struct txv *v) { struct vy_tx_conflict_iterator it; vy_tx_conflict_iterator_init(&it, &v->index->read_set, v->stmt); struct vy_tx *abort; while ((abort = vy_tx_conflict_iterator_next(&it)) != NULL) { /* Don't abort self. */ if (abort == tx) continue; /* Abort only active TXs */ if (abort->state != VINYL_TX_READY) continue; /* already in (earlier) read view */ if (vy_tx_is_in_read_view(abort)) continue; struct vy_read_view *rv = tx_manager_read_view(tx->xm); if (rv == NULL) return -1; abort->read_view = rv; } return 0; } /** * Abort all transaction that are reading key @v modified * by transaction @tx. */ static void vy_tx_abort_readers(struct vy_tx *tx, struct txv *v) { struct vy_tx_conflict_iterator it; vy_tx_conflict_iterator_init(&it, &v->index->read_set, v->stmt); struct vy_tx *abort; while ((abort = vy_tx_conflict_iterator_next(&it)) != NULL) { /* Don't abort self. 
*/ if (abort == tx) continue; /* Abort only active TXs */ if (abort->state != VINYL_TX_READY) continue; abort->state = VINYL_TX_ABORT; } } struct vy_tx * vy_tx_begin(struct tx_manager *xm) { struct vy_tx *tx = mempool_alloc(&xm->tx_mempool); if (unlikely(tx == NULL)) { diag_set(OutOfMemory, sizeof(*tx), "mempool", "struct vy_tx"); return NULL; } vy_tx_create(xm, tx); return tx; } /** * Rotate the active in-memory tree if necessary and pin it to make * sure it is not dumped until the transaction is complete. */ static int vy_tx_write_prepare(struct txv *v) { struct vy_index *index = v->index; /* * Allocate a new in-memory tree if either of the following * conditions is true: * * - Generation has increased after the tree was created. * In this case we need to dump the tree as is in order to * guarantee dump consistency. * * - Schema version has increased after the tree was created. * We have to seal the tree, because we don't support mixing * statements of different formats in the same tree. */ if (unlikely(index->mem->schema_version != schema_version || index->mem->generation != *index->env->p_generation)) { if (vy_index_rotate_mem(index) != 0) return -1; } vy_mem_pin(index->mem); v->mem = index->mem; return 0; } /** * Write a single statement into an index. If the statement has * an lsregion copy then use it, else create it. * * @param index Index to write to. * @param mem In-memory tree to write to. * @param stmt Statement allocated with malloc(). * @param region_stmt NULL or the same statement as stmt, * but allocated on lsregion. * * @retval 0 Success. * @retval -1 Memory error. */ static int vy_tx_write(struct vy_index *index, struct vy_mem *mem, struct tuple *stmt, const struct tuple **region_stmt) { assert(vy_stmt_is_refable(stmt)); assert(*region_stmt == NULL || !vy_stmt_is_refable(*region_stmt)); /* * The UPSERT statement can be applied to the cached * statement, because the cache always contains only * newest REPLACE statements. 
In such a case the UPSERT, * applied to the cached statement, can be inserted * instead of the original UPSERT. */ if (vy_stmt_type(stmt) == IPROTO_UPSERT) { struct tuple *deleted = NULL; /* Invalidate cache element. */ vy_cache_on_write(&index->cache, stmt, &deleted); if (deleted != NULL) { struct tuple *applied = vy_apply_upsert(stmt, deleted, mem->cmp_def, mem->format, mem->upsert_format, false); tuple_unref(deleted); if (applied != NULL) { assert(vy_stmt_type(applied) == IPROTO_REPLACE); int rc = vy_index_set(index, mem, applied, region_stmt); tuple_unref(applied); return rc; } /* * Ignore a memory error, because it is * not critical to apply the optimization. */ } } else { /* Invalidate cache element. */ vy_cache_on_write(&index->cache, stmt, NULL); } return vy_index_set(index, mem, stmt, region_stmt); } int vy_tx_prepare(struct vy_tx *tx) { struct tx_manager *xm = tx->xm; if (vy_tx_is_ro(tx)) { assert(tx->state == VINYL_TX_READY); tx->state = VINYL_TX_COMMIT; return 0; } if (vy_tx_is_in_read_view(tx) || tx->state == VINYL_TX_ABORT) { xm->stat.conflict++; diag_set(ClientError, ER_TRANSACTION_CONFLICT); return -1; } assert(tx->state == VINYL_TX_READY); tx->state = VINYL_TX_COMMIT; assert(tx->read_view == &xm->global_read_view); tx->psn = ++xm->psn; /** Send to read view read/write intersection. */ struct txv *v; struct write_set_iterator it; write_set_ifirst(&tx->write_set, &it); while ((v = write_set_inext(&it)) != NULL) { if (vy_tx_send_to_read_view(tx, v)) return -1; } /* * Flush transactional changes to the index. * Sic: the loop below must not yield after recovery. */ /* repsert - REPLACE/UPSERT */ const struct tuple *delete = NULL, *repsert = NULL; MAYBE_UNUSED uint32_t current_space_id = 0; stailq_foreach_entry(v, &tx->log, next_in_log) { struct vy_index *index = v->index; if (index->id == 0) { /* The beginning of the new txn_stmt is met. 
*/ current_space_id = index->space_id; repsert = NULL; delete = NULL; } assert(index->space_id == current_space_id); /* Do not save statements that was overwritten by the same tx */ if (v->is_overwritten) continue; enum iproto_type type = vy_stmt_type(v->stmt); /* Optimize out INSERT + DELETE for the same key. */ if (v->is_first_insert && type == IPROTO_DELETE) continue; if (v->is_first_insert && type == IPROTO_REPLACE) { /* * There is no committed statement for the * given key or the last statement is DELETE * so we can turn REPLACE into INSERT. */ type = IPROTO_INSERT; vy_stmt_set_type(v->stmt, type); } if (!v->is_first_insert && type == IPROTO_INSERT) { /* * INSERT following REPLACE means nothing, * turn it into REPLACE. */ type = IPROTO_REPLACE; vy_stmt_set_type(v->stmt, type); } if (vy_tx_write_prepare(v) != 0) return -1; assert(v->mem != NULL); /* In secondary indexes only REPLACE/DELETE can be written. */ vy_stmt_set_lsn(v->stmt, MAX_LSN + tx->psn); const struct tuple **region_stmt = (type == IPROTO_DELETE) ? &delete : &repsert; if (vy_tx_write(index, v->mem, v->stmt, region_stmt) != 0) return -1; v->region_stmt = *region_stmt; } xm->last_prepared_tx = tx; return 0; } void vy_tx_commit(struct vy_tx *tx, int64_t lsn) { assert(tx->state == VINYL_TX_COMMIT); struct tx_manager *xm = tx->xm; xm->stat.commit++; if (xm->last_prepared_tx == tx) xm->last_prepared_tx = NULL; if (vy_tx_is_ro(tx)) goto out; assert(xm->lsn < lsn); xm->lsn = lsn; /* Fix LSNs of the records and commit changes. */ struct txv *v; stailq_foreach_entry(v, &tx->log, next_in_log) { if (v->region_stmt != NULL) { vy_stmt_set_lsn((struct tuple *)v->region_stmt, lsn); vy_index_commit_stmt(v->index, v->mem, v->region_stmt); } if (v->mem != NULL) vy_mem_unpin(v->mem); } /* Update read views of dependant transactions. 
*/ if (tx->read_view != &xm->global_read_view) tx->read_view->vlsn = lsn; out: vy_tx_destroy(tx); mempool_free(&xm->tx_mempool, tx); } static void vy_tx_rollback_after_prepare(struct vy_tx *tx) { assert(tx->state == VINYL_TX_COMMIT); struct tx_manager *xm = tx->xm; /* * There are two reasons of rollback_after_prepare: * 1) Fail in the middle of vy_tx_prepare call. * 2) Cascading rollback after WAL fail. * * If a TX is the latest prepared TX and the it is rollbacked, * it's certainly the case (2) and we should set xm->last_prepared_tx * to the previous prepared TX, if any. * But doesn't know the previous TX. * On the other hand we may expect that cascading rollback will * concern all the prepared TXs, all of them will be rollbacked * and xm->last_prepared_tx must be set to NULL in the end. * Thus we can set xm->last_prepared_tx to NULL now and it will be * correct in the end of the cascading rollback. * * We must not change xm->last_prepared_tx in all other cases, * it will be changed by the corresponding TX. */ if (xm->last_prepared_tx == tx) xm->last_prepared_tx = NULL; struct txv *v; stailq_foreach_entry(v, &tx->log, next_in_log) { if (v->region_stmt != NULL) vy_index_rollback_stmt(v->index, v->mem, v->region_stmt); if (v->mem != NULL) vy_mem_unpin(v->mem); } /* Abort read views of dependent transactions. 
*/ if (tx->read_view != &xm->global_read_view) tx->read_view->is_aborted = true; struct write_set_iterator it; write_set_ifirst(&tx->write_set, &it); while ((v = write_set_inext(&it)) != NULL) { vy_tx_abort_readers(tx, v); } } void vy_tx_rollback(struct vy_tx *tx) { struct tx_manager *xm = tx->xm; xm->stat.rollback++; if (tx->state == VINYL_TX_COMMIT) vy_tx_rollback_after_prepare(tx); vy_tx_destroy(tx); mempool_free(&xm->tx_mempool, tx); } void vy_tx_rollback_to_savepoint(struct vy_tx *tx, void *svp) { assert(tx->state == VINYL_TX_READY); struct stailq_entry *last = svp; struct stailq tail; stailq_cut_tail(&tx->log, last, &tail); /* Rollback statements in LIFO order. */ stailq_reverse(&tail); struct txv *v, *tmp; stailq_foreach_entry_safe(v, tmp, &tail, next_in_log) { write_set_remove(&tx->write_set, v); if (v->overwritten != NULL) { /* Restore overwritten statement. */ write_set_insert(&tx->write_set, v->overwritten); v->overwritten->is_overwritten = false; } tx->write_set_version++; txv_delete(v); } } int vy_tx_track(struct vy_tx *tx, struct vy_index *index, struct tuple *left, bool left_belongs, struct tuple *right, bool right_belongs) { if (vy_tx_is_in_read_view(tx)) { /* No point in tracking reads. */ return 0; } struct vy_read_interval *new_interval; new_interval = vy_read_interval_new(tx, index, left, left_belongs, right, right_belongs); if (new_interval == NULL) return -1; /* * Search for intersections in the transaction read set. */ struct stailq merge; stailq_create(&merge); struct vy_tx_read_set_iterator it; vy_tx_read_set_isearch_le(&tx->read_set, new_interval, &it); struct vy_read_interval *interval; interval = vy_tx_read_set_inext(&it); if (interval != NULL && interval->index == index) { if (vy_read_interval_cmpr(interval, new_interval) >= 0) { /* * There is an interval in the tree spanning * the new interval. Nothing to do. 
*/ vy_read_interval_delete(new_interval); return 0; } if (vy_read_interval_should_merge(interval, new_interval)) stailq_add_tail_entry(&merge, interval, in_merge); } if (interval == NULL) vy_tx_read_set_isearch_gt(&tx->read_set, new_interval, &it); while ((interval = vy_tx_read_set_inext(&it)) != NULL && interval->index == index && vy_read_interval_should_merge(new_interval, interval)) stailq_add_tail_entry(&merge, interval, in_merge); /* * Merge intersecting intervals with the new interval and * remove them from the transaction and index read sets. */ if (!stailq_empty(&merge)) { interval = stailq_first_entry(&merge, struct vy_read_interval, in_merge); if (vy_read_interval_cmpl(new_interval, interval) > 0) { tuple_ref(interval->left); tuple_unref(new_interval->left); new_interval->left = interval->left; new_interval->left_belongs = interval->left_belongs; } interval = stailq_last_entry(&merge, struct vy_read_interval, in_merge); if (vy_read_interval_cmpr(new_interval, interval) < 0) { tuple_ref(interval->right); tuple_unref(new_interval->right); new_interval->right = interval->right; new_interval->right_belongs = interval->right_belongs; } struct vy_read_interval *next_interval; stailq_foreach_entry_safe(interval, next_interval, &merge, in_merge) { vy_tx_read_set_remove(&tx->read_set, interval); vy_index_read_set_remove(&index->read_set, interval); vy_read_interval_delete(interval); } } vy_tx_read_set_insert(&tx->read_set, new_interval); vy_index_read_set_insert(&index->read_set, new_interval); return 0; } int vy_tx_track_point(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt) { assert(tuple_field_count(stmt) >= index->cmp_def->part_count); if (vy_tx_is_in_read_view(tx)) { /* No point in tracking reads. */ return 0; } struct txv *v = write_set_search_key(&tx->write_set, index, stmt); if (v != NULL && vy_stmt_type(v->stmt) != IPROTO_UPSERT) { /* Reading from own write set is serializable. 
*/ return 0; } return vy_tx_track(tx, index, stmt, true, stmt, true); } int vy_tx_set(struct vy_tx *tx, struct vy_index *index, struct tuple *stmt) { assert(vy_stmt_type(stmt) != 0); /** * A statement in write set must have and unique lsn * in order to differ it from cachable statements in mem and run. */ vy_stmt_set_lsn(stmt, INT64_MAX); struct tuple *applied = NULL; /* Update concurrent index */ struct txv *old = write_set_search_key(&tx->write_set, index, stmt); /* Found a match of the previous action of this transaction */ if (old != NULL && vy_stmt_type(stmt) == IPROTO_UPSERT) { assert(index->id == 0); uint8_t old_type = vy_stmt_type(old->stmt); assert(old_type == IPROTO_UPSERT || old_type == IPROTO_INSERT || old_type == IPROTO_REPLACE || old_type == IPROTO_DELETE); (void) old_type; applied = vy_apply_upsert(stmt, old->stmt, index->cmp_def, index->mem_format, index->upsert_format, true); index->stat.upsert.applied++; if (applied == NULL) return -1; stmt = applied; assert(vy_stmt_type(stmt) != 0); index->stat.upsert.squashed++; } /* Allocate a MVCC container. */ struct txv *v = txv_new(tx, index, stmt); if (applied != NULL) tuple_unref(applied); if (v == NULL) return -1; if (old != NULL) { /* Leave the old txv in TX log but remove it from write set */ assert(tx->write_size >= tuple_size(old->stmt)); tx->write_size -= tuple_size(old->stmt); write_set_remove(&tx->write_set, old); old->is_overwritten = true; v->is_first_insert = old->is_first_insert; } if (old == NULL && vy_stmt_type(stmt) == IPROTO_INSERT) v->is_first_insert = true; if (old != NULL && vy_stmt_type(stmt) != IPROTO_UPSERT) { /* * Inherit the column mask of the overwritten statement * so as not to skip both statements on dump. 
*/ uint64_t column_mask = vy_stmt_column_mask(stmt); if (column_mask != UINT64_MAX) vy_stmt_set_column_mask(stmt, column_mask | vy_stmt_column_mask(old->stmt)); } v->overwritten = old; write_set_insert(&tx->write_set, v); tx->write_set_version++; tx->write_size += tuple_size(stmt); vy_stmt_counter_acct_tuple(&index->stat.txw.count, stmt); stailq_add_tail_entry(&tx->log, v, next_in_log); return 0; } void vy_txw_iterator_open(struct vy_txw_iterator *itr, struct vy_txw_iterator_stat *stat, struct vy_tx *tx, struct vy_index *index, enum iterator_type iterator_type, const struct tuple *key) { itr->stat = stat; itr->tx = tx; itr->index = index; itr->iterator_type = iterator_type; itr->key = key; itr->version = UINT32_MAX; itr->curr_txv = NULL; itr->search_started = false; } /** * Position the iterator to the first entry in the transaction * write set satisfying the search criteria for a given key and * direction. */ static void vy_txw_iterator_seek(struct vy_txw_iterator *itr, enum iterator_type iterator_type, const struct tuple *key) { itr->stat->lookup++; itr->version = itr->tx->write_set_version; itr->curr_txv = NULL; struct vy_index *index = itr->index; struct write_set_key k = { index, key }; struct txv *txv; if (tuple_field_count(key) > 0) { if (iterator_type == ITER_EQ) txv = write_set_search(&itr->tx->write_set, &k); else if (iterator_type == ITER_GE || iterator_type == ITER_GT) txv = write_set_nsearch(&itr->tx->write_set, &k); else txv = write_set_psearch(&itr->tx->write_set, &k); if (txv == NULL || txv->index != index) return; if (vy_stmt_compare(key, txv->stmt, index->cmp_def) == 0) { while (true) { struct txv *next; if (iterator_type == ITER_LE || iterator_type == ITER_GT) next = write_set_next(&itr->tx->write_set, txv); else next = write_set_prev(&itr->tx->write_set, txv); if (next == NULL || next->index != index) break; if (vy_stmt_compare(key, next->stmt, index->cmp_def) != 0) break; txv = next; } if (iterator_type == ITER_GT) txv = 
write_set_next(&itr->tx->write_set, txv); else if (iterator_type == ITER_LT) txv = write_set_prev(&itr->tx->write_set, txv); } } else if (iterator_type == ITER_LE) { txv = write_set_nsearch(&itr->tx->write_set, &k); } else { assert(iterator_type == ITER_GE); txv = write_set_psearch(&itr->tx->write_set, &k); } if (txv == NULL || txv->index != index) return; itr->curr_txv = txv; } void vy_txw_iterator_next(struct vy_txw_iterator *itr, struct tuple **ret) { *ret = NULL; if (!itr->search_started) { itr->search_started = true; vy_txw_iterator_seek(itr, itr->iterator_type, itr->key); goto out; } assert(itr->version == itr->tx->write_set_version); if (itr->curr_txv == NULL) return; if (itr->iterator_type == ITER_LE || itr->iterator_type == ITER_LT) itr->curr_txv = write_set_prev(&itr->tx->write_set, itr->curr_txv); else itr->curr_txv = write_set_next(&itr->tx->write_set, itr->curr_txv); if (itr->curr_txv != NULL && itr->curr_txv->index != itr->index) itr->curr_txv = NULL; if (itr->curr_txv != NULL && itr->iterator_type == ITER_EQ && vy_stmt_compare(itr->key, itr->curr_txv->stmt, itr->index->cmp_def) != 0) itr->curr_txv = NULL; out: if (itr->curr_txv != NULL) { *ret = itr->curr_txv->stmt; vy_stmt_counter_acct_tuple(&itr->stat->get, *ret); } } void vy_txw_iterator_skip(struct vy_txw_iterator *itr, const struct tuple *last_stmt, struct tuple **ret) { *ret = NULL; assert(!itr->search_started || itr->version == itr->tx->write_set_version); /* * Check if the iterator is already positioned * at the statement following last_stmt. */ if (itr->search_started && (itr->curr_txv == NULL || last_stmt == NULL || iterator_direction(itr->iterator_type) * vy_tuple_compare(itr->curr_txv->stmt, last_stmt, itr->index->cmp_def) > 0)) { if (itr->curr_txv != NULL) *ret = itr->curr_txv->stmt; return; } const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = last_stmt; iterator_type = iterator_direction(iterator_type) > 0 ? 
ITER_GT : ITER_LT; } itr->search_started = true; vy_txw_iterator_seek(itr, iterator_type, key); if (itr->iterator_type == ITER_EQ && last_stmt != NULL && itr->curr_txv != NULL && vy_stmt_compare(itr->key, itr->curr_txv->stmt, itr->index->cmp_def) != 0) itr->curr_txv = NULL; if (itr->curr_txv != NULL) { *ret = itr->curr_txv->stmt; vy_stmt_counter_acct_tuple(&itr->stat->get, *ret); } } int vy_txw_iterator_restore(struct vy_txw_iterator *itr, const struct tuple *last_stmt, struct tuple **ret) { if (!itr->search_started || itr->version == itr->tx->write_set_version) return 0; const struct tuple *key = itr->key; enum iterator_type iterator_type = itr->iterator_type; if (last_stmt != NULL) { key = last_stmt; iterator_type = iterator_direction(iterator_type) > 0 ? ITER_GT : ITER_LT; } struct txv *prev_txv = itr->curr_txv; vy_txw_iterator_seek(itr, iterator_type, key); if (itr->iterator_type == ITER_EQ && itr->curr_txv != NULL && vy_stmt_compare(itr->key, itr->curr_txv->stmt, itr->index->cmp_def) != 0) itr->curr_txv = NULL; if (prev_txv == itr->curr_txv) return 0; *ret = NULL; if (itr->curr_txv != NULL) { *ret = itr->curr_txv->stmt; vy_stmt_counter_acct_tuple(&itr->stat->get, *ret); } return 1; } /** * Close a txw iterator. */ void vy_txw_iterator_close(struct vy_txw_iterator *itr) { (void)itr; /* suppress warn if NDEBUG */ TRASH(itr); } tarantool_1.9.1.26.g63eb81e3c/src/box/wal.h0000664000000000000000000001232513306565107016532 0ustar rootroot#ifndef TARANTOOL_WAL_WRITER_H_INCLUDED #define TARANTOOL_WAL_WRITER_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "small/rlist.h" #include "cbus.h" #include "journal.h" struct fiber; struct vclock; struct wal_writer; enum wal_mode { WAL_NONE = 0, WAL_WRITE, WAL_FSYNC, WAL_MODE_MAX }; /** String constants for the supported modes. */ extern const char *wal_mode_STRS[]; extern int wal_dir_lock; #if defined(__cplusplus) void wal_thread_start(); void wal_init(enum wal_mode wal_mode, const char *wal_dirname, const struct tt_uuid *instance_uuid, struct vclock *vclock, int64_t wal_max_rows, int64_t wal_max_size); void wal_thread_stop(); struct wal_watcher_msg { struct cmsg cmsg; struct wal_watcher *watcher; unsigned events; }; enum wal_event { /** A row is written to the current WAL. */ WAL_EVENT_WRITE = (1 << 0), /** A new WAL is created. */ WAL_EVENT_ROTATE = (1 << 1), }; struct wal_watcher { /** Link in wal_writer::watchers. */ struct rlist next; /** The watcher callback function. */ void (*cb)(struct wal_watcher *, unsigned events); /** Pipe from the watcher to WAL. */ struct cpipe wal_pipe; /** Pipe from WAL to the watcher. 
*/ struct cpipe watcher_pipe; /** Cbus route used for notifying the watcher. */ struct cmsg_hop route[2]; /** Message sent to notify the watcher. */ struct wal_watcher_msg msg; /** * Bit mask of WAL events that happened while * the notification message was en route. * It indicates that the message must be resend * right upon returning to WAL. */ unsigned events; }; /** * Subscribe to WAL events. * * The caller will receive a notification after a WAL write with * unspecified but reasonable latency. The first notification is * sent right after registering the watcher so that the caller * can process WALs written before the function was called. * * Note WAL notifications are delivered via cbus hence the caller * must have set up the cbus endpoint and started the event loop. * Alternatively, one can pass a callback invoking cbus_process() * to this function. * * @param watcher WAL watcher to register. * @param name Name of the cbus endpoint at the caller's cord. * @param watcher_cb Callback to invoke from the caller's cord * upon receiving a WAL event. Apart from the * watcher itself, it takes a bit mask of events. * Events are described in wal_event enum. * @param process_cb Function called to process cbus messages * while the watcher is being attached or NULL * if the cbus loop is running elsewhere. */ void wal_set_watcher(struct wal_watcher *watcher, const char *name, void (*watcher_cb)(struct wal_watcher *, unsigned events), void (*process_cb)(struct cbus_endpoint *)); /** * Unsubscribe from WAL events. * * @param watcher WAL watcher to unregister. * @param process_cb Function invoked to process cbus messages * while the watcher is being detached or NULL * if the cbus loop is running elsewhere. */ void wal_clear_watcher(struct wal_watcher *watcher, void (*process_cb)(struct cbus_endpoint *)); void wal_atfork(); extern "C" { #endif /* defined(__cplusplus) */ enum wal_mode wal_mode(); /** * Wait till all pending changes to the WAL are flushed. * Rotates the WAL. 
* * @param[out] vclock WAL vclock * */ int wal_checkpoint(struct vclock *vclock, bool rotate); /** * Remove WAL files that are not needed to recover * from snapshot with @lsn or newer. */ void wal_collect_garbage(int64_t lsn); void wal_init_vy_log(); /** * Write xrows to the vinyl metadata log. */ int wal_write_vy_log(struct journal_entry *req); /** * Rotate the vinyl metadata log. */ void wal_rotate_vy_log(); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_WAL_WRITER_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/call.h0000664000000000000000000000362113306560010016645 0ustar rootroot#ifndef INCLUDES_TARANTOOL_MOD_BOX_CALL_H #define INCLUDES_TARANTOOL_MOD_BOX_CALL_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct port; struct call_request; struct box_function_ctx { struct port *port; }; int box_func_reload(const char *name); int box_process_call(struct call_request *request, struct port *port); int box_process_eval(struct call_request *request, struct port *port); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_MOD_BOX_CALL_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_point_lookup.c0000664000000000000000000003220513306565107021201 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_point_lookup.h" #include #include #include #include #include #include "fiber.h" #include "vy_index.h" #include "vy_stmt.h" #include "vy_tx.h" #include "vy_mem.h" #include "vy_run.h" #include "vy_cache.h" #include "vy_upsert.h" /** * ID of an iterator source type. Can be used in bitmaps. */ enum iterator_src_type { ITER_SRC_TXW = 1, ITER_SRC_CACHE = 2, ITER_SRC_MEM = 4, ITER_SRC_RUN = 8, }; /** * History of a key in vinyl is a continuous sequence of statements of the * same key in order of decreasing lsn. The history can be represented as a * list, the structure below describes one node of the list. */ struct vy_stmt_history_node { /* Type of source that the history statement came from */ enum iterator_src_type src_type; /* The history statement. Referenced for runs. */ struct tuple *stmt; /* Link in the history list */ struct rlist link; }; /** * Allocate (region) new history node. * @return new node or NULL on memory error (diag is set). */ static struct vy_stmt_history_node * vy_stmt_history_node_new(void) { struct region *region = &fiber()->gc; struct vy_stmt_history_node *node = region_alloc(region, sizeof(*node)); if (node == NULL) diag_set(OutOfMemory, sizeof(*node), "region", "struct vy_stmt_history_node"); return node; } /** * Unref statement if necessary, remove node from history if it's there. 
*/ static void vy_stmt_history_cleanup(struct rlist *history, size_t region_svp) { struct vy_stmt_history_node *node; rlist_foreach_entry(node, history, link) if (node->src_type == ITER_SRC_RUN) tuple_unref(node->stmt); region_truncate(&fiber()->gc, region_svp); } /** * Return true if the history of a key contains terminal node in the end, * i.e. REPLACE of DELETE statement. */ static bool vy_stmt_history_is_terminal(struct rlist *history) { if (rlist_empty(history)) return false; struct vy_stmt_history_node *node = rlist_last_entry(history, struct vy_stmt_history_node, link); assert(vy_stmt_type(node->stmt) == IPROTO_REPLACE || vy_stmt_type(node->stmt) == IPROTO_DELETE || vy_stmt_type(node->stmt) == IPROTO_INSERT || vy_stmt_type(node->stmt) == IPROTO_UPSERT); return vy_stmt_type(node->stmt) != IPROTO_UPSERT; } /** * Scan TX write set for given key. * Add one or no statement to the history list. */ static int vy_point_lookup_scan_txw(struct vy_index *index, struct vy_tx *tx, struct tuple *key, struct rlist *history) { if (tx == NULL) return 0; index->stat.txw.iterator.lookup++; struct txv *txv = write_set_search_key(&tx->write_set, index, key); assert(txv == NULL || txv->index == index); if (txv == NULL) return 0; vy_stmt_counter_acct_tuple(&index->stat.txw.iterator.get, txv->stmt); struct vy_stmt_history_node *node = vy_stmt_history_node_new(); if (node == NULL) return -1; node->src_type = ITER_SRC_TXW; node->stmt = txv->stmt; rlist_add_tail(history, &node->link); return 0; } /** * Scan index cache for given key. * Add one or no statement to the history list. 
*/ static int vy_point_lookup_scan_cache(struct vy_index *index, const struct vy_read_view **rv, struct tuple *key, struct rlist *history) { index->cache.stat.lookup++; struct tuple *stmt = vy_cache_get(&index->cache, key); if (stmt == NULL || vy_stmt_lsn(stmt) > (*rv)->vlsn) return 0; vy_stmt_counter_acct_tuple(&index->cache.stat.get, stmt); struct vy_stmt_history_node *node = vy_stmt_history_node_new(); if (node == NULL) return -1; node->src_type = ITER_SRC_CACHE; node->stmt = stmt; rlist_add_tail(history, &node->link); return 0; } /** * Scan one particular mem. * Add found statements to the history list up to terminal statement. */ static int vy_point_lookup_scan_mem(struct vy_index *index, struct vy_mem *mem, const struct vy_read_view **rv, struct tuple *key, struct rlist *history) { struct tree_mem_key tree_key; tree_key.stmt = key; tree_key.lsn = (*rv)->vlsn; bool exact; struct vy_mem_tree_iterator mem_itr = vy_mem_tree_lower_bound(&mem->tree, &tree_key, &exact); index->stat.memory.iterator.lookup++; const struct tuple *stmt = NULL; if (!vy_mem_tree_iterator_is_invalid(&mem_itr)) { stmt = *vy_mem_tree_iterator_get_elem(&mem->tree, &mem_itr); if (vy_stmt_compare(stmt, key, mem->cmp_def) != 0) stmt = NULL; } if (stmt == NULL) return 0; while (true) { struct vy_stmt_history_node *node = vy_stmt_history_node_new(); if (node == NULL) return -1; vy_stmt_counter_acct_tuple(&index->stat.memory.iterator.get, stmt); node->src_type = ITER_SRC_MEM; node->stmt = (struct tuple *)stmt; rlist_add_tail(history, &node->link); if (vy_stmt_history_is_terminal(history)) break; if (!vy_mem_tree_iterator_next(&mem->tree, &mem_itr)) break; const struct tuple *prev_stmt = stmt; stmt = *vy_mem_tree_iterator_get_elem(&mem->tree, &mem_itr); if (vy_stmt_lsn(stmt) >= vy_stmt_lsn(prev_stmt)) break; if (vy_stmt_compare(stmt, key, mem->cmp_def) != 0) break; } return 0; } /** * Scan all mems that belongs to the index. * Add found statements to the history list up to terminal statement. 
*/ static int vy_point_lookup_scan_mems(struct vy_index *index, const struct vy_read_view **rv, struct tuple *key, struct rlist *history) { assert(index->mem != NULL); int rc = vy_point_lookup_scan_mem(index, index->mem, rv, key, history); struct vy_mem *mem; rlist_foreach_entry(mem, &index->sealed, in_sealed) { if (rc != 0 || vy_stmt_history_is_terminal(history)) return rc; rc = vy_point_lookup_scan_mem(index, mem, rv, key, history); } return 0; } /** * Scan one particular slice. * Add found statements to the history list up to terminal statement. * Set *terminal_found to true if the terminal statement (DELETE or REPLACE) * was found. */ static int vy_point_lookup_scan_slice(struct vy_index *index, struct vy_slice *slice, const struct vy_read_view **rv, struct tuple *key, struct rlist *history, bool *terminal_found) { int rc = 0; /* * The format of the statement must be exactly the space * format with the same identifier to fully match the * format in vy_mem. */ struct vy_run_iterator run_itr; vy_run_iterator_open(&run_itr, &index->stat.disk.iterator, slice, ITER_EQ, key, rv, index->cmp_def, index->key_def, index->disk_format, index->upsert_format, index->id == 0); struct tuple *stmt; rc = vy_run_iterator_next_key(&run_itr, &stmt); while (rc == 0 && stmt != NULL) { struct vy_stmt_history_node *node = vy_stmt_history_node_new(); if (node == NULL) { rc = -1; break; } node->src_type = ITER_SRC_RUN; node->stmt = stmt; tuple_ref(stmt); rlist_add_tail(history, &node->link); if (vy_stmt_type(stmt) != IPROTO_UPSERT) { *terminal_found = true; break; } rc = vy_run_iterator_next_lsn(&run_itr, &stmt); } vy_run_iterator_close(&run_itr); return rc; } /** * Find a range and scan all slices that belongs to the range. * Add found statements to the history list up to terminal statement. * All slices are pinned before first slice scan, so it's guaranteed * that complete history from runs will be extracted. 
*/ static int vy_point_lookup_scan_slices(struct vy_index *index, const struct vy_read_view **rv, struct tuple *key, struct rlist *history) { struct vy_range *range = vy_range_tree_find_by_key(index->tree, ITER_EQ, key); assert(range != NULL); int slice_count = range->slice_count; struct vy_slice **slices = (struct vy_slice **) region_alloc(&fiber()->gc, slice_count * sizeof(*slices)); if (slices == NULL) { diag_set(OutOfMemory, slice_count * sizeof(*slices), "region", "slices array"); return -1; } int i = 0; struct vy_slice *slice; rlist_foreach_entry(slice, &range->slices, in_range) { vy_slice_pin(slice); slices[i++] = slice; } assert(i == slice_count); int rc = 0; bool terminal_found = false; for (i = 0; i < slice_count; i++) { if (rc == 0 && !terminal_found) rc = vy_point_lookup_scan_slice(index, slices[i], rv, key, history, &terminal_found); vy_slice_unpin(slices[i]); } return rc; } /** * Get a resultant statement from collected history. Add to cache if possible. */ static int vy_point_lookup_apply_history(struct vy_index *index, const struct vy_read_view **rv, struct tuple *key, struct rlist *history, struct tuple **ret) { *ret = NULL; if (rlist_empty(history)) return 0; struct tuple *curr_stmt = NULL; struct vy_stmt_history_node *node = rlist_last_entry(history, struct vy_stmt_history_node, link); if (vy_stmt_history_is_terminal(history)) { if (vy_stmt_type(node->stmt) == IPROTO_DELETE) { /* Ignore terminal delete */ } else if (node->src_type == ITER_SRC_MEM) { curr_stmt = vy_stmt_dup(node->stmt, tuple_format(node->stmt)); } else { curr_stmt = node->stmt; tuple_ref(curr_stmt); } node = rlist_prev_entry_safe(node, history, link); } while (node != NULL) { assert(vy_stmt_type(node->stmt) == IPROTO_UPSERT); /* We could not read the data that is invisible now */ assert(node->src_type == ITER_SRC_TXW || vy_stmt_lsn(node->stmt) <= (*rv)->vlsn); struct tuple *stmt = vy_apply_upsert(node->stmt, curr_stmt, index->cmp_def, index->mem_format, index->upsert_format, 
true); index->stat.upsert.applied++; if (stmt == NULL) return -1; if (curr_stmt != NULL) tuple_unref(curr_stmt); curr_stmt = stmt; node = rlist_prev_entry_safe(node, history, link); } if (curr_stmt != NULL) { vy_stmt_counter_acct_tuple(&index->stat.get, curr_stmt); *ret = curr_stmt; } /** * Add a statement to the cache */ if ((*rv)->vlsn == INT64_MAX) /* Do not store non-latest data */ vy_cache_add(&index->cache, curr_stmt, NULL, key, ITER_EQ); return 0; } int vy_point_lookup(struct vy_index *index, struct vy_tx *tx, const struct vy_read_view **rv, struct tuple *key, struct tuple **ret) { assert(tuple_field_count(key) >= index->cmp_def->part_count); *ret = NULL; size_t region_svp = region_used(&fiber()->gc); double start_time = ev_monotonic_now(loop()); int rc = 0; index->stat.lookup++; /* History list */ struct rlist history; restart: rlist_create(&history); rc = vy_point_lookup_scan_txw(index, tx, key, &history); if (rc != 0 || vy_stmt_history_is_terminal(&history)) goto done; rc = vy_point_lookup_scan_cache(index, rv, key, &history); if (rc != 0 || vy_stmt_history_is_terminal(&history)) goto done; rc = vy_point_lookup_scan_mems(index, rv, key, &history); if (rc != 0 || vy_stmt_history_is_terminal(&history)) goto done; /* Save version before yield */ uint32_t mem_list_version = index->mem_list_version; rc = vy_point_lookup_scan_slices(index, rv, key, &history); if (rc != 0) goto done; ERROR_INJECT(ERRINJ_VY_POINT_ITER_WAIT, { while (mem_list_version == index->mem_list_version) fiber_sleep(0.01); /* Turn of the injection to avoid infinite loop */ errinj(ERRINJ_VY_POINT_ITER_WAIT, ERRINJ_BOOL)->bparam = false; }); if (mem_list_version != index->mem_list_version) { /* * Mem list was changed during yield. This could be rotation * or a dump. In case of dump the memory referenced by * statement history is gone and we need to reread new history. * This in unnecessary in case of rotation but since we * cannot distinguish these two cases we always restart. 
*/ vy_stmt_history_cleanup(&history, region_svp); goto restart; } done: if (rc == 0) { rc = vy_point_lookup_apply_history(index, rv, key, &history, ret); } vy_stmt_history_cleanup(&history, region_svp); if (rc != 0) return -1; double latency = ev_monotonic_now(loop()) - start_time; latency_collect(&index->stat.latency, latency); if (latency > index->env->too_long_threshold) { say_warn("%s: get(%s) => %s took too long: %.3f sec", vy_index_name(index), tuple_str(key), vy_stmt_str(*ret), latency); } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_bitset.h0000664000000000000000000000432113306560010020434 0ustar rootroot#ifndef TARANTOOL_BOX_MEMTX_BITSET_H_INCLUDED #define TARANTOOL_BOX_MEMTX_BITSET_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * @brief Index API wrapper for bitset_index * @see bitset/index.h */ #include "index.h" #include "bitset/index.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct memtx_engine; #ifndef OLD_GOOD_BITSET struct matras; struct mh_bitset_index_t; #endif /*#ifndef OLD_GOOD_BITSET*/ struct memtx_bitset_index { struct index base; struct bitset_index index; #ifndef OLD_GOOD_BITSET struct matras *id_to_tuple; struct mh_bitset_index_t *tuple_to_id; uint32_t spare_id; #endif /*#ifndef OLD_GOOD_BITSET*/ }; struct memtx_bitset_index * memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_MEMTX_BITSET_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/bootstrap.snap0000664000000000000000000000274013306565107020476 0ustar rootrootSNAP 0.13 Version: 1.7.6-126-g13d45fb Instance: 09b28f68-2abc-4e3c-83ca-4b9fa453cbb0 VClock: {1: 0} պ df (/P*|I00 S2-,C ^A8:UINrFZXdPG)g680 ad8  $ sٌb8xoQQ %B 0, Mi$w=Qy V ~zWAmy>yp8Av_ q\&/ oHQ-Hq-8Ê?ґ3Mґ3Mh;§;4:aTBhiAkaT E 0Ȳ^ZOhߤ>ۑ :*&*Ms^w֨oa(({ [?"`ALqkՐp[ E). 
Lĉ~ j4gZ0˒"'G&G⏉:<$RӱW!ubsfKil =9sͲ4 y4͘NCuW¸e*72y-Z<Ɍ ;B Eb<ׇ`@UXƿ: ju- ԰דtarantool_1.9.1.26.g63eb81e3c/src/box/memtx_engine.h0000664000000000000000000001256713306565107020436 0ustar rootroot#ifndef TARANTOOL_BOX_MEMTX_ENGINE_H_INCLUDED #define TARANTOOL_BOX_MEMTX_ENGINE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "engine.h" #include "xlog.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * The state of memtx recovery process. * There is a global state of the entire engine state of each * space. The state of a space is initialized from the engine * state when the space is created. 
The exception is system * spaces, which are always created in the final (OK) state. * * The states exist to speed up recovery: initial state * assumes write-only flow of sorted rows from a snapshot. * It's followed by a state for read-write recovery * of rows from the write ahead log; these rows are * inserted only into the primary key. The final * state is for a fully functional space. */ enum memtx_recovery_state { /** The space has no indexes. */ MEMTX_INITIALIZED, /** * The space has only the primary index, which is in * write-only bulk insert mode. */ MEMTX_INITIAL_RECOVERY, /** * The space has the primary index, which can be * used for reads and writes, but secondary indexes are * empty. The will be built at the end of recovery. */ MEMTX_FINAL_RECOVERY, /** * The space and all its indexes are fully built. */ MEMTX_OK, }; /** Memtx extents pool, available to statistics. */ extern struct mempool memtx_index_extent_pool; struct memtx_engine { struct engine base; /** Engine recovery state. */ enum memtx_recovery_state state; /** Non-zero if there is a checkpoint (snapshot) in progress. */ struct checkpoint *checkpoint; /** The directory where to store snapshots. */ struct xdir snap_dir; /** Limit disk usage of checkpointing (bytes per second). */ uint64_t snap_io_rate_limit; /** Skip invalid snapshot records if this flag is set. */ bool force_recovery; /** Memory pool for tree index iterator. */ struct mempool tree_iterator_pool; /** Memory pool for rtree index iterator. */ struct mempool rtree_iterator_pool; /** Memory pool for hash index iterator. */ struct mempool hash_iterator_pool; /** Memory pool for bitset index iterator. 
*/ struct mempool bitset_iterator_pool; }; struct memtx_engine * memtx_engine_new(const char *snap_dirname, bool force_recovery, uint64_t tuple_arena_max_size, uint32_t objsize_min, float alloc_factor); int memtx_engine_recover_snapshot(struct memtx_engine *memtx, const struct vclock *vclock); void memtx_engine_set_snap_io_rate_limit(struct memtx_engine *memtx, double limit); void memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size); enum { MEMTX_EXTENT_SIZE = 16 * 1024, MEMTX_SLAB_SIZE = 4 * 1024 * 1024 }; /** * Initialize arena for indexes. * The arena is used for memtx_index_extent_alloc * and memtx_index_extent_free. * Can be called several times, only first call do the work. */ void memtx_index_arena_init(void); /** * Allocate a block of size MEMTX_EXTENT_SIZE for memtx index */ void * memtx_index_extent_alloc(void *ctx); /** * Free a block previously allocated by memtx_index_extent_alloc */ void memtx_index_extent_free(void *ctx, void *extent); /** * Reserve num extents in pool. 
* Ensure that next num extent_alloc will succeed w/o an error */ int memtx_index_extent_reserve(int num); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline struct memtx_engine * memtx_engine_new_xc(const char *snap_dirname, bool force_recovery, uint64_t tuple_arena_max_size, uint32_t objsize_min, float alloc_factor) { struct memtx_engine *memtx; memtx = memtx_engine_new(snap_dirname, force_recovery, tuple_arena_max_size, objsize_min, alloc_factor); if (memtx == NULL) diag_raise(); return memtx; } static inline void memtx_engine_recover_snapshot_xc(struct memtx_engine *memtx, const struct vclock *vclock) { if (memtx_engine_recover_snapshot(memtx, vclock) != 0) diag_raise(); } #endif /* defined(__plusplus) */ #endif /* TARANTOOL_BOX_MEMTX_ENGINE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/opt_def.h0000664000000000000000000000733213306565107017371 0ustar rootroot#ifndef TARANTOOL_BOX_OPT_DEF_H_INCLUDED #define TARANTOOL_BOX_OPT_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum opt_type { OPT_BOOL, /* bool */ OPT_UINT32, /* uint32_t */ OPT_INT64, /* int64_t */ OPT_FLOAT, /* double */ OPT_STR, /* char[] */ OPT_STRPTR, /* char* */ OPT_ENUM, /* enum */ opt_type_MAX, }; extern const char *opt_type_strs[]; typedef int64_t (*opt_def_to_enum_cb)(const char *str, uint32_t len); struct opt_def { const char *name; enum opt_type type; size_t offset; uint32_t len; const char *enum_name; int enum_size; const char **enum_strs; uint32_t enum_max; /** If not NULL, used to get a enum value by a string. */ opt_def_to_enum_cb to_enum; }; #define OPT_DEF(key, type, opts, field) \ { key, type, offsetof(opts, field), sizeof(((opts *)0)->field), \ NULL, 0, NULL, 0, NULL } #define OPT_DEF_ENUM(key, enum_name, opts, field, to_enum) \ { key, OPT_ENUM, offsetof(opts, field), sizeof(int), #enum_name, \ sizeof(enum enum_name), enum_name##_strs, enum_name##_MAX, to_enum } #define OPT_END {NULL, opt_type_MAX, 0, 0, NULL, 0, NULL, 0, NULL} struct region; /** * Populate key options from their msgpack-encoded representation * (msgpack map). */ int opts_decode(void *opts, const struct opt_def *reg, const char **map, uint32_t errcode, uint32_t field_no, struct region *region); /** * Decode one option and store it into @a opts struct as a field. * @param opts[out] Options to decode to. * @param reg Options definition. * @param key Name of an option. 
* @param key_len Length of @a key. * @param data Option value. * @param errcode Code of error to set if something is wrong. * @param field_no Field number of an option in a parent element. * @param region Region to allocate OPT_STRPTR option. * @param skip_unknown_options If true, do not set error, if an * option is unknown. Useful, when it is neccessary to * allow to store custom fields in options. */ int opts_parse_key(void *opts, const struct opt_def *reg, const char *key, uint32_t key_len, const char **data, uint32_t errcode, uint32_t field_no, struct region *region, bool skip_unknown_options); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_OPT_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_index.h0000664000000000000000000004024113306565107017572 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_INDEX_H #define INCLUDES_TARANTOOL_BOX_VY_INDEX_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "index_def.h" #define HEAP_FORWARD_DECLARATION #include "salad/heap.h" #include "vy_cache.h" #include "vy_range.h" #include "vy_stat.h" #include "vy_read_set.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct histogram; struct tuple; struct tuple_format; struct vy_index; struct vy_mem; struct vy_mem_env; struct vy_recovery; struct vy_run; struct vy_run_env; typedef void (*vy_upsert_thresh_cb)(struct vy_index *index, struct tuple *stmt, void *arg); /** Common index environment. */ struct vy_index_env { /** Path to the data directory. */ const char *path; /** Memory generation counter. */ int64_t *p_generation; /** Tuple format for keys (SELECT). */ struct tuple_format *key_format; /** Key (SELECT) with no parts. */ struct tuple *empty_key; /** * If read of a single statement takes longer than * the given value, warn about it in the log. */ double too_long_threshold; /** * Callback invoked when the number of upserts for * the same key exceeds VY_UPSERT_THRESHOLD. */ vy_upsert_thresh_cb upsert_thresh_cb; /** Argument passed to upsert_thresh_cb. */ void *upsert_thresh_arg; /** Number of indexes in this environment. */ int index_count; /** Size of memory used for bloom filters. */ size_t bloom_size; /** Size of memory used for page index. */ size_t page_index_size; }; /** Create a common index environment. 
*/ int vy_index_env_create(struct vy_index_env *env, const char *path, int64_t *p_generation, vy_upsert_thresh_cb upsert_thresh_cb, void *upsert_thresh_arg); /** Destroy a common index environment. */ void vy_index_env_destroy(struct vy_index_env *env); /** * A struct for primary and secondary Vinyl indexes. * * Vinyl primary and secondary indexes work differently: * * - the primary index is fully covering (also known as * "clustered" in MS SQL circles). * It stores all tuple fields of the tuple coming from * INSERT/REPLACE/UPDATE/DELETE operations. This index is * the only place where the full tuple is stored. * * - a secondary index only stores parts participating in the * secondary key, coalesced with parts of the primary key. * Duplicate parts, i.e. identical parts of the primary and * secondary key are only stored once. (@sa key_def_merge * function). This reduces the disk and RAM space necessary to * maintain a secondary index, but adds an extra look-up in the * primary key for every fetched tuple. * * When a search in a secondary index is made, we first look up * the secondary index tuple, containing the primary key, and then * use this key to find the original tuple in the primary index. * While the primary index has only one key_def that is * used for validating and comparing tuples, secondary index needs * two: * * - the first one is defined by the user. It contains the key * parts of the secondary key, as present in the original tuple. * This is key_def. * * - the second one is used to fetch key parts of the secondary * key, *augmented* with the parts of the primary key from the * original tuple and compare secondary index tuples. These * parts concatenated together construe the tuple of the * secondary key, i.e. the tuple stored. This is key_def. */ struct vy_index { /** Common index environment. */ struct vy_index_env *env; /** * Reference counter. Used to postpone index deletion * until all pending operations have completed. 
*/ int refs; /** Index ID visible to the user. */ uint32_t id; /** ID of the space this index belongs to. */ uint32_t space_id; /** Index options. */ struct index_opts opts; /** Key definition used to compare tuples. */ struct key_def *cmp_def; /** Key definition passed by the user. */ struct key_def *key_def; /** * If the following flag is set the index is unique and * it must be checked for duplicates on INSERT. Otherwise, * the check can be skipped, either because this index * is not unique or it is a part of another unique index. */ bool check_is_unique; /** * Tuple format for tuples of this index created when * reading pages from disk. * Is distinct from mem_format only for secondary keys, * whose tuples have MP_NIL in all "gap" positions between * positions of the secondary and primary key fields. * These gaps are necessary to make such tuples comparable * with tuples from vy_mem, while using the same cmp_def. * Since upserts are never present in secondary keys, is * used only for REPLACE and DELETE * tuples. */ struct tuple_format *disk_format; /** Tuple format of the space this index belongs to. */ struct tuple_format *mem_format; /** * Format for tuples of type REPLACE or DELETE which * are a result of an UPDATE operation. Such tuples * contain a column mask which preserves the list * of actually changed columns. Used when creating * tuples for vy_mem, and used only by primary key. */ struct tuple_format *mem_format_with_colmask; /* * Format for UPSERT statements. Note, UPSERTs can only * appear in spaces with a single index. */ struct tuple_format *upsert_format; /** * Primary index of the same space or NULL if this index * is primary. Referenced by each secondary index. */ struct vy_index *pk; /** Index statistics. */ struct vy_index_stat stat; /** * Merge cache of this index. Contains hottest tuples * with continuation markers. */ struct vy_cache cache; /** Active in-memory index, i.e. the one used for insertions. 
*/ struct vy_mem *mem; /** * List of sealed in-memory indexes, i.e. indexes that can't be * inserted into, only read from, linked by vy_mem->in_sealed. * The newer an index, the closer it to the list head. */ struct rlist sealed; /** * Tree of all ranges of this index, linked by * vy_range->tree_node, ordered by vy_range->begin. */ vy_range_tree_t *tree; /** Number of ranges in this index. */ int range_count; /** Heap of ranges, prioritized by compact_priority. */ heap_t range_heap; /** * List of all runs created for this index, * linked by vy_run->in_index. */ struct rlist runs; /** Number of entries in all ranges. */ int run_count; /** * Histogram accounting how many ranges of the index * have a particular number of runs. */ struct histogram *run_hist; /** Size of memory used for bloom filters. */ size_t bloom_size; /** Size of memory used for page index. */ size_t page_index_size; /** * Incremented for each change of the mem list, * to invalidate iterators. */ uint32_t mem_list_version; /** * Incremented for each change of the range list, * to invalidate iterators. */ uint32_t range_tree_version; /** * LSN of the last dump or -1 if the index has not * been dumped yet. */ int64_t dump_lsn; /** * LSN of the row that committed the index or -1 if * the index was not committed to the metadata log. */ int64_t commit_lsn; /** * This flag is set if the index was dropped. * It is also set on local recovery if the index * will be dropped when WAL is replayed. */ bool is_dropped; /** * Number of times the index was truncated. * * After recovery is complete, it equals space->truncate_count. * On local recovery, it is loaded from the metadata log and may * be greater than space->truncate_count, which indicates that * the space is truncated in WAL. */ uint64_t truncate_count; /** * If pin_count > 0 the index can't be scheduled for dump. * Used to make sure that the primary index is dumped last. */ int pin_count; /** Set if the index is currently being dumped. 
*/ bool is_dumping; /** Link in vy_scheduler->dump_heap. */ struct heap_node in_dump; /** Link in vy_scheduler->compact_heap. */ struct heap_node in_compact; /** * Interval tree containing reads from this index done by all * active transactions. Linked by vy_tx_interval->in_index. * Used to abort transactions that conflict with a write to * the index. */ vy_index_read_set_t read_set; }; /** * Assert if an index formats are inconsistent. * @param index Index to validate. */ void vy_index_validate_formats(const struct vy_index *index); /** Return index name. Used for logging. */ const char * vy_index_name(struct vy_index *index); /** Return sum size of memory tree extents. */ size_t vy_index_mem_tree_size(struct vy_index *index); /** Allocate a new index object. */ struct vy_index * vy_index_new(struct vy_index_env *index_env, struct vy_cache_env *cache_env, struct vy_mem_env *mem_env, struct index_def *index_def, struct tuple_format *format, struct vy_index *pk); /** Free an index object. */ void vy_index_delete(struct vy_index *index); /** * Increment the reference counter of a vinyl index. * An index cannot be deleted if its reference counter * is elevated. */ static inline void vy_index_ref(struct vy_index *index) { assert(index->refs >= 0); index->refs++; } /** * Decrement the reference counter of a vinyl index. * If the reference counter reaches 0, the index is * deleted with vy_index_delete(). */ static inline void vy_index_unref(struct vy_index *index) { assert(index->refs > 0); if (--index->refs == 0) vy_index_delete(index); } /** * Swap disk contents (ranges, runs, and corresponding stats) * between two indexes. Used only on recovery, to skip reloading * indexes of a truncated space. The in-memory tree of the index * can't be populated - see vy_is_committed_one(). */ void vy_index_swap(struct vy_index *old_index, struct vy_index *new_index); /** Initialize the range tree of a new index. 
*/ int vy_index_init_range_tree(struct vy_index *index); /** * Create a new vinyl index. * * This function is called when an index is created after recovery * is complete or during remote recovery. It initializes the range * tree and makes the index directory. */ int vy_index_create(struct vy_index *index); /** * Load a vinyl index from disk. Called on local recovery. * * This function retrieves the index structure from the * metadata log, rebuilds the range tree, and opens run * files. * * If @is_checkpoint_recovery is set, the index is recovered from * the last snapshot. In particular, this means that the index * must have been logged in the metadata log and so if the * function does not find it in the recovery context, it will * fail. If the flag is unset, the index is recovered from a * WAL, in which case a missing index is OK - it just means we * failed to log it before restart and have to retry during * WAL replay. * * @lsn is the LSN of the row that created the index. * If the index is recovered from a snapshot, it is set * to the snapshot signature. */ int vy_index_recover(struct vy_index *index, struct vy_recovery *recovery, struct vy_run_env *run_env, int64_t lsn, bool is_checkpoint_recovery, bool force_recovery); /** * Return generation of in-memory data stored in an index * (min over vy_mem->generation). */ int64_t vy_index_generation(struct vy_index *index); /** Return max compact_priority among ranges of an index. */ int vy_index_compact_priority(struct vy_index *index); /** Add a run to the list of runs of an index. */ void vy_index_add_run(struct vy_index *index, struct vy_run *run); /** Remove a run from the list of runs of an index. */ void vy_index_remove_run(struct vy_index *index, struct vy_run *run); /** * Add a range to both the range tree and the range heap * of an index. */ void vy_index_add_range(struct vy_index *index, struct vy_range *range); /** * Remove a range from both the range tree and the range * heap of an index. 
*/ void vy_index_remove_range(struct vy_index *index, struct vy_range *range); /** Account a range to the run histogram of an index. */ void vy_index_acct_range(struct vy_index *index, struct vy_range *range); /** Unaccount a range from the run histogram of an index. */ void vy_index_unacct_range(struct vy_index *index, struct vy_range *range); /** * Allocate a new active in-memory index for an index while moving * the old one to the sealed list. Used by the dump task in order * not to bother about synchronization with concurrent insertions * while an index is being dumped. */ int vy_index_rotate_mem(struct vy_index *index); /** * Remove an in-memory tree from the sealed list of a vinyl index, * unaccount and delete it. */ void vy_index_delete_mem(struct vy_index *index, struct vy_mem *mem); /** * Split a range if it has grown too big, return true if the range * was split. Splitting is done by making slices of the runs used * by the original range, adding them to new ranges, and reflecting * the change in the metadata log, i.e. it doesn't involve heavy * operations, like writing a run file, and is done immediately. */ bool vy_index_split_range(struct vy_index *index, struct vy_range *range); /** * Coalesce a range with one or more its neighbors if it is too small, * return true if the range was coalesced. We coalesce ranges by * splicing their lists of run slices and reflecting the change in the * log. No long-term operation involving a worker thread, like writing * a new run file, is necessary, because the merge iterator can deal * with runs that intersect by LSN coexisting in the same range as long * as they do not intersect for each particular key, which is true in * case of merging key ranges. */ bool vy_index_coalesce_range(struct vy_index *index, struct vy_range *range); /** * Insert a statement into the index's in-memory tree. 
If the * region_stmt is NULL and the statement is successfully inserted * then the new lsregion statement is returned via @a region_stmt. * Either vy_index_commit_stmt() or vy_index_rollback_stmt() must * be called on success. * * @param index Index the statement is for. * @param mem In-memory tree to insert the statement into. * @param stmt Statement, allocated on malloc(). * @param region_stmt NULL or the same statement, allocated on * lsregion. * * @retval 0 Success. * @retval -1 Memory error. */ int vy_index_set(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt, const struct tuple **region_stmt); /** * Confirm that the statement stays in the index's in-memory tree. * * @param index Index the statement is for. * @param mem In-memory tree where the statement was saved. * @param stmt Statement allocated from lsregion. */ void vy_index_commit_stmt(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt); /** * Erase a statement from the index's in-memory tree. * * @param index Index to erase from. * @param mem In-memory tree where the statement was saved. * @param stmt Statement allocated from lsregion. */ void vy_index_rollback_stmt(struct vy_index *index, struct vy_mem *mem, const struct tuple *stmt); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_INDEX_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_dictionary.c0000664000000000000000000001355613306560010021313 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tuple_dictionary.h" #include "error.h" #include "diag.h" field_name_hash_f field_name_hash; #define mh_name _strnu32 struct mh_strnu32_key_t { const char *str; size_t len; uint32_t hash; }; #define mh_key_t struct mh_strnu32_key_t * struct mh_strnu32_node_t { const char *str; size_t len; uint32_t hash; uint32_t val; }; #define mh_node_t struct mh_strnu32_node_t #define mh_arg_t void * #define mh_hash(a, arg) ((a)->hash) #define mh_hash_key(a, arg) mh_hash(a, arg) #define mh_cmp(a, b, arg) ((a)->len != (b)->len || \ memcmp((a)->str, (b)->str, (a)->len)) #define mh_cmp_key(a, b, arg) mh_cmp(a, b, arg) #define MH_SOURCE 1 #include "salad/mhash.h" /* Create mh_strnu32_t hash. */ /** Free names hash and its content. */ static inline void tuple_dictionary_delete_hash(struct mh_strnu32_t *hash) { while (mh_size(hash)) { mh_int_t i = mh_first(hash); mh_strnu32_del(hash, i, NULL); } mh_strnu32_delete(hash); } /** Free tuple dictionary and its content. 
*/ static inline void tuple_dictionary_delete(struct tuple_dictionary *dict) { assert(dict->refs == 0); if (dict->hash != NULL) { tuple_dictionary_delete_hash(dict->hash); free(dict->names); } else { assert(dict->names == NULL); } free(dict); } /** * Set a new name in a dictionary. Check duplicates. Memory must * be reserved already. * @param dict Tuple dictionary. * @param name New name. * @param name_len Length of @a name. * @param fieldno Field number. * * @retval 0 Success. * @retval -1 Duplicate name error. */ static inline int tuple_dictionary_set_name(struct tuple_dictionary *dict, const char *name, uint32_t name_len, uint32_t fieldno) { assert(fieldno < dict->name_count); uint32_t name_hash = field_name_hash(name, name_len); struct mh_strnu32_key_t key = { name, name_len, name_hash }; mh_int_t rc = mh_strnu32_find(dict->hash, &key, NULL); if (rc != mh_end(dict->hash)) { diag_set(ClientError, ER_SPACE_FIELD_IS_DUPLICATE, name); return -1; } struct mh_strnu32_node_t name_node = { name, name_len, name_hash, fieldno }; rc = mh_strnu32_put(dict->hash, &name_node, NULL, NULL); /* Memory was reserved in new(). 
*/ assert(rc != mh_end(dict->hash)); (void) rc; return 0; } struct tuple_dictionary * tuple_dictionary_new(const struct field_def *fields, uint32_t field_count) { struct tuple_dictionary *dict = (struct tuple_dictionary *)calloc(1, sizeof(*dict)); if (dict == NULL) { diag_set(OutOfMemory, sizeof(*dict), "malloc", "dict"); return NULL; } dict->refs = 1; dict->name_count = field_count; if (field_count == 0) return dict; uint32_t names_offset = sizeof(dict->names[0]) * field_count; uint32_t total = names_offset; for (uint32_t i = 0; i < field_count; ++i) total += strlen(fields[i].name) + 1; dict->names = (char **) malloc(total); if (dict->names == NULL) { diag_set(OutOfMemory, total, "malloc", "dict->names"); goto err_memory; } dict->hash = mh_strnu32_new(); if (dict->hash == NULL) { diag_set(OutOfMemory, sizeof(*dict->hash), "mh_strnu32_new", "dict->hash"); goto err_hash; } if (mh_strnu32_reserve(dict->hash, field_count, NULL) != 0) { diag_set(OutOfMemory, field_count * sizeof(struct mh_strnu32_node_t), "mh_strnu32_reserve", "dict->hash"); goto err_name; } char *pos = (char *) dict->names + names_offset; for (uint32_t i = 0; i < field_count; ++i) { int len = strlen(fields[i].name); memcpy(pos, fields[i].name, len); pos[len] = 0; dict->names[i] = pos; if (tuple_dictionary_set_name(dict, pos, len, i) != 0) goto err_name; pos += len + 1; } return dict; err_name: tuple_dictionary_delete_hash(dict->hash); err_hash: free(dict->names); err_memory: free(dict); return NULL; } void tuple_dictionary_swap(struct tuple_dictionary *a, struct tuple_dictionary *b) { int a_refs = a->refs; int b_refs = b->refs; struct tuple_dictionary t = *a; *a = *b; *b = t; a->refs = a_refs; b->refs = b_refs; } void tuple_dictionary_unref(struct tuple_dictionary *dict) { assert(dict->refs > 0); if (--dict->refs == 0) tuple_dictionary_delete(dict); } void tuple_dictionary_ref(struct tuple_dictionary *dict) { ++dict->refs; } int tuple_fieldno_by_name(struct tuple_dictionary *dict, const char *name, 
uint32_t name_len, uint32_t name_hash, uint32_t *fieldno) { struct mh_strnu32_t *hash = dict->hash; if (hash == NULL) return -1; struct mh_strnu32_key_t key = {name, name_len, name_hash}; mh_int_t rc = mh_strnu32_find(hash, &key, NULL); if (rc == mh_end(hash)) return -1; *fieldno = mh_strnu32_node(hash, rc)->val; return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_hash.h0000664000000000000000000000361013306565107020101 0ustar rootroot#ifndef TARANTOOL_BOX_MEMTX_HASH_H_INCLUDED #define TARANTOOL_BOX_MEMTX_HASH_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "index.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct memtx_engine; struct light_index_core; struct memtx_hash_index { struct index base; struct light_index_core *hash_table; }; struct memtx_hash_index * memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_MEMTX_HASH_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_format.h0000664000000000000000000002575013306565107020456 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_FORMAT_H_INCLUDED #define TARANTOOL_BOX_TUPLE_FORMAT_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "key_def.h" #include "field_def.h" #include "errinj.h" #include "tuple_dictionary.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Destroy tuple format subsystem and free resourses */ void tuple_format_free(); enum { FORMAT_ID_MAX = UINT16_MAX - 1, FORMAT_ID_NIL = UINT16_MAX }; enum { FORMAT_REF_MAX = INT32_MAX}; /* * We don't pass TUPLE_INDEX_BASE around dynamically all the time, * at least hard code it so that in most cases it's a nice error * message */ enum { TUPLE_INDEX_BASE = 1 }; /* * A special value to indicate that tuple format doesn't store * an offset for a field_id. */ enum { TUPLE_OFFSET_SLOT_NIL = INT32_MAX }; struct tuple; struct tuple_format; /** Engine-specific tuple format methods. */ struct tuple_format_vtab { /** Free allocated tuple using engine-specific memory allocator. */ void (*destroy)(struct tuple_format *format, struct tuple *tuple); }; /** Tuple field meta information for tuple_format. */ struct tuple_field { /** * Field type of an indexed field. * If a field participates in at least one of space indexes * then its type is stored in this member. * If a field does not participate in an index * then UNKNOWN is stored for it. */ enum field_type type; /** * Offset slot in field map in tuple. Normally tuple * stores field map - offsets of all fields participating * in indexes. This allows quick access to most used * fields without parsing entire mspack. This member * stores position in the field map of tuple for current * field. If the field does not participate in indexes * then it has no offset in field map and INT_MAX is * stored in this member. Due to specific field map in * tuple (it is stored before tuple), the positions in * field map is negative. */ int32_t offset_slot; /** True if this field is used by an index. */ bool is_key_part; /** True, if a field can store NULL. 
*/ bool is_nullable; }; /** * @brief Tuple format * Tuple format describes how tuple is stored and information about its fields */ struct tuple_format { /** Virtual function table */ struct tuple_format_vtab vtab; /** Identifier */ uint16_t id; /** Reference counter */ int refs; /** * The number of extra bytes to reserve in tuples before * field map. * \sa struct tuple */ uint16_t extra_size; /** * Size of field map of tuple in bytes. * \sa struct tuple */ uint16_t field_map_size; /** * If not set (== 0), any tuple in the space can have any number of * fields. If set, each tuple must have exactly this number of fields. */ uint32_t exact_field_count; /** * The longest field array prefix in which the last * element is used by an index. */ uint32_t index_field_count; /** * The minimal field count that must be specified. * index_field_count <= min_field_count <= field_count. */ uint32_t min_field_count; /* Length of 'fields' array. */ uint32_t field_count; /** * Shared names storage used by all formats of a space. */ struct tuple_dictionary *dict; /* Formats of the fields */ struct tuple_field fields[0]; }; extern struct tuple_format **tuple_formats; static inline uint32_t tuple_format_id(const struct tuple_format *format) { assert(tuple_formats[format->id] == format); return format->id; } static inline struct tuple_format * tuple_format_by_id(uint32_t tuple_format_id) { return tuple_formats[tuple_format_id]; } /** Delete a format with zero ref count. */ void tuple_format_delete(struct tuple_format *format); static inline void tuple_format_ref(struct tuple_format *format) { assert((uint64_t)format->refs + 1 <= FORMAT_REF_MAX); format->refs++; } static inline void tuple_format_unref(struct tuple_format *format) { assert(format->refs >= 1); if (--format->refs == 0) tuple_format_delete(format); } /** * Allocate, construct and register a new in-memory tuple format. * @param vtab Virtual function table for specific engines. * @param keys Array of key_defs of a space. 
* @param key_count The number of keys in @a keys array. * @param extra_size Extra bytes to reserve in tuples metadata. * @param space_fields Array of fields, defined in a space format. * @param space_field_count Length of @a space_fields. * * @retval not NULL Tuple format. * @retval NULL Memory error. */ struct tuple_format * tuple_format_new(struct tuple_format_vtab *vtab, struct key_def * const *keys, uint16_t key_count, uint16_t extra_size, const struct field_def *space_fields, uint32_t space_field_count, struct tuple_dictionary *dict); /** * Check, if @a format1 can store any tuples of @a format2. For * example, if a field is not nullable in format1 and the same * field is nullable in format2, or the field type is integer * in format1 and unsigned in format2, then format1 can not store * format2 tuples. * @param format1 tuple format to check for compatibility of * @param format2 tuple format to check compatibility with * * @retval True, if @a format1 can store any tuples of @a format2. */ bool tuple_format1_can_store_format2_tuples(const struct tuple_format *format1, const struct tuple_format *format2); /** * Check that two tuple formats are identical. * @param a format a * @param b format b */ bool tuple_format_eq(const struct tuple_format *a, const struct tuple_format *b); /** * Register the duplicate of the specified format. * @param src Original format. * * @retval not NULL Success. * @retval NULL Memory or format register error. */ struct tuple_format * tuple_format_dup(struct tuple_format *src); /** * Returns the total size of tuple metadata of this format. * See @link struct tuple @endlink for explanation of tuple layout. * * @param format Tuple Format. * @returns the total size of tuple metadata */ static inline uint16_t tuple_format_meta_size(const struct tuple_format *format) { return format->extra_size + format->field_map_size; } /** * Calculate minimal field count of tuples with specified keys and * space format. 
* @param keys Array of key definitions of indexes. * @param key_count Length of @a keys. * @param space_fields Array of fields from a space format. * @param space_field_count Length of @a space_fields. * * @retval Minimal field count. */ uint32_t tuple_format_min_field_count(struct key_def * const *keys, uint16_t key_count, const struct field_def *space_fields, uint32_t space_field_count); typedef struct tuple_format box_tuple_format_t; /** \cond public */ /** * Return new in-memory tuple format based on passed key definitions. * * \param keys array of keys defined for the format * \key_count count of keys * \retval new tuple format if success * \retval NULL for error */ box_tuple_format_t * box_tuple_format_new(struct key_def **keys, uint16_t key_count); /** * Increment tuple format ref count. * * \param tuple_format the tuple format to ref */ void box_tuple_format_ref(box_tuple_format_t *format); /** * Decrement tuple format ref count. * * \param tuple_format the tuple format to unref */ void box_tuple_format_unref(box_tuple_format_t *format); /** \endcond public */ /** * Fill the field map of tuple with field offsets. * @param format Tuple format. * @param field_map A pointer behind the last element of the field * map. * @param tuple MessagePack array. * * @retval 0 Success. * @retval -1 Format error. * +-------------------+ * Result: | offN | ... | off1 | * +-------------------+ * ^ * field_map * tuple + off_i = indexed_field_i; */ int tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map, const char *tuple); /** * Get a field at the specific position in this MessagePack array. * Returns a pointer to MessagePack data. 
* @param format tuple format * @param tuple a pointer to MessagePack array * @param field_map a pointer to the LAST element of field map * @param field_no the index of field to return * * @returns field data if field exists or NULL * @sa tuple_init_field_map() */ static inline const char * tuple_field_raw(const struct tuple_format *format, const char *tuple, const uint32_t *field_map, uint32_t field_no) { if (likely(field_no < format->index_field_count)) { /* Indexed field */ if (field_no == 0) { mp_decode_array(&tuple); return tuple; } int32_t offset_slot = format->fields[field_no].offset_slot; if (offset_slot != TUPLE_OFFSET_SLOT_NIL) { if (field_map[offset_slot] != 0) return tuple + field_map[offset_slot]; else return NULL; } } ERROR_INJECT(ERRINJ_TUPLE_FIELD, return NULL); uint32_t field_count = mp_decode_array(&tuple); if (unlikely(field_no >= field_count)) return NULL; for (uint32_t k = 0; k < field_no; k++) mp_next(&tuple); return tuple; } /** * Get tuple field by its name. * @param format Tuple format. * @param tuple MessagePack tuple's body. * @param field_map Tuple field map. * @param name Field name. * @param name_len Length of @a name. * @param name_hash Hash of @a name. * * @retval not NULL MessagePack field. * @retval NULL No field with @a name. 
*/ static inline const char * tuple_field_raw_by_name(struct tuple_format *format, const char *tuple, const uint32_t *field_map, const char *name, uint32_t name_len, uint32_t name_hash) { uint32_t fieldno; if (tuple_fieldno_by_name(format->dict, name, name_len, name_hash, &fieldno) != 0) return NULL; return tuple_field_raw(format, tuple, field_map, fieldno); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* #ifndef TARANTOOL_BOX_TUPLE_FORMAT_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/xstream.h0000664000000000000000000000413613306560010017417 0ustar rootroot#ifndef TARANTOOL_XSTREAM_H_INCLUDED #define TARANTOOL_XSTREAM_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */
#include "diag.h"

#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */

struct xrow_header;
struct xstream;

/** Callback invoked for every row pushed into a stream. */
typedef void (*xstream_write_f)(struct xstream *, struct xrow_header *);

/** A minimal row sink: nothing but a single write callback. */
struct xstream {
	xstream_write_f write;
};

/** Initialize @a xstream with the given write callback. */
static inline void
xstream_create(struct xstream *xstream, xstream_write_f write)
{
	xstream->write = write;
}

/**
 * Push @a row into @a stream.
 * Returns 0 on success; on failure a non-zero value is returned
 * and the diagnostics area is set (see xstream_write_xc()).
 */
int
xstream_write(struct xstream *stream, struct xrow_header *row);

#if defined(__cplusplus)
} /* extern C */

/** C++ wrapper: raise the pending diagnostics on failure. */
static inline void
xstream_write_xc(struct xstream *stream, struct xrow_header *row)
{
	if (xstream_write(stream, row) != 0)
		diag_raise();
}
#endif /* defined(__cplusplus) */

#endif /* TARANTOOL_XSTREAM_H_INCLUDED */
tarantool_1.9.1.26.g63eb81e3c/src/box/xrow.c0000664000000000000000000007321013306565107016741 0ustar rootroot/*
 * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the
 *    following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "xrow.h" #include #include #include #include "third_party/base64.h" #include "fiber.h" #include "version.h" #include "error.h" #include "vclock.h" #include "scramble.h" #include "iproto_constants.h" int xrow_header_decode(struct xrow_header *header, const char **pos, const char *end) { memset(header, 0, sizeof(struct xrow_header)); const char *tmp = *pos; if (mp_check(&tmp, end) != 0) { error: diag_set(ClientError, ER_INVALID_MSGPACK, "packet header"); return -1; } if (mp_typeof(**pos) != MP_MAP) goto error; uint32_t size = mp_decode_map(pos); for (uint32_t i = 0; i < size; i++) { if (mp_typeof(**pos) != MP_UINT) goto error; uint64_t key = mp_decode_uint(pos); if (key >= IPROTO_KEY_MAX || iproto_key_type[key] != mp_typeof(**pos)) goto error; switch (key) { case IPROTO_REQUEST_TYPE: header->type = mp_decode_uint(pos); break; case IPROTO_SYNC: header->sync = mp_decode_uint(pos); break; case IPROTO_REPLICA_ID: header->replica_id = mp_decode_uint(pos); break; case IPROTO_LSN: header->lsn = mp_decode_uint(pos); break; case IPROTO_TIMESTAMP: header->tm = mp_decode_double(pos); break; case IPROTO_SCHEMA_VERSION: header->schema_version = mp_decode_uint(pos); break; default: /* unknown header */ mp_next(pos); } } assert(*pos <= end); if (*pos < end) { const char *body = *pos; if (mp_check(pos, end)) { diag_set(ClientError, ER_INVALID_MSGPACK, "packet body"); return -1; } header->bodycnt = 1; header->body[0].iov_base = (void *) body; header->body[0].iov_len = 
*pos - body; } return 0; } /** * @pre pos points at a valid msgpack */ static inline int xrow_decode_uuid(const char **pos, struct tt_uuid *out) { if (mp_typeof(**pos) != MP_STR) { error: diag_set(ClientError, ER_INVALID_MSGPACK, "UUID"); return -1; } uint32_t len = mp_decode_strl(pos); if (tt_uuid_from_strl(*pos, len, out) != 0) goto error; *pos += len; return 0; } int xrow_header_encode(const struct xrow_header *header, uint64_t sync, struct iovec *out, size_t fixheader_len) { /* allocate memory for sign + header */ out->iov_base = region_alloc(&fiber()->gc, XROW_HEADER_LEN_MAX + fixheader_len); if (out->iov_base == NULL) { diag_set(OutOfMemory, XROW_HEADER_LEN_MAX + fixheader_len, "gc arena", "xrow header encode"); return -1; } char *data = (char *) out->iov_base + fixheader_len; /* Header */ char *d = data + 1; /* Skip 1 byte for MP_MAP */ int map_size = 0; if (true) { d = mp_encode_uint(d, IPROTO_REQUEST_TYPE); d = mp_encode_uint(d, header->type); map_size++; } if (sync) { d = mp_encode_uint(d, IPROTO_SYNC); d = mp_encode_uint(d, sync); map_size++; } if (header->replica_id) { d = mp_encode_uint(d, IPROTO_REPLICA_ID); d = mp_encode_uint(d, header->replica_id); map_size++; } if (header->lsn) { d = mp_encode_uint(d, IPROTO_LSN); d = mp_encode_uint(d, header->lsn); map_size++; } if (header->tm) { d = mp_encode_uint(d, IPROTO_TIMESTAMP); d = mp_encode_double(d, header->tm); map_size++; } assert(d <= data + XROW_HEADER_LEN_MAX); mp_encode_map(data, map_size); out->iov_len = d - (char *) out->iov_base; out++; memcpy(out, header->body, sizeof(*out) * header->bodycnt); assert(1 + header->bodycnt <= XROW_IOVMAX); return 1 + header->bodycnt; /* new iovcnt */ } static inline char * xrow_encode_uuid(char *pos, const struct tt_uuid *in) { return mp_encode_str(pos, tt_uuid_str(in), UUID_STR_LEN); } /* m_ - msgpack meta, k_ - key, v_ - value */ struct PACKED iproto_header_bin { uint8_t m_len; /* MP_UINT32 */ uint32_t v_len; /* length */ uint8_t m_header; /* MP_MAP */ uint8_t 
k_code; /* IPROTO_REQUEST_TYPE */ uint8_t m_code; /* MP_UINT32 */ uint32_t v_code; /* response status */ uint8_t k_sync; /* IPROTO_SYNC */ uint8_t m_sync; /* MP_UINT64 */ uint64_t v_sync; /* sync */ uint8_t k_schema_version; /* IPROTO_SCHEMA_VERSION */ uint8_t m_schema_version; /* MP_UINT32 */ uint32_t v_schema_version; /* schema_version */ }; static_assert(sizeof(struct iproto_header_bin) == IPROTO_HEADER_LEN, "sizeof(iproto_header_bin)"); void iproto_header_encode(char *out, uint32_t type, uint64_t sync, uint32_t schema_version, uint32_t body_length) { struct iproto_header_bin header; header.m_len = 0xce; /* 5 - sizeof(m_len and v_len fields). */ header.v_len = mp_bswap_u32(sizeof(header) + body_length - 5); header.m_header = 0x83; header.k_code = IPROTO_REQUEST_TYPE; header.m_code = 0xce; header.v_code = mp_bswap_u32(type); header.k_sync = IPROTO_SYNC; header.m_sync = 0xcf; header.v_sync = mp_bswap_u64(sync); header.k_schema_version = IPROTO_SCHEMA_VERSION; header.m_schema_version = 0xce; header.v_schema_version = mp_bswap_u32(schema_version); memcpy(out, &header, sizeof(header)); } struct PACKED iproto_body_bin { uint8_t m_body; /* MP_MAP */ uint8_t k_data; /* IPROTO_DATA or IPROTO_ERROR */ uint8_t m_data; /* MP_STR or MP_ARRAY */ uint32_t v_data_len; /* string length of array size */ }; static const struct iproto_body_bin iproto_body_bin = { 0x81, IPROTO_DATA, 0xdd, 0 }; static const struct iproto_body_bin iproto_error_bin = { 0x81, IPROTO_ERROR, 0xdb, 0 }; /** Return a 4-byte numeric error code, with status flags. 
*/ static inline uint32_t iproto_encode_error(uint32_t error) { return error | IPROTO_TYPE_ERROR; } int iproto_reply_ok(struct obuf *out, uint64_t sync, uint32_t schema_version) { char *buf = (char *)obuf_alloc(out, IPROTO_HEADER_LEN + 1); if (buf == NULL) { diag_set(OutOfMemory, IPROTO_HEADER_LEN + 1, "obuf_alloc", "buf"); return -1; } iproto_header_encode(buf, IPROTO_OK, sync, schema_version, 1); buf[IPROTO_HEADER_LEN] = 0x80; /* empty MessagePack Map */ return 0; } int iproto_reply_request_vote(struct obuf *out, uint64_t sync, uint32_t schema_version, const struct vclock *vclock, bool read_only) { uint32_t replicaset_size = vclock_size(vclock); size_t max_size = IPROTO_HEADER_LEN + mp_sizeof_map(2) + mp_sizeof_uint(UINT32_MAX) + mp_sizeof_map(replicaset_size) + replicaset_size * (mp_sizeof_uint(UINT32_MAX) + mp_sizeof_uint(UINT64_MAX)) + mp_sizeof_uint(UINT32_MAX) + mp_sizeof_bool(true); char *buf = obuf_reserve(out, max_size); if (buf == NULL) { diag_set(OutOfMemory, max_size, "obuf_alloc", "buf"); return -1; } char *data = buf + IPROTO_HEADER_LEN; data = mp_encode_map(data, 2); data = mp_encode_uint(data, IPROTO_SERVER_IS_RO); data = mp_encode_bool(data, read_only); data = mp_encode_uint(data, IPROTO_VCLOCK); data = mp_encode_map(data, replicaset_size); struct vclock_iterator it; vclock_iterator_init(&it, vclock); vclock_foreach(&it, replica) { data = mp_encode_uint(data, replica.id); data = mp_encode_uint(data, replica.lsn); } size_t size = data - buf; assert(size <= max_size); iproto_header_encode(buf, IPROTO_OK, sync, schema_version, size - IPROTO_HEADER_LEN); char *ptr = obuf_alloc(out, size); assert(ptr == buf); return 0; } int iproto_reply_error(struct obuf *out, const struct error *e, uint64_t sync, uint32_t schema_version) { uint32_t msg_len = strlen(e->errmsg); uint32_t errcode = box_error_code(e); struct iproto_body_bin body = iproto_error_bin; char *header = (char *)obuf_alloc(out, IPROTO_HEADER_LEN); if (header == NULL) return -1; 
iproto_header_encode(header, iproto_encode_error(errcode), sync, schema_version, sizeof(body) + msg_len); body.v_data_len = mp_bswap_u32(msg_len); /* Malformed packet appears to be a lesser evil than abort. */ return obuf_dup(out, &body, sizeof(body)) != sizeof(body) || obuf_dup(out, e->errmsg, msg_len) != msg_len ? -1 : 0; } void iproto_write_error(int fd, const struct error *e, uint32_t schema_version, uint64_t sync) { uint32_t msg_len = strlen(e->errmsg); uint32_t errcode = box_error_code(e); char header[IPROTO_HEADER_LEN]; struct iproto_body_bin body = iproto_error_bin; iproto_header_encode(header, iproto_encode_error(errcode), sync, schema_version, sizeof(body) + msg_len); body.v_data_len = mp_bswap_u32(msg_len); (void) write(fd, header, sizeof(header)); (void) write(fd, &body, sizeof(body)); (void) write(fd, e->errmsg, msg_len); } enum { SVP_SIZE = IPROTO_HEADER_LEN + sizeof(iproto_body_bin) }; int iproto_prepare_select(struct obuf *buf, struct obuf_svp *svp) { /** * Reserve memory before taking a savepoint. * This ensures that we get a contiguous chunk of memory * and the savepoint is pointing at the beginning of it. 
*/ void *ptr = obuf_reserve(buf, SVP_SIZE); if (ptr == NULL) { diag_set(OutOfMemory, SVP_SIZE, "obuf", "reserve"); return -1; } *svp = obuf_create_svp(buf); ptr = obuf_alloc(buf, SVP_SIZE); assert(ptr != NULL); return 0; } void iproto_reply_select(struct obuf *buf, struct obuf_svp *svp, uint64_t sync, uint32_t schema_version, uint32_t count) { char *pos = (char *) obuf_svp_to_ptr(buf, svp); iproto_header_encode(pos, IPROTO_OK, sync, schema_version, obuf_size(buf) - svp->used - IPROTO_HEADER_LEN); struct iproto_body_bin body = iproto_body_bin; body.v_data_len = mp_bswap_u32(count); memcpy(pos + IPROTO_HEADER_LEN, &body, sizeof(body)); } int xrow_decode_dml(struct xrow_header *row, struct request *request, uint64_t key_map) { if (row->bodycnt == 0) { diag_set(ClientError, ER_INVALID_MSGPACK, "missing request body"); return 1; } assert(row->bodycnt == 1); const char *data = (const char *) row->body[0].iov_base; const char *end = data + row->body[0].iov_len; assert((end - data) > 0); if (mp_typeof(*data) != MP_MAP || mp_check_map(data, end) > 0) { error: diag_set(ClientError, ER_INVALID_MSGPACK, "packet body"); return -1; } memset(request, 0, sizeof(*request)); request->header = row; request->type = row->type; uint32_t size = mp_decode_map(&data); for (uint32_t i = 0; i < size; i++) { if (! 
iproto_dml_body_has_key(data, end)) { if (mp_check(&data, end) != 0 || mp_check(&data, end) != 0) goto error; continue; } uint64_t key = mp_decode_uint(&data); const char *value = data; if (mp_check(&data, end) || key >= IPROTO_KEY_MAX || iproto_key_type[key] != mp_typeof(*value)) goto error; key_map &= ~iproto_key_bit(key); switch (key) { case IPROTO_SPACE_ID: request->space_id = mp_decode_uint(&value); break; case IPROTO_INDEX_ID: request->index_id = mp_decode_uint(&value); break; case IPROTO_OFFSET: request->offset = mp_decode_uint(&value); break; case IPROTO_INDEX_BASE: request->index_base = mp_decode_uint(&value); break; case IPROTO_LIMIT: request->limit = mp_decode_uint(&value); break; case IPROTO_ITERATOR: request->iterator = mp_decode_uint(&value); break; case IPROTO_TUPLE: request->tuple = value; request->tuple_end = data; break; case IPROTO_KEY: request->key = value; request->key_end = data; break; case IPROTO_OPS: request->ops = value; request->ops_end = data; break; default: break; } } if (data != end) { diag_set(ClientError, ER_INVALID_MSGPACK, "packet end"); return -1; } if (key_map) { enum iproto_key key = (enum iproto_key) bit_ctz_u64(key_map); diag_set(ClientError, ER_MISSING_REQUEST_FIELD, iproto_key_name(key)); return -1; } return 0; } const char * request_str(const struct request *request) { char *buf = tt_static_buf(); char *end = buf + TT_STATIC_BUF_LEN; char *pos = buf; pos += snprintf(pos, end - pos, "{type: '%s', lsn: %lld, "\ "space_id: %u, index_id: %u", iproto_type_name(request->type), (long long) request->header->lsn, (unsigned) request->space_id, (unsigned) request->index_id); if (request->key != NULL) { pos += snprintf(pos, end - pos, ", key: "); pos += mp_snprint(pos, end - pos, request->key); } if (request->tuple != NULL) { pos += snprintf(pos, end - pos, ", tuple: "); pos += mp_snprint(pos, end - pos, request->tuple); } if (request->ops != NULL) { pos += snprintf(pos, end - pos, ", ops: "); pos += mp_snprint(pos, end - pos, 
request->ops); } pos += snprintf(pos, end - pos, "}"); return buf; } int xrow_encode_dml(const struct request *request, struct iovec *iov) { int iovcnt = 1; const int MAP_LEN_MAX = 40; uint32_t key_len = request->key_end - request->key; uint32_t ops_len = request->ops_end - request->ops; uint32_t len = MAP_LEN_MAX + key_len + ops_len; char *begin = (char *) region_alloc(&fiber()->gc, len); if (begin == NULL) { diag_set(OutOfMemory, len, "region_alloc", "begin"); return -1; } char *pos = begin + 1; /* skip 1 byte for MP_MAP */ int map_size = 0; if (request->space_id) { pos = mp_encode_uint(pos, IPROTO_SPACE_ID); pos = mp_encode_uint(pos, request->space_id); map_size++; } if (request->index_id) { pos = mp_encode_uint(pos, IPROTO_INDEX_ID); pos = mp_encode_uint(pos, request->index_id); map_size++; } if (request->index_base) { /* UPDATE/UPSERT */ pos = mp_encode_uint(pos, IPROTO_INDEX_BASE); pos = mp_encode_uint(pos, request->index_base); map_size++; } if (request->key) { pos = mp_encode_uint(pos, IPROTO_KEY); memcpy(pos, request->key, key_len); pos += key_len; map_size++; } if (request->ops) { pos = mp_encode_uint(pos, IPROTO_OPS); memcpy(pos, request->ops, ops_len); pos += ops_len; map_size++; } if (request->tuple) { pos = mp_encode_uint(pos, IPROTO_TUPLE); iov[iovcnt].iov_base = (void *) request->tuple; iov[iovcnt].iov_len = (request->tuple_end - request->tuple); iovcnt++; map_size++; } assert(pos <= begin + len); mp_encode_map(begin, map_size); iov[0].iov_base = begin; iov[0].iov_len = pos - begin; return iovcnt; } int xrow_to_iovec(const struct xrow_header *row, struct iovec *out) { assert(mp_sizeof_uint(UINT32_MAX) == 5); int iovcnt = xrow_header_encode(row, row->sync, out, 5); if (iovcnt < 0) return -1; ssize_t len = -5; for (int i = 0; i < iovcnt; i++) len += out[i].iov_len; /* Encode length */ char *data = (char *) out[0].iov_base; *(data++) = 0xce; /* MP_UINT32 */ *(uint32_t *) data = mp_bswap_u32(len); assert(iovcnt <= XROW_IOVMAX); return iovcnt; } int 
xrow_decode_call(const struct xrow_header *row, struct call_request *request) { if (row->bodycnt == 0) { diag_set(ClientError, ER_INVALID_MSGPACK, "missing request body"); return 1; } assert(row->bodycnt == 1); const char *data = (const char *) row->body[0].iov_base; const char *end = data + row->body[0].iov_len; assert((end - data) > 0); if (mp_typeof(*data) != MP_MAP || mp_check_map(data, end) > 0) { error: diag_set(ClientError, ER_INVALID_MSGPACK, "packet body"); return 1; } memset(request, 0, sizeof(*request)); request->header = row; uint32_t map_size = mp_decode_map(&data); for (uint32_t i = 0; i < map_size; ++i) { if ((end - data) < 1 || mp_typeof(*data) != MP_UINT) goto error; uint64_t key = mp_decode_uint(&data); const char *value = data; if (mp_check(&data, end) != 0) goto error; switch (key) { case IPROTO_FUNCTION_NAME: if (mp_typeof(*value) != MP_STR) goto error; request->name = value; break; case IPROTO_EXPR: if (mp_typeof(*value) != MP_STR) goto error; request->expr = value; break; case IPROTO_TUPLE: if (mp_typeof(*value) != MP_ARRAY) goto error; request->args = value; request->args_end = data; break; default: continue; /* unknown key */ } } if (data != end) { diag_set(ClientError, ER_INVALID_MSGPACK, "packet end"); return 1; } if (row->type == IPROTO_EVAL) { if (request->expr == NULL) { diag_set(ClientError, ER_MISSING_REQUEST_FIELD, iproto_key_name(IPROTO_EXPR)); return 1; } } else if (request->name == NULL) { assert(row->type == IPROTO_CALL_16 || row->type == IPROTO_CALL); diag_set(ClientError, ER_MISSING_REQUEST_FIELD, iproto_key_name(IPROTO_FUNCTION_NAME)); return 1; } if (request->args == NULL) { static const char empty_args[] = { (char)0x90 }; request->args = empty_args; request->args_end = empty_args + sizeof(empty_args); } return 0; } int xrow_decode_auth(const struct xrow_header *row, struct auth_request *request) { if (row->bodycnt == 0) { diag_set(ClientError, ER_INVALID_MSGPACK, "missing request body"); return 1; } assert(row->bodycnt == 
1); const char *data = (const char *) row->body[0].iov_base; const char *end = data + row->body[0].iov_len; assert((end - data) > 0); if (mp_typeof(*data) != MP_MAP || mp_check_map(data, end) > 0) { error: diag_set(ClientError, ER_INVALID_MSGPACK, "packet body"); return 1; } memset(request, 0, sizeof(*request)); uint32_t map_size = mp_decode_map(&data); for (uint32_t i = 0; i < map_size; ++i) { if ((end - data) < 1 || mp_typeof(*data) != MP_UINT) goto error; uint64_t key = mp_decode_uint(&data); const char *value = data; if (mp_check(&data, end) != 0) goto error; switch (key) { case IPROTO_USER_NAME: if (mp_typeof(*value) != MP_STR) goto error; request->user_name = value; break; case IPROTO_TUPLE: if (mp_typeof(*value) != MP_ARRAY) goto error; request->scramble = value; break; default: continue; /* unknown key */ } } if (data != end) { diag_set(ClientError, ER_INVALID_MSGPACK, "packet end"); return 1; } if (request->user_name == NULL) { diag_set(ClientError, ER_MISSING_REQUEST_FIELD, iproto_key_name(IPROTO_USER_NAME)); return 1; } if (request->scramble == NULL) { diag_set(ClientError, ER_MISSING_REQUEST_FIELD, iproto_key_name(IPROTO_TUPLE)); return 1; } return 0; } int xrow_encode_auth(struct xrow_header *packet, const char *salt, size_t salt_len, const char *login, size_t login_len, const char *password, size_t password_len) { assert(login != NULL); memset(packet, 0, sizeof(*packet)); size_t buf_size = XROW_BODY_LEN_MAX + login_len + SCRAMBLE_SIZE; char *buf = (char *) region_alloc(&fiber()->gc, buf_size); if (buf == NULL) { diag_set(OutOfMemory, buf_size, "region_alloc", "buf"); return -1; } char *d = buf; d = mp_encode_map(d, password != NULL ? 
2 : 1); d = mp_encode_uint(d, IPROTO_USER_NAME); d = mp_encode_str(d, login, login_len); if (password != NULL) { /* password can be omitted */ assert(salt_len >= SCRAMBLE_SIZE); /* greetingbuf_decode */ (void) salt_len; char scramble[SCRAMBLE_SIZE]; scramble_prepare(scramble, salt, password, password_len); d = mp_encode_uint(d, IPROTO_TUPLE); d = mp_encode_array(d, 2); d = mp_encode_str(d, "chap-sha1", strlen("chap-sha1")); d = mp_encode_str(d, scramble, SCRAMBLE_SIZE); } assert(d <= buf + buf_size); packet->body[0].iov_base = buf; packet->body[0].iov_len = (d - buf); packet->bodycnt = 1; packet->type = IPROTO_AUTH; return 0; } void xrow_decode_error(struct xrow_header *row) { uint32_t code = row->type & (IPROTO_TYPE_ERROR - 1); char error[DIAG_ERRMSG_MAX] = { 0 }; const char *pos; uint32_t map_size; if (row->bodycnt == 0) goto error; pos = (char *) row->body[0].iov_base; if (mp_check(&pos, pos + row->body[0].iov_len)) goto error; pos = (char *) row->body[0].iov_base; if (mp_typeof(*pos) != MP_MAP) goto error; map_size = mp_decode_map(&pos); for (uint32_t i = 0; i < map_size; i++) { if (mp_typeof(*pos) != MP_UINT) { mp_next(&pos); /* key */ mp_next(&pos); /* value */ continue; } uint8_t key = mp_decode_uint(&pos); if (key != IPROTO_ERROR || mp_typeof(*pos) != MP_STR) { mp_next(&pos); /* value */ continue; } uint32_t len; const char *str = mp_decode_str(&pos, &len); snprintf(error, sizeof(error), "%.*s", len, str); } error: box_error_set(__FILE__, __LINE__, code, error); } void xrow_encode_request_vote(struct xrow_header *row) { memset(row, 0, sizeof(*row)); row->type = IPROTO_REQUEST_VOTE; } int xrow_encode_subscribe(struct xrow_header *row, const struct tt_uuid *replicaset_uuid, const struct tt_uuid *instance_uuid, const struct vclock *vclock) { memset(row, 0, sizeof(*row)); uint32_t replicaset_size = vclock_size(vclock); size_t size = XROW_BODY_LEN_MAX + replicaset_size * (mp_sizeof_uint(UINT32_MAX) + mp_sizeof_uint(UINT64_MAX)); char *buf = (char *) 
region_alloc(&fiber()->gc, size); if (buf == NULL) { diag_set(OutOfMemory, size, "region_alloc", "buf"); return -1; } char *data = buf; data = mp_encode_map(data, 4); data = mp_encode_uint(data, IPROTO_CLUSTER_UUID); data = xrow_encode_uuid(data, replicaset_uuid); data = mp_encode_uint(data, IPROTO_INSTANCE_UUID); data = xrow_encode_uuid(data, instance_uuid); data = mp_encode_uint(data, IPROTO_VCLOCK); data = mp_encode_map(data, replicaset_size); struct vclock_iterator it; vclock_iterator_init(&it, vclock); vclock_foreach(&it, replica) { data = mp_encode_uint(data, replica.id); data = mp_encode_uint(data, replica.lsn); } data = mp_encode_uint(data, IPROTO_SERVER_VERSION); data = mp_encode_uint(data, tarantool_version_id()); assert(data <= buf + size); row->body[0].iov_base = buf; row->body[0].iov_len = (data - buf); row->bodycnt = 1; row->type = IPROTO_SUBSCRIBE; return 0; } int xrow_decode_subscribe(struct xrow_header *row, struct tt_uuid *replicaset_uuid, struct tt_uuid *instance_uuid, struct vclock *vclock, uint32_t *version_id, bool *read_only) { if (row->bodycnt == 0) { diag_set(ClientError, ER_INVALID_MSGPACK, "request body"); return -1; } assert(row->bodycnt == 1); const char *data = (const char *) row->body[0].iov_base; const char *end = data + row->body[0].iov_len; const char *d = data; if (mp_check(&d, end) != 0 || mp_typeof(*data) != MP_MAP) { diag_set(ClientError, ER_INVALID_MSGPACK, "request body"); return -1; } /* For backward compatibility initialize read-only with false. 
*/ if (read_only) *read_only = false; const char *lsnmap = NULL; d = data; uint32_t map_size = mp_decode_map(&d); for (uint32_t i = 0; i < map_size; i++) { if (mp_typeof(*d) != MP_UINT) { mp_next(&d); /* key */ mp_next(&d); /* value */ continue; } uint8_t key = mp_decode_uint(&d); switch (key) { case IPROTO_CLUSTER_UUID: if (replicaset_uuid == NULL) goto skip; if (xrow_decode_uuid(&d, replicaset_uuid) != 0) return -1; break; case IPROTO_INSTANCE_UUID: if (instance_uuid == NULL) goto skip; if (xrow_decode_uuid(&d, instance_uuid) != 0) return -1; break; case IPROTO_VCLOCK: if (vclock == NULL) goto skip; if (mp_typeof(*d) != MP_MAP) { diag_set(ClientError, ER_INVALID_MSGPACK, "invalid VCLOCK"); return -1; } lsnmap = d; mp_next(&d); break; case IPROTO_SERVER_VERSION: if (version_id == NULL) goto skip; if (mp_typeof(*d) != MP_UINT) { diag_set(ClientError, ER_INVALID_MSGPACK, "invalid VERSION"); return -1; } *version_id = mp_decode_uint(&d); break; case IPROTO_SERVER_IS_RO: if (read_only == NULL) goto skip; if (mp_typeof(*d) != MP_BOOL) { diag_set(ClientError, ER_INVALID_MSGPACK, "invalid STATUS"); return -1; } *read_only = mp_decode_bool(&d); break; default: skip: mp_next(&d); /* value */ } } if (lsnmap == NULL) return 0; /* Check & save LSNMAP */ d = lsnmap; uint32_t lsnmap_size = mp_decode_map(&d); for (uint32_t i = 0; i < lsnmap_size; i++) { if (mp_typeof(*d) != MP_UINT) { map_error: diag_set(ClientError, ER_INVALID_MSGPACK, "VCLOCK"); return -1; } uint32_t id = mp_decode_uint(&d); if (mp_typeof(*d) != MP_UINT) goto map_error; int64_t lsn = (int64_t) mp_decode_uint(&d); if (lsn > 0) vclock_follow(vclock, id, lsn); } return 0; } int xrow_encode_join(struct xrow_header *row, const struct tt_uuid *instance_uuid) { memset(row, 0, sizeof(*row)); size_t size = 64; char *buf = (char *) region_alloc(&fiber()->gc, size); if (buf == NULL) { diag_set(OutOfMemory, size, "region_alloc", "buf"); return -1; } char *data = buf; data = mp_encode_map(data, 1); data = 
mp_encode_uint(data, IPROTO_INSTANCE_UUID); /* Greet the remote replica with our replica UUID */ data = xrow_encode_uuid(data, instance_uuid); assert(data <= buf + size); row->body[0].iov_base = buf; row->body[0].iov_len = (data - buf); row->bodycnt = 1; row->type = IPROTO_JOIN; return 0; } int xrow_encode_vclock(struct xrow_header *row, const struct vclock *vclock) { memset(row, 0, sizeof(*row)); /* Add vclock to response body */ uint32_t replicaset_size = vclock_size(vclock); size_t size = 8 + replicaset_size * (mp_sizeof_uint(UINT32_MAX) + mp_sizeof_uint(UINT64_MAX)); char *buf = (char *) region_alloc(&fiber()->gc, size); if (buf == NULL) { diag_set(OutOfMemory, size, "region_alloc", "buf"); return -1; } char *data = buf; data = mp_encode_map(data, 1); data = mp_encode_uint(data, IPROTO_VCLOCK); data = mp_encode_map(data, replicaset_size); struct vclock_iterator it; vclock_iterator_init(&it, vclock); vclock_foreach(&it, replica) { data = mp_encode_uint(data, replica.id); data = mp_encode_uint(data, replica.lsn); } assert(data <= buf + size); row->body[0].iov_base = buf; row->body[0].iov_len = (data - buf); row->bodycnt = 1; row->type = IPROTO_OK; return 0; } void xrow_encode_timestamp(struct xrow_header *row, uint32_t replica_id, double tm) { memset(row, 0, sizeof(*row)); row->type = IPROTO_OK; row->replica_id = replica_id; row->tm = tm; } void greeting_encode(char *greetingbuf, uint32_t version_id, const struct tt_uuid *uuid, const char *salt, uint32_t salt_len) { int h = IPROTO_GREETING_SIZE / 2; int r = snprintf(greetingbuf, h + 1, "Tarantool %u.%u.%u (Binary) ", version_id_major(version_id), version_id_minor(version_id), version_id_patch(version_id)); assert(r + UUID_STR_LEN < h); tt_uuid_to_string(uuid, greetingbuf + r); r += UUID_STR_LEN; memset(greetingbuf + r, ' ', h - r - 1); greetingbuf[h - 1] = '\n'; assert(base64_bufsize(salt_len, 0) + 1 < h); r = base64_encode(salt, salt_len, greetingbuf + h, h - 1, 0); assert(r < h); memset(greetingbuf + h + r, ' 
', h - r - 1); greetingbuf[IPROTO_GREETING_SIZE - 1] = '\n'; } int greeting_decode(const char *greetingbuf, struct greeting *greeting) { /* Check basic structure - magic string and \n delimiters */ if (memcmp(greetingbuf, "Tarantool ", strlen("Tarantool ")) != 0 || greetingbuf[IPROTO_GREETING_SIZE / 2 - 1] != '\n' || greetingbuf[IPROTO_GREETING_SIZE - 1] != '\n') return -1; memset(greeting, 0, sizeof(*greeting)); int h = IPROTO_GREETING_SIZE / 2; const char *pos = greetingbuf + strlen("Tarantool "); const char *end = greetingbuf + h; for (; pos < end && *pos == ' '; ++pos); /* skip spaces */ /* Extract a version string - a string until ' ' */ char version[20]; const char *vend = (const char *) memchr(pos, ' ', end - pos); if (vend == NULL || (size_t)(vend - pos) >= sizeof(version)) return -1; memcpy(version, pos, vend - pos); version[vend - pos] = '\0'; pos = vend + 1; for (; pos < end && *pos == ' '; ++pos); /* skip spaces */ /* Parse a version string - 1.6.6-83-gc6b2129 or 1.6.7 */ unsigned major, minor, patch; if (sscanf(version, "%u.%u.%u", &major, &minor, &patch) != 3) return -1; greeting->version_id = version_id(major, minor, patch); if (*pos == '(') { /* Extract protocol name - a string between (parentheses) */ vend = (const char *) memchr(pos + 1, ')', end - pos); if (!vend || (vend - pos - 1) > GREETING_PROTOCOL_LEN_MAX) return -1; memcpy(greeting->protocol, pos + 1, vend - pos - 1); greeting->protocol[vend - pos - 1] = '\0'; pos = vend + 1; /* Parse protocol name - Binary or Lua console. 
*/ if (strcmp(greeting->protocol, "Binary") != 0) return 0; if (greeting->version_id >= version_id(1, 6, 7)) { if (*(pos++) != ' ') return -1; for (; pos < end && *pos == ' '; ++pos); /* spaces */ if (end - pos < UUID_STR_LEN) return -1; if (tt_uuid_from_strl(pos, UUID_STR_LEN, &greeting->uuid)) return -1; } } else if (greeting->version_id < version_id(1, 6, 7)) { /* Tarantool < 1.6.7 doesn't add "(Binary)" to greeting */ strcpy(greeting->protocol, "Binary"); } else { return -1; /* Sorry, don't want to parse this greeting */ } /* Decode salt for binary protocol */ greeting->salt_len = base64_decode(greetingbuf + h, h - 1, greeting->salt, sizeof(greeting->salt)); if (greeting->salt_len < SCRAMBLE_SIZE || greeting->salt_len >= (uint32_t)h) return -1; return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_update.h0000664000000000000000000000630413306560010020426 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_UPDATE_H_INCLUDED #define TARANTOOL_BOX_TUPLE_UPDATE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* NOTE(review): header names inside <> appear stripped in this dump. */
#include
#include
#include "trivia/util.h"

#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */

enum {
	/** A limit on how many operations a single UPDATE can have. */
	BOX_UPDATE_OP_CNT_MAX = 4000,
};

/** Allocator callback used by the update/upsert machinery. */
typedef void *(*tuple_update_alloc_func)(void *, size_t);

/**
 * Validate an UPDATE operations array [@a expr, @a expr_end)
 * without applying it.
 * NOTE(review): presumably returns 0 when the expression is
 * well-formed and non-zero otherwise - confirm against the
 * implementation.
 */
int
tuple_update_check_ops(tuple_update_alloc_func alloc, void *alloc_ctx,
		       const char *expr, const char *expr_end, int index_base);

/**
 * Apply UPDATE operations [@a expr, @a expr_end) to tuple data
 * [@a old_data, @a old_data_end); the size of the result is
 * stored into @a p_new_size, the affected-columns mask into
 * @a column_mask (judging by the parameter names - verify
 * against the implementation).
 */
const char *
tuple_update_execute(tuple_update_alloc_func alloc, void *alloc_ctx,
		     const char *expr, const char *expr_end,
		     const char *old_data, const char *old_data_end,
		     uint32_t *p_new_size, int index_base,
		     uint64_t *column_mask);

/**
 * UPSERT counterpart of tuple_update_execute(); additionally
 * takes @a suppress_error to silence per-operation errors.
 */
const char *
tuple_upsert_execute(tuple_update_alloc_func alloc, void *alloc_ctx,
		     const char *expr, const char *expr_end,
		     const char *old_data, const char *old_data_end,
		     uint32_t *p_new_size, int index_base,
		     bool suppress_error, uint64_t *column_mask);

/**
 * Try to merge two update/upsert expressions to an equivalent one.
 * Resulting expression is allocated on given allocator.
 * Due to optimization reasons the resulting expression
 * is located inside a bigger allocation. Some hidden internal
 * allocations are also made in this function.
 * Thus the only allocator that can be used in this function
 * is the region allocator.
 * If it isn't possible to merge expressions NULL is returned.
*/ const char * tuple_upsert_squash(tuple_update_alloc_func alloc, void *alloc_ctx, const char *expr1, const char *expr1_end, const char *expr2, const char *expr2_end, size_t *result_size, int index_base); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TUPLE_UPDATE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/schema_def.h0000664000000000000000000001376313306565107020034 0ustar rootroot#ifndef TARANTOOL_BOX_SCHEMA_DEF_H_INCLUDED #define TARANTOOL_BOX_SCHEMA_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trivia/util.h" #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum { BOX_ENGINE_MAX = 3, /* + 1 to the actual number of engines */ BOX_SPACE_MAX = INT32_MAX, BOX_FUNCTION_MAX = 32000, BOX_INDEX_MAX = 128, BOX_NAME_MAX = 65000, BOX_INVALID_NAME_MAX = 64, ENGINE_NAME_MAX = 16, FIELD_TYPE_NAME_MAX = 16, GRANT_NAME_MAX = 16, BOX_FIELD_MAX = INT32_MAX, BOX_USER_MAX = 32, /** * A fairly arbitrary limit which is still necessary * to keep tuple_format object small. */ BOX_INDEX_FIELD_MAX = INT16_MAX, /** Yet another arbitrary limit which simply needs to * exist. */ BOX_INDEX_PART_MAX = UINT8_MAX }; static_assert(BOX_INVALID_NAME_MAX <= BOX_NAME_MAX, "invalid name max is less than name max"); /** \cond public */ enum { /** Start of the reserved range of system spaces. */ BOX_SYSTEM_ID_MIN = 256, /** Space id of _schema. */ BOX_SCHEMA_ID = 272, /** Space id of _collation. */ BOX_COLLATION_ID = 276, /** Space id of _space. */ BOX_SPACE_ID = 280, /** Space id of _vspace view. */ BOX_VSPACE_ID = 281, /** Space id of _sequence. */ BOX_SEQUENCE_ID = 284, /** Space id of _sequence_data. */ BOX_SEQUENCE_DATA_ID = 285, /** Space id of _index. */ BOX_INDEX_ID = 288, /** Space id of _vindex view. */ BOX_VINDEX_ID = 289, /** Space id of _func. */ BOX_FUNC_ID = 296, /** Space id of _vfunc view. */ BOX_VFUNC_ID = 297, /** Space id of _user. */ BOX_USER_ID = 304, /** Space id of _vuser view. */ BOX_VUSER_ID = 305, /** Space id of _priv. */ BOX_PRIV_ID = 312, /** Space id of _vpriv view. */ BOX_VPRIV_ID = 313, /** Space id of _cluster. */ BOX_CLUSTER_ID = 320, /** Space id of _truncate. */ BOX_TRUNCATE_ID = 330, /** Space id of _space_sequence. */ BOX_SPACE_SEQUENCE_ID = 340, /** End of the reserved range of system spaces. */ BOX_SYSTEM_ID_MAX = 511, BOX_ID_NIL = 2147483647 }; /** \endcond public */ /** _space fields. 
*/ enum { BOX_SPACE_FIELD_ID = 0, BOX_SPACE_FIELD_UID = 1, BOX_SPACE_FIELD_NAME = 2, BOX_SPACE_FIELD_ENGINE = 3, BOX_SPACE_FIELD_FIELD_COUNT = 4, BOX_SPACE_FIELD_OPTS = 5, BOX_SPACE_FIELD_FORMAT = 6, }; /** _index fields. */ enum { BOX_INDEX_FIELD_SPACE_ID = 0, BOX_INDEX_FIELD_ID = 1, BOX_INDEX_FIELD_NAME = 2, BOX_INDEX_FIELD_TYPE = 3, BOX_INDEX_FIELD_OPTS = 4, BOX_INDEX_FIELD_IS_UNIQUE_165 = 4, BOX_INDEX_FIELD_PARTS = 5, BOX_INDEX_FIELD_PART_COUNT_165 = 5, BOX_INDEX_FIELD_PARTS_165 = 6, }; /** _user fields. */ enum { BOX_USER_FIELD_ID = 0, BOX_USER_FIELD_UID = 1, BOX_USER_FIELD_NAME = 2, BOX_USER_FIELD_TYPE = 3, BOX_USER_FIELD_AUTH_MECH_LIST = 4, }; /** _priv fields. */ enum { BOX_PRIV_FIELD_ID = 0, BOX_PRIV_FIELD_UID = 1, BOX_PRIV_FIELD_OBJECT_TYPE = 2, BOX_PRIV_FIELD_OBJECT_ID = 3, BOX_PRIV_FIELD_ACCESS = 4, }; /** _func fields. */ enum { BOX_FUNC_FIELD_ID = 0, BOX_FUNC_FIELD_UID = 1, BOX_FUNC_FIELD_NAME = 2, BOX_FUNC_FIELD_SETUID = 3, BOX_FUNC_FIELD_LANGUAGE = 4, }; /** _collation fields. */ enum { BOX_COLLATION_FIELD_ID = 0, BOX_COLLATION_FIELD_NAME = 1, BOX_COLLATION_FIELD_UID = 2, BOX_COLLATION_FIELD_TYPE = 3, BOX_COLLATION_FIELD_LOCALE = 4, BOX_COLLATION_FIELD_OPTIONS = 5, }; /** _schema fields. */ enum { BOX_SCHEMA_FIELD_KEY = 0, }; /** _cluster fields. */ enum { BOX_CLUSTER_FIELD_ID = 0, BOX_CLUSTER_FIELD_UUID = 1, }; /** _truncate fields. */ enum { BOX_TRUNCATE_FIELD_SPACE_ID = 0, BOX_TRUNCATE_FIELD_COUNT = 1, }; /** _sequence fields. */ enum { BOX_SEQUENCE_FIELD_ID = 0, BOX_SEQUENCE_FIELD_UID = 1, BOX_SEQUENCE_FIELD_NAME = 2, BOX_SEQUENCE_FIELD_STEP = 3, BOX_SEQUENCE_FIELD_MIN = 4, BOX_SEQUENCE_FIELD_MAX = 5, BOX_SEQUENCE_FIELD_START = 6, BOX_SEQUENCE_FIELD_CACHE = 7, BOX_SEQUENCE_FIELD_CYCLE = 8, }; /** _sequence_data fields. */ enum { BOX_SEQUENCE_DATA_FIELD_ID = 0, BOX_SEQUENCE_DATA_FIELD_VALUE = 1, }; /** _space_seq fields. 
*/ enum { BOX_SPACE_SEQUENCE_FIELD_ID = 0, BOX_SPACE_SEQUENCE_FIELD_SEQUENCE_ID = 1, BOX_SPACE_SEQUENCE_FIELD_IS_GENERATED = 2, }; /* * Different objects which can be subject to access * control. * * Use 0 for unknown to use the same index consistently * even when there are more object types in the future. */ enum schema_object_type { SC_UNKNOWN = 0, SC_UNIVERSE = 1, SC_SPACE = 2, SC_FUNCTION = 3, SC_USER = 4, SC_ROLE = 5, SC_SEQUENCE = 6, SC_COLLATION = 7, schema_object_type_MAX = 8 }; enum schema_object_type schema_object_type(const char *name); const char * schema_object_name(enum schema_object_type type); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_SCHEMA_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/user.h0000664000000000000000000001273613306560010016717 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_USER_H #define INCLUDES_TARANTOOL_BOX_USER_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "user_def.h" #include "small/region.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Global grants. */ struct universe { /** Global privileges this user has on the universe. */ struct access access[BOX_USER_MAX]; }; /** A single instance of the universe. */ extern struct universe universe; /** Bitmap type for used/unused authentication token map. */ typedef unsigned int umap_int_t; enum { UMAP_INT_BITS = CHAR_BIT * sizeof(umap_int_t), USER_MAP_SIZE = (BOX_USER_MAX + UMAP_INT_BITS - 1)/UMAP_INT_BITS }; struct user_map { umap_int_t m[USER_MAP_SIZE]; }; static inline bool user_map_is_empty(struct user_map *map) { for (int i = 0; i < USER_MAP_SIZE; i++) if (map->m[i]) return false; return true; } typedef rb_tree(struct priv_def) privset_t; rb_proto(, privset_, privset_t, struct priv_def); struct user { struct user_def *def; /** * An id in privileges array to quickly find a * respective privilege. */ uint8_t auth_token; /** List of users or roles this role has been granted to */ struct user_map users; /** List of roles granted to this role or user. */ struct user_map roles; /** A cache of effective privileges of this user. */ privset_t privs; /** True if this user privileges need to be reloaded. */ bool is_dirty; /** Memory pool for privs */ struct region pool; }; /** Find user by id. */ struct user * user_by_id(uint32_t uid); struct user * user_find_by_name(const char *name, uint32_t len); /* Find a user by name. 
Used by authentication. */ struct user * user_find(uint32_t uid); #if defined(__cplusplus) } /* extern "C" */ /** * For best performance, all users are maintained in this array. * Position in the array is store in user->auth_token and also * in session->auth_token. This way it's easy to quickly find * the current user of the session. * An auth token, instead of a direct pointer, is stored in the * session because it makes dropping of a signed in user safe. * The same auth token (index in an array) * is also used to find out user privileges when accessing stored * objects, such as spaces and functions. */ extern struct user *guest_user, *admin_user; /* * Insert or update user object (a cache entry * for user). * This is called from a trigger on _user table * and from trigger on _priv table, (in the latter * case, only when making a grant on the universe). * * If a user already exists, update it, otherwise * find space in users[] array and store the new * user in it. Update user->auth_token * with an index in the users[] array. */ struct user * user_cache_replace(struct user_def *user); /** * Find a user by id and delete it from the * users cache. */ void user_cache_delete(uint32_t uid); /* Find a user by name. Used by authentication. */ static inline struct user * user_find_xc(uint32_t uid) { struct user *user = user_find(uid); if (user == NULL) diag_raise(); return user; } static inline struct user * user_find_by_name_xc(const char *name, uint32_t len) { struct user *user = user_find_by_name(name, len); if (user == NULL) diag_raise(); return user; } /** Initialize the user cache and access control subsystem. */ void user_cache_init(); /** Cleanup the user cache and access control subsystem */ void user_cache_free(); /* {{{ Roles */ /** * Check, mainly, that users & roles form an acyclic graph, * and no loop in the graph will occur when grantee gets * a given role. 
*/ void role_check(struct user *grantee, struct user *role); /** * Grant a role to a user or another role. */ void role_grant(struct user *grantee, struct user *role); /** * Revoke a role from a user or another role. */ void role_revoke(struct user *grantee, struct user *role); /** * Grant or revoke a single privilege to a user or role * and re-evaluate effective access of all users of this * role if this role. */ void priv_grant(struct user *grantee, struct priv_def *priv); void priv_def_create_from_tuple(struct priv_def *priv, struct tuple *tuple); /* }}} */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_USER_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/session.h0000664000000000000000000001612613306565107017435 0ustar rootroot#ifndef INCLUDES_TARANTOOL_SESSION_H #define INCLUDES_TARANTOOL_SESSION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "trigger.h" #include "fiber.h" #include "user.h" #include "authentication.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ void session_init(); void session_free(); enum { SESSION_SEED_SIZE = 32, SESSION_DELIM_SIZE = 16 }; enum session_type { SESSION_TYPE_BACKGROUND = 0, SESSION_TYPE_BINARY, SESSION_TYPE_CONSOLE, SESSION_TYPE_REPL, SESSION_TYPE_APPLIER, session_type_MAX, }; extern const char *session_type_strs[]; /** * Abstraction of a single user session: * for now, only provides accounting of established * sessions and on-connect/on-disconnect event * handling, user credentials. In future: the * client/server protocol, etc. * Session identifiers grow monotonically. * 0 sid is reserved to mean 'no session'. */ struct session { /** Session id. */ uint64_t id; /** File descriptor - socket of the connected peer. * Only if the session has a peer. */ int fd; /** * For iproto requests, we set this field * to the value of packet sync. Since the * session may be reused between many requests, * the value is true only at the beginning * of the request, and gets distorted after * the first yield. */ uint64_t sync; enum session_type type; /** Authentication salt. */ char salt[SESSION_SEED_SIZE]; /** Session user id and global grants */ struct credentials credentials; /** Trigger for fiber on_stop to cleanup created on-demand session */ struct trigger fiber_on_stop; }; /** * Find a session by id. 
*/ struct session * session_find(uint64_t sid); /** Global on-connect triggers. */ extern struct rlist session_on_connect; extern struct rlist session_on_auth; /** * Get the current session from @a fiber * @param fiber fiber * @return session if any * @retval NULL if there is no active session */ static inline struct session * fiber_get_session(struct fiber *fiber) { return (struct session *) fiber_get_key(fiber, FIBER_KEY_SESSION); } /** * Set the current session in @a fiber * @param fiber fiber * @param session a value to set */ static inline void fiber_set_user(struct fiber *fiber, struct credentials *cr) { fiber_set_key(fiber, FIBER_KEY_USER, cr); } static inline void fiber_set_session(struct fiber *fiber, struct session *session) { fiber_set_key(fiber, FIBER_KEY_SESSION, session); } static inline void credentials_init(struct credentials *cr, uint8_t auth_token, uint32_t uid) { cr->auth_token = auth_token; cr->universal_access = universe.access[cr->auth_token].effective; cr->uid = uid; } /* * For use in local hot standby, which runs directly * from ev watchers (without current fiber), but needs * to execute transactions. */ extern struct credentials admin_credentials; /** * Create a new session on demand, and set fiber on_stop * trigger to destroy it when this fiber ends. */ struct session * session_create_on_demand(int fd); /* * When creating a new fiber, the database (box) * may not be initialized yet. When later on * this fiber attempts to access the database, * we have no other choice but initialize fiber-specific * database state (something like a database connection) * on demand. This is why this function needs to * check whether or not the current session exists * and create it otherwise. */ static inline struct session * current_session() { struct session *session = fiber_get_session(fiber()); if (session == NULL) { session = session_create_on_demand(-1); if (session == NULL) diag_raise(); } return session; } /* * Return the current user. 
Create it if it doesn't * exist yet. * The same rationale for initializing the current * user on demand as in current_session() applies. */ static inline struct credentials * effective_user() { struct credentials *u = (struct credentials *) fiber_get_key(fiber(), FIBER_KEY_USER); if (u == NULL) { session_create_on_demand(-1); u = (struct credentials *) fiber_get_key(fiber(), FIBER_KEY_USER); } return u; } /** Global on-disconnect triggers. */ extern struct rlist session_on_disconnect; void session_storage_cleanup(int sid); /** * Create a session. * Invokes a Lua trigger box.session.on_connect if it is * defined. Issues a new session identifier. * Must called by the networking layer * when a new connection is established. * * @return handle for a created session * @exception tnt_Exception or lua error if session * trigger fails or runs out of resources. */ struct session * session_create(int fd, enum session_type type); /** * Destroy a session. * Must be called by the networking layer on disconnect. * Invokes a Lua trigger box.session.on_disconnect if it * is defined. * @param session session to destroy. may be NULL. * * @exception none */ void session_destroy(struct session *); /** Run on-connect triggers */ int session_run_on_connect_triggers(struct session *session); /** Run on-disconnect triggers */ void session_run_on_disconnect_triggers(struct session *session); /** Run auth triggers */ int session_run_on_auth_triggers(const struct on_auth_trigger_ctx *result); /** * Check whether or not the current user is authorized to connect */ int access_check_session(struct user *user); /** * Check whether or not the current user can be granted * the requested access to the universe. 
*/ int access_check_universe(user_access_t access); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline void access_check_session_xc(struct user *user) { if (access_check_session(user) != 0) diag_raise(); } static inline void access_check_universe_xc(user_access_t access) { if (access_check_universe(access) != 0) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_SESSION_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/request.h0000664000000000000000000000570313306560010017425 0ustar rootroot#ifndef TARANTOOL_BOX_REQUEST_H_INCLUDED #define TARANTOOL_BOX_REQUEST_H_INCLUDED /* * Copyright 2010-2018, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct request; struct space; struct tuple; /** * Given old and new tuples, initialize the corresponding * request to be written to WAL. * * @param request - request to fix * @param space - space corresponding to request * @param old_tuple - the old tuple * @param new_tuple - the new tuple * * If old_tuple and new_tuple are the same, the request is turned into NOP. * If new_tuple is NULL, the request is turned into DELETE(old_tuple). * If new_tuple is not NULL, the request is turned into REPLACE(new_tuple). */ int request_create_from_tuple(struct request *request, struct space *space, struct tuple *old_tuple, struct tuple *new_tuple); /** * Convert a request accessing a secondary key to a primary * key undo record, given it found a tuple. * Flush iproto header of the request to be reconstructed in * txn_add_redo(). * * @param request - request to fix * @param space - space corresponding to request * @param found_tuple - tuple found by secondary key */ void request_rebind_to_primary_key(struct request *request, struct space *space, struct tuple *found_tuple); /** * Handle INSERT/REPLACE in a space with a sequence attached. * * @param request - request to fix * @param space - space corresponding to request */ int request_handle_sequence(struct request *request, struct space *space); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_REQUEST_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_hash.h0000664000000000000000000000465413306565107020111 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_HASH_H_INCLUDED #define TARANTOOL_BOX_TUPLE_HASH_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "key_def.h" #include "tuple.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Initialize tuple_hash() and key_hash() function for the key_def * @param key_def key definition */ void tuple_hash_func_set(struct key_def *def); /** * Calculates a common hash value for a tuple * @param tuple - a tuple * @param key_def - key_def for field description * @return - hash value */ static inline uint32_t tuple_hash(const struct tuple *tuple, const struct key_def *key_def) { return key_def->tuple_hash(tuple, key_def); } /** * Calculate a common hash value for a key * @param key - full key (msgpack fields w/o array marker) * @param key_def - key_def for field description * @return - hash value */ static inline uint32_t key_hash(const char *key, const struct key_def *key_def) { return key_def->key_hash(key, key_def); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TUPLE_HASH_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vclock.c0000664000000000000000000001202713306560010017206 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vclock.h" #include #include #include #include "diag.h" int64_t vclock_follow(struct vclock *vclock, uint32_t replica_id, int64_t lsn) { assert(lsn >= 0); assert(replica_id < VCLOCK_MAX); int64_t prev_lsn = vclock->lsn[replica_id]; if (lsn <= prev_lsn) { /* Never confirm LSN out of order. */ panic("LSN for %u is used twice or COMMIT order is broken: " "confirmed: %lld, new: %lld", (unsigned) replica_id, (long long) prev_lsn, (long long) lsn); } /* Easier add each time than check. */ vclock->map |= 1 << replica_id; vclock->lsn[replica_id] = lsn; vclock->signature += lsn - prev_lsn; return prev_lsn; } CFORMAT(printf, 4, 0) static inline int rsnprintf(char **buf, char **pos, char **end, const char *fmt, ...) { int rc = 0; va_list ap; while (1) { va_start(ap, fmt); int n = vsnprintf(*pos, *end - *pos, fmt, ap); va_end(ap); assert(n > -1); /* glibc >= 2.0.6, see vsnprintf(3) */ if (n < *end - *pos) { *pos += n; break; } /* Reallocate buffer */ ptrdiff_t cap = (*end - *buf) > 0 ? 
(*end - *buf) : 32; while (cap <= *pos - *buf + n) cap *= 2; char *chunk = (char *) realloc(*buf, cap); if (chunk == NULL) { diag_set(OutOfMemory, cap, "malloc", "vclock"); free(*buf); *buf = *end = *pos = NULL; rc = -1; break; } *pos = chunk + (*pos - *buf); *end = chunk + cap; *buf = chunk; } return rc; } char * vclock_to_string(const struct vclock *vclock) { (void) vclock; char *buf = NULL, *pos = NULL, *end = NULL; if (rsnprintf(&buf, &pos, &end, "{") != 0) return NULL; const char *sep = ""; struct vclock_iterator it; vclock_iterator_init(&it, vclock); vclock_foreach(&it, replica) { if (rsnprintf(&buf, &pos, &end, "%s%u: %lld", sep, replica.id, (long long) replica.lsn) != 0) return NULL; sep = ", "; } if (rsnprintf(&buf, &pos, &end, "}") != 0) return NULL; return buf; } size_t vclock_from_string(struct vclock *vclock, const char *str) { long replica_id; long long lsn; const char *p = str; begin: if (*p == '{') { ++p; goto key; } else if (isblank(*p)) { ++p; goto begin; } goto error; key: if (isdigit(*p)) { errno = 0; replica_id = strtol(p, (char **) &p, 10); if (errno != 0 || replica_id < 0 || replica_id >= VCLOCK_MAX) goto error; goto sep; } else if (*p == '}') { ++p; goto end; } else if (isblank(*p)) { ++p; goto key; } goto error; sep: if (*p == ':') { ++p; goto val; } else if (isblank(*p)) { ++p; goto sep; } goto error; val: if (isblank(*p)) { ++p; goto val; } else if (isdigit(*p)) { errno = 0; lsn = strtoll(p, (char **) &p, 10); if (errno != 0 || lsn < 0 || lsn > INT64_MAX || replica_id >= VCLOCK_MAX || vclock->lsn[replica_id] > 0) goto error; vclock->map |= 1 << replica_id; vclock->lsn[replica_id] = lsn; goto comma; } goto error; comma: if (isspace(*p)) { ++p; goto comma; } else if (*p == '}') { ++p; goto end; } else if (*p == ',') { ++p; goto key; } goto error; end: if (*p == '\0') { vclock->signature = vclock_calc_sum(vclock); return 0; } else if (isblank(*p)) { ++p; goto end; } /* goto error; */ error: return p - str + 1; /* error */ } static int 
vclockset_node_compare(const struct vclock *a, const struct vclock *b) { int res = vclock_compare(a, b); /* * In a vclock set, we do not allow clocks which are not * strictly ordered. * See also xdir_scan(), in which we check & skip * duplicate vclocks. */ if (res == VCLOCK_ORDER_UNDEFINED) return 0; return res; } rb_gen(, vclockset_, vclockset_t, struct vclock, link, vclockset_node_compare); tarantool_1.9.1.26.g63eb81e3c/src/box/applier.h0000664000000000000000000001354613306560010017375 0ustar rootroot#ifndef TARANTOOL_APPLIER_H_INCLUDED #define TARANTOOL_APPLIER_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include "fiber_cond.h" #include "trigger.h" #include "trivia/util.h" #include "tt_uuid.h" #include "uri.h" #include "vclock.h" struct xstream; enum { APPLIER_SOURCE_MAXLEN = 1024 }; /* enough to fit URI with passwords */ #define applier_STATE(_) \ _(APPLIER_OFF, 0) \ _(APPLIER_CONNECT, 1) \ _(APPLIER_CONNECTED, 2) \ _(APPLIER_AUTH, 3) \ _(APPLIER_READY, 4) \ _(APPLIER_INITIAL_JOIN, 5) \ _(APPLIER_FINAL_JOIN, 6) \ _(APPLIER_JOINED, 7) \ _(APPLIER_SYNC, 8) \ _(APPLIER_FOLLOW, 9) \ _(APPLIER_STOPPED, 10) \ _(APPLIER_DISCONNECTED, 11) \ _(APPLIER_LOADING, 12) \ /** States for the applier */ ENUM(applier_state, applier_STATE); extern const char *applier_state_strs[]; /** * State of a replication connection to the master */ struct applier { /** Background fiber */ struct fiber *reader; /** Background fiber to reply with vclock */ struct fiber *writer; /** Writer cond. */ struct fiber_cond writer_cond; /** Finite-state machine */ enum applier_state state; /** Local time of this replica when the last row has been received */ ev_tstamp last_row_time; /** Number of seconds this replica is behind the remote master */ ev_tstamp lag; /** The last box_error_code() logged to avoid log flooding */ uint32_t last_logged_errcode; /** Remote instance UUID */ struct tt_uuid uuid; /** Remote URI (string) */ char source[APPLIER_SOURCE_MAXLEN]; /** Remote URI (parsed) */ struct uri uri; /** Remote version encoded as a number, see version_id() macro */ uint32_t version_id; /** Remote vclock at time of connect. 
*/ struct vclock vclock; /** Remote peer mode, true if read-only, default: false */ bool remote_is_ro; /** Remote address */ union { struct sockaddr addr; struct sockaddr_storage addrstorage; }; /** Length of addr */ socklen_t addr_len; /** EV watcher for I/O */ struct ev_io io; /** Input buffer */ struct ibuf ibuf; /** Triggers invoked on state change */ struct rlist on_state; /** * Set if the applier was paused (see applier_pause()) and is now * waiting on resume_cond to be resumed (see applier_resume()). */ bool is_paused; /** Condition variable signaled to resume the applier. */ struct fiber_cond resume_cond; /** xstream to process rows during initial JOIN */ struct xstream *join_stream; /** xstream to process rows during final JOIN and SUBSCRIBE */ struct xstream *subscribe_stream; }; /** * Start a client to a remote master using a background fiber. * * If recovery is finalized (i.e. r->writer != NULL) then the client * connect to a master and follow remote updates using SUBSCRIBE command. * * If recovery is not finalized (i.e. r->writer == NULL) then the * client connects to a master, downloads and processes * a checkpoint using JOIN command and then switches to 'follow' * mode. * * \sa fiber_start() */ void applier_start(struct applier *applier); /** * Stop a client. */ void applier_stop(struct applier *applier); /** * Allocate an instance of applier object, create applier and initialize * remote uri (copied to struct applier). * * @pre the uri is a valid and checked one * @error throws OutOfMemory exception if out of memory. */ struct applier * applier_new(const char *uri, struct xstream *join_stream, struct xstream *subscribe_stream); /** * Destroy and delete a applier. */ void applier_delete(struct applier *applier); /* * Resume execution of applier until \a state. */ void applier_resume_to_state(struct applier *applier, enum applier_state state, double timeout); /* * Resume execution of applier. 
*/ void applier_resume(struct applier *applier); /* * Pause execution of applier. * * Note, in contrast to applier_resume() this function may * only be called by the applier fiber (e.g. from on_state * trigger). */ void applier_pause(struct applier *applier); #endif /* TARANTOOL_APPLIER_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/index.h0000664000000000000000000004456513306565107017071 0ustar rootroot#ifndef TARANTOOL_BOX_INDEX_H_INCLUDED #define TARANTOOL_BOX_INDEX_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include "trivia/util.h" #include "iterator_type.h" #include "index_def.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct tuple; struct engine; struct index; struct index_def; struct key_def; struct info_handler; /** \cond public */ typedef struct tuple box_tuple_t; typedef struct key_def box_key_def_t; typedef struct iterator box_iterator_t; /** * Allocate and initialize iterator for space_id, index_id. * * A returned iterator must be destroyed by box_iterator_free(). * * \param space_id space identifier. * \param index_id index identifier. * \param type \link iterator_type iterator type \endlink * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key * \retval NULL on error (check box_error_last()) * \retval iterator otherwise * \sa box_iterator_next() * \sa box_iterator_free() */ box_iterator_t * box_index_iterator(uint32_t space_id, uint32_t index_id, int type, const char *key, const char *key_end); /** * Retrive the next item from the \a iterator. * * \param iterator an iterator returned by box_index_iterator(). * \param[out] result a tuple or NULL if there is no more data. * \retval -1 on error (check box_error_last() for details) * \retval 0 on success. The end of data is not an error. */ int box_iterator_next(box_iterator_t *iterator, box_tuple_t **result); /** * Destroy and deallocate iterator. * * \param iterator an interator returned by box_index_iterator() */ void box_iterator_free(box_iterator_t *iterator); /** * Return the number of element in the index. * * \param space_id space identifier * \param index_id index identifier * \retval -1 on error (check box_error_last()) * \retval >= 0 otherwise */ ssize_t box_index_len(uint32_t space_id, uint32_t index_id); /** * Return the number of bytes used in memory by the index. 
* * \param space_id space identifier * \param index_id index identifier * \retval -1 on error (check box_error_last()) * \retval >= 0 otherwise */ ssize_t box_index_bsize(uint32_t space_id, uint32_t index_id); /** * Return a random tuple from the index (useful for statistical analysis). * * \param space_id space identifier * \param index_id index identifier * \param rnd random seed * \param[out] result a tuple or NULL if index is empty * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id].index[index_id]:random(rnd) \endcode */ int box_index_random(uint32_t space_id, uint32_t index_id, uint32_t rnd, box_tuple_t **result); /** * Get a tuple from index by the key. * * Please note that this function works much more faster than * box_select() or box_index_iterator() + box_iterator_next(). * * \param space_id space identifier * \param index_id index identifier * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key * \param[out] result a tuple or NULL if index is empty * \retval -1 on error (check box_error_last()) * \retval 0 on success * \pre key != NULL * \sa \code box.space[space_id].index[index_id]:get(key) \endcode */ int box_index_get(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); /** * Return a first (minimal) tuple matched the provided key. * * \param space_id space identifier * \param index_id index identifier * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key. * \param[out] result a tuple or NULL if index is empty * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id].index[index_id]:min(key) \endcode */ int box_index_min(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); /** * Return a last (maximal) tuple matched the provided key. 
* * \param space_id space identifier * \param index_id index identifier * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key. * \param[out] result a tuple or NULL if index is empty * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa \code box.space[space_id].index[index_id]:max(key) \endcode */ int box_index_max(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); /** * Count the number of tuple matched the provided key. * * \param space_id space identifier * \param index_id index identifier * \param type iterator type - enum \link iterator_type \endlink * \param key encoded key in MsgPack Array format ([part1, part2, ...]). * \param key_end the end of encoded \a key. * \retval -1 on error (check box_error_last()) * \retval >=0 on success * \sa \code box.space[space_id].index[index_id]:count(key, * { iterator = type }) \endcode */ ssize_t box_index_count(uint32_t space_id, uint32_t index_id, int type, const char *key, const char *key_end); /** * Extract key from tuple according to key definition of given * index. Returned buffer is allocated on box_txn_alloc() with * this key. * @param tuple Tuple from which need to extract key. * @param space_id Space identifier. * @param index_id Index identifier. * @retval not NULL Success * @retval NULL Memory Allocation error */ char * box_tuple_extract_key(const box_tuple_t *tuple, uint32_t space_id, uint32_t index_id, uint32_t *key_size); /** \endcond public */ /** * Index introspection (index:info()) * * \param space_id space identifier * \param index_id index identifier * \param info info handler * \retval -1 on error (check box_error_last()) * \retval >=0 on success */ int box_index_info(uint32_t space_id, uint32_t index_id, struct info_handler *info); struct iterator { /** * Iterate to the next tuple. * The tuple is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on error. 
*/ int (*next)(struct iterator *it, struct tuple **ret); /** Destroy the iterator. */ void (*free)(struct iterator *); /** Schema version at the time of the last index lookup. */ uint32_t schema_version; /** ID of the space the iterator is for. */ uint32_t space_id; /** ID of the index the iterator is for. */ uint32_t index_id; /** * Pointer to the index the iterator is for. * Guaranteed to be valid only if the schema * version has not changed since the last lookup. */ struct index *index; }; /** * Initialize a base iterator structure. * * This function is supposed to be used only by * index implementation so never call it directly, * use index_create_iterator() instead. */ void iterator_create(struct iterator *it, struct index *index); /** * Iterate to the next tuple. * * The tuple is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on error. */ int iterator_next(struct iterator *it, struct tuple **ret); /** * Destroy an iterator instance and free associated memory. */ void iterator_delete(struct iterator *it); /** * Snapshot iterator. * \sa index::create_snapshot_iterator(). */ struct snapshot_iterator { /** * Iterate to the next tuple in the snapshot. * Returns a pointer to the tuple data and its * size or NULL if EOF. */ const char *(*next)(struct snapshot_iterator *, uint32_t *size); /** * Destroy the iterator. */ void (*free)(struct snapshot_iterator *); }; /** * Check that the key has correct part count and correct part size * for use in an index iterator. * * @param index_def key definition * @param type iterator type (see enum iterator_type) * @param key msgpack-encoded key * @param part_count number of parts in \a key * * @retval 0 The key is valid. * @retval -1 The key is invalid. */ int key_validate(const struct index_def *index_def, enum iterator_type type, const char *key, uint32_t part_count); /** * Check that the supplied key is valid for a search in a unique * index (i.e. the key must be fully specified). * @retval 0 The key is valid. 
* @retval -1 The key is invalid. */ int exact_key_validate(struct key_def *key_def, const char *key, uint32_t part_count); /** * The manner in which replace in a unique index must treat * duplicates (tuples with the same value of indexed key), * possibly present in the index. */ enum dup_replace_mode { /** * If a duplicate is found, delete it and insert * a new tuple instead. Otherwise, insert a new tuple. */ DUP_REPLACE_OR_INSERT, /** * If a duplicate is found, produce an error. * I.e. require that no old key exists with the same * value. */ DUP_INSERT, /** * Unless a duplicate exists, throw an error. */ DUP_REPLACE }; struct index_vtab { /** Free an index instance. */ void (*destroy)(struct index *); /** * Called after WAL write to commit index creation. * Must not fail. * * @signature is the LSN that was assigned to the row * that created the index. If the index was created by * a snapshot row, it is set to the snapshot signature. */ void (*commit_create)(struct index *index, int64_t signature); /** * Called after WAL write to commit index drop. * Must not fail. */ void (*commit_drop)(struct index *); /** * Called after index definition update that did not * require index rebuild. */ void (*update_def)(struct index *); ssize_t (*size)(struct index *); ssize_t (*bsize)(struct index *); int (*min)(struct index *index, const char *key, uint32_t part_count, struct tuple **result); int (*max)(struct index *index, const char *key, uint32_t part_count, struct tuple **result); int (*random)(struct index *index, uint32_t rnd, struct tuple **result); ssize_t (*count)(struct index *index, enum iterator_type type, const char *key, uint32_t part_count); int (*get)(struct index *index, const char *key, uint32_t part_count, struct tuple **result); int (*replace)(struct index *index, struct tuple *old_tuple, struct tuple *new_tuple, enum dup_replace_mode mode, struct tuple **result); /** Create an index iterator. 
*/ struct iterator *(*create_iterator)(struct index *index, enum iterator_type type, const char *key, uint32_t part_count); /** * Create an ALL iterator with personal read view so further * index modifications will not affect the iteration results. * Must be destroyed by iterator_delete() after usage. */ struct snapshot_iterator *(*create_snapshot_iterator)(struct index *); /** Introspection (index:info()) */ void (*info)(struct index *, struct info_handler *); /** * Two-phase index creation: begin building, add tuples, finish. */ void (*begin_build)(struct index *); /** * Optional hint, given to the index, about * the total size of the index. Called after * begin_build(). */ int (*reserve)(struct index *index, uint32_t size_hint); int (*build_next)(struct index *index, struct tuple *tuple); void (*end_build)(struct index *index); }; struct index { /** Virtual function table. */ const struct index_vtab *vtab; /** Engine used by this index. */ struct engine *engine; /* Description of a possibly multipart key. */ struct index_def *def; /* Schema version at the time of construction. */ uint32_t schema_version; }; /** * Check if replacement of an old tuple with a new one is * allowed. */ static inline uint32_t replace_check_dup(struct tuple *old_tuple, struct tuple *dup_tuple, enum dup_replace_mode mode) { if (dup_tuple == NULL) { if (mode == DUP_REPLACE) { /* * dup_replace_mode is DUP_REPLACE, and * a tuple with the same key is not found. */ return old_tuple ? ER_CANT_UPDATE_PRIMARY_KEY : ER_TUPLE_NOT_FOUND; } } else { /* dup_tuple != NULL */ if (dup_tuple != old_tuple && (old_tuple != NULL || mode == DUP_INSERT)) { /* * There is a duplicate of new_tuple, * and it's not old_tuple: we can't * possibly delete more than one tuple * at once. */ return ER_TUPLE_FOUND; } } return 0; } /** * Initialize an index instance. * Note, this function copies the given index definition. 
*/ int index_create(struct index *index, struct engine *engine, const struct index_vtab *vtab, struct index_def *def); /** Free an index instance. */ void index_delete(struct index *index); /** Build this index based on the contents of another index. */ int index_build(struct index *index, struct index *pk); static inline void index_commit_create(struct index *index, int64_t signature) { index->vtab->commit_create(index, signature); } static inline void index_commit_drop(struct index *index) { index->vtab->commit_drop(index); } static inline void index_update_def(struct index *index) { index->vtab->update_def(index); } static inline ssize_t index_size(struct index *index) { return index->vtab->size(index); } static inline ssize_t index_bsize(struct index *index) { return index->vtab->bsize(index); } static inline int index_min(struct index *index, const char *key, uint32_t part_count, struct tuple **result) { return index->vtab->min(index, key, part_count, result); } static inline int index_max(struct index *index, const char *key, uint32_t part_count, struct tuple **result) { return index->vtab->max(index, key, part_count, result); } static inline int index_random(struct index *index, uint32_t rnd, struct tuple **result) { return index->vtab->random(index, rnd, result); } static inline ssize_t index_count(struct index *index, enum iterator_type type, const char *key, uint32_t part_count) { return index->vtab->count(index, type, key, part_count); } static inline int index_get(struct index *index, const char *key, uint32_t part_count, struct tuple **result) { return index->vtab->get(index, key, part_count, result); } static inline int index_replace(struct index *index, struct tuple *old_tuple, struct tuple *new_tuple, enum dup_replace_mode mode, struct tuple **result) { return index->vtab->replace(index, old_tuple, new_tuple, mode, result); } static inline struct iterator * index_create_iterator(struct index *index, enum iterator_type type, const char *key, uint32_t 
part_count) { return index->vtab->create_iterator(index, type, key, part_count); } static inline struct snapshot_iterator * index_create_snapshot_iterator(struct index *index) { return index->vtab->create_snapshot_iterator(index); } static inline void index_info(struct index *index, struct info_handler *handler) { index->vtab->info(index, handler); } static inline void index_begin_build(struct index *index) { index->vtab->begin_build(index); } static inline int index_reserve(struct index *index, uint32_t size_hint) { return index->vtab->reserve(index, size_hint); } static inline int index_build_next(struct index *index, struct tuple *tuple) { return index->vtab->build_next(index, tuple); } static inline void index_end_build(struct index *index) { index->vtab->end_build(index); } /* * Virtual method stubs. */ void generic_index_commit_create(struct index *, int64_t); void generic_index_commit_drop(struct index *); void generic_index_update_def(struct index *); ssize_t generic_index_size(struct index *); int generic_index_min(struct index *, const char *, uint32_t, struct tuple **); int generic_index_max(struct index *, const char *, uint32_t, struct tuple **); int generic_index_random(struct index *, uint32_t, struct tuple **); ssize_t generic_index_count(struct index *, enum iterator_type, const char *, uint32_t); int generic_index_get(struct index *, const char *, uint32_t, struct tuple **); int generic_index_replace(struct index *, struct tuple *, struct tuple *, enum dup_replace_mode, struct tuple **); struct snapshot_iterator *generic_index_create_snapshot_iterator(struct index *); void generic_index_info(struct index *, struct info_handler *); void generic_index_begin_build(struct index *); int generic_index_reserve(struct index *, uint32_t); int generic_index_build_next(struct index *, struct tuple *); void generic_index_end_build(struct index *); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" /* * A wrapper for 
ClientError(ER_UNSUPPORTED_INDEX_FEATURE, ...) to format * nice error messages (see gh-1042). You never need to catch this class. */ class UnsupportedIndexFeature: public ClientError { public: UnsupportedIndexFeature(const char *file, unsigned line, struct index_def *index_def, const char *what); }; struct IteratorGuard { struct iterator *it; IteratorGuard(struct iterator *it_arg) : it(it_arg) {} ~IteratorGuard() { iterator_delete(it); } }; /* * C++ wrappers around index methods. * They throw an exception in case of error. */ static inline struct iterator * index_create_iterator_xc(struct index *index, enum iterator_type type, const char *key, uint32_t part_count) { struct iterator *it = index_create_iterator(index, type, key, part_count); if (it == NULL) diag_raise(); return it; } static inline struct tuple * iterator_next_xc(struct iterator *it) { struct tuple *tuple; if (iterator_next(it, &tuple) != 0) diag_raise(); return tuple; } #endif /* defined(__plusplus) */ #endif /* TARANTOOL_BOX_INDEX_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_convert.c0000664000000000000000000001635613306560010020627 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tuple.h" #include #include #include "third_party/base64.h" #include #include #include "fiber.h" #include int tuple_to_obuf(const struct tuple *tuple, struct obuf *buf) { uint32_t bsize; const char *data = tuple_data_range(tuple, &bsize); if (obuf_dup(buf, data, bsize) != bsize) { diag_set(OutOfMemory, bsize, "tuple_to_obuf", "dup"); return -1; } return 0; } int append_output(void *arg, unsigned char *buf, size_t len) { (void) arg; char *buf_out = region_alloc(&fiber()->gc, len + 1); if (!buf_out) { diag_set(OutOfMemory, len , "region", "tuple_to_yaml"); return 0; } memcpy(buf_out, buf, len); buf_out[len] = '\0'; return 1; } static int encode_node(yaml_emitter_t *emitter, const char **data); static int encode_table(yaml_emitter_t *emitter, const char **data) { yaml_event_t ev; yaml_mapping_style_t yaml_style = YAML_FLOW_MAPPING_STYLE; if (!yaml_mapping_start_event_initialize(&ev, NULL, NULL, 0, yaml_style) || !yaml_emitter_emit(emitter, &ev)) { diag_set(SystemError, "failed to init event libyaml"); return 0; } uint32_t size = mp_decode_map(data); for (uint32_t i = 0; i < size; i++) { if (!encode_node(emitter, data)) return 0; if (!encode_node(emitter, data)) return 0; } if (!yaml_mapping_end_event_initialize(&ev) || !yaml_emitter_emit(emitter, &ev)) { diag_set(SystemError, "failed to end event libyaml"); return 0; } return 1; } static int encode_array(yaml_emitter_t *emitter, const char **data) { yaml_event_t ev; yaml_sequence_style_t yaml_style = 
YAML_FLOW_SEQUENCE_STYLE; if (!yaml_sequence_start_event_initialize(&ev, NULL, NULL, 0, yaml_style) || !yaml_emitter_emit(emitter, &ev)) { diag_set(SystemError, "failed to init event libyaml"); return 0; } uint32_t size = mp_decode_array(data); for (uint32_t i = 0; i < size; i++) { if (!encode_node(emitter, data)) return 0; } if (!yaml_sequence_end_event_initialize(&ev) || !yaml_emitter_emit(emitter, &ev)) { diag_set(SystemError, "failed to end event libyaml"); return 0; } return 1; } #define LUAYAML_TAG_PREFIX "tag:yaml.org,2002:" static int encode_node(yaml_emitter_t *emitter, const char **data) { size_t len = 0; const char *str = ""; size_t binlen = 0; char *bin = NULL; yaml_char_t *tag = NULL; yaml_event_t ev; yaml_scalar_style_t style = YAML_PLAIN_SCALAR_STYLE; char buf[FPCONV_G_FMT_BUFSIZE]; int type = mp_typeof(**data); switch(type) { case MP_UINT: len = snprintf(buf, sizeof(buf), "%llu", (unsigned long long) mp_decode_uint(data)); buf[len] = 0; str = buf; break; case MP_INT: len = snprintf(buf, sizeof(buf), "%lld", (long long) mp_decode_int(data)); buf[len] = 0; str = buf; break; case MP_FLOAT: fpconv_g_fmt(buf, mp_decode_float(data), FPCONV_G_FMT_MAX_PRECISION); str = buf; len = strlen(buf); break; case MP_DOUBLE: fpconv_g_fmt(buf, mp_decode_double(data), FPCONV_G_FMT_MAX_PRECISION); str = buf; len = strlen(buf); break; case MP_ARRAY: return encode_array(emitter, data); case MP_MAP: return encode_table(emitter, data); case MP_STR: case MP_BIN: len = mp_decode_strbinl(data); str = *data; *data += len; if (type == MP_STR && utf8_check_printable(str, len)) { style = YAML_SINGLE_QUOTED_SCALAR_STYLE; break; } style = YAML_ANY_SCALAR_STYLE; /* Binary or not UTF8 */ binlen = base64_bufsize(len, 0); bin = (char *) malloc(binlen); if (bin == NULL) { diag_set(OutOfMemory, binlen, "malloc", "tuple_to_yaml"); return 0; } binlen = base64_encode(str, len, bin, binlen, 0); str = bin; len = binlen; tag = (yaml_char_t *) LUAYAML_TAG_PREFIX "binary"; break; case MP_BOOL: if 
(mp_decode_bool(data)) { str = "true"; len = 4; } else { str = "false"; len = 5; } break; case MP_NIL: case MP_EXT: if (type == MP_NIL) { mp_decode_nil(data); } else { mp_next(data); } style = YAML_PLAIN_SCALAR_STYLE; str = "null"; len = 4; break; default: unreachable(); } int rc = 1; if (!yaml_scalar_event_initialize(&ev, NULL, tag, (unsigned char *)str, len, bin == NULL, bin == NULL, style) || !yaml_emitter_emit(emitter, &ev)) { diag_set(OutOfMemory, len, "malloc", "tuple_to_yaml"); rc = 0; } if (bin != NULL) free(bin); return rc; } char * tuple_to_yaml(const struct tuple *tuple) { const char *data = tuple_data(tuple); yaml_emitter_t emitter; yaml_event_t ev; size_t used = region_used(&fiber()->gc); if (!yaml_emitter_initialize(&emitter)) { diag_set(SystemError, "failed to init libyaml"); return NULL; } yaml_emitter_set_unicode(&emitter, 1); yaml_emitter_set_indent(&emitter, 2); yaml_emitter_set_width(&emitter, INT_MAX); yaml_emitter_set_output(&emitter, &append_output, NULL); if (!yaml_stream_start_event_initialize(&ev, YAML_UTF8_ENCODING) || !yaml_emitter_emit(&emitter, &ev) || !yaml_document_start_event_initialize(&ev, NULL, NULL, NULL, 1) || !yaml_emitter_emit(&emitter, &ev)) { diag_set(SystemError, "failed to init event libyaml"); goto error; } if (!encode_node(&emitter, &data)) goto error; if (!yaml_document_end_event_initialize(&ev, 1) || !yaml_emitter_emit(&emitter, &ev) || !yaml_stream_end_event_initialize(&ev) || !yaml_emitter_emit(&emitter, &ev) || !yaml_emitter_flush(&emitter)) { diag_set(SystemError, "failed to end event libyaml"); goto error; } yaml_emitter_delete(&emitter); size_t total_len = region_used(&fiber()->gc) - used; char *buf = (char *) region_join(&fiber()->gc, total_len); if (buf == NULL) { diag_set(OutOfMemory, total_len, "region", "tuple_to_yaml"); return NULL; } /* Remove trailing "\n\0" added by libyaml */ assert(total_len > 2); assert(buf[total_len - 1] == '\0' && buf[total_len - 2] == '\n'); buf[total_len - 2] = '\0'; return buf; 
error: yaml_emitter_delete(&emitter); return NULL; } tarantool_1.9.1.26.g63eb81e3c/src/box/journal.h0000664000000000000000000001070613306560010017406 0ustar rootroot#ifndef TARANTOOL_JOURNAL_H_INCLUDED #define TARANTOOL_JOURNAL_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "salad/stailq.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct xrow_header; /** * An entry for an abstract journal. * Simply put, a write ahead log request. * * In case of synchronous replication, this request will travel * first to a Raft leader before going to the local WAL. */ struct journal_entry { /** A helper to include requests into a FIFO queue. 
*/ struct stailq_entry fifo; /** * On success, contains vclock signature of * the committed transaction, on error is -1 */ int64_t res; /** * The fiber issuing the request. */ struct fiber *fiber; /** * The number of rows in the request. */ int n_rows; /** * The rows. */ struct xrow_header *rows[]; }; /** * Create a new journal entry. * * @return NULL if out of memory, fiber diagnostics area is set */ struct journal_entry * journal_entry_new(size_t n_rows); /** * An API for an abstract journal for all transactions of this * instance, as well as for multiple instances in case of * synchronous replication. */ struct journal { int64_t (*write)(struct journal *journal, struct journal_entry *req); void (*destroy)(struct journal *journal); }; /** * Depending on the step of recovery and instance configuration * points at a concrete implementation of the journal. */ extern struct journal *current_journal; /** * Record a single entry. * * @return a log sequence number (vclock signature) of the entry * or -1 on error. */ static inline int64_t journal_write(struct journal_entry *entry) { return current_journal->write(current_journal, entry); } /** * Change the current implementation of the journaling API. * Happens during life cycle of an instance: * * 1. When recovering a snapshot, the log sequence numbers * don't matter and are not used, transactions * can be recovered in any order. A stub API simply * returns 0 for every write request. * * 2. When recovering from the local write ahead * log, the LSN of each entry is already known. In this case, * the journal API should simply return the existing * log sequence numbers of records and do nothing else. * * 2. After recovery, in wal_mode = NONE, the implementation * fakes a WAL by using a simple counter to provide * log sequence numbers. * * 3. If the write ahead log is on, the WAL thread * is issuing the log sequence numbers. 
*/ static inline void journal_set(struct journal *new_journal) { if (current_journal && current_journal->destroy) current_journal->destroy(current_journal); current_journal = new_journal; } static inline void journal_create(struct journal *journal, int64_t (*write)(struct journal *, struct journal_entry *), void (*destroy)(struct journal *)) { journal->write = write; journal->destroy = destroy; } static inline bool journal_is_initialized(struct journal *journal) { return journal->write != NULL; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_JOURNAL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/error.cc0000664000000000000000000001375013306565107017241 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "error.h" #include #include #include /* {{{ public API */ const char * box_error_type(const box_error_t *e) { return e->type->name; } uint32_t box_error_code(const box_error_t *e) { return ClientError::get_errcode(e); } const char * box_error_message(const box_error_t *error) { return error->errmsg; } box_error_t * box_error_last(void) { return diag_last_error(&fiber()->diag); } void box_error_clear(void) { diag_clear(&fiber()->diag); } int box_error_set(const char *file, unsigned line, uint32_t code, const char *fmt, ...) { struct error *e = BuildClientError(file, line, ER_UNKNOWN); ClientError *client_error = type_cast(ClientError, e); if (client_error) { client_error->m_errcode = code; va_list ap; va_start(ap, fmt); error_vformat_msg(e, fmt, ap); va_end(ap); } diag_add_error(&fiber()->diag, e); return -1; } /* }}} */ struct rmean *rmean_error = NULL; const char *rmean_error_strings[RMEAN_ERROR_LAST] = { "ERROR" }; static struct method_info clienterror_methods[] = { make_method(&type_ClientError, "code", &ClientError::errcode), METHODS_SENTINEL }; const struct type_info type_ClientError = make_type("ClientError", &type_Exception, clienterror_methods); ClientError::ClientError(const type_info *type, const char *file, unsigned line, uint32_t errcode) :Exception(type, file, line) { m_errcode = errcode; if (rmean_error) rmean_collect(rmean_error, RMEAN_ERROR, 1); } ClientError::ClientError(const char *file, unsigned line, uint32_t errcode, ...) 
:Exception(&type_ClientError, file, line) { m_errcode = errcode; if (rmean_error) rmean_collect(rmean_error, RMEAN_ERROR, 1); va_list ap; va_start(ap, errcode); error_vformat_msg(this, tnt_errcode_desc(m_errcode), ap); va_end(ap); } struct error * BuildClientError(const char *file, unsigned line, uint32_t errcode, ...) { try { ClientError *e = new ClientError(file, line, ER_UNKNOWN); va_list ap; va_start(ap, errcode); error_vformat_msg(e, tnt_errcode_desc(errcode), ap); va_end(ap); e->m_errcode = errcode; return e; } catch (OutOfMemory *e) { return e; } } void ClientError::log() const { say_file_line(S_ERROR, file, line, errmsg, "%s", tnt_errcode_str(m_errcode)); } uint32_t ClientError::get_errcode(const struct error *e) { ClientError *client_error = type_cast(ClientError, e); if (client_error) return client_error->errcode(); if (type_cast(OutOfMemory, e)) return ER_MEMORY_ISSUE; if (type_cast(SystemError, e)) return ER_SYSTEM; return ER_PROC_LUA; } const struct type_info type_XlogError = make_type("XlogError", &type_Exception); struct error * BuildXlogError(const char *file, unsigned line, const char *format, ...) 
{ try { va_list ap; va_start(ap, format); XlogError *e = new XlogError(file, line, format, ap); va_end(ap); return e; } catch (OutOfMemory *e) { return e; } } #include "schema.h" #include "trigger.h" struct rlist on_access_denied = RLIST_HEAD_INITIALIZER(on_access_denied); static struct method_info accessdeniederror_methods[] = { make_method(&type_AccessDeniedError, "access_type", &AccessDeniedError::access_type), make_method(&type_AccessDeniedError, "object_type", &AccessDeniedError::object_type), make_method(&type_AccessDeniedError, "object_name", &AccessDeniedError::object_name), METHODS_SENTINEL }; const struct type_info type_AccessDeniedError = make_type("AccessDeniedError", &type_ClientError, accessdeniederror_methods); AccessDeniedError::AccessDeniedError(const char *file, unsigned int line, const char *access_type, const char *object_type, const char *object_name, const char *user_name) :ClientError(&type_AccessDeniedError, file, line, ER_ACCESS_DENIED) { error_format_msg(this, tnt_errcode_desc(m_errcode), access_type, object_type, object_name, user_name); struct on_access_denied_ctx ctx = {access_type, object_type, object_name}; trigger_run(&on_access_denied, (void *) &ctx); /* * We want to use ctx parameters as error parameters * later, so we have to alloc space for it. * As m_access_type and m_object_type are constant * literals they are statically allocated. We must copy * only m_object_name. 
*/ m_object_type = object_type; m_access_type = access_type; m_object_name = strdup(object_name); } struct error * BuildAccessDeniedError(const char *file, unsigned int line, const char *access_type, const char *object_type, const char *object_name, const char *user_name) { try { return new AccessDeniedError(file, line, access_type, object_type, object_name, user_name); } catch (OutOfMemory *e) { return e; } } tarantool_1.9.1.26.g63eb81e3c/src/box/user_def.h0000664000000000000000000001167313306560010017534 0ustar rootroot#ifndef TARANTOOL_BOX_USER_DEF_H_INCLUDED #define TARANTOOL_BOX_USER_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "schema_def.h" /* for SCHEMA_OBJECT_TYPE */ #include "scramble.h" /* for SCRAMBLE_SIZE */ #define RB_COMPACT 1 #include "small/rb.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ typedef uint16_t user_access_t; /** * Effective session user. A cache of user data * and access stored in session and fiber local storage. * Differs from the authenticated user when executing * setuid functions. */ struct credentials { /** A look up key to quickly find session user. */ uint8_t auth_token; /** * Cached global grants, to avoid an extra look up * when checking global grants. */ user_access_t universal_access; /** User id of the authenticated user. */ uint32_t uid; }; enum priv_type { /* SELECT */ PRIV_R = 1, /* INSERT, UPDATE, UPSERT, DELETE, REPLACE */ PRIV_W = 2, /* CALL */ PRIV_X = 4, /* SESSION */ PRIV_S = 8, /* USAGE */ PRIV_U = 16, /* CREATE */ PRIV_C = 32, /* DROP */ PRIV_D = 64, /* ALTER */ PRIV_A = 128, /* REFERENCE - required by ANSI - not implemented */ PRIV_REFERENCE = 256, /* TRIGGER - required by ANSI - not implemented */ PRIV_TRIGGER = 512, /* INSERT - required by ANSI - not implemented */ PRIV_INSERT = 1024, /* UPDATE - required by ANSI - not implemented */ PRIV_UPDATE = 2048, /* DELETE - required by ANSI - not implemented */ PRIV_DELETE = 4096, /* This is never granted, but used internally. */ PRIV_GRANT = 8192, /* Never granted, but used internally. */ PRIV_REVOKE = 16384, /* all bits */ PRIV_ALL = ~((user_access_t) 0), }; /** * Definition of a privilege */ struct priv_def { /** Who grants the privilege. */ uint32_t grantor_id; /** Whom the privilege is granted. */ uint32_t grantee_id; /* Object id - is only defined for object type */ uint32_t object_id; /* Object type - function, space, universe */ enum schema_object_type object_type; /** * What is being granted, has been granted, or is being * revoked. */ user_access_t access; /** To maintain a set of effective privileges. 
*/ rb_node(struct priv_def) link; }; /* Privilege name for error messages */ const char * priv_name(user_access_t access); /** * Encapsulates privileges of a user on an object. * I.e. "space" object has an instance of this * structure for each user. */ struct access { /** * Granted access has been given to a user explicitly * via some form of a grant. */ user_access_t granted; /** * Effective access is a sum of granted access and * all privileges inherited by a user on this object * via some role. Since roles may be granted to other * roles, this may include indirect grants. */ user_access_t effective; }; /** * A cache entry for an existing user. Entries for all existing * users are always present in the cache. The entry is maintained * in sync with _user and _priv system spaces by system space * triggers. * @sa alter.cc */ struct user_def { /** User id. */ uint32_t uid; /** Creator of the user */ uint32_t owner; /** 'user' or 'role' */ enum schema_object_type type; /** User password - hash2 */ char hash2[SCRAMBLE_SIZE]; /** User name - for error messages and debugging */ char name[0]; }; static inline size_t user_def_sizeof(uint32_t name_len) { return sizeof(struct user_def) + name_len + 1; } /** Predefined user ids. */ enum { BOX_SYSTEM_USER_ID_MIN = 0, GUEST = 0, ADMIN = 1, PUBLIC = 2, /* role */ SUPER = 31, /* role */ BOX_SYSTEM_USER_ID_MAX = PUBLIC }; #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_USER_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/sysview_index.c0000664000000000000000000002273113306565107020644 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "sysview_index.h" #include "sysview_engine.h" #include #include "fiber.h" #include "schema.h" #include "space.h" #include "func.h" #include "tuple.h" #include "session.h" struct sysview_iterator { struct iterator base; struct iterator *source; struct space *space; /** Memory pool the iterator was allocated from. 
*/ struct mempool *pool; }; static inline struct sysview_iterator * sysview_iterator(struct iterator *ptr) { return (struct sysview_iterator *) ptr; } static void sysview_iterator_free(struct iterator *ptr) { struct sysview_iterator *it = sysview_iterator(ptr); iterator_delete(it->source); mempool_free(it->pool, it); } static int sysview_iterator_next(struct iterator *iterator, struct tuple **ret) { assert(iterator->free == sysview_iterator_free); struct sysview_iterator *it = sysview_iterator(iterator); *ret = NULL; if (it->source->schema_version != schema_version) return 0; /* invalidate iterator */ struct sysview_index *index = (struct sysview_index *)iterator->index; int rc; while ((rc = iterator_next(it->source, ret)) == 0 && *ret != NULL) { if (index->filter(it->space, *ret)) break; } return rc; } static void sysview_index_destroy(struct index *index) { free(index); } static ssize_t sysview_index_bsize(struct index *index) { (void)index; return 0; } static struct iterator * sysview_index_create_iterator(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { struct sysview_index *index = (struct sysview_index *)base; struct sysview_engine *sysview = (struct sysview_engine *)base->engine; struct space *source = space_cache_find(index->source_space_id); if (source == NULL) return NULL; struct index *pk = index_find(source, index->source_index_id); if (pk == NULL) return NULL; /* * Explicitly validate that key matches source's index_def. * It is possible to change a source space without changing * the view. 
*/ if (key_validate(pk->def, type, key, part_count)) return NULL; struct sysview_iterator *it = mempool_alloc(&sysview->iterator_pool); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct sysview_iterator), "mempool", "struct sysview_iterator"); return NULL; } iterator_create(&it->base, base); it->pool = &sysview->iterator_pool; it->base.next = sysview_iterator_next; it->base.free = sysview_iterator_free; it->source = index_create_iterator(pk, type, key, part_count); if (it->source == NULL) { mempool_free(&sysview->iterator_pool, it); return NULL; } it->space = source; return (struct iterator *)it; } static int sysview_index_get(struct index *base, const char *key, uint32_t part_count, struct tuple **result) { struct sysview_index *index = (struct sysview_index *)base; struct space *source = space_cache_find(index->source_space_id); if (source == NULL) return -1; struct index *pk = index_find(source, index->source_index_id); if (pk == NULL) return -1; if (!pk->def->opts.is_unique) { diag_set(ClientError, ER_MORE_THAN_ONE_TUPLE); return -1; } if (exact_key_validate(pk->def->key_def, key, part_count) != 0) return -1; struct tuple *tuple; if (index_get(pk, key, part_count, &tuple) != 0) return -1; if (tuple == NULL || !index->filter(source, tuple)) *result = NULL; else *result = tuple; return 0; } static const struct index_vtab sysview_index_vtab = { /* .destroy = */ sysview_index_destroy, /* .commit_create = */ generic_index_commit_create, /* .commit_drop = */ generic_index_commit_drop, /* .update_def = */ generic_index_update_def, /* .size = */ generic_index_size, /* .bsize = */ sysview_index_bsize, /* .min = */ generic_index_min, /* .max = */ generic_index_max, /* .random = */ generic_index_random, /* .count = */ generic_index_count, /* .get = */ sysview_index_get, /* .replace = */ generic_index_replace, /* .create_iterator = */ sysview_index_create_iterator, /* .create_snapshot_iterator = */ generic_index_create_snapshot_iterator, /* .info = */ 
generic_index_info, /* .begin_build = */ generic_index_begin_build, /* .reserve = */ generic_index_reserve, /* .build_next = */ generic_index_build_next, /* .end_build = */ generic_index_end_build, }; static bool vspace_filter(struct space *source, struct tuple *tuple) { struct credentials *cr = effective_user(); if (PRIV_R & cr->universal_access) return true; /* read access to unverse */ if (PRIV_R & source->access[cr->auth_token].effective) return true; /* read access to original space */ uint32_t space_id; if (tuple_field_u32(tuple, BOX_SPACE_FIELD_ID, &space_id) != 0) return false; struct space *space = space_cache_find(space_id); if (space == NULL) return false; uint8_t effective = space->access[cr->auth_token].effective; return ((PRIV_R | PRIV_W) & (cr->universal_access | effective) || space->def->uid == cr->uid); } static bool vuser_filter(struct space *source, struct tuple *tuple) { struct credentials *cr = effective_user(); if (PRIV_R & cr->universal_access) return true; /* read access to unverse */ if (PRIV_R & source->access[cr->auth_token].effective) return true; /* read access to original space */ uint32_t uid; if (tuple_field_u32(tuple, BOX_USER_FIELD_ID, &uid) != 0) return false; uint32_t owner_id; if (tuple_field_u32(tuple, BOX_USER_FIELD_UID, &owner_id) != 0) return false; return uid == cr->uid || owner_id == cr->uid; } static bool vpriv_filter(struct space *source, struct tuple *tuple) { struct credentials *cr = effective_user(); if (PRIV_R & cr->universal_access) return true; /* read access to unverse */ if (PRIV_R & source->access[cr->auth_token].effective) return true; /* read access to original space */ uint32_t grantor_id; if (tuple_field_u32(tuple, BOX_PRIV_FIELD_ID, &grantor_id) != 0) return false; uint32_t grantee_id; if (tuple_field_u32(tuple, BOX_PRIV_FIELD_UID, &grantee_id) != 0) return false; return grantor_id == cr->uid || grantee_id == cr->uid; } static bool vfunc_filter(struct space *source, struct tuple *tuple) { struct credentials 
*cr = effective_user(); if ((PRIV_R | PRIV_X) & cr->universal_access) return true; /* read or execute access to unverse */ if (PRIV_R & source->access[cr->auth_token].effective) return true; /* read access to original space */ const char *name = tuple_field_cstr(tuple, BOX_FUNC_FIELD_NAME); if (name == NULL) return false; uint32_t name_len = strlen(name); struct func *func = func_by_name(name, name_len); assert(func != NULL); uint8_t effective = func->access[cr->auth_token].effective; if (func->def->uid == cr->uid || (PRIV_X & effective)) return true; return false; } struct sysview_index * sysview_index_new(struct sysview_engine *sysview, struct index_def *def, const char *space_name) { assert(def->type == TREE); if (!mempool_is_initialized(&sysview->iterator_pool)) { mempool_create(&sysview->iterator_pool, cord_slab_cache(), sizeof(struct sysview_iterator)); } uint32_t source_space_id; uint32_t source_index_id; sysview_filter_f filter; switch (def->space_id) { case BOX_VSPACE_ID: source_space_id = BOX_SPACE_ID; source_index_id = def->iid; filter = vspace_filter; break; case BOX_VINDEX_ID: source_space_id = BOX_INDEX_ID; source_index_id = def->iid; filter = vspace_filter; break; case BOX_VUSER_ID: source_space_id = BOX_USER_ID; source_index_id = def->iid; filter = vuser_filter; break; case BOX_VFUNC_ID: source_space_id = BOX_FUNC_ID; source_index_id = def->iid; filter = vfunc_filter; break; case BOX_VPRIV_ID: source_space_id = BOX_PRIV_ID; source_index_id = def->iid; filter = vpriv_filter; break; default: diag_set(ClientError, ER_MODIFY_INDEX, def->name, space_name, "unknown space for system view"); return NULL; } struct sysview_index *index = (struct sysview_index *)calloc(1, sizeof(*index)); if (index == NULL) { diag_set(OutOfMemory, sizeof(*index), "malloc", "struct sysview_index"); return NULL; } if (index_create(&index->base, (struct engine *)sysview, &sysview_index_vtab, def) != 0) { free(index); return NULL; } index->source_space_id = source_space_id; 
index->source_index_id = source_index_id; index->filter = filter; return index; } tarantool_1.9.1.26.g63eb81e3c/src/box/identifier.c0000664000000000000000000000535013306565107020064 0ustar rootroot/* * Copyright 2010-2018, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "identifier.h" #include "say.h" #include "diag.h" #include #include /* ICU returns this character in case of unknown symbol */ #define REPLACEMENT_CHARACTER (0xFFFD) static UConverter* utf8conv = NULL; int identifier_check(const char *str, size_t str_len) { assert(utf8conv); const char *end = str + str_len; if (str == end) goto error; ucnv_reset(utf8conv); while (str < end) { int8_t type; UErrorCode status = U_ZERO_ERROR; UChar32 c = ucnv_getNextUChar(utf8conv, &str, end, &status); if (U_FAILURE(status)) goto error; type = u_charType(c); /** * The icu library has a function named u_isprint, however, * this function does not return any errors. * Here the `c` symbol printability is determined by comparison * with unicode category types explicitly. */ if (c == REPLACEMENT_CHARACTER || type == U_UNASSIGNED || type == U_LINE_SEPARATOR || type == U_CONTROL_CHAR || type == U_PARAGRAPH_SEPARATOR) goto error; } return 0; error: diag_set(ClientError, ER_IDENTIFIER, tt_cstr(str, str_len)); return -1; } void identifier_init() { assert(utf8conv == NULL); UErrorCode status = U_ZERO_ERROR ; utf8conv = ucnv_open("utf8", &status); if (U_FAILURE(status)) panic("ICU ucnv_open(\"utf8\") failed"); } void identifier_destroy() { assert(utf8conv); ucnv_close(utf8conv); utf8conv = NULL; } tarantool_1.9.1.26.g63eb81e3c/src/box/session.cc0000664000000000000000000001667513306565107017604 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "session.h" #include "fiber.h" #include "memory.h" #include "assoc.h" #include "trigger.h" #include "random.h" #include "user.h" #include "error.h" const char *session_type_strs[] = { "background", "binary", "console", "repl", "applier", "unknown", }; static struct mh_i64ptr_t *session_registry; struct mempool session_pool; RLIST_HEAD(session_on_connect); RLIST_HEAD(session_on_disconnect); RLIST_HEAD(session_on_auth); static inline uint64_t sid_max() { static uint64_t sid_max = 0; /* Return the next sid rolling over the reserved value of 0. */ while (++sid_max == 0) ; return sid_max; } static void session_on_stop(struct trigger *trigger, void * /* event */) { /* * Remove on_stop trigger from the fiber, otherwise the * fiber will attempt to destroy the trigger eventually, * after the trigger and its memory is long gone. 
*/ trigger_clear(trigger); struct session *session = (struct session *) fiber_get_key(fiber(), FIBER_KEY_SESSION); /* Destroy the session */ session_destroy(session); } struct session * session_create(int fd, enum session_type type) { struct session *session = (struct session *) mempool_alloc(&session_pool); if (session == NULL) { diag_set(OutOfMemory, session_pool.objsize, "mempool", "new slab"); return NULL; } session->id = sid_max(); session->fd = fd; session->sync = 0; session->type = type; /* For on_connect triggers. */ credentials_init(&session->credentials, guest_user->auth_token, guest_user->def->uid); if (fd >= 0) random_bytes(session->salt, SESSION_SEED_SIZE); struct mh_i64ptr_node_t node; node.key = session->id; node.val = session; mh_int_t k = mh_i64ptr_put(session_registry, &node, NULL, NULL); if (k == mh_end(session_registry)) { mempool_free(&session_pool, session); diag_set(OutOfMemory, 0, "session hash", "new session"); return NULL; } return session; } struct session * session_create_on_demand(int fd) { assert(fiber_get_session(fiber()) == NULL); /* Create session on demand */ struct session *s = session_create(fd, SESSION_TYPE_BACKGROUND); if (s == NULL) return NULL; s->fiber_on_stop = { RLIST_LINK_INITIALIZER, session_on_stop, NULL, NULL }; /* Add a trigger to destroy session on fiber stop */ trigger_add(&fiber()->on_stop, &s->fiber_on_stop); credentials_init(&s->credentials, admin_user->auth_token, admin_user->def->uid); /* * At bootstrap, admin user access is not loaded yet (is * 0), force global access. @sa comment in session_init() */ s->credentials.universal_access = ~(user_access_t) 0; fiber_set_session(fiber(), s); fiber_set_user(fiber(), &s->credentials); return s; } /** * To quickly switch to admin user when executing * on_connect/on_disconnect triggers in iproto. 
*/ struct credentials admin_credentials; static int session_run_triggers(struct session *session, struct rlist *triggers) { struct fiber *fiber = fiber(); assert(session == current_session()); /* Run triggers with admin credentials */ fiber_set_user(fiber, &admin_credentials); int rc = trigger_run(triggers, NULL); /* Restore original credentials */ fiber_set_user(fiber, &session->credentials); return rc; } void session_run_on_disconnect_triggers(struct session *session) { if (session_run_triggers(session, &session_on_disconnect) != 0) diag_log(); } int session_run_on_connect_triggers(struct session *session) { return session_run_triggers(session, &session_on_connect); } int session_run_on_auth_triggers(const struct on_auth_trigger_ctx *result) { return trigger_run(&session_on_auth, (void *)result); } void session_destroy(struct session *session) { session_storage_cleanup(session->id); struct mh_i64ptr_node_t node = { session->id, NULL }; mh_i64ptr_remove(session_registry, &node, NULL); mempool_free(&session_pool, session); } struct session * session_find(uint64_t sid) { mh_int_t k = mh_i64ptr_find(session_registry, sid, NULL); if (k == mh_end(session_registry)) return NULL; return (struct session *) mh_i64ptr_node(session_registry, k)->val; } void session_init() { session_registry = mh_i64ptr_new(); if (session_registry == NULL) panic("out of memory"); mempool_create(&session_pool, &cord()->slabc, sizeof(struct session)); credentials_init(&admin_credentials, ADMIN, ADMIN); /* * For performance reasons, we do not always explicitly * look at user id in access checks, while still need to * ensure 'admin' user has full access to all objects in * the universe. * * This is why _priv table contains a record with grants * of full access to universe to 'admin' user. * * Making a record in _priv table is, however, * insufficient, since some checks are done at bootstrap, * before _priv table is read (e.g. when we're * bootstrapping a replica in applier fiber). 
* * When session_init() is called, admin user access is not * loaded yet (is 0), force global access. */ admin_credentials.universal_access = ~((user_access_t) 0); } void session_free() { if (session_registry) mh_i64ptr_delete(session_registry); } int access_check_session(struct user *user) { /* * Can't use here access_check_universe * as current_user is not assigned yet */ if (!(universe.access[user->auth_token].effective & PRIV_S)) { diag_set(AccessDeniedError, priv_name(PRIV_S), schema_object_name(SC_UNIVERSE), "", user->def->name); return -1; } return 0; } int access_check_universe(user_access_t access) { struct credentials *credentials = effective_user(); access |= PRIV_U; if ((credentials->universal_access & access) ^ access) { /* * Access violation, report error. * The user may not exist already, if deleted * from a different connection. */ int denied_access = access & ((credentials->universal_access & access) ^ access); struct user *user = user_find(credentials->uid); if (user != NULL) { diag_set(AccessDeniedError, priv_name(denied_access), schema_object_name(SC_UNIVERSE), "", user->def->name); } else { /* * The user may have been dropped, in * which case user_find() will set the * error. */ assert(!diag_is_empty(&fiber()->diag)); } return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_range.c0000664000000000000000000003460313306565107017557 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_range.h" #include #include #include #include #include #include #define RB_COMPACT 1 #include #include #include "diag.h" #include "iterator_type.h" #include "key_def.h" #include "trivia/util.h" #include "tuple.h" #include "vy_run.h" #include "vy_stat.h" #include "vy_stmt.h" int vy_range_tree_cmp(struct vy_range *range_a, struct vy_range *range_b) { if (range_a == range_b) return 0; /* Any key > -inf. */ if (range_a->begin == NULL) return -1; if (range_b->begin == NULL) return 1; assert(range_a->cmp_def == range_b->cmp_def); return vy_key_compare(range_a->begin, range_b->begin, range_a->cmp_def); } int vy_range_tree_key_cmp(const struct tuple *stmt, struct vy_range *range) { /* Any key > -inf. 
*/ if (range->begin == NULL) return 1; return vy_stmt_compare_with_key(stmt, range->begin, range->cmp_def); } struct vy_range * vy_range_tree_find_by_key(vy_range_tree_t *tree, enum iterator_type iterator_type, const struct tuple *key) { uint32_t key_field_count = tuple_field_count(key); if (key_field_count == 0) { switch (iterator_type) { case ITER_LT: case ITER_LE: case ITER_REQ: return vy_range_tree_last(tree); case ITER_GT: case ITER_GE: case ITER_EQ: return vy_range_tree_first(tree); default: unreachable(); return NULL; } } struct vy_range *range; if (iterator_type == ITER_GE || iterator_type == ITER_GT || iterator_type == ITER_EQ) { /** * Case 1. part_count == 1, looking for [10]. ranges: * {1, 3, 5} {7, 8, 9} {10, 15 20} {22, 32, 42} * ^looking for this * Case 2. part_count == 1, looking for [10]. ranges: * {1, 2, 4} {5, 6, 7, 8} {50, 100, 200} * ^looking for this * Case 3. part_count == 2, looking for [10]. ranges: * {[1, 2], [2, 3]} {[9, 1], [10, 1], [10 2], [11 3]} {[12,..} * ^looking for this * Case 4. part_count == 2, looking for [10]. ranges: * {[1, 2], [10, 1]} {[10, 2] [10 3] [11 3]} {[12, 1]..} * ^looking for this * Case 5. part_count does not matter, looking for [10]. * ranges: * {100, 200}, {300, 400} * ^looking for this */ /** * vy_range_tree_psearch finds least range with begin == key * or previous if equal was not found */ range = vy_range_tree_psearch(tree, key); /* switch to previous for case (4) */ if (range != NULL && range->begin != NULL && key_field_count < range->cmp_def->part_count && vy_stmt_compare_with_key(key, range->begin, range->cmp_def) == 0) range = vy_range_tree_prev(tree, range); /* for case 5 or subcase of case 4 */ if (range == NULL) range = vy_range_tree_first(tree); } else { assert(iterator_type == ITER_LT || iterator_type == ITER_LE || iterator_type == ITER_REQ); /** * Case 1. part_count == 1, looking for [10]. ranges: * {1, 3, 5} {7, 8, 9} {10, 15 20} {22, 32, 42} * ^looking for this * Case 2. 
part_count == 1, looking for [10]. ranges: * {1, 2, 4} {5, 6, 7, 8} {50, 100, 200} * ^looking for this * Case 3. part_count == 2, looking for [10]. ranges: * {[1, 2], [2, 3]} {[9, 1], [10, 1], [10 2], [11 3]} {[12,..} * ^looking for this * Case 4. part_count == 2, looking for [10]. ranges: * {[1, 2], [10, 1]} {[10, 2] [10 3] [11 3]} {[12, 1]..} * ^looking for this * Case 5. part_count does not matter, looking for [10]. * ranges: * {1, 2}, {3, 4, ..} * ^looking for this */ /** * vy_range_tree_nsearch finds most range with begin == key * or next if equal was not found */ range = vy_range_tree_nsearch(tree, key); if (range != NULL) { /* fix curr_range for cases 2 and 3 */ if (range->begin != NULL && vy_stmt_compare_with_key(key, range->begin, range->cmp_def) != 0) { struct vy_range *prev; prev = vy_range_tree_prev(tree, range); if (prev != NULL) range = prev; } } else { /* Case 5 */ range = vy_range_tree_last(tree); } } return range; } struct vy_range * vy_range_new(int64_t id, struct tuple *begin, struct tuple *end, const struct key_def *cmp_def) { struct vy_range *range = calloc(1, sizeof(*range)); if (range == NULL) { diag_set(OutOfMemory, sizeof(*range), "malloc", "struct vy_range"); return NULL; } range->id = id; if (begin != NULL) { tuple_ref(begin); range->begin = begin; } if (end != NULL) { tuple_ref(end); range->end = end; } range->cmp_def = cmp_def; rlist_create(&range->slices); range->heap_node.pos = UINT32_MAX; return range; } void vy_range_delete(struct vy_range *range) { if (range->begin != NULL) tuple_unref(range->begin); if (range->end != NULL) tuple_unref(range->end); struct vy_slice *slice, *next_slice; rlist_foreach_entry_safe(slice, &range->slices, in_range, next_slice) vy_slice_delete(slice); TRASH(range); free(range); } int vy_range_snprint(char *buf, int size, const struct vy_range *range) { int total = 0; SNPRINT(total, snprintf, buf, size, "("); if (range->begin != NULL) SNPRINT(total, tuple_snprint, buf, size, range->begin); else 
SNPRINT(total, snprintf, buf, size, "-inf"); SNPRINT(total, snprintf, buf, size, ".."); if (range->end != NULL) SNPRINT(total, tuple_snprint, buf, size, range->end); else SNPRINT(total, snprintf, buf, size, "inf"); SNPRINT(total, snprintf, buf, size, ")"); return total; } void vy_range_add_slice(struct vy_range *range, struct vy_slice *slice) { rlist_add_entry(&range->slices, slice, in_range); range->slice_count++; vy_disk_stmt_counter_add(&range->count, &slice->count); } void vy_range_add_slice_before(struct vy_range *range, struct vy_slice *slice, struct vy_slice *next_slice) { rlist_add_tail(&next_slice->in_range, &slice->in_range); range->slice_count++; vy_disk_stmt_counter_add(&range->count, &slice->count); } void vy_range_remove_slice(struct vy_range *range, struct vy_slice *slice) { assert(range->slice_count > 0); assert(!rlist_empty(&range->slices)); rlist_del_entry(slice, in_range); range->slice_count--; vy_disk_stmt_counter_sub(&range->count, &slice->count); } /** * To reduce write amplification caused by compaction, we follow * the LSM tree design. Runs in each range are divided into groups * called levels: * * level 1: runs 1 .. L_1 * level 2: runs L_1 + 1 .. L_2 * ... * level N: runs L_{N-1} .. L_N * * where L_N is the total number of runs, N is the total number of * levels, older runs have greater numbers. Runs at each subsequent * are run_size_ratio times larger than on the previous one. When * the number of runs at a level exceeds run_count_per_level, we * compact all its runs along with all runs from the upper levels * and in-memory indexes. Including previous levels into * compaction is relatively cheap, because of the level size * ratio. * * Given a range, this function computes the maximal level that needs * to be compacted and sets @compact_priority to the number of runs in * this level and all preceding levels. 
*/ void vy_range_update_compact_priority(struct vy_range *range, const struct index_opts *opts) { assert(opts->run_count_per_level > 0); assert(opts->run_size_ratio > 1); range->compact_priority = 0; /* Total number of checked runs. */ uint32_t total_run_count = 0; /* The total size of runs checked so far. */ uint64_t total_size = 0; /* Estimated size of a compacted run, if compaction is scheduled. */ uint64_t est_new_run_size = 0; /* The number of runs at the current level. */ uint32_t level_run_count = 0; /* * The target (perfect) size of a run at the current level. * For the first level, it's the size of the newest run. * For lower levels it's computed as first level run size * times run_size_ratio. */ uint64_t target_run_size = 0; struct vy_slice *slice; rlist_foreach_entry(slice, &range->slices, in_range) { uint64_t size = slice->count.bytes_compressed; /* * The size of the first level is defined by * the size of the most recent run. */ if (target_run_size == 0) target_run_size = size; total_size += size; level_run_count++; total_run_count++; while (size > target_run_size) { /* * The run size exceeds the threshold * set for the current level. Move this * run down to a lower level. Switch the * current level and reset the level run * count. */ level_run_count = 1; /* * If we have already scheduled * a compaction of an upper level, and * estimated compacted run will end up at * this level, include the new run into * this level right away to avoid * a cascading compaction. */ if (est_new_run_size > target_run_size) level_run_count++; /* * Calculate the target run size for this * level. */ target_run_size *= opts->run_size_ratio; /* * Keep pushing the run down until * we find an appropriate level for it. */ } if (level_run_count > opts->run_count_per_level) { /* * The number of runs at the current level * exceeds the configured maximum. Arrange * for compaction. We compact all runs at * this level and upper levels. 
*/ range->compact_priority = total_run_count; est_new_run_size = total_size; } } } /** * Return true and set split_key accordingly if the range needs to be * split in two. * * - We should never split a range until it was merged at least once * (actually, it should be a function of run_count_per_level/number * of runs used for the merge: with low run_count_per_level it's more * than once, with high run_count_per_level it's once). * - We should use the last run size as the size of the range. * - We should split around the last run middle key. * - We should only split if the last run size is greater than * 4/3 * range_size. */ bool vy_range_needs_split(struct vy_range *range, const struct index_opts *opts, const char **p_split_key) { struct vy_slice *slice; /* The range hasn't been merged yet - too early to split it. */ if (range->n_compactions < 1) return false; /* Find the oldest run. */ assert(!rlist_empty(&range->slices)); slice = rlist_last_entry(&range->slices, struct vy_slice, in_range); /* The range is too small to be split. */ if (slice->count.bytes_compressed < opts->range_size * 4 / 3) return false; /* Find the median key in the oldest run (approximately). */ struct vy_page_info *mid_page; mid_page = vy_run_page_info(slice->run, slice->first_page_no + (slice->last_page_no - slice->first_page_no) / 2); struct vy_page_info *first_page = vy_run_page_info(slice->run, slice->first_page_no); /* No point in splitting if a new range is going to be empty. */ if (key_compare(first_page->min_key, mid_page->min_key, range->cmp_def) == 0) return false; /* * In extreme cases the median key can be < the beginning * of the slice, e.g. * * RUN: * ... |---- page N ----|-- page N + 1 --|-- page N + 2 -- * | min_key = [10] | min_key = [50] | min_key = [100] * * SLICE: * begin = [30], end = [70] * first_page_no = N, last_page_no = N + 1 * * which makes mid_page_no = N and mid_page->min_key = [10]. * * In such cases there's no point in splitting the range. 
*/ if (slice->begin != NULL && key_compare(mid_page->min_key, tuple_data(slice->begin), range->cmp_def) <= 0) return false; /* * The median key can't be >= the end of the slice as we * take the min key of a page for the median key. */ assert(slice->end == NULL || key_compare(mid_page->min_key, tuple_data(slice->end), range->cmp_def) < 0); *p_split_key = mid_page->min_key; return true; } /** * Check if a range should be coalesced with one or more its neighbors. * If it should, return true and set @p_first and @p_last to the first * and last ranges to coalesce, otherwise return false. * * We coalesce ranges together when they become too small, less than * half the target range size to avoid split-coalesce oscillations. */ bool vy_range_needs_coalesce(struct vy_range *range, vy_range_tree_t *tree, const struct index_opts *opts, struct vy_range **p_first, struct vy_range **p_last) { struct vy_range *it; /* Size of the coalesced range. */ uint64_t total_size = range->count.bytes_compressed; /* Coalesce ranges until total_size > max_size. */ uint64_t max_size = opts->range_size / 2; /* * We can't coalesce a range that was scheduled for dump * or compaction, because it is about to be processed by * a worker thread. 
*/ assert(!vy_range_is_scheduled(range)); *p_first = *p_last = range; for (it = vy_range_tree_next(tree, range); it != NULL && !vy_range_is_scheduled(it); it = vy_range_tree_next(tree, it)) { uint64_t size = it->count.bytes_compressed; if (total_size + size > max_size) break; total_size += size; *p_last = it; } for (it = vy_range_tree_prev(tree, range); it != NULL && !vy_range_is_scheduled(it); it = vy_range_tree_prev(tree, it)) { uint64_t size = it->count.bytes_compressed; if (total_size + size > max_size) break; total_size += size; *p_first = it; } return *p_first != *p_last; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_read_iterator.c0000664000000000000000000007304313306565107021310 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_read_iterator.h" #include "vy_run.h" #include "vy_mem.h" #include "vy_cache.h" #include "vy_tx.h" #include "fiber.h" #include "vy_upsert.h" #include "vy_index.h" #include "vy_stat.h" /** * Merge source, support structure for vy_read_iterator. * Contains source iterator and merge state. */ struct vy_read_src { /** Source iterator. */ union { struct vy_run_iterator run_iterator; struct vy_mem_iterator mem_iterator; struct vy_txw_iterator txw_iterator; struct vy_cache_iterator cache_iterator; }; /** Set if the iterator was started. */ bool is_started; /** See vy_read_iterator->front_id. */ uint32_t front_id; /** Statement the iterator is at. */ struct tuple *stmt; }; /** * Extend internal source array capacity to fit capacity sources. * Not necessary to call is but calling it allows to optimize internal memory * allocation */ static NODISCARD int vy_read_iterator_reserve(struct vy_read_iterator *itr, uint32_t capacity) { if (itr->src_capacity >= capacity) return 0; struct vy_read_src *new_src = calloc(capacity, sizeof(*new_src)); if (new_src == NULL) { diag_set(OutOfMemory, capacity * sizeof(*new_src), "calloc", "new_src"); return -1; } if (itr->src_count > 0) { memcpy(new_src, itr->src, itr->src_count * sizeof(*new_src)); free(itr->src); } itr->src = new_src; itr->src_capacity = capacity; return 0; } /** * Add another source to read iterator. Must be called before actual * iteration start and must not be called after. 
*/ static struct vy_read_src * vy_read_iterator_add_src(struct vy_read_iterator *itr) { if (itr->src_count == itr->src_capacity) { if (vy_read_iterator_reserve(itr, itr->src_count + 1) != 0) return NULL; } itr->src[itr->src_count].front_id = 0; struct vy_read_src *src = &itr->src[itr->src_count++]; memset(src, 0, sizeof(*src)); return src; } /** * Pin all slices open by the read iterator. * Used to make sure no run slice is invalidated by * compaction while we are fetching data from disk. */ static void vy_read_iterator_pin_slices(struct vy_read_iterator *itr) { for (uint32_t i = itr->disk_src; i < itr->src_count; i++) { struct vy_read_src *src = &itr->src[i]; vy_slice_pin(src->run_iterator.slice); } } /** * Unpin all slices open by the read iterator. * See also: vy_read_iterator_pin_slices(). */ static void vy_read_iterator_unpin_slices(struct vy_read_iterator *itr) { for (uint32_t i = itr->disk_src; i < itr->src_count; i++) { struct vy_read_src *src = &itr->src[i]; vy_slice_unpin(src->run_iterator.slice); } } /** * Return true if the current statement is outside the current * range and hence we should move to the next range. * * If we are looking for a match (EQ, REQ) and the search key * doesn't intersect with the current range's boundary, the next * range can't contain statements matching the search criteria * and hence there's no point in iterating to it. 
*/ static bool vy_read_iterator_range_is_done(struct vy_read_iterator *itr) { struct tuple *stmt = itr->curr_stmt; struct vy_range *range = itr->curr_range; struct key_def *cmp_def = itr->index->cmp_def; int dir = iterator_direction(itr->iterator_type); if (dir > 0 && range->end != NULL && (stmt == NULL || vy_tuple_compare_with_key(stmt, range->end, cmp_def) >= 0) && (itr->iterator_type != ITER_EQ || vy_stmt_compare_with_key(itr->key, range->end, cmp_def) >= 0)) return true; if (dir < 0 && range->begin != NULL && (stmt == NULL || vy_tuple_compare_with_key(stmt, range->begin, cmp_def) < 0) && (itr->iterator_type != ITER_REQ || vy_stmt_compare_with_key(itr->key, range->begin, cmp_def) <= 0)) return true; return false; } /** * Compare two tuples from the read iterator perspective. * * Returns: * -1 if statement @a precedes statement @b in the iterator output * 0 if statements @a and @b are at the same position * 1 if statement @a supersedes statement @b * * NULL denotes the statement following the last one. */ static inline int vy_read_iterator_cmp_stmt(struct vy_read_iterator *itr, const struct tuple *a, const struct tuple *b) { if (a == NULL && b != NULL) return 1; if (a != NULL && b == NULL) return -1; if (a == NULL && b == NULL) return 0; return iterator_direction(itr->iterator_type) * vy_tuple_compare(a, b, itr->index->cmp_def); } /** * Return true if the statement matches search criteria * and older sources don't need to be scanned. */ static bool vy_read_iterator_is_exact_match(struct vy_read_iterator *itr, struct tuple *stmt) { struct tuple *key = itr->key; enum iterator_type type = itr->iterator_type; struct key_def *cmp_def = itr->index->cmp_def; /* * If the index is unique and the search key is full, * we can avoid disk accesses on the first iteration * in case the key is found in memory. 
*/ return itr->last_stmt == NULL && stmt != NULL && (type == ITER_EQ || type == ITER_REQ || type == ITER_GE || type == ITER_LE) && tuple_field_count(key) >= cmp_def->part_count && vy_stmt_compare(stmt, key, cmp_def) == 0; } /** * Check if the statement at which the given read source * is positioned precedes the current candidate for the * next key ('curr_stmt') and update the latter if so. * The 'stop' flag is set if the next key is found and * older sources don't need to be evaluated. */ static void vy_read_iterator_evaluate_src(struct vy_read_iterator *itr, struct vy_read_src *src, bool *stop) { uint32_t src_id = src - itr->src; int cmp = vy_read_iterator_cmp_stmt(itr, src->stmt, itr->curr_stmt); if (cmp < 0) { assert(src->stmt != NULL); tuple_ref(src->stmt); if (itr->curr_stmt != NULL) tuple_unref(itr->curr_stmt); itr->curr_stmt = src->stmt; itr->curr_src = src_id; itr->front_id++; } if (cmp <= 0) src->front_id = itr->front_id; itr->skipped_src = MAX(itr->skipped_src, src_id + 1); if (cmp < 0 && vy_read_iterator_is_exact_match(itr, src->stmt)) { itr->skipped_src = src_id + 1; *stop = true; } } /* * Each of the functions from the vy_read_iterator_scan_* family * is used by vy_read_iterator_next_key() to: * * 1. Update the position of a read source, which implies: * * - Starting iteration over the source if it has not been done * yet or restoring the iterator position in case the source * has been modified since the last iteration. * * - Advancing the iterator position to the first statement * following the one returned on the previous iteration. * To avoid an extra tuple comparison, we maintain front_id * for each source: all sources with front_id equal to the * front_id of the read iterator were used on the previous * iteration and hence need to be advanced. * * 2. Update the candidate for the next key ('curr_stmt') if the * statement at which the source is positioned precedes it. * The 'stop' flag is set if older sources do not need to be * scanned (e.g. 
because a chain was found in the cache). * See also vy_read_iterator_evaluate_src(). */ static void vy_read_iterator_scan_txw(struct vy_read_iterator *itr, bool *stop) { struct vy_read_src *src = &itr->src[itr->txw_src]; struct vy_txw_iterator *src_itr = &src->txw_iterator; if (itr->tx == NULL) return; assert(itr->txw_src < itr->skipped_src); int rc = vy_txw_iterator_restore(src_itr, itr->last_stmt, &src->stmt); if (rc == 0) { if (!src->is_started) { vy_txw_iterator_skip(src_itr, itr->last_stmt, &src->stmt); } else if (src->front_id == itr->prev_front_id) { vy_txw_iterator_next(src_itr, &src->stmt); } src->is_started = true; } vy_read_iterator_evaluate_src(itr, src, stop); } static void vy_read_iterator_scan_cache(struct vy_read_iterator *itr, bool *stop) { bool is_interval = false; struct vy_read_src *src = &itr->src[itr->cache_src]; struct vy_cache_iterator *src_itr = &src->cache_iterator; int rc = vy_cache_iterator_restore(src_itr, itr->last_stmt, &src->stmt, &is_interval); if (rc == 0) { if (!src->is_started || itr->cache_src >= itr->skipped_src) { vy_cache_iterator_skip(src_itr, itr->last_stmt, &src->stmt, &is_interval); } else if (src->front_id == itr->prev_front_id) { vy_cache_iterator_next(src_itr, &src->stmt, &is_interval); } src->is_started = true; } vy_read_iterator_evaluate_src(itr, src, stop); if (is_interval) { itr->skipped_src = itr->cache_src + 1; *stop = true; } } static NODISCARD int vy_read_iterator_scan_mem(struct vy_read_iterator *itr, uint32_t mem_src, bool *stop) { int rc; struct vy_read_src *src = &itr->src[mem_src]; struct vy_mem_iterator *src_itr = &src->mem_iterator; assert(mem_src >= itr->mem_src && mem_src < itr->disk_src); rc = vy_mem_iterator_restore(src_itr, itr->last_stmt, &src->stmt); if (rc == 0) { if (!src->is_started || mem_src >= itr->skipped_src) { rc = vy_mem_iterator_skip(src_itr, itr->last_stmt, &src->stmt); } else if (src->front_id == itr->prev_front_id) { rc = vy_mem_iterator_next_key(src_itr, &src->stmt); } 
src->is_started = true; } if (rc < 0) return -1; vy_read_iterator_evaluate_src(itr, src, stop); return 0; } static NODISCARD int vy_read_iterator_scan_disk(struct vy_read_iterator *itr, uint32_t disk_src, bool *stop) { int rc = 0; struct vy_read_src *src = &itr->src[disk_src]; struct vy_run_iterator *src_itr = &src->run_iterator; assert(disk_src >= itr->disk_src && disk_src < itr->src_count); if (!src->is_started || disk_src >= itr->skipped_src) rc = vy_run_iterator_skip(src_itr, itr->last_stmt, &src->stmt); else if (src->front_id == itr->prev_front_id) rc = vy_run_iterator_next_key(src_itr, &src->stmt); src->is_started = true; if (rc < 0) return -1; vy_read_iterator_evaluate_src(itr, src, stop); return 0; } /** * Restore the position of the active in-memory tree iterator * after a yield caused by a disk read and update 'curr_stmt' * if necessary. */ static NODISCARD int vy_read_iterator_restore_mem(struct vy_read_iterator *itr) { int rc; int cmp; struct vy_read_src *src = &itr->src[itr->mem_src]; rc = vy_mem_iterator_restore(&src->mem_iterator, itr->last_stmt, &src->stmt); if (rc < 0) return -1; /* memory allocation error */ if (rc == 0) return 0; /* nothing changed */ cmp = vy_read_iterator_cmp_stmt(itr, src->stmt, itr->curr_stmt); if (cmp > 0) { /* * Memory trees are append-only so if the * source is not on top of the heap after * restoration, it was not before. */ assert(src->front_id < itr->front_id); return 0; } if (cmp < 0 || itr->curr_src != itr->txw_src) { /* * The new statement precedes the current * candidate for the next key or it is a * newer version of the same key. */ tuple_ref(src->stmt); if (itr->curr_stmt != NULL) tuple_unref(itr->curr_stmt); itr->curr_stmt = src->stmt; itr->curr_src = itr->mem_src; } else { /* * Make sure we don't read the old value * from the cache while applying UPSERTs. 
*/ itr->src[itr->cache_src].front_id = 0; } if (cmp < 0) itr->front_id++; src->front_id = itr->front_id; return 0; } static void vy_read_iterator_restore(struct vy_read_iterator *itr); static void vy_read_iterator_next_range(struct vy_read_iterator *itr); static int vy_read_iterator_track_read(struct vy_read_iterator *itr, struct tuple *stmt); /** * Iterate to the next key * @retval 0 success or EOF (*ret == NULL) * @retval -1 read error */ static NODISCARD int vy_read_iterator_next_key(struct vy_read_iterator *itr, struct tuple **ret) { uint32_t i; bool stop = false; if (itr->last_stmt != NULL && (itr->iterator_type == ITER_EQ || itr->iterator_type == ITER_REQ) && tuple_field_count(itr->key) >= itr->index->cmp_def->part_count) { /* * There may be one statement at max satisfying * EQ with a full key. */ *ret = NULL; return 0; } /* * Restore the iterator position if the index has changed * since the last iteration. */ if (itr->mem_list_version != itr->index->mem_list_version || itr->range_tree_version != itr->index->range_tree_version || itr->range_version != itr->curr_range->version) { vy_read_iterator_restore(itr); } restart: if (itr->curr_stmt != NULL) tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; itr->curr_src = UINT32_MAX; itr->prev_front_id = itr->front_id; /* * Look up the next key in read sources starting * from the one that stores newest data. */ vy_read_iterator_scan_txw(itr, &stop); if (stop) goto done; vy_read_iterator_scan_cache(itr, &stop); if (stop) goto done; for (i = itr->mem_src; i < itr->disk_src; i++) { if (vy_read_iterator_scan_mem(itr, i, &stop) != 0) return -1; if (stop) goto done; } rescan_disk: /* The following code may yield as it needs to access disk. 
*/ vy_read_iterator_pin_slices(itr); for (i = itr->disk_src; i < itr->src_count; i++) { if (vy_read_iterator_scan_disk(itr, i, &stop) != 0) goto err_disk; if (stop) break; } vy_read_iterator_unpin_slices(itr); /* * The list of in-memory indexes and/or the range tree could * have been modified by dump/compaction while we were fetching * data from disk. Restart the iterator if this is the case. * Note, we don't need to check the current range's version, * because all slices were pinned and hence could not be * removed. */ if (itr->mem_list_version != itr->index->mem_list_version || itr->range_tree_version != itr->index->range_tree_version) { vy_read_iterator_restore(itr); goto restart; } /* * The transaction write set couldn't change during the yield * as it is owned exclusively by the current fiber so the only * source to check is the active in-memory tree. */ if (vy_read_iterator_restore_mem(itr) != 0) return -1; /* * Scan the next range in case we transgressed the current * range's boundaries. */ if (vy_read_iterator_range_is_done(itr)) { vy_read_iterator_next_range(itr); goto rescan_disk; } done: if (itr->last_stmt != NULL && itr->curr_stmt != NULL) assert(vy_read_iterator_cmp_stmt(itr, itr->curr_stmt, itr->last_stmt) > 0); if (itr->need_check_eq && itr->curr_stmt != NULL && vy_stmt_compare(itr->curr_stmt, itr->key, itr->index->cmp_def) != 0) itr->curr_stmt = NULL; if (vy_read_iterator_track_read(itr, itr->curr_stmt) != 0) return -1; *ret = itr->curr_stmt; return 0; err_disk: vy_read_iterator_unpin_slices(itr); return -1; } /** * Iterate to the next (elder) version of the same key * @retval 0 success or EOF (*ret == NULL) * @retval -1 read error */ static NODISCARD int vy_read_iterator_next_lsn(struct vy_read_iterator *itr, struct tuple **ret) { uint32_t i; bool unused; struct vy_read_src *src; assert(itr->curr_stmt != NULL); assert(itr->curr_src < itr->skipped_src); /* Cache stores only terminal statements. 
*/ assert(itr->curr_src != itr->cache_src); if (itr->curr_src == itr->txw_src) { /* * Write set does not store statement history. * Look up the older statement in the cache and * if it isn't there proceed to mems and runs. */ src = &itr->src[itr->cache_src]; if (itr->cache_src >= itr->skipped_src) vy_read_iterator_scan_cache(itr, &unused); if (src->front_id == itr->front_id) goto found; } /* Look up the older statement in in-memory trees. */ for (i = MAX(itr->curr_src, itr->mem_src); i < itr->disk_src; i++) { src = &itr->src[i]; if (i >= itr->skipped_src && vy_read_iterator_scan_mem(itr, i, &unused) != 0) return -1; if (src->front_id != itr->front_id) continue; if (i == itr->curr_src && vy_mem_iterator_next_lsn(&src->mem_iterator, &src->stmt) != 0) return -1; if (src->stmt != NULL) goto found; } /* * Look up the older statement in on-disk runs. * * Note, we don't need to check the index version after the yield * caused by the disk read, because once we've come to this point, * we won't read any source except run slices, which are pinned * and hence cannot be removed during the yield. */ vy_read_iterator_pin_slices(itr); for (i = MAX(itr->curr_src, itr->disk_src); i < itr->src_count; i++) { src = &itr->src[i]; if (i >= itr->skipped_src && vy_read_iterator_scan_disk(itr, i, &unused) != 0) goto err_disk; if (src->front_id != itr->front_id) continue; if (i == itr->curr_src && vy_run_iterator_next_lsn(&src->run_iterator, &src->stmt) != 0) goto err_disk; if (src->stmt != NULL) break; } vy_read_iterator_unpin_slices(itr); if (i < itr->src_count) goto found; /* Searched everywhere, found nothing. */ *ret = NULL; return 0; found: tuple_ref(src->stmt); if (itr->curr_stmt != NULL) tuple_unref(itr->curr_stmt); itr->curr_stmt = src->stmt; itr->curr_src = src - itr->src; *ret = itr->curr_stmt; return 0; err_disk: vy_read_iterator_unpin_slices(itr); return -1; } /** * Squash in a single REPLACE all UPSERTs for the current key. 
* * @retval 0 success * @retval -1 error */ static NODISCARD int vy_read_iterator_squash_upsert(struct vy_read_iterator *itr, struct tuple **ret) { *ret = NULL; struct vy_index *index = itr->index; struct tuple *t = itr->curr_stmt; /* Upserts enabled only in the primary index. */ assert(vy_stmt_type(t) != IPROTO_UPSERT || index->id == 0); tuple_ref(t); while (vy_stmt_type(t) == IPROTO_UPSERT) { struct tuple *next; int rc = vy_read_iterator_next_lsn(itr, &next); if (rc != 0) { tuple_unref(t); return rc; } struct tuple *applied = vy_apply_upsert(t, next, index->cmp_def, index->mem_format, index->upsert_format, true); index->stat.upsert.applied++; tuple_unref(t); if (applied == NULL) return -1; t = applied; if (next == NULL) break; } *ret = t; return 0; } static void vy_read_iterator_add_tx(struct vy_read_iterator *itr) { assert(itr->tx != NULL); enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ? itr->iterator_type : ITER_LE); struct vy_txw_iterator_stat *stat = &itr->index->stat.txw.iterator; struct vy_read_src *sub_src = vy_read_iterator_add_src(itr); vy_txw_iterator_open(&sub_src->txw_iterator, stat, itr->tx, itr->index, iterator_type, itr->key); } static void vy_read_iterator_add_cache(struct vy_read_iterator *itr) { enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ? itr->iterator_type : ITER_LE); struct vy_read_src *sub_src = vy_read_iterator_add_src(itr); vy_cache_iterator_open(&sub_src->cache_iterator, &itr->index->cache, iterator_type, itr->key, itr->read_view); } static void vy_read_iterator_add_mem(struct vy_read_iterator *itr) { enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ? itr->iterator_type : ITER_LE); struct vy_index *index = itr->index; struct vy_read_src *sub_src; /* Add the active in-memory index. 
*/ assert(index->mem != NULL); sub_src = vy_read_iterator_add_src(itr); vy_mem_iterator_open(&sub_src->mem_iterator, &index->stat.memory.iterator, index->mem, iterator_type, itr->key, itr->read_view); /* Add sealed in-memory indexes. */ struct vy_mem *mem; rlist_foreach_entry(mem, &index->sealed, in_sealed) { sub_src = vy_read_iterator_add_src(itr); vy_mem_iterator_open(&sub_src->mem_iterator, &index->stat.memory.iterator, mem, iterator_type, itr->key, itr->read_view); } } static void vy_read_iterator_add_disk(struct vy_read_iterator *itr) { assert(itr->curr_range != NULL); enum iterator_type iterator_type = (itr->iterator_type != ITER_REQ ? itr->iterator_type : ITER_LE); struct vy_index *index = itr->index; struct vy_slice *slice; /* * The format of the statement must be exactly the space * format with the same identifier to fully match the * format in vy_mem. */ rlist_foreach_entry(slice, &itr->curr_range->slices, in_range) { /* * vy_task_dump_complete() may yield after adding * a new run slice to a range and before removing * dumped in-memory trees. We must not add both * the slice and the trees in this case, because * the read iterator can't deal with duplicates. * Since index->dump_lsn is bumped after deletion * of dumped in-memory trees, we can filter out * the run slice containing duplicates by LSN. */ if (slice->run->info.min_lsn > index->dump_lsn) continue; assert(slice->run->info.max_lsn <= index->dump_lsn); struct vy_read_src *sub_src = vy_read_iterator_add_src(itr); vy_run_iterator_open(&sub_src->run_iterator, &index->stat.disk.iterator, slice, iterator_type, itr->key, itr->read_view, index->cmp_def, index->key_def, index->disk_format, index->upsert_format, index->id == 0); } } /** * Close all open sources and reset the merge state. 
*/ static void vy_read_iterator_cleanup(struct vy_read_iterator *itr) { uint32_t i; struct vy_read_src *src; if (itr->txw_src < itr->src_count) { src = &itr->src[itr->txw_src]; vy_txw_iterator_close(&src->txw_iterator); } if (itr->cache_src < itr->src_count) { src = &itr->src[itr->cache_src]; vy_cache_iterator_close(&src->cache_iterator); } for (i = itr->mem_src; i < itr->disk_src; i++) { src = &itr->src[i]; vy_mem_iterator_close(&src->mem_iterator); } for (i = itr->disk_src; i < itr->src_count; i++) { src = &itr->src[i]; vy_run_iterator_close(&src->run_iterator); } if (itr->curr_stmt != NULL) tuple_unref(itr->curr_stmt); itr->curr_stmt = NULL; itr->curr_src = UINT32_MAX; itr->txw_src = UINT32_MAX; itr->cache_src = UINT32_MAX; itr->mem_src = UINT32_MAX; itr->disk_src = UINT32_MAX; itr->skipped_src = UINT32_MAX; itr->src_count = 0; } void vy_read_iterator_open(struct vy_read_iterator *itr, struct vy_index *index, struct vy_tx *tx, enum iterator_type iterator_type, struct tuple *key, const struct vy_read_view **rv) { memset(itr, 0, sizeof(*itr)); itr->index = index; itr->tx = tx; itr->iterator_type = iterator_type; itr->key = key; itr->read_view = rv; if (tuple_field_count(key) == 0) { /* * Strictly speaking, a GT/LT iterator should return * nothing if the key is empty, because every key is * equal to the empty key, but historically we return * all keys instead. So use GE/LE instead of GT/LT * in this case. */ itr->iterator_type = iterator_direction(iterator_type) > 0 ? ITER_GE : ITER_LE; } if (iterator_type == ITER_ALL) itr->iterator_type = ITER_GE; if (iterator_type == ITER_REQ) { /* * Source iterators cannot handle ITER_REQ and * use ITER_LE instead, so we need to enable EQ * check in this case. * * See vy_read_iterator_add_{tx,cache,mem,run}. */ itr->need_check_eq = true; } } /** * Restart the read iterator from the position following * the last statement returned to the user. Called when * the current range or the whole range tree is changed. 
* Also used for preparing the iterator for the first * iteration. */ static void vy_read_iterator_restore(struct vy_read_iterator *itr) { vy_read_iterator_cleanup(itr); itr->mem_list_version = itr->index->mem_list_version; itr->range_tree_version = itr->index->range_tree_version; itr->curr_range = vy_range_tree_find_by_key(itr->index->tree, itr->iterator_type, itr->last_stmt ?: itr->key); itr->range_version = itr->curr_range->version; if (itr->tx != NULL) { itr->txw_src = itr->src_count; vy_read_iterator_add_tx(itr); } itr->cache_src = itr->src_count; vy_read_iterator_add_cache(itr); itr->mem_src = itr->src_count; vy_read_iterator_add_mem(itr); itr->disk_src = itr->src_count; vy_read_iterator_add_disk(itr); } /** * Iterate to the next range. */ static void vy_read_iterator_next_range(struct vy_read_iterator *itr) { struct vy_range *range = itr->curr_range; struct key_def *cmp_def = itr->index->cmp_def; int dir = iterator_direction(itr->iterator_type); assert(range != NULL); while (true) { range = dir > 0 ? vy_range_tree_next(itr->index->tree, range) : vy_range_tree_prev(itr->index->tree, range); assert(range != NULL); if (itr->last_stmt == NULL) break; /* * We could skip an entire range due to the cache. * Make sure the next statement falls in the range. */ if (dir > 0 && (range->end == NULL || vy_tuple_compare_with_key(itr->last_stmt, range->end, cmp_def) < 0)) break; if (dir < 0 && (range->begin == NULL || vy_tuple_compare_with_key(itr->last_stmt, range->begin, cmp_def) > 0)) break; } itr->curr_range = range; itr->range_version = range->version; for (uint32_t i = itr->disk_src; i < itr->src_count; i++) { struct vy_read_src *src = &itr->src[i]; vy_run_iterator_close(&src->run_iterator); } itr->src_count = itr->disk_src; vy_read_iterator_add_disk(itr); } /** * Track a read in the conflict manager. 
*/ static int vy_read_iterator_track_read(struct vy_read_iterator *itr, struct tuple *stmt) { if (itr->tx == NULL) return 0; if (stmt == NULL) { stmt = (itr->iterator_type == ITER_EQ || itr->iterator_type == ITER_REQ ? itr->key : itr->index->env->empty_key); } int rc; if (iterator_direction(itr->iterator_type) >= 0) { rc = vy_tx_track(itr->tx, itr->index, itr->key, itr->iterator_type != ITER_GT, stmt, true); } else { rc = vy_tx_track(itr->tx, itr->index, stmt, true, itr->key, itr->iterator_type != ITER_LT); } return rc; } NODISCARD int vy_read_iterator_next(struct vy_read_iterator *itr, struct tuple **result) { ev_tstamp start_time = ev_monotonic_now(loop()); *result = NULL; if (!itr->search_started) { itr->search_started = true; itr->index->stat.lookup++; vy_read_iterator_restore(itr); } struct tuple *prev_key = itr->last_stmt; if (prev_key != NULL) tuple_ref(prev_key); bool skipped_txw_delete = false; struct tuple *t = NULL; struct vy_index *index = itr->index; int rc = 0; while (true) { rc = vy_read_iterator_next_key(itr, &t); if (rc != 0) goto clear; if (t == NULL) { if (itr->last_stmt != NULL) tuple_unref(itr->last_stmt); itr->last_stmt = NULL; rc = 0; /* No more data. */ break; } rc = vy_read_iterator_squash_upsert(itr, &t); if (rc != 0) goto clear; if (itr->last_stmt != NULL) tuple_unref(itr->last_stmt); itr->last_stmt = t; if (vy_stmt_type(t) == IPROTO_INSERT || vy_stmt_type(t) == IPROTO_REPLACE) break; assert(vy_stmt_type(t) == IPROTO_DELETE); if (vy_stmt_lsn(t) == INT64_MAX) /* t is from write set */ skipped_txw_delete = true; } *result = itr->last_stmt; assert(*result == NULL || vy_stmt_type(*result) == IPROTO_INSERT || vy_stmt_type(*result) == IPROTO_REPLACE); if (*result != NULL) vy_stmt_counter_acct_tuple(&index->stat.get, *result); #ifndef NDEBUG /* Check constraints. */ int dir = iterator_direction(itr->iterator_type); /* * Each result statement with iterator type GE/GT must * be >= iterator key. And with LT/LE must * be <= iterator_key. 
@sa gh-2614. */ if (itr->last_stmt != NULL && tuple_field_count(itr->key) > 0) { int cmp = dir * vy_stmt_compare(*result, itr->key, itr->index->cmp_def); assert(cmp >= 0); } /* * Ensure the read iterator does not return duplicates * and respects statements order (index->cmp_def includes * primary parts, so prev_key != itr->last_stmt for any * index). */ if (prev_key != NULL && itr->last_stmt != NULL) { assert(dir * vy_tuple_compare(prev_key, itr->last_stmt, index->cmp_def) < 0); } #endif /** * Add a statement to the cache */ if ((**itr->read_view).vlsn == INT64_MAX) { /* Do not store non-latest data */ struct tuple *cache_prev = prev_key; if (skipped_txw_delete) { /* * If we skipped DELETE that was read from TX write * set, there is a chance that the database actually * has the deleted key and we must not consider * previous+current tuple as an unbroken chain. */ cache_prev = NULL; } vy_cache_add(&itr->index->cache, *result, cache_prev, itr->key, itr->iterator_type); } clear: if (prev_key != NULL) tuple_unref(prev_key); ev_tstamp latency = ev_monotonic_now(loop()) - start_time; latency_collect(&index->stat.latency, latency); if (latency > index->env->too_long_threshold) { say_warn("%s: select(%s, %s) => %s took too long: %.3f sec", vy_index_name(index), tuple_str(itr->key), iterator_type_strs[itr->iterator_type], vy_stmt_str(itr->last_stmt), latency); } return rc; } /** * Close the iterator and free resources */ void vy_read_iterator_close(struct vy_read_iterator *itr) { if (itr->last_stmt != NULL) tuple_unref(itr->last_stmt); vy_read_iterator_cleanup(itr); free(itr->src); TRASH(itr); } tarantool_1.9.1.26.g63eb81e3c/src/box/request.c0000664000000000000000000001350713306560010017421 0ustar rootroot/* * Copyright 2010-2018, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "request.h" #include #include #include #include #include #include "fiber.h" #include "space.h" #include "index.h" #include "sequence.h" #include "key_def.h" #include "tuple.h" #include "xrow.h" #include "iproto_constants.h" int request_create_from_tuple(struct request *request, struct space *space, struct tuple *old_tuple, struct tuple *new_tuple) { memset(request, 0, sizeof(*request)); request->space_id = space->def->id; if (old_tuple == new_tuple) { /* * Old and new tuples are the same, * turn this request into no-op. 
*/ request->type = IPROTO_NOP; return 0; } if (new_tuple == NULL) { uint32_t size, key_size; const char *data = tuple_data_range(old_tuple, &size); request->key = tuple_extract_key_raw(data, data + size, space->index[0]->def->key_def, &key_size); if (request->key == NULL) return -1; request->key_end = request->key + key_size; request->type = IPROTO_DELETE; } else { uint32_t size; const char *data = tuple_data_range(new_tuple, &size); /* * We have to copy the tuple data to region, because * the tuple is allocated on runtime arena and not * referenced and hence may be freed before the * current transaction ends while we need to write * the tuple data to WAL on commit. */ char *buf = region_alloc(&fiber()->gc, size); if (buf == NULL) return -1; memcpy(buf, data, size); request->tuple = buf; request->tuple_end = buf + size; request->type = IPROTO_REPLACE; } return 0; } void request_rebind_to_primary_key(struct request *request, struct space *space, struct tuple *found_tuple) { struct index *pk = space_index(space, 0); assert(pk != NULL); uint32_t key_len; char *key = tuple_extract_key(found_tuple, pk->def->key_def, &key_len); assert(key != NULL); request->key = key; request->key_end = key + key_len; request->index_id = 0; /* Clear the *body* to ensure it's rebuilt at commit. */ request->header = NULL; } int request_handle_sequence(struct request *request, struct space *space) { struct sequence *seq = space->sequence; assert(seq != NULL); assert(request->type == IPROTO_INSERT || request->type == IPROTO_REPLACE); /* * An automatically generated sequence inherits * privileges of the space it is used with. */ if (!seq->is_generated && access_check_sequence(seq) != 0) return -1; struct index *pk = space_index(space, 0); if (unlikely(pk == NULL)) return 0; /* * Look up the first field of the primary key. 
*/ const char *data = request->tuple; const char *data_end = request->tuple_end; int len = mp_decode_array(&data); int fieldno = pk->def->key_def->parts[0].fieldno; if (unlikely(len < fieldno + 1)) return 0; const char *key = data; if (unlikely(fieldno > 0)) { do { mp_next(&key); } while (--fieldno > 0); } int64_t value; if (mp_typeof(*key) == MP_NIL) { /* * If the first field of the primary key is nil, * this is an auto increment request and we need * to replace the nil with the next value generated * by the space sequence. */ if (unlikely(sequence_next(seq, &value) != 0)) return -1; const char *key_end = key; mp_decode_nil(&key_end); size_t buf_size = (request->tuple_end - request->tuple) + mp_sizeof_uint(UINT64_MAX); char *tuple = region_alloc(&fiber()->gc, buf_size); if (tuple == NULL) return -1; char *tuple_end = mp_encode_array(tuple, len); if (unlikely(key != data)) { memcpy(tuple_end, data, key - data); tuple_end += key - data; } if (value >= 0) tuple_end = mp_encode_uint(tuple_end, value); else tuple_end = mp_encode_int(tuple_end, value); memcpy(tuple_end, key_end, data_end - key_end); tuple_end += data_end - key_end; assert(tuple_end <= tuple + buf_size); request->tuple = tuple; request->tuple_end = tuple_end; } else { /* * If the first field is not nil, update the space * sequence with its value, to make sure that an * auto increment request never tries to insert a * value that is already in the space. Note, this * code is also invoked on final recovery to restore * the sequence value from WAL. */ if (likely(mp_read_int64(&key, &value) == 0)) return sequence_update(seq, value); } /* * As the request body was changed, we have to update body in header. 
*/ struct xrow_header *row = request->header; if (row != NULL) { row->bodycnt = xrow_encode_dml(request, row->body); if (row->bodycnt < 0) return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/gc.c0000664000000000000000000001676013306560010016326 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "gc.h" #include #include #include #include #define RB_COMPACT 1 #include #include #include "diag.h" #include "say.h" #include "latch.h" #include "vclock.h" #include "checkpoint.h" #include "engine.h" /* engine_collect_garbage() */ #include "wal.h" /* wal_collect_garbage() */ typedef rb_node(struct gc_consumer) gc_node_t; /** * The object of this type is used to prevent garbage * collection from removing files that are still in use. */ struct gc_consumer { /** Link in gc_state::consumers. */ gc_node_t node; /** Human-readable name. */ char *name; /** The vclock signature tracked by this consumer. */ int64_t signature; }; typedef rb_tree(struct gc_consumer) gc_tree_t; /** Garbage collection state. */ struct gc_state { /** Number of checkpoints to maintain. */ int checkpoint_count; /** Max signature garbage collection has been called for. */ int64_t signature; /** Registered consumers, linked by gc_consumer::node. */ gc_tree_t consumers; /** * Latch serializing concurrent invocations of engine * garbage collection callbacks. */ struct latch latch; }; static struct gc_state gc; /** * Comparator used for ordering gc_consumer objects by signature * in a binary tree. */ static inline int gc_consumer_cmp(const struct gc_consumer *a, const struct gc_consumer *b) { if (a->signature < b->signature) return -1; if (a->signature > b->signature) return 1; if ((intptr_t)a < (intptr_t)b) return -1; if ((intptr_t)a > (intptr_t)b) return 1; return 0; } rb_gen(MAYBE_UNUSED static inline, gc_tree_, gc_tree_t, struct gc_consumer, node, gc_consumer_cmp); /** Allocate a consumer object. 
*/ static struct gc_consumer * gc_consumer_new(const char *name, int64_t signature) { struct gc_consumer *consumer = calloc(1, sizeof(*consumer)); if (consumer == NULL) { diag_set(OutOfMemory, sizeof(*consumer), "malloc", "struct gc_consumer"); return NULL; } consumer->name = strdup(name); if (consumer->name == NULL) { diag_set(OutOfMemory, strlen(name) + 1, "malloc", "struct gc_consumer"); free(consumer); return NULL; } consumer->signature = signature; return consumer; } /** Free a consumer object. */ static void gc_consumer_delete(struct gc_consumer *consumer) { free(consumer->name); TRASH(consumer); free(consumer); } void gc_init(void) { gc.signature = -1; gc_tree_new(&gc.consumers); latch_create(&gc.latch); } void gc_free(void) { /* Free all registered consumers. */ struct gc_consumer *consumer = gc_tree_first(&gc.consumers); while (consumer != NULL) { struct gc_consumer *next = gc_tree_next(&gc.consumers, consumer); gc_tree_remove(&gc.consumers, consumer); gc_consumer_delete(consumer); consumer = next; } latch_destroy(&gc.latch); } void gc_run(void) { int checkpoint_count = gc.checkpoint_count; assert(checkpoint_count > 0); /* Look up the consumer that uses the oldest snapshot. */ struct gc_consumer *leftmost = gc_tree_first(&gc.consumers); /* * Find the oldest checkpoint that must be preserved. * We have to maintain @checkpoint_count oldest snapshots, * plus we can't remove snapshots that are still in use. */ int64_t gc_signature = -1; struct checkpoint_iterator checkpoints; checkpoint_iterator_init(&checkpoints); const struct vclock *vclock; while ((vclock = checkpoint_iterator_prev(&checkpoints)) != NULL) { if (--checkpoint_count > 0) continue; if (leftmost != NULL && leftmost->signature < vclock_sum(vclock)) continue; gc_signature = vclock_sum(vclock); break; } if (gc_signature <= gc.signature) return; /* nothing to do */ gc.signature = gc_signature; /* * Engine callbacks may sleep, because they use coio for * removing files. 
Make sure we won't try to remove the * same file multiple times by serializing concurrent gc * executions. */ latch_lock(&gc.latch); /* * Run garbage collection. * * The order is important here: we must invoke garbage * collection for memtx snapshots first and abort if it * fails - see comment to memtx_engine_collect_garbage(). */ if (engine_collect_garbage(gc_signature) == 0) wal_collect_garbage(gc_signature); latch_unlock(&gc.latch); } void gc_set_checkpoint_count(int checkpoint_count) { gc.checkpoint_count = checkpoint_count; } struct gc_consumer * gc_consumer_register(const char *name, int64_t signature) { struct gc_consumer *consumer = gc_consumer_new(name, signature); if (consumer != NULL) gc_tree_insert(&gc.consumers, consumer); return consumer; } void gc_consumer_unregister(struct gc_consumer *consumer) { int64_t signature = consumer->signature; gc_tree_remove(&gc.consumers, consumer); gc_consumer_delete(consumer); /* * Rerun garbage collection after removing the consumer * if it referenced the oldest vclock. */ struct gc_consumer *leftmost = gc_tree_first(&gc.consumers); if (leftmost == NULL || leftmost->signature > signature) gc_run(); } void gc_consumer_advance(struct gc_consumer *consumer, int64_t signature) { int64_t prev_signature = consumer->signature; assert(signature >= prev_signature); if (signature == prev_signature) return; /* nothing to do */ /* * Do not update the tree unless the tree invariant * is violated. */ struct gc_consumer *next = gc_tree_next(&gc.consumers, consumer); bool update_tree = (next != NULL && signature >= next->signature); if (update_tree) gc_tree_remove(&gc.consumers, consumer); consumer->signature = signature; if (update_tree) gc_tree_insert(&gc.consumers, consumer); /* * Rerun garbage collection after advancing the consumer * if it referenced the oldest vclock. 
*/ struct gc_consumer *leftmost = gc_tree_first(&gc.consumers); if (leftmost == NULL || leftmost->signature > prev_signature) gc_run(); } const char * gc_consumer_name(const struct gc_consumer *consumer) { return consumer->name; } int64_t gc_consumer_signature(const struct gc_consumer *consumer) { return consumer->signature; } struct gc_consumer * gc_consumer_iterator_next(struct gc_consumer_iterator *it) { if (it->curr != NULL) it->curr = gc_tree_next(&gc.consumers, it->curr); else it->curr = gc_tree_first(&gc.consumers); return it->curr; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_point_lookup.h0000664000000000000000000000630213306565107021205 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_POINT_LOOKUP_H #define INCLUDES_TARANTOOL_BOX_VY_POINT_LOOKUP_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * Point lookup is a special case of read iterator that is designed for * retrieving one value from index by a full key (all parts are present). * * Iterator collects necessary history of the given key from different sources * (txw, cache, mems, runs) that consists of some number of sequential upserts * and possibly one terminal statement (replace or delete). The iterator * sequentially scans txw, cache, mems and runs until a terminal statement is * met. After reading the slices the iterator checks that the list of mems * hasn't been changed and restarts if it is the case. * After the history is collected the iterator calculates resultant statement * and, if the result is the latest version of the key, adds it to cache. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct vy_index; struct vy_tx; struct vy_read_view; struct tuple; /** * Given a key that has all index parts (including primary index * parts in case of a secondary index), lookup the corresponding * tuple in the index. The tuple is returned in @ret with its * reference counter elevated. * * The caller must guarantee that if the tuple looked up by this * function is modified, the transaction will be sent to read view. * This is needed to avoid inserting a stale value into the cache. 
* In other words, vy_tx_track() must be called for the search key * before calling this function unless this is a primary index and * the tuple is already tracked in a secondary index. */ int vy_point_lookup(struct vy_index *index, struct vy_tx *tx, const struct vy_read_view **rv, struct tuple *key, struct tuple **ret); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_POINT_LOOKUP_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/CMakeLists.txt0000664000000000000000000000560013306565107020334 0ustar rootrootfile(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/src/box/lua) set(lua_sources) lua_source(lua_sources lua/load_cfg.lua) lua_source(lua_sources lua/schema.lua) lua_source(lua_sources lua/tuple.lua) lua_source(lua_sources lua/session.lua) lua_source(lua_sources lua/checkpoint_daemon.lua) lua_source(lua_sources lua/net_box.lua) lua_source(lua_sources lua/upgrade.lua) lua_source(lua_sources lua/console.lua) lua_source(lua_sources lua/xlog.lua) set(bin_sources) bin_source(bin_sources bootstrap.snap bootstrap.h) add_custom_target(box_generate_lua_sources WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/src/box DEPENDS ${lua_sources}) set_property(DIRECTORY PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${lua_sources}) include_directories(${ZSTD_INCLUDE_DIRS}) add_library(box_error STATIC error.cc errcode.c) target_link_libraries(box_error core stat) add_library(vclock STATIC vclock.c) target_link_libraries(vclock core) add_library(xrow STATIC xrow.c iproto_constants.c) target_link_libraries(xrow server core small vclock misc box_error scramble ${MSGPUCK_LIBRARIES}) add_library(tuple STATIC tuple.c tuple_format.c tuple_update.c tuple_compare.cc tuple_extract_key.cc tuple_hash.cc tuple_dictionary.c key_def.cc coll_def.c coll.c coll_cache.c field_def.c opt_def.c ) target_link_libraries(tuple box_error core ${MSGPUCK_LIBRARIES} ${ICU_LIBRARIES} misc bit) add_library(xlog STATIC xlog.c) target_link_libraries(xlog core box_error crc32 
${ZSTD_LIBRARIES}) add_library(box STATIC iproto.cc error.cc xrow_io.cc tuple_convert.c identifier.c index.cc index_def.c iterator_type.c memtx_hash.c memtx_tree.c memtx_rtree.c memtx_bitset.c engine.c memtx_engine.c memtx_space.c memtx_tuple.cc sysview_engine.c sysview_index.c vinyl.c vy_stmt.c vy_mem.c vy_run.c vy_range.c vy_index.c vy_tx.c vy_write_iterator.c vy_read_iterator.c vy_point_lookup.c vy_cache.c vy_log.c vy_upsert.c vy_read_set.c vy_scheduler.c request.c space.c space_def.c sequence.c func.c func_def.c alter.cc schema.cc schema_def.c session.cc port.c txn.c box.cc gc.c checkpoint.cc user_def.c user.cc authentication.cc replication.cc recovery.cc xstream.cc applier.cc relay.cc journal.c wal.cc call.c ${lua_sources} lua/init.c lua/call.c lua/cfg.cc lua/console.c lua/tuple.c lua/slab.c lua/index.c lua/space.cc lua/sequence.c lua/misc.cc lua/info.c lua/stat.c lua/ctl.c lua/error.cc lua/session.c lua/net_box.c lua/xlog.c ${bin_sources}) target_link_libraries(box box_error tuple stat xrow xlog vclock crc32 scramble ${common_libraries}) add_dependencies(box build_bundled_libs) tarantool_1.9.1.26.g63eb81e3c/src/box/info.h0000664000000000000000000001325313306560010016667 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_INFO_H #define INCLUDES_TARANTOOL_BOX_INFO_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /** * @file * This module provides an adapter for Lua/C API to generate box.info() * and index:info() introspection trees. The primary purpose of this * adapter is to eliminate Engine <-> Lua interdependency. * * TREE STRUCTURE * * { -- info_begin * section = { -- info_begin_table * key1 = int; -- info_append_int * key2 = double; -- info_append_double * key3 = str; -- info_append_str * }; -- info_end_table * * section2 = { * ... * }; * ... * } -- info_end * * * IMPLEMENTATION DETAILS * * Current implementation calls Lua/C API under the hood without any * pcall() wrapping. As you may now, idiosyncratic Lua/C API unwinds * C stacks on errors in a way you can't handle in C. Please ensure that * all blocks of code which call info_append_XXX() functions are * exception/longjmp safe. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Virtual method table for struct info_handler. */ struct info_handler_vtab { /** The begin of document. */ void (*begin)(struct info_handler *); /** The end of document. */ void (*end)(struct info_handler *); /** The begin of associative array (a map). */ void (*begin_table)(struct info_handler *, const char *key); /** The end of associative array (a map). 
*/ void (*end_table)(struct info_handler *); /** Set string value. */ void (*append_str)(struct info_handler *, const char *key, const char *value); /** Set int64_t value. */ void (*append_int)(struct info_handler *, const char *key, int64_t value); /** Set double value. */ void (*append_double)(struct info_handler *, const char *key, double value); }; /** * Adapter for Lua/C API to generate box.info() sections from engines. */ struct info_handler { struct info_handler_vtab *vtab; /** Context for this callback. */ void *ctx; }; /** * Starts a new document and creates root-level associative array. * @param info box.info() adapter. * @throws C++ exception on OOM, see info.h comments. * @pre must be called once before any other functions. */ static inline void info_begin(struct info_handler *info) { return info->vtab->begin(info); } /** * Finishes the document and closes root-level associative array. * @param info box.info() adapter. * @throws C++ exception on OOM, see info.h comments. * @pre must be called at the end. */ static inline void info_end(struct info_handler *info) { return info->vtab->end(info); } /** * Associates int64_t value with @a key in the current associative array. * @param info box.info() adapter. * @param key key. * @param value value. * @throws C++ exception on OOM, see info.h comments. * @pre associative array is started. */ static inline void info_append_int(struct info_handler *info, const char *key, int64_t value) { return info->vtab->append_int(info, key, value); } /** * Associates zero-terminated string with @a key in the current associative * array. * @param info box.info() adapter. * @param key key. * @param value value. * @throws C++ exception on OOM, see info.h comments. */ static inline void info_append_str(struct info_handler *info, const char *key, const char *value) { return info->vtab->append_str(info, key, value); } /** * Associates double value with @a key in the current associative * array. * @param info box.info() adapter. 
* @param key key. * @param value value. * @throws C++ exception on OOM, see info.h comments. */ static inline void info_append_double(struct info_handler *info, const char *key, double value) { return info->vtab->append_double(info, key, value); } /* * Associates a new associative array with @a key. * @param info box.info() adapter. * @param key key. * @throws C++ exception on OOM, see info.h comments. */ static inline void info_table_begin(struct info_handler *info, const char *key) { return info->vtab->begin_table(info, key); } /* * Finishes the current active associative array. * @param info box.info() adapter * @throws C++ exception on OOM, see info.h comments. */ static inline void info_table_end(struct info_handler *info) { return info->vtab->end_table(info); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_INFO_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/field_def.c0000664000000000000000000001006413306565107017641 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "field_def.h" #include "trivia/util.h" const char *field_type_strs[] = { /* [FIELD_TYPE_ANY] = */ "any", /* [FIELD_TYPE_UNSIGNED] = */ "unsigned", /* [FIELD_TYPE_STRING] = */ "string", /* [FIELD_TYPE_NUMBER] = */ "number", /* [FIELD_TYPE_INTEGER] = */ "integer", /* [FIELD_TYPE_BOOLEAN] = */ "boolean", /* [FIELD_TYPE_SCALAR] = */ "scalar", /* [FIELD_TYPE_ARRAY] = */ "array", /* [FIELD_TYPE_MAP] = */ "map", }; static int64_t field_type_by_name_wrapper(const char *str, uint32_t len) { return field_type_by_name(str, len); } /** * Table of a field types compatibility. * For an i row and j column the value is true, if the i type * values can be stored in the j type. 
 */
static const bool field_type_compatibility[] = {
	     /*   ANY   UNSIGNED  STRING   NUMBER  INTEGER  BOOLEAN  SCALAR   ARRAY    MAP  */
/*   ANY    */ true,   false,   false,   false,   false,   false,   false,   false,   false,
/* UNSIGNED */ true,   true,    false,   true,    true,    false,   true,    false,   false,
/*  STRING  */ true,   false,   true,    false,   false,   false,   true,    false,   false,
/*  NUMBER  */ true,   false,   false,   true,    false,   false,   true,    false,   false,
/*  INTEGER */ true,   false,   false,   true,    true,    false,   true,    false,   false,
/*  BOOLEAN */ true,   false,   false,   false,   false,   true,    true,    false,   false,
/*  SCALAR  */ true,   false,   false,   false,   false,   false,   true,    false,   false,
/*  ARRAY   */ true,   false,   false,   false,   false,   false,   false,   true,    false,
/*   MAP    */ true,   false,   false,   false,   false,   false,   false,   false,   true,
};

/**
 * Return true if every value of @a type2 can be stored in a field
 * of @a type1 (i.e. type1 is a supertype of type2).
 *
 * Lookup: row index is type2 (the value's type), column index is
 * type1 (the container's type), hence type2 * field_type_MAX + type1.
 */
bool
field_type1_contains_type2(enum field_type type1, enum field_type type2)
{
	int idx = type2 * field_type_MAX + type1;
	return field_type_compatibility[idx];
}

/**
 * Option parsing registry for decoding a field definition from a
 * msgpack map (used when parsing a space format). Unknown keys are
 * handled by the generic opt parser.
 */
const struct opt_def field_def_reg[] = {
	OPT_DEF_ENUM("type", field_type, struct field_def, type,
		     field_type_by_name_wrapper),
	OPT_DEF("name", OPT_STRPTR, struct field_def, name),
	OPT_DEF("is_nullable", OPT_BOOL, struct field_def, is_nullable),
	OPT_END,
};

/** Default values a field definition starts from before parsing. */
const struct field_def field_def_default = {
	.type = FIELD_TYPE_ANY,
	.name = NULL,
	.is_nullable = false,
};

/**
 * Resolve a field type by its (not necessarily NUL-terminated) name.
 * Accepts the canonical names from field_type_strs plus legacy
 * aliases; returns field_type_MAX when the name is unknown.
 */
enum field_type
field_type_by_name(const char *name, size_t len)
{
	enum field_type field_type = strnindex(field_type_strs, name, len,
					       field_type_MAX);
	if (field_type != field_type_MAX)
		return field_type;
	/* 'num' and 'str' in _index are deprecated since Tarantool 1.7 */
	if (strncasecmp(name, "num", len) == 0)
		return FIELD_TYPE_UNSIGNED;
	else if (strncasecmp(name, "str", len) == 0)
		return FIELD_TYPE_STRING;
	else if (len == 1 && name[0] == '*')
		return FIELD_TYPE_ANY;
	return field_type_MAX;
}
tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_tree.c0000664000000000000000000004531313306565107020116 0ustar rootroot/*
 * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "memtx_tree.h" #include "memtx_engine.h" #include "space.h" #include "schema.h" /* space_cache_find() */ #include "errinj.h" #include "memory.h" #include "fiber.h" #include "tuple.h" #include #include /* {{{ Utilities. 
 *************************************************/

/**
 * qsort_arg() comparator used while bulk-building the tree:
 * compares two tuples by the key definition passed as @a c.
 */
static int
memtx_tree_qcompare(const void* a, const void *b, void *c)
{
	return tuple_compare(*(struct tuple **)a,
			*(struct tuple **)b, (struct key_def *)c);
}

/* {{{ MemtxTree Iterators ****************************************/

/** Iterator over a memtx TREE index. */
struct tree_iterator {
	struct iterator base;
	/* The tree being iterated. */
	const struct memtx_tree *tree;
	/* Definition of the index the iterator was created for. */
	struct index_def *index_def;
	/* Position inside the tree; may be invalidated by tree changes. */
	struct memtx_tree_iterator tree_iterator;
	/* Requested iteration direction/kind (EQ, GE, LT, ...). */
	enum iterator_type type;
	/* Search key the iterator was positioned by. */
	struct memtx_tree_key_data key_data;
	/* Last returned tuple; referenced so it survives tree mutation. */
	struct tuple *current_tuple;
	/** Memory pool the iterator was allocated from. */
	struct mempool *pool;
};

/* Forward declaration: used by the downcast assert below. */
static void
tree_iterator_free(struct iterator *iterator);

/*
 * Downcast a generic iterator to a tree iterator. The assert on the
 * free callback guards against downcasting an iterator of another
 * index type.
 */
static inline struct tree_iterator *
tree_iterator(struct iterator *it)
{
	assert(it->free == tree_iterator_free);
	return (struct tree_iterator *) it;
}

/* Release the reference to the current tuple and return the iterator
 * to its mempool. */
static void
tree_iterator_free(struct iterator *iterator)
{
	struct tree_iterator *it = tree_iterator(iterator);
	if (it->current_tuple != NULL)
		tuple_unref(it->current_tuple);
	mempool_free(it->pool, it);
}

/* next() of an exhausted iterator: always reports EOF (*ret == NULL). */
static int
tree_iterator_dummie(struct iterator *iterator, struct tuple **ret)
{
	(void)iterator;
	*ret = NULL;
	return 0;
}

/*
 * Advance forward. If the underlying tree changed since the last call
 * (the element at the saved position is no longer current_tuple), the
 * position is restored with upper_bound on the last returned tuple
 * before stepping; otherwise a plain step suffices.
 */
static int
tree_iterator_next(struct iterator *iterator, struct tuple **ret)
{
	struct tuple **res;
	struct tree_iterator *it = tree_iterator(iterator);
	assert(it->current_tuple != NULL);
	struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	if (check == NULL || *check != it->current_tuple)
		it->tree_iterator =
			memtx_tree_upper_bound_elem(it->tree, it->current_tuple,
						    NULL);
	else
		memtx_tree_iterator_next(it->tree, &it->tree_iterator);
	tuple_unref(it->current_tuple);
	it->current_tuple = NULL;
	res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	if (res == NULL) {
		iterator->next = tree_iterator_dummie;
		*ret = NULL;
	} else {
		*ret = it->current_tuple = *res;
		tuple_ref(it->current_tuple);
	}
	return 0;
}

/*
 * Advance backward. On tree change the position is re-found with
 * lower_bound on the last returned tuple; note the prev step is then
 * taken unconditionally, since lower_bound lands at (or right of) the
 * old element.
 */
static int
tree_iterator_prev(struct iterator *iterator, struct tuple **ret)
{
	struct tree_iterator *it = tree_iterator(iterator);
	assert(it->current_tuple != NULL);
	struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	if (check == NULL || *check != it->current_tuple)
		it->tree_iterator =
			memtx_tree_lower_bound_elem(it->tree, it->current_tuple,
						    NULL);
	memtx_tree_iterator_prev(it->tree, &it->tree_iterator);
	tuple_unref(it->current_tuple);
	it->current_tuple = NULL;
	struct tuple **res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	if (!res) {
		iterator->next = tree_iterator_dummie;
		*ret = NULL;
	} else {
		*ret = it->current_tuple = *res;
		tuple_ref(it->current_tuple);
	}
	return 0;
}

/*
 * Like tree_iterator_next(), but additionally stops (EOF) as soon as
 * the next element no longer matches the search key — used for
 * ITER_EQ.
 */
static int
tree_iterator_next_equal(struct iterator *iterator, struct tuple **ret)
{
	struct tree_iterator *it = tree_iterator(iterator);
	assert(it->current_tuple != NULL);
	struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	if (check == NULL || *check != it->current_tuple)
		it->tree_iterator =
			memtx_tree_upper_bound_elem(it->tree, it->current_tuple,
						    NULL);
	else
		memtx_tree_iterator_next(it->tree, &it->tree_iterator);
	tuple_unref(it->current_tuple);
	it->current_tuple = NULL;
	struct tuple **res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	/* Use user key def to save a few loops. */
	if (!res || memtx_tree_compare_key(*res, &it->key_data,
					   it->index_def->key_def) != 0) {
		iterator->next = tree_iterator_dummie;
		*ret = NULL;
	} else {
		*ret = it->current_tuple = *res;
		tuple_ref(it->current_tuple);
	}
	return 0;
}

/*
 * Like tree_iterator_prev(), but stops (EOF) once the element no
 * longer matches the search key — used for ITER_REQ.
 */
static int
tree_iterator_prev_equal(struct iterator *iterator, struct tuple **ret)
{
	struct tree_iterator *it = tree_iterator(iterator);
	assert(it->current_tuple != NULL);
	struct tuple **check = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	if (check == NULL || *check != it->current_tuple)
		it->tree_iterator =
			memtx_tree_lower_bound_elem(it->tree, it->current_tuple,
						    NULL);
	memtx_tree_iterator_prev(it->tree, &it->tree_iterator);
	tuple_unref(it->current_tuple);
	it->current_tuple = NULL;
	struct tuple **res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator);
	/* Use user key def to save a few loops. */
	if (!res || memtx_tree_compare_key(*res, &it->key_data,
					   it->index_def->key_def) != 0) {
		iterator->next = tree_iterator_dummie;
		*ret = NULL;
	} else {
		*ret = it->current_tuple = *res;
		tuple_ref(it->current_tuple);
	}
	return 0;
}

/*
 * Install the step callback matching the iterator type. Called once
 * from tree_iterator_start() after the initial positioning succeeds.
 */
static void
tree_iterator_set_next_method(struct tree_iterator *it)
{
	assert(it->current_tuple != NULL);
	switch (it->type) {
	case ITER_EQ:
		it->base.next = tree_iterator_next_equal;
		break;
	case ITER_REQ:
		it->base.next = tree_iterator_prev_equal;
		break;
	case ITER_ALL:
		it->base.next = tree_iterator_next;
		break;
	case ITER_LT:
	case ITER_LE:
		it->base.next = tree_iterator_prev;
		break;
	case ITER_GE:
	case ITER_GT:
		it->base.next = tree_iterator_next;
		break;
	default:
		/* The type was checked in initIterator */
		assert(false);
	}
}

/*
 * First call of a freshly created iterator: position it in the tree
 * according to the type and key, return the first tuple (or EOF), and
 * switch base.next to the proper step function.
 */
static int
tree_iterator_start(struct iterator *iterator, struct tuple **ret)
{
	*ret = NULL;
	struct tree_iterator *it = tree_iterator(iterator);
	it->base.next = tree_iterator_dummie;
	const struct memtx_tree *tree = it->tree;
	enum iterator_type type = it->type;
	bool exact = false;
	assert(it->current_tuple == NULL);
	if (it->key_data.key == 0) {
		/* No key: start from one of the tree ends. */
		if (iterator_type_is_reverse(it->type))
it->tree_iterator = memtx_tree_iterator_last(tree); else it->tree_iterator = memtx_tree_iterator_first(tree); } else { if (type == ITER_ALL || type == ITER_EQ || type == ITER_GE || type == ITER_LT) { it->tree_iterator = memtx_tree_lower_bound(tree, &it->key_data, &exact); if (type == ITER_EQ && !exact) return 0; } else { // ITER_GT, ITER_REQ, ITER_LE it->tree_iterator = memtx_tree_upper_bound(tree, &it->key_data, &exact); if (type == ITER_REQ && !exact) return 0; } if (iterator_type_is_reverse(type)) { /* * Because of limitations of tree search API we use use * lower_bound for LT search and upper_bound for LE * and REQ searches. Thus we found position to the * right of the target one. Let's make a step to the * left to reach target position. * If we found an invalid iterator all the elements in * the tree are less (less or equal) to the key, and * iterator_next call will convert the iterator to the * last position in the tree, that's what we need. */ memtx_tree_iterator_prev(it->tree, &it->tree_iterator); } } struct tuple **res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator); if (!res) return 0; *ret = it->current_tuple = *res; tuple_ref(it->current_tuple); tree_iterator_set_next_method(it); return 0; } /* }}} */ /* {{{ MemtxTree **********************************************************/ /** * Return the key def to use for comparing tuples stored * in the given tree index. * * We use extended key def for non-unique and nullable * indexes. Unique but nullable index can store multiple * NULLs. To correctly compare these NULLs extended key * def must be used. For details @sa tuple_compare.cc. */ static struct key_def * memtx_tree_index_cmp_def(struct memtx_tree_index *index) { struct index_def *def = index->base.def; return def->opts.is_unique && !def->key_def->is_nullable ? 
	def->key_def : def->cmp_def;
}

/* index_vtab::destroy — free the tree, any leftover build array and
 * the index object itself. */
static void
memtx_tree_index_destroy(struct index *base)
{
	struct memtx_tree_index *index = (struct memtx_tree_index *)base;
	memtx_tree_destroy(&index->tree);
	free(index->build_array);
	free(index);
}

/* index_vtab::update_def — refresh the comparator argument after the
 * index definition changed (e.g. on ALTER). */
static void
memtx_tree_index_update_def(struct index *base)
{
	struct memtx_tree_index *index = (struct memtx_tree_index *)base;
	index->tree.arg = memtx_tree_index_cmp_def(index);
}

/* index_vtab::size — number of tuples in the index. */
static ssize_t
memtx_tree_index_size(struct index *base)
{
	struct memtx_tree_index *index = (struct memtx_tree_index *)base;
	return memtx_tree_size(&index->tree);
}

/* index_vtab::bsize — memory used by the tree structure itself. */
static ssize_t
memtx_tree_index_bsize(struct index *base)
{
	struct memtx_tree_index *index = (struct memtx_tree_index *)base;
	return memtx_tree_mem_used(&index->tree);
}

/* index_vtab::random — fetch a pseudo-random tuple by seed @a rnd,
 * or NULL when the index is empty. Always succeeds (returns 0). */
static int
memtx_tree_index_random(struct index *base, uint32_t rnd, struct tuple **result)
{
	struct memtx_tree_index *index = (struct memtx_tree_index *)base;
	struct tuple **res = memtx_tree_random(&index->tree, rnd);
	*result = res != NULL ? *res : NULL;
	return 0;
}

/* index_vtab::count — O(1) shortcut for ITER_ALL, generic scan
 * otherwise. */
static ssize_t
memtx_tree_index_count(struct index *base, enum iterator_type type,
		       const char *key, uint32_t part_count)
{
	if (type == ITER_ALL)
		return memtx_tree_index_size(base); /* optimization */
	return generic_index_count(base, type, key, part_count);
}

/* index_vtab::get — point lookup by a full key; only valid for unique
 * indexes with a full part count (asserted). */
static int
memtx_tree_index_get(struct index *base, const char *key,
		     uint32_t part_count, struct tuple **result)
{
	assert(base->def->opts.is_unique &&
	       part_count == base->def->key_def->part_count);
	struct memtx_tree_index *index = (struct memtx_tree_index *)base;
	struct memtx_tree_key_data key_data;
	key_data.key = key;
	key_data.part_count = part_count;
	struct tuple **res = memtx_tree_find(&index->tree, &key_data);
	*result = res != NULL ?
*res : NULL; return 0; } static int memtx_tree_index_replace(struct index *base, struct tuple *old_tuple, struct tuple *new_tuple, enum dup_replace_mode mode, struct tuple **result) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; if (new_tuple) { struct tuple *dup_tuple = NULL; /* Try to optimistically replace the new_tuple. */ int tree_res = memtx_tree_insert(&index->tree, new_tuple, &dup_tuple); if (tree_res) { diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "memtx_tree_index", "replace"); return -1; } uint32_t errcode = replace_check_dup(old_tuple, dup_tuple, mode); if (errcode) { memtx_tree_delete(&index->tree, new_tuple); if (dup_tuple) memtx_tree_insert(&index->tree, dup_tuple, 0); struct space *sp = space_cache_find(base->def->space_id); if (sp != NULL) diag_set(ClientError, errcode, base->def->name, space_name(sp)); return -1; } if (dup_tuple) { *result = dup_tuple; return 0; } } if (old_tuple) { memtx_tree_delete(&index->tree, old_tuple); } *result = old_tuple; return 0; } static struct iterator * memtx_tree_index_create_iterator(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; struct memtx_engine *memtx = (struct memtx_engine *)base->engine; assert(part_count == 0 || key != NULL); if (type > ITER_GT) { diag_set(UnsupportedIndexFeature, base->def, "requested iterator type"); return NULL; } if (part_count == 0) { /* * If no key is specified, downgrade equality * iterators to a full range. */ type = iterator_type_is_reverse(type) ? 
ITER_LE : ITER_GE; key = NULL; } struct tree_iterator *it = mempool_alloc(&memtx->tree_iterator_pool); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct tree_iterator), "memtx_tree_index", "iterator"); return NULL; } iterator_create(&it->base, base); it->pool = &memtx->tree_iterator_pool; it->base.next = tree_iterator_start; it->base.free = tree_iterator_free; it->type = type; it->key_data.key = key; it->key_data.part_count = part_count; it->index_def = base->def; it->tree = &index->tree; it->tree_iterator = memtx_tree_invalid_iterator(); it->current_tuple = NULL; return (struct iterator *)it; } static void memtx_tree_index_begin_build(struct index *base) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; assert(memtx_tree_size(&index->tree) == 0); (void)index; } static int memtx_tree_index_reserve(struct index *base, uint32_t size_hint) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; if (size_hint < index->build_array_alloc_size) return 0; struct tuple **tmp = (struct tuple **)realloc(index->build_array, size_hint * sizeof(*tmp)); if (tmp == NULL) { diag_set(OutOfMemory, size_hint * sizeof(*tmp), "memtx_tree_index", "reserve"); return -1; } index->build_array = tmp; index->build_array_alloc_size = size_hint; return 0; } static int memtx_tree_index_build_next(struct index *base, struct tuple *tuple) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; if (index->build_array == NULL) { index->build_array = (struct tuple **)malloc(MEMTX_EXTENT_SIZE); if (index->build_array == NULL) { diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "memtx_tree_index", "build_next"); return -1; } index->build_array_alloc_size = MEMTX_EXTENT_SIZE / sizeof(struct tuple*); } assert(index->build_array_size <= index->build_array_alloc_size); if (index->build_array_size == index->build_array_alloc_size) { index->build_array_alloc_size = index->build_array_alloc_size + index->build_array_alloc_size / 2; struct tuple **tmp = (struct tuple **) 
realloc(index->build_array, index->build_array_alloc_size * sizeof(*tmp)); if (tmp == NULL) { diag_set(OutOfMemory, index->build_array_alloc_size * sizeof(*tmp), "memtx_tree_index", "build_next"); return -1; } index->build_array = tmp; } index->build_array[index->build_array_size++] = tuple; return 0; } static void memtx_tree_index_end_build(struct index *base) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; struct key_def *cmp_def = memtx_tree_index_cmp_def(index); qsort_arg(index->build_array, index->build_array_size, sizeof(struct tuple *), memtx_tree_qcompare, cmp_def); memtx_tree_build(&index->tree, index->build_array, index->build_array_size); free(index->build_array); index->build_array = NULL; index->build_array_size = 0; index->build_array_alloc_size = 0; } struct tree_snapshot_iterator { struct snapshot_iterator base; struct memtx_tree *tree; struct memtx_tree_iterator tree_iterator; }; static void tree_snapshot_iterator_free(struct snapshot_iterator *iterator) { assert(iterator->free == tree_snapshot_iterator_free); struct tree_snapshot_iterator *it = (struct tree_snapshot_iterator *)iterator; struct memtx_tree *tree = (struct memtx_tree *)it->tree; memtx_tree_iterator_destroy(tree, &it->tree_iterator); free(iterator); } static const char * tree_snapshot_iterator_next(struct snapshot_iterator *iterator, uint32_t *size) { assert(iterator->free == tree_snapshot_iterator_free); struct tree_snapshot_iterator *it = (struct tree_snapshot_iterator *)iterator; struct tuple **res = memtx_tree_iterator_get_elem(it->tree, &it->tree_iterator); if (res == NULL) return NULL; memtx_tree_iterator_next(it->tree, &it->tree_iterator); return tuple_data_range(*res, size); } /** * Create an ALL iterator with personal read view so further * index modifications will not affect the iteration results. * Must be destroyed by iterator->free after usage. 
*/ static struct snapshot_iterator * memtx_tree_index_create_snapshot_iterator(struct index *base) { struct memtx_tree_index *index = (struct memtx_tree_index *)base; struct tree_snapshot_iterator *it = (struct tree_snapshot_iterator *) calloc(1, sizeof(*it)); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct tree_snapshot_iterator), "memtx_tree_index", "create_snapshot_iterator"); return NULL; } it->base.free = tree_snapshot_iterator_free; it->base.next = tree_snapshot_iterator_next; it->tree = &index->tree; it->tree_iterator = memtx_tree_iterator_first(&index->tree); memtx_tree_iterator_freeze(&index->tree, &it->tree_iterator); return (struct snapshot_iterator *) it; } static const struct index_vtab memtx_tree_index_vtab = { /* .destroy = */ memtx_tree_index_destroy, /* .commit_create = */ generic_index_commit_create, /* .commit_drop = */ generic_index_commit_drop, /* .update_def = */ memtx_tree_index_update_def, /* .size = */ memtx_tree_index_size, /* .bsize = */ memtx_tree_index_bsize, /* .min = */ generic_index_min, /* .max = */ generic_index_max, /* .random = */ memtx_tree_index_random, /* .count = */ memtx_tree_index_count, /* .get = */ memtx_tree_index_get, /* .replace = */ memtx_tree_index_replace, /* .create_iterator = */ memtx_tree_index_create_iterator, /* .create_snapshot_iterator = */ memtx_tree_index_create_snapshot_iterator, /* .info = */ generic_index_info, /* .begin_build = */ memtx_tree_index_begin_build, /* .reserve = */ memtx_tree_index_reserve, /* .build_next = */ memtx_tree_index_build_next, /* .end_build = */ memtx_tree_index_end_build, }; struct memtx_tree_index * memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def) { memtx_index_arena_init(); if (!mempool_is_initialized(&memtx->tree_iterator_pool)) { mempool_create(&memtx->tree_iterator_pool, cord_slab_cache(), sizeof(struct tree_iterator)); } struct memtx_tree_index *index = (struct memtx_tree_index *)calloc(1, sizeof(*index)); if (index == NULL) { 
diag_set(OutOfMemory, sizeof(*index), "malloc", "struct memtx_tree_index"); return NULL; } if (index_create(&index->base, (struct engine *)memtx, &memtx_tree_index_vtab, def) != 0) { free(index); return NULL; } struct key_def *cmp_def = memtx_tree_index_cmp_def(index); memtx_tree_create(&index->tree, cmp_def, memtx_index_extent_alloc, memtx_index_extent_free, NULL); return index; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_range.h0000664000000000000000000002027513306565107017564 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_RANGE_H #define INCLUDES_TARANTOOL_BOX_VY_RANGE_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #define RB_COMPACT 1 #include #include #include "iterator_type.h" #define HEAP_FORWARD_DECLARATION #include "salad/heap.h" #include "trivia/util.h" #include "vy_stat.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct index_opts; struct key_def; struct tuple; struct vy_slice; /** * Range of keys in an index stored on disk. */ struct vy_range { /** Unique ID of this range. */ int64_t id; /** * Range lower bound. NULL if range is leftmost. * Both 'begin' and 'end' statements have SELECT type with * the full idexed key. */ struct tuple *begin; /** Range upper bound. NULL if range is rightmost. */ struct tuple *end; /** Key definition for comparing range boundaries. * Contains secondary and primary key parts for secondary * keys, to ensure an always distinct result for * non-unique keys. */ const struct key_def *cmp_def; /** An estimate of the number of statements in this range. */ struct vy_disk_stmt_counter count; /** * List of run slices in this range, linked by vy_slice->in_range. * The newer a slice, the closer it to the list head. */ struct rlist slices; /** Number of entries in the ->slices list. */ int slice_count; /** * The goal of compaction is to reduce read amplification. * All ranges for which the LSM tree has more runs per * level than run_count_per_level or run size larger than * one defined by run_size_ratio of this level are candidates * for compaction. * Unlike other LSM implementations, Vinyl can have many * sorted runs in a single level, and is able to compact * runs from any number of adjacent levels. Moreover, * higher levels are always taken in when compacting * a lower level - i.e. L1 is always included when * compacting L2, and both L1 and L2 are always included * when compacting L3. * * This variable contains the number of runs the next * compaction of this range will include. * * The lower the level is scheduled for compaction, * the bigger it tends to be because upper levels are * taken in. 
* @sa vy_range_update_compact_priority() to see * how we decide how many runs to compact next time. */ int compact_priority; /** Number of times the range was compacted. */ int n_compactions; /** Link in vy_index->tree. */ rb_node(struct vy_range) tree_node; /** Link in vy_index->range_heap. */ struct heap_node heap_node; /** * Incremented whenever an in-memory index or on disk * run is added to or deleted from this range. Used to * invalidate iterators. */ uint32_t version; }; /** * Heap of all ranges of the same index, prioritized by * vy_range->compact_priority. */ #define HEAP_NAME vy_range_heap static inline bool vy_range_heap_less(struct heap_node *a, struct heap_node *b) { struct vy_range *r1 = container_of(a, struct vy_range, heap_node); struct vy_range *r2 = container_of(b, struct vy_range, heap_node); return r1->compact_priority > r2->compact_priority; } #define HEAP_LESS(h, l, r) vy_range_heap_less(l, r) #include "salad/heap.h" #undef HEAP_LESS #undef HEAP_NAME /** Return true if a task is scheduled for a given range. */ static inline bool vy_range_is_scheduled(struct vy_range *range) { return range->heap_node.pos == UINT32_MAX; } /** * Search tree of all ranges of the same index, sorted by * vy_range->begin. Ranges in a tree are supposed to span * all possible keys without overlaps. */ int vy_range_tree_cmp(struct vy_range *range_a, struct vy_range *range_b); int vy_range_tree_key_cmp(const struct tuple *stmt, struct vy_range *range); typedef rb_tree(struct vy_range) vy_range_tree_t; rb_gen_ext_key(MAYBE_UNUSED static inline, vy_range_tree_, vy_range_tree_t, struct vy_range, tree_node, vy_range_tree_cmp, const struct tuple *, vy_range_tree_key_cmp); /** * Find the first range in which a given key should be looked up. * * @param tree Range tree to search. * @param iterator_type Iterator type. * @param key Key to look up. * * @retval The first range to look up the key in. 
*/ struct vy_range * vy_range_tree_find_by_key(vy_range_tree_t *tree, enum iterator_type iterator_type, const struct tuple *key); /** * Allocate and initialize a range (either a new one or for * restore from disk). * * @param id Range id. * @param begin Range begin (inclusive) or NULL for -inf. * @param end Range end (exclusive) or NULL for +inf. * @param cmp_def Key definition for comparing range boundaries. * * @retval not NULL The new range. * @retval NULL Out of memory. */ struct vy_range * vy_range_new(int64_t id, struct tuple *begin, struct tuple *end, const struct key_def *cmp_def); /** * Free a range and all its slices. * * @param range Range to free. */ void vy_range_delete(struct vy_range *range); /** An snprint-style function to print boundaries of a range. */ int vy_range_snprint(char *buf, int size, const struct vy_range *range); static inline const char * vy_range_str(struct vy_range *range) { char *buf = tt_static_buf(); vy_range_snprint(buf, TT_STATIC_BUF_LEN, range); return buf; } /** Add a run slice to the head of a range's list. */ void vy_range_add_slice(struct vy_range *range, struct vy_slice *slice); /** Add a run slice to a range's list before @next_slice. */ void vy_range_add_slice_before(struct vy_range *range, struct vy_slice *slice, struct vy_slice *next_slice); /** Remove a run slice from a range's list. */ void vy_range_remove_slice(struct vy_range *range, struct vy_slice *slice); /** * Update compaction priority of a range. * * @param range The range. * @param opts Index options. */ void vy_range_update_compact_priority(struct vy_range *range, const struct index_opts *opts); /** * Check if a range needs to be split in two. * * @param range The range. * @param opts Index options. * @param[out] p_split_key Key to split the range by. * * @retval true If the range needs to be split. 
*/ bool vy_range_needs_split(struct vy_range *range, const struct index_opts *opts, const char **p_split_key); /** * Check if a range needs to be coalesced with adjacent * ranges in a range tree. * * @param range The range. * @param tree The range tree. * @param opts Index options. * @param[out] p_first The first range in the tree to coalesce. * @param[out] p_last The last range in the tree to coalesce. * * @retval true If the range needs to be coalesced. */ bool vy_range_needs_coalesce(struct vy_range *range, vy_range_tree_t *tree, const struct index_opts *opts, struct vy_range **p_first, struct vy_range **p_last); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_RANGE_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/errcode.h0000664000000000000000000003471613306565107017402 0ustar rootroot#ifndef TARANTOOL_BOX_ERRCODE_H_INCLUDED #define TARANTOOL_BOX_ERRCODE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "trivia/util.h" #ifdef __cplusplus extern "C" { #endif struct errcode_record { const char *errstr; const char *errdesc; }; /* * To add a new error code to Tarantool, extend this array. * * !IMPORTANT! Currently you need to manually update the user * guide (doc/user/errcode.xml) with each added error code. * Please don't forget to do it! */ #define ERROR_CODES(_) \ /* 0 */_(ER_UNKNOWN, "Unknown error") \ /* 1 */_(ER_ILLEGAL_PARAMS, "Illegal parameters, %s") \ /* 2 */_(ER_MEMORY_ISSUE, "Failed to allocate %u bytes in %s for %s") \ /* 3 */_(ER_TUPLE_FOUND, "Duplicate key exists in unique index '%s' in space '%s'") \ /* 4 */_(ER_TUPLE_NOT_FOUND, "Tuple doesn't exist in index '%s' in space '%s'") \ /* 5 */_(ER_UNSUPPORTED, "%s does not support %s") \ /* 6 */_(ER_NONMASTER, "Can't modify data on a replication slave. 
My master is: %s") \ /* 7 */_(ER_READONLY, "Can't modify data because this instance is in read-only mode.") \ /* 8 */_(ER_INJECTION, "Error injection '%s'") \ /* 9 */_(ER_CREATE_SPACE, "Failed to create space '%s': %s") \ /* 10 */_(ER_SPACE_EXISTS, "Space '%s' already exists") \ /* 11 */_(ER_DROP_SPACE, "Can't drop space '%s': %s") \ /* 12 */_(ER_ALTER_SPACE, "Can't modify space '%s': %s") \ /* 13 */_(ER_INDEX_TYPE, "Unsupported index type supplied for index '%s' in space '%s'") \ /* 14 */_(ER_MODIFY_INDEX, "Can't create or modify index '%s' in space '%s': %s") \ /* 15 */_(ER_LAST_DROP, "Can't drop the primary key in a system space, space '%s'") \ /* 16 */_(ER_TUPLE_FORMAT_LIMIT, "Tuple format limit reached: %u") \ /* 17 */_(ER_DROP_PRIMARY_KEY, "Can't drop primary key in space '%s' while secondary keys exist") \ /* 18 */_(ER_KEY_PART_TYPE, "Supplied key type of part %u does not match index part type: expected %s") \ /* 19 */_(ER_EXACT_MATCH, "Invalid key part count in an exact match (expected %u, got %u)") \ /* 20 */_(ER_INVALID_MSGPACK, "Invalid MsgPack - %s") \ /* 21 */_(ER_PROC_RET, "msgpack.encode: can not encode Lua type '%s'") \ /* 22 */_(ER_TUPLE_NOT_ARRAY, "Tuple/Key must be MsgPack array") \ /* 23 */_(ER_FIELD_TYPE, "Tuple field %u type does not match one required by operation: expected %s") \ /* 24 */_(ER_INDEX_PART_TYPE_MISMATCH, "Field %s has type '%s' in one index, but type '%s' in another") \ /* 25 */_(ER_SPLICE, "SPLICE error on field %u: %s") \ /* 26 */_(ER_UPDATE_ARG_TYPE, "Argument type in operation '%c' on field %u does not match field type: expected %s") \ /* 27 */_(ER_FORMAT_MISMATCH_INDEX_PART, "Field %s has type '%s' in space format, but type '%s' in index definition") \ /* 28 */_(ER_UNKNOWN_UPDATE_OP, "Unknown UPDATE operation") \ /* 29 */_(ER_UPDATE_FIELD, "Field %u UPDATE error: %s") \ /* 30 */_(ER_FUNCTION_TX_ACTIVE, "Transaction is active at return from function") \ /* 31 */_(ER_KEY_PART_COUNT, "Invalid key part count (expected [0..%u], 
got %u)") \ /* 32 */_(ER_PROC_LUA, "%s") \ /* 33 */_(ER_NO_SUCH_PROC, "Procedure '%.*s' is not defined") \ /* 34 */_(ER_NO_SUCH_TRIGGER, "Trigger is not found") \ /* 35 */_(ER_NO_SUCH_INDEX, "No index #%u is defined in space '%s'") \ /* 36 */_(ER_NO_SUCH_SPACE, "Space '%s' does not exist") \ /* 37 */_(ER_NO_SUCH_FIELD, "Field %d was not found in the tuple") \ /* 38 */_(ER_EXACT_FIELD_COUNT, "Tuple field count %u does not match space field count %u") \ /* 39 */_(ER_MIN_FIELD_COUNT, "Tuple field count %u is less than required by space format or defined indexes (expected at least %u)") \ /* 40 */_(ER_WAL_IO, "Failed to write to disk") \ /* 41 */_(ER_MORE_THAN_ONE_TUPLE, "Get() doesn't support partial keys and non-unique indexes") \ /* 42 */_(ER_ACCESS_DENIED, "%s access to %s '%s' is denied for user '%s'") \ /* 43 */_(ER_CREATE_USER, "Failed to create user '%s': %s") \ /* 44 */_(ER_DROP_USER, "Failed to drop user or role '%s': %s") \ /* 45 */_(ER_NO_SUCH_USER, "User '%s' is not found") \ /* 46 */_(ER_USER_EXISTS, "User '%s' already exists") \ /* 47 */_(ER_PASSWORD_MISMATCH, "Incorrect password supplied for user '%s'") \ /* 48 */_(ER_UNKNOWN_REQUEST_TYPE, "Unknown request type %u") \ /* 49 */_(ER_UNKNOWN_SCHEMA_OBJECT, "Unknown object type '%s'") \ /* 50 */_(ER_CREATE_FUNCTION, "Failed to create function '%s': %s") \ /* 51 */_(ER_NO_SUCH_FUNCTION, "Function '%s' does not exist") \ /* 52 */_(ER_FUNCTION_EXISTS, "Function '%s' already exists") \ /* 53 */_(ER_BEFORE_REPLACE_RET, "Invalid return value of space:before_replace trigger: expected tuple or nil, got %s") \ /* 54 */_(ER_FUNCTION_MAX, "A limit on the total number of functions has been reached: %u") \ /* 55 */_(ER_UNUSED4, "") \ /* 56 */_(ER_USER_MAX, "A limit on the total number of users has been reached: %u") \ /* 57 */_(ER_NO_SUCH_ENGINE, "Space engine '%s' does not exist") \ /* 58 */_(ER_RELOAD_CFG, "Can't set option '%s' dynamically") \ /* 59 */_(ER_CFG, "Incorrect value for option '%s': %s") \ /* 60 
*/_(ER_SAVEPOINT_EMPTY_TX, "Can not set a savepoint in an empty transaction") \ /* 61 */_(ER_NO_SUCH_SAVEPOINT, "Can not rollback to savepoint: the savepoint does not exist") \ /* 62 */_(ER_UNKNOWN_REPLICA, "Replica %s is not registered with replica set %s") \ /* 63 */_(ER_REPLICASET_UUID_MISMATCH, "Replica set UUID mismatch: expected %s, got %s") \ /* 64 */_(ER_INVALID_UUID, "Invalid UUID: %s") \ /* 65 */_(ER_REPLICASET_UUID_IS_RO, "Can't reset replica set UUID: it is already assigned") \ /* 66 */_(ER_INSTANCE_UUID_MISMATCH, "Instance UUID mismatch: expected %s, got %s") \ /* 67 */_(ER_REPLICA_ID_IS_RESERVED, "Can't initialize replica id with a reserved value %u") \ /* 68 */_(ER_INVALID_ORDER, "Invalid LSN order for instance %u: previous LSN = %llu, new lsn = %llu") \ /* 69 */_(ER_MISSING_REQUEST_FIELD, "Missing mandatory field '%s' in request") \ /* 70 */_(ER_IDENTIFIER, "Invalid identifier '%s' (expected printable symbols only)") \ /* 71 */_(ER_DROP_FUNCTION, "Can't drop function %u: %s") \ /* 72 */_(ER_ITERATOR_TYPE, "Unknown iterator type '%s'") \ /* 73 */_(ER_REPLICA_MAX, "Replica count limit reached: %u") \ /* 74 */_(ER_INVALID_XLOG, "Failed to read xlog: %lld") \ /* 75 */_(ER_INVALID_XLOG_NAME, "Invalid xlog name: expected %lld got %lld") \ /* 76 */_(ER_INVALID_XLOG_ORDER, "Invalid xlog order: %lld and %lld") \ /* 77 */_(ER_NO_CONNECTION, "Connection is not established") \ /* 78 */_(ER_TIMEOUT, "Timeout exceeded") \ /* 79 */_(ER_ACTIVE_TRANSACTION, "Operation is not permitted when there is an active transaction ") \ /* 80 */_(ER_CURSOR_NO_TRANSACTION, "The transaction the cursor belongs to has ended") \ /* 81 */_(ER_CROSS_ENGINE_TRANSACTION, "A multi-statement transaction can not use multiple storage engines") \ /* 82 */_(ER_NO_SUCH_ROLE, "Role '%s' is not found") \ /* 83 */_(ER_ROLE_EXISTS, "Role '%s' already exists") \ /* 84 */_(ER_CREATE_ROLE, "Failed to create role '%s': %s") \ /* 85 */_(ER_INDEX_EXISTS, "Index '%s' already exists") \ /* 86 
*/_(ER_TUPLE_REF_OVERFLOW, "Tuple reference counter overflow") \ /* 87 */_(ER_ROLE_LOOP, "Granting role '%s' to role '%s' would create a loop") \ /* 88 */_(ER_GRANT, "Incorrect grant arguments: %s") \ /* 89 */_(ER_PRIV_GRANTED, "User '%s' already has %s access on %s '%s'") \ /* 90 */_(ER_ROLE_GRANTED, "User '%s' already has role '%s'") \ /* 91 */_(ER_PRIV_NOT_GRANTED, "User '%s' does not have %s access on %s '%s'") \ /* 92 */_(ER_ROLE_NOT_GRANTED, "User '%s' does not have role '%s'") \ /* 93 */_(ER_MISSING_SNAPSHOT, "Can't find snapshot") \ /* 94 */_(ER_CANT_UPDATE_PRIMARY_KEY, "Attempt to modify a tuple field which is part of index '%s' in space '%s'") \ /* 95 */_(ER_UPDATE_INTEGER_OVERFLOW, "Integer overflow when performing '%c' operation on field %u") \ /* 96 */_(ER_GUEST_USER_PASSWORD, "Setting password for guest user has no effect") \ /* 97 */_(ER_TRANSACTION_CONFLICT, "Transaction has been aborted by conflict") \ /* 98 */_(ER_UNSUPPORTED_ROLE_PRIV, "Unsupported role privilege '%s'") \ /* 99 */_(ER_LOAD_FUNCTION, "Failed to dynamically load function '%s': %s") \ /*100 */_(ER_FUNCTION_LANGUAGE, "Unsupported language '%s' specified for function '%s'") \ /*101 */_(ER_RTREE_RECT, "RTree: %s must be an array with %u (point) or %u (rectangle/box) numeric coordinates") \ /*102 */_(ER_PROC_C, "%s") \ /*103 */_(ER_UNKNOWN_RTREE_INDEX_DISTANCE_TYPE, "Unknown RTREE index distance type %s") \ /*104 */_(ER_PROTOCOL, "%s") \ /*105 */_(ER_UPSERT_UNIQUE_SECONDARY_KEY, "Space %s has a unique secondary index and does not support UPSERT") \ /*106 */_(ER_WRONG_INDEX_RECORD, "Wrong record in _index space: got {%s}, expected {%s}") \ /*107 */_(ER_WRONG_INDEX_PARTS, "Wrong index parts: %s; expected field1 id (number), field1 type (string), ...") \ /*108 */_(ER_WRONG_INDEX_OPTIONS, "Wrong index options (field %u): %s") \ /*109 */_(ER_WRONG_SCHEMA_VERSION, "Wrong schema version, current: %d, in request: %u") \ /*110 */_(ER_MEMTX_MAX_TUPLE_SIZE, "Failed to allocate %u bytes for tuple: 
tuple is too large. Check 'memtx_max_tuple_size' configuration option.") \ /*111 */_(ER_WRONG_SPACE_OPTIONS, "Wrong space options (field %u): %s") \ /*112 */_(ER_UNSUPPORTED_INDEX_FEATURE, "Index '%s' (%s) of space '%s' (%s) does not support %s") \ /*113 */_(ER_VIEW_IS_RO, "View '%s' is read-only") \ /*114 */_(ER_SAVEPOINT_NO_TRANSACTION, "Can not set a savepoint in absence of active transaction") \ /*115 */_(ER_SYSTEM, "%s") \ /*116 */_(ER_LOADING, "Instance bootstrap hasn't finished yet") \ /*117 */_(ER_CONNECTION_TO_SELF, "Connection to self") \ /*118 */_(ER_KEY_PART_IS_TOO_LONG, "Key part is too long: %u of %u bytes") \ /*119 */_(ER_COMPRESSION, "Compression error: %s") \ /*120 */_(ER_CHECKPOINT_IN_PROGRESS, "Snapshot is already in progress") \ /*121 */_(ER_SUB_STMT_MAX, "Can not execute a nested statement: nesting limit reached") \ /*122 */_(ER_COMMIT_IN_SUB_STMT, "Can not commit transaction in a nested statement") \ /*123 */_(ER_ROLLBACK_IN_SUB_STMT, "Rollback called in a nested statement") \ /*124 */_(ER_DECOMPRESSION, "Decompression error: %s") \ /*125 */_(ER_INVALID_XLOG_TYPE, "Invalid xlog type: expected %s, got %s") \ /*126 */_(ER_ALREADY_RUNNING, "Failed to lock WAL directory %s and hot_standby mode is off") \ /*127 */_(ER_INDEX_FIELD_COUNT_LIMIT, "Indexed field count limit reached: %d indexed fields") \ /*128 */_(ER_LOCAL_INSTANCE_ID_IS_READ_ONLY, "The local instance id %u is read-only") \ /*129 */_(ER_BACKUP_IN_PROGRESS, "Backup is already in progress") \ /*130 */_(ER_READ_VIEW_ABORTED, "The read view is aborted") \ /*131 */_(ER_INVALID_INDEX_FILE, "Invalid INDEX file %s: %s") \ /*132 */_(ER_INVALID_RUN_FILE, "Invalid RUN file: %s") \ /*133 */_(ER_INVALID_VYLOG_FILE, "Invalid VYLOG file: %s") \ /*134 */_(ER_CHECKPOINT_ROLLBACK, "Can't start a checkpoint while in cascading rollback") \ /*135 */_(ER_VY_QUOTA_TIMEOUT, "Timed out waiting for Vinyl memory quota") \ /*136 */_(ER_PARTIAL_KEY, "%s index does not support selects via a partial key (expected %u 
parts, got %u). Please Consider changing index type to TREE.") \ /*137 */_(ER_TRUNCATE_SYSTEM_SPACE, "Can't truncate a system space, space '%s'") \ /*138 */_(ER_LOAD_MODULE, "Failed to dynamically load module '%.*s': %s") \ /*139 */_(ER_VINYL_MAX_TUPLE_SIZE, "Failed to allocate %u bytes for tuple: tuple is too large. Check 'vinyl_max_tuple_size' configuration option.") \ /*140 */_(ER_WRONG_DD_VERSION, "Wrong _schema version: expected 'major.minor[.patch]'") \ /*141 */_(ER_WRONG_SPACE_FORMAT, "Wrong space format (field %u): %s") \ /*142 */_(ER_CREATE_SEQUENCE, "Failed to create sequence '%s': %s") \ /*143 */_(ER_ALTER_SEQUENCE, "Can't modify sequence '%s': %s") \ /*144 */_(ER_DROP_SEQUENCE, "Can't drop sequence '%s': %s") \ /*145 */_(ER_NO_SUCH_SEQUENCE, "Sequence '%s' does not exist") \ /*146 */_(ER_SEQUENCE_EXISTS, "Sequence '%s' already exists") \ /*147 */_(ER_SEQUENCE_OVERFLOW, "Sequence '%s' has overflowed") \ /*148 */_(ER_UNUSED5, "") \ /*149 */_(ER_SPACE_FIELD_IS_DUPLICATE, "Space field '%s' is duplicate") \ /*150 */_(ER_CANT_CREATE_COLLATION, "Failed to initialize collation: %s.") \ /*151 */_(ER_WRONG_COLLATION_OPTIONS, "Wrong collation options (field %u): %s") \ /*152 */_(ER_NULLABLE_PRIMARY, "Primary index of the space '%s' can not contain nullable parts") \ /*153 */_(ER_NULLABLE_MISMATCH, "Field %d is %s in space format, but %s in index parts") \ /* * !IMPORTANT! Please follow instructions at start of the file * when adding new errors. */ ENUM0(box_error_code, ERROR_CODES); extern struct errcode_record box_error_codes[]; /** Return a string representation of error name, e.g. "ER_OK". */ static inline const char *tnt_errcode_str(uint32_t errcode) { if (errcode >= box_error_code_MAX) { /* Unknown error code - can be triggered using box.error() */ return "ER_UNKNOWN"; } return box_error_codes[errcode].errstr; } /** Return a description of the error. 
*/ static inline const char *tnt_errcode_desc(uint32_t errcode) { if (errcode >= box_error_code_MAX) return "Unknown error"; return box_error_codes[errcode].errdesc; } #ifdef __cplusplus } /* extern "C" */ #endif #endif /* TARANTOOL_BOX_ERRCODE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/index_def.h0000664000000000000000000001736413306565107017704 0ustar rootroot#ifndef TARANTOOL_BOX_INDEX_DEF_H_INCLUDED #define TARANTOOL_BOX_INDEX_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "key_def.h" #include "opt_def.h" #include "small/rlist.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ enum index_type { HASH = 0, /* HASH Index */ TREE, /* TREE Index */ BITSET, /* BITSET Index */ RTREE, /* R-Tree Index */ index_type_MAX, }; extern const char *index_type_strs[]; enum rtree_index_distance_type { /* Euclid distance, sqrt(dx*dx + dy*dy) */ RTREE_INDEX_DISTANCE_TYPE_EUCLID, /* Manhattan distance, fabs(dx) + fabs(dy) */ RTREE_INDEX_DISTANCE_TYPE_MANHATTAN, rtree_index_distance_type_MAX }; extern const char *rtree_index_distance_type_strs[]; /** Index options */ struct index_opts { /** * Is this index unique or not - relevant to HASH/TREE * index */ bool is_unique; /** * RTREE index dimension. */ int64_t dimension; /** * RTREE distance type. */ enum rtree_index_distance_type distance; /** * Vinyl index options. */ int64_t range_size; int64_t page_size; /** * Maximal number of runs that can be created in a level * of the LSM tree before triggering compaction. */ int64_t run_count_per_level; /** * The LSM tree multiplier. Each subsequent level of * the LSM tree is run_size_ratio times larger than * previous one. */ double run_size_ratio; /* Bloom filter false positive rate. */ double bloom_fpr; /** * LSN from the time of index creation. */ int64_t lsn; }; extern const struct index_opts index_opts_default; extern const struct opt_def index_opts_reg[]; /** * Create index options using default values */ static inline void index_opts_create(struct index_opts *opts) { *opts = index_opts_default; } static inline int index_opts_cmp(const struct index_opts *o1, const struct index_opts *o2) { if (o1->is_unique != o2->is_unique) return o1->is_unique < o2->is_unique ? -1 : 1; if (o1->dimension != o2->dimension) return o1->dimension < o2->dimension ? -1 : 1; if (o1->distance != o2->distance) return o1->distance < o2->distance ? -1 : 1; if (o1->range_size != o2->range_size) return o1->range_size < o2->range_size ? 
-1 : 1; if (o1->page_size != o2->page_size) return o1->page_size < o2->page_size ? -1 : 1; if (o1->run_count_per_level != o2->run_count_per_level) return o1->run_count_per_level < o2->run_count_per_level ? -1 : 1; if (o1->run_size_ratio != o2->run_size_ratio) return o1->run_size_ratio < o2->run_size_ratio ? -1 : 1; if (o1->bloom_fpr != o2->bloom_fpr) return o1->bloom_fpr < o2->bloom_fpr ? -1 : 1; return 0; } /* Definition of an index. */ struct index_def { /* A link in key list. */ struct rlist link; /** Ordinal index number in the index array. */ uint32_t iid; /* Space id. */ uint32_t space_id; /** Index name. */ char *name; /** Index type. */ enum index_type type; struct index_opts opts; /* Index key definition. */ struct key_def *key_def; /** * User-defined key definition, merged with the primary * key parts. Used by non-unique keys to uniquely identify * iterator position. */ struct key_def *cmp_def; }; struct index_def * index_def_dup(const struct index_def *def); /* Destroy and free an index_def. */ void index_def_delete(struct index_def *def); /** * Update 'has_optional_parts' property of key definitions. * @param def Index def, containing key definitions to update. * @param min_field_count Minimal field count. All parts out of * this value are optional. */ static inline void index_def_update_optionality(struct index_def *def, uint32_t min_field_count) { key_def_update_optionality(def->key_def, min_field_count); key_def_update_optionality(def->cmp_def, min_field_count); } /** * Add an index definition to a list, preserving the * first position of the primary key. * * In non-unique indexes, secondary keys must contain key parts * of the primary key. This is necessary to make ordered * retrieval from a secondary key useful to SQL * optimizer and make iterators over secondary keys stable * in presence of concurrent updates. * Thus we always create the primary key first, and put * the primary key key_def first in the index_def list. 
*/ static inline void index_def_list_add(struct rlist *index_def_list, struct index_def *index_def) { /** Preserve the position of the primary key */ if (index_def->iid == 0) rlist_add_entry(index_def_list, index_def, link); else rlist_add_tail_entry(index_def_list, index_def, link); } /** * True, if the index change by alter requires an index rebuild. * * Some changes, such as a new page size or bloom_fpr do not * take effect immediately, so do not require a rebuild. * * Others, such as index name change, do not change the data, only * metadata, so do not require a rebuild either. * * Finally, changing index type or number of parts always requires * a rebuild. */ bool index_def_change_requires_rebuild(const struct index_def *old_index_def, const struct index_def *new_index_def); /** * Create a new index definition definition. * * @param key_def key definition, must be fully built * @param pk_def primary key definition, pass non-NULL * for secondary keys to construct * index_def::cmp_def * @retval not NULL Success. * @retval NULL Memory error. */ struct index_def * index_def_new(uint32_t space_id, uint32_t iid, const char *name, uint32_t name_len, enum index_type type, const struct index_opts *opts, struct key_def *key_def, struct key_def *pk_def); /** * One key definition is greater than the other if it's id is * greater, it's name is greater, it's index type is greater * (HASH < TREE < BITSET) or its key part array is greater. */ int index_def_cmp(const struct index_def *key1, const struct index_def *key2); /** * Check a key definition for violation of various limits. 
* * @param index_def index definition * @param old_space space definition */ bool index_def_is_valid(struct index_def *index_def, const char *space_name); #if defined(__cplusplus) } /* extern "C" */ static inline struct index_def * index_def_dup_xc(const struct index_def *def) { struct index_def *ret = index_def_dup(def); if (ret == NULL) diag_raise(); return ret; } static inline void index_def_check_xc(struct index_def *index_def, const char *space_name) { if (! index_def_is_valid(index_def, space_name)) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_INDEX_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/space_def.h0000664000000000000000000001244213306565107017660 0ustar rootroot#ifndef TARANTOOL_BOX_SPACE_DEF_H_INCLUDED #define TARANTOOL_BOX_SPACE_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include "tuple_dictionary.h" #include "schema_def.h" #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** Space options */ struct space_opts { /** * The space is a temporary: * - it is empty at server start * - changes are not written to WAL * - changes are not part of a snapshot */ bool temporary; }; extern const struct space_opts space_opts_default; extern const struct opt_def space_opts_reg[]; /** * Create space options using default values. */ static inline void space_opts_create(struct space_opts *opts) { /* default values of opts */ *opts = space_opts_default; } /** Space metadata. */ struct space_def { /** Space id. */ uint32_t id; /** User id of the creator of the space */ uint32_t uid; /** * If not set (is 0), any tuple in the * space can have any number of fields. * If set, each tuple * must have exactly this many fields. */ uint32_t exact_field_count; char engine_name[ENGINE_NAME_MAX + 1]; /** * Tuple field names dictionary, shared with a space's * tuple format. */ struct tuple_dictionary *dict; /** Space fields, specified by a user. */ struct field_def *fields; /** Length of @a fields. */ uint32_t field_count; struct space_opts opts; char name[0]; }; /** * Delete the space_def object. * @param def Def to delete. */ static inline void space_def_delete(struct space_def *def) { tuple_dictionary_unref(def->dict); TRASH(def); free(def); } /** * Duplicate space_def object. 
* @param src Def to duplicate. * @retval Copy of the @src. */ struct space_def * space_def_dup(const struct space_def *src); /** * Create a new space definition. * @param id Space identifier. * @param uid Owner identifier. * @param exact_field_count Space tuples field count. * 0 for any count. * @param name Space name. * @param name_len Length of the @name. * @param engine_name Engine name. * @param engine_len Length of the @engine. * @param opts Space options. * @param fields Field definitions. * @param field_count Length of @a fields. * * @retval Space definition. */ struct space_def * space_def_new(uint32_t id, uint32_t uid, uint32_t exact_field_count, const char *name, uint32_t name_len, const char *engine_name, uint32_t engine_len, const struct space_opts *opts, const struct field_def *fields, uint32_t field_count); /** * Check that a space with @an old_def can be altered to have * @a new_def. * @param old_def Old space definition. * @param new_def New space definition. * @param is_space_empty True, if a space is empty. * * @retval 0 Space definition can be altered to @a new_def. * @retval -1 Client error. 
*/ int space_def_check_compatibility(const struct space_def *old_def, const struct space_def *new_def, bool is_space_empty); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline struct space_def * space_def_dup_xc(const struct space_def *src) { struct space_def *ret = space_def_dup(src); if (ret == NULL) diag_raise(); return ret; } static inline struct space_def * space_def_new_xc(uint32_t id, uint32_t uid, uint32_t exact_field_count, const char *name, uint32_t name_len, const char *engine_name, uint32_t engine_len, const struct space_opts *opts, const struct field_def *fields, uint32_t field_count) { struct space_def *ret = space_def_new(id, uid, exact_field_count, name, name_len, engine_name, engine_len, opts, fields, field_count); if (ret == NULL) diag_raise(); return ret; } static inline void space_def_check_compatibility_xc(const struct space_def *old_def, const struct space_def *new_def, bool is_space_empty) { if (space_def_check_compatibility(old_def, new_def, is_space_empty) != 0) diag_raise(); } #endif /* __cplusplus */ #endif /* TARANTOOL_BOX_SPACE_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/coll_cache.c0000664000000000000000000000600713306565107020016 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coll_cache.h" #include "diag.h" #include "assoc.h" /** mhash table (id -> collation) */ static struct mh_i32ptr_t *coll_cache_id = NULL; /** Create global hash tables if necessary. */ int coll_cache_init() { coll_cache_id = mh_i32ptr_new(); if (coll_cache_id == NULL) { diag_set(OutOfMemory, sizeof(*coll_cache_id), "malloc", "coll_cache_id"); return -1; } return 0; } /** Delete global hash tables. */ void coll_cache_destroy() { mh_i32ptr_delete(coll_cache_id); } /** * Insert or replace a collation into collation cache. * @param coll - collation to insert/replace. * @return - NULL if inserted, replaced collation if replaced. */ int coll_cache_replace(struct coll *coll, struct coll **replaced) { const struct mh_i32ptr_node_t id_node = {coll->id, coll}; struct mh_i32ptr_node_t repl_id_node = {0, NULL}; struct mh_i32ptr_node_t *prepl_id_node = &repl_id_node; if (mh_i32ptr_put(coll_cache_id, &id_node, &prepl_id_node, NULL) == mh_end(coll_cache_id)) { diag_set(OutOfMemory, sizeof(id_node), "malloc", "coll_cache_id"); return -1; } assert(repl_id_node.val == NULL); *replaced = repl_id_node.val; return 0; } /** * Delete a collation from collation cache. * @param coll - collation to delete. 
*/ void coll_cache_delete(const struct coll *coll) { mh_int_t i = mh_i32ptr_find(coll_cache_id, coll->id, NULL); if (i == mh_end(coll_cache_id)) return; mh_i32ptr_del(coll_cache_id, i, NULL); } /** * Find a collation object by its id. */ struct coll * coll_by_id(uint32_t id) { mh_int_t pos = mh_i32ptr_find(coll_cache_id, id, NULL); if (pos == mh_end(coll_cache_id)) return NULL; return mh_i32ptr_node(coll_cache_id, pos)->val; } tarantool_1.9.1.26.g63eb81e3c/src/box/alter.cc0000664000000000000000000030526413306565107017223 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "alter.h" #include "schema.h" #include "user.h" #include "space.h" #include "index.h" #include "func.h" #include "coll_cache.h" #include "txn.h" #include "tuple.h" #include "fiber.h" /* for gc_pool */ #include "scoped_guard.h" #include "third_party/base64.h" #include /* for placement new */ #include /* snprintf() */ #include #include "replication.h" /* for replica_set_id() */ #include "session.h" /* to fetch the current user. */ #include "vclock.h" /* VCLOCK_MAX */ #include "xrow.h" #include "iproto_constants.h" #include "identifier.h" #include "memtx_tuple.h" #include "version.h" #include "sequence.h" /** * chap-sha1 of empty string, i.e. * base64_encode(sha1(sha1(""), 0) */ #define CHAP_SHA1_EMPTY_PASSWORD "vhvewKp0tNyweZQ+cFKAlsyphfg=" /* {{{ Auxiliary functions and methods. */ static void access_check_ddl(const char *name, uint32_t owner_uid, enum schema_object_type type, enum priv_type priv_type, bool is_17_compat_mode) { struct credentials *cr = effective_user(); user_access_t has_access = cr->universal_access; /* * XXX: pre 1.7.7 there was no specific 'CREATE' or * 'ALTER' ACL, instead, read and write access on universe * was used to allow create/alter. * For backward compatibility, if a user has read and write * access on the universe, grant it CREATE access * automatically. * The legacy fix does not affect sequences since they * were added in 1.7.7 only. */ if (is_17_compat_mode && has_access & PRIV_R && has_access & PRIV_W) has_access |= PRIV_C | PRIV_A; user_access_t access = ((PRIV_U | (user_access_t) priv_type) & ~has_access); bool is_owner = owner_uid == cr->uid || cr->uid == ADMIN; /* * Only the owner of the object or someone who has * specific DDL privilege on the object can execute * DDL. If a user has no USAGE access and is owner, * deny access as well. */ if (access == 0 || (is_owner && !(access & PRIV_U))) return; /* Access granted. 
*/ struct user *user = user_find_xc(cr->uid); if (is_owner) { tnt_raise(AccessDeniedError, priv_name(PRIV_U), schema_object_name(SC_UNIVERSE), "", user->def->name); } else { tnt_raise(AccessDeniedError, priv_name(access), schema_object_name(type), name, user->def->name); } } /** * Throw an exception if the given index definition * is incompatible with a sequence. */ static void index_def_check_sequence(struct index_def *index_def, const char *space_name) { enum field_type type = index_def->key_def->parts[0].type; if (type != FIELD_TYPE_UNSIGNED && type != FIELD_TYPE_INTEGER) { tnt_raise(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "sequence cannot be used with " "a non-integer key"); } } /** * Support function for index_def_new_from_tuple(..) * Checks tuple (of _index space) and throws a nice error if it is invalid * Checks only types of fields and their count! * Additionally determines version of tuple structure * is_166plus is set as true if tuple structure is 1.6.6+ * is_166plus is set as false if tuple structure is 1.6.5- */ static void index_def_check_tuple(const struct tuple *tuple, bool *is_166plus) { *is_166plus = true; const mp_type common_template[] = {MP_UINT, MP_UINT, MP_STR, MP_STR}; const char *data = tuple_data(tuple); uint32_t field_count = mp_decode_array(&data); const char *field_start = data; if (field_count < 6) goto err; for (size_t i = 0; i < lengthof(common_template); i++) { enum mp_type type = mp_typeof(*data); if (type != common_template[i]) goto err; mp_next(&data); } if (mp_typeof(*data) == MP_UINT) { /* old 1.6.5- version */ /* TODO: removed it in newer versions, find all 1.6.5- */ *is_166plus = false; mp_next(&data); if (mp_typeof(*data) != MP_UINT) goto err; if (field_count % 2) goto err; mp_next(&data); for (uint32_t i = 6; i < field_count; i += 2) { if (mp_typeof(*data) != MP_UINT) goto err; mp_next(&data); if (mp_typeof(*data) != MP_STR) goto err; mp_next(&data); } } else { if (field_count != 6) goto err; if 
(mp_typeof(*data) != MP_MAP) goto err; mp_next(&data); if (mp_typeof(*data) != MP_ARRAY) goto err; } return; err: char got[DIAG_ERRMSG_MAX]; char *p = got, *e = got + sizeof(got); data = field_start; for (uint32_t i = 0; i < field_count && p < e; i++) { enum mp_type type = mp_typeof(*data); mp_next(&data); const char *type_name; switch (type) { case MP_UINT: type_name = "number"; break; case MP_STR: type_name = "string"; break; case MP_ARRAY: type_name = "array"; break; case MP_MAP: type_name = "map"; break; default: type_name = "unknown"; break; } p += snprintf(p, e - p, i ? ", %s" : "%s", type_name); } const char *expected; if (*is_166plus) { expected = "space id (number), index id (number), " "name (string), type (string), " "options (map), parts (array)"; } else { expected = "space id (number), index id (number), " "name (string), type (string), " "is_unique (number), part count (number) " "part0 field no (number), " "part0 field type (string), ..."; } tnt_raise(ClientError, ER_WRONG_INDEX_RECORD, got, expected); } /** * Fill index_opts structure from opts field in tuple of space _index * Throw an error is unrecognized option. 
*/ static void index_opts_decode(struct index_opts *opts, const char *map) { index_opts_create(opts); if (opts_decode(opts, index_opts_reg, &map, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, NULL) != 0) diag_raise(); if (opts->distance == rtree_index_distance_type_MAX) { tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, "distance must be either "\ "'euclid' or 'manhattan'"); } if (opts->range_size <= 0) { tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, "range_size must be greater than 0"); } if (opts->page_size <= 0 || opts->page_size > opts->range_size) { tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, "page_size must be greater than 0 and " "less than or equal to range_size"); } if (opts->run_count_per_level <= 0) { tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, "run_count_per_level must be greater than 0"); } if (opts->run_size_ratio <= 1) { tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, "run_size_ratio must be greater than 1"); } if (opts->bloom_fpr <= 0 || opts->bloom_fpr > 1) { tnt_raise(ClientError, ER_WRONG_INDEX_OPTIONS, BOX_INDEX_FIELD_OPTS, "bloom_fpr must be greater than 0 and " "less than or equal to 1"); } } /** * Create a index_def object from a record in _index * system space. 
* * Check that: * - index id is within range * - index type is supported * - part count > 0 * - there are parts for the specified part count * - types of parts in the parts array are known to the system * - fieldno of each part in the parts array is within limits */ static struct index_def * index_def_new_from_tuple(struct tuple *tuple, struct space *space) { bool is_166plus; index_def_check_tuple(tuple, &is_166plus); struct index_opts opts; index_opts_create(&opts); uint32_t id = tuple_field_u32_xc(tuple, BOX_INDEX_FIELD_SPACE_ID); uint32_t index_id = tuple_field_u32_xc(tuple, BOX_INDEX_FIELD_ID); enum index_type type = STR2ENUM(index_type, tuple_field_cstr_xc(tuple, BOX_INDEX_FIELD_TYPE)); uint32_t name_len; const char *name = tuple_field_str_xc(tuple, BOX_INDEX_FIELD_NAME, &name_len); uint32_t part_count; const char *parts; if (is_166plus) { /* 1.6.6+ _index space structure */ const char *opts_field = tuple_field_with_type_xc(tuple, BOX_INDEX_FIELD_OPTS, MP_MAP); index_opts_decode(&opts, opts_field); parts = tuple_field(tuple, BOX_INDEX_FIELD_PARTS); part_count = mp_decode_array(&parts); } else { /* 1.6.5- _index space structure */ /* TODO: remove it in newer versions, find all 1.6.5- */ opts.is_unique = tuple_field_u32_xc(tuple, BOX_INDEX_FIELD_IS_UNIQUE_165); part_count = tuple_field_u32_xc(tuple, BOX_INDEX_FIELD_PART_COUNT_165); parts = tuple_field(tuple, BOX_INDEX_FIELD_PARTS_165); } if (name_len > BOX_NAME_MAX) { tnt_raise(ClientError, ER_MODIFY_INDEX, tt_cstr(name, BOX_INVALID_NAME_MAX), space_name(space), "index name is too long"); } identifier_check_xc(name, name_len); struct key_def *key_def = NULL; struct key_part_def *part_def = (struct key_part_def *) malloc(sizeof(*part_def) * part_count); if (part_def == NULL) { tnt_raise(OutOfMemory, sizeof(*part_def) * part_count, "malloc", "key_part_def"); } auto key_def_guard = make_scoped_guard([&] { free(part_def); if (key_def != NULL) key_def_delete(key_def); }); if (is_166plus) { /* 1.6.6+ */ if 
(key_def_decode_parts(part_def, part_count, &parts, space->def->fields, space->def->field_count) != 0) diag_raise(); } else { /* 1.6.5- TODO: remove it in newer versions, find all 1.6.5- */ if (key_def_decode_parts_160(part_def, part_count, &parts, space->def->fields, space->def->field_count) != 0) diag_raise(); } key_def = key_def_new_with_parts(part_def, part_count); if (key_def == NULL) diag_raise(); struct index_def *index_def = index_def_new(id, index_id, name, name_len, type, &opts, key_def, space_index_key_def(space, 0)); if (index_def == NULL) diag_raise(); auto index_def_guard = make_scoped_guard([=] { index_def_delete(index_def); }); index_def_check_xc(index_def, space_name(space)); space_check_index_def_xc(space, index_def); if (index_def->iid == 0 && space->sequence != NULL) index_def_check_sequence(index_def, space_name(space)); index_def_guard.is_active = false; return index_def; } /** * Fill space opts from the msgpack stream (MP_MAP field in the * tuple). */ static void space_opts_decode(struct space_opts *opts, const char *data) { space_opts_create(opts); if (data == NULL) return; bool is_170_plus = (mp_typeof(*data) == MP_MAP); if (!is_170_plus) { /* Tarantool < 1.7.0 compatibility */ if (mp_typeof(*data) != MP_STR) { tnt_raise(ClientError, ER_FIELD_TYPE, BOX_SPACE_FIELD_OPTS + TUPLE_INDEX_BASE, mp_type_strs[MP_STR]); } uint32_t len; const char *flags = mp_decode_str(&data, &len); flags = tt_cstr(flags, len); while (flags && *flags) { while (isspace(*flags)) /* skip space */ flags++; if (strncmp(flags, "temporary", strlen("temporary")) == 0) opts->temporary = true; flags = strchr(flags, ','); if (flags) flags++; } } else if (opts_decode(opts, space_opts_reg, &data, ER_WRONG_SPACE_OPTIONS, BOX_SPACE_FIELD_OPTS, NULL) != 0) { diag_raise(); } } /** * Decode field definition from MessagePack map. Format: * {name: , type: }. Type is optional. * @param[out] field Field to decode to. * @param data MessagePack map to decode. 
* @param space_name Name of a space, from which the field is got. * Used in error messages. * @param name_len Length of @a space_name. * @param errcode Error code to use for client errors. Either * create or modify space errors. * @param fieldno Field number to decode. Used in error messages. * @param region Region to allocate field name. */ static void field_def_decode(struct field_def *field, const char **data, const char *space_name, uint32_t name_len, uint32_t errcode, uint32_t fieldno, struct region *region) { if (mp_typeof(**data) != MP_MAP) { tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len), tt_sprintf("field %d is not map", fieldno + TUPLE_INDEX_BASE)); } int count = mp_decode_map(data); *field = field_def_default; for (int i = 0; i < count; ++i) { if (mp_typeof(**data) != MP_STR) { tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len), tt_sprintf("field %d format is not map"\ " with string keys", fieldno + TUPLE_INDEX_BASE)); } uint32_t key_len; const char *key = mp_decode_str(data, &key_len); if (opts_parse_key(field, field_def_reg, key, key_len, data, ER_WRONG_SPACE_FORMAT, fieldno + TUPLE_INDEX_BASE, region, true) != 0) diag_raise(); } if (field->name == NULL) { tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len), tt_sprintf("field %d name is not specified", fieldno + TUPLE_INDEX_BASE)); } size_t field_name_len = strlen(field->name); if (field_name_len > BOX_NAME_MAX) { tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len), tt_sprintf("field %d name is too long", fieldno + TUPLE_INDEX_BASE)); } identifier_check_xc(field->name, field_name_len); if (field->type == field_type_MAX) { tnt_raise(ClientError, errcode, tt_cstr(space_name, name_len), tt_sprintf("field %d has unknown field type", fieldno + TUPLE_INDEX_BASE)); } } /** * Decode MessagePack array of fields. * @param data MessagePack array of fields. * @param[out] out_count Length of a result array. * @param space_name Space name to use in error messages. 
* @param errcode Errcode for client errors. * @param region Region to allocate result array. * * @retval Array of fields. */ static struct field_def * space_format_decode(const char *data, uint32_t *out_count, const char *space_name, uint32_t name_len, uint32_t errcode, struct region *region) { /* Type is checked by _space format. */ assert(mp_typeof(*data) == MP_ARRAY); uint32_t count = mp_decode_array(&data); *out_count = count; if (count == 0) return NULL; size_t size = count * sizeof(struct field_def); struct field_def *region_defs = (struct field_def *) region_alloc_xc(region, size); for (uint32_t i = 0; i < count; ++i) { field_def_decode(®ion_defs[i], &data, space_name, name_len, errcode, i, region); } return region_defs; } /** * Fill space_def structure from struct tuple. */ static struct space_def * space_def_new_from_tuple(struct tuple *tuple, uint32_t errcode, struct region *region) { uint32_t name_len; const char *name = tuple_field_str_xc(tuple, BOX_SPACE_FIELD_NAME, &name_len); if (name_len > BOX_NAME_MAX) tnt_raise(ClientError, errcode, tt_cstr(name, BOX_INVALID_NAME_MAX), "space name is too long"); identifier_check_xc(name, name_len); uint32_t id = tuple_field_u32_xc(tuple, BOX_SPACE_FIELD_ID); if (id > BOX_SPACE_MAX) { tnt_raise(ClientError, errcode, tt_cstr(name, name_len), "space id is too big"); } if (id == 0) { tnt_raise(ClientError, errcode, tt_cstr(name, name_len), "space id 0 is reserved"); } uint32_t uid = tuple_field_u32_xc(tuple, BOX_SPACE_FIELD_UID); uint32_t exact_field_count = tuple_field_u32_xc(tuple, BOX_SPACE_FIELD_FIELD_COUNT); uint32_t engine_name_len; const char *engine_name = tuple_field_str_xc(tuple, BOX_SPACE_FIELD_ENGINE, &engine_name_len); /* * Engines are compiled-in so their names are known in * advance to be shorter than names of other identifiers. 
*/ if (engine_name_len > ENGINE_NAME_MAX) { tnt_raise(ClientError, errcode, tt_cstr(name, name_len), "space engine name is too long"); } identifier_check_xc(engine_name, engine_name_len); const char *space_opts; struct field_def *fields; uint32_t field_count; if (dd_version_id >= version_id(1, 7, 6)) { /* Check space opts. */ space_opts = tuple_field_with_type_xc(tuple, BOX_SPACE_FIELD_OPTS, MP_MAP); /* Check space format */ const char *format = tuple_field_with_type_xc(tuple, BOX_SPACE_FIELD_FORMAT, MP_ARRAY); fields = space_format_decode(format, &field_count, name, name_len, errcode, region); if (exact_field_count != 0 && exact_field_count < field_count) { tnt_raise(ClientError, errcode, tt_cstr(name, name_len), "exact_field_count must be either 0 or >= "\ "formatted field count"); } } else { fields = NULL; field_count = 0; space_opts = tuple_field(tuple, BOX_SPACE_FIELD_OPTS); } struct space_opts opts; space_opts_decode(&opts, space_opts); struct space_def *def = space_def_new_xc(id, uid, exact_field_count, name, name_len, engine_name, engine_name_len, &opts, fields, field_count); auto def_guard = make_scoped_guard([=] { space_def_delete(def); }); struct engine *engine = engine_find_xc(def->engine_name); engine_check_space_def_xc(engine, def); def_guard.is_active = false; return def; } /** * Space old and new space triggers (move the original triggers * to the new space, or vice versa, restore the original triggers * in the old space). */ static void space_swap_triggers(struct space *new_space, struct space *old_space) { rlist_swap(&new_space->before_replace, &old_space->before_replace); rlist_swap(&new_space->on_replace, &old_space->on_replace); rlist_swap(&new_space->on_stmt_begin, &old_space->on_stmt_begin); } /** * True if the space has records identified by key 'uid'. * Uses 'iid' index. 
*/ bool space_has_data(uint32_t id, uint32_t iid, uint32_t uid) { struct space *space = space_by_id(id); if (space == NULL) return false; if (space_index(space, iid) == NULL) return false; struct index *index = index_find_system_xc(space, iid); char key[6]; assert(mp_sizeof_uint(BOX_SYSTEM_ID_MIN) <= sizeof(key)); mp_encode_uint(key, uid); struct iterator *it = index_create_iterator_xc(index, ITER_EQ, key, 1); IteratorGuard iter_guard(it); if (iterator_next_xc(it) != NULL) return true; return false; } /* }}} */ /* {{{ struct alter_space - the body of a full blown alter */ struct alter_space; class AlterSpaceOp { public: AlterSpaceOp(struct alter_space *alter); struct rlist link; virtual void alter_def(struct alter_space * /* alter */) {} virtual void alter(struct alter_space * /* alter */) {} virtual void commit(struct alter_space * /* alter */, int64_t /* signature */) {} virtual void rollback(struct alter_space * /* alter */) {} virtual ~AlterSpaceOp() {} void *operator new(size_t size) { return region_aligned_calloc_xc(&fiber()->gc, size, alignof(uint64_t)); } void operator delete(void * /* ptr */) {} }; /** * A trigger installed on transaction commit/rollback events of * the transaction which initiated the alter. */ static struct trigger * txn_alter_trigger_new(trigger_f run, void *data) { struct trigger *trigger = (struct trigger *) region_calloc_object_xc(&fiber()->gc, struct trigger); trigger->run = run; trigger->data = data; trigger->destroy = NULL; return trigger; } struct alter_space { /** List of alter operations */ struct rlist ops; /** Definition of the new space - space_def. */ struct space_def *space_def; /** Definition of the new space - keys. */ struct rlist key_list; /** Old space. */ struct space *old_space; /** New space. */ struct space *new_space; /** * Assigned to the new primary key definition if we're * rebuilding the primary key, i.e. changing its key parts * substantially. */ struct key_def *pk_def; /** * Min field count of a new space. 
It is calculated before * the new space is created and used to update optionality * of key_defs and key_parts. */ uint32_t new_min_field_count; }; static struct alter_space * alter_space_new(struct space *old_space) { struct alter_space *alter = region_calloc_object_xc(&fiber()->gc, struct alter_space); rlist_create(&alter->ops); alter->old_space = old_space; alter->space_def = space_def_dup_xc(alter->old_space->def); if (old_space->format != NULL) alter->new_min_field_count = old_space->format->min_field_count; else alter->new_min_field_count = 0; return alter; } /** Destroy alter. */ static void alter_space_delete(struct alter_space *alter) { /* Destroy the ops. */ while (! rlist_empty(&alter->ops)) { AlterSpaceOp *op = rlist_shift_entry(&alter->ops, AlterSpaceOp, link); delete op; } /* Delete the new space, if any. */ if (alter->new_space) space_delete(alter->new_space); space_def_delete(alter->space_def); } AlterSpaceOp::AlterSpaceOp(struct alter_space *alter) { /* Add to the tail: operations must be processed in order. */ rlist_add_tail_entry(&alter->ops, this, link); } /** * Commit the alter. * * Move all unchanged indexes from the old space to the new space. * Set the newly built indexes in the new space, or free memory * of the dropped indexes. * Replace the old space with a new one in the space cache. */ static void alter_space_commit(struct trigger *trigger, void *event) { struct txn *txn = (struct txn *) event; struct alter_space *alter = (struct alter_space *) trigger->data; /* * Commit alter ops, this will move the changed * indexes into their new places. */ class AlterSpaceOp *op; rlist_foreach_entry(op, &alter->ops, link) { op->commit(alter, txn->signature); } trigger_run_xc(&on_alter_space, alter->new_space); alter->new_space = NULL; /* for alter_space_delete(). */ /* * Delete the old version of the space, we are not * going to use it. */ space_delete(alter->old_space); alter_space_delete(alter); } /** * Rollback all effects of space alter. 
This is * a transaction trigger, and it fires most likely * upon a failed write to the WAL. * * Keep in mind that we may end up here in case of * alter_space_commit() failure (unlikely) */ static void alter_space_rollback(struct trigger *trigger, void * /* event */) { struct alter_space *alter = (struct alter_space *) trigger->data; /* Rollback alter ops */ class AlterSpaceOp *op; rlist_foreach_entry(op, &alter->ops, link) { op->rollback(alter); } /* Rebuild index maps once for all indexes. */ space_fill_index_map(alter->old_space); space_fill_index_map(alter->new_space); /* * Don't forget about space triggers. */ space_swap_triggers(alter->new_space, alter->old_space); struct space *new_space = space_cache_replace(alter->old_space); assert(new_space == alter->new_space); (void) new_space; alter_space_delete(alter); } /** * alter_space_do() - do all the work necessary to * create a new space. * * If something may fail during alter, it must be done here, * before a record is written to the Write Ahead Log. Only * trivial and infallible actions are left to the commit phase * of the alter. * * The implementation of this function follows "Template Method" * pattern, providing a skeleton of the alter, while all the * details are encapsulated in AlterSpaceOp methods. * * These are the major steps of alter defining the structure of * the algorithm and performed regardless of what is altered: * * - a copy of the definition of the old space is created * - the definition of the old space is altered, to get * definition of a new space * - an instance of the new space is created, according to the new * definition; the space is so far empty * - data structures of the new space are built; sometimes, it * doesn't need to happen, e.g. when alter only changes the name * of a space or an index, or other accidental property. * If any data structure needs to be built, e.g. a new index, * only this index is built, not the entire space with all its * indexes. 
* - at commit, the new space is coalesced with the old one. * On rollback, the new space is deleted. */ static void alter_space_do(struct txn *txn, struct alter_space *alter) { /* Create a definition of the new space. */ space_dump_def(alter->old_space, &alter->key_list); class AlterSpaceOp *op; /* * Alter the definition of the old space, so that * a new space can be created with a new definition. */ rlist_foreach_entry(op, &alter->ops, link) op->alter_def(alter); /* * Create a new (empty) space for the new definition. * Sic: the triggers are not moved over yet. */ alter->new_space = space_new_xc(alter->space_def, &alter->key_list); /* * Copy the replace function, the new space is at the same recovery * phase as the old one. This hack is especially necessary for * system spaces, which may be altered in some row in the * snapshot/xlog, but needs to continue staying "fully * built". */ space_prepare_alter_xc(alter->old_space, alter->new_space); alter->new_space->sequence = alter->old_space->sequence; alter->new_space->truncate_count = alter->old_space->truncate_count; memcpy(alter->new_space->access, alter->old_space->access, sizeof(alter->old_space->access)); /* * Change the new space: build the new index, rename, * change the fixed field count. */ try { rlist_foreach_entry(op, &alter->ops, link) op->alter(alter); } catch (Exception *e) { /* * Undo space changes from the last successful * operation back to the first. Skip the operation * which failed. An operation may fail during * alter if, e.g. if it adds a unique key and * there is a duplicate. */ while (op != rlist_first_entry(&alter->ops, class AlterSpaceOp, link)) { op = rlist_prev_entry(op, link); op->rollback(alter); } throw; } /* Rebuild index maps once for all indexes. */ space_fill_index_map(alter->old_space); space_fill_index_map(alter->new_space); /* * Don't forget about space triggers. */ space_swap_triggers(alter->new_space, alter->old_space); /* * The new space is ready. 
Time to update the space * cache with it. */ space_commit_alter(alter->old_space, alter->new_space); struct space *old_space = space_cache_replace(alter->new_space); (void) old_space; assert(old_space == alter->old_space); /* * Install transaction commit/rollback triggers to either * finish or rollback the DDL depending on the results of * writing to WAL. */ struct trigger *on_commit = txn_alter_trigger_new(alter_space_commit, alter); txn_on_commit(txn, on_commit); struct trigger *on_rollback = txn_alter_trigger_new(alter_space_rollback, alter); txn_on_rollback(txn, on_rollback); } /* }}} */ /* {{{ AlterSpaceOp descendants - alter operations, such as Add/Drop index */ /** * The operation is executed on each space format change. * Now the single purpose is to update an old field names * dictionary, used by old space formats, and use it in a new * formats (vinyl creates many formats, not one). */ class ModifySpaceFormat: public AlterSpaceOp { /** * Newely created field dictionary. When new space_def is * created, it allocates new dictionary. Alter moves new * names into an old dictionary and deletes new one. */ struct tuple_dictionary *new_dict; /** * Old tuple dictionary stored to rollback in destructor, * if an exception had been raised after alter_def(), but * before alter(). */ struct tuple_dictionary *old_dict; /** * New space definition. It can not be got from alter, * because alter_def() is called before * ModifySpace::alter_def(). 
*/ struct space_def *new_def; public: ModifySpaceFormat(struct alter_space *alter, struct space_def *new_def) : AlterSpaceOp(alter), new_dict(NULL), old_dict(NULL), new_def(new_def) {} virtual void alter(struct alter_space *alter); virtual void alter_def(struct alter_space *alter); virtual void commit(struct alter_space *alter, int64_t lsn); virtual ~ModifySpaceFormat(); }; void ModifySpaceFormat::alter_def(struct alter_space *alter) { /* * Move new names into an old dictionary, which already is * referenced by existing tuple formats. New dictionary * object is deleted later, in destructor. */ new_dict = new_def->dict; old_dict = alter->old_space->def->dict; tuple_dictionary_swap(new_dict, old_dict); new_def->dict = old_dict; tuple_dictionary_ref(old_dict); } void ModifySpaceFormat::alter(struct alter_space *alter) { struct space *new_space = alter->new_space; struct space *old_space = alter->old_space; struct tuple_format *new_format = new_space->format; struct tuple_format *old_format = old_space->format; if (old_format != NULL) { assert(new_format != NULL); if (! tuple_format1_can_store_format2_tuples(new_format, old_format)) space_check_format_xc(new_space, old_space); } } void ModifySpaceFormat::commit(struct alter_space *alter, int64_t lsn) { (void) alter; (void) lsn; old_dict = NULL; } ModifySpaceFormat::~ModifySpaceFormat() { if (new_dict != NULL) { /* Return old names into the old dict. */ if (old_dict != NULL) tuple_dictionary_swap(new_dict, old_dict); tuple_dictionary_unref(new_dict); } } /** Change non-essential properties of a space. */ class ModifySpace: public AlterSpaceOp { public: ModifySpace(struct alter_space *alter, struct space_def *def_arg) :AlterSpaceOp(alter), def(def_arg) {} /* New space definition. */ struct space_def *def; virtual void alter_def(struct alter_space *alter); virtual ~ModifySpace(); }; /** Amend the definition of the new space. 
*/ void ModifySpace::alter_def(struct alter_space *alter) { space_def_delete(alter->space_def); alter->space_def = def; /* Now alter owns the def. */ def = NULL; } ModifySpace::~ModifySpace() { if (def != NULL) space_def_delete(def); } /** DropIndex - remove an index from space. */ class DropIndex: public AlterSpaceOp { public: DropIndex(struct alter_space *alter, struct index_def *def_arg) :AlterSpaceOp(alter), old_index_def(def_arg) {} /** A reference to the definition of the dropped index. */ struct index_def *old_index_def; virtual void alter_def(struct alter_space *alter); virtual void alter(struct alter_space *alter); virtual void commit(struct alter_space *alter, int64_t lsn); }; /* * Alter the definition of the new space and remove * the new index from it. */ void DropIndex::alter_def(struct alter_space * /* alter */) { rlist_del_entry(old_index_def, link); } /* Do the drop. */ void DropIndex::alter(struct alter_space *alter) { /* * If it's not the primary key, nothing to do -- * the dropped index didn't exist in the new space * definition, so does not exist in the created space. */ if (space_index(alter->new_space, 0) != NULL) return; /* * OK to drop the primary key. Inform the engine about it, * since it may have to reset handler->replace function, * so that: * - DML returns proper errors rather than crashes the * program * - when a new primary key is finally added, the space * can be put back online properly. */ space_drop_primary_key(alter->new_space); } void DropIndex::commit(struct alter_space *alter, int64_t /* signature */) { struct index *index = index_find_xc(alter->old_space, old_index_def->iid); index_commit_drop(index); } /** * A no-op to preserve the old index data in the new space. * Added to the alter specification when the index at hand * is not affected by alter in any way. 
*/ class MoveIndex: public AlterSpaceOp { public: MoveIndex(struct alter_space *alter, uint32_t iid_arg) :AlterSpaceOp(alter), iid(iid_arg) {} /** id of the index on the move. */ uint32_t iid; virtual void alter(struct alter_space *alter); virtual void rollback(struct alter_space *alter); }; void MoveIndex::alter(struct alter_space *alter) { space_swap_index(alter->old_space, alter->new_space, iid, iid); } void MoveIndex::rollback(struct alter_space *alter) { space_swap_index(alter->old_space, alter->new_space, iid, iid); } /** * Change non-essential properties of an index, i.e. * properties not involving index data or layout on disk. */ class ModifyIndex: public AlterSpaceOp { public: ModifyIndex(struct alter_space *alter, struct index_def *new_index_def_arg, struct index_def *old_index_def_arg) : AlterSpaceOp(alter), new_index_def(new_index_def_arg), old_index_def(old_index_def_arg) { if (new_index_def->iid == 0 && key_part_cmp(new_index_def->key_def->parts, new_index_def->key_def->part_count, old_index_def->key_def->parts, old_index_def->key_def->part_count) != 0) { /* * Primary parts have been changed - * update non-unique secondary indexes. */ alter->pk_def = new_index_def->key_def; } } struct index_def *new_index_def; struct index_def *old_index_def; virtual void alter_def(struct alter_space *alter); virtual void alter(struct alter_space *alter); virtual void rollback(struct alter_space *alter); virtual ~ModifyIndex(); }; /** Update the definition of the new space */ void ModifyIndex::alter_def(struct alter_space *alter) { rlist_del_entry(old_index_def, link); index_def_list_add(&alter->key_list, new_index_def); } void ModifyIndex::alter(struct alter_space *alter) { assert(old_index_def->iid == new_index_def->iid); /* * Move the old index to the new space to preserve the * original data, but use the new definition. 
*/ space_swap_index(alter->old_space, alter->new_space, old_index_def->iid, new_index_def->iid); struct index *old_index = space_index(alter->old_space, old_index_def->iid); assert(old_index != NULL); struct index *new_index = space_index(alter->new_space, new_index_def->iid); assert(new_index != NULL); SWAP(old_index->def, new_index->def); index_update_def(new_index); } void ModifyIndex::rollback(struct alter_space *alter) { assert(old_index_def->iid == new_index_def->iid); /* * Restore indexes. */ space_swap_index(alter->old_space, alter->new_space, old_index_def->iid, new_index_def->iid); struct index *old_index = space_index(alter->old_space, old_index_def->iid); assert(old_index != NULL); struct index *new_index = space_index(alter->new_space, new_index_def->iid); assert(new_index != NULL); SWAP(old_index->def, new_index->def); index_update_def(old_index); } ModifyIndex::~ModifyIndex() { index_def_delete(new_index_def); } /** CreateIndex - add a new index to the space. */ class CreateIndex: public AlterSpaceOp { public: CreateIndex(struct alter_space *alter) :AlterSpaceOp(alter), new_index_def(NULL) {} /** New index index_def. */ struct index_def *new_index_def; virtual void alter_def(struct alter_space *alter); virtual void alter(struct alter_space *alter); virtual void commit(struct alter_space *alter, int64_t lsn); virtual ~CreateIndex(); }; /** Add definition of the new key to the new space def. */ void CreateIndex::alter_def(struct alter_space *alter) { index_def_list_add(&alter->key_list, new_index_def); } /** * Optionally build the new index. * * During recovery the space is often not fully constructed yet * anyway, so there is no need to fully populate index with data, * it is done at the end of recovery. * * Note, that system spaces are exception to this, since * they are fully enabled at all times. 
*/ void CreateIndex::alter(struct alter_space *alter) { if (new_index_def->iid == 0) { /* * Adding a primary key: bring the space * up to speed with the current recovery * state. During snapshot recovery it * means preparing the primary key for * build (beginBuild()). During xlog * recovery, it means building the primary * key. After recovery, it means building * all keys. */ space_add_primary_key_xc(alter->new_space); return; } /** * Get the new index and build it. */ struct index *new_index = index_find_xc(alter->new_space, new_index_def->iid); space_build_secondary_key_xc(alter->new_space, alter->new_space, new_index); } void CreateIndex::commit(struct alter_space *alter, int64_t signature) { struct index *new_index = index_find_xc(alter->new_space, new_index_def->iid); index_commit_create(new_index, signature); } CreateIndex::~CreateIndex() { if (new_index_def) index_def_delete(new_index_def); } /** * RebuildIndex - drop the old index data and rebuild index * from by reading the primary key. Used when key_def of * an index is changed. */ class RebuildIndex: public AlterSpaceOp { public: RebuildIndex(struct alter_space *alter, struct index_def *new_index_def_arg, struct index_def *old_index_def_arg) :AlterSpaceOp(alter), new_index_def(new_index_def_arg), old_index_def(old_index_def_arg) { /* We may want to rebuild secondary keys as well. */ if (new_index_def->iid == 0) alter->pk_def = new_index_def->key_def; } /** New index index_def. */ struct index_def *new_index_def; /** Old index index_def. */ struct index_def *old_index_def; virtual void alter_def(struct alter_space *alter); virtual void alter(struct alter_space *alter); virtual void commit(struct alter_space *alter, int64_t signature); virtual ~RebuildIndex(); }; /** Add definition of the new key to the new space def. 
*/ void RebuildIndex::alter_def(struct alter_space *alter) { rlist_del_entry(old_index_def, link); index_def_list_add(&alter->key_list, new_index_def); } void RebuildIndex::alter(struct alter_space *alter) { /* Get the new index and build it. */ struct index *new_index = space_index(alter->new_space, new_index_def->iid); assert(new_index != NULL); space_build_secondary_key_xc(new_index_def->iid != 0 ? alter->new_space : alter->old_space, alter->new_space, new_index); } void RebuildIndex::commit(struct alter_space *alter, int64_t signature) { struct index *old_index = space_index(alter->old_space, old_index_def->iid); struct index *new_index = space_index(alter->new_space, new_index_def->iid); index_commit_drop(old_index); index_commit_create(new_index, signature); } RebuildIndex::~RebuildIndex() { if (new_index_def) index_def_delete(new_index_def); } /* }}} */ /** * Delete the space. It is already removed from the space cache. */ static void on_drop_space_commit(struct trigger *trigger, void *event) { (void) event; struct space *space = (struct space *)trigger->data; trigger_run_xc(&on_alter_space, space); space_delete(space); } /** * Return the original space back into the cache. The effect * of all other events happened after the space was removed were * reverted by the cascading rollback. */ static void on_drop_space_rollback(struct trigger *trigger, void *event) { (void) event; struct space *space = (struct space *)trigger->data; space_cache_replace(space); } /** * Run the triggers registered on commit of a change in _space. */ static void on_create_space_commit(struct trigger *trigger, void *event) { (void) event; struct space *space = (struct space *)trigger->data; trigger_run_xc(&on_alter_space, space); } /** * A trigger invoked on commit/rollback of DROP/ADD space. * The trigger removes the space from the space cache. * * By the time the space is removed, it should be empty: we * rely on cascading rollback. 
*/ static void on_create_space_rollback(struct trigger *trigger, void *event) { (void) event; struct space *space = (struct space *)trigger->data; struct space *cached = space_cache_delete(space_id(space)); (void) cached; assert(cached == space); space_delete(space); } /** * Create MoveIndex operation for a range of indexes in a space * for range [begin, end) */ void alter_space_move_indexes(struct alter_space *alter, uint32_t begin, uint32_t end) { struct space *old_space = alter->old_space; bool is_min_field_count_changed; if (old_space->format != NULL) { is_min_field_count_changed = old_space->format->min_field_count != alter->new_min_field_count; } else { is_min_field_count_changed = false; } for (uint32_t index_id = begin; index_id < end; ++index_id) { struct index *old_index = space_index(old_space, index_id); if (old_index == NULL) continue; struct index_def *old_def = old_index->def; struct index_def *new_def; uint32_t min_field_count = alter->new_min_field_count; if ((old_def->opts.is_unique && !old_def->key_def->is_nullable) || old_def->type != TREE || alter->pk_def == NULL) { if (is_min_field_count_changed) { new_def = index_def_dup(old_def); index_def_update_optionality(new_def, min_field_count); (void) new ModifyIndex(alter, new_def, old_def); } else { (void) new MoveIndex(alter, old_def->iid); } continue; } /* * Rebuild non-unique secondary keys along with * the primary, since primary key parts have * changed. */ new_def = index_def_new(old_def->space_id, old_def->iid, old_def->name, strlen(old_def->name), old_def->type, &old_def->opts, old_def->key_def, alter->pk_def); index_def_update_optionality(new_def, min_field_count); auto guard = make_scoped_guard([=] { index_def_delete(new_def); }); (void) new RebuildIndex(alter, new_def, old_def); guard.is_active = false; } } /** * A trigger which is invoked on replace in a data dictionary * space _space. 
* * Generally, whenever a data dictionary change occurs * 2 things should be done: * * - space cache should be updated, and changes in the space * cache should be reflected in Lua bindings * (this is done in space_cache_replace() and * space_cache_delete()) * * - the space which is changed should be rebuilt according * to the nature of the modification, i.e. indexes added/dropped, * tuple format changed, etc. * * When dealing with an update of _space space, we have 3 major * cases: * * 1) insert a new tuple: creates a new space * The trigger prepares a space structure to insert * into the space cache and registers an on commit * hook to perform the registration. Should the statement * itself fail, transaction is rolled back, the transaction * rollback hook must be there to delete the created space * object, avoiding a memory leak. The hooks are written * in a way that excludes the possibility of a failure. * * 2) delete a tuple: drops an existing space. * * A space can be dropped only if it has no indexes. * The only reason for this restriction is that there * must be no tuples in _index without a corresponding tuple * in _space. It's not possible to delete such tuples * automatically (this would require multi-statement * transactions), so instead the trigger verifies that the * records have been deleted by the user. * * Then the trigger registers transaction commit hook to * perform the deletion from the space cache. No rollback hook * is required: if the transaction is rolled back, nothing is * done. * * 3) modify an existing tuple: some space * properties are immutable, but it's OK to change * space name or field count. This is done in WAL-error- * safe mode. * * A note about memcached_space: Tarantool 1.4 had a check * which prevented re-definition of memcached_space. With * dynamic space configuration such a check would be particularly * clumsy, so it is simply not done. 
 */
static void
on_replace_dd_space(struct trigger * /* trigger */, void *event)
{
	struct txn *txn = (struct txn *) event;
	txn_check_singlestatement_xc(txn, "Space _space");
	struct txn_stmt *stmt = txn_current_stmt(txn);
	struct tuple *old_tuple = stmt->old_tuple;
	struct tuple *new_tuple = stmt->new_tuple;
	struct region *region = &fiber()->gc;
	/*
	 * Things to keep in mind:
	 * - old_tuple is set only in case of UPDATE. For INSERT
	 *   or REPLACE it is NULL.
	 * - the trigger may be called inside recovery from a snapshot,
	 *   when index look up is not possible
	 * - _space, _index and other metaspaces initially don't
	 *   have a tuple which represents it, this tuple is only
	 *   created during recovery from a snapshot.
	 *
	 * Let's establish whether an old space exists. Use
	 * old_tuple ID field, if old_tuple is set, since UPDATE
	 * may have changed space id.
	 */
	uint32_t old_id = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
					     BOX_SPACE_FIELD_ID);
	struct space *old_space = space_by_id(old_id);
	if (new_tuple != NULL && old_space == NULL) { /* INSERT */
		struct space_def *def =
			space_def_new_from_tuple(new_tuple, ER_CREATE_SPACE,
						 region);
		access_check_ddl(def->name, def->uid, SC_SPACE, PRIV_C, true);
		auto def_guard =
			make_scoped_guard([=] { space_def_delete(def); });
		RLIST_HEAD(empty_list);
		struct space *space = space_new_xc(def, &empty_list);
		/**
		 * The new space must be inserted in the space
		 * cache right away to achieve linearisable
		 * execution on a replica.
		 */
		(void) space_cache_replace(space);
		/*
		 * So may happen that until the DDL change record
		 * is written to the WAL, the space is used for
		 * insert/update/delete. All these updates are
		 * rolled back by the pipelined rollback mechanism,
		 * so it's safe to simply drop the space on
		 * rollback.
		 */
		struct trigger *on_commit =
			txn_alter_trigger_new(on_create_space_commit, space);
		txn_on_commit(txn, on_commit);
		struct trigger *on_rollback =
			txn_alter_trigger_new(on_create_space_rollback, space);
		txn_on_rollback(txn, on_rollback);
	} else if (new_tuple == NULL) { /* DELETE */
		access_check_ddl(old_space->def->name, old_space->def->uid,
				 SC_SPACE, PRIV_D, true);
		/* Verify that the space is empty (has no indexes) */
		if (old_space->index_count) {
			tnt_raise(ClientError, ER_DROP_SPACE,
				  space_name(old_space),
				  "the space has indexes");
		}
		if (schema_find_grants("space", old_space->def->id)) {
			tnt_raise(ClientError, ER_DROP_SPACE,
				  space_name(old_space),
				  "the space has grants");
		}
		/*
		 * Before 1.7.6 a space record was removed before
		 * the corresponding record in the _truncate system
		 * space so the following check should be disabled.
		 */
		if (dd_version_id >= version_id(1, 7, 6) &&
		    space_has_data(BOX_TRUNCATE_ID, 0, old_space->def->id))
			tnt_raise(ClientError, ER_DROP_SPACE,
				  space_name(old_space),
				  "the space has truncate record");
		/**
		 * The space must be deleted from the space
		 * cache right away to achieve linearisable
		 * execution on a replica.
		 */
		struct space *space = space_cache_delete(space_id(old_space));
		struct trigger *on_commit =
			txn_alter_trigger_new(on_drop_space_commit, space);
		txn_on_commit(txn, on_commit);
		struct trigger *on_rollback =
			txn_alter_trigger_new(on_drop_space_rollback, space);
		txn_on_rollback(txn, on_rollback);
	} else { /* UPDATE, REPLACE */
		assert(old_space != NULL && new_tuple != NULL);
		struct space_def *def =
			space_def_new_from_tuple(new_tuple, ER_ALTER_SPACE,
						 region);
		access_check_ddl(def->name, def->uid, SC_SPACE, PRIV_A, true);
		auto def_guard =
			make_scoped_guard([=] { space_def_delete(def); });
		/*
		 * Check basic options. Assume the space to be
		 * empty, because we can not calculate here
		 * a size of a vinyl space.
		 */
		space_def_check_compatibility_xc(old_space->def, def, true);
		/*
		 * Allow change of space properties, but do it
		 * in WAL-error-safe mode.
		 */
		struct alter_space *alter = alter_space_new(old_space);
		auto alter_guard =
			make_scoped_guard([=] { alter_space_delete(alter); });
		/*
		 * Calculate a new min_field_count. It can be
		 * changed by resetting space:format(), if an old
		 * format covers some nullable indexed fields in
		 * the format tail. And when the format is reset,
		 * these fields become optional - index
		 * comparators must be updated.
		 */
		struct key_def **keys;
		size_t bsize = old_space->index_count * sizeof(keys[0]);
		keys = (struct key_def **) region_alloc_xc(&fiber()->gc,
							   bsize);
		for (uint32_t i = 0; i < old_space->index_count; ++i)
			keys[i] = old_space->index[i]->def->key_def;
		alter->new_min_field_count =
			tuple_format_min_field_count(keys,
						     old_space->index_count,
						     def->fields,
						     def->field_count);
		/* Ownership of def passes to the alter ops below. */
		(void) new ModifySpaceFormat(alter, def);
		(void) new ModifySpace(alter, def);
		def_guard.is_active = false;
		/* Create MoveIndex ops for all space indexes. */
		alter_space_move_indexes(alter, 0,
					 old_space->index_id_max + 1);
		alter_space_do(txn, alter);
		alter_guard.is_active = false;
	}
}

/**
 * Just like with _space, 3 major cases:
 *
 * - insert a tuple = addition of a new index. The
 *   space should exist.
 *
 * - delete a tuple - drop index.
 *
 * - update a tuple - change of index type or key parts.
 *   Change of index type is the same as deletion of the old
 *   index and addition of the new one.
 *
 * A new index needs to be built before we attempt to commit
 * a record to the write ahead log, since:
 *
 * 1) if it fails, it's not good to end up with a corrupt index
 * which is already committed to WAL
 *
 * 2) Tarantool indexes also work as constraints (min number of
 * fields in the space, field uniqueness), and it's not good to
 * commit to WAL a constraint which is not enforced in the
 * current data set.
 *
 * When adding a new index, ideally we'd also need to rebuild
 * all tuple formats in all tuples, since the old format may not
 * be ideal for the new index.
 * We, however, do not do that,
 * since that would entail rebuilding all indexes at once.
 * Instead, the default tuple format of the space is changed,
 * and as tuples get updated/replaced, all tuples acquire a new
 * format.
 *
 * The same is the case with dropping an index: nothing is
 * rebuilt right away, but gradually the extra space reserved
 * for offsets is relinquished to the slab allocator as tuples
 * are modified.
 */
static void
on_replace_dd_index(struct trigger * /* trigger */, void *event)
{
	struct txn *txn = (struct txn *) event;
	txn_check_singlestatement_xc(txn, "Space _index");
	struct txn_stmt *stmt = txn_current_stmt(txn);
	struct tuple *old_tuple = stmt->old_tuple;
	struct tuple *new_tuple = stmt->new_tuple;
	uint32_t id = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
					 BOX_INDEX_FIELD_SPACE_ID);
	uint32_t iid = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
					  BOX_INDEX_FIELD_ID);
	struct space *old_space = space_cache_find_xc(id);
	/* INSERT => create, DELETE => drop, both set => alter. */
	enum priv_type priv_type = new_tuple ? PRIV_C : PRIV_D;
	if (old_tuple && new_tuple)
		priv_type = PRIV_A;
	access_check_ddl(old_space->def->name, old_space->def->uid, SC_SPACE,
			 priv_type, true);
	struct index *old_index = space_index(old_space, iid);

	/*
	 * Deal with various cases of dropping of the primary key.
	 */
	if (iid == 0 && new_tuple == NULL) {
		/*
		 * Dropping the primary key in a system space: off limits.
		 */
		if (space_is_system(old_space))
			tnt_raise(ClientError, ER_LAST_DROP,
				  space_name(old_space));
		/*
		 * Can't drop primary key before secondary keys.
		 */
		if (old_space->index_count > 1) {
			tnt_raise(ClientError, ER_DROP_PRIMARY_KEY,
				  space_name(old_space));
		}
		/*
		 * Can't drop primary key before space sequence.
		 */
		if (old_space->sequence != NULL) {
			tnt_raise(ClientError, ER_ALTER_SPACE,
				  space_name(old_space),
				  "can not drop primary key while "
				  "space sequence exists");
		}
	}

	if (iid != 0 && space_index(old_space, 0) == NULL) {
		/*
		 * A secondary index can not be created without
		 * a primary key.
		 */
		tnt_raise(ClientError, ER_ALTER_SPACE,
			  space_name(old_space),
			  "can not add a secondary key before primary");
	}

	struct alter_space *alter = alter_space_new(old_space);
	auto scoped_guard =
		make_scoped_guard([=] { alter_space_delete(alter); });
	/*
	 * Handle the following 4 cases:
	 * 1. Simple drop of an index.
	 * 2. Creation of a new index: primary or secondary.
	 * 3. Change of an index which does not require a rebuild.
	 * 4. Change of an index which does require a rebuild.
	 */
	/*
	 * First, move all unchanged indexes from the old space
	 * to the new one.
	 */
	/* Case 1: drop the index, if it is dropped. */
	if (old_index != NULL && new_tuple == NULL) {
		alter_space_move_indexes(alter, 0, iid);
		(void) new DropIndex(alter, old_index->def);
	}
	/* Case 2: create an index, if it is simply created. */
	if (old_index == NULL && new_tuple != NULL) {
		alter_space_move_indexes(alter, 0, iid);
		CreateIndex *create_index = new CreateIndex(alter);
		create_index->new_index_def =
			index_def_new_from_tuple(new_tuple, old_space);
		index_def_update_optionality(create_index->new_index_def,
					     alter->new_min_field_count);
	}
	/* Case 3 and 4: check if we need to rebuild index data. */
	if (old_index != NULL && new_tuple != NULL) {
		struct index_def *index_def;
		index_def = index_def_new_from_tuple(new_tuple, old_space);
		auto index_def_guard = make_scoped_guard([=] {
			index_def_delete(index_def);
		});
		/*
		 * To detect which key parts are optional,
		 * min_field_count is required. But
		 * min_field_count from the old space format can
		 * not be used. For example, consider the case,
		 * when a space has no format, has a primary index
		 * on the first field and has a single secondary
		 * index on a non-nullable second field. Min field
		 * count here is 2. Now alter the secondary index
		 * to make its part be nullable. In the
		 * 'old_space' min_field_count is still 2, but
		 * actually it is already 1. Actual
		 * min_field_count must be calculated using old
		 * unchanged indexes, NEW definition of an updated
		 * index and a space format, defined by a user.
		 */
		struct key_def **keys;
		size_t bsize = old_space->index_count * sizeof(keys[0]);
		keys = (struct key_def **) region_alloc_xc(&fiber()->gc,
							   bsize);
		for (uint32_t i = 0, j = 0; i < old_space->index_count;
		     ++i) {
			struct index_def *d = old_space->index[i]->def;
			if (d->iid != index_def->iid)
				keys[j++] = d->key_def;
			else
				keys[j++] = index_def->key_def;
		}
		struct space_def *def = old_space->def;
		alter->new_min_field_count =
			tuple_format_min_field_count(keys,
						     old_space->index_count,
						     def->fields,
						     def->field_count);
		index_def_update_optionality(index_def,
					     alter->new_min_field_count);
		alter_space_move_indexes(alter, 0, iid);
		if (index_def_cmp(index_def, old_index->def) == 0) {
			/* Index is not changed so just move it. */
			(void) new MoveIndex(alter, old_index->def->iid);
		} else if (index_def_change_requires_rebuild(old_index->def,
							     index_def)) {
			/*
			 * Operation demands an index rebuild.
			 */
			(void) new RebuildIndex(alter, index_def,
						old_index->def);
			index_def_guard.is_active = false;
		} else {
			(void) new ModifySpaceFormat(alter, old_space->def);
			/*
			 * Operation can be done without index rebuild.
			 */
			(void) new ModifyIndex(alter, index_def,
					       old_index->def);
			index_def_guard.is_active = false;
		}
	}
	/*
	 * Create MoveIndex ops for the remaining indexes in the
	 * old space.
	 */
	alter_space_move_indexes(alter, iid + 1,
				 old_space->index_id_max + 1);
	alter_space_do(txn, alter);
	scoped_guard.is_active = false;
}

/* {{{ space truncate */

struct truncate_space {
	/** Space being truncated. */
	struct space *old_space;
	/** Space created as a result of truncation. */
	struct space *new_space;
	/** Trigger executed to commit truncation. */
	struct trigger on_commit;
	/** Trigger executed to rollback truncation. */
	struct trigger on_rollback;
};

/**
 * Call the engine specific method to commit truncation
 * and delete the old space.
*/ static void truncate_space_commit(struct trigger *trigger, void * /* event */) { struct truncate_space *truncate = (struct truncate_space *) trigger->data; space_commit_truncate(truncate->old_space, truncate->new_space); space_delete(truncate->old_space); } /** * Move the old space back to the cache and delete * the new space. */ static void truncate_space_rollback(struct trigger *trigger, void * /* event */) { struct truncate_space *truncate = (struct truncate_space *) trigger->data; if (space_cache_replace(truncate->old_space) != truncate->new_space) unreachable(); space_swap_triggers(truncate->new_space, truncate->old_space); space_delete(truncate->new_space); } /** * A trigger invoked on replace in space _truncate. * * In a nutshell, we truncate a space by replacing it with * a new empty space with the same definition and indexes. * Note, although we instantiate the new space before WAL * write, we don't propagate changes to the old space in * case a WAL write error happens and we have to rollback. * This is OK, because a WAL write error implies cascading * rollback of all transactions following this one. */ static void on_replace_dd_truncate(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; struct txn_stmt *stmt = txn_current_stmt(txn); txn_check_singlestatement_xc(txn, "Space _truncate"); struct tuple *new_tuple = stmt->new_tuple; if (new_tuple == NULL) { /* Space drop - nothing to do. */ return; } uint32_t space_id = tuple_field_u32_xc(new_tuple, BOX_TRUNCATE_FIELD_SPACE_ID); uint64_t truncate_count = tuple_field_u64_xc(new_tuple, BOX_TRUNCATE_FIELD_COUNT); struct space *old_space = space_cache_find_xc(space_id); if (stmt->row->type == IPROTO_INSERT) { /* * Space creation during initial recovery - * initialize truncate_count. */ old_space->truncate_count = truncate_count; return; } /* * System spaces use triggers to keep records in sync * with internal objects. 
Since space truncation doesn't * invoke triggers, we don't permit it for system spaces. */ if (space_is_system(old_space)) tnt_raise(ClientError, ER_TRUNCATE_SYSTEM_SPACE, space_name(old_space)); /* * Check if a write privilege was given, raise an error if not. */ access_check_space_xc(old_space, PRIV_W); /* * Truncate counter is updated - truncate the space. */ struct truncate_space *truncate = region_calloc_object_xc(&fiber()->gc, struct truncate_space); /* Create an empty copy of the old space. */ struct rlist key_list; space_dump_def(old_space, &key_list); struct space *new_space = space_new_xc(old_space->def, &key_list); new_space->truncate_count = truncate_count; auto space_guard = make_scoped_guard([=] { space_delete(new_space); }); /* Notify the engine about upcoming space truncation. */ space_prepare_truncate_xc(old_space, new_space); space_guard.is_active = false; /* Preserve the access control lists during truncate. */ memcpy(new_space->access, old_space->access, sizeof(old_space->access)); /* Truncate does not affect space sequence. */ new_space->sequence = old_space->sequence; /* * Replace the old space with the new one in the space * cache. Requests processed after this point will see * the space as truncated. */ if (space_cache_replace(new_space) != old_space) unreachable(); /* * Register the trigger that will commit or rollback * truncation depending on whether WAL write succeeds * or fails. 
*/ truncate->old_space = old_space; truncate->new_space = new_space; trigger_create(&truncate->on_commit, truncate_space_commit, truncate, NULL); txn_on_commit(txn, &truncate->on_commit); trigger_create(&truncate->on_rollback, truncate_space_rollback, truncate, NULL); txn_on_rollback(txn, &truncate->on_rollback); space_swap_triggers(truncate->new_space, truncate->old_space); } /* }}} */ /* {{{ access control */ bool user_has_data(struct user *user) { uint32_t uid = user->def->uid; uint32_t spaces[] = { BOX_SPACE_ID, BOX_FUNC_ID, BOX_SEQUENCE_ID, BOX_PRIV_ID, BOX_PRIV_ID }; /* * owner index id #1 for _space and _func and _priv. * For _priv also check that the user has no grants. */ uint32_t indexes[] = { 1, 1, 1, 1, 0 }; uint32_t count = sizeof(spaces)/sizeof(*spaces); for (uint32_t i = 0; i < count; i++) { if (space_has_data(spaces[i], indexes[i], uid)) return true; } if (! user_map_is_empty(&user->users)) return true; /* * If there was a role, the previous check would have * returned true. */ assert(user_map_is_empty(&user->roles)); return false; } /** * Supposedly a user may have many authentication mechanisms * defined, but for now we only support chap-sha1. Get * password of chap-sha1 from the _user space. */ void user_def_fill_auth_data(struct user_def *user, const char *auth_data) { uint8_t type = mp_typeof(*auth_data); if (type == MP_ARRAY || type == MP_NIL) { /* * Nothing useful. * MP_ARRAY is a special case since Lua arrays are * indistinguishable from tables, so an empty * table may well be encoded as an msgpack array. * Treat as no data. 
*/ return; } if (mp_typeof(*auth_data) != MP_MAP) { /** Prevent users from making silly mistakes */ tnt_raise(ClientError, ER_CREATE_USER, user->name, "invalid password format, " "use box.schema.user.passwd() to reset password"); } uint32_t mech_count = mp_decode_map(&auth_data); for (uint32_t i = 0; i < mech_count; i++) { if (mp_typeof(*auth_data) != MP_STR) { mp_next(&auth_data); mp_next(&auth_data); continue; } uint32_t len; const char *mech_name = mp_decode_str(&auth_data, &len); if (strncasecmp(mech_name, "chap-sha1", 9) != 0) { mp_next(&auth_data); continue; } const char *hash2_base64 = mp_decode_str(&auth_data, &len); if (len != 0 && len != SCRAMBLE_BASE64_SIZE) { tnt_raise(ClientError, ER_CREATE_USER, user->name, "invalid user password"); } if (user->uid == GUEST) { /** Guest user is permitted to have empty password */ if (strncmp(hash2_base64, CHAP_SHA1_EMPTY_PASSWORD, len)) tnt_raise(ClientError, ER_GUEST_USER_PASSWORD); } base64_decode(hash2_base64, len, user->hash2, sizeof(user->hash2)); break; } } static struct user_def * user_def_new_from_tuple(struct tuple *tuple) { uint32_t name_len; const char *name = tuple_field_str_xc(tuple, BOX_USER_FIELD_NAME, &name_len); if (name_len > BOX_NAME_MAX) { tnt_raise(ClientError, ER_CREATE_USER, tt_cstr(name, BOX_INVALID_NAME_MAX), "user name is too long"); } size_t size = user_def_sizeof(name_len); /* Use calloc: in case user password is empty, fill it with \0 */ struct user_def *user = (struct user_def *) malloc(size); if (user == NULL) tnt_raise(OutOfMemory, size, "malloc", "user"); auto def_guard = make_scoped_guard([=] { free(user); }); user->uid = tuple_field_u32_xc(tuple, BOX_USER_FIELD_ID); user->owner = tuple_field_u32_xc(tuple, BOX_USER_FIELD_UID); const char *user_type = tuple_field_cstr_xc(tuple, BOX_USER_FIELD_TYPE); user->type= schema_object_type(user_type); memcpy(user->name, name, name_len); user->name[name_len] = 0; if (user->type != SC_ROLE && user->type != SC_USER) { tnt_raise(ClientError, 
ER_CREATE_USER, user->name, "unknown user type"); } identifier_check_xc(user->name, name_len); /* * AUTH_DATA field in _user space should contain * chap-sha1 -> base64_encode(sha1(sha1(password), 0). * Check for trivial errors when a plain text * password is saved in this field instead. */ if (tuple_field_count(tuple) > BOX_USER_FIELD_AUTH_MECH_LIST) { const char *auth_data = tuple_field(tuple, BOX_USER_FIELD_AUTH_MECH_LIST); const char *tmp = auth_data; bool is_auth_empty; if (mp_typeof(*auth_data) == MP_ARRAY && mp_decode_array(&tmp) == 0) { is_auth_empty = true; } else if (mp_typeof(*auth_data) == MP_MAP && mp_decode_map(&tmp) == 0) { is_auth_empty = true; } else { is_auth_empty = false; } if (!is_auth_empty && user->type == SC_ROLE) tnt_raise(ClientError, ER_CREATE_ROLE, user->name, "authentication data can not be set for a "\ "role"); user_def_fill_auth_data(user, auth_data); } def_guard.is_active = false; return user; } static void user_cache_remove_user(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; struct txn_stmt *stmt = txn_last_stmt(txn); uint32_t uid = tuple_field_u32_xc(stmt->old_tuple ? stmt->old_tuple : stmt->new_tuple, BOX_USER_FIELD_ID); user_cache_delete(uid); } static void user_cache_alter_user(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; struct txn_stmt *stmt = txn_last_stmt(txn); struct user_def *user = user_def_new_from_tuple(stmt->new_tuple); auto def_guard = make_scoped_guard([=] { free(user); }); /* Can throw if, e.g. too many users. */ user_cache_replace(user); def_guard.is_active = false; } /** * A trigger invoked on replace in the user table. 
 */
static void
on_replace_dd_user(struct trigger * /* trigger */, void *event)
{
	struct txn *txn = (struct txn *) event;
	struct txn_stmt *stmt = txn_current_stmt(txn);
	txn_check_singlestatement_xc(txn, "Space _user");
	struct tuple *old_tuple = stmt->old_tuple;
	struct tuple *new_tuple = stmt->new_tuple;

	uint32_t uid = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
					  BOX_USER_FIELD_ID);
	struct user *old_user = user_by_id(uid);
	if (new_tuple != NULL && old_user == NULL) { /* INSERT */
		struct user_def *user = user_def_new_from_tuple(new_tuple);
		access_check_ddl(user->name, user->owner, SC_USER, PRIV_C,
				 true);
		auto def_guard = make_scoped_guard([=] { free(user); });
		/* Cache takes ownership of the definition. */
		(void) user_cache_replace(user);
		def_guard.is_active = false;
		struct trigger *on_rollback =
			txn_alter_trigger_new(user_cache_remove_user, NULL);
		txn_on_rollback(txn, on_rollback);
	} else if (new_tuple == NULL) { /* DELETE */
		access_check_ddl(old_user->def->name, old_user->def->owner,
				 SC_USER, PRIV_D, true);
		/* Can't drop guest or super user */
		if (uid <= (uint32_t) BOX_SYSTEM_USER_ID_MAX || uid == SUPER) {
			tnt_raise(ClientError, ER_DROP_USER,
				  old_user->def->name,
				  "the user or the role is a system");
		}
		/*
		 * Can only delete user if it has no spaces,
		 * no functions and no grants.
		 */
		if (user_has_data(old_user)) {
			tnt_raise(ClientError, ER_DROP_USER,
				  old_user->def->name,
				  "the user has objects");
		}
		struct trigger *on_commit =
			txn_alter_trigger_new(user_cache_remove_user, NULL);
		txn_on_commit(txn, on_commit);
	} else { /* UPDATE, REPLACE */
		assert(old_user != NULL && new_tuple != NULL);
		/*
		 * Allow change of user properties (name,
		 * password) but first check that the change is
		 * correct.
		 */
		struct user_def *user = user_def_new_from_tuple(new_tuple);
		access_check_ddl(user->name, user->uid, SC_USER, PRIV_A,
				 true);
		auto def_guard = make_scoped_guard([=] { free(user); });
		struct trigger *on_commit =
			txn_alter_trigger_new(user_cache_alter_user, NULL);
		txn_on_commit(txn, on_commit);
	}
}

/**
 * Get function identifiers from a tuple.
 *
 * @param tuple Tuple to get ids from.
 * @param[out] fid Function identifier.
 * @param[out] uid Owner identifier.
 */
static inline void
func_def_get_ids_from_tuple(const struct tuple *tuple, uint32_t *fid,
			    uint32_t *uid)
{
	*fid = tuple_field_u32_xc(tuple, BOX_FUNC_FIELD_ID);
	*uid = tuple_field_u32_xc(tuple, BOX_FUNC_FIELD_UID);
}

/**
 * Create a function definition from tuple. The caller owns the
 * returned object and must free() it.
 */
static struct func_def *
func_def_new_from_tuple(const struct tuple *tuple)
{
	uint32_t len;
	const char *name = tuple_field_str_xc(tuple, BOX_FUNC_FIELD_NAME,
					      &len);
	if (len > BOX_NAME_MAX)
		tnt_raise(ClientError, ER_CREATE_FUNCTION,
			  tt_cstr(name, BOX_INVALID_NAME_MAX),
			  "function name is too long");
	identifier_check_xc(name, len);
	struct func_def *def = (struct func_def *)
		malloc(func_def_sizeof(len));
	if (def == NULL)
		tnt_raise(OutOfMemory, func_def_sizeof(len), "malloc", "def");
	auto def_guard = make_scoped_guard([=] { free(def); });
	func_def_get_ids_from_tuple(tuple, &def->fid, &def->uid);
	memcpy(def->name, name, len);
	def->name[len] = 0;
	/* SETUID and LANGUAGE fields are optional in old tuples. */
	if (tuple_field_count(tuple) > BOX_FUNC_FIELD_SETUID)
		def->setuid = tuple_field_u32_xc(tuple,
						 BOX_FUNC_FIELD_SETUID);
	else
		def->setuid = false;
	if (tuple_field_count(tuple) > BOX_FUNC_FIELD_LANGUAGE) {
		const char *language =
			tuple_field_cstr_xc(tuple, BOX_FUNC_FIELD_LANGUAGE);
		def->language = STR2ENUM(func_language, language);
		if (def->language == func_language_MAX) {
			tnt_raise(ClientError, ER_FUNCTION_LANGUAGE,
				  language, def->name);
		}
	} else {
		/* Lua is the default. */
		def->language = FUNC_LANGUAGE_LUA;
	}
	def_guard.is_active = false;
	return def;
}

/** Remove a function from function cache */
static void
func_cache_remove_func(struct trigger * /* trigger */, void *event)
{
	struct txn_stmt *stmt = txn_last_stmt((struct txn *) event);
	uint32_t fid = tuple_field_u32_xc(stmt->old_tuple ?
					  stmt->old_tuple : stmt->new_tuple,
					  BOX_FUNC_FIELD_ID);
	func_cache_delete(fid);
}

/** Replace a function in the function cache */
static void
func_cache_replace_func(struct trigger * /* trigger */, void *event)
{
	struct txn_stmt *stmt = txn_last_stmt((struct txn*) event);
	struct func_def *def = func_def_new_from_tuple(stmt->new_tuple);
	auto def_guard = make_scoped_guard([=] { free(def); });
	func_cache_replace(def);
	def_guard.is_active = false;
}

/**
 * A trigger invoked on replace in a space containing
 * functions on which there were defined any grants.
 */
static void
on_replace_dd_func(struct trigger * /* trigger */, void *event)
{
	struct txn *txn = (struct txn *) event;
	txn_check_singlestatement_xc(txn, "Space _func");
	struct txn_stmt *stmt = txn_current_stmt(txn);
	struct tuple *old_tuple = stmt->old_tuple;
	struct tuple *new_tuple = stmt->new_tuple;
	uint32_t fid = tuple_field_u32_xc(old_tuple ? old_tuple : new_tuple,
					  BOX_FUNC_FIELD_ID);
	struct func *old_func = func_by_id(fid);
	if (new_tuple != NULL && old_func == NULL) { /* INSERT */
		struct func_def *def = func_def_new_from_tuple(new_tuple);
		access_check_ddl(def->name, def->uid, SC_FUNCTION, PRIV_C,
				 true);
		auto def_guard = make_scoped_guard([=] { free(def); });
		func_cache_replace(def);
		def_guard.is_active = false;
		struct trigger *on_rollback =
			txn_alter_trigger_new(func_cache_remove_func, NULL);
		txn_on_rollback(txn, on_rollback);
	} else if (new_tuple == NULL) { /* DELETE */
		uint32_t uid;
		func_def_get_ids_from_tuple(old_tuple, &fid, &uid);
		/*
		 * Can only delete func if you're the one
		 * who created it or a superuser.
		 */
		access_check_ddl(old_func->def->name, uid, SC_FUNCTION,
				 PRIV_D, true);
		/* Can only delete func if it has no grants. */
		if (schema_find_grants("function", old_func->def->fid)) {
			tnt_raise(ClientError, ER_DROP_FUNCTION,
				  (unsigned) old_func->def->uid,
				  "function has grants");
		}
		struct trigger *on_commit =
			txn_alter_trigger_new(func_cache_remove_func, NULL);
		txn_on_commit(txn, on_commit);
	} else { /* UPDATE, REPLACE */
		struct func_def *def = func_def_new_from_tuple(new_tuple);
		auto def_guard = make_scoped_guard([=] { free(def); });
		access_check_ddl(def->name, def->uid, SC_FUNCTION, PRIV_A,
				 true);
		struct trigger *on_commit =
			txn_alter_trigger_new(func_cache_replace_func, NULL);
		txn_on_commit(txn, on_commit);
	}
}

/** Create a collation definition from tuple. */
void
coll_def_new_from_tuple(const struct tuple *tuple, struct coll_def *def)
{
	memset(def, 0, sizeof(*def));
	uint32_t name_len, locale_len, type_len;
	def->id = tuple_field_u32_xc(tuple, BOX_COLLATION_FIELD_ID);
	def->name = tuple_field_str_xc(tuple, BOX_COLLATION_FIELD_NAME,
				       &name_len);
	def->name_len = name_len;
	def->owner_id = tuple_field_u32_xc(tuple, BOX_COLLATION_FIELD_UID);
	const char *type = tuple_field_str_xc(tuple,
					      BOX_COLLATION_FIELD_TYPE,
					      &type_len);
	def->type = STRN2ENUM(coll_type, type, type_len);
	if (def->type == coll_type_MAX)
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "unknown collation type");
	def->locale = tuple_field_str_xc(tuple, BOX_COLLATION_FIELD_LOCALE,
					 &locale_len);
	def->locale_len = locale_len;
	const char *options =
		tuple_field_with_type_xc(tuple, BOX_COLLATION_FIELD_OPTIONS,
					 MP_MAP);

	if (name_len > BOX_NAME_MAX)
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "collation name is too long");
	if (locale_len > BOX_NAME_MAX)
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "collation locale is too long");
	/* Locale is an optional argument and can be NULL. */
	if (locale_len > 0)
		identifier_check_xc(def->locale, locale_len);
	identifier_check_xc(def->name, name_len);

	assert(def->type == COLL_TYPE_ICU); /* no more defined now */
	if (opts_decode(&def->icu, coll_icu_opts_reg, &options,
			ER_WRONG_COLLATION_OPTIONS,
			BOX_COLLATION_FIELD_OPTIONS, NULL) != 0)
		diag_raise();

	/*
	 * Each *_MAX value means the option string did not match
	 * any known ICU attribute value - reject with a hint.
	 */
	if (def->icu.french_collation == coll_icu_on_off_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong french_collation option setting, "
			  "expected ON | OFF");
	}

	if (def->icu.alternate_handling == coll_icu_alternate_handling_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong alternate_handling option setting, "
			  "expected NON_IGNORABLE | SHIFTED");
	}

	if (def->icu.case_first == coll_icu_case_first_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong case_first option setting, "
			  "expected OFF | UPPER_FIRST | LOWER_FIRST");
	}

	if (def->icu.case_level == coll_icu_on_off_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong case_level option setting, "
			  "expected ON | OFF");
	}

	if (def->icu.normalization_mode == coll_icu_on_off_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong normalization_mode option setting, "
			  "expected ON | OFF");
	}

	if (def->icu.strength == coll_icu_strength_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong strength option setting, "
			  "expected PRIMARY | SECONDARY | "
			  "TERTIARY | QUATERNARY | IDENTICAL");
	}

	if (def->icu.numeric_collation == coll_icu_on_off_MAX) {
		tnt_raise(ClientError, ER_CANT_CREATE_COLLATION,
			  "ICU wrong numeric_collation option setting, "
			  "expected ON | OFF");
	}
}

/**
 * Rollback change in collation space.
 */
static void
coll_cache_rollback(struct trigger *trigger, void *event)
{
	struct coll *old_coll = (struct coll *)trigger->data;
	struct txn_stmt *stmt = txn_last_stmt((struct txn*) event);
	struct tuple *new_tuple = stmt->new_tuple;

	if (new_tuple != NULL) {
		/* Undo INSERT/REPLACE: drop the newly created collation. */
		uint32_t new_id = tuple_field_u32_xc(new_tuple,
						     BOX_COLLATION_FIELD_ID);
		struct coll *new_coll = coll_by_id(new_id);
		coll_cache_delete(new_coll);
		coll_delete(new_coll);
	}

	if (old_coll != NULL) {
		/* Put the replaced collation back into the cache. */
		struct coll *replaced;
		int rc = coll_cache_replace(old_coll, &replaced);
		assert(rc == 0 && replaced == NULL);
		(void)rc;
	}
}

/** Delete a collation. */
static void
coll_cache_delete_coll(struct trigger *trigger, void */* event */)
{
	struct coll *old_coll = (struct coll *)trigger->data;
	coll_delete(old_coll);
}

/**
 * A trigger invoked on replace in a space containing
 * collations that a user defined.
 */
static void
on_replace_dd_collation(struct trigger * /* trigger */, void *event)
{
	struct txn *txn = (struct txn *) event;
	struct txn_stmt *stmt = txn_current_stmt(txn);
	struct tuple *old_tuple = stmt->old_tuple;
	struct tuple *new_tuple = stmt->new_tuple;
	txn_check_singlestatement_xc(txn, "Space _collation");
	struct coll *old_coll = NULL;
	if (old_tuple != NULL) {
		/* TODO: Check that no index uses the collation */
		uint32_t old_id = tuple_field_u32_xc(old_tuple,
						     BOX_COLLATION_FIELD_ID);
		old_coll = coll_by_id(old_id);
		assert(old_coll != NULL);
		access_check_ddl(old_coll->name, old_coll->owner_id,
				 SC_COLLATION,
				 new_tuple == NULL ? PRIV_D : PRIV_A,
				 false);
		/* The old collation object is freed only on commit. */
		struct trigger *on_commit =
			txn_alter_trigger_new(coll_cache_delete_coll,
					      old_coll);
		txn_on_commit(txn, on_commit);
	}

	if (new_tuple == NULL) {
		/* Simple DELETE */
		assert(old_tuple != NULL);
		coll_cache_delete(old_coll);
		struct trigger *on_rollback =
			txn_alter_trigger_new(coll_cache_rollback, old_coll);
		txn_on_rollback(txn, on_rollback);
		return;
	}

	struct coll_def new_def;
	coll_def_new_from_tuple(new_tuple, &new_def);
	access_check_ddl(new_def.name, new_def.owner_id, SC_COLLATION,
			 old_tuple == NULL ? PRIV_C : PRIV_A, false);
	struct coll *new_coll = coll_new(&new_def);
	if (new_coll == NULL)
		diag_raise();
	auto def_guard = make_scoped_guard([=] { coll_delete(new_coll); });

	struct coll *replaced;
	if (coll_cache_replace(new_coll, &replaced) != 0)
		diag_raise();
	if (replaced == NULL && old_coll != NULL) {
		/*
		 * ID of a collation was changed.
		 * Remove collation by old ID.
		 */
		coll_cache_delete(old_coll);
	}

	struct trigger *on_rollback =
		txn_alter_trigger_new(coll_cache_rollback, old_coll);
	txn_on_rollback(txn, on_rollback);
	def_guard.is_active = false;
}

/**
 * Create a privilege definition from tuple.
*/ void priv_def_create_from_tuple(struct priv_def *priv, struct tuple *tuple) { priv->grantor_id = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_ID); priv->grantee_id = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_UID); const char *object_type = tuple_field_cstr_xc(tuple, BOX_PRIV_FIELD_OBJECT_TYPE); priv->object_id = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_OBJECT_ID); priv->object_type = schema_object_type(object_type); if (priv->object_type == SC_UNKNOWN) { tnt_raise(ClientError, ER_UNKNOWN_SCHEMA_OBJECT, object_type); } priv->access = tuple_field_u32_xc(tuple, BOX_PRIV_FIELD_ACCESS); } /* * This function checks that: * - a privilege is granted from an existing user to an existing * user on an existing object * - the grantor has the right to grant (is the owner of the object) * * @XXX Potentially there is a race in case of rollback, since an * object can be changed during WAL write. * In the future we must protect grant/revoke with a logical lock. */ static void priv_def_check(struct priv_def *priv, enum priv_type priv_type) { struct user *grantor = user_find_xc(priv->grantor_id); /* May be a role */ struct user *grantee = user_by_id(priv->grantee_id); if (grantee == NULL) { tnt_raise(ClientError, ER_NO_SUCH_USER, int2str(priv->grantee_id)); } const char *name = schema_find_name(priv->object_type, priv->object_id); access_check_ddl(name, grantor->def->uid, priv->object_type, priv_type, false); switch (priv->object_type) { case SC_UNIVERSE: if (grantor->def->uid != ADMIN) { tnt_raise(AccessDeniedError, priv_name(priv_type), schema_object_name(SC_UNIVERSE), name, grantor->def->name); } break; case SC_SPACE: { struct space *space = space_cache_find_xc(priv->object_id); if (space->def->uid != grantor->def->uid && grantor->def->uid != ADMIN) { tnt_raise(AccessDeniedError, priv_name(priv_type), schema_object_name(SC_SPACE), name, grantor->def->name); } break; } case SC_FUNCTION: { struct func *func = func_cache_find(priv->object_id); if (func->def->uid != grantor->def->uid && 
grantor->def->uid != ADMIN) { tnt_raise(AccessDeniedError, priv_name(priv_type), schema_object_name(SC_FUNCTION), name, grantor->def->name); } break; } case SC_SEQUENCE: { struct sequence *seq = sequence_cache_find(priv->object_id); if (seq->def->uid != grantor->def->uid && grantor->def->uid != ADMIN) { tnt_raise(AccessDeniedError, priv_name(priv_type), schema_object_name(SC_SEQUENCE), name, grantor->def->name); } break; } case SC_ROLE: { struct user *role = user_by_id(priv->object_id); if (role == NULL || role->def->type != SC_ROLE) { tnt_raise(ClientError, ER_NO_SUCH_ROLE, role ? role->def->name : int2str(priv->object_id)); } /* * Only the creator of the role can grant or revoke it. * Everyone can grant 'PUBLIC' role. */ if (role->def->owner != grantor->def->uid && grantor->def->uid != ADMIN && (role->def->uid != PUBLIC || priv->access != PRIV_X)) { tnt_raise(AccessDeniedError, priv_name(priv_type), schema_object_name(SC_ROLE), name, grantor->def->name); } /* Not necessary to do during revoke, but who cares. */ role_check(grantee, role); } default: break; } if (priv->access == 0) { tnt_raise(ClientError, ER_GRANT, "the grant tuple has no privileges"); } } /** * Update a metadata cache object with the new access * data. */ static void grant_or_revoke(struct priv_def *priv) { struct user *grantee = user_by_id(priv->grantee_id); if (grantee == NULL) return; if (priv->object_type == SC_ROLE) { struct user *role = user_by_id(priv->object_id); if (role == NULL || role->def->type != SC_ROLE) return; if (priv->access) role_grant(grantee, role); else role_revoke(grantee, role); } else { priv_grant(grantee, priv); } } /** A trigger called on rollback of grant, or on commit of revoke. */ static void revoke_priv(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; struct txn_stmt *stmt = txn_last_stmt(txn); struct tuple *tuple = (stmt->new_tuple ? 
stmt->new_tuple : stmt->old_tuple); struct priv_def priv; priv_def_create_from_tuple(&priv, tuple); /* * Access to the object has been removed altogether so * there should be no grants at all. If only some grants * were removed, modify_priv trigger would have been * invoked. */ priv.access = 0; grant_or_revoke(&priv); } /** A trigger called on rollback of grant, or on commit of revoke. */ static void modify_priv(struct trigger * /* trigger */, void *event) { struct txn_stmt *stmt = txn_last_stmt((struct txn *) event); struct priv_def priv; priv_def_create_from_tuple(&priv, stmt->new_tuple); grant_or_revoke(&priv); } /** * A trigger invoked on replace in the space containing * all granted privileges. */ static void on_replace_dd_priv(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; txn_check_singlestatement_xc(txn, "Space _priv"); struct txn_stmt *stmt = txn_current_stmt(txn); struct tuple *old_tuple = stmt->old_tuple; struct tuple *new_tuple = stmt->new_tuple; struct priv_def priv; if (new_tuple != NULL && old_tuple == NULL) { /* grant */ priv_def_create_from_tuple(&priv, new_tuple); /* * Add system privileges explicitly to the * universe grant issued prior to 1.7.7 in * case upgrade script has not been invoked. 
*/ if (priv.object_type == SC_UNIVERSE && dd_version_id < version_id(1, 7, 7)) { priv.access |= PRIV_S; priv.access |= PRIV_U; /* * For admin we have to set his privileges * explicitly because he needs them in upgrade and * bootstrap script */ if (priv.grantor_id == ADMIN) { priv.access = admin_credentials.universal_access; } } priv_def_check(&priv, PRIV_GRANT); grant_or_revoke(&priv); struct trigger *on_rollback = txn_alter_trigger_new(revoke_priv, NULL); txn_on_rollback(txn, on_rollback); } else if (new_tuple == NULL) { /* revoke */ assert(old_tuple); priv_def_create_from_tuple(&priv, old_tuple); priv_def_check(&priv, PRIV_REVOKE); struct trigger *on_commit = txn_alter_trigger_new(revoke_priv, NULL); txn_on_commit(txn, on_commit); } else { /* modify */ priv_def_create_from_tuple(&priv, new_tuple); priv_def_check(&priv, PRIV_GRANT); struct trigger *on_commit = txn_alter_trigger_new(modify_priv, NULL); txn_on_commit(txn, on_commit); } } /* }}} access control */ /* {{{ cluster configuration */ /** * This trigger is invoked only upon initial recovery, when * reading contents of the system spaces from the snapshot. * * Before a cluster is assigned a cluster id it's read only. * Since during recovery state of the WAL doesn't * concern us, we can safely change the cluster id in before-replace * event, not in after-replace event. */ static void on_replace_dd_schema(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; txn_check_singlestatement_xc(txn, "Space _schema"); struct txn_stmt *stmt = txn_current_stmt(txn); struct tuple *old_tuple = stmt->old_tuple; struct tuple *new_tuple = stmt->new_tuple; const char *key = tuple_field_cstr_xc(new_tuple ? 
new_tuple : old_tuple, BOX_SCHEMA_FIELD_KEY); if (strcmp(key, "cluster") == 0) { if (new_tuple == NULL) tnt_raise(ClientError, ER_REPLICASET_UUID_IS_RO); tt_uuid uu; tuple_field_uuid_xc(new_tuple, BOX_CLUSTER_FIELD_UUID, &uu); REPLICASET_UUID = uu; } else if (strcmp(key, "version") == 0) { if (new_tuple != NULL) { uint32_t major, minor, patch; if (tuple_field_u32(new_tuple, 1, &major) != 0 || tuple_field_u32(new_tuple, 2, &minor) != 0) tnt_raise(ClientError, ER_WRONG_DD_VERSION); /* Version can be major.minor with no patch. */ if (tuple_field_u32(new_tuple, 3, &patch) != 0) patch = 0; dd_version_id = version_id(major, minor, patch); } else { assert(old_tuple != NULL); /* * _schema:delete({'version'}) for * example, for box.internal.bootstrap(). */ dd_version_id = tarantool_version_id(); } } } /** * A record with id of the new instance has been synced to the * write ahead log. Update the cluster configuration cache * with it. */ static void on_commit_dd_cluster(struct trigger *trigger, void *event) { (void) trigger; struct txn_stmt *stmt = txn_last_stmt((struct txn *) event); struct tuple *new_tuple = stmt->new_tuple; struct tuple *old_tuple = stmt->old_tuple; if (new_tuple == NULL) { struct tt_uuid old_uuid; tuple_field_uuid_xc(stmt->old_tuple, BOX_CLUSTER_FIELD_UUID, &old_uuid); struct replica *replica = replica_by_uuid(&old_uuid); assert(replica != NULL); replica_clear_id(replica); return; } else if (old_tuple != NULL) { return; /* nothing to change */ } uint32_t id = tuple_field_u32_xc(new_tuple, BOX_CLUSTER_FIELD_ID); tt_uuid uuid; tuple_field_uuid_xc(new_tuple, BOX_CLUSTER_FIELD_UUID, &uuid); struct replica *replica = replica_by_uuid(&uuid); if (replica != NULL) { replica_set_id(replica, id); } else { try { replica = replicaset_add(id, &uuid); /* Can't throw exceptions from on_commit trigger */ } catch(Exception *e) { panic("Can't register replica: %s", e->errmsg); } } } /** * A trigger invoked on replace in the space _cluster, * which contains cluster 
configuration. * * This space is modified by JOIN command in IPROTO * protocol. * * The trigger updates the cluster configuration cache * with uuid of the newly joined instance. * * During recovery, it acts the same way, loading identifiers * of all instances into the cache. Instance globally unique * identifiers are used to keep track of cluster configuration, * so that a replica that previously joined a replica set can * follow updates, and a replica that belongs to a different * replica set can not by mistake join/follow another replica * set without first being reset (emptied). */ static void on_replace_dd_cluster(struct trigger *trigger, void *event) { (void) trigger; struct txn *txn = (struct txn *) event; txn_check_singlestatement_xc(txn, "Space _cluster"); struct txn_stmt *stmt = txn_current_stmt(txn); struct tuple *old_tuple = stmt->old_tuple; struct tuple *new_tuple = stmt->new_tuple; if (new_tuple != NULL) { /* Insert or replace */ /* Check fields */ uint32_t replica_id = tuple_field_u32_xc(new_tuple, BOX_CLUSTER_FIELD_ID); replica_check_id(replica_id); tt_uuid replica_uuid; tuple_field_uuid_xc(new_tuple, BOX_CLUSTER_FIELD_UUID, &replica_uuid); if (tt_uuid_is_nil(&replica_uuid)) tnt_raise(ClientError, ER_INVALID_UUID, tt_uuid_str(&replica_uuid)); if (old_tuple != NULL) { /* * Forbid changes of UUID for a registered instance: * it requires an extra effort to keep _cluster * in sync with appliers and relays. */ tt_uuid old_uuid; tuple_field_uuid_xc(old_tuple, BOX_CLUSTER_FIELD_UUID, &old_uuid); if (!tt_uuid_is_equal(&replica_uuid, &old_uuid)) { tnt_raise(ClientError, ER_UNSUPPORTED, "Space _cluster", "updates of instance uuid"); } } } else { /* * Don't allow deletion of the record for this instance * from _cluster. 
*/ assert(old_tuple != NULL); uint32_t replica_id = tuple_field_u32_xc(old_tuple, BOX_CLUSTER_FIELD_ID); replica_check_id(replica_id); } struct trigger *on_commit = txn_alter_trigger_new(on_commit_dd_cluster, NULL); txn_on_commit(txn, on_commit); } /* }}} cluster configuration */ /* {{{ sequence */ /** Create a sequence definition from a tuple. */ static struct sequence_def * sequence_def_new_from_tuple(struct tuple *tuple, uint32_t errcode) { uint32_t name_len; const char *name = tuple_field_str_xc(tuple, BOX_USER_FIELD_NAME, &name_len); if (name_len > BOX_NAME_MAX) { tnt_raise(ClientError, errcode, tt_cstr(name, BOX_INVALID_NAME_MAX), "sequence name is too long"); } identifier_check_xc(name, name_len); size_t sz = sequence_def_sizeof(name_len); struct sequence_def *def = (struct sequence_def *) malloc(sz); if (def == NULL) tnt_raise(OutOfMemory, sz, "malloc", "sequence"); auto def_guard = make_scoped_guard([=] { free(def); }); memcpy(def->name, name, name_len); def->name[name_len] = '\0'; def->id = tuple_field_u32_xc(tuple, BOX_SEQUENCE_FIELD_ID); def->uid = tuple_field_u32_xc(tuple, BOX_SEQUENCE_FIELD_UID); def->step = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_STEP); def->min = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_MIN); def->max = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_MAX); def->start = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_START); def->cache = tuple_field_i64_xc(tuple, BOX_SEQUENCE_FIELD_CACHE); def->cycle = tuple_field_bool_xc(tuple, BOX_SEQUENCE_FIELD_CYCLE); if (def->step == 0) tnt_raise(ClientError, errcode, def->name, "step option must be non-zero"); if (def->min > def->max) tnt_raise(ClientError, errcode, def->name, "max must be greater than or equal to min"); if (def->start < def->min || def->start > def->max) tnt_raise(ClientError, errcode, def->name, "start must be between min and max"); def_guard.is_active = false; return def; } /** Argument passed to on_commit_dd_sequence() trigger. 
*/ struct alter_sequence { /** Trigger invoked on commit in the _sequence space. */ struct trigger on_commit; /** Trigger invoked on rollback in the _sequence space. */ struct trigger on_rollback; /** Old sequence definition or NULL if create. */ struct sequence_def *old_def; /** New sequence defitition or NULL if drop. */ struct sequence_def *new_def; }; /** * Trigger invoked on commit in the _sequence space. */ static void on_commit_dd_sequence(struct trigger *trigger, void *event) { struct txn *txn = (struct txn *) event; struct alter_sequence *alter = (struct alter_sequence *) trigger->data; if (alter->new_def != NULL && alter->old_def != NULL) { /* Alter a sequence. */ sequence_cache_replace(alter->new_def); } else if (alter->new_def == NULL) { /* Drop a sequence. */ sequence_cache_delete(alter->old_def->id); } trigger_run_xc(&on_alter_sequence, txn_last_stmt(txn)); } /** * Trigger invoked on rollback in the _sequence space. */ static void on_rollback_dd_sequence(struct trigger *trigger, void * /* event */) { struct alter_sequence *alter = (struct alter_sequence *) trigger->data; if (alter->new_def != NULL && alter->old_def == NULL) { /* Rollback creation of a sequence. */ sequence_cache_delete(alter->new_def->id); } } /** * A trigger invoked on replace in space _sequence. * Used to alter a sequence definition. 
*/ static void on_replace_dd_sequence(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; txn_check_singlestatement_xc(txn, "Space _sequence"); struct txn_stmt *stmt = txn_current_stmt(txn); struct tuple *old_tuple = stmt->old_tuple; struct tuple *new_tuple = stmt->new_tuple; struct alter_sequence *alter = region_calloc_object_xc(&fiber()->gc, struct alter_sequence); struct sequence_def *new_def = NULL; auto def_guard = make_scoped_guard([=] { free(new_def); }); if (old_tuple == NULL && new_tuple != NULL) { /* INSERT */ new_def = sequence_def_new_from_tuple(new_tuple, ER_CREATE_SEQUENCE); assert(sequence_by_id(new_def->id) == NULL); sequence_cache_replace(new_def); alter->new_def = new_def; } else if (old_tuple != NULL && new_tuple == NULL) { /* DELETE */ uint32_t id = tuple_field_u32_xc(old_tuple, BOX_SEQUENCE_DATA_FIELD_ID); struct sequence *seq = sequence_by_id(id); assert(seq != NULL); access_check_ddl(seq->def->name, seq->def->uid, SC_SEQUENCE, PRIV_D, false); if (space_has_data(BOX_SEQUENCE_DATA_ID, 0, id)) tnt_raise(ClientError, ER_DROP_SEQUENCE, seq->def->name, "the sequence has data"); if (space_has_data(BOX_SPACE_SEQUENCE_ID, 1, id)) tnt_raise(ClientError, ER_DROP_SEQUENCE, seq->def->name, "the sequence is in use"); if (schema_find_grants("sequence", seq->def->id)) tnt_raise(ClientError, ER_DROP_SEQUENCE, seq->def->name, "the sequence has grants"); alter->old_def = seq->def; } else { /* UPDATE */ new_def = sequence_def_new_from_tuple(new_tuple, ER_ALTER_SEQUENCE); struct sequence *seq = sequence_by_id(new_def->id); assert(seq != NULL); access_check_ddl(seq->def->name, seq->def->uid, SC_SEQUENCE, PRIV_A, false); alter->old_def = seq->def; alter->new_def = new_def; } def_guard.is_active = false; trigger_create(&alter->on_commit, on_commit_dd_sequence, alter, NULL); txn_on_commit(txn, &alter->on_commit); trigger_create(&alter->on_rollback, on_rollback_dd_sequence, alter, NULL); txn_on_rollback(txn, &alter->on_rollback); } 
/** * A trigger invoked on replace in space _sequence_data. * Used to update a sequence value. */ static void on_replace_dd_sequence_data(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; struct txn_stmt *stmt = txn_current_stmt(txn); struct tuple *old_tuple = stmt->old_tuple; struct tuple *new_tuple = stmt->new_tuple; uint32_t id = tuple_field_u32_xc(old_tuple ?: new_tuple, BOX_SEQUENCE_DATA_FIELD_ID); struct sequence *seq = sequence_cache_find(id); if (seq == NULL) diag_raise(); if (new_tuple != NULL) { /* INSERT, UPDATE */ int64_t value = tuple_field_i64_xc(new_tuple, BOX_SEQUENCE_DATA_FIELD_VALUE); if (sequence_set(seq, value) != 0) diag_raise(); } else { /* DELETE */ sequence_reset(seq); } } /** * Run the triggers registered on commit of a change in _space. */ static void on_commit_dd_space_sequence(struct trigger *trigger, void * /* event */) { struct space *space = (struct space *) trigger->data; trigger_run_xc(&on_alter_space, space); } /** * A trigger invoked on replace in space _space_sequence. * Used to update space <-> sequence mapping. */ static void on_replace_dd_space_sequence(struct trigger * /* trigger */, void *event) { struct txn *txn = (struct txn *) event; txn_check_singlestatement_xc(txn, "Space _space_sequence"); struct txn_stmt *stmt = txn_current_stmt(txn); struct tuple *tuple = stmt->new_tuple ? stmt->new_tuple : stmt->old_tuple; uint32_t space_id = tuple_field_u32_xc(tuple, BOX_SPACE_SEQUENCE_FIELD_ID); uint32_t sequence_id = tuple_field_u32_xc(tuple, BOX_SPACE_SEQUENCE_FIELD_SEQUENCE_ID); bool is_generated = tuple_field_bool_xc(tuple, BOX_SPACE_SEQUENCE_FIELD_IS_GENERATED); struct space *space = space_cache_find_xc(space_id); struct sequence *seq = sequence_cache_find(sequence_id); enum priv_type priv_type = stmt->new_tuple ? PRIV_C : PRIV_D; if (stmt->new_tuple && stmt->old_tuple) priv_type = PRIV_A; /* Check we have the correct access type on the sequence. 
* */ access_check_ddl(seq->def->name, seq->def->uid, SC_SEQUENCE, priv_type, false); /** Check we have alter access on space. */ access_check_ddl(space->def->name, space->def->uid, SC_SPACE, PRIV_A, false); struct trigger *on_commit = txn_alter_trigger_new(on_commit_dd_space_sequence, space); txn_on_commit(txn, on_commit); if (stmt->new_tuple != NULL) { /* INSERT, UPDATE */ struct index *pk = index_find_xc(space, 0); index_def_check_sequence(pk->def, space_name(space)); if (seq->is_generated) { tnt_raise(ClientError, ER_ALTER_SPACE, space_name(space), "can not attach generated sequence"); } seq->is_generated = is_generated; space->sequence = seq; } else { /* DELETE */ assert(space->sequence == seq); space->sequence = NULL; } } /* }}} sequence */ static void unlock_after_dd(struct trigger *trigger, void *event) { (void) trigger; (void) event; latch_unlock(&schema_lock); /* * There can be a some count of other latch awaiting fibers. All of * these fibers should continue their job before current fiber fires * next request. It is important especially for replication - if some * rows are applied out of order then lsn order will be broken. This * can be done with locking latch one more time - it guarantees that * all "queued" fibers did their job before current fiber wakes next * time. If there is no waiting fibers then locking will be done without * any yields. */ latch_lock(&schema_lock); latch_unlock(&schema_lock); } static void lock_before_dd(struct trigger *trigger, void *event) { (void) trigger; if (fiber() == latch_owner(&schema_lock)) return; struct txn *txn = (struct txn *)event; /* * This trigger is executed before any check and may yield * on the latch lock. But a yield in a non-autocommit * memtx transaction will roll it back silently, rather * than produce an error, which is very confusing. * So don't try to lock a latch if there is * a multistatement transaction. 
*/ txn_check_singlestatement_xc(txn, "DDL"); latch_lock(&schema_lock); struct trigger *on_commit = txn_alter_trigger_new(unlock_after_dd, NULL); txn_on_commit(txn, on_commit); struct trigger *on_rollback = txn_alter_trigger_new(unlock_after_dd, NULL); txn_on_rollback(txn, on_rollback); } struct trigger alter_space_on_replace_space = { RLIST_LINK_INITIALIZER, on_replace_dd_space, NULL, NULL }; struct trigger alter_space_on_replace_index = { RLIST_LINK_INITIALIZER, on_replace_dd_index, NULL, NULL }; struct trigger on_replace_truncate = { RLIST_LINK_INITIALIZER, on_replace_dd_truncate, NULL, NULL }; struct trigger on_replace_schema = { RLIST_LINK_INITIALIZER, on_replace_dd_schema, NULL, NULL }; struct trigger on_replace_user = { RLIST_LINK_INITIALIZER, on_replace_dd_user, NULL, NULL }; struct trigger on_replace_func = { RLIST_LINK_INITIALIZER, on_replace_dd_func, NULL, NULL }; struct trigger on_replace_collation = { RLIST_LINK_INITIALIZER, on_replace_dd_collation, NULL, NULL }; struct trigger on_replace_priv = { RLIST_LINK_INITIALIZER, on_replace_dd_priv, NULL, NULL }; struct trigger on_replace_cluster = { RLIST_LINK_INITIALIZER, on_replace_dd_cluster, NULL, NULL }; struct trigger on_replace_sequence = { RLIST_LINK_INITIALIZER, on_replace_dd_sequence, NULL, NULL }; struct trigger on_replace_sequence_data = { RLIST_LINK_INITIALIZER, on_replace_dd_sequence_data, NULL, NULL }; struct trigger on_replace_space_sequence = { RLIST_LINK_INITIALIZER, on_replace_dd_space_sequence, NULL, NULL }; struct trigger on_stmt_begin_space = { RLIST_LINK_INITIALIZER, lock_before_dd, NULL, NULL }; struct trigger on_stmt_begin_index = { RLIST_LINK_INITIALIZER, lock_before_dd, NULL, NULL }; struct trigger on_stmt_begin_truncate = { RLIST_LINK_INITIALIZER, lock_before_dd, NULL, NULL }; /* vim: set foldmethod=marker */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_format.c0000664000000000000000000003401513306565107020443 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see 
AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tuple_format.h" /** Global table of tuple formats */ struct tuple_format **tuple_formats; static intptr_t recycled_format_ids = FORMAT_ID_NIL; static uint32_t formats_size = 0, formats_capacity = 0; static const struct tuple_field tuple_field_default = { FIELD_TYPE_ANY, TUPLE_OFFSET_SLOT_NIL, false, false, }; /** * Extract all available type info from keys and field * definitions. 
*/ static int tuple_format_create(struct tuple_format *format, struct key_def * const *keys, uint16_t key_count, const struct field_def *fields, uint32_t field_count) { format->min_field_count = tuple_format_min_field_count(keys, key_count, fields, field_count); if (format->field_count == 0) { format->field_map_size = 0; return 0; } /* Initialize defined fields */ for (uint32_t i = 0; i < field_count; ++i) { format->fields[i].is_key_part = false; format->fields[i].type = fields[i].type; format->fields[i].offset_slot = TUPLE_OFFSET_SLOT_NIL; format->fields[i].is_nullable = fields[i].is_nullable; } /* Initialize remaining fields */ for (uint32_t i = field_count; i < format->field_count; i++) format->fields[i] = tuple_field_default; int current_slot = 0; /* extract field type info */ for (uint16_t key_no = 0; key_no < key_count; ++key_no) { const struct key_def *key_def = keys[key_no]; bool is_sequential = key_def_is_sequential(key_def); const struct key_part *part = key_def->parts; const struct key_part *parts_end = part + key_def->part_count; for (; part < parts_end; part++) { assert(part->fieldno < format->field_count); struct tuple_field *field = &format->fields[part->fieldno]; if (part->fieldno >= field_count) { field->is_nullable = part->is_nullable; } else if (field->is_nullable != part->is_nullable) { diag_set(ClientError, ER_NULLABLE_MISMATCH, part->fieldno + TUPLE_INDEX_BASE, field->is_nullable ? "nullable" : "not nullable", part->is_nullable ? "nullable" : "not nullable"); return -1; } /* * Check that there are no conflicts * between index part types and space * fields. If a part type is compatible * with field's one, then the part type is * more strict and the part type must be * used in tuple_format. */ if (field_type1_contains_type2(field->type, part->type)) { field->type = part->type; } else if (! 
field_type1_contains_type2(part->type, field->type)) { const char *name; int fieldno = part->fieldno + TUPLE_INDEX_BASE; if (part->fieldno >= field_count) { name = tt_sprintf("%d", fieldno); } else { const struct field_def *def = &fields[part->fieldno]; name = tt_sprintf("'%s'", def->name); } int errcode; if (! field->is_key_part) errcode = ER_FORMAT_MISMATCH_INDEX_PART; else errcode = ER_INDEX_PART_TYPE_MISMATCH; diag_set(ClientError, errcode, name, field_type_strs[field->type], field_type_strs[part->type]); return -1; } field->is_key_part = true; /* * In the tuple, store only offsets necessary * to access fields of non-sequential keys. * First field is always simply accessible, * so we don't store an offset for it. */ if (field->offset_slot == TUPLE_OFFSET_SLOT_NIL && is_sequential == false && part->fieldno > 0) { field->offset_slot = --current_slot; } } } assert(format->fields[0].offset_slot == TUPLE_OFFSET_SLOT_NIL); size_t field_map_size = -current_slot * sizeof(uint32_t); if (field_map_size + format->extra_size > UINT16_MAX) { /** tuple->data_offset is 16 bits */ diag_set(ClientError, ER_INDEX_FIELD_COUNT_LIMIT, -current_slot); return -1; } format->field_map_size = field_map_size; return 0; } static int tuple_format_register(struct tuple_format *format) { if (recycled_format_ids != FORMAT_ID_NIL) { format->id = (uint16_t) recycled_format_ids; recycled_format_ids = (intptr_t) tuple_formats[recycled_format_ids]; } else { if (formats_size == formats_capacity) { uint32_t new_capacity = formats_capacity ? 
formats_capacity * 2 : 16; struct tuple_format **formats; formats = (struct tuple_format **) realloc(tuple_formats, new_capacity * sizeof(tuple_formats[0])); if (formats == NULL) { diag_set(OutOfMemory, sizeof(struct tuple_format), "malloc", "tuple_formats"); return -1; } formats_capacity = new_capacity; tuple_formats = formats; } if (formats_size == FORMAT_ID_MAX + 1) { diag_set(ClientError, ER_TUPLE_FORMAT_LIMIT, (unsigned) formats_capacity); return -1; } format->id = formats_size++; } tuple_formats[format->id] = format; return 0; } static void tuple_format_deregister(struct tuple_format *format) { if (format->id == FORMAT_ID_NIL) return; tuple_formats[format->id] = (struct tuple_format *) recycled_format_ids; recycled_format_ids = format->id; format->id = FORMAT_ID_NIL; } static struct tuple_format * tuple_format_alloc(struct key_def * const *keys, uint16_t key_count, uint32_t space_field_count, struct tuple_dictionary *dict) { uint32_t index_field_count = 0; /* find max max field no */ for (uint16_t key_no = 0; key_no < key_count; ++key_no) { const struct key_def *key_def = keys[key_no]; const struct key_part *part = key_def->parts; const struct key_part *pend = part + key_def->part_count; for (; part < pend; part++) { index_field_count = MAX(index_field_count, part->fieldno + 1); } } uint32_t field_count = MAX(space_field_count, index_field_count); uint32_t total = sizeof(struct tuple_format) + field_count * sizeof(struct tuple_field); struct tuple_format *format = (struct tuple_format *) malloc(total); if (format == NULL) { diag_set(OutOfMemory, sizeof(struct tuple_format), "malloc", "tuple format"); return NULL; } if (dict == NULL) { assert(space_field_count == 0); format->dict = tuple_dictionary_new(NULL, 0); if (format->dict == NULL) { free(format); return NULL; } } else { format->dict = dict; tuple_dictionary_ref(dict); } format->refs = 0; format->id = FORMAT_ID_NIL; format->field_count = field_count; format->index_field_count = index_field_count; 
	format->exact_field_count = 0;
	format->min_field_count = 0;
	return format;
}

/** Free tuple format resources, doesn't unregister. */
static inline void
tuple_format_destroy(struct tuple_format *format)
{
	tuple_dictionary_unref(format->dict);
}

void
tuple_format_delete(struct tuple_format *format)
{
	tuple_format_deregister(format);
	tuple_format_destroy(format);
	free(format);
}

struct tuple_format *
tuple_format_new(struct tuple_format_vtab *vtab, struct key_def * const *keys,
		 uint16_t key_count, uint16_t extra_size,
		 const struct field_def *space_fields,
		 uint32_t space_field_count, struct tuple_dictionary *dict)
{
	assert((dict == NULL && space_field_count == 0) ||
	       (dict != NULL && space_field_count == dict->name_count));
	struct tuple_format *format =
		tuple_format_alloc(keys, key_count, space_field_count, dict);
	if (format == NULL)
		return NULL;
	format->vtab = *vtab;
	format->extra_size = extra_size;
	if (tuple_format_register(format) < 0) {
		/*
		 * Registration failed: the format never got an id,
		 * so destroy + free directly (no deregister needed).
		 */
		tuple_format_destroy(format);
		free(format);
		return NULL;
	}
	if (tuple_format_create(format, keys, key_count, space_fields,
				space_field_count) < 0) {
		/* Already registered — full delete deregisters it. */
		tuple_format_delete(format);
		return NULL;
	}
	return format;
}

bool
tuple_format1_can_store_format2_tuples(const struct tuple_format *format1,
				       const struct tuple_format *format2)
{
	if (format1->exact_field_count != format2->exact_field_count)
		return false;
	for (uint32_t i = 0; i < format1->field_count; ++i) {
		const struct tuple_field *field1 = &format1->fields[i];
		/*
		 * The field has a data type in format1, but has
		 * no data type in format2.
		 */
		if (i >= format2->field_count) {
			/*
			 * The field can get a name added
			 * for it, and this doesn't require a data
			 * check.
			 * If the field is defined as not
			 * nullable, however, we need a data
			 * check, since old data may contain
			 * NULLs or miss the subject field.
			 */
			if (field1->type == FIELD_TYPE_ANY &&
			    field1->is_nullable)
				continue;
			else
				return false;
		}
		const struct tuple_field *field2 = &format2->fields[i];
		if (! field_type1_contains_type2(field1->type, field2->type))
			return false;
		/*
		 * Do not allow transition from nullable to non-nullable:
		 * it would require a check of all data in the space.
		 */
		if (field2->is_nullable && !field1->is_nullable)
			return false;
	}
	return true;
}

bool
tuple_format_eq(const struct tuple_format *a, const struct tuple_format *b)
{
	if (a->field_map_size != b->field_map_size ||
	    a->field_count != b->field_count)
		return false;
	for (uint32_t i = 0; i < a->field_count; ++i) {
		if (a->fields[i].type != b->fields[i].type ||
		    a->fields[i].offset_slot != b->fields[i].offset_slot)
			return false;
		if (a->fields[i].is_key_part != b->fields[i].is_key_part)
			return false;
		if (a->fields[i].is_nullable != b->fields[i].is_nullable)
			return false;
	}
	return true;
}

struct tuple_format *
tuple_format_dup(struct tuple_format *src)
{
	/* The fields[] array is allocated inline with the header. */
	uint32_t total = sizeof(struct tuple_format) +
			 src->field_count * sizeof(struct tuple_field);
	struct tuple_format *format = (struct tuple_format *) malloc(total);
	if (format == NULL) {
		diag_set(OutOfMemory, total, "malloc", "tuple format");
		return NULL;
	}
	memcpy(format, src, total);
	tuple_dictionary_ref(format->dict);
	/* The copy gets its own identity and reference count. */
	format->id = FORMAT_ID_NIL;
	format->refs = 0;
	if (tuple_format_register(format) != 0) {
		tuple_format_destroy(format);
		free(format);
		return NULL;
	}
	return format;
}

/** @sa declaration for details. */
int
tuple_init_field_map(const struct tuple_format *format, uint32_t *field_map,
		     const char *tuple)
{
	if (format->field_count == 0)
		return 0; /* Nothing to initialize */
	const char *pos = tuple;
	/* Check to see if the tuple has a sufficient number of fields. */
	uint32_t field_count = mp_decode_array(&pos);
	if (format->exact_field_count > 0 &&
	    format->exact_field_count != field_count) {
		diag_set(ClientError, ER_EXACT_FIELD_COUNT,
			 (unsigned) field_count,
			 (unsigned) format->exact_field_count);
		return -1;
	}
	if (unlikely(field_count < format->min_field_count)) {
		diag_set(ClientError, ER_MIN_FIELD_COUNT,
			 (unsigned) field_count,
			 (unsigned) format->min_field_count);
		return -1;
	}

	/* first field is simply accessible, so we do not store offset to it */
	enum mp_type mp_type = mp_typeof(*pos);
	const struct tuple_field *field = &format->fields[0];
	if (key_mp_type_validate(field->type, mp_type, ER_FIELD_TYPE,
				 TUPLE_INDEX_BASE, field->is_nullable))
		return -1;
	mp_next(&pos);
	/* other fields... */
	++field;
	uint32_t i = 1;
	uint32_t defined_field_count = MIN(field_count, format->field_count);
	if (field_count < format->index_field_count) {
		/*
		 * Nullify field map to be able to detect by 0,
		 * which key fields are absent in tuple_field().
		 */
		memset((char *)field_map - format->field_map_size, 0,
		       format->field_map_size);
	}
	for (; i < defined_field_count; ++i, ++field) {
		mp_type = mp_typeof(*pos);
		if (key_mp_type_validate(field->type, mp_type, ER_FIELD_TYPE,
					 i + TUPLE_INDEX_BASE,
					 field->is_nullable))
			return -1;
		if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL) {
			/* Offset is from the start of MsgPack data. */
			field_map[field->offset_slot] =
				(uint32_t) (pos - tuple);
		}
		mp_next(&pos);
	}
	return 0;
}

uint32_t
tuple_format_min_field_count(struct key_def * const *keys, uint16_t key_count,
			     const struct field_def *space_fields,
			     uint32_t space_field_count)
{
	uint32_t min_field_count = 0;
	/* Highest non-nullable space field bounds the minimum. */
	for (uint32_t i = 0; i < space_field_count; ++i) {
		if (! space_fields[i].is_nullable)
			min_field_count = i + 1;
	}
	/* Every non-nullable key part must be present as well. */
	for (uint32_t i = 0; i < key_count; ++i) {
		const struct key_def *kd = keys[i];
		for (uint32_t j = 0; j < kd->part_count; ++j) {
			const struct key_part *kp = &kd->parts[j];
			if (!kp->is_nullable &&
			    kp->fieldno + 1 > min_field_count)
				min_field_count = kp->fieldno + 1;
		}
	}
	return min_field_count;
}

/** Destroy tuple format subsystem and free resources */
void
tuple_format_free()
{
	/* Clear recycled ids. */
	while (recycled_format_ids != FORMAT_ID_NIL) {
		uint16_t id = (uint16_t) recycled_format_ids;
		recycled_format_ids = (intptr_t) tuple_formats[id];
		tuple_formats[id] = NULL;
	}
	for (struct tuple_format **format = tuple_formats;
	     format < tuple_formats + formats_size; format++) {
		/* Do not unregister. Only free resources. */
		if (*format != NULL) {
			tuple_format_destroy(*format);
			free(*format);
		}
	}
	free(tuple_formats);
}

void
box_tuple_format_ref(box_tuple_format_t *format)
{
	tuple_format_ref(format);
}

void
box_tuple_format_unref(box_tuple_format_t *format)
{
	tuple_format_unref(format);
}
tarantool_1.9.1.26.g63eb81e3c/src/box/vy_write_iterator.c0000664000000000000000000007015513306565107021524 0ustar rootroot/*
 * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the
 *    following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL
 * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "vy_write_iterator.h"
#include "vy_mem.h"
#include "vy_run.h"
#include "vy_upsert.h"
#include "column_mask.h"
#include "fiber.h"

#define HEAP_FORWARD_DECLARATION
#include "salad/heap.h"

static bool
heap_less(heap_t *heap, struct heap_node *n1, struct heap_node *n2);

/* Instantiate the source heap specialized with heap_less(). */
#define HEAP_NAME vy_source_heap
#define HEAP_LESS heap_less
#include "salad/heap.h"

/**
 * Merge source of a write iterator. Represents a mem or a run.
 */
struct vy_write_src {
	/* Link in vy_write_iterator::src_list */
	struct rlist in_src_list;
	/* Node in vy_write_iterator::src_heap */
	struct heap_node heap_node;
	/* Current tuple in the source (with minimal key and maximal LSN) */
	struct tuple *tuple;
	/**
	 * If this flag is set, this is a so called "virtual"
	 * source. A virtual source does not stand for any mem or
	 * run, but represents a delimiter between the current key
	 * and the next one. There is a special rule used by the
	 * write iterator heap when comparing with a virtual
	 * source. Such source is greater than any source with
	 * the same key and less than any source with a greater
	 * key, regardless of LSN.
	 */
	bool is_end_of_key;
	/** An iterator over the source */
	union {
		struct vy_slice_stream slice_stream;
		struct vy_mem_stream mem_stream;
		struct vy_stmt_stream stream;
	};
};

/**
 * A sequence of versions of a key, sorted by LSN in ascending order.
 * (history->tuple.lsn < history->next->tuple.lsn).
 */
struct vy_write_history {
	/** Next version with greater LSN. */
	struct vy_write_history *next;
	/** Key. */
	struct tuple *tuple;
};

/**
 * Create a new vy_write_history object, save a statement into it
 * and link with a newer version. This function effectively
 * reverses key LSN order from newest first to oldest first, i.e.
 * orders statements on the same key chronologically.
 *
 * @param region Allocator for the object.
 * @param tuple Key version.
 * @param next Next version of the key.
 *
 * @retval not NULL Created object.
 * @retval NULL Memory error.
 */
static inline struct vy_write_history *
vy_write_history_new(struct region *region, struct tuple *tuple,
		     struct vy_write_history *next)
{
	struct vy_write_history *h =
		region_alloc_object(region, struct vy_write_history);
	if (h == NULL)
		return NULL;
	h->tuple = tuple;
	/* The linked "next" version must be strictly newer. */
	assert(next == NULL || (next->tuple != NULL &&
	       vy_stmt_lsn(next->tuple) > vy_stmt_lsn(tuple)));
	h->next = next;
	/* The history holds a reference to the statement. */
	vy_stmt_ref_if_possible(tuple);
	return h;
}

/**
 * Clear an entire sequence of versions of a key. Free resources
 * of each version.
 * @param history History to clear.
 */
static inline void
vy_write_history_destroy(struct vy_write_history *history)
{
	/*
	 * Only the tuple references are released; the nodes
	 * themselves live on a region and need no freeing.
	 */
	do {
		if (history->tuple != NULL)
			vy_stmt_unref_if_possible(history->tuple);
		history = history->next;
	} while (history != NULL);
}

/** Read view of a key. */
struct vy_read_view_stmt {
	/** Read view LSN. */
	int64_t vlsn;
	/** Result key version, visible to this @vlsn. */
	struct tuple *tuple;
	/**
	 * A history of changes building up to this read
	 * view. Once built, it is merged into a single
	 * @tuple.
	 */
	struct vy_write_history *history;
};

/**
 * Free resources, unref tuples, including all tuples in the
 * history.
 * @param rv Read view to clear.
 */
static inline void
vy_read_view_stmt_destroy(struct vy_read_view_stmt *rv)
{
	if (rv->tuple != NULL)
		vy_stmt_unref_if_possible(rv->tuple);
	rv->tuple = NULL;
	if (rv->history != NULL)
		vy_write_history_destroy(rv->history);
	rv->history = NULL;
}

/* @sa vy_write_iterator.h */
struct vy_write_iterator {
	/** Parent class, must be the first member */
	struct vy_stmt_stream base;
	/* List of all sources of the iterator */
	struct rlist src_list;
	/* A heap to order the sources, newest LSN at heap top. */
	heap_t src_heap;
	/** Index key definition used to store statements on disk. */
	const struct key_def *cmp_def;
	/** Format to allocate new REPLACE and DELETE tuples from vy_run */
	struct tuple_format *format;
	/** Same as format, but for UPSERT tuples. */
	struct tuple_format *upsert_format;
	/* There is no LSM tree level older than the one we're writing to. */
	bool is_last_level;
	/**
	 * Set if this iterator is for a primary index.
	 * Not all implementation are applicable to the primary
	 * key and its tuple format is different.
	 */
	bool is_primary;
	/** Length of the @read_views. */
	int rv_count;
	/**
	 * If there are no changes between two read views, the
	 * newer read view is left empty. This is a count of
	 * non-empty read views. It's used to speed up squashing.
	 */
	int rv_used_count;
	/**
	 * Current read view in @read_views. It is used to return
	 * key versions one by one from vy_write_iterator_next.
	 */
	int stmt_i;
	/**
	 * Read views of the same key sorted by LSN in descending
	 * order, starting from INT64_MAX.
	 *
	 * Some read views in @read_views can be empty,
	 * - if there are no changes since the previous read view
	 * - if there are no changes up until this read view since
	 *   the beginning of time.
	 */
	struct vy_read_view_stmt read_views[0];
};

/**
 * Comparator of the heap. Put newer LSNs first, unless
 * it's a virtual source (is_end_of_key).
 */
static bool
heap_less(heap_t *heap, struct heap_node *node1, struct heap_node *node2)
{
	struct vy_write_iterator *stream =
		container_of(heap, struct vy_write_iterator, src_heap);
	struct vy_write_src *src1 =
		container_of(node1, struct vy_write_src, heap_node);
	struct vy_write_src *src2 =
		container_of(node2, struct vy_write_src, heap_node);

	int cmp = vy_tuple_compare(src1->tuple, src2->tuple, stream->cmp_def);
	if (cmp != 0)
		return cmp < 0;

	/**
	 * Keys are equal, order by LSN, descending.
	 * Virtual sources use 0 for LSN, so they are ordered
	 * last automatically.
	 */
	int64_t lsn1 = src1->is_end_of_key ? 0 : vy_stmt_lsn(src1->tuple);
	int64_t lsn2 = src2->is_end_of_key ? 0 : vy_stmt_lsn(src2->tuple);
	if (lsn1 != lsn2)
		return lsn1 > lsn2;

	/**
	 * LSNs are equal. This may happen only during forced recovery.
	 * Prioritize terminal (non-UPSERT) statements
	 */
	return (vy_stmt_type(src1->tuple) == IPROTO_UPSERT ? 1 : 0) <
	       (vy_stmt_type(src2->tuple) == IPROTO_UPSERT ? 1 : 0);
}

/**
 * Allocate a source and add it to a write iterator.
 * @param stream - the write iterator.
 * @return the source or NULL on memory error.
 */
static struct vy_write_src *
vy_write_iterator_new_src(struct vy_write_iterator *stream)
{
	struct vy_write_src *res = (struct vy_write_src *) malloc(sizeof(*res));
	if (res == NULL) {
		diag_set(OutOfMemory, sizeof(*res),
			 "malloc", "vinyl write stream");
		return NULL;
	}
	res->is_end_of_key = false;
	rlist_add(&stream->src_list, &res->in_src_list);
	return res;
}

/** Close a stream, remove it from the write iterator and delete. */
static void
vy_write_iterator_delete_src(struct vy_write_iterator *stream,
			     struct vy_write_src *src)
{
	(void)stream;
	/* Virtual end-of-key sources are stack objects, never freed here. */
	assert(!src->is_end_of_key);
	if (src->stream.iface->stop != NULL)
		src->stream.iface->stop(&src->stream);
	if (src->stream.iface->close != NULL)
		src->stream.iface->close(&src->stream);
	rlist_del(&src->in_src_list);
	free(src);
}

/**
 * Add a source to the write iterator heap. The added source
 * must be open.
 *
 * On any failure the source is deleted and ownership is not
 * retained by the caller.
 *
 * @return 0 - success, not 0 - error.
 */
static NODISCARD int
vy_write_iterator_add_src(struct vy_write_iterator *stream,
			  struct vy_write_src *src)
{
	if (src->stream.iface->start != NULL) {
		int rc = src->stream.iface->start(&src->stream);
		if (rc != 0) {
			vy_write_iterator_delete_src(stream, src);
			return rc;
		}
	}
	int rc = src->stream.iface->next(&src->stream, &src->tuple);
	if (rc != 0 || src->tuple == NULL) {
		/* Empty source is dropped silently (rc == 0). */
		vy_write_iterator_delete_src(stream, src);
		return rc;
	}
	rc = vy_source_heap_insert(&stream->src_heap, &src->heap_node);
	if (rc != 0) {
		diag_set(OutOfMemory, sizeof(void *), "malloc",
			 "vinyl write stream heap");
		vy_write_iterator_delete_src(stream, src);
		return rc;
	}
	return 0;
}

/**
 * Remove a source from the heap, destroy and free it.
 */
static void
vy_write_iterator_remove_src(struct vy_write_iterator *stream,
			     struct vy_write_src *src)
{
	vy_source_heap_delete(&stream->src_heap, &src->heap_node);
	vy_write_iterator_delete_src(stream, src);
}

/*
 * NOTE(review): despite the name, this vtab implements the WRITE
 * iterator stream (see its definition at the end of this file,
 * pointing at vy_write_iterator_start/next/stop/close).
 */
static const struct vy_stmt_stream_iface vy_slice_stream_iface;

/**
 * Open an empty write iterator. To add sources to the iterator
 * use vy_write_iterator_add_* functions.
 * @return the iterator or NULL on error (diag is set).
 */
struct vy_stmt_stream *
vy_write_iterator_new(const struct key_def *cmp_def, struct tuple_format *format,
		      struct tuple_format *upsert_format, bool is_primary,
		      bool is_last_level, struct rlist *read_views)
{
	/*
	 * One is reserved for INT64_MAX - maximal read view.
	 */
	int count = 1;
	struct rlist *unused;
	rlist_foreach(unused, read_views)
		++count;
	/* read_views[] is a flexible trailing array. */
	size_t size = sizeof(struct vy_write_iterator) +
		      count * sizeof(struct vy_read_view_stmt);
	struct vy_write_iterator *stream =
		(struct vy_write_iterator *) calloc(1, size);
	if (stream == NULL) {
		diag_set(OutOfMemory, size, "malloc", "write stream");
		return NULL;
	}
	stream->stmt_i = -1;
	stream->rv_count = count;
	stream->read_views[0].vlsn = INT64_MAX;
	count--;
	struct vy_read_view *rv;
	/* Descending order. */
	rlist_foreach_entry(rv, read_views, in_read_views)
		stream->read_views[count--].vlsn = rv->vlsn;
	assert(count == 0);

	stream->base.iface = &vy_slice_stream_iface;
	vy_source_heap_create(&stream->src_heap);
	rlist_create(&stream->src_list);
	stream->cmp_def = cmp_def;
	stream->format = format;
	tuple_format_ref(stream->format);
	stream->upsert_format = upsert_format;
	tuple_format_ref(stream->upsert_format);
	stream->is_primary = is_primary;
	stream->is_last_level = is_last_level;
	return &stream->base;
}

/**
 * Start the search. Must be called after *new* methods and
 * before *next* method.
 * @return 0 on success or not 0 on error (diag is set).
 */
static int
vy_write_iterator_start(struct vy_stmt_stream *vstream)
{
	assert(vstream->iface->start == vy_write_iterator_start);
	struct vy_write_iterator *stream = (struct vy_write_iterator *)vstream;
	struct vy_write_src *src, *tmp;
	/* Safe variant: add_src() may delete the source on failure. */
	rlist_foreach_entry_safe(src, &stream->src_list, in_src_list, tmp) {
		if (vy_write_iterator_add_src(stream, src) != 0)
			return -1;
	}
	return 0;
}

/**
 * Free all resources.
 */
static void
vy_write_iterator_stop(struct vy_stmt_stream *vstream)
{
	assert(vstream->iface->stop == vy_write_iterator_stop);
	struct vy_write_iterator *stream = (struct vy_write_iterator *)vstream;
	for (int i = 0; i < stream->rv_count; ++i)
		vy_read_view_stmt_destroy(&stream->read_views[i]);
	struct vy_write_src *src, *tmp;
	rlist_foreach_entry_safe(src, &stream->src_list, in_src_list, tmp)
		vy_write_iterator_delete_src(stream, src);
}

/**
 * Delete the iterator.
 */
static void
vy_write_iterator_close(struct vy_stmt_stream *vstream)
{
	assert(vstream->iface->close == vy_write_iterator_close);
	struct vy_write_iterator *stream = (struct vy_write_iterator *)vstream;
	vy_write_iterator_stop(vstream);
	tuple_format_unref(stream->format);
	tuple_format_unref(stream->upsert_format);
	free(stream);
}

/**
 * Add a mem as a source of iterator.
 * @return 0 on success or -1 on error (diag is set).
 */
NODISCARD int
vy_write_iterator_new_mem(struct vy_stmt_stream *vstream, struct vy_mem *mem)
{
	struct vy_write_iterator *stream = (struct vy_write_iterator *)vstream;
	struct vy_write_src *src = vy_write_iterator_new_src(stream);
	if (src == NULL)
		return -1;
	vy_mem_stream_open(&src->mem_stream, mem);
	return 0;
}

/**
 * Add a run slice as a source of iterator.
 * @return 0 on success or -1 on error (diag is set).
 */
NODISCARD int
vy_write_iterator_new_slice(struct vy_stmt_stream *vstream,
			    struct vy_slice *slice)
{
	struct vy_write_iterator *stream = (struct vy_write_iterator *)vstream;
	struct vy_write_src *src = vy_write_iterator_new_src(stream);
	if (src == NULL)
		return -1;
	vy_slice_stream_open(&src->slice_stream, slice, stream->cmp_def,
			     stream->format, stream->upsert_format,
			     stream->is_primary);
	return 0;
}

/**
 * Go to the next tuple in terms of sorted (merged) input streams.
 * @return 0 on success or not 0 on error (diag is set).
 */
static NODISCARD int
vy_write_iterator_merge_step(struct vy_write_iterator *stream)
{
	struct heap_node *node = vy_source_heap_top(&stream->src_heap);
	assert(node != NULL);
	struct vy_write_src *src =
		container_of(node, struct vy_write_src, heap_node);
	int rc = src->stream.iface->next(&src->stream, &src->tuple);
	if (rc != 0)
		return rc;
	if (src->tuple != NULL)
		vy_source_heap_update(&stream->src_heap, node);
	else
		/* Source exhausted — drop it from the merge. */
		vy_write_iterator_remove_src(stream, src);
	return 0;
}

/**
 * Try to get VLSN of the read view with the specified number in
 * the vy_write_iterator.read_views array.
 * If the requested read view is older than all existing ones,
 * return 0, as the oldest possible VLSN.
 *
 * @param stream Write iterator.
 * @param rv_i Index of the read view.
 *
 * @retval VLSN.
 */
static inline int64_t
vy_write_iterator_get_vlsn(struct vy_write_iterator *stream, int rv_i)
{
	if (rv_i >= stream->rv_count)
		return 0;
	return stream->read_views[rv_i].vlsn;
}

/**
 * Remember the current tuple of the @src as a part of the
 * current read view.
 * @param region History objects allocator.
 * @param stream Write iterator.
 * @param tuple The statement to remember.
 * @param current_rv_i Index of the current read view.
 *
 * @retval 0 Success.
 * @retval -1 Memory error.
 */
static inline int
vy_write_iterator_push_rv(struct region *region,
			  struct vy_write_iterator *stream,
			  struct tuple *tuple, int current_rv_i)
{
	assert(current_rv_i < stream->rv_count);
	struct vy_read_view_stmt *rv = &stream->read_views[current_rv_i];
	/* The statement must be visible to this read view. */
	assert(rv->vlsn >= vy_stmt_lsn(tuple));
	struct vy_write_history *h =
		vy_write_history_new(region, tuple, rv->history);
	if (h == NULL)
		return -1;
	rv->history = h;
	return 0;
}

/**
 * Return the next statement from the current key read view
 * statements sequence. Unref the previous statement, if needed.
 * We can't unref the statement right before returning it to the
 * caller, because reference in the read_views array can be
 * the only one to this statement, e.g. if the statement is
 * read from a disk page.
 *
 * @param stream Write iterator.
 * @retval not NULL Next statement of the current key.
 * @retval NULL End of the key (not the end of the sources).
 */
static inline struct tuple *
vy_write_iterator_pop_read_view_stmt(struct vy_write_iterator *stream)
{
	struct vy_read_view_stmt *rv;
	if (stream->stmt_i >= 0) {
		/* Destroy the current before getting to the next. */
		rv = &stream->read_views[stream->stmt_i];
		assert(rv->history == NULL);
		vy_read_view_stmt_destroy(rv);
	}
	if (stream->rv_used_count == 0)
		return NULL;
	/* Find a next non-empty history element. */
	do {
		assert(stream->stmt_i + 1 < stream->rv_count);
		stream->stmt_i++;
		rv = &stream->read_views[stream->stmt_i];
		assert(rv->history == NULL);
	} while (rv->tuple == NULL);
	assert(stream->rv_used_count > 0);
	stream->rv_used_count--;
	return rv->tuple;
}

/**
 * Build the history of the current key.
 * Apply optimizations 1, 2 and 3 (@sa vy_write_iterator.h).
 * When building a history, some statements can be
 * skipped (e.g.
 * multiple REPLACE statements on the same key),
 * but nothing can be merged yet, since we don't know the first
 * statement in the history.
 * This is why there is a special "merge" step which applies
 * UPSERTs and builds a tuple for each read view.
 *
 * @param region History objects allocator.
 * @param stream Write iterator.
 * @param[out] count Count of statements saved in the history.
 * @param[out] is_first_insert Set if the oldest statement for
 *             the current key among all sources is an INSERT.
 *
 * @retval 0 Success.
 * @retval -1 Memory error.
 */
static NODISCARD int
vy_write_iterator_build_history(struct region *region,
				struct vy_write_iterator *stream,
				int *count, bool *is_first_insert)
{
	*count = 0;
	*is_first_insert = false;
	assert(stream->stmt_i == -1);
	struct heap_node *node = vy_source_heap_top(&stream->src_heap);
	if (node == NULL)
		return 0; /* no more data */
	struct vy_write_src *src =
		container_of(node, struct vy_write_src, heap_node);
	/* Search must have been started already. */
	assert(src->tuple != NULL);
	/*
	 * A virtual source instance which represents the end on
	 * the current key in the source heap. It is greater
	 * than any statement on the current key and less than
	 * any statement on the next key.
	 * The moment we get this source from the heap we know
	 * that there are no more statements for the current key.
	 */
	struct vy_write_src end_of_key_src;
	end_of_key_src.is_end_of_key = true;
	end_of_key_src.tuple = src->tuple;
	int rc = vy_source_heap_insert(&stream->src_heap,
				       &end_of_key_src.heap_node);
	if (rc) {
		diag_set(OutOfMemory, sizeof(void *),
			 "malloc", "vinyl write stream heap");
		return rc;
	}
	/* Keep the key alive while the sentinel references it. */
	vy_stmt_ref_if_possible(src->tuple);

	/*
	 * For each pair (merge_until_lsn, current_rv_lsn] build
	 * a history in the corresponding read view.
	 * current_rv_i - index of the current read view.
	 */
	int current_rv_i = 0;
	int64_t current_rv_lsn = vy_write_iterator_get_vlsn(stream, 0);
	int64_t merge_until_lsn = vy_write_iterator_get_vlsn(stream, 1);
	uint64_t key_mask = stream->cmp_def->column_mask;

	while (true) {
		*is_first_insert = vy_stmt_type(src->tuple) == IPROTO_INSERT;
		if (!stream->is_primary &&
		    vy_stmt_type(src->tuple) == IPROTO_REPLACE) {
			/*
			 * If a REPLACE stored in a secondary index was
			 * generated by an update operation, it can be
			 * turned into an INSERT.
			 */
			uint64_t stmt_mask = vy_stmt_column_mask(src->tuple);
			if (stmt_mask != UINT64_MAX &&
			    !key_update_can_be_skipped(stmt_mask, key_mask))
				*is_first_insert = true;
		}
		if (vy_stmt_lsn(src->tuple) > current_rv_lsn) {
			/*
			 * Skip statements invisible to the current read
			 * view but older than the previous read view,
			 * which is already fully built.
			 */
			goto next_lsn;
		}
		while (vy_stmt_lsn(src->tuple) <= merge_until_lsn) {
			/*
			 * Skip read views which see the same
			 * version of the key, until src->tuple is
			 * between merge_until_lsn and
			 * current_rv_lsn.
			 */
			current_rv_i++;
			current_rv_lsn = merge_until_lsn;
			merge_until_lsn =
				vy_write_iterator_get_vlsn(stream,
							   current_rv_i + 1);
		}

		/*
		 * Optimization 1: skip last level delete.
		 * @sa vy_write_iterator for details about this
		 * and other optimizations.
		 */
		if (vy_stmt_type(src->tuple) == IPROTO_DELETE &&
		    stream->is_last_level && merge_until_lsn == 0) {
			current_rv_lsn = 0; /* Force skip */
			goto next_lsn;
		}

		/*
		 * Optimization 2: skip statements overwritten
		 * by a REPLACE or DELETE.
		 */
		if (vy_stmt_type(src->tuple) == IPROTO_REPLACE ||
		    vy_stmt_type(src->tuple) == IPROTO_INSERT ||
		    vy_stmt_type(src->tuple) == IPROTO_DELETE) {
			uint64_t stmt_mask = vy_stmt_column_mask(src->tuple);
			/*
			 * Optimization 3: skip statements which
			 * do not change this secondary key.
			 */
			if (!stream->is_primary &&
			    key_update_can_be_skipped(key_mask, stmt_mask))
				goto next_lsn;

			rc = vy_write_iterator_push_rv(region, stream,
						       src->tuple,
						       current_rv_i);
			if (rc != 0)
				break;
			++*count;
			/*
			 * A terminal statement fully defines this read
			 * view — move on to the next, older one.
			 */
			current_rv_i++;
			current_rv_lsn = merge_until_lsn;
			merge_until_lsn =
				vy_write_iterator_get_vlsn(stream,
							   current_rv_i + 1);
			goto next_lsn;
		}

		/* UPSERTs accumulate in the same read view. */
		assert(vy_stmt_type(src->tuple) == IPROTO_UPSERT);
		rc = vy_write_iterator_push_rv(region, stream, src->tuple,
					       current_rv_i);
		if (rc != 0)
			break;
		++*count;

next_lsn:
		rc = vy_write_iterator_merge_step(stream);
		if (rc != 0)
			break;
		node = vy_source_heap_top(&stream->src_heap);
		assert(node != NULL);
		src = container_of(node, struct vy_write_src, heap_node);
		assert(src->tuple != NULL);
		if (src->is_end_of_key)
			break;
	}

	vy_source_heap_delete(&stream->src_heap, &end_of_key_src.heap_node);
	vy_stmt_unref_if_possible(end_of_key_src.tuple);
	return rc;
}

/**
 * Apply accumulated UPSERTs in the read view with a hint from
 * a previous read view. After merge, the read view must contain
 * one statement.
 *
 * @param stream Write iterator.
 * @param hint The tuple from a previous read view (can be NULL).
 * @param rv Read view to merge.
 * @param is_first_insert Set if the oldest statement for the
 *        current key among all sources is an INSERT.
 *
 * @retval 0 Success.
 * @retval -1 Memory error.
 */
static NODISCARD int
vy_read_view_merge(struct vy_write_iterator *stream, struct tuple *hint,
		   struct vy_read_view_stmt *rv, bool is_first_insert)
{
	assert(rv != NULL);
	assert(rv->tuple == NULL);
	assert(rv->history != NULL);
	struct vy_write_history *h = rv->history;
	/*
	 * Optimization 5: discard a DELETE statement referenced
	 * by a read view if it is preceded by another DELETE for
	 * the same key.
	 */
	if (hint != NULL && vy_stmt_type(hint) == IPROTO_DELETE &&
	    vy_stmt_type(h->tuple) == IPROTO_DELETE) {
		vy_write_history_destroy(h);
		rv->history = NULL;
		return 0;
	}
	/*
	 * Two possible hints to remove the current UPSERT.
	 * 1.
 *    If the stream is working on the last level, we
 *    know that this UPSERT is the oldest version of
 *    the key and can convert it into REPLACE.
 * 2. If the previous read view contains DELETE or
 *    REPLACE, then the current UPSERT can be applied to
 *    it, whether is_last_level is true or not.
 */
	if (vy_stmt_type(h->tuple) == IPROTO_UPSERT &&
	    (stream->is_last_level || (hint != NULL &&
	     vy_stmt_type(hint) != IPROTO_UPSERT))) {
		assert(!stream->is_last_level || hint == NULL ||
		       vy_stmt_type(hint) != IPROTO_UPSERT);
		struct tuple *applied = vy_apply_upsert(h->tuple, hint,
					stream->cmp_def, stream->format,
					stream->upsert_format, false);
		if (applied == NULL)
			return -1;
		vy_stmt_unref_if_possible(h->tuple);
		h->tuple = applied;
	}
	/* Squash the rest of UPSERTs. */
	struct vy_write_history *result = h;
	h = h->next;
	while (h != NULL) {
		assert(h->tuple != NULL &&
		       vy_stmt_type(h->tuple) == IPROTO_UPSERT);
		assert(result->tuple != NULL);
		struct tuple *applied = vy_apply_upsert(h->tuple,
					result->tuple, stream->cmp_def,
					stream->format, stream->upsert_format,
					false);
		if (applied == NULL)
			return -1;
		vy_stmt_unref_if_possible(result->tuple);
		result->tuple = applied;
		vy_stmt_unref_if_possible(h->tuple);
		/*
		 * Don't bother freeing 'h' since it's
		 * allocated on a region.
		 */
		h = h->next;
		result->next = h;
	}
	rv->tuple = result->tuple;
	rv->history = NULL;
	result->tuple = NULL;
	assert(result->next == NULL);
	if (hint != NULL) {
		/* Not the first statement. */
		return 0;
	}
	struct tuple *tuple = rv->tuple;
	if (is_first_insert && vy_stmt_type(tuple) == IPROTO_DELETE) {
		/*
		 * Optimization 6: discard the first DELETE if
		 * the oldest statement for the current key among
		 * all sources is an INSERT and hence there's no
		 * statements for this key in older runs or the
		 * last statement is a DELETE.
		 */
		vy_stmt_unref_if_possible(tuple);
		rv->tuple = NULL;
	}
	/*
	 * NOTE(review): `tuple` is inspected again below after the
	 * unref in the DELETE branch; this looks safe only if the
	 * unref cannot free the last reference here — confirm.
	 */
	if ((is_first_insert && vy_stmt_type(tuple) == IPROTO_REPLACE) ||
	    (!is_first_insert && vy_stmt_type(tuple) == IPROTO_INSERT)) {
		/*
		 * If the oldest statement among all sources is an
		 * INSERT, convert the first REPLACE to an INSERT
		 * so that if the key gets deleted later, we will
		 * be able invoke optimization #6 to discard the
		 * DELETE statement.
		 *
		 * Otherwise convert the first INSERT to a REPLACE
		 * so as not to trigger optimization #6 on the next
		 * compaction.
		 */
		uint32_t size;
		const char *data = tuple_data_range(tuple, &size);
		struct tuple *copy = is_first_insert ?
			vy_stmt_new_insert(stream->format, data,
					   data + size) :
			vy_stmt_new_replace(stream->format, data,
					    data + size);
		if (copy == NULL)
			return -1;
		vy_stmt_set_lsn(copy, vy_stmt_lsn(tuple));
		vy_stmt_unref_if_possible(tuple);
		rv->tuple = copy;
	}
	return 0;
}

/**
 * Split the current key into a sequence of read view
 * statements. @sa struct vy_write_iterator comment for details
 * about the algorithm and optimizations.
 *
 * @param stream Write iterator.
 * @param[out] count Length of the result key versions sequence.
 *
 * @retval 0 Success.
 * @retval -1 Memory error.
 */
static NODISCARD int
vy_write_iterator_build_read_views(struct vy_write_iterator *stream, int *count)
{
	*count = 0;
	int raw_count;
	bool is_first_insert;
	struct region *region = &fiber()->gc;
	/* Remember the region mark to roll back temporary history. */
	size_t used = region_used(region);
	stream->rv_used_count = 0;
	if (vy_write_iterator_build_history(region, stream, &raw_count,
					    &is_first_insert) != 0)
		goto error;
	if (raw_count == 0) {
		/* A key is fully optimized. */
		region_truncate(region, used);
		return 0;
	}
	/* Find the first non-empty read view. */
	struct vy_read_view_stmt *rv =
		&stream->read_views[stream->rv_count - 1];
	while (rv > &stream->read_views[0] && rv->history == NULL)
		--rv;
	/*
	 * At least one statement has been found, since raw_count
	 * here > 0.
	 */
	assert(rv >= &stream->read_views[0] && rv->history != NULL);
	struct tuple *hint = NULL;
	/* Merge from the oldest read view towards the newest. */
	for (; rv >= &stream->read_views[0]; --rv) {
		if (rv->history == NULL)
			continue;
		if (vy_read_view_merge(stream, hint, rv,
				       is_first_insert) != 0)
			goto error;
		assert(rv->history == NULL);
		if (rv->tuple == NULL)
			continue;
		stream->rv_used_count++;
		++*count;
		hint = rv->tuple;
	}
	region_truncate(region, used);
	return 0;
error:
	region_truncate(region, used);
	return -1;
}

/**
 * Get the next statement to write.
 * The user of the write iterator simply expects a stream
 * of statements to write to the output.
 * The tuple *ret is guaranteed to be valid until next tuple is
 * returned (thus last non-null tuple is valid after EOF).
 *
 * @return 0 on success or not 0 on error (diag is set).
 */
static NODISCARD int
vy_write_iterator_next(struct vy_stmt_stream *vstream, struct tuple **ret)
{
	assert(vstream->iface->next == vy_write_iterator_next);
	struct vy_write_iterator *stream = (struct vy_write_iterator *)vstream;
	/*
	 * Try to get the next statement from the current key
	 * read view statements sequence.
	 */
	*ret = vy_write_iterator_pop_read_view_stmt(stream);
	if (*ret != NULL)
		return 0;

	/* Build the next key sequence. */
	stream->stmt_i = -1;
	int count = 0;
	while (true) {
		/* Squash UPSERTs and/or go to the next key */
		if (vy_write_iterator_build_read_views(stream, &count) != 0)
			return -1;
		/*
		 * next_key() routine could skip the next key, for
		 * example, if it was truncated by last level
		 * DELETE or it consisted only from optimized
		 * updates. Then try to get the next key.
		 */
		if (count != 0 || stream->src_heap.size == 0)
			break;
	}

	/* Again try to get the statement, after calling next_key(). */
	*ret = vy_write_iterator_pop_read_view_stmt(stream);
	return 0;
}

static const struct vy_stmt_stream_iface vy_slice_stream_iface = {
	.start = vy_write_iterator_start,
	.next = vy_write_iterator_next,
	.stop = vy_write_iterator_stop,
	.close = vy_write_iterator_close
};
tarantool_1.9.1.26.g63eb81e3c/src/box/vclock.h0000664000000000000000000001602213306560010017212 0ustar rootroot#ifndef INCLUDES_TARANTOOL_VCLOCK_H
#define INCLUDES_TARANTOOL_VCLOCK_H
/*
 * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the
 *    following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
 * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * NOTE(review): the argument of each angle-bracket #include below
 * appears to have been stripped by text extraction (`#include <...>`
 * lost its header name) — restore from the upstream file; TODO confirm.
 */
#include
#include
#include
#include
#include
#include

#define RB_COMPACT 1
#include
#include "bit/bit.h"

#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */

enum {
	/**
	 * The maximum number of components in vclock
	 */
	VCLOCK_MAX = 32,

	/**
	 * The maximum length of string representation of vclock.
	 *
	 * vclock formatted as {<id>: <lsn>, ..., <id>: <lsn>} where
	 * <id> is 0..VCLOCK_MAX (2 chars),
	 * <lsn> is int64_t (20 chars).
	 *
	 * @sa vclock_from_string()
	 * @sa vclock_to_string()
	 */
	VCLOCK_STR_LEN_MAX = 1 + VCLOCK_MAX * (2 + 2 + 20 + 2) + 1
};

/** Cluster vector clock */
struct vclock {
	/** Map of used components in lsn array */
	unsigned int map;
	/** Sum of all components of vclock. */
	int64_t signature;
	int64_t lsn[VCLOCK_MAX];
	/** To order binary logs by vector clock. */
	rb_node(struct vclock) link;
};

/* Replica id, coordinate */
struct vclock_c {
	uint32_t id;
	int64_t lsn;
};

struct vclock_iterator {
	struct bit_iterator it;
	const struct vclock *vclock;
};

static inline void
vclock_iterator_init(struct vclock_iterator *it, const struct vclock *vclock)
{
	it->vclock = vclock;
	/* Iterate only over the set bits of the component map. */
	bit_iterator_init(&it->it, &vclock->map, sizeof(vclock->map), true);
}

static inline struct vclock_c
vclock_iterator_next(struct vclock_iterator *it)
{
	struct vclock_c c = { 0, 0 };
	size_t id = bit_iterator_next(&it->it);
	/* VCLOCK_MAX in c.id signals end of iteration. */
	c.id = id == SIZE_MAX ? (int) VCLOCK_MAX : id;
	if (c.id < VCLOCK_MAX)
		c.lsn = it->vclock->lsn[c.id];
	return c;
}

#define vclock_foreach(it, var) \
	for (struct vclock_c var = vclock_iterator_next(it); \
	     (var).id < VCLOCK_MAX; (var) = vclock_iterator_next(it))

static inline void
vclock_create(struct vclock *vclock)
{
	memset(vclock, 0, sizeof(*vclock));
}

static inline int64_t
vclock_get(const struct vclock *vclock, uint32_t replica_id)
{
	/* Out-of-range ids read as LSN 0. */
	if (replica_id >= VCLOCK_MAX)
		return 0;
	return vclock->lsn[replica_id];
}

static inline int64_t
vclock_inc(struct vclock *vclock, uint32_t replica_id)
{
	/* Easier add each time than check. */
	vclock->map |= 1 << replica_id;
	/* Keep the cached signature consistent with lsn[]. */
	vclock->signature++;
	return ++vclock->lsn[replica_id];
}

static inline void
vclock_copy(struct vclock *dst, const struct vclock *src)
{
	*dst = *src;
}

static inline uint32_t
vclock_size(const struct vclock *vclock)
{
	return __builtin_popcount(vclock->map);
}

static inline int64_t
vclock_calc_sum(const struct vclock *vclock)
{
	int64_t sum = 0;
	struct vclock_iterator it;
	vclock_iterator_init(&it, vclock);
	vclock_foreach(&it, replica)
		sum += replica.lsn;
	return sum;
}

static inline int64_t
vclock_sum(const struct vclock *vclock)
{
	return vclock->signature;
}

int64_t
vclock_follow(struct vclock *vclock, uint32_t replica_id, int64_t lsn);

/**
 * \brief Format vclock to YAML-compatible string representation:
 * { replica_id: lsn, replica_id: lsn }
 * \param vclock vclock
 * \return formatted string. This pointer should be passed to free(3) to
 * release the allocated storage when it is no longer needed.
 */
char *
vclock_to_string(const struct vclock *vclock);

/**
 * \brief Fill vclock from string representation.
 * \param vclock vclock
 * \param str string to parse
 * \retval 0 on success
 * \retval error offset on error (indexed from 1)
 * \sa vclock_to_string()
 */
size_t
vclock_from_string(struct vclock *vclock, const char *str);

enum { VCLOCK_ORDER_UNDEFINED = INT_MAX };

/**
 * \brief Compare vclocks
 * \param a vclock
 * \param b vclock
 * \retval 1 if \a vclock is ordered after \a other
 * \retval -1 if \a vclock is ordered before than \a other
 * \retval 0 if vclocks are equal
 * \retval VCLOCK_ORDER_UNDEFINED if vclocks are concurrent
 */
static inline int
vclock_compare(const struct vclock *a, const struct vclock *b)
{
	bool le = true, ge = true;
	/* Visit every component set in either clock. */
	unsigned int map = a->map | b->map;
	struct bit_iterator it;
	bit_iterator_init(&it, &map, sizeof(map), true);

	for (size_t replica_id = bit_iterator_next(&it);
	     replica_id < VCLOCK_MAX;
	     replica_id = bit_iterator_next(&it)) {
		int64_t lsn_a = a->lsn[replica_id];
		int64_t lsn_b = b->lsn[replica_id];
		le = le && lsn_a <= lsn_b;
		ge = ge && lsn_a >= lsn_b;
		if (!ge && !le)
			return VCLOCK_ORDER_UNDEFINED;
	}
	if (ge && !le)
		return 1;
	if (le && !ge)
		return -1;
	return 0;
}

/**
 * @brief vclockset - a set of vclocks
 */
typedef rb_tree(struct vclock) vclockset_t;
rb_proto(, vclockset_, vclockset_t, struct vclock);

/**
 * A proximity search in a set of vclock objects.
 *
 * The set is normally the index of vclocks in the binary
 * log files of the current directory. The task of the search is
 * to find the first log,
 *
 * @return a vclock that <= than \a key
 */
static inline struct vclock *
vclockset_match(vclockset_t *set, struct vclock *key)
{
	struct vclock *match = vclockset_psearch(set, key);
	/**
	 * vclockset comparator returns 0 for
	 * incomparable keys, rendering them equal.
	 * So the match, even when found, is not necessarily
	 * strictly preceding the search key, it may be
	 * incomparable. If this is the case, unwind until we get
	 * to a key which is strictly below the search pattern.
*/ while (match != NULL) { if (vclock_compare(match, key) <= 0) return match; /* The order is undefined, try the previous vclock. */ match = vclockset_prev(set, match); } /* * There is no xlog which is strictly less than the search * pattern. Return the first log - it is either * strictly greater, or incomparable with the key. */ return vclockset_first(set); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_VCLOCK_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/xlog.h0000664000000000000000000004424113306565107016722 0ustar rootroot#ifndef TARANTOOL_XLOG_H_INCLUDED #define TARANTOOL_XLOG_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include "tt_uuid.h" #include "vclock.h" #define ZSTD_STATIC_LINKING_ONLY #include "zstd.h" #include "small/ibuf.h" #include "small/obuf.h" struct iovec; struct xrow_header; #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /* {{{ log dir */ /** * Type of log directory. A single filesystem directory can be * used for write ahead logs, memtx snapshots or vinyl run files, * but an xlog object sees only those files which match its type. */ enum xdir_type { SNAP, /* memtx snapshot */ XLOG, /* write ahead log */ VYLOG, /* vinyl metadata log */ }; /** * Newly created snapshot files get .inprogress filename suffix. * The suffix is removed when the file is finished * and closed. */ enum log_suffix { NONE, INPROGRESS }; /** * A handle for a data directory with write ahead logs, snapshots, * vylogs. * Can be used to find the last log in the directory, scan * through all logs, create a new log. */ struct xdir { /** * Allow partial recovery from a damaged/incorrect * data directory. Suppresses exceptions when scanning * the directory, parsing file headers, or reading * partial or corrupt rows. Incorrect objects * are skipped. */ bool force_recovery; /** * true if a log file in this directory can by fsync()ed * at close in a separate thread (we use this technique to * speed up sync of write ahead logs, but not snapshots). */ bool sync_is_async; /* Default filename suffix for a new file. */ enum log_suffix suffix; /** * Additional flags to apply at open(2) to write. */ int open_wflags; /** * A pointer to this instance uuid. If not assigned * (tt_uuid_is_nil returns true), instance id check * for logs in this directory is not performed. * Otherwise, any log in this directory must have * the matching instance id. */ const struct tt_uuid *instance_uuid; /** * Text of a marker written to the text file header: * XLOG (meaning it's a write ahead log) SNAP (a * snapshot) or VYLOG. 
*/ const char *filetype; /** * File name extension (.xlog or .snap). */ const char *filename_ext; /** File create mode in this directory. */ mode_t mode; /* * Index of files present in the directory. Initially * empty, must be initialized with xdir_scan(). */ vclockset_t index; /** * Directory path. */ char dirname[PATH_MAX+1]; /** Snapshots or xlogs */ enum xdir_type type; /** * Sync interval in bytes. * xlog file will be synced every sync_interval bytes, * corresponding file cache will be marked as free */ uint64_t sync_interval; }; /** * Initialize a log dir. */ void xdir_create(struct xdir *dir, const char *dirname, enum xdir_type type, const struct tt_uuid *instance_uuid); /** * Destroy a log dir object. */ void xdir_destroy(struct xdir *dir); /** * Scan or re-scan a directory and update directory * index with all log files (or snapshots) in the directory. * Must be used if it is necessary to find the last log/ * snapshot or scan through all logs. */ int xdir_scan(struct xdir *dir); /** * Check that a directory exists and is writable. */ int xdir_check(struct xdir *dir); /** * Return a file name based on directory type, vector clock * sum, and a suffix (.inprogress or not). */ char * xdir_format_filename(struct xdir *dir, int64_t signature, enum log_suffix suffix); /** * Remove files whose signature is less than specified. * If @use_coio is set, files are deleted by coio threads. */ int xdir_collect_garbage(struct xdir *dir, int64_t signature, bool use_coio); /** * Return LSN and vclock (unless @vclock is NULL) of the newest * file in a directory or -1 if the directory is empty. */ static inline int64_t xdir_last_vclock(struct xdir *xdir, struct vclock *vclock) { struct vclock *last = vclockset_last(&xdir->index); if (last == NULL) return -1; if (vclock != NULL) vclock_copy(vclock, last); return vclock_sum(last); } /** * Insert a vclock into the file index of a directory. * The vclock must be allocated with malloc(). 
*/ static inline void xdir_add_vclock(struct xdir *xdir, struct vclock *vclock) { vclockset_insert(&xdir->index, vclock); } /* }}} */ /* {{{ xlog meta */ /** * A xlog meta info */ struct xlog_meta { /** Text file header: filetype */ char filetype[10]; /** * Text file header: instance uuid. We read * only logs with our own uuid, to avoid situations * when a DBA has manually moved a few logs around * and messed the data directory up. */ struct tt_uuid instance_uuid; /** * Text file header: vector clock taken at the time * this file was created. For WALs, this is vector * clock *at start of WAL*, for snapshots, this * is vector clock *at the time the snapshot is taken*. */ struct vclock vclock; }; /* }}} */ /** * A single log file - a snapshot, a vylog or a write ahead log. */ struct xlog { /** xlog meta header */ struct xlog_meta meta; /** do sync in async mode */ bool sync_is_async; /** File handle. */ int fd; /** * How many xlog rows are in the file last time it * was read or written. Updated in xlog_cursor_close() * and is used to check whether or not we have discovered * a new row in the file since it was last read. This is * used in local hot standby to "follow up" on new rows * appended to the file. */ int64_t rows; /* should have the same type as lsn */ /** * The number of rows in the current tx, part of * tx state used only in write mode. */ int64_t tx_rows; /** Log file name. */ char filename[PATH_MAX + 1]; /** Whether this file has .inprogress suffix. */ bool is_inprogress; /* * If true, we can flush the data in this buffer whenever * we like, and it's usually when the buffer gets * sufficiently big to get compressed. * * Otherwise, we must observe transactional boundaries * to avoid writing a partial transaction to WAL: a * single transaction always goes to WAL in a single * "chunk" with 1 fixed header and common checksum * for all transactional rows. This prevents miscarriage * or partial delivery of transactional rows to a slave * during replication. 
*/ bool is_autocommit; /** The current offset in the log file, for writing. */ off_t offset; /** * Output buffer, works as row accumulator for * compression. */ struct obuf obuf; /** The context of zstd compression */ ZSTD_CCtx *zctx; /** * Compressed output buffer */ struct obuf zbuf; /** * Sync interval in bytes. * xlog file will be synced every sync_interval bytes, * corresponding file cache will be marked as free */ uint64_t sync_interval; /** * Synced file size */ uint64_t synced_size; /** * If xlog file was synced corresponding cache will be freed if true. * This can be significant for memtx snapshots (that wouldn't * be read in normal cases) and vinyl data files (that can be read * after writing) */ bool free_cache; /** * Write rate limit */ uint64_t rate_limit; /** Time when xlog wast synced last time */ double sync_time; }; /** * Touch xdir snapshot file. * * @param xdir xdir * @param vclock the global state of replication (vector * clock) at the moment the file is created. * * @retval 0 if OK * @retval -1 if error */ int xdir_touch_xlog(struct xdir *dir, const struct vclock *vclock); /** * Create a new file and open it in write (append) mode. * Note: an existing file is impossible to open for append, * the old files are never appended to. * * @param xdir xdir * @param[out] xlog xlog structure * @param instance uuid the instance which created the file * @param vclock the global state of replication (vector * clock) at the moment the file is created. * * @retval 0 if OK * @retval -1 if error */ int xdir_create_xlog(struct xdir *dir, struct xlog *xlog, const struct vclock *vclock); /** * Create new xlog writer based on fd. * @param fd file descriptor * @param name the assiciated name * @param flags flags to open the file or 0 for defaults * @param meta xlog meta * * @retval 0 for success * @retvl -1 if error */ int xlog_create(struct xlog *xlog, const char *name, int flags, const struct xlog_meta *meta); /** * Open an existing xlog file for appending. 
* @param xlog xlog descriptor * @param name file name * * @retval 0 success * @retval -1 error */ int xlog_open(struct xlog *xlog, const char *name); /** * Reset an xlog object without opening it. * The object is in limbo state: it doesn't hold * any resources and doesn't need close, but * xlog_is_open() returns false. */ void xlog_clear(struct xlog *xlog); /** Returns true if the xlog file is open. */ static inline bool xlog_is_open(struct xlog *l) { return l->fd != -1; } /** * Rename xlog * * @retval 0 for ok * @retval -1 for error */ int xlog_rename(struct xlog *l); /** * Write a row to xlog, * * @retval count of writen bytes * @retval -1 for error */ ssize_t xlog_write_row(struct xlog *log, const struct xrow_header *packet); /** * Prevent xlog row buffer offloading, should be use * at transaction start to write transaction in one xlog tx */ void xlog_tx_begin(struct xlog *log); /** * Enable xlog row buffer offloading * * @retval count of writen bytes * @retval 0 if buffer is not writen * @retval -1 if error */ ssize_t xlog_tx_commit(struct xlog *log); /** * Discard xlog row buffer */ void xlog_tx_rollback(struct xlog *log); /** * Flush buffered rows and sync file */ ssize_t xlog_flush(struct xlog *log); /** * Sync a log file. The exact action is defined * by xdir flags. * * @retval 0 success * @retval -1 error */ int xlog_sync(struct xlog *l); /** * Close the log file and free xlog object. * * @retval 0 success * @retval -1 error (fclose() failed). */ int xlog_close(struct xlog *l, bool reuse_fd); /** * atfork() handler function to close the log pointed * at by xlog in the child. */ void xlog_atfork(struct xlog *xlog); /* {{{ xlog_tx_cursor - iterate over rows in xlog transaction */ /** * xlog tx iterator */ struct xlog_tx_cursor { /** rows buffer */ struct ibuf rows; /** tx size */ size_t size; }; /** * Create xlog tx iterator from memory data. 
* *data will be adjusted to end of tx * * @retval 0 for Ok * @retval -1 for error * @retval >0 how many additional bytes should be read to parse tx */ ssize_t xlog_tx_cursor_create(struct xlog_tx_cursor *cursor, const char **data, const char *data_end, ZSTD_DStream *zdctx); /** * Destroy xlog tx cursor and free all associated memory * including parsed xrows */ int xlog_tx_cursor_destroy(struct xlog_tx_cursor *tx_cursor); /** * Fetch next xrow from xlog tx cursor * * @retval 0 for Ok * @retval -1 for error */ int xlog_tx_cursor_next_row(struct xlog_tx_cursor *tx_cursor, struct xrow_header *xrow); /** * Return current tx cursor position * * @param tx_cursor tx_cursor * @retval current tx cursor position */ static inline off_t xlog_tx_cursor_pos(struct xlog_tx_cursor *tx_cursor) { return tx_cursor->size - ibuf_used(&tx_cursor->rows); } /** * A conventional helper to decode rows from the raw tx buffer. * Decodes fixheader, checks crc32 and length, decompresses rows. * * @param data a buffer with the raw tx data, including fixheader * @param data_end the end of @a data buffer * @param[out] rows a buffer to store decoded rows * @param[out] rows_end the end of @a rows buffer * @retval 0 success * @retval -1 error, check diag */ int xlog_tx_decode(const char *data, const char *data_end, char *rows, char *rows_end, ZSTD_DStream *zdctx); /* }}} */ /* {{{ xlog_cursor - read rows from a log file */ enum xlog_cursor_state { /* Cursor is closed */ XLOG_CURSOR_CLOSED = 0, /* The cursor is open but no tx is read */ XLOG_CURSOR_ACTIVE = 1, /* The Cursor is open and a tx is read */ XLOG_CURSOR_TX = 2, /* The cursor is open but is at the end of file. */ XLOG_CURSOR_EOF = 3, /* The cursor was closed after reaching EOF. 
*/ XLOG_CURSOR_EOF_CLOSED = 4, }; /** * Xlog cursor, read rows from xlog */ struct xlog_cursor { enum xlog_cursor_state state; /** xlog meta info */ struct xlog_meta meta; /** file descriptor or -1 for in memory */ int fd; /** associated file name */ char name[PATH_MAX]; /** file read buffer */ struct ibuf rbuf; /** file read position */ off_t read_offset; /** cursor for current tx */ struct xlog_tx_cursor tx_cursor; /** ZSTD context for decompression */ ZSTD_DStream *zdctx; }; /** * Return true if the cursor was opened and has not * been closed yet. */ static inline bool xlog_cursor_is_open(const struct xlog_cursor *cursor) { return (cursor->state != XLOG_CURSOR_CLOSED && cursor->state != XLOG_CURSOR_EOF_CLOSED); } /** * Return true if the cursor has reached EOF. * The cursor may be closed or still open. */ static inline bool xlog_cursor_is_eof(const struct xlog_cursor *cursor) { return (cursor->state == XLOG_CURSOR_EOF || cursor->state == XLOG_CURSOR_EOF_CLOSED); } /** * Open cursor from file descriptor * @param cursor cursor * @param fd file descriptor * @param name associated file name * @retval 0 succes * @retval -1 error, check diag */ int xlog_cursor_openfd(struct xlog_cursor *cursor, int fd, const char *name); /** * Open cursor from file * @param cursor cursor * @param name file name * @retval 0 succes * @retval -1 error, check diag */ int xlog_cursor_open(struct xlog_cursor *cursor, const char *name); /** * Open cursor from memory * @param cursor cursor * @param data pointer to memory block * @param size memory block size * @param name associated file name * @retval 0 succes * @retval -1 error, check diag */ int xlog_cursor_openmem(struct xlog_cursor *cursor, const char *data, size_t size, const char *name); /** * Reset cursor position * @param cursor cursor * @retval 0 succes * @retval -1 error, check diag */ int xlog_cursor_reset(struct xlog_cursor *cursor); /** * Close cursor * @param cursor cursor */ void xlog_cursor_close(struct xlog_cursor *cursor, 
bool reuse_fd); /** * Open next tx from xlog * @param cursor cursor * @retval 0 succes * @retval 1 eof * retval -1 error, check diag */ int xlog_cursor_next_tx(struct xlog_cursor *cursor); /** * Fetch next xrow from current xlog tx * * @retval 0 for Ok * @retval 1 if current tx is done * @retval -1 for error */ int xlog_cursor_next_row(struct xlog_cursor *cursor, struct xrow_header *xrow); /** * Fetch next row from cursor, ignores xlog tx boundary, * open a next one tx if current is done. * * @retval 0 for Ok * @retval 1 for EOF * @retval -1 for error */ int xlog_cursor_next(struct xlog_cursor *cursor, struct xrow_header *xrow, bool force_recovery); /** * Move to the next xlog tx * * @retval 0 magic found * @retval 1 magic not found and eof reached * @retval -1 error */ int xlog_cursor_find_tx_magic(struct xlog_cursor *i); /** * Cursor xlog position * * @param cursor xlog cursor * @retval xlog current position */ static inline off_t xlog_cursor_pos(struct xlog_cursor *cursor) { return cursor->read_offset - ibuf_used(&cursor->rbuf); } /** * Return tx positon for xlog cursor * * @param cursor xlog_cursor * @retval current tx postion */ static inline off_t xlog_cursor_tx_pos(struct xlog_cursor *cursor) { return xlog_tx_cursor_pos(&cursor->tx_cursor); } /* }}} */ /** {{{ miscellaneous log io functions. 
*/ /** * Open cursor for xdir entry pointed by signature * @param xdir xdir * @param signature xlog signature * @param cursor cursor * @retval 0 succes * @retval -1 error, check diag */ int xdir_open_cursor(struct xdir *dir, int64_t signature, struct xlog_cursor *cursor); /** }}} */ #if defined(__cplusplus) } /* extern C */ #include "exception.h" static inline void xdir_scan_xc(struct xdir *dir) { if (xdir_scan(dir) == -1) diag_raise(); } static inline void xdir_check_xc(struct xdir *dir) { if (xdir_check(dir) == -1) diag_raise(); } /** * @copydoc xdir_open_cursor */ static inline int xdir_open_cursor_xc(struct xdir *dir, int64_t signature, struct xlog_cursor *cursor) { int rc = xdir_open_cursor(dir, signature, cursor); if (rc == -1) diag_raise(); return rc; } /** * @copydoc xlog_cursor_openfd */ static inline int xlog_cursor_openfd_xc(struct xlog_cursor *cursor, int fd, const char *name) { int rc = xlog_cursor_openfd(cursor, fd, name); if (rc == -1) diag_raise(); return rc; } /** * @copydoc xlog_cursor_open */ static inline int xlog_cursor_open_xc(struct xlog_cursor *cursor, const char *name) { int rc = xlog_cursor_open(cursor, name); if (rc == -1) diag_raise(); return rc; } /** * @copydoc xlog_cursor_next */ static inline int xlog_cursor_next_xc(struct xlog_cursor *cursor, struct xrow_header *xrow, bool force_recovery) { int rc = xlog_cursor_next(cursor, xrow, force_recovery); if (rc == -1) diag_raise(); return rc; } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_XLOG_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_engine.c0000664000000000000000000007032113306565107020421 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "memtx_engine.h" #include "memtx_space.h" #include "memtx_tuple.h" #include #include #include "coio_file.h" #include "tuple.h" #include "txn.h" #include "memtx_tree.h" #include "iproto_constants.h" #include "xrow.h" #include "xstream.h" #include "bootstrap.h" #include "replication.h" #include "schema.h" #include "gc.h" /** For all memory used by all indexes. * If you decide to use memtx_index_arena or * memtx_index_slab_cache for anything other than * memtx_index_extent_pool, make sure this is reflected in * box.slab.info(), @sa lua/slab.cc */ extern struct quota memtx_quota; static bool memtx_index_arena_initialized = false; struct slab_arena memtx_arena; /* used by memtx_tuple.cc */ static struct slab_cache memtx_index_slab_cache; struct mempool memtx_index_extent_pool; /** * To ensure proper statement-level rollback in case * of out of memory conditions, we maintain a number * of slack memory extents reserved before a statement * is begun. If there isn't enough slack memory, * we don't begin the statement. 
*/ static int memtx_index_num_reserved_extents; static void *memtx_index_reserved_extents; static void txn_on_yield_or_stop(struct trigger *trigger, void *event) { (void)trigger; (void)event; txn_rollback(); /* doesn't throw */ } static int memtx_end_build_primary_key(struct space *space, void *param) { struct memtx_space *memtx_space = (struct memtx_space *)space; if (space->engine != param || space_index(space, 0) == NULL || memtx_space->replace == memtx_space_replace_all_keys) return 0; index_end_build(space->index[0]); memtx_space->replace = memtx_space_replace_primary_key; return 0; } /** * Secondary indexes are built in bulk after all data is * recovered. This function enables secondary keys on a space. * Data dictionary spaces are an exception, they are fully * built right from the start. */ static int memtx_build_secondary_keys(struct space *space, void *param) { struct memtx_space *memtx_space = (struct memtx_space *)space; if (space->engine != param || space_index(space, 0) == NULL || memtx_space->replace == memtx_space_replace_all_keys) return 0; if (space->index_id_max > 0) { struct index *pk = space->index[0]; ssize_t n_tuples = index_size(pk); assert(n_tuples >= 0); if (n_tuples > 0) { say_info("Building secondary indexes in space '%s'...", space_name(space)); } for (uint32_t j = 1; j < space->index_count; j++) { if (index_build(space->index[j], pk) < 0) return -1; } if (n_tuples > 0) { say_info("Space '%s': done", space_name(space)); } } memtx_space->replace = memtx_space_replace_all_keys; return 0; } static void memtx_engine_shutdown(struct engine *engine) { struct memtx_engine *memtx = (struct memtx_engine *)engine; if (mempool_is_initialized(&memtx->tree_iterator_pool)) mempool_destroy(&memtx->tree_iterator_pool); if (mempool_is_initialized(&memtx->rtree_iterator_pool)) mempool_destroy(&memtx->rtree_iterator_pool); if (mempool_is_initialized(&memtx->hash_iterator_pool)) mempool_destroy(&memtx->hash_iterator_pool); if 
(mempool_is_initialized(&memtx->bitset_iterator_pool)) mempool_destroy(&memtx->bitset_iterator_pool); xdir_destroy(&memtx->snap_dir); free(memtx); memtx_tuple_free(); } static int memtx_engine_recover_snapshot_row(struct memtx_engine *memtx, struct xrow_header *row); int memtx_engine_recover_snapshot(struct memtx_engine *memtx, const struct vclock *vclock) { /* Process existing snapshot */ say_info("recovery start"); int64_t signature = vclock_sum(vclock); const char *filename = xdir_format_filename(&memtx->snap_dir, signature, NONE); say_info("recovering from `%s'", filename); struct xlog_cursor cursor; if (xlog_cursor_open(&cursor, filename) < 0) return -1; INSTANCE_UUID = cursor.meta.instance_uuid; int rc; struct xrow_header row; uint64_t row_count = 0; while ((rc = xlog_cursor_next(&cursor, &row, memtx->force_recovery)) == 0) { row.lsn = signature; rc = memtx_engine_recover_snapshot_row(memtx, &row); if (rc < 0) { if (!memtx->force_recovery) break; say_error("can't apply row: "); diag_log(); } ++row_count; if (row_count % 100000 == 0) { say_info("%.1fM rows processed", row_count / 1000000.); fiber_yield_timeout(0); } } xlog_cursor_close(&cursor, false); if (rc < 0) return -1; /** * We should never try to read snapshots with no EOF * marker - such snapshots are very likely corrupted and * should not be trusted. 
*/ if (!xlog_cursor_is_eof(&cursor)) panic("snapshot `%s' has no EOF marker", filename); return 0; } static int memtx_engine_recover_snapshot_row(struct memtx_engine *memtx, struct xrow_header *row) { assert(row->bodycnt == 1); /* always 1 for read */ if (row->type != IPROTO_INSERT) { diag_set(ClientError, ER_UNKNOWN_REQUEST_TYPE, (uint32_t) row->type); return -1; } struct request request; if (xrow_decode_dml(row, &request, dml_request_key_map(row->type)) != 0) return -1; struct space *space = space_cache_find(request.space_id); if (space == NULL) return -1; /* memtx snapshot must contain only memtx spaces */ if (space->engine != (struct engine *)memtx) { diag_set(ClientError, ER_CROSS_ENGINE_TRANSACTION); return -1; } /* no access checks here - applier always works with admin privs */ if (space_apply_initial_join_row(space, &request) != 0) return -1; /* * Don't let gc pool grow too much. Yet to * it before reading the next row, to make * sure it's not freed along here. */ fiber_gc(); return 0; } /** Called at start to tell memtx to recover to a given LSN. */ static int memtx_engine_begin_initial_recovery(struct engine *engine, const struct vclock *vclock) { (void)vclock; struct memtx_engine *memtx = (struct memtx_engine *)engine; assert(memtx->state == MEMTX_INITIALIZED); /* * By default, enable fast start: bulk read of tuples * from the snapshot, in which they are stored in key * order, and bulk build of the primary key. * * If force_recovery = true, it's a disaster * recovery mode. Enable all keys on start, to detect and * discard duplicates in the snapshot. */ memtx->state = (memtx->force_recovery ? MEMTX_OK : MEMTX_INITIAL_RECOVERY); return 0; } static int memtx_engine_begin_final_recovery(struct engine *engine) { struct memtx_engine *memtx = (struct memtx_engine *)engine; if (memtx->state == MEMTX_OK) return 0; assert(memtx->state == MEMTX_INITIAL_RECOVERY); /* End of the fast path: loaded the primary key. 
*/ space_foreach(memtx_end_build_primary_key, memtx); if (!memtx->force_recovery) { /* * Fast start path: "play out" WAL * records using the primary key only, * then bulk-build all secondary keys. */ memtx->state = MEMTX_FINAL_RECOVERY; } else { /* * If force_recovery = true, it's * a disaster recovery mode. Build * secondary keys before reading the WAL, * to detect and discard duplicates in * unique keys. */ memtx->state = MEMTX_OK; if (space_foreach(memtx_build_secondary_keys, memtx) != 0) return -1; } return 0; } static int memtx_engine_end_recovery(struct engine *engine) { struct memtx_engine *memtx = (struct memtx_engine *)engine; /* * Recovery is started with enabled keys when: * - either of force_recovery * is false * - it's a replication join */ if (memtx->state != MEMTX_OK) { assert(memtx->state == MEMTX_FINAL_RECOVERY); memtx->state = MEMTX_OK; if (space_foreach(memtx_build_secondary_keys, memtx) != 0) return -1; } return 0; } static struct space * memtx_engine_create_space(struct engine *engine, struct space_def *def, struct rlist *key_list) { struct memtx_engine *memtx = (struct memtx_engine *)engine; return memtx_space_new(memtx, def, key_list); } static int memtx_engine_prepare(struct engine *engine, struct txn *txn) { (void)engine; if (txn->is_autocommit) return 0; /* * These triggers are only used for memtx and only * when autocommit == false, so we are saving * on calls to trigger_create/trigger_clear. */ trigger_clear(&txn->fiber_on_yield); trigger_clear(&txn->fiber_on_stop); return 0; } static int memtx_engine_begin(struct engine *engine, struct txn *txn) { (void)engine; /* * Register a trigger to rollback transaction on yield. * This must be done in begin(), since it's * the first thing txn invokes after txn->n_stmts++, * to match with trigger_clear() in rollbackStatement(). 
*/ if (txn->is_autocommit == false) { trigger_create(&txn->fiber_on_yield, txn_on_yield_or_stop, NULL, NULL); trigger_create(&txn->fiber_on_stop, txn_on_yield_or_stop, NULL, NULL); /* * Memtx doesn't allow yields between statements of * a transaction. Set a trigger which would roll * back the transaction if there is a yield. */ trigger_add(&fiber()->on_yield, &txn->fiber_on_yield); trigger_add(&fiber()->on_stop, &txn->fiber_on_stop); } return 0; } static int memtx_engine_begin_statement(struct engine *engine, struct txn *txn) { (void)engine; (void)txn; return 0; } static void memtx_engine_rollback_statement(struct engine *engine, struct txn *txn, struct txn_stmt *stmt) { (void)engine; (void)txn; if (stmt->old_tuple == NULL && stmt->new_tuple == NULL) return; struct space *space = stmt->space; struct memtx_space *memtx_space = (struct memtx_space *)space; int index_count; /* Only roll back the changes if they were made. */ if (stmt->engine_savepoint == NULL) index_count = 0; else if (memtx_space->replace == memtx_space_replace_all_keys) index_count = space->index_count; else if (memtx_space->replace == memtx_space_replace_primary_key) index_count = 1; else panic("transaction rolled back during snapshot recovery"); for (int i = 0; i < index_count; i++) { struct tuple *unused; struct index *index = space->index[i]; /* Rollback must not fail. */ if (index_replace(index, stmt->new_tuple, stmt->old_tuple, DUP_INSERT, &unused) != 0) { diag_log(); unreachable(); panic("failed to rollback change"); } } /** Reset to old bsize, if it was changed. 
*/ if (stmt->engine_savepoint != NULL) memtx_space_update_bsize(space, stmt->new_tuple, stmt->old_tuple); if (stmt->new_tuple) tuple_unref(stmt->new_tuple); stmt->old_tuple = NULL; stmt->new_tuple = NULL; } static void memtx_engine_rollback(struct engine *engine, struct txn *txn) { memtx_engine_prepare(engine, txn); struct txn_stmt *stmt; stailq_reverse(&txn->stmts); stailq_foreach_entry(stmt, &txn->stmts, next) memtx_engine_rollback_statement(engine, txn, stmt); } static void memtx_engine_commit(struct engine *engine, struct txn *txn) { (void)engine; struct txn_stmt *stmt; stailq_foreach_entry(stmt, &txn->stmts, next) { if (stmt->old_tuple) tuple_unref(stmt->old_tuple); } } static int memtx_engine_bootstrap(struct engine *engine) { struct memtx_engine *memtx = (struct memtx_engine *)engine; assert(memtx->state == MEMTX_INITIALIZED); memtx->state = MEMTX_OK; /* Recover from bootstrap.snap */ say_info("initializing an empty data directory"); struct xlog_cursor cursor; if (xlog_cursor_openmem(&cursor, (const char *)bootstrap_bin, sizeof(bootstrap_bin), "bootstrap") < 0) return -1; int rc; struct xrow_header row; while ((rc = xlog_cursor_next(&cursor, &row, true)) == 0) { rc = memtx_engine_recover_snapshot_row(memtx, &row); if (rc < 0) break; } xlog_cursor_close(&cursor, false); return rc < 0 ? -1 : 0; } static int checkpoint_write_row(struct xlog *l, struct xrow_header *row) { static ev_tstamp last = 0; if (last == 0) { ev_now_update(loop()); last = ev_now(loop()); } row->tm = last; row->replica_id = 0; /** * Rows in snapshot are numbered from 1 to %rows. * This makes streaming such rows to a replica or * to recovery look similar to streaming a normal * WAL. @sa the place which skips old rows in * recovery_apply_row(). 
*/ row->lsn = l->rows + l->tx_rows; row->sync = 0; /* don't write sync to wal */ ssize_t written = xlog_write_row(l, row); fiber_gc(); if (written < 0) return -1; if ((l->rows + l->tx_rows) % 100000 == 0) say_crit("%.1fM rows written", (l->rows + l->tx_rows) / 1000000.0); return 0; } static int checkpoint_write_tuple(struct xlog *l, uint32_t space_id, const char *data, uint32_t size) { struct request_replace_body body; body.m_body = 0x82; /* map of two elements. */ body.k_space_id = IPROTO_SPACE_ID; body.m_space_id = 0xce; /* uint32 */ body.v_space_id = mp_bswap_u32(space_id); body.k_tuple = IPROTO_TUPLE; struct xrow_header row; memset(&row, 0, sizeof(struct xrow_header)); row.type = IPROTO_INSERT; row.bodycnt = 2; row.body[0].iov_base = &body; row.body[0].iov_len = sizeof(body); row.body[1].iov_base = (char *)data; row.body[1].iov_len = size; return checkpoint_write_row(l, &row); } struct checkpoint_entry { struct space *space; struct snapshot_iterator *iterator; struct rlist link; }; struct checkpoint { /** * List of MemTX spaces to snapshot, with consistent * read view iterators. */ struct rlist entries; uint64_t snap_io_rate_limit; struct cord cord; bool waiting_for_snap_thread; /** The vclock of the snapshot file. */ struct vclock *vclock; struct xdir dir; /** * Do nothing, just touch the snapshot file - the * checkpoint already exists. 
*/ bool touch; }; static int checkpoint_init(struct checkpoint *ckpt, const char *snap_dirname, uint64_t snap_io_rate_limit) { rlist_create(&ckpt->entries); ckpt->waiting_for_snap_thread = false; xdir_create(&ckpt->dir, snap_dirname, SNAP, &INSTANCE_UUID); ckpt->snap_io_rate_limit = snap_io_rate_limit; /* May be used in abortCheckpoint() */ ckpt->vclock = malloc(sizeof(*ckpt->vclock)); if (ckpt->vclock == NULL) { diag_set(OutOfMemory, sizeof(*ckpt->vclock), "malloc", "vclock"); return -1; } vclock_create(ckpt->vclock); ckpt->touch = false; return 0; } static void checkpoint_destroy(struct checkpoint *ckpt) { struct checkpoint_entry *entry; rlist_foreach_entry(entry, &ckpt->entries, link) { entry->iterator->free(entry->iterator); } rlist_create(&ckpt->entries); xdir_destroy(&ckpt->dir); free(ckpt->vclock); } static int checkpoint_add_space(struct space *sp, void *data) { if (space_is_temporary(sp)) return 0; if (!space_is_memtx(sp)) return 0; struct index *pk = space_index(sp, 0); if (!pk) return 0; struct checkpoint *ckpt = (struct checkpoint *)data; struct checkpoint_entry *entry; entry = region_alloc_object(&fiber()->gc, struct checkpoint_entry); if (entry == NULL) { diag_set(OutOfMemory, sizeof(*entry), "region", "struct checkpoint_entry"); return -1; } rlist_add_tail_entry(&ckpt->entries, entry, link); entry->space = sp; entry->iterator = index_create_snapshot_iterator(pk); if (entry->iterator == NULL) return -1; return 0; }; static int checkpoint_f(va_list ap) { struct checkpoint *ckpt = va_arg(ap, struct checkpoint *); if (ckpt->touch) { if (xdir_touch_xlog(&ckpt->dir, ckpt->vclock) == 0) return 0; /* * Failed to touch an existing snapshot, create * a new one. 
*/ ckpt->touch = false; } struct xlog snap; if (xdir_create_xlog(&ckpt->dir, &snap, ckpt->vclock) != 0) return -1; snap.rate_limit = ckpt->snap_io_rate_limit; say_info("saving snapshot `%s'", snap.filename); struct checkpoint_entry *entry; rlist_foreach_entry(entry, &ckpt->entries, link) { uint32_t size; const char *data; struct snapshot_iterator *it = entry->iterator; for (data = it->next(it, &size); data != NULL; data = it->next(it, &size)) { if (checkpoint_write_tuple(&snap, space_id(entry->space), data, size) != 0) { xlog_close(&snap, false); return -1; } } } if (xlog_flush(&snap) < 0) { xlog_close(&snap, false); return -1; } xlog_close(&snap, false); say_info("done"); return 0; } static int memtx_engine_begin_checkpoint(struct engine *engine) { struct memtx_engine *memtx = (struct memtx_engine *)engine; assert(memtx->checkpoint == NULL); memtx->checkpoint = region_alloc_object(&fiber()->gc, struct checkpoint); if (memtx->checkpoint == NULL) { diag_set(OutOfMemory, sizeof(*memtx->checkpoint), "region", "struct checkpoint"); return -1; } if (checkpoint_init(memtx->checkpoint, memtx->snap_dir.dirname, memtx->snap_io_rate_limit) != 0) return -1; if (space_foreach(checkpoint_add_space, memtx->checkpoint) != 0) { checkpoint_destroy(memtx->checkpoint); memtx->checkpoint = NULL; return -1; } /* increment snapshot version; set tuple deletion to delayed mode */ memtx_tuple_begin_snapshot(); return 0; } static int memtx_engine_wait_checkpoint(struct engine *engine, struct vclock *vclock) { struct memtx_engine *memtx = (struct memtx_engine *)engine; assert(memtx->checkpoint != NULL); /* * If a snapshot already exists, do not create a new one. 
*/ struct vclock last; if (xdir_last_vclock(&memtx->snap_dir, &last) >= 0 && vclock_compare(&last, vclock) == 0) { memtx->checkpoint->touch = true; } vclock_copy(memtx->checkpoint->vclock, vclock); if (cord_costart(&memtx->checkpoint->cord, "snapshot", checkpoint_f, memtx->checkpoint)) { return -1; } memtx->checkpoint->waiting_for_snap_thread = true; /* wait for memtx-part snapshot completion */ int result = cord_cojoin(&memtx->checkpoint->cord); if (result != 0) diag_log(); memtx->checkpoint->waiting_for_snap_thread = false; return result; } static void memtx_engine_commit_checkpoint(struct engine *engine, struct vclock *vclock) { (void) vclock; struct memtx_engine *memtx = (struct memtx_engine *)engine; /* beginCheckpoint() must have been done */ assert(memtx->checkpoint != NULL); /* waitCheckpoint() must have been done. */ assert(!memtx->checkpoint->waiting_for_snap_thread); memtx_tuple_end_snapshot(); if (!memtx->checkpoint->touch) { int64_t lsn = vclock_sum(memtx->checkpoint->vclock); struct xdir *dir = &memtx->checkpoint->dir; /* rename snapshot on completion */ char to[PATH_MAX]; snprintf(to, sizeof(to), "%s", xdir_format_filename(dir, lsn, NONE)); char *from = xdir_format_filename(dir, lsn, INPROGRESS); #ifndef NDEBUG struct errinj *delay = errinj(ERRINJ_SNAP_COMMIT_DELAY, ERRINJ_BOOL); if (delay != NULL && delay->bparam) { while (delay->bparam) fiber_sleep(0.001); } #endif int rc = coio_rename(from, to); if (rc != 0) panic("can't rename .snap.inprogress"); } struct vclock last; if (xdir_last_vclock(&memtx->snap_dir, &last) < 0 || vclock_compare(&last, vclock) != 0) { /* Add the new checkpoint to the set. */ xdir_add_vclock(&memtx->snap_dir, memtx->checkpoint->vclock); /* Prevent checkpoint_destroy() from freeing vclock. 
*/ memtx->checkpoint->vclock = NULL; } checkpoint_destroy(memtx->checkpoint); memtx->checkpoint = NULL; } static void memtx_engine_abort_checkpoint(struct engine *engine) { struct memtx_engine *memtx = (struct memtx_engine *)engine; /** * An error in the other engine's first phase. */ if (memtx->checkpoint->waiting_for_snap_thread) { /* wait for memtx-part snapshot completion */ if (cord_cojoin(&memtx->checkpoint->cord) != 0) diag_log(); memtx->checkpoint->waiting_for_snap_thread = false; } memtx_tuple_end_snapshot(); /** Remove garbage .inprogress file. */ char *filename = xdir_format_filename(&memtx->checkpoint->dir, vclock_sum(memtx->checkpoint->vclock), INPROGRESS); (void) coio_unlink(filename); checkpoint_destroy(memtx->checkpoint); memtx->checkpoint = NULL; } static int memtx_engine_collect_garbage(struct engine *engine, int64_t lsn) { struct memtx_engine *memtx = (struct memtx_engine *)engine; /* * We recover the checkpoint list by scanning the snapshot * directory so deletion of an xlog file or a file that * belongs to another engine without the corresponding snap * file would result in a corrupted checkpoint on the list. * That said, we have to abort garbage collection if we * fail to delete a snap file. */ if (xdir_collect_garbage(&memtx->snap_dir, lsn, true) != 0) return -1; return 0; } static int memtx_engine_backup(struct engine *engine, struct vclock *vclock, engine_backup_cb cb, void *cb_arg) { struct memtx_engine *memtx = (struct memtx_engine *)engine; char *filename = xdir_format_filename(&memtx->snap_dir, vclock_sum(vclock), NONE); return cb(filename, cb_arg); } /** Used to pass arguments to memtx_initial_join_f */ struct memtx_join_arg { const char *snap_dirname; int64_t checkpoint_lsn; struct xstream *stream; }; /** * Invoked from a thread to feed snapshot rows. 
*/ static int memtx_initial_join_f(va_list ap) { struct memtx_join_arg *arg = va_arg(ap, struct memtx_join_arg *); const char *snap_dirname = arg->snap_dirname; int64_t checkpoint_lsn = arg->checkpoint_lsn; struct xstream *stream = arg->stream; struct xdir dir; /* * snap_dirname and INSTANCE_UUID don't change after start, * safe to use in another thread. */ xdir_create(&dir, snap_dirname, SNAP, &INSTANCE_UUID); struct xlog_cursor cursor; int rc = xdir_open_cursor(&dir, checkpoint_lsn, &cursor); xdir_destroy(&dir); if (rc < 0) return -1; struct xrow_header row; while ((rc = xlog_cursor_next(&cursor, &row, true)) == 0) { rc = xstream_write(stream, &row); if (rc < 0) break; } xlog_cursor_close(&cursor, false); if (rc < 0) return -1; /** * We should never try to read snapshots with no EOF * marker - such snapshots are very likely corrupted and * should not be trusted. */ /* TODO: replace panic with diag_set() */ if (!xlog_cursor_is_eof(&cursor)) panic("snapshot `%s' has no EOF marker", cursor.name); return 0; } static int memtx_engine_join(struct engine *engine, struct vclock *vclock, struct xstream *stream) { struct memtx_engine *memtx = (struct memtx_engine *)engine; /* * cord_costart() passes only void * pointer as an argument. 
*/ struct memtx_join_arg arg = { /* .snap_dirname = */ memtx->snap_dir.dirname, /* .checkpoint_lsn = */ vclock_sum(vclock), /* .stream = */ stream }; /* Send snapshot using a thread */ struct cord cord; cord_costart(&cord, "initial_join", memtx_initial_join_f, &arg); return cord_cojoin(&cord); } static int small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx) { (void)stats; (void)cb_ctx; return 0; } static void memtx_engine_memory_stat(struct engine *engine, struct engine_memory_stat *stat) { (void)engine; struct small_stats data_stats; struct mempool_stats index_stats; mempool_stats(&memtx_index_extent_pool, &index_stats); small_stats(&memtx_alloc, &data_stats, small_stats_noop_cb, NULL); stat->data += data_stats.used; stat->index += index_stats.totals.used; } static int memtx_engine_check_space_def(struct space_def *def) { (void)def; return 0; } static const struct engine_vtab memtx_engine_vtab = { /* .shutdown = */ memtx_engine_shutdown, /* .create_space = */ memtx_engine_create_space, /* .join = */ memtx_engine_join, /* .begin = */ memtx_engine_begin, /* .begin_statement = */ memtx_engine_begin_statement, /* .prepare = */ memtx_engine_prepare, /* .commit = */ memtx_engine_commit, /* .rollback_statement = */ memtx_engine_rollback_statement, /* .rollback = */ memtx_engine_rollback, /* .bootstrap = */ memtx_engine_bootstrap, /* .begin_initial_recovery = */ memtx_engine_begin_initial_recovery, /* .begin_final_recovery = */ memtx_engine_begin_final_recovery, /* .end_recovery = */ memtx_engine_end_recovery, /* .begin_checkpoint = */ memtx_engine_begin_checkpoint, /* .wait_checkpoint = */ memtx_engine_wait_checkpoint, /* .commit_checkpoint = */ memtx_engine_commit_checkpoint, /* .abort_checkpoint = */ memtx_engine_abort_checkpoint, /* .collect_garbage = */ memtx_engine_collect_garbage, /* .backup = */ memtx_engine_backup, /* .memory_stat = */ memtx_engine_memory_stat, /* .check_space_def = */ memtx_engine_check_space_def, }; struct memtx_engine * 
memtx_engine_new(const char *snap_dirname, bool force_recovery, uint64_t tuple_arena_max_size, uint32_t objsize_min, float alloc_factor) { memtx_tuple_init(tuple_arena_max_size, objsize_min, alloc_factor); struct memtx_engine *memtx = calloc(1, sizeof(*memtx)); if (memtx == NULL) { diag_set(OutOfMemory, sizeof(*memtx), "malloc", "struct memtx_engine"); return NULL; } xdir_create(&memtx->snap_dir, snap_dirname, SNAP, &INSTANCE_UUID); memtx->snap_dir.force_recovery = force_recovery; if (xdir_scan(&memtx->snap_dir) != 0) { xdir_destroy(&memtx->snap_dir); free(memtx); return NULL; } memtx->state = MEMTX_INITIALIZED; memtx->force_recovery = force_recovery; memtx->base.vtab = &memtx_engine_vtab; memtx->base.name = "memtx"; return memtx; } void memtx_engine_set_snap_io_rate_limit(struct memtx_engine *memtx, double limit) { memtx->snap_io_rate_limit = limit * 1024 * 1024; } void memtx_engine_set_max_tuple_size(struct memtx_engine *memtx, size_t max_size) { (void)memtx; memtx_max_tuple_size = max_size; } /** * Initialize arena for indexes. * The arena is used for memtx_index_extent_alloc * and memtx_index_extent_free. * Can be called several times, only first call do the work. */ void memtx_index_arena_init(void) { if (memtx_index_arena_initialized) { /* already done.. 
*/ return; } /* Creating slab cache */ slab_cache_create(&memtx_index_slab_cache, &memtx_arena); /* Creating mempool */ mempool_create(&memtx_index_extent_pool, &memtx_index_slab_cache, MEMTX_EXTENT_SIZE); /* Empty reserved list */ memtx_index_num_reserved_extents = 0; memtx_index_reserved_extents = 0; /* Done */ memtx_index_arena_initialized = true; } /** * Allocate a block of size MEMTX_EXTENT_SIZE for memtx index */ void * memtx_index_extent_alloc(void *ctx) { (void)ctx; if (memtx_index_reserved_extents) { assert(memtx_index_num_reserved_extents > 0); memtx_index_num_reserved_extents--; void *result = memtx_index_reserved_extents; memtx_index_reserved_extents = *(void **) memtx_index_reserved_extents; return result; } ERROR_INJECT(ERRINJ_INDEX_ALLOC, { /* same error as in mempool_alloc */ diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "mempool", "new slab"); return NULL; }); void *ret = mempool_alloc(&memtx_index_extent_pool); if (ret == NULL) diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "mempool", "new slab"); return ret; } /** * Free a block previously allocated by memtx_index_extent_alloc */ void memtx_index_extent_free(void *ctx, void *extent) { (void)ctx; return mempool_free(&memtx_index_extent_pool, extent); } /** * Reserve num extents in pool. 
* Ensure that next num extent_alloc will succeed w/o an error */ int memtx_index_extent_reserve(int num) { ERROR_INJECT(ERRINJ_INDEX_ALLOC, { /* same error as in mempool_alloc */ diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "mempool", "new slab"); return -1; }); while (memtx_index_num_reserved_extents < num) { void *ext = mempool_alloc(&memtx_index_extent_pool); if (ext == NULL) { diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "mempool", "new slab"); return -1; } *(void **)ext = memtx_index_reserved_extents; memtx_index_reserved_extents = ext; memtx_index_num_reserved_extents++; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/relay.h0000664000000000000000000000510413306565107017060 0ustar rootroot#ifndef TARANTOOL_REPLICATION_RELAY_H_INCLUDED #define TARANTOOL_REPLICATION_RELAY_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct relay; struct replica; struct tt_uuid; struct vclock; /** * Returns relay's vclock * @param relay relay * @returns relay's vclock */ const struct vclock * relay_vclock(const struct relay *relay); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ /** * Send initial JOIN rows to the replica * * @param fd client connection * @param sync sync from incoming JOIN request * @param vclock vclock of the last checkpoint */ void relay_initial_join(int fd, uint64_t sync, struct vclock *vclock); /** * Send final JOIN rows to the replica. * * @param fd client connection * @param sync sync from incoming JOIN request */ void relay_final_join(int fd, uint64_t sync, struct vclock *start_vclock, struct vclock *stop_vclock); /** * Subscribe a replica to updates. * * @return none. */ void relay_subscribe(int fd, uint64_t sync, struct replica *replica, struct vclock *replica_vclock, uint32_t replica_version_id); #endif /* TARANTOOL_REPLICATION_RELAY_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple.h0000664000000000000000000006224613306565107017107 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_H_INCLUDED #define TARANTOOL_BOX_TUPLE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include "say.h" #include "diag.h" #include "error.h" #include "tt_uuid.h" /* tuple_field_uuid */ #include "tuple_format.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct slab_arena; struct quota; /** * A format for standalone tuples allocated on runtime arena. * \sa tuple_new(). */ extern struct tuple_format *tuple_format_runtime; /** Initialize tuple library */ int tuple_init(field_name_hash_f hash); /** Cleanup tuple library */ void tuple_free(void); /** * Initialize tuples arena. * @param arena[out] Arena to initialize. * @param quota Arena's quota. * @param arena_max_size Maximal size of @arena. * @param arena_name Name of @arena for logs. 
*/ void tuple_arena_create(struct slab_arena *arena, struct quota *quota, uint64_t arena_max_size, uint32_t slab_size, const char *arena_name); void tuple_arena_destroy(struct slab_arena *arena); /** \cond public */ typedef struct tuple_format box_tuple_format_t; /** * Tuple Format. * * Each Tuple has associated format (class). Default format is used to * create tuples which are not attach to any particular space. */ box_tuple_format_t * box_tuple_format_default(void); /** * Tuple */ typedef struct tuple box_tuple_t; /** * Increase the reference counter of tuple. * * Tuples are reference counted. All functions that return tuples guarantee * that the last returned tuple is refcounted internally until the next * call to API function that yields or returns another tuple. * * You should increase the reference counter before taking tuples for long * processing in your code. Such tuples will not be garbage collected even * if another fiber remove they from space. After processing please * decrement the reference counter using box_tuple_unref(), otherwise the * tuple will leak. * * \param tuple a tuple * \retval -1 on error (check box_error_last()) * \retval 0 on success * \sa box_tuple_unref() */ int box_tuple_ref(box_tuple_t *tuple); /** * Decrease the reference counter of tuple. * * \param tuple a tuple * \sa box_tuple_ref() */ void box_tuple_unref(box_tuple_t *tuple); /** * Return the number of fields in tuple (the size of MsgPack Array). * \param tuple a tuple */ uint32_t box_tuple_field_count(const box_tuple_t *tuple); /** * Return the number of bytes used to store internal tuple data (MsgPack Array). * \param tuple a tuple */ size_t box_tuple_bsize(const box_tuple_t *tuple); /** * Dump raw MsgPack data to the memory byffer \a buf of size \a size. * * Store tuple fields in the memory buffer. * \retval -1 on error. * \retval number of bytes written on success. * Upon successful return, the function returns the number of bytes written. 
* If buffer size is not enough then the return value is the number of bytes * which would have been written if enough space had been available. */ ssize_t box_tuple_to_buf(const box_tuple_t *tuple, char *buf, size_t size); /** * Return the associated format. * \param tuple tuple * \return tuple_format */ box_tuple_format_t * box_tuple_format(const box_tuple_t *tuple); /** * Return the raw tuple field in MsgPack format. * * The buffer is valid until next call to box_tuple_* functions. * * \param tuple a tuple * \param fieldno zero-based index in MsgPack array. * \retval NULL if i >= box_tuple_field_count(tuple) * \retval msgpack otherwise */ const char * box_tuple_field(const box_tuple_t *tuple, uint32_t fieldno); /** * Tuple iterator */ typedef struct tuple_iterator box_tuple_iterator_t; /** * Allocate and initialize a new tuple iterator. The tuple iterator * allow to iterate over fields at root level of MsgPack array. * * Example: * \code * box_tuple_iterator *it = box_tuple_iterator(tuple); * if (it == NULL) { * // error handling using box_error_last() * } * const char *field; * while (field = box_tuple_next(it)) { * // process raw MsgPack data * } * * // rewind iterator to first position * box_tuple_rewind(it); * assert(box_tuple_position(it) == 0); * * // rewind iterator to first position * field = box_tuple_seek(it, 3); * assert(box_tuple_position(it) == 4); * * box_iterator_free(it); * \endcode * * \post box_tuple_position(it) == 0 */ box_tuple_iterator_t * box_tuple_iterator(box_tuple_t *tuple); /** * Destroy and free tuple iterator */ void box_tuple_iterator_free(box_tuple_iterator_t *it); /** * Return zero-based next position in iterator. * That is, this function return the field id of field that will be * returned by the next call to box_tuple_next(it). Returned value is zero * after initialization or rewind and box_tuple_field_count(tuple) * after the end of iteration. * * \param it tuple iterator * \returns position. 
*/ uint32_t box_tuple_position(box_tuple_iterator_t *it); /** * Rewind iterator to the initial position. * * \param it tuple iterator * \post box_tuple_position(it) == 0 */ void box_tuple_rewind(box_tuple_iterator_t *it); /** * Seek the tuple iterator. * * The returned buffer is valid until next call to box_tuple_* API. * Requested fieldno returned by next call to box_tuple_next(it). * * \param it tuple iterator * \param fieldno - zero-based position in MsgPack array. * \post box_tuple_position(it) == fieldno if returned value is not NULL * \post box_tuple_position(it) == box_tuple_field_count(tuple) if returned * value is NULL. */ const char * box_tuple_seek(box_tuple_iterator_t *it, uint32_t fieldno); /** * Return the next tuple field from tuple iterator. * The returned buffer is valid until next call to box_tuple_* API. * * \param it tuple iterator. * \retval NULL if there are no more fields. * \retval MsgPack otherwise * \pre box_tuple_position(it) is zerod-based id of returned field * \post box_tuple_position(it) == box_tuple_field_count(tuple) if returned * value is NULL. */ const char * box_tuple_next(box_tuple_iterator_t *it); /** * Allocate and initialize a new tuple from a raw MsgPack Array data. * * \param format tuple format. * Use box_tuple_format_default() to create space-independent tuple. * \param data tuple data in MsgPack Array format ([field1, field2, ...]). * \param end the end of \a data * \retval NULL on out of memory * \retval tuple otherwise * \pre data, end is valid MsgPack Array * \sa \code box.tuple.new(data) \endcode */ box_tuple_t * box_tuple_new(box_tuple_format_t *format, const char *data, const char *end); box_tuple_t * box_tuple_update(const box_tuple_t *tuple, const char *expr, const char *expr_end); box_tuple_t * box_tuple_upsert(const box_tuple_t *tuple, const char *expr, const char *expr_end); /** \endcond public */ /** * An atom of Tarantool storage. Represents MsgPack Array. 
* Tuple has the following structure: * format->tuple_meta_size bsize * +------------------------+-------------+ * tuple_begin, ..., raw = | tuple_meta | MessagePack | * | +------------------------+-------------+ * | ^ * +---------------------------------------------data_offset * * tuple_meta structure: * +----------------------+-----------------------+ * | extra_size | offset N ... offset 1 | * +----------------------+-----------------------+ * @sa tuple_format_new() uint32 ... uint32 * * Each 'off_i' is the offset to the i-th indexed field. */ struct PACKED tuple { /** reference counter */ uint16_t refs; /** format identifier */ uint16_t format_id; /** * Length of the MessagePack data in raw part of the * tuple. */ uint32_t bsize; /** * Offset to the MessagePack from the begin of the tuple. */ uint16_t data_offset; /** * Engine specific fields and offsets array concatenated * with MessagePack fields array. * char raw[0]; */ }; /** Size of the tuple including size of struct tuple. */ static inline size_t tuple_size(const struct tuple *tuple) { /* data_offset includes sizeof(struct tuple). */ return tuple->data_offset + tuple->bsize; } /** * Get pointer to MessagePack data of the tuple. * @param tuple tuple. * @return MessagePack array. */ static inline const char * tuple_data(const struct tuple *tuple) { return (const char *) tuple + tuple->data_offset; } /** * Wrapper around tuple_data() which returns NULL if @tuple == NULL. */ static inline const char * tuple_data_or_null(const struct tuple *tuple) { return tuple != NULL ? tuple_data(tuple) : NULL; } /** * Get pointer to MessagePack data of the tuple. * @param tuple tuple. * @param[out] size Size in bytes of the MessagePack array. * @return MessagePack array. */ static inline const char * tuple_data_range(const struct tuple *tuple, uint32_t *p_size) { *p_size = tuple->bsize; return (const char *) tuple + tuple->data_offset; } /** * Format a tuple into string. 
* Example: [1, 2, "string"] * @param buf buffer to format tuple to * @param size buffer size. This function writes at most @a size bytes * (including the terminating null byte ('\0')) to @a buffer * @param tuple tuple to format * @retval the number of characters printed, excluding the null byte used * to end output to string. If the output was truncated due to this limit, * then the return value is the number of characters (excluding the * terminating null byte) which would have been written to the final string * if enough space had been available. * @see snprintf * @see mp_snprint */ int tuple_snprint(char *buf, int size, const struct tuple *tuple); /** * Format a tuple into string using a static buffer. * Useful for debugger. Example: [1, 2, "string"] * @param tuple to format * @return formatted null-terminated string */ const char * tuple_str(const struct tuple *tuple); /** * Get the format of the tuple. * @param tuple Tuple. * @retval Tuple format instance. */ static inline struct tuple_format * tuple_format(const struct tuple *tuple) { struct tuple_format *format = tuple_format_by_id(tuple->format_id); assert(tuple_format_id(format) == tuple->format_id); return format; } /** * Return extra data saved in tuple metadata. * @param tuple tuple * @return a pointer to extra data saved in tuple metadata. */ static inline const char * tuple_extra(const struct tuple *tuple) { struct tuple_format *format = tuple_format(tuple); return tuple_data(tuple) - tuple_format_meta_size(format); } /** * Instantiate a new engine-independent tuple from raw MsgPack Array data * using runtime arena. Use this function to create a standalone tuple * from Lua or C procedures. * * \param format tuple format. * \param data tuple data in MsgPack Array format ([field1, field2, ...]). 
* \param end the end of \a data * \retval tuple on success * \retval NULL on out of memory * \sa \code box.tuple.new(data) \endcode */ struct tuple * tuple_new(struct tuple_format *format, const char *data, const char *end); /** * Free the tuple of any engine. * @pre tuple->refs == 0 */ static inline void tuple_delete(struct tuple *tuple) { say_debug("%s(%p)", __func__, tuple); assert(tuple->refs == 0); struct tuple_format *format = tuple_format(tuple); format->vtab.destroy(format, tuple); } /** * Check tuple data correspondence to space format. * Actually checks everything that checks tuple_init_field_map. * @param format Format to which the tuple must match. * @param tuple MessagePack array. * * @retval 0 The tuple is valid. * @retval -1 The tuple is invalid. */ int tuple_validate_raw(struct tuple_format *format, const char *data); /** * Check tuple data correspondence to the space format. * @param format Format to which the tuple must match. * @param tuple Tuple to validate. * * @retval 0 The tuple is valid. * @retval -1 The tuple is invalid. */ static inline int tuple_validate(struct tuple_format *format, struct tuple *tuple) { return tuple_validate_raw(format, tuple_data(tuple)); } /* * Return a field map for the tuple. * @param tuple tuple * @returns a field map for the tuple. * @sa tuple_init_field_map() */ static inline const uint32_t * tuple_field_map(const struct tuple *tuple) { return (const uint32_t *) ((const char *) tuple + tuple->data_offset); } /** * @brief Return the number of fields in tuple * @param tuple * @return the number of fields in tuple */ static inline uint32_t tuple_field_count(const struct tuple *tuple) { const char *data = tuple_data(tuple); return mp_decode_array(&data); } /** * Get a field at the specific index in this tuple. 
* @param tuple tuple * @param fieldno the index of field to return * @param len pointer where the len of the field will be stored * @retval pointer to MessagePack data * @retval NULL when fieldno is out of range */ static inline const char * tuple_field(const struct tuple *tuple, uint32_t fieldno) { return tuple_field_raw(tuple_format(tuple), tuple_data(tuple), tuple_field_map(tuple), fieldno); } /** * Get tuple field by its name. * @param tuple Tuple to get field from. * @param name Field name. * @param name_len Length of @a name. * @param name_hash Hash of @a name. * * @retval not NULL MessagePack field. * @retval NULL No field with @a name. */ static inline const char * tuple_field_by_name(const struct tuple *tuple, const char *name, uint32_t name_len, uint32_t name_hash) { return tuple_field_raw_by_name(tuple_format(tuple), tuple_data(tuple), tuple_field_map(tuple), name, name_len, name_hash); } /** * @brief Tuple Interator */ struct tuple_iterator { /** @cond false **/ /* State */ struct tuple *tuple; /** Always points to the beginning of the next field. */ const char *pos; /** End of the tuple. */ const char *end; /** @endcond **/ /** field no of the next field. */ int fieldno; }; /** * @brief Initialize an iterator over tuple fields * * A workflow example: * @code * struct tuple_iterator it; * tuple_rewind(&it, tuple); * const char *field; * uint32_t len; * while ((field = tuple_next(&it, &len))) * lua_pushlstring(L, field, len); * * @endcode * * @param[out] it tuple iterator * @param[in] tuple tuple */ static inline void tuple_rewind(struct tuple_iterator *it, struct tuple *tuple) { it->tuple = tuple; uint32_t bsize; const char *data = tuple_data_range(tuple, &bsize); it->pos = data; (void) mp_decode_array(&it->pos); /* Skip array header */ it->fieldno = 0; it->end = data + bsize; } /** * @brief Position the iterator at a given field no. 
* * @retval field if the iterator has the requested field * @retval NULL otherwise (iteration is out of range) */ const char * tuple_seek(struct tuple_iterator *it, uint32_t fieldno); /** * @brief Iterate to the next field * @param it tuple iterator * @return next field or NULL if the iteration is out of range */ const char * tuple_next(struct tuple_iterator *it); /** * Assert that buffer is valid MessagePack array * @param tuple buffer * @param the end of the buffer */ static inline void mp_tuple_assert(const char *tuple, const char *tuple_end) { assert(mp_typeof(*tuple) == MP_ARRAY); #ifndef NDEBUG mp_next(&tuple); #endif assert(tuple == tuple_end); (void) tuple; (void) tuple_end; } static inline const char * tuple_field_with_type(const struct tuple *tuple, uint32_t fieldno, enum mp_type type) { const char *field = tuple_field(tuple, fieldno); if (field == NULL) { diag_set(ClientError, ER_NO_SUCH_FIELD, fieldno + TUPLE_INDEX_BASE); return NULL; } if (mp_typeof(*field) != type) { diag_set(ClientError, ER_FIELD_TYPE, fieldno + TUPLE_INDEX_BASE, mp_type_strs[type]); return NULL; } return field; } /** * A convenience shortcut for data dictionary - get a tuple field * as bool. */ static inline int tuple_field_bool(const struct tuple *tuple, uint32_t fieldno, bool *out) { const char *field = tuple_field_with_type(tuple, fieldno, MP_BOOL); if (field == NULL) return -1; *out = mp_decode_bool(&field); return 0; } /** * A convenience shortcut for data dictionary - get a tuple field * as int64_t. 
*/ static inline int tuple_field_i64(const struct tuple *tuple, uint32_t fieldno, int64_t *out) { const char *field = tuple_field(tuple, fieldno); if (field == NULL) { diag_set(ClientError, ER_NO_SUCH_FIELD, fieldno); return -1; } uint64_t val; switch (mp_typeof(*field)) { case MP_INT: *out = mp_decode_int(&field); break; case MP_UINT: val = mp_decode_uint(&field); if (val <= INT64_MAX) { *out = val; break; } FALLTHROUGH; default: diag_set(ClientError, ER_FIELD_TYPE, fieldno + TUPLE_INDEX_BASE, field_type_strs[FIELD_TYPE_INTEGER]); return -1; } return 0; } /** * A convenience shortcut for data dictionary - get a tuple field * as uint64_t. */ static inline int tuple_field_u64(const struct tuple *tuple, uint32_t fieldno, uint64_t *out) { const char *field = tuple_field_with_type(tuple, fieldno, MP_UINT); if (field == NULL) return -1; *out = mp_decode_uint(&field); return 0; } /** * A convenience shortcut for data dictionary - get a tuple field * as uint32_t. */ static inline int tuple_field_u32(const struct tuple *tuple, uint32_t fieldno, uint32_t *out) { const char *field = tuple_field_with_type(tuple, fieldno, MP_UINT); if (field == NULL) return -1; *out = mp_decode_uint(&field); if (*out > UINT32_MAX) { diag_set(ClientError, ER_FIELD_TYPE, fieldno + TUPLE_INDEX_BASE, field_type_strs[FIELD_TYPE_UNSIGNED]); return -1; } return 0; } /** * A convenience shortcut for data dictionary - get a tuple field * as a string. */ static inline const char * tuple_field_str(const struct tuple *tuple, uint32_t fieldno, uint32_t *len) { const char *field = tuple_field_with_type(tuple, fieldno, MP_STR); if (field == NULL) return NULL; return mp_decode_str(&field, len); } /** * A convenience shortcut for data dictionary - get a tuple field * as a NUL-terminated string - returns a string of up to 256 bytes. 
*/ static inline const char * tuple_field_cstr(const struct tuple *tuple, uint32_t fieldno) { uint32_t len; const char *str = tuple_field_str(tuple, fieldno, &len); if (str == NULL) return NULL; return tt_cstr(str, len); } /** * Parse a tuple field which is expected to contain a string * representation of UUID, and return a 16-byte representation. */ static inline int tuple_field_uuid(const struct tuple *tuple, int fieldno, struct tt_uuid *out) { const char *value = tuple_field_cstr(tuple, fieldno); if (tt_uuid_from_string(value, out) != 0) { diag_set(ClientError, ER_INVALID_UUID, value); return -1; } return 0; } enum { TUPLE_REF_MAX = UINT16_MAX }; /** * Increment tuple reference counter. * @param tuple Tuple to reference. * @retval 0 Success. * @retval -1 Too many refs error. */ static inline int tuple_ref(struct tuple *tuple) { if (tuple->refs + 1 > TUPLE_REF_MAX) { diag_set(ClientError, ER_TUPLE_REF_OVERFLOW); return -1; } tuple->refs++; return 0; } /** * Decrement tuple reference counter. If it has reached zero, free the tuple. * * @pre tuple->refs + count >= 0 */ static inline void tuple_unref(struct tuple *tuple) { assert(tuple->refs - 1 >= 0); tuple->refs--; if (tuple->refs == 0) tuple_delete(tuple); } extern struct tuple *box_tuple_last; /** * Convert internal `struct tuple` to public `box_tuple_t`. * \retval tuple on success * \retval NULL on error, check diag * \post \a tuple ref counted until the next call. 
* \post tuple_ref() doesn't fail at least once * \sa tuple_ref */ static inline box_tuple_t * tuple_bless(struct tuple *tuple) { assert(tuple != NULL); /* Ensure tuple can be referenced at least once after return */ if (tuple->refs + 2 > TUPLE_REF_MAX) { diag_set(ClientError, ER_TUPLE_REF_OVERFLOW); return NULL; } tuple->refs++; /* Remove previous tuple */ if (likely(box_tuple_last != NULL)) tuple_unref(box_tuple_last); /* do not throw */ /* Remember current tuple */ box_tuple_last = tuple; return tuple; } /** * \copydoc box_tuple_to_buf() */ ssize_t tuple_to_buf(const struct tuple *tuple, char *buf, size_t size); #if defined(__cplusplus) } /* extern "C" */ #include "tuple_update.h" #include "errinj.h" /** * \copydoc tuple_ref() * \throws if overflow detected. */ static inline void tuple_ref_xc(struct tuple *tuple) { if (tuple_ref(tuple)) diag_raise(); } /** * \copydoc tuple_bless * \throw ER_TUPLE_REF_OVERFLOW */ static inline box_tuple_t * tuple_bless_xc(struct tuple *tuple) { box_tuple_t *blessed = tuple_bless(tuple); if (blessed == NULL) diag_raise(); return blessed; } /** Make tuple references exception-friendly in absence of @finally. 
*/ struct TupleRefNil { struct tuple *tuple; TupleRefNil (struct tuple *arg) :tuple(arg) { if (tuple) tuple_ref_xc(tuple); } ~TupleRefNil() { if (tuple) tuple_unref(tuple); } TupleRefNil(const TupleRefNil&) = delete; void operator=(const TupleRefNil&) = delete; }; /* @copydoc tuple_field_with_type() */ static inline const char * tuple_field_with_type_xc(const struct tuple *tuple, uint32_t fieldno, enum mp_type type) { const char *out = tuple_field_with_type(tuple, fieldno, type); if (out == NULL) diag_raise(); return out; } /* @copydoc tuple_field_bool() */ static inline bool tuple_field_bool_xc(const struct tuple *tuple, uint32_t fieldno) { bool out; if (tuple_field_bool(tuple, fieldno, &out) != 0) diag_raise(); return out; } /* @copydoc tuple_field_i64() */ static inline int64_t tuple_field_i64_xc(const struct tuple *tuple, uint32_t fieldno) { int64_t out; if (tuple_field_i64(tuple, fieldno, &out) != 0) diag_raise(); return out; } /* @copydoc tuple_field_u64() */ static inline uint64_t tuple_field_u64_xc(const struct tuple *tuple, uint32_t fieldno) { uint64_t out; if (tuple_field_u64(tuple, fieldno, &out) != 0) diag_raise(); return out; } /* @copydoc tuple_field_u32() */ static inline uint32_t tuple_field_u32_xc(const struct tuple *tuple, uint32_t fieldno) { uint32_t out; if (tuple_field_u32(tuple, fieldno, &out) != 0) diag_raise(); return out; } /** @copydoc tuple_field_str() */ static inline const char * tuple_field_str_xc(const struct tuple *tuple, uint32_t fieldno, uint32_t *len) { const char *ret = tuple_field_str(tuple, fieldno, len); if (ret == NULL) diag_raise(); return ret; } /** @copydoc tuple_field_cstr() */ static inline const char * tuple_field_cstr_xc(const struct tuple *tuple, uint32_t fieldno) { const char *out = tuple_field_cstr(tuple, fieldno); if (out == NULL) diag_raise(); return out; } /** @copydoc tuple_field_uuid() */ static inline void tuple_field_uuid_xc(const struct tuple *tuple, int fieldno, struct tt_uuid *out) { if 
(tuple_field_uuid(tuple, fieldno, out) != 0) diag_raise(); } /** Return a tuple field and check its type. */ static inline const char * tuple_next_xc(struct tuple_iterator *it, enum mp_type type) { uint32_t fieldno = it->fieldno; const char *field = tuple_next(it); if (field == NULL) tnt_raise(ClientError, ER_NO_SUCH_FIELD, it->fieldno); if (mp_typeof(*field) != MP_UINT) { tnt_raise(ClientError, ER_FIELD_TYPE, fieldno + TUPLE_INDEX_BASE, mp_type_strs[type]); } return field; } /** * A convenience shortcut for the data dictionary - get next field * from iterator as uint32_t or raise an error if there is * no next field. */ static inline uint32_t tuple_next_u32_xc(struct tuple_iterator *it) { uint32_t fieldno = it->fieldno; const char *field = tuple_next_xc(it, MP_UINT); uint32_t val = mp_decode_uint(&field); if (val > UINT32_MAX) { tnt_raise(ClientError, ER_FIELD_TYPE, fieldno + TUPLE_INDEX_BASE, field_type_strs[FIELD_TYPE_UNSIGNED]); } return val; } /** * A convenience shortcut for the data dictionary - get next field * from iterator as a C string or raise an error if there is no * next field. */ static inline const char * tuple_next_cstr_xc(struct tuple_iterator *it) { const char *field = tuple_next_xc(it, MP_STR); uint32_t len = 0; const char *str = mp_decode_str(&field, &len); return tt_cstr(str, len); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TUPLE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_stmt.h0000664000000000000000000005027713306565107017464 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_STMT_H #define INCLUDES_TARANTOOL_BOX_VY_STMT_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include "tuple.h" #include "iproto_constants.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct xrow_header; struct region; struct tuple_format; struct iovec; #define MAX_LSN (INT64_MAX / 2) enum { VY_UPSERT_THRESHOLD = 128, VY_UPSERT_INF, }; static_assert(VY_UPSERT_THRESHOLD <= UINT8_MAX, "n_upserts max value"); static_assert(VY_UPSERT_INF == VY_UPSERT_THRESHOLD + 1, "inf must be threshold + 1"); /** Vinyl statement vtable. */ extern struct tuple_format_vtab vy_tuple_format_vtab; /** * Max tuple size * @see box.cfg.vinyl_max_tuple_size */ extern size_t vy_max_tuple_size; /** * There are two groups of statements: * * - SELECT is "key" statement. * - DELETE, UPSERT and REPLACE are "tuple" statements. * * REPLACE/UPSERT/DELETE statements structure: * data_offset * ^ * +----------------------------------+ * | 4 bytes 4 bytes MessagePack data. * | +------+----+------+---------------------------+- - - - - - . *tuple, ..., raw: | offN | .. 
| off1 | header ..|key1|..|keyN|.. | operations | * +--+---+----+--+---+---------------------------+- - - - - - . * | ... | ^ ^ * | +-----------------+ | * +--------------------------------------+ * Offsets are stored only for indexed fields, though MessagePack'ed tuple data * can contain also not indexed fields. For example, if fields 3 and 5 are * indexed then before MessagePack data are stored offsets only for field 3 and * field 5. * * SELECT statements structure. * +--------------+-----------------+ * | array header | part1 ... partN | - MessagePack data * +--------------+-----------------+ * * Field 'operations' is used for storing operations of UPSERT statement. */ struct vy_stmt { struct tuple base; int64_t lsn; uint8_t type; /* IPROTO_SELECT/REPLACE/UPSERT/DELETE */ /** * Number of UPSERT statements for the same key preceding * this statement. Used to trigger upsert squashing in the * background (see vy_range_set_upsert()). This member is * stored only for UPSERT statements in the extra memory * space before offsets table. * * uint8_t n_upserts; * * Offsets array concatenated with MessagePack fields * array. * char raw[0]; */ }; /** Get LSN of the vinyl statement. */ static inline int64_t vy_stmt_lsn(const struct tuple *stmt) { return ((const struct vy_stmt *) stmt)->lsn; } /** Set LSN of the vinyl statement. */ static inline void vy_stmt_set_lsn(struct tuple *stmt, int64_t lsn) { ((struct vy_stmt *) stmt)->lsn = lsn; } /** Get type of the vinyl statement. */ static inline enum iproto_type vy_stmt_type(const struct tuple *stmt) { return (enum iproto_type)((const struct vy_stmt *) stmt)->type; } /** Set type of the vinyl statement. */ static inline void vy_stmt_set_type(struct tuple *stmt, enum iproto_type type) { ((struct vy_stmt *) stmt)->type = type; } /** Get upserts count of the vinyl statement. 
*/ static inline uint8_t vy_stmt_n_upserts(const struct tuple *stmt) { assert(tuple_format(stmt)->extra_size == sizeof(uint8_t)); return *((const uint8_t *) tuple_extra(stmt)); } /** Set upserts count of the vinyl statement. */ static inline void vy_stmt_set_n_upserts(struct tuple *stmt, uint8_t n) { struct tuple_format *format = tuple_format(stmt); assert(format->extra_size == sizeof(uint8_t)); char *extra = (char *) stmt + stmt->data_offset - tuple_format_meta_size(format); *((uint8_t *) extra) = n; } /** Get the column mask of the specified tuple. */ static inline uint64_t vy_stmt_column_mask(const struct tuple *tuple) { enum iproto_type type = vy_stmt_type(tuple); assert(type == IPROTO_INSERT || type == IPROTO_REPLACE || type == IPROTO_DELETE); (void) type; if (tuple_format(tuple)->extra_size == sizeof(uint64_t)) { /* Tuple has column mask */ const char *extra = tuple_extra(tuple); return load_u64(extra); } return UINT64_MAX; /* return default value */ } /** * Set the column mask in the tuple. * @param tuple Tuple to set column mask. * @param column_mask Bitmask of the updated columns. */ static inline void vy_stmt_set_column_mask(struct tuple *tuple, uint64_t column_mask) { enum iproto_type type = vy_stmt_type(tuple); assert(type == IPROTO_INSERT || type == IPROTO_REPLACE || type == IPROTO_DELETE); assert(tuple_format(tuple)->extra_size == sizeof(uint64_t)); (void) type; char *extra = (char *) tuple_extra(tuple); store_u64(extra, column_mask); } /** * Free the tuple of a vinyl space. * @pre tuple->refs == 0 */ void vy_tuple_delete(struct tuple_format *format, struct tuple *tuple); /** * Duplicate the statememnt. * * @param stmt statement * @return new statement of the same type with the same data. */ struct tuple * vy_stmt_dup(const struct tuple *stmt, struct tuple_format *format); struct lsregion; /** * Duplicate the statement, using the lsregion as allocator. * @param stmt Statement to duplicate. * @param lsregion Allocator. 
* @param alloc_id Allocation identifier for the lsregion. * * @retval not NULL The new statement with the same data. * @retval NULL Memory error. */ struct tuple * vy_stmt_dup_lsregion(const struct tuple *stmt, struct lsregion *lsregion, int64_t alloc_id); /** * Return true if @a stmt can be referenced. Now to be not refable * it must be allocated on lsregion. * @param stmt a statement * @retval true if @a stmt was allocated on lsregion * @retval false otherwise */ static inline bool vy_stmt_is_refable(const struct tuple *stmt) { return stmt->refs > 0; } /** * Ref tuple, if it exists (!= NULL) and can be referenced. * @sa vy_stmt_is_refable. * * @param tuple Tuple to ref or NULL. */ static inline void vy_stmt_ref_if_possible(struct tuple *stmt) { if (vy_stmt_is_refable(stmt)) tuple_ref(stmt); } /** * Unref tuple, if it exists (!= NULL) and can be unreferenced. * @sa vy_stmt_is_refable. * * @param tuple Tuple to unref or NULL. */ static inline void vy_stmt_unref_if_possible(struct tuple *stmt) { if (vy_stmt_is_refable(stmt)) tuple_unref(stmt); } /** * Specialized comparators are faster than general-purpose comparators. * For example, vy_stmt_compare - slowest comparator because it in worst case * checks all combinations of key and tuple types, but * vy_key_compare - fastest comparator, because it shouldn't check statement * types. */ /** * Compare SELECT/DELETE statements using the key definition * @param a left operand (SELECT/DELETE) * @param b right operand (SELECT/DELETE) * @param cmp_def key definition, with primary parts * * @retval 0 if a == b * @retval > 0 if a > b * @retval < 0 if a < b * * @sa key_compare() */ static inline int vy_key_compare(const struct tuple *a, const struct tuple *b, const struct key_def *cmp_def) { assert(vy_stmt_type(a) == IPROTO_SELECT); assert(vy_stmt_type(b) == IPROTO_SELECT); return key_compare(tuple_data(a), tuple_data(b), cmp_def); } /** * Compare REPLACE/UPSERTS statements. 
* @param a left operand (REPLACE/UPSERT) * @param b right operand (REPLACE/UPSERT) * @param cmp_def key definition with primary parts * * @retval 0 if a == b * @retval > 0 if a > b * @retval < 0 if a < b * * @sa tuple_compare() */ static inline int vy_tuple_compare(const struct tuple *a, const struct tuple *b, const struct key_def *cmp_def) { enum iproto_type type; type = vy_stmt_type(a); assert(type == IPROTO_INSERT || type == IPROTO_REPLACE || type == IPROTO_UPSERT || type == IPROTO_DELETE); type = vy_stmt_type(b); assert(type == IPROTO_INSERT || type == IPROTO_REPLACE || type == IPROTO_UPSERT || type == IPROTO_DELETE); (void)type; return tuple_compare(a, b, cmp_def); } /** * Compare REPLACE/UPSERT with SELECT/DELETE using the key * definition * @param tuple Left operand (REPLACE/UPSERT) * @param key MessagePack array of key fields, right operand. * * @retval > 0 tuple > key. * @retval == 0 tuple == key in all fields * @retval == 0 tuple is prefix of key * @retval == 0 key is a prefix of tuple * @retval < 0 tuple < key. * * @sa tuple_compare_with_key() */ static inline int vy_tuple_compare_with_raw_key(const struct tuple *tuple, const char *key, const struct key_def *key_def) { uint32_t part_count = mp_decode_array(&key); return tuple_compare_with_key(tuple, key, part_count, key_def); } /** @sa vy_tuple_compare_with_raw_key(). */ static inline int vy_tuple_compare_with_key(const struct tuple *tuple, const struct tuple *key, const struct key_def *key_def) { const char *key_mp = tuple_data(key); uint32_t part_count = mp_decode_array(&key_mp); return tuple_compare_with_key(tuple, key_mp, part_count, key_def); } /** @sa tuple_compare. 
*/ static inline int vy_stmt_compare(const struct tuple *a, const struct tuple *b, const struct key_def *key_def) { bool a_is_tuple = vy_stmt_type(a) != IPROTO_SELECT; bool b_is_tuple = vy_stmt_type(b) != IPROTO_SELECT; if (a_is_tuple && b_is_tuple) { return vy_tuple_compare(a, b, key_def); } else if (a_is_tuple && !b_is_tuple) { return vy_tuple_compare_with_key(a, b, key_def); } else if (!a_is_tuple && b_is_tuple) { return -vy_tuple_compare_with_key(b, a, key_def); } else { assert(!a_is_tuple && !b_is_tuple); return vy_key_compare(a, b, key_def); } } /** @sa tuple_compare_with_raw_key. */ static inline int vy_stmt_compare_with_raw_key(const struct tuple *stmt, const char *key, const struct key_def *key_def) { if (vy_stmt_type(stmt) != IPROTO_SELECT) return vy_tuple_compare_with_raw_key(stmt, key, key_def); return key_compare(tuple_data(stmt), key, key_def); } /** @sa tuple_compare_with_key. */ static inline int vy_stmt_compare_with_key(const struct tuple *stmt, const struct tuple *key, const struct key_def *key_def) { assert(vy_stmt_type(key) == IPROTO_SELECT); return vy_stmt_compare_with_raw_key(stmt, tuple_data(key), key_def); } /** * Create the SELECT statement from raw MessagePack data. * @param format Format of an index. * @param key MessagePack data that contain an array of * fields WITHOUT the array header. * @param part_count Count of the key fields that will be saved as * result. * * @retval NULL Memory allocation error. * @retval not NULL Success. */ struct tuple * vy_stmt_new_select(struct tuple_format *format, const char *key, uint32_t part_count); /** * Copy the key in a new memory area. * @retval not NULL Success. * @retval NULL Memory error. */ char * vy_key_dup(const char *key); /** * Create a new surrogate DELETE from @a key using format. * * Example: * key: {a3, a5} * key_def: { 3, 5 } * result: {nil, nil, a3, nil, a5} * * @param key MessagePack array with key fields. 
* @param cmp_def Key definition of the result statement (incudes * primary key parts). * @param format Target tuple format. * * @retval not NULL Success. * @retval NULL Memory or format error. */ struct tuple * vy_stmt_new_surrogate_delete_from_key(const char *key, const struct key_def *cmp_def, struct tuple_format *format); /** * Create a new surrogate DELETE from @a tuple using @a format. * A surrogate tuple has format->field_count fields from the source * with all unindexed fields replaced with MessagePack NIL. * * Example: * original: {a1, a2, a3, a4, a5} * index key_def: {2, 4} * result: {null, a2, null, a4, null} * * @param format Target tuple format. * @param src Source tuple from the primary index. * * @retval not NULL Success. * @retval NULL Memory or fields format error. */ struct tuple * vy_stmt_new_surrogate_delete(struct tuple_format *format, const struct tuple *tuple); /** * Create the REPLACE statement from raw MessagePack data. * @param format Format of a tuple for offsets generating. * @param tuple_begin MessagePack data that contain an array of fields WITH the * array header. * @param tuple_end End of the array that begins from @param tuple_begin. * * @retval NULL Memory allocation error. * @retval not NULL Success. */ struct tuple * vy_stmt_new_replace(struct tuple_format *format, const char *tuple, const char *tuple_end); /** * Create the INSERT statement from raw MessagePack data. * @param format Format of a tuple for offsets generating. * @param tuple_begin MessagePack data that contain an array of fields WITH the * array header. * @param tuple_end End of the array that begins from @param tuple_begin. * * @retval NULL Memory allocation error. * @retval not NULL Success. */ struct tuple * vy_stmt_new_insert(struct tuple_format *format, const char *tuple_begin, const char *tuple_end); /** * Create the UPSERT statement from raw MessagePack data. * @param tuple_begin MessagePack data that contain an array of fields WITH the * array header. 
* @param tuple_end End of the array that begins from @param tuple_begin. * @param format Format of a tuple for offsets generating. * @param part_count Part count from key definition. * @param operations Vector of update operations. * @param ops_cnt Length of the update operations vector. * * @retval NULL Memory allocation error. * @retval not NULL Success. */ struct tuple * vy_stmt_new_upsert(struct tuple_format *format, const char *tuple_begin, const char *tuple_end, struct iovec *operations, uint32_t ops_cnt); /** * Create REPLACE statement from UPSERT statement. * * @param replace_format Format for new REPLACE statement. * @param upsert Upsert statement. * @retval not NULL Success. * @retval NULL Memory error. */ struct tuple * vy_stmt_replace_from_upsert(struct tuple_format *replace_format, const struct tuple *upsert); /** * Extract MessagePack data from the REPLACE/UPSERT statement. * @param stmt An UPSERT or REPLACE statement. * @param[out] p_size Size of the MessagePack array in bytes. * * @return MessagePack array of tuple fields. */ static inline const char * vy_upsert_data_range(const struct tuple *tuple, uint32_t *p_size) { assert(vy_stmt_type(tuple) == IPROTO_UPSERT); /* UPSERT must have the n_upserts field. */ assert(tuple_format(tuple)->extra_size == sizeof(uint8_t)); const char *mp = tuple_data(tuple); assert(mp_typeof(*mp) == MP_ARRAY); const char *mp_end = mp; mp_next(&mp_end); assert(mp < mp_end); *p_size = mp_end - mp; return mp; } /** * Extract the operations array from the UPSERT statement. * @param stmt An UPSERT statement. * @param mp_size Out parameter for size of the returned array. * * @retval Pointer on MessagePack array of update operations. 
*/ static inline const char * vy_stmt_upsert_ops(const struct tuple *tuple, uint32_t *mp_size) { assert(vy_stmt_type(tuple) == IPROTO_UPSERT); const char *mp = tuple_data(tuple); mp_next(&mp); *mp_size = tuple_data(tuple) + tuple->bsize - mp; return mp; } /** * Create the SELECT statement from MessagePack array. * @param format Format of an index. * @param key MessagePack array of key fields. * * @retval not NULL Success. * @retval NULL Memory error. */ static inline struct tuple * vy_key_from_msgpack(struct tuple_format *format, const char *key) { uint32_t part_count; /* * The statement already is a key, so simply copy it in * the new struct vy_stmt as SELECT. */ part_count = mp_decode_array(&key); return vy_stmt_new_select(format, key, part_count); } /** * Extract the key from a tuple by the given key definition * and store the result in a SELECT statement allocated with * malloc(). */ struct tuple * vy_stmt_extract_key(const struct tuple *stmt, const struct key_def *key_def, struct tuple_format *format); /** * Extract the key from msgpack by the given key definition * and store the result in a SELECT statement allocated with * malloc(). */ struct tuple * vy_stmt_extract_key_raw(const char *data, const char *data_end, const struct key_def *key_def, struct tuple_format *format); /** * Encode vy_stmt for a primary key as xrow_header * * @param value statement to encode * @param key_def key definition * @param space_id is written to the request header unless it is 0. * Pass 0 to save some space in xrow. 
* @param xrow[out] xrow to fill * * @retval 0 if OK * @retval -1 if error */ int vy_stmt_encode_primary(const struct tuple *value, const struct key_def *key_def, uint32_t space_id, struct xrow_header *xrow); /** * Encode vy_stmt for a secondary key as xrow_header * * @param value statement to encode * @param key_def key definition * @param xrow[out] xrow to fill * * @retval 0 if OK * @retval -1 if error */ int vy_stmt_encode_secondary(const struct tuple *value, const struct key_def *cmp_def, struct xrow_header *xrow); /** * Reconstruct vinyl tuple info and data from xrow * * @retval stmt on success * @retval NULL on error */ struct tuple * vy_stmt_decode(struct xrow_header *xrow, const struct key_def *key_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary); /** * Format a statement into string. * Example: REPLACE([1, 2, "string"], lsn=48) */ int vy_stmt_snprint(char *buf, int size, const struct tuple *stmt); /* * Format a statement into string using a static buffer. * Useful for gdb and say_debug(). * \sa vy_stmt_snprint() */ const char * vy_stmt_str(const struct tuple *stmt); /** * Create a tuple format with column mask of an update operation. * @sa vy_index.column_mask, vy_can_skip_update(). * @param mem_format A base tuple format. * * @retval not NULL Success. * @retval NULL Memory or format register error. */ struct tuple_format * vy_tuple_format_new_with_colmask(struct tuple_format *mem_format); /** * Create a tuple format for UPSERT tuples. UPSERTs has an additional * extra byte before an offsets table, that stores the count * of squashed upserts @sa vy_squash. * @param mem_format A base tuple format. * * @retval not NULL Success. * @retval NULL Memory or format register error. */ struct tuple_format * vy_tuple_format_new_upsert(struct tuple_format *mem_format); /** * Check if a key of @a tuple contains NULL. * @param tuple Tuple to check. * @param def Key def to check by. * @retval Does the key contain NULL or not? 
*/ static inline bool vy_tuple_key_contains_null(const struct tuple *tuple, const struct key_def *def) { for (uint32_t i = 0; i < def->part_count; ++i) { const char *field = tuple_field(tuple, def->parts[i].fieldno); if (field == NULL || mp_typeof(*field) == MP_NIL) return true; } return false; } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_STMT_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/checkpoint.cc0000664000000000000000000000456413306565107020242 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 */
#include "checkpoint.h"
/*
 * NOTE(review): the two bare `#include` directives below had their
 * angle-bracket targets (<...>) stripped by the archive extraction
 * that produced this text — restore them from the upstream file
 * before compiling; presumably standard headers. TODO confirm.
 */
#include
#include
#include "engine.h"
#include "memtx_engine.h"

/**
 * Return the signature (vclock sum) of the most recent checkpoint
 * and, if @vclock is not NULL, fill it with that checkpoint's vclock.
 * Delegates to xdir_last_vclock() on the memtx snapshot directory.
 */
int64_t
checkpoint_last(struct vclock *vclock)
{
	struct memtx_engine *memtx;
	/* Checkpoint metadata lives with the memtx engine's snap dir. */
	memtx = (struct memtx_engine *)engine_by_name("memtx");
	assert(memtx != NULL); /* memtx is always registered */
	return xdir_last_vclock(&memtx->snap_dir, vclock);
}

/**
 * Advance the checkpoint iterator towards newer checkpoints.
 * A NULL it->curr means "before the first element": the first call
 * positions on the oldest checkpoint. Returns NULL when exhausted.
 */
const struct vclock *
checkpoint_iterator_next(struct checkpoint_iterator *it)
{
	struct memtx_engine *memtx;
	memtx = (struct memtx_engine *)engine_by_name("memtx");
	assert(memtx != NULL);
	it->curr = it->curr == NULL ?
		vclockset_first(&memtx->snap_dir.index) :
		vclockset_next(&memtx->snap_dir.index,
			       (struct vclock *)it->curr);
	return it->curr;
}

/**
 * Advance the checkpoint iterator towards older checkpoints.
 * A NULL it->curr positions on the newest checkpoint first.
 * Returns NULL when exhausted.
 */
const struct vclock *
checkpoint_iterator_prev(struct checkpoint_iterator *it)
{
	struct memtx_engine *memtx;
	memtx = (struct memtx_engine *)engine_by_name("memtx");
	assert(memtx != NULL);
	it->curr = it->curr == NULL ?
		vclockset_last(&memtx->snap_dir.index) :
		vclockset_prev(&memtx->snap_dir.index,
			       (struct vclock *)it->curr);
	return it->curr;
}
/* --- tar member boundary (extraction artifact): src/box/vy_upsert.c --- */
tarantool_1.9.1.26.g63eb81e3c/src/box/vy_upsert.c0000664000000000000000000001731413306565107020005 0ustar rootroot/*
 * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the
 * following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_upsert.h" #include #include #include #include "vy_stmt.h" #include "tuple_update.h" #include "fiber.h" #include "column_mask.h" static void * vy_update_alloc(void *arg, size_t size) { /* TODO: rewrite tuple_upsert_execute() without exceptions */ struct region *region = (struct region *) arg; void *data = region_aligned_alloc(region, size, sizeof(uint64_t)); if (data == NULL) diag_set(OutOfMemory, size, "region", "upsert"); return data; } /** * vinyl wrapper of tuple_upsert_execute. * vibyl upsert opts are slightly different from tarantool ops, * so they need some preparation before tuple_upsert_execute call. * The function does this preparation. * On successfull upsert the result is placed into stmt and stmt_end args. * On fail the stmt and stmt_end args are not changed. 
* Possibly allocates new stmt via fiber region alloc, * so call fiber_gc() after usage */ static void vy_apply_upsert_ops(struct region *region, const char **stmt, const char **stmt_end, const char *ops, const char *ops_end, bool suppress_error, uint64_t *column_mask) { if (ops == ops_end) return; #ifndef NDEBUG const char *serie_end_must_be = ops; mp_next(&serie_end_must_be); assert(ops_end == serie_end_must_be); #endif const char *result; uint32_t size; result = tuple_upsert_execute(vy_update_alloc, region, ops, ops_end, *stmt, *stmt_end, &size, 0, suppress_error, column_mask); if (result != NULL) { /* if failed, just skip it and leave stmt the same */ *stmt = result; *stmt_end = result + size; } } /** * Try to squash two upsert series (msgspacked index_base + ops) * Try to create a tuple with squahed operations * * @retval 0 && *result_stmt != NULL : successful squash * @retval 0 && *result_stmt == NULL : unsquashable sources * @retval -1 - memory error */ static int vy_upsert_try_to_squash(struct tuple_format *format, struct region *region, const char *key_mp, const char *key_mp_end, const char *old_serie, const char *old_serie_end, const char *new_serie, const char *new_serie_end, struct tuple **result_stmt) { *result_stmt = NULL; size_t squashed_size; const char *squashed = tuple_upsert_squash(vy_update_alloc, region, old_serie, old_serie_end, new_serie, new_serie_end, &squashed_size, 0); if (squashed == NULL) return 0; /* Successful squash! 
*/ struct iovec operations[1]; operations[0].iov_base = (void *)squashed; operations[0].iov_len = squashed_size; *result_stmt = vy_stmt_new_upsert(format, key_mp, key_mp_end, operations, 1); if (*result_stmt == NULL) return -1; return 0; } struct tuple * vy_apply_upsert(const struct tuple *new_stmt, const struct tuple *old_stmt, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *upsert_format, bool suppress_error) { /* * old_stmt - previous (old) version of stmt * new_stmt - next (new) version of stmt * result_stmt - the result of merging new and old */ assert(new_stmt != NULL); assert(new_stmt != old_stmt); assert(vy_stmt_type(new_stmt) == IPROTO_UPSERT); if (old_stmt == NULL || vy_stmt_type(old_stmt) == IPROTO_DELETE) { /* * INSERT case: return new stmt. */ return vy_stmt_replace_from_upsert(format, new_stmt); } /* * Unpack UPSERT operation from the new stmt */ uint32_t mp_size; const char *new_ops; new_ops = vy_stmt_upsert_ops(new_stmt, &mp_size); const char *new_ops_end = new_ops + mp_size; /* * Apply new operations to the old stmt */ const char *result_mp; if (vy_stmt_type(old_stmt) == IPROTO_UPSERT) result_mp = vy_upsert_data_range(old_stmt, &mp_size); else result_mp = tuple_data_range(old_stmt, &mp_size); const char *result_mp_end = result_mp + mp_size; struct tuple *result_stmt = NULL; struct region *region = &fiber()->gc; size_t region_svp = region_used(region); uint8_t old_type = vy_stmt_type(old_stmt); uint64_t column_mask = COLUMN_MASK_FULL; vy_apply_upsert_ops(region, &result_mp, &result_mp_end, new_ops, new_ops_end, suppress_error, &column_mask); if (old_type != IPROTO_UPSERT) { assert(old_type == IPROTO_INSERT || old_type == IPROTO_REPLACE); /* * UPDATE case: return the updated old stmt. 
*/ result_stmt = vy_stmt_new_replace(format, result_mp, result_mp_end); region_truncate(region, region_svp); if (result_stmt == NULL) return NULL; /* OOM */ vy_stmt_set_lsn(result_stmt, vy_stmt_lsn(new_stmt)); goto check_key; } /* * Unpack UPSERT operation from the old stmt */ assert(old_stmt != NULL); const char *old_ops; old_ops = vy_stmt_upsert_ops(old_stmt, &mp_size); const char *old_ops_end = old_ops + mp_size; assert(old_ops_end > old_ops); /* * UPSERT + UPSERT case: combine operations */ assert(old_ops_end - old_ops > 0); if (vy_upsert_try_to_squash(upsert_format, region, result_mp, result_mp_end, old_ops, old_ops_end, new_ops, new_ops_end, &result_stmt) != 0) { region_truncate(region, region_svp); return NULL; } if (result_stmt != NULL) { region_truncate(region, region_svp); vy_stmt_set_lsn(result_stmt, vy_stmt_lsn(new_stmt)); goto check_key; } /* Failed to squash, simply add one upsert to another */ int old_ops_cnt, new_ops_cnt; struct iovec operations[3]; old_ops_cnt = mp_decode_array(&old_ops); operations[1].iov_base = (void *)old_ops; operations[1].iov_len = old_ops_end - old_ops; new_ops_cnt = mp_decode_array(&new_ops); operations[2].iov_base = (void *)new_ops; operations[2].iov_len = new_ops_end - new_ops; char ops_buf[16]; char *header = mp_encode_array(ops_buf, old_ops_cnt + new_ops_cnt); operations[0].iov_base = (void *)ops_buf; operations[0].iov_len = header - ops_buf; result_stmt = vy_stmt_new_upsert(upsert_format, result_mp, result_mp_end, operations, 3); region_truncate(region, region_svp); if (result_stmt == NULL) return NULL; vy_stmt_set_lsn(result_stmt, vy_stmt_lsn(new_stmt)); check_key: /* * Check that key hasn't been changed after applying operations. */ if (!key_update_can_be_skipped(cmp_def->column_mask, column_mask) && vy_tuple_compare(old_stmt, result_stmt, cmp_def) != 0) { /* * Key has been changed: ignore this UPSERT and * @retval the old stmt. 
*/ tuple_unref(result_stmt); result_stmt = vy_stmt_dup(old_stmt, old_type == IPROTO_UPSERT ? upsert_format : format); } return result_stmt; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_log.c0000664000000000000000000017660013306565107017250 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "vy_log.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include "assoc.h" #include "coio_task.h" #include "diag.h" #include "errcode.h" #include "errinj.h" #include "fiber.h" #include "iproto_constants.h" /* IPROTO_INSERT */ #include "key_def.h" #include "latch.h" #include "replication.h" /* INSTANCE_UUID */ #include "salad/stailq.h" #include "say.h" #include "trivia/util.h" #include "wal.h" #include "vclock.h" #include "xlog.h" #include "xrow.h" /** * Integer key of a field in the vy_log_record structure. * Used for packing a record in MsgPack. */ enum vy_log_key { VY_LOG_KEY_INDEX_LSN = 0, VY_LOG_KEY_RANGE_ID = 1, VY_LOG_KEY_RUN_ID = 2, VY_LOG_KEY_BEGIN = 3, VY_LOG_KEY_END = 4, VY_LOG_KEY_INDEX_ID = 5, VY_LOG_KEY_SPACE_ID = 6, VY_LOG_KEY_DEF = 7, VY_LOG_KEY_SLICE_ID = 8, VY_LOG_KEY_DUMP_LSN = 9, VY_LOG_KEY_GC_LSN = 10, VY_LOG_KEY_TRUNCATE_COUNT = 11, }; /** vy_log_key -> human readable name. */ static const char *vy_log_key_name[] = { [VY_LOG_KEY_INDEX_LSN] = "index_lsn", [VY_LOG_KEY_RANGE_ID] = "range_id", [VY_LOG_KEY_RUN_ID] = "run_id", [VY_LOG_KEY_BEGIN] = "begin", [VY_LOG_KEY_END] = "end", [VY_LOG_KEY_INDEX_ID] = "index_id", [VY_LOG_KEY_SPACE_ID] = "space_id", [VY_LOG_KEY_DEF] = "key_def", [VY_LOG_KEY_SLICE_ID] = "slice_id", [VY_LOG_KEY_DUMP_LSN] = "dump_lsn", [VY_LOG_KEY_GC_LSN] = "gc_lsn", [VY_LOG_KEY_TRUNCATE_COUNT] = "truncate_count", }; /** vy_log_type -> human readable name. 
*/ static const char *vy_log_type_name[] = { [VY_LOG_CREATE_INDEX] = "create_index", [VY_LOG_DROP_INDEX] = "drop_index", [VY_LOG_INSERT_RANGE] = "insert_range", [VY_LOG_DELETE_RANGE] = "delete_range", [VY_LOG_PREPARE_RUN] = "prepare_run", [VY_LOG_CREATE_RUN] = "create_run", [VY_LOG_DROP_RUN] = "drop_run", [VY_LOG_FORGET_RUN] = "forget_run", [VY_LOG_INSERT_SLICE] = "insert_slice", [VY_LOG_DELETE_SLICE] = "delete_slice", [VY_LOG_DUMP_INDEX] = "dump_index", [VY_LOG_SNAPSHOT] = "snapshot", [VY_LOG_TRUNCATE_INDEX] = "truncate_index", }; struct vy_recovery; /** Metadata log object. */ struct vy_log { /** * The directory where log files are stored. * Note, dir.index contains vclocks of all snapshots, * even those that didn't result in file creation. */ struct xdir dir; /** Last checkpoint vclock. */ struct vclock last_checkpoint; /** Recovery context. */ struct vy_recovery *recovery; /** Latch protecting the log buffer. */ struct latch latch; /** * Next ID to use for a vinyl object. * Used by vy_log_next_id(). */ int64_t next_id; /** A region of struct vy_log_record entries. */ struct region pool; /** * Records awaiting to be written to disk. * Linked by vy_log_record::in_tx; */ struct stailq tx; /** Number of entries in the @tx list. */ int tx_size; /** Start of the current transaction in the pool, for rollback */ size_t tx_svp; /** * Last record in the queue at the time when the current * transaction was started. Used for rollback. */ struct stailq_entry *tx_begin; /** * Flag set if vy_log_write() failed. * * It indicates that that the current transaction must be * aborted on vy_log_commit(). Thanks to this flag, we don't * need to add error handling code after each invocation of * vy_log_write(), instead we only check vy_log_commit() * return code. */ bool tx_failed; /** * Diagnostic area where vy_log_write() error is stored, * only relevant if @tx_failed is set. */ struct diag tx_diag; }; static struct vy_log vy_log; /** Recovery context. 
*/ struct vy_recovery { /** space_id, index_id -> vy_index_recovery_info. */ struct mh_i64ptr_t *index_id_hash; /** index_lsn -> vy_index_recovery_info. */ struct mh_i64ptr_t *index_lsn_hash; /** ID -> vy_range_recovery_info. */ struct mh_i64ptr_t *range_hash; /** ID -> vy_run_recovery_info. */ struct mh_i64ptr_t *run_hash; /** ID -> vy_slice_recovery_info. */ struct mh_i64ptr_t *slice_hash; /** * Maximal vinyl object ID, according to the metadata log, * or -1 in case no vinyl objects were recovered. */ int64_t max_id; }; /** Vinyl index info stored in a recovery context. */ struct vy_index_recovery_info { /** LSN of the index creation. */ int64_t index_lsn; /** Ordinal index number in the space. */ uint32_t index_id; /** Space ID. */ uint32_t space_id; /** Array of key part definitions. */ struct key_part_def *key_parts; /** Number of key parts. */ uint32_t key_part_count; /** True if the index was dropped. */ bool is_dropped; /** LSN of the last index dump. */ int64_t dump_lsn; /** Truncate count. */ int64_t truncate_count; /** * List of all ranges in the index, linked by * vy_range_recovery_info::in_index. */ struct rlist ranges; /** * List of all runs created for the index * (both committed and not), linked by * vy_run_recovery_info::in_index. */ struct rlist runs; }; /** Vinyl range info stored in a recovery context. */ struct vy_range_recovery_info { /** Link in vy_index_recovery_info::ranges. */ struct rlist in_index; /** ID of the range. */ int64_t id; /** Start of the range, stored in MsgPack array. */ char *begin; /** End of the range, stored in MsgPack array. */ char *end; /** * List of all slices in the range, linked by * vy_slice_recovery_info::in_range. * * Newer slices are closer to the head. */ struct rlist slices; }; /** Run info stored in a recovery context. */ struct vy_run_recovery_info { /** Link in vy_index_recovery_info::runs. */ struct rlist in_index; /** ID of the run. */ int64_t id; /** Max LSN stored on disk. 
 */
	int64_t dump_lsn;
	/**
	 * For deleted runs: LSN of the last checkpoint
	 * that uses this run.
	 */
	int64_t gc_lsn;
	/**
	 * True if the run was not committed (there's
	 * VY_LOG_PREPARE_RUN, but no VY_LOG_CREATE_RUN).
	 */
	bool is_incomplete;
	/** True if the run was dropped (VY_LOG_DROP_RUN). */
	bool is_dropped;
};

/** Slice info stored in a recovery context. */
struct vy_slice_recovery_info {
	/** Link in vy_range_recovery_info::slices. */
	struct rlist in_range;
	/** ID of the slice. */
	int64_t id;
	/** Run this slice was created for. */
	struct vy_run_recovery_info *run;
	/** Start of the slice, stored in MsgPack array. */
	char *begin;
	/** End of the slice, stored in MsgPack array. */
	char *end;
};

/* Forward declaration: defined further down in this file. */
static struct vy_recovery *
vy_recovery_new_locked(int64_t signature, bool only_checkpoint);

/**
 * Return the name of the vylog file that has the given signature.
 * The string is owned by xdir_format_filename() (static storage).
 */
static inline const char *
vy_log_filename(int64_t signature)
{
	return xdir_format_filename(&vy_log.dir, signature, NONE);
}

/**
 * Return the lsn of the checkpoint that was taken
 * before the given lsn.
 *
 * Walks the checkpoint index from newest to oldest and returns
 * the first signature strictly less than @lsn, or -1 if none.
 */
static int64_t
vy_log_prev_checkpoint(int64_t lsn)
{
	int64_t ret = -1;
	for (struct vclock *vclock = vclockset_last(&vy_log.dir.index);
	     vclock != NULL;
	     vclock = vclockset_prev(&vy_log.dir.index, vclock)) {
		if (vclock_sum(vclock) < lsn) {
			ret = vclock_sum(vclock);
			break;
		}
	}
	return ret;
}

/** An snprint-style function to print a log record.
*/ static int vy_log_record_snprint(char *buf, int size, const struct vy_log_record *record) { int total = 0; assert(record->type < vy_log_record_type_MAX); SNPRINT(total, snprintf, buf, size, "%s{", vy_log_type_name[record->type]); if (record->index_lsn > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_INDEX_LSN], record->index_lsn); if (record->range_id > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_RANGE_ID], record->range_id); if (record->run_id > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_RUN_ID], record->run_id); if (record->begin != NULL) { SNPRINT(total, snprintf, buf, size, "%s=", vy_log_key_name[VY_LOG_KEY_BEGIN]); SNPRINT(total, mp_snprint, buf, size, record->begin); SNPRINT(total, snprintf, buf, size, ", "); } if (record->end != NULL) { SNPRINT(total, snprintf, buf, size, "%s=", vy_log_key_name[VY_LOG_KEY_END]); SNPRINT(total, mp_snprint, buf, size, record->end); SNPRINT(total, snprintf, buf, size, ", "); } if (record->index_id > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIu32", ", vy_log_key_name[VY_LOG_KEY_INDEX_ID], record->index_id); if (record->space_id > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIu32", ", vy_log_key_name[VY_LOG_KEY_SPACE_ID], record->space_id); if (record->key_parts != NULL) { SNPRINT(total, snprintf, buf, size, "%s=", vy_log_key_name[VY_LOG_KEY_DEF]); SNPRINT(total, key_def_snprint_parts, buf, size, record->key_parts, record->key_part_count); SNPRINT(total, snprintf, buf, size, ", "); } if (record->slice_id > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_SLICE_ID], record->slice_id); if (record->dump_lsn > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_DUMP_LSN], record->dump_lsn); if (record->gc_lsn > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_GC_LSN], record->gc_lsn); if 
(record->truncate_count > 0) SNPRINT(total, snprintf, buf, size, "%s=%"PRIi64", ", vy_log_key_name[VY_LOG_KEY_TRUNCATE_COUNT], record->truncate_count); SNPRINT(total, snprintf, buf, size, "}"); return total; } /** * Return a string containing a human readable representation * of a log record. */ static const char * vy_log_record_str(const struct vy_log_record *record) { char *buf = tt_static_buf(); if (vy_log_record_snprint(buf, TT_STATIC_BUF_LEN, record) < 0) return ""; return buf; } /** * Encode a log record into an xrow to be further written to an xlog. * Return 0 on success, -1 on failure. * * When stored in xlog, a vinyl metadata log has the following MsgPack * representation: * * [ type, { key: value, ... } ] * * 'type': see vy_log_record_type enum * 'key': see vy_log_key enum * 'value': depends on 'key' */ static int vy_log_record_encode(const struct vy_log_record *record, struct xrow_header *row) { assert(record->type < vy_log_record_type_MAX); /* * Calculate record size. */ size_t size = 0; size += mp_sizeof_array(2); size += mp_sizeof_uint(record->type); size_t n_keys = 0; if (record->index_lsn > 0) { size += mp_sizeof_uint(VY_LOG_KEY_INDEX_LSN); size += mp_sizeof_uint(record->index_lsn); n_keys++; } if (record->range_id > 0) { size += mp_sizeof_uint(VY_LOG_KEY_RANGE_ID); size += mp_sizeof_uint(record->range_id); n_keys++; } if (record->run_id > 0) { size += mp_sizeof_uint(VY_LOG_KEY_RUN_ID); size += mp_sizeof_uint(record->run_id); n_keys++; } if (record->begin != NULL) { size += mp_sizeof_uint(VY_LOG_KEY_BEGIN); const char *p = record->begin; assert(mp_typeof(*p) == MP_ARRAY); mp_next(&p); size += p - record->begin; n_keys++; } if (record->end != NULL) { size += mp_sizeof_uint(VY_LOG_KEY_END); const char *p = record->end; assert(mp_typeof(*p) == MP_ARRAY); mp_next(&p); size += p - record->end; n_keys++; } if (record->index_id > 0) { size += mp_sizeof_uint(VY_LOG_KEY_INDEX_ID); size += mp_sizeof_uint(record->index_id); n_keys++; } if (record->space_id > 
0) { size += mp_sizeof_uint(VY_LOG_KEY_SPACE_ID); size += mp_sizeof_uint(record->space_id); n_keys++; } if (record->key_parts != NULL) { size += mp_sizeof_uint(VY_LOG_KEY_DEF); size += mp_sizeof_array(record->key_part_count); size += key_def_sizeof_parts(record->key_parts, record->key_part_count); n_keys++; } if (record->slice_id > 0) { size += mp_sizeof_uint(VY_LOG_KEY_SLICE_ID); size += mp_sizeof_uint(record->slice_id); n_keys++; } if (record->dump_lsn > 0) { size += mp_sizeof_uint(VY_LOG_KEY_DUMP_LSN); size += mp_sizeof_uint(record->dump_lsn); n_keys++; } if (record->gc_lsn > 0) { size += mp_sizeof_uint(VY_LOG_KEY_GC_LSN); size += mp_sizeof_uint(record->gc_lsn); n_keys++; } if (record->truncate_count > 0) { size += mp_sizeof_uint(VY_LOG_KEY_TRUNCATE_COUNT); size += mp_sizeof_uint(record->truncate_count); n_keys++; } size += mp_sizeof_map(n_keys); /* * Encode record. */ char *tuple = region_alloc(&fiber()->gc, size); if (tuple == NULL) { diag_set(OutOfMemory, size, "region", "vy_log record"); return -1; } char *pos = tuple; pos = mp_encode_array(pos, 2); pos = mp_encode_uint(pos, record->type); pos = mp_encode_map(pos, n_keys); if (record->index_lsn > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_INDEX_LSN); pos = mp_encode_uint(pos, record->index_lsn); } if (record->range_id > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_RANGE_ID); pos = mp_encode_uint(pos, record->range_id); } if (record->run_id > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_RUN_ID); pos = mp_encode_uint(pos, record->run_id); } if (record->begin != NULL) { pos = mp_encode_uint(pos, VY_LOG_KEY_BEGIN); const char *p = record->begin; mp_next(&p); memcpy(pos, record->begin, p - record->begin); pos += p - record->begin; } if (record->end != NULL) { pos = mp_encode_uint(pos, VY_LOG_KEY_END); const char *p = record->end; mp_next(&p); memcpy(pos, record->end, p - record->end); pos += p - record->end; } if (record->index_id > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_INDEX_ID); pos = mp_encode_uint(pos, 
record->index_id); } if (record->space_id > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_SPACE_ID); pos = mp_encode_uint(pos, record->space_id); } if (record->key_parts != NULL) { pos = mp_encode_uint(pos, VY_LOG_KEY_DEF); pos = mp_encode_array(pos, record->key_part_count); pos = key_def_encode_parts(pos, record->key_parts, record->key_part_count); } if (record->slice_id > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_SLICE_ID); pos = mp_encode_uint(pos, record->slice_id); } if (record->dump_lsn > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_DUMP_LSN); pos = mp_encode_uint(pos, record->dump_lsn); } if (record->gc_lsn > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_GC_LSN); pos = mp_encode_uint(pos, record->gc_lsn); } if (record->truncate_count > 0) { pos = mp_encode_uint(pos, VY_LOG_KEY_TRUNCATE_COUNT); pos = mp_encode_uint(pos, record->truncate_count); } assert(pos == tuple + size); /* * Store record in xrow. */ struct request req; memset(&req, 0, sizeof(req)); req.type = IPROTO_INSERT; req.tuple = tuple; req.tuple_end = pos; memset(row, 0, sizeof(*row)); row->type = req.type; row->bodycnt = xrow_encode_dml(&req, row->body); return 0; } /** * Decode a log record from an xrow. * Return 0 on success, -1 on failure. 
*/ static int vy_log_record_decode(struct vy_log_record *record, struct xrow_header *row) { char *buf; memset(record, 0, sizeof(*record)); struct request req; if (xrow_decode_dml(row, &req, 1ULL << IPROTO_TUPLE) != 0) { diag_log(); diag_set(ClientError, ER_INVALID_VYLOG_FILE, "Bad record: failed to decode request"); return -1; } const char *tmp, *pos = req.tuple; uint32_t array_size = mp_decode_array(&pos); if (array_size != 2) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Bad record: wrong array size " "(expected %d, got %u)", 2, (unsigned)array_size)); goto fail; } record->type = mp_decode_uint(&pos); if (record->type >= vy_log_record_type_MAX) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Bad record: unknown record type %d", record->type)); goto fail; } uint32_t n_keys = mp_decode_map(&pos); for (uint32_t i = 0; i < n_keys; i++) { uint32_t key = mp_decode_uint(&pos); switch (key) { case VY_LOG_KEY_INDEX_LSN: record->index_lsn = mp_decode_uint(&pos); break; case VY_LOG_KEY_RANGE_ID: record->range_id = mp_decode_uint(&pos); break; case VY_LOG_KEY_RUN_ID: record->run_id = mp_decode_uint(&pos); break; case VY_LOG_KEY_BEGIN: tmp = pos; record->begin = mp_decode_array(&tmp) > 0 ? pos : NULL; mp_next(&pos); break; case VY_LOG_KEY_END: tmp = pos; record->end = mp_decode_array(&tmp) > 0 ? 
pos : NULL; mp_next(&pos); break; case VY_LOG_KEY_INDEX_ID: record->index_id = mp_decode_uint(&pos); break; case VY_LOG_KEY_SPACE_ID: record->space_id = mp_decode_uint(&pos); break; case VY_LOG_KEY_DEF: { uint32_t part_count = mp_decode_array(&pos); struct key_part_def *parts = region_alloc(&fiber()->gc, sizeof(*parts) * part_count); if (parts == NULL) { diag_set(OutOfMemory, sizeof(*parts) * part_count, "region", "struct key_part_def"); return -1; } if (key_def_decode_parts(parts, part_count, &pos, NULL, 0) != 0) { diag_log(); diag_set(ClientError, ER_INVALID_VYLOG_FILE, "Bad record: failed to decode " "index key definition"); goto fail; } record->key_parts = parts; record->key_part_count = part_count; break; } case VY_LOG_KEY_SLICE_ID: record->slice_id = mp_decode_uint(&pos); break; case VY_LOG_KEY_DUMP_LSN: record->dump_lsn = mp_decode_uint(&pos); break; case VY_LOG_KEY_GC_LSN: record->gc_lsn = mp_decode_uint(&pos); break; case VY_LOG_KEY_TRUNCATE_COUNT: record->truncate_count = mp_decode_uint(&pos); break; default: diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Bad record: unknown key %u", (unsigned)key)); goto fail; } } return 0; fail: buf = tt_static_buf(); mp_snprint(buf, TT_STATIC_BUF_LEN, req.tuple); say_error("failed to decode vylog record: %s", buf); return -1; } /** * Duplicate a log record. All objects refered to by the record * are duplicated as well. 
*/ static struct vy_log_record * vy_log_record_dup(struct region *pool, const struct vy_log_record *src) { size_t used = region_used(pool); struct vy_log_record *dst = region_alloc(pool, sizeof(*dst)); if (dst == NULL) { diag_set(OutOfMemory, sizeof(*dst), "region", "struct vy_log_record"); goto err; } *dst = *src; if (src->begin != NULL) { const char *data = src->begin; mp_next(&data); size_t size = data - src->begin; dst->begin = region_alloc(pool, size); if (dst->begin == NULL) { diag_set(OutOfMemory, size, "region", "vy_log_record::begin"); goto err; } memcpy((char *)dst->begin, src->begin, size); } if (src->end != NULL) { const char *data = src->end; mp_next(&data); size_t size = data - src->end; dst->end = region_alloc(pool, size); if (dst->end == NULL) { diag_set(OutOfMemory, size, "region", "struct vy_log_record"); goto err; } memcpy((char *)dst->end, src->end, size); } if (src->key_def != NULL) { size_t size = src->key_def->part_count * sizeof(struct key_part_def); dst->key_parts = region_alloc(pool, size); if (dst->key_parts == NULL) { diag_set(OutOfMemory, size, "region", "struct key_part_def"); goto err; } key_def_dump_parts(src->key_def, dst->key_parts); dst->key_part_count = src->key_def->part_count; dst->key_def = NULL; } return dst; err: region_truncate(pool, used); return NULL; } void vy_log_init(const char *dir) { xdir_create(&vy_log.dir, dir, VYLOG, &INSTANCE_UUID); latch_create(&vy_log.latch); region_create(&vy_log.pool, cord_slab_cache()); stailq_create(&vy_log.tx); diag_create(&vy_log.tx_diag); wal_init_vy_log(); } /** * Try to flush the log buffer to disk. * * We always flush the entire vy_log buffer as a single xlog * transaction, since we do not track boundaries of @no_discard * buffered transactions, and want to avoid a partial write. 
 */
static int
vy_log_flush(void)
{
	if (vy_log.tx_size == 0)
		return 0; /* nothing to do */

	/* Test hooks: fail or stall the flush on demand. */
	ERROR_INJECT(ERRINJ_VY_LOG_FLUSH, {
		diag_set(ClientError, ER_INJECTION, "vinyl log flush");
		return -1;
	});
	struct errinj *delay = errinj(ERRINJ_VY_LOG_FLUSH_DELAY, ERRINJ_BOOL);
	if (delay != NULL && delay->bparam) {
		while (delay->bparam)
			fiber_sleep(0.001);
	}

	/* One journal entry covers all buffered records. */
	struct journal_entry *entry = journal_entry_new(vy_log.tx_size);
	if (entry == NULL)
		return -1;

	/*
	 * NOTE(review): rows (and, presumably, entry) live on the
	 * fiber region, so the early `return -1`s below appear to
	 * rely on the caller's fiber_gc() for cleanup — confirm.
	 */
	struct xrow_header *rows;
	rows = region_aligned_alloc(&fiber()->gc,
				    vy_log.tx_size * sizeof(struct xrow_header),
				    alignof(struct xrow_header));
	if (rows == NULL)
		return -1;
	/*
	 * Encode buffered records.
	 */
	int i = 0;
	struct vy_log_record *record;
	stailq_foreach_entry(record, &vy_log.tx, in_tx) {
		assert(i < vy_log.tx_size);
		struct xrow_header *row = &rows[i];
		if (vy_log_record_encode(record, row) < 0)
			return -1;
		entry->rows[i] = row;
		i++;
	}
	assert(i == vy_log.tx_size);
	/*
	 * Do actual disk writes on behalf of the WAL
	 * so as not to block the tx thread.
	 */
	if (wal_write_vy_log(entry) != 0)
		return -1;

	/* Success. Free flushed records. */
	region_reset(&vy_log.pool);
	stailq_create(&vy_log.tx);
	vy_log.tx_size = 0;
	return 0;
}

/**
 * Release all resources acquired by vy_log_init():
 * the log directory handle, the record pool and the
 * transaction diagnostics area.
 */
void
vy_log_free(void)
{
	xdir_destroy(&vy_log.dir);
	region_destroy(&vy_log.pool);
	diag_destroy(&vy_log.tx_diag);
}

int
vy_log_open(struct xlog *xlog)
{
	/*
	 * Open the current log file or create a new one
	 * if it doesn't exist.
 */
	const char *path =
		vy_log_filename(vclock_sum(&vy_log.last_checkpoint));
	if (access(path, F_OK) == 0)
		return xlog_open(xlog, path);

	if (errno != ENOENT) {
		diag_set(SystemError, "failed to access file '%s'", path);
		goto fail;
	}

	/* No log yet for the last checkpoint — create a fresh one. */
	if (xdir_create_xlog(&vy_log.dir, xlog, &vy_log.last_checkpoint) < 0)
		goto fail;

	/* A new vylog starts with a VY_LOG_SNAPSHOT marker row. */
	struct xrow_header row;
	struct vy_log_record record;
	vy_log_record_init(&record);
	record.type = VY_LOG_SNAPSHOT;
	if (vy_log_record_encode(&record, &row) < 0 ||
	    xlog_write_row(xlog, &row) < 0)
		goto fail_close_xlog;

	if (xlog_rename(xlog) < 0)
		goto fail_close_xlog;

	return 0;

fail_close_xlog:
	/* Remove the half-written file before closing it. */
	if (unlink(xlog->filename) < 0)
		say_syserror("failed to delete file '%s'", xlog->filename);
	xlog_close(xlog, false);
fail:
	return -1;
}

/** Return the next unique vinyl object ID (monotonically increasing). */
int64_t
vy_log_next_id(void)
{
	return vy_log.next_id++;
}

int
vy_log_bootstrap(void)
{
	/*
	 * Scan the directory to make sure there is no
	 * vylog files left from previous setups.
	 */
	/* NOTE(review): assumes xdir_scan() sets errno on failure
	 * (ENOENT = directory absent, which is tolerated) — confirm. */
	if (xdir_scan(&vy_log.dir) < 0 && errno != ENOENT)
		return -1;
	if (xdir_last_vclock(&vy_log.dir, NULL) >= 0)
		panic("vinyl directory is not empty");

	/* Add initial vclock to the xdir. */
	struct vclock *vclock = malloc(sizeof(*vclock));
	if (vclock == NULL) {
		diag_set(OutOfMemory, sizeof(*vclock),
			 "malloc", "struct vclock");
		return -1;
	}
	vclock_create(vclock);
	/* Ownership of vclock passes to the xdir index. */
	xdir_add_vclock(&vy_log.dir, vclock);
	return 0;
}

struct vy_recovery *
vy_log_begin_recovery(const struct vclock *vclock)
{
	assert(vy_log.recovery == NULL);

	/*
	 * Do not fail recovery if vinyl directory does not exist,
	 * because vinyl might not be even in use. Complain only
	 * on an attempt to write a vylog.
	 */
	if (xdir_scan(&vy_log.dir) < 0 && errno != ENOENT)
		return NULL;

	struct vclock vy_log_vclock;
	vclock_create(&vy_log_vclock);
	if (xdir_last_vclock(&vy_log.dir, &vy_log_vclock) >= 0 &&
	    vclock_compare(&vy_log_vclock, vclock) > 0) {
		/*
		 * Last vy_log log is newer than the last snapshot.
		 * This can't normally happen, as vy_log is rotated
		 * after snapshot is created.
Looks like somebody * deleted snap file, but forgot to delete vy_log. */ diag_set(ClientError, ER_MISSING_SNAPSHOT); return NULL; } struct vy_recovery *recovery; recovery = vy_recovery_new(vclock_sum(&vy_log_vclock), false); if (recovery == NULL) return NULL; vy_log.next_id = recovery->max_id + 1; vy_log.recovery = recovery; vclock_copy(&vy_log.last_checkpoint, vclock); return recovery; } static int vy_log_create(const struct vclock *vclock, struct vy_recovery *recovery); int vy_log_end_recovery(void) { assert(vy_log.recovery != NULL); /* Flush all pending records. */ if (vy_log_flush() < 0) { diag_log(); say_error("failed to flush vylog after recovery"); return -1; } /* * On backup we copy files corresponding to the most recent * checkpoint. Since vy_log does not create snapshots of its log * files, but instead appends records written after checkpoint * to the most recent log file, the signature of the vy_log file * corresponding to the last checkpoint equals the signature * of the previous checkpoint. So upon successful recovery * from a backup we need to rotate the log to keep checkpoint * and vy_log signatures in sync. */ struct vclock *vclock = vclockset_last(&vy_log.dir.index); if (vclock == NULL || vclock_compare(vclock, &vy_log.last_checkpoint) != 0) { vclock = malloc(sizeof(*vclock)); if (vclock == NULL) { diag_set(OutOfMemory, sizeof(*vclock), "malloc", "struct vclock"); return -1; } vclock_copy(vclock, &vy_log.last_checkpoint); xdir_add_vclock(&vy_log.dir, vclock); if (vy_log_create(vclock, vy_log.recovery) < 0) { diag_log(); say_error("failed to write `%s'", vy_log_filename(vclock_sum(vclock))); return -1; } } vy_log.recovery = NULL; return 0; } /** Argument passed to vy_log_rotate_cb_func(). */ struct vy_log_rotate_cb_arg { struct xdir *dir; struct xlog *xlog; const struct vclock *vclock; }; /** Callback passed to vy_recovery_iterate() for log rotation. 
 */
static int
vy_log_rotate_cb_func(const struct vy_log_record *record, void *cb_arg)
{
	struct vy_log_rotate_cb_arg *arg = cb_arg;
	struct xlog *xlog = arg->xlog;
	struct xrow_header row;

	say_verbose("save vylog record: %s", vy_log_record_str(record));

	/* Create the log file lazily, on the first record written. */
	if (!xlog_is_open(xlog) &&
	    xdir_create_xlog(arg->dir, xlog, arg->vclock) < 0)
		return -1;

	if (vy_log_record_encode(record, &row) < 0 ||
	    xlog_write_row(xlog, &row) < 0)
		return -1;
	return 0;
}

/**
 * Create an vy_log file from a recovery context.
 *
 * Replays every record stored in @recovery into a fresh xlog file
 * named after @vclock. Returns 0 on success (including the case
 * where there was nothing to write and no file was created),
 * -1 on failure; a partially written file is deleted on failure.
 */
static int
vy_log_create(const struct vclock *vclock, struct vy_recovery *recovery)
{
	/*
	 * Only create the log file if we have something
	 * to write to it (see vy_log_rotate_cb_func()).
	 */
	struct xlog xlog;
	xlog_clear(&xlog);

	say_verbose("saving vylog %lld", (long long)vclock_sum(vclock));

	struct vy_log_rotate_cb_arg arg = {
		.xlog = &xlog,
		.dir = &vy_log.dir,
		.vclock = vclock,
	};
	if (vy_recovery_iterate(recovery, vy_log_rotate_cb_func, &arg) < 0)
		goto err_write_xlog;

	if (!xlog_is_open(&xlog))
		goto done; /* nothing written */

	/* Mark the end of the snapshot. */
	struct xrow_header row;
	struct vy_log_record record;
	vy_log_record_init(&record);
	record.type = VY_LOG_SNAPSHOT;
	if (vy_log_record_encode(&record, &row) < 0 ||
	    xlog_write_row(&xlog, &row) < 0)
		goto err_write_xlog;

	/* Finalize the new xlog: flush, fsync and rename into place. */
	if (xlog_flush(&xlog) < 0 ||
	    xlog_sync(&xlog) < 0 ||
	    xlog_rename(&xlog) < 0)
		goto err_write_xlog;

	xlog_close(&xlog, false);
done:
	say_verbose("done saving vylog");
	return 0;

err_write_xlog:
	/* Delete the unfinished xlog so it is not picked up later. */
	if (xlog_is_open(&xlog)) {
		if (unlink(xlog.filename) < 0)
			say_syserror("failed to delete file '%s'",
				     xlog.filename);
		xlog_close(&xlog, false);
	}
	return -1;
}

/** coio_call() trampoline for vy_log_create(). */
static ssize_t
vy_log_rotate_f(va_list ap)
{
	struct vy_recovery *recovery = va_arg(ap, struct vy_recovery *);
	const struct vclock *vclock = va_arg(ap, const struct vclock *);
	return vy_log_create(vclock, recovery);
}

/**
 * Rotate the metadata log: write a new vylog file containing the
 * current recovery state under the signature of @vclock, then make
 * it the active log. No-op if @vclock matches the last checkpoint.
 * Returns 0 on success, -1 on failure.
 */
int
vy_log_rotate(const struct vclock *vclock)
{
	int64_t signature = vclock_sum(vclock);
	int64_t prev_signature = vclock_sum(&vy_log.last_checkpoint);

	assert(vy_log.recovery == NULL);

	/*
	 * This function is called right after bootstrap (by snapshot),
	 * in which case old and new signatures coincide and there's
	 * nothing we need to do.
	 */
	if (signature == prev_signature)
		return 0;

	assert(signature > prev_signature);

	/* Allocated up front so that success path cannot fail on OOM. */
	struct vclock *new_vclock = malloc(sizeof(*new_vclock));
	if (new_vclock == NULL) {
		diag_set(OutOfMemory, sizeof(*new_vclock),
			 "malloc", "struct vclock");
		return -1;
	}
	vclock_copy(new_vclock, vclock);

	say_verbose("rotating vylog %lld => %lld",
		    (long long)prev_signature, (long long)signature);

	/*
	 * Lock out all concurrent log writers while we are rotating it.
	 * This effectively stalls the vinyl scheduler for a while, but
	 * this is acceptable, because (1) the log file is small and
	 * hence can be rotated fairly quickly so the stall isn't going
	 * to take too long and (2) dumps/compactions, which are scheduled
	 * by the scheduler, are rare events so there shouldn't be too
	 * many of them piling up due to log rotation.
	 */
	latch_lock(&vy_log.latch);

	struct vy_recovery *recovery;
	recovery = vy_recovery_new_locked(prev_signature, false);
	if (recovery == NULL)
		goto fail;

	/* Do actual work from coio so as not to stall tx thread. */
	int rc = coio_call(vy_log_rotate_f, recovery, vclock);
	vy_recovery_delete(recovery);
	if (rc < 0) {
		diag_log();
		say_error("failed to write `%s'",
			  vy_log_filename(signature));
		goto fail;
	}

	/*
	 * Success. Close the old log. The new one will be opened
	 * automatically on the first write (see wal_write_vy_log()).
	 */
	wal_rotate_vy_log();
	vclock_copy(&vy_log.last_checkpoint, vclock);

	/* Add the new vclock to the xdir so that we can track it. */
	xdir_add_vclock(&vy_log.dir, new_vclock);

	latch_unlock(&vy_log.latch);
	say_verbose("done rotating vylog");
	return 0;
fail:
	latch_unlock(&vy_log.latch);
	free(new_vclock);
	return -1;
}

/**
 * Delete vylog files that are no longer needed, keeping the one
 * preceding @signature (still required for backups).
 */
void
vy_log_collect_garbage(int64_t signature)
{
	/*
	 * Always keep the previous file, because
	 * it is still needed for backups.
	 */
	signature = vy_log_prev_checkpoint(signature);
	xdir_collect_garbage(&vy_log.dir, signature, true);
}

/** Return the signature of the last vylog checkpoint. */
int64_t
vy_log_signature(void)
{
	return vclock_sum(&vy_log.last_checkpoint);
}

/**
 * Return the path of the vylog file to include in a backup for
 * @vclock, or NULL if there is no such file (vinyl not in use).
 */
const char *
vy_log_backup_path(struct vclock *vclock)
{
	/*
	 * Use the previous log file, because the current one
	 * contains records written after the last checkpoint.
	 */
	int64_t lsn = vy_log_prev_checkpoint(vclock_sum(vclock));
	if (lsn < 0)
		return NULL;
	const char *path = vy_log_filename(lsn);
	if (access(path, F_OK) == -1 && errno == ENOENT)
		return NULL; /* vinyl not used */
	return path;
}

/**
 * Begin a vylog transaction: take the log latch and remember the
 * current buffer position so the transaction can be rolled back.
 */
void
vy_log_tx_begin(void)
{
	latch_lock(&vy_log.latch);
	vy_log.tx_begin = stailq_last(&vy_log.tx);
	vy_log.tx_svp = region_used(&vy_log.pool);
	vy_log.tx_failed = false;
	say_verbose("begin vylog transaction");
}

/**
 * Commit a transaction started with vy_log_tx_begin().
 *
 * If @no_discard is set, pending records won't be expunged from the
 * buffer on failure, so that the next transaction will retry to write
 * them to disk.
 */
static int
vy_log_tx_do_commit(bool no_discard)
{
	struct stailq rollback;

	assert(latch_owner(&vy_log.latch) == fiber());

	if (vy_log.tx_failed) {
		/*
		 * vy_log_write() failed to append a record to tx.
		 * @no_discard transactions can't handle this.
		 */
		diag_move(&vy_log.tx_diag, diag_get());
		if (no_discard) {
			diag_log();
			panic("non-discardable vylog transaction failed");
		}
		goto rollback;
	}

	/*
	 * During recovery, we may replay records we failed to commit
	 * before restart (e.g. drop index). Since the log isn't open
	 * yet, simply leave them in the tx buffer to be flushed upon
	 * recovery completion.
	 */
	if (vy_log.recovery != NULL)
		goto done;

	if (vy_log_flush() != 0) {
		if (!no_discard)
			goto rollback;
		/*
		 * We were told not to discard the transaction on
		 * failure so just warn and leave it in the buffer.
		 */
		struct error *e = diag_last_error(diag_get());
		say_warn("failed to flush vylog: %s", e->errmsg);
	}

done:
	say_verbose("commit vylog transaction");
	latch_unlock(&vy_log.latch);
	return 0;

rollback:
	/*
	 * Cut off only this transaction's records (everything after
	 * tx_begin) and release their region memory.
	 * NOTE(review): tx_size/tx_svp are reset to 0 even though
	 * records queued before this transaction may remain in the
	 * buffer — looks intentional but worth confirming upstream.
	 */
	stailq_cut_tail(&vy_log.tx, vy_log.tx_begin, &rollback);
	region_truncate(&vy_log.pool, vy_log.tx_svp);
	vy_log.tx_size = 0;
	vy_log.tx_svp = 0;
	say_verbose("rollback vylog transaction");
	latch_unlock(&vy_log.latch);
	return -1;
}

/** Commit a vylog transaction, discarding it on failure. */
int
vy_log_tx_commit(void)
{
	return vy_log_tx_do_commit(false);
}

/**
 * Commit a vylog transaction, keeping pending records in the buffer
 * on flush failure so they are retried later. Never fails.
 */
void
vy_log_tx_try_commit(void)
{
	if (vy_log_tx_do_commit(true) != 0)
		unreachable();
}

/**
 * Append a record to the current vylog transaction. On allocation
 * failure the error is stashed in tx_diag and the transaction is
 * marked failed; the actual error surfaces at commit time.
 */
void
vy_log_write(const struct vy_log_record *record)
{
	assert(latch_owner(&vy_log.latch) == fiber());

	struct vy_log_record *tx_record = vy_log_record_dup(&vy_log.pool,
							    record);
	if (tx_record == NULL) {
		diag_move(diag_get(), &vy_log.tx_diag);
		vy_log.tx_failed = true;
		return;
	}

	say_verbose("write vylog record: %s", vy_log_record_str(tx_record));
	stailq_add_tail_entry(&vy_log.tx, tx_record, in_tx);
	vy_log.tx_size++;
}

/**
 * Given space_id and index_id, return the corresponding key in
 * vy_recovery::index_id_hash map (space_id in the high 32 bits,
 * index_id in the low 32 bits).
 */
static inline int64_t
vy_recovery_index_id_hash(uint32_t space_id, uint32_t index_id)
{
	return ((uint64_t)space_id << 32) + index_id;
}

/** Lookup a vinyl index in vy_recovery::index_id_hash map.
*/ static struct vy_index_recovery_info * vy_recovery_lookup_index_by_id(struct vy_recovery *recovery, uint32_t space_id, uint32_t index_id) { int64_t key = vy_recovery_index_id_hash(space_id, index_id); struct mh_i64ptr_t *h = recovery->index_id_hash; mh_int_t k = mh_i64ptr_find(h, key, NULL); if (k == mh_end(h)) return NULL; return mh_i64ptr_node(h, k)->val; } /** Lookup a vinyl index in vy_recovery::index_lsn_hash map. */ static struct vy_index_recovery_info * vy_recovery_lookup_index_by_lsn(struct vy_recovery *recovery, int64_t index_lsn) { struct mh_i64ptr_t *h = recovery->index_lsn_hash; mh_int_t k = mh_i64ptr_find(h, index_lsn, NULL); if (k == mh_end(h)) return NULL; return mh_i64ptr_node(h, k)->val; } /** Lookup a vinyl range in vy_recovery::range_hash map. */ static struct vy_range_recovery_info * vy_recovery_lookup_range(struct vy_recovery *recovery, int64_t range_id) { struct mh_i64ptr_t *h = recovery->range_hash; mh_int_t k = mh_i64ptr_find(h, range_id, NULL); if (k == mh_end(h)) return NULL; return mh_i64ptr_node(h, k)->val; } /** Lookup a vinyl run in vy_recovery::run_hash map. */ static struct vy_run_recovery_info * vy_recovery_lookup_run(struct vy_recovery *recovery, int64_t run_id) { struct mh_i64ptr_t *h = recovery->run_hash; mh_int_t k = mh_i64ptr_find(h, run_id, NULL); if (k == mh_end(h)) return NULL; return mh_i64ptr_node(h, k)->val; } /** Lookup a vinyl slice in vy_recovery::slice_hash map. */ static struct vy_slice_recovery_info * vy_recovery_lookup_slice(struct vy_recovery *recovery, int64_t slice_id) { struct mh_i64ptr_t *h = recovery->slice_hash; mh_int_t k = mh_i64ptr_find(h, slice_id, NULL); if (k == mh_end(h)) return NULL; return mh_i64ptr_node(h, k)->val; } /** * Handle a VY_LOG_CREATE_INDEX log record. * This function allocates a new vinyl index with ID @index_lsn * and inserts it to the hash. * Return 0 on success, -1 on failure (ID collision or OOM). 
 */
static int
vy_recovery_create_index(struct vy_recovery *recovery, int64_t index_lsn,
			 uint32_t index_id, uint32_t space_id,
			 const struct key_part_def *key_parts,
			 uint32_t key_part_count)
{
	struct vy_index_recovery_info *index;
	struct key_part_def *key_parts_copy;
	struct mh_i64ptr_node_t node;
	struct mh_i64ptr_t *h;
	mh_int_t k;

	/*
	 * Make a copy of the key definition to be used for
	 * the new index incarnation.
	 */
	if (key_parts == NULL) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Missing key definition for index %lld",
				    (long long)index_lsn));
		return -1;
	}
	key_parts_copy = malloc(sizeof(*key_parts) * key_part_count);
	if (key_parts_copy == NULL) {
		diag_set(OutOfMemory, sizeof(*key_parts) * key_part_count,
			 "malloc", "struct key_part_def");
		return -1;
	}
	memcpy(key_parts_copy, key_parts, sizeof(*key_parts) * key_part_count);

	/*
	 * Look up the index in the hash keyed by (space_id, index_id).
	 */
	h = recovery->index_id_hash;
	node.key = vy_recovery_index_id_hash(space_id, index_id);
	k = mh_i64ptr_find(h, node.key, NULL);
	index = (k != mh_end(h)) ? mh_i64ptr_node(h, k)->val : NULL;

	if (index == NULL) {
		/*
		 * This is the first time the index is created
		 * (there's no previous incarnation in the context).
		 * Allocate a node for the index and add it to
		 * the hash.
		 */
		index = malloc(sizeof(*index));
		if (index == NULL) {
			diag_set(OutOfMemory, sizeof(*index),
				 "malloc", "struct vy_index_recovery_info");
			free(key_parts_copy);
			return -1;
		}
		index->index_id = index_id;
		index->space_id = space_id;
		rlist_create(&index->ranges);
		rlist_create(&index->runs);

		node.val = index;
		if (mh_i64ptr_put(h, &node, NULL, NULL) == mh_end(h)) {
			diag_set(OutOfMemory, 0, "mh_i64ptr_put",
				 "mh_i64ptr_node_t");
			free(key_parts_copy);
			free(index);
			return -1;
		}
	} else {
		/*
		 * The index was dropped and recreated with the
		 * same ID. Update its key definition (because it
		 * could have changed since the last time it was
		 * used) and reset its state.
		 */
		if (!index->is_dropped) {
			diag_set(ClientError, ER_INVALID_VYLOG_FILE,
				 tt_sprintf("Index %u/%u created twice",
					    (unsigned)space_id,
					    (unsigned)index_id));
			free(key_parts_copy);
			return -1;
		}
		assert(index->index_id == index_id);
		assert(index->space_id == space_id);
		free(index->key_parts);
	}

	/* (Re)initialize the incarnation's state. */
	index->index_lsn = index_lsn;
	index->key_parts = key_parts_copy;
	index->key_part_count = key_part_count;
	index->is_dropped = false;
	index->dump_lsn = -1;
	index->truncate_count = 0;

	/*
	 * Add the index to the LSN hash as well, so it can be found
	 * by either (space_id, index_id) or by its creation LSN.
	 */
	h = recovery->index_lsn_hash;
	node.key = index_lsn;
	node.val = index;
	if (mh_i64ptr_find(h, index_lsn, NULL) != mh_end(h)) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Duplicate index id %lld",
				    (long long)index_lsn));
		return -1;
	}
	if (mh_i64ptr_put(h, &node, NULL, NULL) == mh_end(h)) {
		diag_set(OutOfMemory, 0, "mh_i64ptr_put",
			 "mh_i64ptr_node_t");
		return -1;
	}
	return 0;
}

/**
 * Handle a VY_LOG_DROP_INDEX log record.
 * This function marks the vinyl index with ID @index_lsn as dropped.
 * All ranges and runs of the index must have been deleted by now.
 * Returns 0 on success, -1 if ID not found or index is already marked.
*/ static int vy_recovery_drop_index(struct vy_recovery *recovery, int64_t index_lsn) { struct vy_index_recovery_info *index; index = vy_recovery_lookup_index_by_lsn(recovery, index_lsn); if (index == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Index %lld deleted but not registered", (long long)index_lsn)); return -1; } if (index->is_dropped) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Index %lld deleted twice", (long long)index_lsn)); return -1; } if (!rlist_empty(&index->ranges)) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Dropped index %lld has ranges", (long long)index_lsn)); return -1; } struct vy_run_recovery_info *run; rlist_foreach_entry(run, &index->runs, in_index) { if (!run->is_dropped && !run->is_incomplete) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Dropped index %lld has active " "runs", (long long)index_lsn)); return -1; } } index->is_dropped = true; return 0; } /** * Handle a VY_LOG_DUMP_INDEX log record. * This function updates LSN of the last dump of the vinyl index * with ID @index_lsn. * Returns 0 on success, -1 if ID not found or index is dropped. */ static int vy_recovery_dump_index(struct vy_recovery *recovery, int64_t index_lsn, int64_t dump_lsn) { struct vy_index_recovery_info *index; index = vy_recovery_lookup_index_by_lsn(recovery, index_lsn); if (index == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Dump of unregistered index %lld", (long long)index_lsn)); return -1; } if (index->is_dropped) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Dump of deleted index %lld", (long long)index_lsn)); return -1; } index->dump_lsn = dump_lsn; return 0; } /** * Handle a VY_LOG_TRUNCATE_INDEX log record. * This function updates truncate_count of the index with ID @index_lsn. * Returns 0 on success, -1 if ID not found or index is dropped. 
*/ static int vy_recovery_truncate_index(struct vy_recovery *recovery, int64_t index_lsn, int64_t truncate_count) { struct vy_index_recovery_info *index; index = vy_recovery_lookup_index_by_lsn(recovery, index_lsn); if (index == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Truncation of unregistered index %lld", (long long)index_lsn)); return -1; } if (index->is_dropped) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Truncation of deleted index %lld", (long long)index_lsn)); return -1; } index->truncate_count = truncate_count; return 0; } /** * Allocate a vinyl run with ID @run_id and insert it to the hash. * Return the new run on success, NULL on OOM. */ static struct vy_run_recovery_info * vy_recovery_do_create_run(struct vy_recovery *recovery, int64_t run_id) { struct vy_run_recovery_info *run = malloc(sizeof(*run)); if (run == NULL) { diag_set(OutOfMemory, sizeof(*run), "malloc", "struct vy_run_recovery_info"); return NULL; } struct mh_i64ptr_t *h = recovery->run_hash; struct mh_i64ptr_node_t node = { run_id, run }; struct mh_i64ptr_node_t *old_node = NULL; if (mh_i64ptr_put(h, &node, &old_node, NULL) == mh_end(h)) { diag_set(OutOfMemory, 0, "mh_i64ptr_put", "mh_i64ptr_node_t"); free(run); return NULL; } assert(old_node == NULL); run->id = run_id; run->dump_lsn = -1; run->gc_lsn = -1; run->is_incomplete = false; run->is_dropped = false; rlist_create(&run->in_index); if (recovery->max_id < run_id) recovery->max_id = run_id; return run; } /** * Handle a VY_LOG_PREPARE_RUN log record. * This function creates a new incomplete vinyl run with ID @run_id * and adds it to the list of runs of the index with ID @index_lsn. * Return 0 on success, -1 if run already exists, index not found, * or OOM. 
*/ static int vy_recovery_prepare_run(struct vy_recovery *recovery, int64_t index_lsn, int64_t run_id) { struct vy_index_recovery_info *index; index = vy_recovery_lookup_index_by_lsn(recovery, index_lsn); if (index == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Run %lld created for unregistered " "index %lld", (long long)run_id, (long long)index_lsn)); return -1; } if (vy_recovery_lookup_run(recovery, run_id) != NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Duplicate run id %lld", (long long)run_id)); return -1; } struct vy_run_recovery_info *run; run = vy_recovery_do_create_run(recovery, run_id); if (run == NULL) return -1; run->is_incomplete = true; rlist_add_entry(&index->runs, run, in_index); return 0; } /** * Handle a VY_LOG_CREATE_RUN log record. * This function adds the vinyl run with ID @run_id to the list * of runs of the index with ID @index_lsn and marks it committed. * If the run does not exist, it will be created. * Return 0 on success, -1 if index not found, run or index * is dropped, or OOM. 
*/ static int vy_recovery_create_run(struct vy_recovery *recovery, int64_t index_lsn, int64_t run_id, int64_t dump_lsn) { struct vy_index_recovery_info *index; index = vy_recovery_lookup_index_by_lsn(recovery, index_lsn); if (index == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Run %lld created for unregistered " "index %lld", (long long)run_id, (long long)index_lsn)); return -1; } if (index->is_dropped) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Run %lld created for deleted " "index %lld", (long long)run_id, (long long)index_lsn)); return -1; } struct vy_run_recovery_info *run; run = vy_recovery_lookup_run(recovery, run_id); if (run != NULL && run->is_dropped) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Run %lld committed after deletion", (long long)run_id)); return -1; } if (run == NULL) { run = vy_recovery_do_create_run(recovery, run_id); if (run == NULL) return -1; } run->dump_lsn = dump_lsn; run->is_incomplete = false; rlist_move_entry(&index->runs, run, in_index); return 0; } /** * Handle a VY_LOG_DROP_RUN log record. * This function marks the vinyl run with ID @run_id as deleted. * Note, the run is not removed from the recovery context until it is * "forgotten", because it is still needed for garbage collection. * Return 0 on success, -1 if run not found or already deleted. */ static int vy_recovery_drop_run(struct vy_recovery *recovery, int64_t run_id, int64_t gc_lsn) { struct vy_run_recovery_info *run; run = vy_recovery_lookup_run(recovery, run_id); if (run == NULL) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Run %lld deleted but not registered", (long long)run_id)); return -1; } if (run->is_dropped) { diag_set(ClientError, ER_INVALID_VYLOG_FILE, tt_sprintf("Run %lld deleted twice", (long long)run_id)); return -1; } run->is_dropped = true; run->gc_lsn = gc_lsn; return 0; } /** * Handle a VY_LOG_FORGET_RUN log record. * This function frees the vinyl run with ID @run_id. 
 * Return 0 on success, -1 if run not found.
 */
static int
vy_recovery_forget_run(struct vy_recovery *recovery, int64_t run_id)
{
	struct mh_i64ptr_t *h = recovery->run_hash;
	mh_int_t k = mh_i64ptr_find(h, run_id, NULL);
	if (k == mh_end(h)) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Run %lld forgotten but not registered",
				    (long long)run_id));
		return -1;
	}
	struct vy_run_recovery_info *run = mh_i64ptr_node(h, k)->val;
	mh_i64ptr_del(h, k, NULL);
	rlist_del_entry(run, in_index);
	free(run);
	return 0;
}

/**
 * Handle a VY_LOG_INSERT_RANGE log record.
 * This function allocates a new vinyl range with ID @range_id,
 * inserts it to the hash, and adds it to the list of ranges of the
 * index with ID @index_lsn.
 * @begin/@end are the range's boundary keys; each is measured by
 * skipping one MsgPack value (mp_next), and NULL means open-ended.
 * The keys are stored inline after the struct in a single allocation.
 * Return 0 on success, -1 on failure (ID collision or OOM).
 */
static int
vy_recovery_insert_range(struct vy_recovery *recovery, int64_t index_lsn,
			 int64_t range_id, const char *begin, const char *end)
{
	if (vy_recovery_lookup_range(recovery, range_id) != NULL) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Duplicate range id %lld",
				    (long long)range_id));
		return -1;
	}
	struct vy_index_recovery_info *index;
	index = vy_recovery_lookup_index_by_lsn(recovery, index_lsn);
	if (index == NULL) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Range %lld created for unregistered "
				    "index %lld", (long long)range_id,
				    (long long)index_lsn));
		return -1;
	}

	/* Compute the total size: struct plus both inline keys. */
	size_t size = sizeof(struct vy_range_recovery_info);
	const char *data;
	data = begin;
	if (data != NULL)
		mp_next(&data);
	size_t begin_size = data - begin;
	size += begin_size;
	data = end;
	if (data != NULL)
		mp_next(&data);
	size_t end_size = data - end;
	size += end_size;

	struct vy_range_recovery_info *range = malloc(size);
	if (range == NULL) {
		diag_set(OutOfMemory, size, "malloc",
			 "struct vy_range_recovery_info");
		return -1;
	}
	struct mh_i64ptr_t *h = recovery->range_hash;
	struct mh_i64ptr_node_t node = { range_id, range };
	if (mh_i64ptr_put(h, &node, NULL, NULL) == mh_end(h)) {
		diag_set(OutOfMemory, 0,
			 "mh_i64ptr_put", "mh_i64ptr_node_t");
		free(range);
		return -1;
	}
	range->id = range_id;
	/*
	 * Keys live in the tail of the same allocation.
	 * NOTE: (void *) arithmetic is a GCC/Clang extension.
	 */
	if (begin != NULL) {
		range->begin = (void *)range + sizeof(*range);
		memcpy(range->begin, begin, begin_size);
	} else
		range->begin = NULL;
	if (end != NULL) {
		range->end = (void *)range + sizeof(*range) + begin_size;
		memcpy(range->end, end, end_size);
	} else
		range->end = NULL;
	rlist_create(&range->slices);
	rlist_add_entry(&index->ranges, range, in_index);
	if (recovery->max_id < range_id)
		recovery->max_id = range_id;
	return 0;
}

/**
 * Handle a VY_LOG_DELETE_RANGE log record.
 * This function frees the vinyl range with ID @range_id.
 * All slices of the range must have been deleted by now.
 * Return 0 on success, -1 if range not found.
 */
static int
vy_recovery_delete_range(struct vy_recovery *recovery, int64_t range_id)
{
	struct mh_i64ptr_t *h = recovery->range_hash;
	mh_int_t k = mh_i64ptr_find(h, range_id, NULL);
	if (k == mh_end(h)) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Range %lld deleted but not registered",
				    (long long)range_id));
		return -1;
	}
	struct vy_range_recovery_info *range = mh_i64ptr_node(h, k)->val;
	if (!rlist_empty(&range->slices)) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Deleted range %lld has run slices",
				    (long long)range_id));
		return -1;
	}
	mh_i64ptr_del(h, k, NULL);
	rlist_del_entry(range, in_index);
	free(range);
	return 0;
}

/**
 * Handle a VY_LOG_INSERT_SLICE log record.
 * This function allocates a new slice with ID @slice_id for
 * the run with ID @run_id, inserts it into the hash, and adds
 * it to the list of slices of the range with ID @range_id.
 * Return 0 on success, -1 on failure (ID collision or OOM).
 */
static int
vy_recovery_insert_slice(struct vy_recovery *recovery, int64_t range_id,
			 int64_t run_id, int64_t slice_id,
			 const char *begin, const char *end)
{
	if (vy_recovery_lookup_slice(recovery, slice_id) != NULL) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Duplicate slice id %lld",
				    (long long)slice_id));
		return -1;
	}
	struct vy_range_recovery_info *range;
	range = vy_recovery_lookup_range(recovery, range_id);
	if (range == NULL) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Slice %lld created for unregistered "
				    "range %lld", (long long)slice_id,
				    (long long)range_id));
		return -1;
	}
	struct vy_run_recovery_info *run;
	run = vy_recovery_lookup_run(recovery, run_id);
	if (run == NULL) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Slice %lld created for unregistered "
				    "run %lld", (long long)slice_id,
				    (long long)run_id));
		return -1;
	}

	/*
	 * Size the allocation: struct plus both boundary keys stored
	 * inline. Each key is one MsgPack value (measured by mp_next);
	 * NULL means open-ended.
	 */
	size_t size = sizeof(struct vy_slice_recovery_info);
	const char *data;
	data = begin;
	if (data != NULL)
		mp_next(&data);
	size_t begin_size = data - begin;
	size += begin_size;
	data = end;
	if (data != NULL)
		mp_next(&data);
	size_t end_size = data - end;
	size += end_size;

	struct vy_slice_recovery_info *slice = malloc(size);
	if (slice == NULL) {
		diag_set(OutOfMemory, size, "malloc",
			 "struct vy_slice_recovery_info");
		return -1;
	}
	struct mh_i64ptr_t *h = recovery->slice_hash;
	struct mh_i64ptr_node_t node = { slice_id, slice };
	if (mh_i64ptr_put(h, &node, NULL, NULL) == mh_end(h)) {
		diag_set(OutOfMemory, 0,
			 "mh_i64ptr_put", "mh_i64ptr_node_t");
		free(slice);
		return -1;
	}
	slice->id = slice_id;
	slice->run = run;
	/* Keys live in the tail of the same allocation. */
	if (begin != NULL) {
		slice->begin = (void *)slice + sizeof(*slice);
		memcpy(slice->begin, begin, begin_size);
	} else
		slice->begin = NULL;
	if (end != NULL) {
		slice->end = (void *)slice + sizeof(*slice) + begin_size;
		memcpy(slice->end, end, end_size);
	} else
		slice->end = NULL;
	/*
	 * If dump races with compaction, an older slice created by
	 * compaction may be added after a newer slice created by
	 * dump. Make sure that the list stays sorted by LSN in any
	 * case.
	 */
	struct vy_slice_recovery_info *next_slice;
	rlist_foreach_entry(next_slice, &range->slices, in_range) {
		if (next_slice->run->dump_lsn < slice->run->dump_lsn)
			break;
	}
	rlist_add_tail(&next_slice->in_range, &slice->in_range);
	if (recovery->max_id < slice_id)
		recovery->max_id = slice_id;
	return 0;
}

/**
 * Handle a VY_LOG_DELETE_SLICE log record.
 * This function frees the vinyl slice with ID @slice_id.
 * Return 0 on success, -1 if slice not found.
 */
static int
vy_recovery_delete_slice(struct vy_recovery *recovery, int64_t slice_id)
{
	struct mh_i64ptr_t *h = recovery->slice_hash;
	mh_int_t k = mh_i64ptr_find(h, slice_id, NULL);
	if (k == mh_end(h)) {
		diag_set(ClientError, ER_INVALID_VYLOG_FILE,
			 tt_sprintf("Slice %lld deleted but not registered",
				    (long long)slice_id));
		return -1;
	}
	struct vy_slice_recovery_info *slice = mh_i64ptr_node(h, k)->val;
	mh_i64ptr_del(h, k, NULL);
	rlist_del_entry(slice, in_range);
	free(slice);
	return 0;
}

/**
 * Update a recovery context with a new log record.
 * Return 0 on success, -1 on failure.
 *
 * The purpose of this function is to restore the latest consistent
 * view of the system by replaying the metadata log.
 */
static int
vy_recovery_process_record(struct vy_recovery *recovery,
			   const struct vy_log_record *record)
{
	int rc;
	/* Dispatch on the record type to the matching handler. */
	switch (record->type) {
	case VY_LOG_CREATE_INDEX:
		rc = vy_recovery_create_index(recovery, record->index_lsn,
				record->index_id, record->space_id,
				record->key_parts, record->key_part_count);
		break;
	case VY_LOG_DROP_INDEX:
		rc = vy_recovery_drop_index(recovery, record->index_lsn);
		break;
	case VY_LOG_INSERT_RANGE:
		rc = vy_recovery_insert_range(recovery, record->index_lsn,
				record->range_id, record->begin, record->end);
		break;
	case VY_LOG_DELETE_RANGE:
		rc = vy_recovery_delete_range(recovery, record->range_id);
		break;
	case VY_LOG_PREPARE_RUN:
		rc = vy_recovery_prepare_run(recovery, record->index_lsn,
					     record->run_id);
		break;
	case VY_LOG_CREATE_RUN:
		rc = vy_recovery_create_run(recovery, record->index_lsn,
					    record->run_id, record->dump_lsn);
		break;
	case VY_LOG_DROP_RUN:
		rc = vy_recovery_drop_run(recovery, record->run_id,
					  record->gc_lsn);
		break;
	case VY_LOG_FORGET_RUN:
		rc = vy_recovery_forget_run(recovery, record->run_id);
		break;
	case VY_LOG_INSERT_SLICE:
		rc = vy_recovery_insert_slice(recovery, record->range_id,
					      record->run_id, record->slice_id,
					      record->begin, record->end);
		break;
	case VY_LOG_DELETE_SLICE:
		rc = vy_recovery_delete_slice(recovery, record->slice_id);
		break;
	case VY_LOG_DUMP_INDEX:
		rc = vy_recovery_dump_index(recovery, record->index_lsn,
					    record->dump_lsn);
		break;
	case VY_LOG_TRUNCATE_INDEX:
		rc = vy_recovery_truncate_index(recovery, record->index_lsn,
						record->truncate_count);
		break;
	default:
		unreachable();
	}
	if (rc != 0)
		say_error("failed to process vylog record: %s",
			  vy_log_record_str(record));
	return rc;
}

/**
 * coio_call() worker: load the vylog file with the given signature
 * and build a recovery context from it.
 * Arguments (va_list): int64_t signature, int only_checkpoint
 * (stop at the VY_LOG_SNAPSHOT marker), struct vy_recovery **out.
 * Returns 0 on success, -1 on failure.
 */
static ssize_t
vy_recovery_new_f(va_list ap)
{
	int64_t signature = va_arg(ap, int64_t);
	bool only_checkpoint = va_arg(ap, int);
	struct vy_recovery **p_recovery = va_arg(ap, struct vy_recovery **);

	say_verbose("loading vylog %lld", (long long)signature);

	struct vy_recovery *recovery = malloc(sizeof(*recovery));
	if (recovery == NULL) {
		diag_set(OutOfMemory, sizeof(*recovery),
			 "malloc", "struct vy_recovery");
		goto fail;
	}

	/*
	 * NULL the hashes first so vy_recovery_delete() is safe to
	 * call if any of the allocations below fails.
	 */
	recovery->index_id_hash = NULL;
	recovery->index_lsn_hash = NULL;
	recovery->range_hash = NULL;
	recovery->run_hash = NULL;
	recovery->slice_hash = NULL;
	recovery->max_id = -1;

	recovery->index_id_hash = mh_i64ptr_new();
	recovery->index_lsn_hash = mh_i64ptr_new();
	recovery->range_hash = mh_i64ptr_new();
	recovery->run_hash = mh_i64ptr_new();
	recovery->slice_hash = mh_i64ptr_new();
	if (recovery->index_id_hash == NULL ||
	    recovery->index_lsn_hash == NULL ||
	    recovery->range_hash == NULL ||
	    recovery->run_hash == NULL ||
	    recovery->slice_hash == NULL) {
		diag_set(OutOfMemory, 0, "mh_i64ptr_new", "mh_i64ptr_t");
		goto fail_free;
	}

	/*
	 * We don't create a log file if there are no objects to
	 * be stored in it, so if the log doesn't exist, assume
	 * the recovery context is empty.
	 */
	const char *path = vy_log_filename(signature);
	if (access(path, F_OK) < 0 && errno == ENOENT)
		goto out;

	struct xlog_cursor cursor;
	if (xdir_open_cursor(&vy_log.dir, signature, &cursor) < 0)
		goto fail_free;

	int rc;
	struct xrow_header row;
	while ((rc = xlog_cursor_next(&cursor, &row, false)) == 0) {
		struct vy_log_record record;
		rc = vy_log_record_decode(&record, &row);
		if (rc < 0)
			break;
		say_verbose("load vylog record: %s",
			    vy_log_record_str(&record));
		if (record.type == VY_LOG_SNAPSHOT) {
			/* Checkpoint marker: stop here if requested. */
			if (only_checkpoint)
				break;
			continue;
		}
		rc = vy_recovery_process_record(recovery, &record);
		if (rc < 0)
			break;
		fiber_gc();
	}
	fiber_gc();
	if (rc < 0)
		goto fail_close;

	xlog_cursor_close(&cursor, false);
out:
	say_verbose("done loading vylog");
	*p_recovery = recovery;
	return 0;

fail_close:
	xlog_cursor_close(&cursor, false);
fail_free:
	vy_recovery_delete(recovery);
fail:
	return -1;
}

/**
 * Load the metadata log and return a recovery context.
 * Must be called with the log latch held.
*/ static struct vy_recovery * vy_recovery_new_locked(int64_t signature, bool only_checkpoint) { int rc; struct vy_recovery *recovery; assert(latch_owner(&vy_log.latch) == fiber()); /* * Before proceeding to log recovery, make sure that all * pending records have been flushed out. */ rc = vy_log_flush(); if (rc != 0) { diag_log(); say_error("failed to flush vylog for recovery"); return NULL; } /* Load the log from coio so as not to stall tx thread. */ rc = coio_call(vy_recovery_new_f, signature, (int)only_checkpoint, &recovery); if (rc != 0) { diag_log(); say_error("failed to load `%s'", vy_log_filename(signature)); return NULL; } return recovery; } struct vy_recovery * vy_recovery_new(int64_t signature, bool only_checkpoint) { /* Lock out concurrent writers while we are loading the log. */ latch_lock(&vy_log.latch); struct vy_recovery *recovery; recovery = vy_recovery_new_locked(signature, only_checkpoint); latch_unlock(&vy_log.latch); return recovery; } /** Helper to delete mh_i64ptr_t along with all its records. */ static void vy_recovery_delete_hash(struct mh_i64ptr_t *h) { mh_int_t i; mh_foreach(h, i) free(mh_i64ptr_node(h, i)->val); mh_i64ptr_delete(h); } void vy_recovery_delete(struct vy_recovery *recovery) { if (recovery->index_id_hash != NULL) { mh_int_t i; mh_foreach(recovery->index_id_hash, i) { struct vy_index_recovery_info *index; index = mh_i64ptr_node(recovery->index_id_hash, i)->val; free(index->key_parts); free(index); } mh_i64ptr_delete(recovery->index_id_hash); } if (recovery->index_lsn_hash != NULL) { /* Hash entries were deleted along with index_id_hash. 
*/ mh_i64ptr_delete(recovery->index_lsn_hash); } if (recovery->range_hash != NULL) vy_recovery_delete_hash(recovery->range_hash); if (recovery->run_hash != NULL) vy_recovery_delete_hash(recovery->run_hash); if (recovery->slice_hash != NULL) vy_recovery_delete_hash(recovery->slice_hash); TRASH(recovery); free(recovery); } static int vy_recovery_iterate_index(struct vy_index_recovery_info *index, vy_recovery_cb cb, void *cb_arg) { struct vy_range_recovery_info *range; struct vy_slice_recovery_info *slice; struct vy_run_recovery_info *run; struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_CREATE_INDEX; record.index_lsn = index->index_lsn; record.index_id = index->index_id; record.space_id = index->space_id; record.key_parts = index->key_parts; record.key_part_count = index->key_part_count; if (cb(&record, cb_arg) != 0) return -1; if (index->truncate_count > 0) { vy_log_record_init(&record); record.type = VY_LOG_TRUNCATE_INDEX; record.index_lsn = index->index_lsn; record.truncate_count = index->truncate_count; if (cb(&record, cb_arg) != 0) return -1; } if (index->dump_lsn >= 0) { vy_log_record_init(&record); record.type = VY_LOG_DUMP_INDEX; record.index_lsn = index->index_lsn; record.dump_lsn = index->dump_lsn; if (cb(&record, cb_arg) != 0) return -1; } rlist_foreach_entry(run, &index->runs, in_index) { vy_log_record_init(&record); if (run->is_incomplete) { record.type = VY_LOG_PREPARE_RUN; } else { record.type = VY_LOG_CREATE_RUN; record.dump_lsn = run->dump_lsn; } record.index_lsn = index->index_lsn; record.run_id = run->id; record.is_dropped = run->is_dropped; if (cb(&record, cb_arg) != 0) return -1; if (!run->is_dropped) continue; vy_log_record_init(&record); record.type = VY_LOG_DROP_RUN; record.run_id = run->id; record.gc_lsn = run->gc_lsn; if (cb(&record, cb_arg) != 0) return -1; } rlist_foreach_entry(range, &index->ranges, in_index) { vy_log_record_init(&record); record.type = VY_LOG_INSERT_RANGE; record.index_lsn = 
index->index_lsn; record.range_id = range->id; record.begin = range->begin; record.end = range->end; if (cb(&record, cb_arg) != 0) return -1; /* * Newer slices are stored closer to the head of the list, * while we are supposed to return slices in chronological * order, so use reverse iterator. */ rlist_foreach_entry_reverse(slice, &range->slices, in_range) { vy_log_record_init(&record); record.type = VY_LOG_INSERT_SLICE; record.range_id = range->id; record.slice_id = slice->id; record.run_id = slice->run->id; record.begin = slice->begin; record.end = slice->end; if (cb(&record, cb_arg) != 0) return -1; } } if (index->is_dropped) { vy_log_record_init(&record); record.type = VY_LOG_DROP_INDEX; record.index_lsn = index->index_lsn; if (cb(&record, cb_arg) != 0) return -1; } return 0; } int vy_recovery_iterate(struct vy_recovery *recovery, vy_recovery_cb cb, void *cb_arg) { mh_int_t i; mh_foreach(recovery->index_id_hash, i) { struct vy_index_recovery_info *index; index = mh_i64ptr_node(recovery->index_id_hash, i)->val; /* * Purge dropped indexes that are not referenced by runs * (and thus not needed for garbage collection) from the * log on rotation. */ if (index->is_dropped && rlist_empty(&index->runs)) continue; if (vy_recovery_iterate_index(index, cb, cb_arg) < 0) return -1; } return 0; } int vy_recovery_load_index(struct vy_recovery *recovery, uint32_t space_id, uint32_t index_id, int64_t index_lsn, bool is_checkpoint_recovery, vy_recovery_cb cb, void *cb_arg) { struct vy_index_recovery_info *index; index = vy_recovery_lookup_index_by_id(recovery, space_id, index_id); if (index == NULL) return 0; /* See the comment to the function declaration. */ if (index_lsn < index->index_lsn) { /* * Loading a past incarnation of the index. * Emit create/drop records to indicate that * it is going to be dropped by a WAL statement * and hence doesn't need to be recovered. 
*/ struct vy_log_record record; vy_log_record_init(&record); record.type = VY_LOG_CREATE_INDEX; record.index_id = index->index_id; record.space_id = index->space_id; record.index_lsn = index_lsn; if (cb(&record, cb_arg) != 0) return -1; vy_log_record_init(&record); record.type = VY_LOG_DROP_INDEX; record.index_lsn = index_lsn; if (cb(&record, cb_arg) != 0) return -1; return 0; } else if (is_checkpoint_recovery || index_lsn == index->index_lsn) { /* * Loading the last incarnation of the index. * Replay all records we have recovered from * the log for this index. */ return vy_recovery_iterate_index(index, cb, cb_arg); } else { /* * The requested incarnation is missing in the recovery * context, because we failed to log it before restart. * Do nothing and let the caller retry logging. */ assert(!is_checkpoint_recovery); assert(index_lsn > index->index_lsn); return 0; } } tarantool_1.9.1.26.g63eb81e3c/src/box/xrow_io.h0000664000000000000000000000360113306560010017416 0ustar rootroot#ifndef TARANTOOL_XROW_IO_H_INCLUDED #define TARANTOOL_XROW_IO_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif struct ev_io; struct ibuf; struct xrow_header; void coio_read_xrow(struct ev_io *coio, struct ibuf *in, struct xrow_header *row); void coio_read_xrow_timeout_xc(struct ev_io *coio, struct ibuf *in, struct xrow_header *row, double timeout); void coio_write_xrow(struct ev_io *coio, const struct xrow_header *row); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* TARANTOOL_XROW_IO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_update.c0000664000000000000000000011116213306560010020420 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tuple_update.h" #include #include #include "say.h" #include "error.h" #include "diag.h" #include "trivia/util.h" #include "third_party/queue.h" #include #include #include #include "column_mask.h" /** UPDATE request implementation. * UPDATE request is represented by a sequence of operations, each * working with a single field. There also are operations which * add or remove fields. Only one operation on the same field * is allowed. * * Supported field change operations are: SET, ADD, SUBTRACT; * bitwise AND, XOR and OR; SPLICE. * * Supported tuple change operations are: SET, DELETE, INSERT, * PUSH and POP. * If the number of fields in a tuple is altered by an operation, * field index of all following operations is evaluated against the * new tuple. * * Despite the allowed complexity, a typical use case for UPDATE * is when the operation count is much less than field count in * a tuple. * * With the common case in mind, UPDATE tries to minimize * the amount of unnecessary temporary tuple copies. * * First, operations are parsed and initialized. Then, the * resulting tuple length is calculated. A new tuple is allocated. * Finally, operations are applied sequentially, each copying data * from the old tuple to the new tuple. * * With this approach, cost of UPDATE is proportional to O(tuple * length) + O(C * log C), where C is the number of operations in * the request, and data is copied from the old tuple to the new * one only once. 
* * As long as INSERT, DELETE, PUSH and POP change the relative * field order, an auxiliary data structure is necessary to look * up fields in the "old" tuple by field number. Such field * index is built on demand, using "rope" data structure. * * A rope is a binary tree designed to store long strings built * from pieces. Each tree node points to a substring of a large * string. In our case, each rope node points at a range of * fields, initially in the old tuple, and then, as fields are * added and deleted by UPDATE, in the "current" tuple. * Note, that the tuple itself is not materialized: when * operations which affect field count are initialized, the rope * is updated to reflect the new field order. * In particular, if a field is deleted by an operation, * it disappears from the rope and all subsequent operations * on this field number instead affect the field following the * deleted one. */ /** Update internal state */ struct tuple_update { tuple_update_alloc_func alloc; void *alloc_ctx; struct rope *rope; struct update_op *ops; uint32_t op_count; int index_base; /* 0 for C and 1 for Lua */ /** A bitmask of all columns modified by this update */ uint64_t column_mask; }; /** Argument of SET (and INSERT) operation. */ struct op_set_arg { uint32_t length; const char *value; }; /** Argument of DELETE operation. */ struct op_del_arg { uint32_t count; }; /** * MsgPack format code of an arithmetic argument or result. * MsgPack codes are not used to simplify type calculation. */ enum arith_type { AT_DOUBLE = 0, /* MP_DOUBLE */ AT_FLOAT = 1, /* MP_FLOAT */ AT_INT = 2 /* MP_INT/MP_UINT */ }; /** * Argument (left and right) and result of ADD, SUBTRACT. * * To perform an arithmetic operation, update first loads * left and right arguments into corresponding value objects, * then performs arithmetics on types of arguments, thus * calculating the type of the result, and then * performs the requested operation according to the calculated * type rules. 
* * The rules are as follows: * - when one of the argument types is double, the result is * double * - when one of the argument types is float, the result is * float * - for integer arguments, the result type code depends on * the range in which falls the result of the operation. * If the result is in negative range, it's MP_INT, otherwise * it's MP_UINT. If the result is out of bounds of (-2^63, * 2^64), and exception is raised for overflow. */ struct op_arith_arg { enum arith_type type; union { double dbl; float flt; struct int96_num int96; }; }; /** Argument of AND, XOR, OR operations. */ struct op_bit_arg { uint64_t val; }; /** Argument of SPLICE. */ struct op_splice_arg { int32_t offset; /** splice position */ int32_t cut_length; /** cut this many bytes. */ const char *paste; /** paste what? */ uint32_t paste_length; /** paste this many bytes. */ /** Offset of the tail in the old field */ int32_t tail_offset; /** Size of the tail. */ int32_t tail_length; }; union update_op_arg { struct op_set_arg set; struct op_del_arg del; struct op_arith_arg arith; struct op_bit_arg bit; struct op_splice_arg splice; }; struct update_field; struct update_op; typedef int (*do_op_func)(struct tuple_update *update, struct update_op *op); typedef int (*read_arg_func)(int index_base, struct update_op *op, const char **expr); typedef void (*store_op_func)(union update_op_arg *arg, const char *in, char *out); /** A set of functions and properties to initialize and do an op. */ struct update_op_meta { read_arg_func read_arg; do_op_func do_op; store_op_func store; /* Argument count */ uint32_t args; }; /** A single UPDATE operation. */ struct update_op { const struct update_op_meta *meta; union update_op_arg arg; /* Subject field no. */ int32_t field_no; uint32_t new_field_len; uint8_t opcode; }; /** * We can have more than one operation on the same field. * A descriptor of one changed field. */ struct update_field { /** UPDATE operation against the first field in the range. 
	 */
	struct update_op *op;
	/** Points at start of field *data* in the old tuple. */
	const char *old;
	/** End of the old field. */
	const char *tail;
	/**
	 * Length of the "tail" in the old tuple from end
	 * of old data to the beginning of the field in the
	 * next update_field structure.
	 */
	uint32_t tail_len;
};

/** Initialize an update_field to cover one old field plus its tail. */
static void
update_field_init(struct update_field *field,
		  const char *old, uint32_t old_len, uint32_t tail_len)
{
	field->op = NULL;
	field->old = old;
	field->tail = old + old_len;
	field->tail_len = tail_len;
}

/* {{{ read_arg helpers */

/** Read a field index or any other integer field. */
static inline int
mp_read_i32(int index_base, struct update_op *op,
	    const char **expr, int32_t *ret)
{
	if (mp_read_int32(expr, ret) == 0)
		return 0;
	/* Not an integer - report which op and field failed. */
	diag_set(ClientError, ER_UPDATE_ARG_TYPE, (char)op->opcode,
		 index_base + op->field_no, "an integer");
	return -1;
}

/**
 * Decode an unsigned MsgPack integer into @ret.
 * Sets a ClientError and returns -1 on any other MsgPack type.
 */
static inline int
mp_read_uint(int index_base, struct update_op *op,
	     const char **expr, uint64_t *ret)
{
	if (mp_typeof(**expr) == MP_UINT) {
		*ret = mp_decode_uint(expr);
		return 0;
	} else {
		diag_set(ClientError, ER_UPDATE_ARG_TYPE, (char)op->opcode,
			 index_base + op->field_no, "a positive integer");
		return -1;
	}
}

/**
 * Load an argument of an arithmetic operation either from tuple
 * or from the UPDATE command.
 */
static inline int
mp_read_arith_arg(int index_base, struct update_op *op,
		  const char **expr, struct op_arith_arg *ret)
{
	/* Accept any MsgPack numeric type; tag @ret accordingly. */
	if (mp_typeof(**expr) == MP_UINT) {
		ret->type = AT_INT;
		int96_set_unsigned(&ret->int96, mp_decode_uint(expr));
	} else if (mp_typeof(**expr) == MP_INT) {
		ret->type = AT_INT;
		int96_set_signed(&ret->int96, mp_decode_int(expr));
	} else if (mp_typeof(**expr) == MP_DOUBLE) {
		ret->type = AT_DOUBLE;
		ret->dbl = mp_decode_double(expr);
	} else if (mp_typeof(**expr) == MP_FLOAT) {
		ret->type = AT_FLOAT;
		ret->flt = mp_decode_float(expr);
	} else {
		diag_set(ClientError, ER_UPDATE_ARG_TYPE, (char)op->opcode,
			 index_base + op->field_no, "a number");
		return -1;
	}
	return 0;
}

/** Decode a MsgPack string into (@ret, @len); error on other types. */
static inline int
mp_read_str(int index_base, struct update_op *op,
	    const char **expr, uint32_t *len, const char **ret)
{
	if (mp_typeof(**expr) != MP_STR) {
		diag_set(ClientError, ER_UPDATE_ARG_TYPE,
			 (char) op->opcode, index_base + op->field_no,
			 "a string");
		return -1;
	}
	*ret = mp_decode_str(expr, len); /* value */
	return 0;
}

/* }}} read_arg helpers */

/* {{{ read_arg */

/** SET keeps the raw MsgPack value verbatim: remember its span only. */
static int
read_arg_set(int index_base, struct update_op *op, const char **expr)
{
	(void)index_base;
	op->arg.set.value = *expr;
	mp_next(expr);
	op->arg.set.length = (uint32_t) (*expr - op->arg.set.value);
	return 0;
}

static int
read_arg_insert(int index_base, struct update_op *op, const char **expr)
{
	/* INSERT argument has the same encoding as SET. */
	return read_arg_set(index_base, op, expr);
}

/** DELETE takes the number of fields to remove. */
static int
read_arg_delete(int index_base, struct update_op *op, const char **expr)
{
	if (mp_typeof(**expr) == MP_UINT) {
		op->arg.del.count = (uint32_t) mp_decode_uint(expr);
		return 0;
	} else {
		diag_set(ClientError, ER_UPDATE_ARG_TYPE, (char)op->opcode,
			 index_base + op->field_no,
			 "a number of fields to delete");
		return -1;
	}
}

static int
read_arg_arith(int index_base, struct update_op *op, const char **expr)
{
	return mp_read_arith_arg(index_base, op, expr, &op->arg.arith);
}

/** Bitwise ops ('&','|','^') take a single unsigned integer. */
static int
read_arg_bit(int index_base, struct update_op *op, const char **expr)
{
	struct op_bit_arg *arg =
&op->arg.bit; return mp_read_uint(index_base, op, expr, &arg->val); } static int read_arg_splice(int index_base, struct update_op *op, const char **expr) { struct op_splice_arg *arg = &op->arg.splice; if (mp_read_i32(index_base, op, expr, &arg->offset)) return -1; /* cut length */ if (mp_read_i32(index_base, op, expr, &arg->cut_length)) return -1; /* value */ return mp_read_str(index_base, op, expr, &arg->paste_length, &arg->paste); } /* }}} read_arg */ /* {{{ do_op helpers */ static inline int op_adjust_field_no(struct tuple_update *update, struct update_op *op, int32_t field_max) { if (op->field_no >= 0) { if (op->field_no < field_max) return 0; diag_set(ClientError, ER_NO_SUCH_FIELD, update->index_base + op->field_no); return -1; } else { if (op->field_no + field_max >= 0) { op->field_no += field_max; return 0; } diag_set(ClientError, ER_NO_SUCH_FIELD, op->field_no); return -1; } } static inline double cast_arith_arg_to_double(struct op_arith_arg arg) { if (arg.type == AT_DOUBLE) { return arg.dbl; } else if (arg.type == AT_FLOAT) { return arg.flt; } else { assert(arg.type == AT_INT); if (int96_is_uint64(&arg.int96)) { return int96_extract_uint64(&arg.int96); } else { assert(int96_is_neg_int64(&arg.int96)); return int96_extract_neg_int64(&arg.int96); } } } /** Return the MsgPack size of an arithmetic operation result. 
*/ static inline uint32_t mp_sizeof_op_arith_arg(struct op_arith_arg arg) { if (arg.type == AT_INT) { if (int96_is_uint64(&arg.int96)) { uint64_t val = int96_extract_uint64(&arg.int96); return mp_sizeof_uint(val); } else { int64_t val = int96_extract_neg_int64(&arg.int96); return mp_sizeof_int(val); } } else if (arg.type == AT_DOUBLE) { return mp_sizeof_double(arg.dbl); } else { assert(arg.type == AT_FLOAT); return mp_sizeof_float(arg.flt); } } static inline int make_arith_operation(struct op_arith_arg arg1, struct op_arith_arg arg2, char opcode, uint32_t err_fieldno, struct op_arith_arg *ret) { enum arith_type lowest_type = arg1.type; if (arg1.type > arg2.type) lowest_type = arg2.type; if (lowest_type == AT_INT) { switch(opcode) { case '+': int96_add(&arg1.int96, &arg2.int96); break; case '-': int96_invert(&arg2.int96); int96_add(&arg1.int96, &arg2.int96); break; default: unreachable(); /* checked by update_read_ops */ break; } if (!int96_is_uint64(&arg1.int96) && !int96_is_neg_int64(&arg1.int96)) { diag_set(ClientError, ER_UPDATE_INTEGER_OVERFLOW, opcode, err_fieldno); return -1; } *ret = arg1; return 0; } else { /* At least one of operands is double or float */ double a = cast_arith_arg_to_double(arg1); double b = cast_arith_arg_to_double(arg2); double c; switch(opcode) { case '+': c = a + b; break; case '-': c = a - b; break; default: diag_set(ClientError, ER_UPDATE_ARG_TYPE, (char)opcode, err_fieldno, "a positive integer"); return -1; } if (lowest_type == AT_DOUBLE) { /* result is DOUBLE */ ret->type = AT_DOUBLE; ret->dbl = c; } else { /* result is FLOAT */ assert(lowest_type == AT_FLOAT); ret->type = AT_FLOAT; ret->flt = (float)c; } } return 0; } /* }}} do_op helpers */ /* {{{ do_op */ static int do_op_insert(struct tuple_update *update, struct update_op *op) { if (op_adjust_field_no(update, op, rope_size(update->rope) + 1)) return -1; struct update_field *field = (struct update_field *) update->alloc(update->alloc_ctx, sizeof(*field)); if (field == NULL) 
		return -1;
	update_field_init(field, op->arg.set.value, op->arg.set.length, 0);
	return rope_insert(update->rope, op->field_no, field, 1);
}

/** Apply '=': replace a field, or insert when field_no == size. */
static int
do_op_set(struct tuple_update *update, struct update_op *op)
{
	/* interpret '=' for the n + 1 field as insert */
	if (op->field_no == (int32_t) rope_size(update->rope))
		return do_op_insert(update, op);
	if (op_adjust_field_no(update, op, rope_size(update->rope)))
		return -1;
	struct update_field *field = (struct update_field *)
		rope_extract(update->rope, op->field_no);
	if (field == NULL)
		return -1;
	/* Ignore the previous op, if any. */
	field->op = op;
	op->new_field_len = op->arg.set.length;
	return 0;
}

/** Apply '#': erase up to arg.del.count fields starting at field_no. */
static int
do_op_delete(struct tuple_update *update, struct update_op *op)
{
	if (op_adjust_field_no(update, op, rope_size(update->rope)))
		return -1;
	uint32_t delete_count = op->arg.del.count;
	/* Clamp the count to the number of fields actually left. */
	if ((uint64_t) op->field_no + delete_count > rope_size(update->rope))
		delete_count = rope_size(update->rope) - op->field_no;
	if (delete_count == 0) {
		diag_set(ClientError, ER_UPDATE_FIELD,
			 update->index_base + op->field_no,
			 "cannot delete 0 fields");
		return -1;
	}
	for (uint32_t u = 0; u < delete_count; u++)
		rope_erase(update->rope, op->field_no);
	return 0;
}

/** Apply '+'/'-': combine the old field with the argument. */
static int
do_op_arith(struct tuple_update *update, struct update_op *op)
{
	if (op_adjust_field_no(update, op, rope_size(update->rope)))
		return -1;
	struct update_field *field = (struct update_field *)
		rope_extract(update->rope, op->field_no);
	if (field == NULL)
		return -1;
	/* Only one op per field is allowed. */
	if (field->op) {
		diag_set(ClientError, ER_UPDATE_FIELD,
			 update->index_base + op->field_no,
			 "double update of the same field");
		return -1;
	}
	const char *old = field->old;
	struct op_arith_arg left_arg;
	if (mp_read_arith_arg(update->index_base, op, &old, &left_arg))
		return -1;
	struct op_arith_arg right_arg = op->arg.arith;
	/* Result is stored back into op->arg.arith for store_op_arith. */
	if (make_arith_operation(left_arg, right_arg, op->opcode,
				 update->index_base + op->field_no,
				 &op->arg.arith))
		return -1;
	field->op = op;
	op->new_field_len = mp_sizeof_op_arith_arg(op->arg.arith);
	return
		0;
}

/** Apply '&','|','^': bitwise op on an unsigned integer field. */
static int
do_op_bit(struct tuple_update *update, struct update_op *op)
{
	if (op_adjust_field_no(update, op, rope_size(update->rope)))
		return -1;
	struct update_field *field = (struct update_field *)
		rope_extract(update->rope, op->field_no);
	if (field == NULL)
		return -1;
	struct op_bit_arg *arg = &op->arg.bit;
	/* Only one op per field is allowed. */
	if (field->op) {
		diag_set(ClientError, ER_UPDATE_FIELD,
			 update->index_base + op->field_no,
			 "double update of the same field");
		return -1;
	}
	const char *old = field->old;
	uint64_t val;
	if (mp_read_uint(update->index_base, op, &old, &val))
		return -1;
	/* Fold the old value into the argument in place. */
	switch (op->opcode) {
	case '&':
		arg->val &= val;
		break;
	case '^':
		arg->val ^= val;
		break;
	case '|':
		arg->val |= val;
		break;
	default:
		unreachable(); /* checked by update_read_ops */
	}
	field->op = op;
	op->new_field_len = mp_sizeof_uint(arg->val);
	return 0;
}

/** Apply ':': splice a string field; normalize offset/cut_length. */
static int
do_op_splice(struct tuple_update *update, struct update_op *op)
{
	if (op_adjust_field_no(update, op, rope_size(update->rope)))
		return -1;
	struct update_field *field = (struct update_field *)
		rope_extract(update->rope, op->field_no);
	if (field == NULL)
		return -1;
	/* Only one op per field is allowed. */
	if (field->op) {
		diag_set(ClientError, ER_UPDATE_FIELD,
			 update->index_base + op->field_no,
			 "double update of the same field");
		return -1;
	}
	struct op_splice_arg *arg = &op->arg.splice;

	const char *in = field->old;
	int32_t str_len;
	if (mp_read_str(update->index_base, op, &in, (uint32_t *) &str_len,
			&in))
		return -1;

	if (arg->offset < 0) {
		/* A negative offset counts from the end of the string. */
		if (-arg->offset > str_len + 1) {
			diag_set(ClientError, ER_SPLICE,
				 update->index_base + op->field_no,
				 "offset is out of bound");
			return -1;
		}
		arg->offset = arg->offset + str_len + 1;
	} else if (arg->offset - update->index_base >= 0) {
		/* Convert a 1-based (Lua) offset to 0-based and clamp. */
		arg->offset -= update->index_base;
		if (arg->offset > str_len)
			arg->offset = str_len;
	} else /* (offset <= 0) */ {
		diag_set(ClientError, ER_SPLICE,
			 update->index_base + op->field_no,
			 "offset is out of bound");
		return -1;
	}
	assert(arg->offset >= 0 && arg->offset <= str_len);

	if (arg->cut_length < 0) {
		if (-arg->cut_length >
		    (str_len - arg->offset))
			arg->cut_length = 0;
		else
			arg->cut_length += str_len - arg->offset;
	} else if (arg->cut_length > str_len - arg->offset) {
		/* Clamp the cut to the end of the string. */
		arg->cut_length = str_len - arg->offset;
	}
	assert(arg->offset <= str_len);

	/* Fill tail part */
	arg->tail_offset = arg->offset + arg->cut_length;
	arg->tail_length = str_len - arg->tail_offset;

	field->op = op;
	/* Record the new field length (maximal). */
	op->new_field_len = mp_sizeof_str(arg->offset + arg->paste_length +
					  arg->tail_length);
	return 0;
}

/* }}} do_op */

/* {{{ store_op */

/** Write the raw SET argument into the new tuple. */
static void
store_op_set(struct op_set_arg *arg, const char *in, char *out)
{
	(void)in;
	memcpy(out, arg->value, arg->length);
}

/** Write the raw INSERT argument into the new tuple. */
static void
store_op_insert(struct op_set_arg *arg, const char *in, char *out)
{
	(void)in;
	memcpy(out, arg->value, arg->length);
}

/** Encode the arithmetic result according to its computed type. */
static void
store_op_arith(struct op_arith_arg *arg, const char *in, char *out)
{
	(void)in;
	if (arg->type == AT_INT) {
		if (int96_is_uint64(&arg->int96)) {
			mp_encode_uint(out, int96_extract_uint64(&arg->int96));
		} else {
			assert(int96_is_neg_int64(&arg->int96));
			mp_encode_int(out, int96_extract_neg_int64(&arg->int96));
		}
	} else if (arg->type == AT_DOUBLE) {
		mp_encode_double(out, arg->dbl);
	} else {
		assert(arg->type == AT_FLOAT);
		mp_encode_float(out, arg->flt);
	}
}

/** Write the bitwise result as an unsigned MsgPack integer. */
static void
store_op_bit(struct op_bit_arg *arg, const char *in, char *out)
{
	(void)in;
	mp_encode_uint(out, arg->val);
}

/** Assemble head + paste + tail of a spliced string field. */
static void
store_op_splice(struct op_splice_arg *arg, const char *in, char *out)
{
	uint32_t new_str_len = arg->offset + arg->paste_length +
			       arg->tail_length;
	(void) mp_decode_strl(&in);
	out = mp_encode_strl(out, new_str_len);
	memcpy(out, in, arg->offset); /* copy field head.
				       */
	out = out + arg->offset;
	memcpy(out, arg->paste, arg->paste_length); /* copy the paste */
	out = out + arg->paste_length;
	memcpy(out, in + arg->tail_offset, arg->tail_length); /* copy tail */
}

/* }}} store_op */

/* Per-opcode dispatch: {read_arg, do_op, store, argument count}. */
static const struct update_op_meta op_set =
	{ read_arg_set, do_op_set, (store_op_func) store_op_set, 3 };
static const struct update_op_meta op_insert =
	{ read_arg_insert, do_op_insert, (store_op_func) store_op_insert, 3 };
static const struct update_op_meta op_arith =
	{ read_arg_arith, do_op_arith, (store_op_func) store_op_arith, 3 };
static const struct update_op_meta op_bit =
	{ read_arg_bit, do_op_bit, (store_op_func) store_op_bit, 3 };
static const struct update_op_meta op_splice =
	{ read_arg_splice, do_op_splice, (store_op_func) store_op_splice, 5 };
static const struct update_op_meta op_delete =
	{ read_arg_delete, do_op_delete, (store_op_func) NULL, 3 };

/** Split a range of fields in two, allocating update_field
 * context for the new range. */
static void *
update_field_split(void *split_ctx, void *data, size_t size, size_t offset)
{
	(void)size;
	struct tuple_update *update = (struct tuple_update *) split_ctx;

	struct update_field *prev = (struct update_field *) data;

	struct update_field *next = (struct update_field *)
		update->alloc(update->alloc_ctx, sizeof(*next));
	if (next == NULL)
		return NULL;
	assert(offset > 0 && prev->tail_len > 0);

	const char *field = prev->tail;
	const char *end = field + prev->tail_len;

	/* Skip offset - 1 tail fields to find the split point. */
	for (uint32_t i = 1; i < offset; i++) {
		mp_next(&field);
	}
	prev->tail_len = field - prev->tail;
	const char *f = field;
	mp_next(&f);
	uint32_t field_len = f - field;

	update_field_init(next, field, field_len, end - field - field_len);
	return next;
}

/** Free rope node - do nothing, since we use a pool allocator. */
static void
region_alloc_free_stub(void *ctx, void *mem)
{
	(void) ctx;
	(void) mem;
}

/**
 * We found a tuple to do the update on. Prepare a rope
 * to perform operations on.
 * @param update Update meta.
* @param tuple_data MessagePack array without the array header. * @param tuple_data_end End of the @tuple_data. * @param field_count Field count in @tuple_data. * * @retval 0 Success. * @retval -1 Error. */ int update_create_rope(struct tuple_update *update, const char *tuple_data, const char *tuple_data_end, uint32_t field_count) { update->rope = rope_new(update_field_split, update, update->alloc, region_alloc_free_stub, update->alloc_ctx); if (update->rope == NULL) return -1; /* Initialize the rope with the old tuple. */ struct update_field *first = (struct update_field *) update->alloc(update->alloc_ctx, sizeof(*first)); if (first == NULL) return -1; const char *field = tuple_data; const char *end = tuple_data_end; /* Add first field to rope */ mp_next(&tuple_data); uint32_t field_len = tuple_data - field; update_field_init(first, field, field_len, end - field - field_len); return rope_append(update->rope, first, field_count); } static uint32_t update_calc_tuple_length(struct tuple_update *update) { uint32_t res = mp_sizeof_array(rope_size(update->rope)); struct rope_iter it; struct rope_node *node; rope_iter_create(&it, update->rope); for (node = rope_iter_start(&it); node; node = rope_iter_next(&it)) { struct update_field *field = (struct update_field *) rope_leaf_data(node); uint32_t field_len = (field->op ? 
field->op->new_field_len : (uint32_t)(field->tail - field->old)); res += field_len + field->tail_len; } return res; } static uint32_t update_write_tuple(struct tuple_update *update, char *buffer, char *buffer_end) { char *new_data = buffer; new_data = mp_encode_array(new_data, rope_size(update->rope)); (void) buffer_end; uint32_t total_field_count = 0; struct rope_iter it; struct rope_node *node; rope_iter_create(&it, update->rope); for (node = rope_iter_start(&it); node; node = rope_iter_next(&it)) { struct update_field *field = (struct update_field *) rope_leaf_data(node); uint32_t field_count = rope_leaf_size(node); const char *old_field = field->old; struct update_op *op = field->op; if (op) { op->meta->store(&op->arg, old_field, new_data); new_data += op->new_field_len; } else { uint32_t field_len = field->tail - field->old; memcpy(new_data, old_field, field_len); new_data += field_len; } /* Copy tail_len from the old tuple. */ assert(field->tail_len == 0 || field_count > 1); if (field_count > 1) { memcpy(new_data, field->tail, field->tail_len); new_data += field->tail_len; } total_field_count += field_count; } assert(rope_size(update->rope) == total_field_count); assert(new_data <= buffer_end); return new_data - buffer; /* real_tuple_size */ } static const struct update_op_meta * update_op_by(char opcode) { switch (opcode) { case '=': return &op_set; case '+': case '-': return &op_arith; case '&': case '|': case '^': return &op_bit; case ':': return &op_splice; case '#': return &op_delete; case '!': return &op_insert; default: diag_set(ClientError, ER_UNKNOWN_UPDATE_OP); return NULL; } } /** * Read and check update operations and fill column mask. * * @param[out] update Update meta. * @param expr MessagePack array of operations. * @param expr_end End of the @expr. * @param field_count_hint Field count in the updated tuple. 
If * there is no tuple at hand (for example, when we are * reading UPSERT operations), then 0 for field count will * do as a hint: the only effect of a wrong hint is * a possibly incorrect column_mask. * A correct field count results in an accurate * column mask calculation. * * @retval 0 Success. * @retval -1 Error. */ static int update_read_ops(struct tuple_update *update, const char *expr, const char *expr_end, int32_t field_count_hint) { if (mp_typeof(*expr) != MP_ARRAY) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "update operations must be an " "array {{op,..}, {op,..}}"); return -1; } uint64_t column_mask = 0; /* number of operations */ update->op_count = mp_decode_array(&expr); if (update->op_count > BOX_UPDATE_OP_CNT_MAX) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "too many operations for update"); return -1; } /* Read update operations. */ update->ops = (struct update_op *) update->alloc(update->alloc_ctx, update->op_count * sizeof(struct update_op)); if (update->ops == NULL) return -1; struct update_op *op = update->ops; struct update_op *ops_end = op + update->op_count; for (; op < ops_end; op++) { if (mp_typeof(*expr) != MP_ARRAY) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "update operation" " must be an array {op,..}"); return -1; } /* Read operation */ uint32_t args, len; args = mp_decode_array(&expr); if (args < 1) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "update operation must be an " "array {op,..}, got empty array"); return -1; } if (mp_typeof(*expr) != MP_STR) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "update operation name must be a string"); return -1; } op->opcode = *mp_decode_str(&expr, &len); op->meta = update_op_by(op->opcode); if (op->meta == NULL) return -1; if (args != op->meta->args) { diag_set(ClientError, ER_UNKNOWN_UPDATE_OP); return -1; } if (mp_typeof(*expr) != MP_INT && mp_typeof(*expr) != MP_UINT) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "field id must be a number"); return -1; } int32_t field_no; if 
(mp_read_i32(update->index_base, op, &expr, &field_no)) return -1; if (field_no - update->index_base >= 0) { op->field_no = field_no - update->index_base; } else if (field_no < 0) { op->field_no = field_no; } else { diag_set(ClientError, ER_NO_SUCH_FIELD, field_no); return -1; } if (op->meta->read_arg(update->index_base, op, &expr)) return -1; /* * Continue collecting the changed columns * only if there are unset bits in the mask. */ if (column_mask != COLUMN_MASK_FULL) { if (op->field_no >= 0) field_no = op->field_no; else if (op->opcode != '!') field_no = field_count_hint + op->field_no; else /* * '!' with a negative number * inserts a new value after the * position, specified in the * field_no. Example: * tuple: [1, 2, 3] * * update1: {'#', -1, 1} * update2: {'!', -1, 4} * * result1: [1, 2, * ] * result2: [1, 2, 3, *4] * As you can see, both operations * have field_no -1, but '!' actually * creates a new field. So * set field_no to insert position + 1. */ field_no = field_count_hint + op->field_no + 1; /* * Here field_no can be < 0 only if update * operation encounters a negative field * number N and abs(N) > field_count_hint. * For example, the tuple is: {1, 2, 3}, * and the update operation is * {'#', -4, 1}. */ if (field_no < 0) { /* * Turn off column mask for this * incorrect UPDATE. */ column_mask_set_range(&column_mask, 0); continue; } /* * Update result statement's field count * hint. It is used to translate negative * field numbers into positive ones. */ if (op->opcode == '!') ++field_count_hint; else if (op->opcode == '#') field_count_hint -= (int32_t) op->arg.del.count; if (op->opcode == '!' || op->opcode == '#') /* * If the operation is insertion * or deletion then it potentially * changes a range of columns by * moving them, so need to set a * range of bits. */ column_mask_set_range(&column_mask, field_no); else column_mask_set_fieldno(&column_mask, field_no); } } /* Check the remainder length, the request must be fully read. 
*/ if (expr != expr_end) { diag_set(ClientError, ER_ILLEGAL_PARAMS, "can't unpack update operations"); return -1; } update->column_mask = column_mask; return 0; } /** * Apply update operations to the concrete tuple. * * @param update Update meta. * @param old_data MessagePack array of tuple fields without the * array header. * @param old_data_end End of the @old_data. * @param part_count Field count in the @old_data. * * @retval 0 Success. * @retval -1 Error. */ static int update_do_ops(struct tuple_update *update, const char *old_data, const char *old_data_end, uint32_t part_count) { if (update_create_rope(update, old_data, old_data_end, part_count) != 0) return -1; struct update_op *op = update->ops; struct update_op *ops_end = op + update->op_count; for (; op < ops_end; op++) { if (op->meta->do_op(update, op)) return -1; } return 0; } /* * Same as update_do_ops but for upsert. * @param suppress_error True, if an upsert error is not critical * and it is enough to simply write the error to the log. */ static int upsert_do_ops(struct tuple_update *update, const char *old_data, const char *old_data_end, uint32_t part_count, bool suppress_error) { if (update_create_rope(update, old_data, old_data_end, part_count) != 0) return -1; struct update_op *op = update->ops; struct update_op *ops_end = op + update->op_count; for (; op < ops_end; op++) { if (op->meta->do_op(update, op) == 0) continue; struct error *e = diag_last_error(diag_get()); if (e->type != &type_ClientError) return -1; if (!suppress_error) { say_error("UPSERT operation failed:"); error_log(e); } } return 0; } static void update_init(struct tuple_update *update, tuple_update_alloc_func alloc, void *alloc_ctx, int index_base) { memset(update, 0, sizeof(*update)); update->alloc = alloc; update->alloc_ctx = alloc_ctx; /* * Base field offset, e.g. 0 for C and 1 for Lua. Used only for * error messages. All fields numbers must be zero-based! 
*/ update->index_base = index_base; } const char * update_finish(struct tuple_update *update, uint32_t *p_tuple_len) { uint32_t tuple_len = update_calc_tuple_length(update); char *buffer = (char *) update->alloc(update->alloc_ctx, tuple_len); if (buffer == NULL) return NULL; *p_tuple_len = update_write_tuple(update, buffer, buffer + tuple_len); return buffer; } int tuple_update_check_ops(tuple_update_alloc_func alloc, void *alloc_ctx, const char *expr, const char *expr_end, int index_base) { struct tuple_update update; update_init(&update, alloc, alloc_ctx, index_base); return update_read_ops(&update, expr, expr_end, 0); } const char * tuple_update_execute(tuple_update_alloc_func alloc, void *alloc_ctx, const char *expr,const char *expr_end, const char *old_data, const char *old_data_end, uint32_t *p_tuple_len, int index_base, uint64_t *column_mask) { struct tuple_update update; update_init(&update, alloc, alloc_ctx, index_base); uint32_t field_count = mp_decode_array(&old_data); if (update_read_ops(&update, expr, expr_end, field_count) != 0) return NULL; if (update_do_ops(&update, old_data, old_data_end, field_count)) return NULL; if (column_mask) *column_mask = update.column_mask; return update_finish(&update, p_tuple_len); } const char * tuple_upsert_execute(tuple_update_alloc_func alloc, void *alloc_ctx, const char *expr,const char *expr_end, const char *old_data, const char *old_data_end, uint32_t *p_tuple_len, int index_base, bool suppress_error, uint64_t *column_mask) { struct tuple_update update; update_init(&update, alloc, alloc_ctx, index_base); uint32_t field_count = mp_decode_array(&old_data); if (update_read_ops(&update, expr, expr_end, field_count) != 0) return NULL; if (upsert_do_ops(&update, old_data, old_data_end, field_count, suppress_error)) return NULL; if (column_mask) *column_mask = update.column_mask; return update_finish(&update, p_tuple_len); } const char * tuple_upsert_squash(tuple_update_alloc_func alloc, void *alloc_ctx, const char 
*expr1, const char *expr1_end, const char *expr2, const char *expr2_end, size_t *result_size, int index_base) { const char *expr[2] = {expr1, expr2}; const char *expr_end[2] = {expr1_end, expr2_end}; struct tuple_update update[2]; for (int j = 0; j < 2; j++) { update_init(&update[j], alloc, alloc_ctx, index_base); if (update_read_ops(&update[j], expr[j], expr_end[j], 0)) return NULL; mp_decode_array(&expr[j]); int32_t prev_field_no = index_base - 1; for (uint32_t i = 0; i < update[j].op_count; i++) { struct update_op *op = &update[j].ops[i]; if (op->opcode != '+' && op->opcode != '-' && op->opcode != '=') return NULL; if (op->field_no <= prev_field_no) return NULL; prev_field_no = op->field_no; } } size_t possible_size = expr1_end - expr1 + expr2_end - expr2; const uint32_t space_for_arr_tag = 5; char *buf = (char *)alloc(alloc_ctx, possible_size + space_for_arr_tag); if (buf == NULL) return NULL; /* reserve some space for mp array header */ char *res_ops = buf + space_for_arr_tag; uint32_t res_count = 0; /* number of resulting operations */ uint32_t op_count[2] = {update[0].op_count, update[1].op_count}; uint32_t op_no[2] = {0, 0}; while (op_no[0] < op_count[0] || op_no[1] < op_count[1]) { res_count++; struct update_op *op[2] = {update[0].ops + op_no[0], update[1].ops + op_no[1]}; /* * from: * 0 - take op from first update, * 1 - take op from second update, * 2 - merge both ops */ uint32_t from; uint32_t has[2] = {op_no[0] < op_count[0], op_no[1] < op_count[1]}; assert(has[0] || has[1]); if (has[0] && has[1]) { from = op[0]->field_no < op[1]->field_no ? 0 : op[0]->field_no > op[1]->field_no ? 1 : 2; } else { assert(has[0] != has[1]); from = has[1]; } if (from == 2 && op[1]->opcode == '=') { /* * If an operation from the second upsert is '=' * it is just overwrites any op from the first upsert. 
* So we just skip op from the first upsert and * copy op from the second */ mp_next(&expr[0]); op_no[0]++; from = 1; } if (from < 2) { /* take op from one of upserts */ const char *copy = expr[from]; mp_next(&expr[from]); size_t copy_size = expr[from] - copy; memcpy(res_ops, copy, copy_size); res_ops += copy_size; op_no[from]++; continue; } /* merge: apply second '+' or '-' */ assert(op[1]->opcode == '+' || op[1]->opcode == '-'); if (op[0]->opcode == '-') { op[0]->opcode = '+'; int96_invert(&op[0]->arg.arith.int96); } struct op_arith_arg res; if (make_arith_operation(op[0]->arg.arith, op[1]->arg.arith, op[1]->opcode, update[0].index_base + op[0]->field_no, &res)) return NULL; res_ops = mp_encode_array(res_ops, 3); res_ops = mp_encode_str(res_ops, (const char *)&op[0]->opcode, 1); res_ops = mp_encode_uint(res_ops, op[0]->field_no + update[0].index_base); store_op_arith(&res, NULL, res_ops); res_ops += mp_sizeof_op_arith_arg(res); mp_next(&expr[0]); mp_next(&expr[1]); op_no[0]++; op_no[1]++; } assert(op_no[0] == op_count[0] && op_no[1] == op_count[1]); assert(expr[0] == expr_end[0] && expr[1] == expr_end[1]); char *arr_start = buf + space_for_arr_tag - mp_sizeof_array(res_count); mp_encode_array(arr_start, res_count); *result_size = res_ops - arr_start; return arr_start; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_run.h0000664000000000000000000004324413306565107017275 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_RUN_H #define INCLUDES_TARANTOOL_BOX_VY_RUN_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "fiber_cond.h" #include "iterator_type.h" #include "vy_stmt.h" /* for comparators */ #include "vy_stmt_stream.h" #include "vy_read_view.h" #include "vy_stat.h" #include "index_def.h" #include "xlog.h" #include "small/mempool.h" #include "salad/bloom.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct vy_run_reader; /** Part of vinyl environment for run read/write */ struct vy_run_env { /** Mempool for struct vy_page_read_task */ struct mempool read_task_pool; /** Key for thread-local ZSTD context */ pthread_key_t zdctx_key; /** Pool of threads used for reading run files. */ struct vy_run_reader *reader_pool; /** Number of threads in the reader pool. */ int reader_pool_size; /** * Index of the reader thread in the pool to be used for * processing the next read request. */ int next_reader; }; /** * Run metadata. Is a written to a file as a single chunk. */ struct vy_run_info { /** Min key in the run. */ char *min_key; /** Max key in the run. 
*/ char *max_key; /** Min LSN over all statements in the run. */ int64_t min_lsn; /** Max LSN over all statements in the run. */ int64_t max_lsn; /** Number of pages in the run. */ uint32_t page_count; /** Set iff bloom filter is available. */ bool has_bloom; /** Bloom filter of all tuples in run */ struct bloom bloom; }; /** * Run page metadata. Is a written to a file as a single chunk. */ struct vy_page_info { /** Offset of page data in the run file. */ uint64_t offset; /** Size of page data in the run file. */ uint32_t size; /** Size of page data in memory, i.e. unpacked. */ uint32_t unpacked_size; /** Number of statements in the page. */ uint32_t row_count; /** Minimal key stored in the page. */ char *min_key; /** Offset of the row index in the page. */ uint32_t row_index_offset; }; /** * Logical unit of vinyl index - a sorted file with data. */ struct vy_run { /** Vinyl run environment. */ struct vy_run_env *env; /** Info about the run stored in the index file. */ struct vy_run_info info; /** Info about the run pages stored in the index file. */ struct vy_page_info *page_info; /** Run data file. */ int fd; /** Unique ID of this run. */ int64_t id; /** Number of statements in this run. */ struct vy_disk_stmt_counter count; /** Size of memory used for storing page index. */ size_t page_index_size; /** Max LSN stored on disk. */ int64_t dump_lsn; /** * Run reference counter, the run is deleted once it hits 0. * A new run is created with the reference counter set to 1. * A run is referenced by each slice created for it and each * pending read or write task. */ int refs; /** Number of slices created for this run. */ int slice_count; /** * Counter used on completion of a compaction task to check if * all slices of the run have been compacted and so the run is * not used any more and should be deleted. */ int compacted_slice_count; /** * Link in the list of runs that became unused * after compaction. */ struct rlist in_unused; /** Link in vy_index::runs list. 
*/ struct rlist in_index; }; /** * Slice of a run, used to organize runs in ranges. */ struct vy_slice { /** Unique ID of this slice. */ int64_t id; /** Run this slice is for (increments vy_run::refs). */ struct vy_run *run; /** * Slice begin and end (increments tuple::refs). * If @begin is NULL, the slice starts from the beginning * of the run. If @end is NULL, the slice ends at the end * of the run. */ struct tuple *begin; struct tuple *end; /** * Number of async users of this slice. Slice must not * be removed until it hits 0. Used by the iterator to * prevent use-after-free after waiting for IO. * See also vy_run_wait_pinned(). */ int pin_count; /** * Condition variable signaled by vy_slice_unpin() * if pin_count reaches 0. */ struct fiber_cond pin_cond; union { /** Link in range->slices list. */ struct rlist in_range; /** Link in vy_join_ctx->slices list. */ struct rlist in_join; }; /** * Indexes of the first and the last page in the run * that belong to this slice. */ uint32_t first_page_no; uint32_t last_page_no; /** An estimate of the number of statements in this slice. */ struct vy_disk_stmt_counter count; }; /** Position of a particular stmt in vy_run. */ struct vy_run_iterator_pos { uint32_t page_no; uint32_t pos_in_page; }; /** * Return statements from vy_run based on initial search key, * iteration order and view lsn. * * All statements with lsn > vlsn are skipped. * The API allows to traverse over resulting statements within two * dimensions - key and lsn. next_key() switches to the youngest * statement of the next key, according to the iteration order, * and next_lsn() switches to an older statement for the same * key. */ struct vy_run_iterator { /** Usage statistics */ struct vy_run_iterator_stat *stat; /* Members needed for memory allocation and disk access */ /** Index key definition used for storing statements on disk. */ const struct key_def *cmp_def; /** Index key definition defined by the user. 
*/ const struct key_def *key_def; /** * Format ot allocate REPLACE and DELETE tuples read from * pages. */ struct tuple_format *format; /** Same as format, but for UPSERT tuples. */ struct tuple_format *upsert_format; /** Set if this iterator is for a primary index. */ bool is_primary; /** The run slice to iterate. */ struct vy_slice *slice; /* Search options */ /** * Iterator type, that specifies direction, start position and stop * criteria if the key is not specified, GT and EQ are changed to * GE, LT to LE for beauty. */ enum iterator_type iterator_type; /** Key to search. */ const struct tuple *key; /* LSN visibility, iterator shows values with lsn <= vlsn */ const struct vy_read_view **read_view; /* State of the iterator */ /** Position of the current record */ struct vy_run_iterator_pos curr_pos; /** Statement at curr_pos. */ struct tuple *curr_stmt; /** * Last two pages read by the iterator. We keep two pages * rather than just one, because we often probe a page for * a better match. Keeping the previous page makes sure we * won't throw out the current page if probing fails to * find a better match. */ struct vy_page *curr_page; struct vy_page *prev_page; /** Is false until first .._get or .._next_.. method is called */ bool search_started; /** Search is finished, you will not get more values from iterator */ bool search_ended; }; /** * Vinyl page stored in memory. */ struct vy_page { /** Page position in the run file. */ uint32_t page_no; /** Size of page data in memory, i.e. unpacked. */ uint32_t unpacked_size; /** Number of statements in the page. */ uint32_t row_count; /** Array of row offsets. */ uint32_t *row_index; /** Pointer to the page data. */ char *data; }; /** * Initialize vinyl run environment */ void vy_run_env_create(struct vy_run_env *env); /** * Destroy vinyl run environment */ void vy_run_env_destroy(struct vy_run_env *env); /** * Enable coio reads for a vinyl run environment. 
* * This function starts @threads reader threads and makes * the run iterator hand disk reads over to them rather than * read run files directly blocking the current fiber. * * Subsequent calls to this function will silently return. */ void vy_run_env_enable_coio(struct vy_run_env *env, int threads); static inline size_t vy_run_bloom_size(struct vy_run *run) { return run->info.has_bloom ? bloom_store_size(&run->info.bloom) : 0; } static inline struct vy_page_info * vy_run_page_info(struct vy_run *run, uint32_t pos) { assert(pos < run->info.page_count); return &run->page_info[pos]; } static inline bool vy_run_is_empty(struct vy_run *run) { return run->info.page_count == 0; } struct vy_run * vy_run_new(struct vy_run_env *env, int64_t id); void vy_run_delete(struct vy_run *run); static inline void vy_run_ref(struct vy_run *run) { assert(run->refs > 0); run->refs++; } static inline void vy_run_unref(struct vy_run *run) { assert(run->refs > 0); if (--run->refs == 0) vy_run_delete(run); } /** * Load run from disk * @param run - run to laod * @param dir - path to the vinyl directory * @param space_id - space id * @param iid - index id * @return - 0 on sucess, -1 on fail */ int vy_run_recover(struct vy_run *run, const char *dir, uint32_t space_id, uint32_t iid); /** * Rebuild vy_run index * @param run - run to laod * @param dir - path to the vinyl directory * @param space_id - space id * @param iid - index id * @param key_def index key definition * @param bloom_fpr bloom filter param * @return - 0 on sucess, -1 on fail */ int vy_run_rebuild_index(struct vy_run *run, const char *dir, uint32_t space_id, uint32_t iid, const struct key_def *key_def, const struct key_def *user_key_def, struct tuple_format *mem_format, struct tuple_format *upsert_format, const struct index_opts *opts); enum vy_file_type { VY_FILE_INDEX, VY_FILE_RUN, vy_file_MAX, }; extern const char *vy_file_suffix[]; static inline int vy_index_snprint_path(char *buf, int size, const char *dir, uint32_t 
space_id, uint32_t iid) { return snprintf(buf, size, "%s/%u/%u", dir, (unsigned)space_id, (unsigned)iid); } static inline int vy_run_snprint_filename(char *buf, int size, int64_t run_id, enum vy_file_type type) { return snprintf(buf, size, "%020lld.%s", (long long)run_id, vy_file_suffix[type]); } static inline int vy_run_snprint_path(char *buf, int size, const char *dir, uint32_t space_id, uint32_t iid, int64_t run_id, enum vy_file_type type) { int total = 0; SNPRINT(total, vy_index_snprint_path, buf, size, dir, (unsigned)space_id, (unsigned)iid); SNPRINT(total, snprintf, buf, size, "/"); SNPRINT(total, vy_run_snprint_filename, buf, size, run_id, type); return total; } /** * Remove all files (data, index) corresponding to a run * with the given id. Return 0 on success, -1 if unlink() * failed. */ int vy_run_remove_files(const char *dir, uint32_t space_id, uint32_t iid, int64_t run_id); /** * Allocate a new run slice. * This function increments @run->refs. */ struct vy_slice * vy_slice_new(int64_t id, struct vy_run *run, struct tuple *begin, struct tuple *end, const struct key_def *cmp_def); /** * Free a run slice. * This function decrements @run->refs and * deletes the run if the counter hits 0. */ void vy_slice_delete(struct vy_slice *slice); /** * Pin a run slice. * A pinned slice can't be deleted until it's unpinned. */ static inline void vy_slice_pin(struct vy_slice *slice) { slice->pin_count++; } /* * Unpin a run slice. * This function reverts the effect of vy_slice_pin(). */ static inline void vy_slice_unpin(struct vy_slice *slice) { assert(slice->pin_count > 0); if (--slice->pin_count == 0) fiber_cond_broadcast(&slice->pin_cond); } /** * Wait until a run slice is unpinned. */ static inline void vy_slice_wait_pinned(struct vy_slice *slice) { while (slice->pin_count > 0) fiber_cond_wait(&slice->pin_cond); } /** * Cut a sub-slice of @slice starting at @begin and ending at @end. * Return 0 on success, -1 on OOM. * * The new slice is returned in @result. 
If @slice does not intersect * with [@begin, @end), @result is set to NULL. */ int vy_slice_cut(struct vy_slice *slice, int64_t id, struct tuple *begin, struct tuple *end, const struct key_def *cmp_def, struct vy_slice **result); /** * Open an iterator over on-disk run. * * Note, it is the caller's responsibility to make sure the slice * is not compacted while the iterator is reading it. */ void vy_run_iterator_open(struct vy_run_iterator *itr, struct vy_run_iterator_stat *stat, struct vy_slice *slice, enum iterator_type iterator_type, const struct tuple *key, const struct vy_read_view **rv, const struct key_def *cmp_def, const struct key_def *key_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary); /** * Advance a run iterator to the newest statement for the next key. * The statement is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on memory allocation or IO error. */ NODISCARD int vy_run_iterator_next_key(struct vy_run_iterator *itr, struct tuple **ret); /** * Advance a run iterator to the older statement for the same key. * The statement is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on memory allocation or IO error. */ NODISCARD int vy_run_iterator_next_lsn(struct vy_run_iterator *itr, struct tuple **ret); /** * Advance a run iterator to the newest statement for the first key * following @last_stmt. The statement is returned in @ret (NULL if EOF). * Returns 0 on success, -1 on memory allocation or IO error. */ NODISCARD int vy_run_iterator_skip(struct vy_run_iterator *itr, const struct tuple *last_stmt, struct tuple **ret); /** * Close a run iterator. */ void vy_run_iterator_close(struct vy_run_iterator *itr); /** * Simple stream over a slice. @see vy_stmt_stream. 
*/ struct vy_slice_stream { /** Parent class, must be the first member */ struct vy_stmt_stream base; /** Current position */ uint32_t page_no; uint32_t pos_in_page; /** Last page read */ struct vy_page *page; /** The last tuple returned to user */ struct tuple *tuple; /** Members needed for memory allocation and disk access */ /** Slice to stream */ struct vy_slice *slice; /** * Key def for comparing with slice boundaries, * includes secondary key parts. */ const struct key_def *cmp_def; /** Format for allocating REPLACE and DELETE tuples read from pages. */ struct tuple_format *format; /** Same as format, but for UPSERT tuples. */ struct tuple_format *upsert_format; /** Set if this iterator is for a primary index. */ bool is_primary; }; /** * Open a run stream. Use vy_stmt_stream api for further work. */ void vy_slice_stream_open(struct vy_slice_stream *stream, struct vy_slice *slice, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary); /** * Run_writer fills a created run with statements one by one, * splitting them into pages. */ struct vy_run_writer { /** Run to fill. */ struct vy_run *run; /** Path to directory with run files. */ const char *dirpath; /** Identifier of a space owning the run. */ uint32_t space_id; /** Identifier of an index owning the run. */ uint32_t iid; /** * Key definition to extract from tuple and store as page * min key, run min/max keys, and secondary index * statements. */ const struct key_def *cmp_def; /** Key definition to calculate bloom. */ const struct key_def *key_def; /** * Minimal page size. When a page becames bigger, it is * dumped. */ uint64_t page_size; /** * Current page info capacity. Can grow with page number. */ uint32_t page_info_capacity; /** Xlog to write data. */ struct xlog data_xlog; /** Set iff bloom filter is available. */ bool has_bloom; /** Bloom filter. */ struct bloom_spectrum bloom; /** Buffer of a current page row offsets. 
*/ struct ibuf row_index_buf; /** * Remember a last written statement to use it as a source * of max key of a finished run. */ struct tuple *last_stmt; }; /** Create a run writer to fill a run with statements. */ int vy_run_writer_create(struct vy_run_writer *writer, struct vy_run *run, const char *dirpath, uint32_t space_id, uint32_t iid, const struct key_def *cmp_def, const struct key_def *key_def, uint64_t page_size, double bloom_fpr, size_t max_output_count); /** * Write a specified statement into a run. * @param writer Writer to write a statement. * @param stmt Statement to write. * * @retval -1 Memory error. * @retval 0 Success. */ int vy_run_writer_append_stmt(struct vy_run_writer *writer, struct tuple *stmt); /** * Finalize run writing by writing run index into file. The writer * is deleted after call. * @param writer Run writer. * @retval -1 Memory or IO error. * @retval 0 Success. */ int vy_run_writer_commit(struct vy_run_writer *writer); /** * Abort run writing. Can not delete a run and run's file here, * becase it must be done from tx thread. The writer is deleted * after call. * @param Run writer. */ void vy_run_writer_abort(struct vy_run_writer *writer); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_RUN_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/box.cc0000664000000000000000000015072613306565107016705 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/box.h" #include "trivia/config.h" #include "lua/utils.h" /* lua_hash() */ #include "fiber_pool.h" #include #include #include "identifier.h" #include "iproto.h" #include "iproto_constants.h" #include "recovery.h" #include "wal.h" #include "relay.h" #include "applier.h" #include #include "main.h" #include "tuple.h" #include "session.h" #include "schema.h" #include "engine.h" #include "memtx_engine.h" #include "sysview_engine.h" #include "vinyl.h" #include "space.h" #include "index.h" #include "port.h" #include "txn.h" #include "user.h" #include "cfg.h" #include "coio.h" #include "replication.h" /* replica */ #include "title.h" #include "xrow.h" #include "xrow_io.h" #include "xstream.h" #include "authentication.h" #include "path_lock.h" #include "gc.h" #include "checkpoint.h" #include "systemd.h" #include "call.h" #include "func.h" #include "sequence.h" static char status[64] = "unknown"; /** box.stat rmean */ struct rmean *rmean_box; static void title(const char *new_status) { snprintf(status, sizeof(status), "%s", new_status); title_set_status(new_status); title_update(); systemd_snotify("STATUS=%s", status); } bool box_checkpoint_is_in_progress = false; /** * If backup is in progress, this points to the gc consumer * object that 
prevents the garbage collector from deleting * the checkpoint files that are currently being backed up. */ static struct gc_consumer *backup_gc; /** * The instance is in read-write mode: the local checkpoint * and all write ahead logs are processed. For a replica, * it also means we've successfully connected to the master * and began receiving updates from it. */ static bool is_box_configured = false; static bool is_ro = true; static fiber_cond ro_cond; /** * The following flag is set if the instance failed to * synchronize to a sufficient number of replicas to form * a quorum and so was forced to switch to read-only mode. */ static bool is_orphan = true; /* Use the shared instance of xstream for all appliers */ static struct xstream join_stream; static struct xstream subscribe_stream; /** * The pool of fibers in the transaction processor thread * working on incoming messages from net, wal and other * threads. */ static struct fiber_pool tx_fiber_pool; /** * A separate endpoint for WAL wakeup messages, to * ensure that WAL messages are delivered even * if all fibers in tx_fiber_pool are used. Without * this endpoint, tx thread could deadlock when there * are too many messages in flight (gh-1892). 
*/ static struct cbus_endpoint tx_prio_endpoint; static int box_check_writable(void) { /* box is only writable if box.cfg.read_only == false and */ if (is_ro || is_orphan) { diag_set(ClientError, ER_READONLY); diag_log(); return -1; } return 0; } static void box_check_writable_xc(void) { if (box_check_writable() != 0) diag_raise(); } static void box_check_memtx_min_tuple_size(ssize_t memtx_min_tuple_size) { if (memtx_min_tuple_size < 8 || memtx_min_tuple_size > 1048280) tnt_raise(ClientError, ER_CFG, "memtx_min_tuple_size", "specified value is out of bounds"); } static int process_rw(struct request *request, struct space *space, struct tuple **result) { assert(iproto_type_is_dml(request->type)); rmean_collect(rmean_box, request->type, 1); if (access_check_space(space, PRIV_W) != 0) return -1; struct txn *txn = txn_begin_stmt(space); if (txn == NULL) return -1; struct tuple *tuple; if (space_execute_dml(space, txn, request, &tuple) != 0) { txn_rollback_stmt(); return -1; } /* * Pin the tuple locally before the commit, * otherwise it may go away during yield in * when WAL is written in autocommit mode. */ TupleRefNil ref(tuple); if (txn_commit_stmt(txn, request) != 0) return -1; if (result != NULL) { if (tuple != NULL && tuple_bless(tuple) == NULL) return -1; *result = tuple; } return 0; } void box_set_ro(bool ro) { is_ro = ro; fiber_cond_broadcast(&ro_cond); } bool box_is_ro(void) { return is_ro || is_orphan; } int box_wait_ro(bool ro, double timeout) { double deadline = ev_monotonic_now(loop()) + timeout; while (box_is_ro() != ro) { if (fiber_cond_wait_deadline(&ro_cond, deadline) != 0) return -1; if (fiber_is_cancelled()) { diag_set(FiberIsCancelled); return -1; } } return 0; } void box_clear_orphan(void) { if (!is_orphan) return; /* nothing to do */ is_orphan = false; fiber_cond_broadcast(&ro_cond); /* Update the title to reflect the new status. */ title("running"); } struct wal_stream { struct xstream base; /** How many rows have been recovered so far. 
*/ size_t rows; /** Yield once per 'yield' rows. */ size_t yield; }; /** * A stub used in txn_commit() during local recovery. We "replay" * transactions during local recovery, with WAL turned off. * Since each transaction attempts to write itself to WAL at * commit, we need an implementation which would fake WAL write. */ struct recovery_journal { struct journal base; struct vclock *vclock; }; /** * Use the current row LSN as commit LSN - vinyl needs to see the * exact same signature during local recovery to properly mark * min/max LSN of created LSM levels. */ static int64_t recovery_journal_write(struct journal *base, struct journal_entry * /* entry */) { struct recovery_journal *journal = (struct recovery_journal *) base; return vclock_sum(journal->vclock); } static inline void recovery_journal_create(struct recovery_journal *journal, struct vclock *v) { journal_create(&journal->base, recovery_journal_write, NULL); journal->vclock = v; } static inline void apply_row(struct xstream *stream, struct xrow_header *row) { assert(row->bodycnt == 1); /* always 1 for read */ (void) stream; struct request request; xrow_decode_dml_xc(row, &request, dml_request_key_map(row->type)); struct space *space = space_cache_find_xc(request.space_id); if (process_rw(&request, space, NULL) != 0) { say_error("error applying row: %s", request_str(&request)); diag_raise(); } } static void apply_wal_row(struct xstream *stream, struct xrow_header *row) { apply_row(stream, row); struct wal_stream *xstream = container_of(stream, struct wal_stream, base); /** * Yield once in a while, but not too often, * mostly to allow signal handling to take place. */ if (++xstream->rows % xstream->yield == 0) fiber_sleep(0); } static void wal_stream_create(struct wal_stream *ctx, size_t wal_max_rows) { xstream_create(&ctx->base, apply_wal_row); ctx->rows = 0; /** * Make the yield logic covered by the functional test * suite, which has a small setting for rows_per_wal. 
	 * Each yield can take up to 1ms if there are no events,
	 * so we can't afford many of them during recovery.
	 */
	ctx->yield = (wal_max_rows >> 4) + 1;
}

/** Apply a row received during the initial JOIN stage (snapshot data). */
static void
apply_initial_join_row(struct xstream *stream, struct xrow_header *row)
{
	(void) stream;
	struct request request;
	xrow_decode_dml_xc(row, &request, dml_request_key_map(row->type));
	struct space *space = space_cache_find_xc(request.space_id);
	/* no access checks here - applier always works with admin privs */
	space_apply_initial_join_row_xc(space, &request);
}

/* {{{ configuration bindings */

/** Validate the 'log' option string; raises ER_CFG on bad input. */
static void
box_check_log(const char *log)
{
	if (log == NULL)
		return;
	if (say_check_init_str(log) == -1) {
		if (diag_last_error(diag_get())->type ==
		    &type_IllegalParams) {
			tnt_raise(ClientError, ER_CFG, "log",
				  diag_last_error(diag_get())->errmsg);
		}
		diag_raise();
	}
}

/** Parse 'log_format'; only 'plain' and 'json' are accepted. */
static enum say_format
box_check_log_format(const char *log_format)
{
	enum say_format format = say_format_by_name(log_format);
	if (format == say_format_MAX)
		tnt_raise(ClientError, ER_CFG, "log_format",
			  "expected 'plain' or 'json'");
	return format;
}

/** Validate a URI option; NULL is allowed (option not set). */
static void
box_check_uri(const char *source, const char *option_name)
{
	if (source == NULL)
		return;
	struct uri uri;
	/* URI format is [host:]service */
	if (uri_parse(&uri, source) || !uri.service) {
		tnt_raise(ClientError, ER_CFG, option_name,
			  "expected host:service or /unix.socket");
	}
}

/** Validate every URI in the box.cfg.replication array. */
static void
box_check_replication(void)
{
	int count = cfg_getarr_size("replication");
	for (int i = 0; i < count; i++) {
		const char *source = cfg_getarr_elem("replication", i);
		box_check_uri(source, "replication");
	}
}

/** replication_timeout must be strictly positive. */
static double
box_check_replication_timeout(void)
{
	double timeout = cfg_getd("replication_timeout");
	if (timeout <= 0) {
		/*
		 * NOTE(review): "greather" is a typo in this
		 * user-visible message; fix to "greater" in a
		 * dedicated code change.
		 */
		tnt_raise(ClientError, ER_CFG, "replication_timeout",
			  "the value must be greather than 0");
	}
	return timeout;
}

/** replication_connect_timeout must be strictly positive. */
static double
box_check_replication_connect_timeout(void)
{
	double timeout = cfg_getd("replication_connect_timeout");
	if (timeout <= 0) {
		/* NOTE(review): same "greather" typo as above. */
		tnt_raise(ClientError, ER_CFG,
			  "replication_connect_timeout",
			  "the value must be greather than 0");
	}
	return timeout;
}

/** replication_connect_quorum must be >= 0; defaults to "all". */
static int
box_check_replication_connect_quorum(void)
{
	int quorum = cfg_geti_default("replication_connect_quorum",
				      REPLICATION_CONNECT_QUORUM_ALL);
	if (quorum < 0) {
		tnt_raise(ClientError, ER_CFG, "replication_connect_quorum",
			  "the value must be greater or equal to 0");
	}
	return quorum;
}

/** replication_sync_lag must be strictly positive; defaults to infinity. */
static double
box_check_replication_sync_lag(void)
{
	double lag = cfg_getd_default("replication_sync_lag",
				      TIMEOUT_INFINITY);
	if (lag <= 0) {
		tnt_raise(ClientError, ER_CFG, "replication_sync_lag",
			  "the value must be greater than 0");
	}
	return lag;
}

/** Parse 'instance_uuid' into @a uuid (nil when not configured). */
static void
box_check_instance_uuid(struct tt_uuid *uuid)
{
	*uuid = uuid_nil;
	const char *uuid_str = cfg_gets("instance_uuid");
	if (uuid_str != NULL && tt_uuid_from_string(uuid_str, uuid) != 0)
		tnt_raise(ClientError, ER_CFG, "instance_uuid", uuid_str);
}

/** Parse 'replicaset_uuid' into @a uuid (nil when not configured). */
static void
box_check_replicaset_uuid(struct tt_uuid *uuid)
{
	*uuid = uuid_nil;
	const char *uuid_str = cfg_gets("replicaset_uuid");
	if (uuid_str != NULL && tt_uuid_from_string(uuid_str, uuid) != 0)
		tnt_raise(ClientError, ER_CFG, "replicaset_uuid", uuid_str);
}

/** Map a wal_mode name to the enum; raises ER_CFG on unknown names. */
static enum wal_mode
box_check_wal_mode(const char *mode_name)
{
	assert(mode_name != NULL); /* checked in Lua */
	int mode = strindex(wal_mode_STRS, mode_name, WAL_MODE_MAX);
	if (mode == WAL_MODE_MAX)
		tnt_raise(ClientError, ER_CFG, "wal_mode", mode_name);
	return (enum wal_mode) mode;
}

/** readahead must lie in [128, INT32_MAX]. */
static void
box_check_readahead(int readahead)
{
	enum { READAHEAD_MIN = 128, READAHEAD_MAX = 2147483647 };
	if (readahead < (int) READAHEAD_MIN ||
	    readahead > (int) READAHEAD_MAX) {
		tnt_raise(ClientError, ER_CFG, "readahead",
			  "specified value is out of bounds");
	}
}

/** checkpoint_count must be at least 1. */
static void
box_check_checkpoint_count(int checkpoint_count)
{
	if (checkpoint_count < 1) {
		tnt_raise(ClientError, ER_CFG, "checkpoint_count",
			  "the value must not be less than one");
	}
}

/** rows_per_wal must be greater than one. */
static int64_t
box_check_wal_max_rows(int64_t wal_max_rows)
{
	/* check rows_per_wal configuration */
	if (wal_max_rows <= 1) {
		tnt_raise(ClientError, ER_CFG, "rows_per_wal",
			  "the value must be greater than one");
	}
	return wal_max_rows;
}

/** wal_max_size must be greater than one. */
static int64_t
box_check_wal_max_size(int64_t wal_max_size)
{
	/* check wal_max_bytes configuration */
	if (wal_max_size <= 1) {
		tnt_raise(ClientError, ER_CFG, "wal_max_size",
			  "the value must be greater than one");
	}
	return wal_max_size;
}

/** Validate all vinyl_* options together (ranges and cross-checks). */
static void
box_check_vinyl_options(void)
{
	int read_threads = cfg_geti("vinyl_read_threads");
	int write_threads = cfg_geti("vinyl_write_threads");
	int64_t range_size = cfg_geti64("vinyl_range_size");
	int64_t page_size = cfg_geti64("vinyl_page_size");
	int run_count_per_level = cfg_geti("vinyl_run_count_per_level");
	double run_size_ratio = cfg_getd("vinyl_run_size_ratio");
	double bloom_fpr = cfg_getd("vinyl_bloom_fpr");
	if (read_threads < 1) {
		tnt_raise(ClientError, ER_CFG, "vinyl_read_threads",
			  "must be greater than or equal to 1");
	}
	if (write_threads < 2) {
		tnt_raise(ClientError, ER_CFG, "vinyl_write_threads",
			  "must be greater than or equal to 2");
	}
	if (range_size <= 0) {
		tnt_raise(ClientError, ER_CFG, "vinyl_range_size",
			  "must be greater than 0");
	}
	if (page_size <= 0 || page_size > range_size) {
		tnt_raise(ClientError, ER_CFG, "vinyl_page_size",
			  "must be greater than 0 and less than "
			  "or equal to vinyl_range_size");
	}
	if (run_count_per_level <= 0) {
		tnt_raise(ClientError, ER_CFG, "vinyl_run_count_per_level",
			  "must be greater than 0");
	}
	if (run_size_ratio <= 1) {
		tnt_raise(ClientError, ER_CFG, "vinyl_run_size_ratio",
			  "must be greater than 1");
	}
	if (bloom_fpr <= 0 || bloom_fpr > 1) {
		tnt_raise(ClientError, ER_CFG, "vinyl_bloom_fpr",
			  "must be greater than 0 and less than or equal to 1");
	}
}

/** Validate the whole box configuration; raises ER_CFG on any problem. */
void
box_check_config()
{
	struct tt_uuid uuid;
	box_check_log(cfg_gets("log"));
	box_check_log_format(cfg_gets("log_format"));
	box_check_uri(cfg_gets("listen"), "listen");
	box_check_instance_uuid(&uuid);
	box_check_replicaset_uuid(&uuid);
	box_check_replication();
	box_check_replication_timeout();
	box_check_replication_connect_timeout();
	box_check_replication_connect_quorum();
	box_check_replication_sync_lag();
	box_check_readahead(cfg_geti("readahead"));
	box_check_checkpoint_count(cfg_geti("checkpoint_count"));
	box_check_wal_max_rows(cfg_geti64("rows_per_wal"));
	box_check_wal_max_size(cfg_geti64("wal_max_size"));
	box_check_wal_mode(cfg_gets("wal_mode"));
	box_check_memtx_min_tuple_size(cfg_geti64("memtx_min_tuple_size"));
	box_check_vinyl_options();
}

/*
 * Parse box.cfg.replication and create appliers.
 */
static struct applier **
cfg_get_replication(int *p_count)
{
	/* Use static buffer for result */
	static struct applier *appliers[VCLOCK_MAX];
	int count = cfg_getarr_size("replication");
	if (count >= VCLOCK_MAX) {
		tnt_raise(ClientError, ER_CFG, "replication",
			  "too many replicas");
	}
	for (int i = 0; i < count; i++) {
		const char *source = cfg_getarr_elem("replication", i);
		struct applier *applier = applier_new(source,
						      &join_stream,
						      &subscribe_stream);
		if (applier == NULL) {
			/* Delete created appliers */
			while (--i >= 0)
				applier_delete(appliers[i]);
			return NULL;
		}
		appliers[i] = applier; /* link to the list */
	}
	*p_count = count;
	return appliers;
}

/*
 * Sync box.cfg.replication with the cluster registry, but
 * don't start appliers.
 */
static void
box_sync_replication(double timeout, bool connect_all)
{
	int count = 0;
	struct applier **appliers = cfg_get_replication(&count);
	if (appliers == NULL)
		diag_raise();
	/* Guard frees the appliers unless replicaset takes ownership. */
	auto guard = make_scoped_guard([=]{
		for (int i = 0; i < count; i++)
			applier_delete(appliers[i]); /* doesn't affect diag */
	});
	replicaset_connect(appliers, count, timeout, connect_all);
	guard.is_active = false;
}

/** Dynamic box.cfg.replication update: reconnect and follow replicas. */
void
box_set_replication(void)
{
	if (!is_box_configured) {
		/*
		 * Do nothing, we're in local hot standby mode, this instance
		 * will automatically begin following the replica when local
		 * hot standby mode is finished, see box_cfg().
		 */
		return;
	}
	box_check_replication();
	/* Try to connect to all replicas within the timeout period */
	box_sync_replication(replication_connect_timeout, true);
	/* Follow replica */
	replicaset_follow();
}

/** Dynamic replication_timeout update. */
void
box_set_replication_timeout(void)
{
	replication_timeout = box_check_replication_timeout();
}

/** Dynamic replication_connect_timeout update. */
void
box_set_replication_connect_timeout(void)
{
	replication_connect_timeout =
		box_check_replication_connect_timeout();
}

/** Dynamic replication_connect_quorum update; re-checks the quorum. */
void
box_set_replication_connect_quorum(void)
{
	replication_connect_quorum = box_check_replication_connect_quorum();
	if (is_box_configured)
		replicaset_check_quorum();
}

/** Bind the iproto server socket to box.cfg.listen. */
void
box_bind(void)
{
	const char *uri = cfg_gets("listen");
	box_check_uri(uri, "listen");
	iproto_bind(uri);
}

/** Start accepting iproto connections on the bound socket. */
void
box_listen(void)
{
	iproto_listen();
}

/** Dynamic log_level update. */
void
box_set_log_level(void)
{
	say_set_log_level(cfg_geti("log_level"));
}

/** Dynamic log_format update ('plain' or 'json'). */
void
box_set_log_format(void)
{
	enum say_format format = box_check_log_format(cfg_gets("log_format"));
	say_set_log_format(format);
}

/** Dynamic io_collect_interval update (libev option). */
void
box_set_io_collect_interval(void)
{
	ev_set_io_collect_interval(loop(), cfg_getd("io_collect_interval"));
}

/** Dynamic snap_io_rate_limit update, forwarded to the memtx engine. */
void
box_set_snap_io_rate_limit(void)
{
	struct memtx_engine *memtx;
	memtx = (struct memtx_engine *)engine_by_name("memtx");
	assert(memtx != NULL);
	memtx_engine_set_snap_io_rate_limit(memtx,
					    cfg_getd("snap_io_rate_limit"));
}

/** Dynamic memtx_max_tuple_size update, forwarded to the memtx engine. */
void
box_set_memtx_max_tuple_size(void)
{
	struct memtx_engine *memtx;
	memtx = (struct memtx_engine *)engine_by_name("memtx");
	assert(memtx != NULL);
	memtx_engine_set_max_tuple_size(memtx,
					cfg_geti("memtx_max_tuple_size"));
}

/** Dynamic too_long_threshold update, shared with the vinyl engine. */
void
box_set_too_long_threshold(void)
{
	too_long_threshold = cfg_getd("too_long_threshold");
	struct vinyl_engine *vinyl;
	vinyl = (struct vinyl_engine *)engine_by_name("vinyl");
	assert(vinyl != NULL);
	vinyl_engine_set_too_long_threshold(vinyl, too_long_threshold);
}

/** Dynamic readahead update for iproto connections. */
void
box_set_readahead(void)
{
	int readahead = cfg_geti("readahead");
	box_check_readahead(readahead);
	iproto_readahead = readahead;
}

/** Dynamic checkpoint_count update, forwarded to the garbage collector. */
void
box_set_checkpoint_count(void)
{
	int checkpoint_count = cfg_geti("checkpoint_count");
	box_check_checkpoint_count(checkpoint_count);
	gc_set_checkpoint_count(checkpoint_count);
}

/** Dynamic vinyl_max_tuple_size update. */
void
box_set_vinyl_max_tuple_size(void)
{
	struct vinyl_engine *vinyl;
	vinyl = (struct vinyl_engine *)engine_by_name("vinyl");
	assert(vinyl != NULL);
	vinyl_engine_set_max_tuple_size(vinyl,
					cfg_geti("vinyl_max_tuple_size"));
}

/** Dynamic vinyl_cache update. */
void
box_set_vinyl_cache(void)
{
	struct vinyl_engine *vinyl;
	vinyl = (struct vinyl_engine *)engine_by_name("vinyl");
	assert(vinyl != NULL);
	vinyl_engine_set_cache(vinyl, cfg_geti64("vinyl_cache"));
}

/** Dynamic vinyl_timeout update. */
void
box_set_vinyl_timeout(void)
{
	struct vinyl_engine *vinyl;
	vinyl = (struct vinyl_engine *)engine_by_name("vinyl");
	assert(vinyl != NULL);
	vinyl_engine_set_timeout(vinyl, cfg_getd("vinyl_timeout"));
}

/* }}} configuration bindings */

/**
 * Execute a request against a given space id with
 * a variable-argument tuple described in format.
 *
 * @example: you want to insert 5 into space 1:
 * boxk(IPROTO_INSERT, 1, "[%u]", 5);
 *
 * @example: you want to set field 3 (base 0) of
 * a tuple with key [10, 20] in space 1 to 1000:
 * boxk(IPROTO_UPDATE, 1, "[%u%u][[%s%u%u]]", 10, 20, "=", 3, 1000);
 *
 * @note Since this is for internal use, it has
 * no boundary or misuse checks.
 */
int
boxk(int type, uint32_t space_id, const char *format, ...)
{
	va_list ap;
	struct request request;
	memset(&request, 0, sizeof(request));
	request.type = type;
	request.space_id = space_id;
	/* First pass: measure the msgpack size of the formatted data. */
	va_start(ap, format);
	size_t buf_size = mp_vformat(NULL, 0, format, ap);
	char *buf = (char *)region_alloc(&fiber()->gc, buf_size);
	va_end(ap);
	if (buf == NULL)
		return -1;
	/* Second pass: actually encode into the region buffer. */
	va_start(ap, format);
	if (mp_vformat(buf, buf_size, format, ap) != buf_size)
		assert(0);
	va_end(ap);
	const char *data = buf;
	const char *data_end = buf + buf_size;
	switch (type) {
	case IPROTO_INSERT:
	case IPROTO_REPLACE:
		request.tuple = data;
		request.tuple_end = data_end;
		break;
	case IPROTO_DELETE:
		request.key = data;
		request.key_end = data_end;
		break;
	case IPROTO_UPDATE:
		/* Data is two msgpack arrays: key, then update ops. */
		request.key = data;
		mp_next(&data);
		request.key_end = data;
		request.tuple = data;
		mp_next(&data);
		request.tuple_end = data;
		request.index_base = 0;
		break;
	default:
		unreachable();
	}
	struct space *space = space_cache_find(space_id);
	if (space == NULL)
		return -1;
	return process_rw(&request, space, NULL);
}

/** Add a tuple to the stored-procedure output port. */
int
box_return_tuple(box_function_ctx_t *ctx, box_tuple_t *tuple)
{
	return port_tuple_add(ctx->port, tuple);
}

/* schema_find_id()-like method using only public API */
uint32_t
box_space_id_by_name(const char *name, uint32_t len)
{
	if (len > BOX_NAME_MAX)
		return BOX_ID_NIL;
	uint32_t size = mp_sizeof_array(1) + mp_sizeof_str(len);
	char *begin = (char *) region_alloc(&fiber()->gc, size);
	if (begin == NULL) {
		diag_set(OutOfMemory, size, "region_alloc", "begin");
		return BOX_ID_NIL;
	}
	char *end = mp_encode_array(begin, 1);
	end = mp_encode_str(end, name, len);
	/* NOTE: error and missing key cases are indistinguishable */
	box_tuple_t *tuple;
	if (box_index_get(BOX_VSPACE_ID, 2, begin, end, &tuple) != 0)
		return BOX_ID_NIL;
	if (tuple == NULL)
		return BOX_ID_NIL;
	uint32_t result = BOX_ID_NIL;
	(void) tuple_field_u32(tuple, BOX_SPACE_FIELD_ID, &result);
	return result;
}

/** Look up an index id by name via the _vindex view. */
uint32_t
box_index_id_by_name(uint32_t space_id, const char *name, uint32_t len)
{
	if (len > BOX_NAME_MAX)
		return BOX_ID_NIL;
	uint32_t size = mp_sizeof_array(2) + mp_sizeof_uint(space_id) +
			mp_sizeof_str(len);
	char *begin = (char *) region_alloc(&fiber()->gc, size);
	if (begin == NULL) {
		diag_set(OutOfMemory, size, "region_alloc", "begin");
		return BOX_ID_NIL;
	}
	char *end = mp_encode_array(begin, 2);
	end = mp_encode_uint(end, space_id);
	end = mp_encode_str(end, name, len);
	/* NOTE: error and missing key cases are indistinguishable */
	box_tuple_t *tuple;
	if (box_index_get(BOX_VINDEX_ID, 2, begin, end, &tuple) != 0)
		return BOX_ID_NIL;
	if (tuple == NULL)
		return BOX_ID_NIL;
	uint32_t result = BOX_ID_NIL;
	(void) tuple_field_u32(tuple, BOX_INDEX_FIELD_ID, &result);
	return result;
}
/** \endcond public */

/** Common entry point for single-request DML from the public API. */
int
box_process1(struct request *request, box_tuple_t **result)
{
	/* Allow to write to temporary spaces in read-only mode. */
	struct space *space = space_cache_find(request->space_id);
	if (space == NULL)
		return -1;
	if (!space->def->opts.temporary && box_check_writable() != 0)
		return -1;
	return process_rw(request, space, result);
}

/**
 * Iterate over an index and fill @a port with up to @a limit tuples,
 * skipping @a offset matches first.
 * @retval 0 on success, -1 on error (diag set).
 */
int
box_select(uint32_t space_id, uint32_t index_id, int iterator,
	   uint32_t offset, uint32_t limit,
	   const char *key, const char *key_end, struct port *port)
{
	(void)key_end;
	rmean_collect(rmean_box, IPROTO_SELECT, 1);
	if (iterator < 0 || iterator >= iterator_type_MAX) {
		diag_set(ClientError, ER_ILLEGAL_PARAMS,
			 "Invalid iterator type");
		diag_log();
		return -1;
	}
	struct space *space = space_cache_find(space_id);
	if (space == NULL)
		return -1;
	if (access_check_space(space, PRIV_R) != 0)
		return -1;
	struct index *index = index_find(space, index_id);
	if (index == NULL)
		return -1;
	enum iterator_type type = (enum iterator_type) iterator;
	/* A NULL key means an empty key: zero parts. */
	uint32_t part_count = key ? mp_decode_array(&key) : 0;
	if (key_validate(index->def, type, key, part_count))
		return -1;
	ERROR_INJECT(ERRINJ_TESTING, {
		diag_set(ClientError, ER_INJECTION, "ERRINJ_TESTING");
		return -1;
	});
	struct txn *txn;
	if (txn_begin_ro_stmt(space, &txn) != 0)
		return -1;
	struct iterator *it = index_create_iterator(index, type,
						    key, part_count);
	if (it == NULL) {
		txn_rollback_stmt();
		return -1;
	}
	int rc = 0;
	uint32_t found = 0;
	struct tuple *tuple;
	port_tuple_create(port);
	while (found < limit) {
		rc = iterator_next(it, &tuple);
		if (rc != 0 || tuple == NULL)
			break;
		if (offset > 0) {
			offset--;
			continue;
		}
		rc = port_tuple_add(port, tuple);
		if (rc != 0)
			break;
		found++;
	}
	iterator_delete(it);
	if (rc != 0) {
		port_destroy(port);
		txn_rollback_stmt();
		return -1;
	}
	txn_commit_ro_stmt(txn);
	return 0;
}

/** Public API: insert a msgpack-encoded tuple into a space. */
int
box_insert(uint32_t space_id, const char *tuple, const char *tuple_end,
	   box_tuple_t **result)
{
	mp_tuple_assert(tuple, tuple_end);
	struct request request;
	memset(&request, 0, sizeof(request));
	request.type = IPROTO_INSERT;
	request.space_id = space_id;
	request.tuple = tuple;
	request.tuple_end = tuple_end;
	return box_process1(&request, result);
}

/** Public API: replace a tuple (insert-or-overwrite). */
int
box_replace(uint32_t space_id, const char *tuple, const char *tuple_end,
	    box_tuple_t **result)
{
	mp_tuple_assert(tuple, tuple_end);
	struct request request;
	memset(&request, 0, sizeof(request));
	request.type = IPROTO_REPLACE;
	request.space_id = space_id;
	request.tuple = tuple;
	request.tuple_end = tuple_end;
	return box_process1(&request, result);
}

/** Public API: delete a tuple by key. */
int
box_delete(uint32_t space_id, uint32_t index_id, const char *key,
	   const char *key_end, box_tuple_t **result)
{
	mp_tuple_assert(key, key_end);
	struct request request;
	memset(&request, 0, sizeof(request));
	request.type = IPROTO_DELETE;
	request.space_id = space_id;
	request.index_id = index_id;
	request.key = key;
	request.key_end = key_end;
	return box_process1(&request, result);
}

/** Public API: apply update operations to the tuple matching @a key. */
int
box_update(uint32_t space_id, uint32_t index_id, const char *key,
	   const char *key_end, const char *ops, const char *ops_end,
	   int index_base, box_tuple_t **result)
{
	mp_tuple_assert(key, key_end);
	mp_tuple_assert(ops, ops_end);
	struct request request;
	memset(&request, 0, sizeof(request));
	request.type = IPROTO_UPDATE;
	request.space_id = space_id;
	request.index_id = index_id;
	request.key = key;
	request.key_end = key_end;
	request.index_base = index_base;
	/** Legacy: in case of update, ops are passed in in request tuple */
	request.tuple = ops;
	request.tuple_end = ops_end;
	return box_process1(&request, result);
}

/** Public API: update the tuple if it exists, otherwise insert it. */
int
box_upsert(uint32_t space_id, uint32_t index_id, const char *tuple,
	   const char *tuple_end, const char *ops, const char *ops_end,
	   int index_base, box_tuple_t **result)
{
	mp_tuple_assert(ops, ops_end);
	mp_tuple_assert(tuple, tuple_end);
	struct request request;
	memset(&request, 0, sizeof(request));
	request.type = IPROTO_UPSERT;
	request.space_id = space_id;
	request.index_id = index_id;
	request.ops = ops;
	request.ops_end = ops_end;
	request.tuple = tuple;
	request.tuple_end = tuple_end;
	request.index_base = index_base;
	return box_process1(&request, result);
}

/**
 * Trigger space truncation by bumping a counter
 * in _truncate space.
 */
static void
space_truncate(struct space *space)
{
	/* Key: [space_id, 1] for the _truncate tuple. */
	char tuple_buf[32];
	char *tuple_buf_end = tuple_buf;
	tuple_buf_end = mp_encode_array(tuple_buf_end, 2);
	tuple_buf_end = mp_encode_uint(tuple_buf_end, space_id(space));
	tuple_buf_end = mp_encode_uint(tuple_buf_end, 1);
	assert(tuple_buf_end < tuple_buf + sizeof(tuple_buf));
	/* Upsert ops: [["+", 1, 1]] - increment the truncate counter. */
	char ops_buf[128];
	char *ops_buf_end = ops_buf;
	ops_buf_end = mp_encode_array(ops_buf_end, 1);
	ops_buf_end = mp_encode_array(ops_buf_end, 3);
	ops_buf_end = mp_encode_str(ops_buf_end, "+", 1);
	ops_buf_end = mp_encode_uint(ops_buf_end, 1);
	ops_buf_end = mp_encode_uint(ops_buf_end, 1);
	assert(ops_buf_end < ops_buf + sizeof(ops_buf));
	if (box_upsert(BOX_TRUNCATE_ID, 0, tuple_buf, tuple_buf_end,
		       ops_buf, ops_buf_end, 0, NULL) != 0)
		diag_raise();
}

/** Public API: truncate a space; returns 0 on success, -1 on error. */
int
box_truncate(uint32_t space_id)
{
	try {
		struct space *space = space_cache_find_xc(space_id);
		space_truncate(space);
		return 0;
	} catch (Exception *exc) {
		return -1;
	}
}

/** Update a record in _sequence_data space. */
static int
sequence_data_update(uint32_t seq_id, int64_t value)
{
	size_t tuple_buf_size = (mp_sizeof_array(2) +
				 2 * mp_sizeof_uint(UINT64_MAX));
	char *tuple_buf = (char *) region_alloc(&fiber()->gc, tuple_buf_size);
	if (tuple_buf == NULL) {
		diag_set(OutOfMemory, tuple_buf_size, "region", "tuple");
		return -1;
	}
	char *tuple_buf_end = tuple_buf;
	tuple_buf_end = mp_encode_array(tuple_buf_end, 2);
	tuple_buf_end = mp_encode_uint(tuple_buf_end, seq_id);
	tuple_buf_end = (value < 0 ?
			 mp_encode_int(tuple_buf_end, value) :
			 mp_encode_uint(tuple_buf_end, value));
	assert(tuple_buf_end < tuple_buf + tuple_buf_size);
	/* _sequence_data is written with admin privileges. */
	struct credentials *orig_credentials = effective_user();
	fiber_set_user(fiber(), &admin_credentials);
	int rc = box_replace(BOX_SEQUENCE_DATA_ID,
			     tuple_buf, tuple_buf_end, NULL);
	fiber_set_user(fiber(), orig_credentials);
	return rc;
}

/** Delete a record from _sequence_data space. */
static int
sequence_data_delete(uint32_t seq_id)
{
	size_t key_buf_size = mp_sizeof_array(1) + mp_sizeof_uint(UINT64_MAX);
	char *key_buf = (char *) region_alloc(&fiber()->gc, key_buf_size);
	if (key_buf == NULL) {
		diag_set(OutOfMemory, key_buf_size, "region", "key");
		return -1;
	}
	char *key_buf_end = key_buf;
	key_buf_end = mp_encode_array(key_buf_end, 1);
	key_buf_end = mp_encode_uint(key_buf_end, seq_id);
	assert(key_buf_end < key_buf + key_buf_size);
	/* _sequence_data is written with admin privileges. */
	struct credentials *orig_credentials = effective_user();
	fiber_set_user(fiber(), &admin_credentials);
	int rc = box_delete(BOX_SEQUENCE_DATA_ID, 0,
			    key_buf, key_buf_end, NULL);
	fiber_set_user(fiber(), orig_credentials);
	return rc;
}

/** Advance a sequence and persist its new value; result via @a result. */
int
box_sequence_next(uint32_t seq_id, int64_t *result)
{
	struct sequence *seq = sequence_cache_find(seq_id);
	if (seq == NULL)
		return -1;
	if (access_check_sequence(seq) != 0)
		return -1;
	int64_t value;
	if (sequence_next(seq, &value) != 0)
		return -1;
	if (sequence_data_update(seq_id, value) != 0)
		return -1;
	*result = value;
	return 0;
}

/** Set a sequence to an explicit value and persist it. */
int
box_sequence_set(uint32_t seq_id, int64_t value)
{
	struct sequence *seq = sequence_cache_find(seq_id);
	if (seq == NULL)
		return -1;
	if (access_check_sequence(seq) != 0)
		return -1;
	if (sequence_set(seq, value) != 0)
		return -1;
	return sequence_data_update(seq_id, value);
}

/** Reset a sequence to its initial state and drop its persisted value. */
int
box_sequence_reset(uint32_t seq_id)
{
	struct sequence *seq = sequence_cache_find(seq_id);
	if (seq == NULL)
		return -1;
	if (access_check_sequence(seq) != 0)
		return -1;
	sequence_reset(seq);
	return sequence_data_delete(seq_id);
}

/** Insert a (replica id, uuid) pair into the _cluster space. */
static inline void
box_register_replica(uint32_t id, const struct tt_uuid *uuid)
{
	if (boxk(IPROTO_INSERT, BOX_CLUSTER_ID, "[%u%s]",
		 (unsigned) id, tt_uuid_str(uuid)) != 0)
		diag_raise();
	assert(replica_by_uuid(uuid)->id == id);
}

/**
 * @brief Called when recovery/replication wants to add a new
 * replica to the replica set.
 * replica_set_id() is called as a commit trigger on _cluster
 * space and actually adds the replica to the replica set.
 * @param instance_uuid
 */
static void
box_on_join(const tt_uuid *instance_uuid)
{
	struct replica *replica = replica_by_uuid(instance_uuid);
	if (replica != NULL && replica->id != REPLICA_ID_NIL)
		return; /* nothing to do - already registered */
	box_check_writable_xc();
	/** Find the largest existing replica id. */
	struct space *space = space_cache_find_xc(BOX_CLUSTER_ID);
	struct index *index = index_find_system_xc(space, 0);
	struct iterator *it = index_create_iterator_xc(index, ITER_ALL,
						       NULL, 0);
	IteratorGuard iter_guard(it);
	struct tuple *tuple;
	/** Assign a new replica id: first gap in the sorted id sequence. */
	uint32_t replica_id = 1;
	while ((tuple = iterator_next_xc(it)) != NULL) {
		if (tuple_field_u32_xc(tuple,
				       BOX_CLUSTER_FIELD_ID) != replica_id)
			break;
		replica_id++;
	}
	box_register_replica(replica_id, instance_uuid);
}

/** Handle an iproto AUTH request. */
void
box_process_auth(struct auth_request *request)
{
	rmean_collect(rmean_box, IPROTO_AUTH, 1);
	/* Check that bootstrap has been finished */
	if (!is_box_configured)
		tnt_raise(ClientError, ER_LOADING);
	const char *user = request->user_name;
	uint32_t len = mp_decode_strl(&user);
	authenticate(user, len, request->scramble);
}

void
box_process_join(struct ev_io *io, struct xrow_header *header)
{
	/*
	 * Tarantool 1.7 JOIN protocol diagram (gh-1113)
	 * =============================================
	 *
	 * Replica => Master
	 *
	 * => JOIN { INSTANCE_UUID: replica_uuid }
	 * <= OK { VCLOCK: start_vclock }
	 *    Replica has enough permissions and master is ready for JOIN.
	 *     - start_vclock - vclock of the latest master's checkpoint.
	 *
	 * <= INSERT
	 *    ...
	 *    Initial data: a stream of engine-specifc rows, e.g. snapshot
	 *    rows for memtx or dirty cursor data for Vinyl. Engine can
	 *    use REPLICA_ID, LSN and other fields for internal purposes.
	 *    ...
	 * <= INSERT
	 * <= OK { VCLOCK: stop_vclock } - end of initial JOIN stage.
	 *     - `stop_vclock` - master's vclock when it's done
	 *     done sending rows from the snapshot (i.e. vclock
	 *     for the end of final join).
	 *
	 * <= INSERT/REPLACE/UPDATE/UPSERT/DELETE { REPLICA_ID, LSN }
	 *    ...
	 *    Final data: a stream of WAL rows from `start_vclock` to
	 *    `stop_vclock`, inclusive. REPLICA_ID and LSN fields are
	 *    original values from WAL and master-master replication.
	 *    ...
	 * <= INSERT/REPLACE/UPDATE/UPSERT/DELETE { REPLICA_ID, LSN }
	 * <= OK { VCLOCK: current_vclock } - end of final JOIN stage.
	 *      - `current_vclock` - master's vclock after final stage.
	 *
	 * All packets must have the same SYNC value as initial JOIN request.
	 * Master can send ERROR at any time. Replica doesn't confirm rows
	 * by OKs. Either initial or final stream includes:
	 *  - Cluster UUID in _schema space
	 *  - Registration of master in _cluster space
	 *  - Registration of the new replica in _cluster space
	 */
	assert(header->type == IPROTO_JOIN);

	/* Decode JOIN request */
	struct tt_uuid instance_uuid = uuid_nil;
	xrow_decode_join_xc(header, &instance_uuid);

	/* Check that bootstrap has been finished */
	if (!is_box_configured)
		tnt_raise(ClientError, ER_LOADING);

	/* Forbid connection to itself */
	if (tt_uuid_is_equal(&instance_uuid, &INSTANCE_UUID))
		tnt_raise(ClientError, ER_CONNECTION_TO_SELF);

	/* Check permissions */
	access_check_universe_xc(PRIV_R);

	/*
	 * Unless already registered, the new replica will be
	 * added to _cluster space once the initial join stage
	 * is complete. Fail early if the caller does not have
	 * appropriate access privileges.
	 */
	struct replica *replica = replica_by_uuid(&instance_uuid);
	if (replica == NULL || replica->id == REPLICA_ID_NIL) {
		box_check_writable_xc();
		struct space *space = space_cache_find_xc(BOX_CLUSTER_ID);
		access_check_space_xc(space, PRIV_W);
	}

	/* Forbid replication with disabled WAL */
	if (wal_mode() == WAL_NONE) {
		tnt_raise(ClientError, ER_UNSUPPORTED, "Replication",
			  "wal_mode = 'none'");
	}

	/* Remember start vclock. */
	struct vclock start_vclock;
	/*
	 * The only case when the directory index is empty is
	 * when someone has deleted a snapshot and tries to join
	 * as a replica. Our best effort is to not crash in such
	 * case: raise ER_MISSING_SNAPSHOT.
	 */
	if (checkpoint_last(&start_vclock) < 0)
		tnt_raise(ClientError, ER_MISSING_SNAPSHOT);

	/* Register the replica with the garbage collector. */
	struct gc_consumer *gc = gc_consumer_register(
		tt_sprintf("replica %s", tt_uuid_str(&instance_uuid)),
		vclock_sum(&start_vclock));
	if (gc == NULL)
		diag_raise();
	auto gc_guard = make_scoped_guard([=]{
		gc_consumer_unregister(gc);
	});

	/* Respond to JOIN request with start_vclock. */
	struct xrow_header row;
	xrow_encode_vclock_xc(&row, &start_vclock);
	row.sync = header->sync;
	coio_write_xrow(io, &row);

	/*
	 * Initial stream: feed replica with dirty data from engines.
	 */
	relay_initial_join(io->fd, header->sync, &start_vclock);
	say_info("initial data sent.");

	/**
	 * Call the server-side hook which stores the replica uuid
	 * in _cluster space after sending the last row but before
	 * sending OK - if the hook fails, the error reaches the
	 * client.
	 */
	box_on_join(&instance_uuid);

	replica = replica_by_uuid(&instance_uuid);
	assert(replica != NULL);
	/* The gc consumer now belongs to the replica - disarm the guard. */
	replica->gc = gc;
	gc_guard.is_active = false;

	/* Remember master's vclock after the last request */
	struct vclock stop_vclock;
	wal_checkpoint(&stop_vclock, false);

	/* Send end of initial stage data marker */
	xrow_encode_vclock_xc(&row, &stop_vclock);
	row.sync = header->sync;
	coio_write_xrow(io, &row);

	/*
	 * Final stage: feed replica with WALs in range
	 * (start_vclock, stop_vclock).
	 */
	relay_final_join(io->fd, header->sync, &start_vclock, &stop_vclock);
	say_info("final data sent.");

	/* Send end of WAL stream marker */
	struct vclock current_vclock;
	wal_checkpoint(&current_vclock, false);
	xrow_encode_vclock_xc(&row, &current_vclock);
	row.sync = header->sync;
	coio_write_xrow(io, &row);
}

void
box_process_subscribe(struct ev_io *io, struct xrow_header *header)
{
	assert(header->type == IPROTO_SUBSCRIBE);

	/* Check that bootstrap has been finished */
	if (!is_box_configured)
		tnt_raise(ClientError, ER_LOADING);

	struct tt_uuid replicaset_uuid = uuid_nil, replica_uuid = uuid_nil;
	struct vclock replica_clock;
	uint32_t replica_version_id;
	vclock_create(&replica_clock);
	xrow_decode_subscribe_xc(header, &replicaset_uuid, &replica_uuid,
				 &replica_clock, &replica_version_id);

	/* Forbid connection to itself */
	if (tt_uuid_is_equal(&replica_uuid, &INSTANCE_UUID))
		tnt_raise(ClientError, ER_CONNECTION_TO_SELF);

	/* Check permissions */
	access_check_universe_xc(PRIV_R);

	/**
	 * Check that the given UUID matches the UUID of the
	 * replica set this replica belongs to. Used to handshake
	 * replica connect, and refuse a connection from a replica
	 * which belongs to a different replica set.
	 */
	if (!tt_uuid_is_equal(&replicaset_uuid, &REPLICASET_UUID)) {
		tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
			  tt_uuid_str(&REPLICASET_UUID),
			  tt_uuid_str(&replicaset_uuid));
	}

	/* Check replica uuid */
	struct replica *replica = replica_by_uuid(&replica_uuid);
	if (replica == NULL || replica->id == REPLICA_ID_NIL) {
		tnt_raise(ClientError, ER_UNKNOWN_REPLICA,
			  tt_uuid_str(&replica_uuid),
			  tt_uuid_str(&REPLICASET_UUID));
	}

	/* Forbid replication with disabled WAL */
	if (wal_mode() == WAL_NONE) {
		tnt_raise(ClientError, ER_UNSUPPORTED, "Replication",
			  "wal_mode = 'none'");
	}

	/*
	 * Send a response to SUBSCRIBE request, tell
	 * the replica how many rows we have in stock for it,
	 * and identify ourselves with our own replica id.
	 */
	struct xrow_header row;
	struct vclock current_vclock;
	wal_checkpoint(&current_vclock, true);
	xrow_encode_vclock_xc(&row, &current_vclock);
	/*
	 * Identify the message with the replica id of this
	 * instance, this is the only way for a replica to find
	 * out the id of the instance it has connected to.
	 */
	struct replica *self = replica_by_uuid(&INSTANCE_UUID);
	assert(self != NULL); /* the local registration is read-only */
	row.replica_id = self->id;
	row.sync = header->sync;
	coio_write_xrow(io, &row);

	/*
	 * Process SUBSCRIBE request via replication relay
	 * Send current recovery vector clock as a marker
	 * of the "current" state of the master. When
	 * replica fetches rows up to this position,
	 * it enters read-write mode.
	 *
	 * @todo: this is not implemented, this is imperfect, and
	 * this is buggy in case there is rollback followed by
	 * a stall in updates (in this case replica may hang
	 * indefinitely).
	 */
	relay_subscribe(io->fd, header->sync, replica, &replica_clock,
			replica_version_id);
}

/** Insert a new cluster into _schema */
static void
box_set_replicaset_uuid(const struct tt_uuid *replicaset_uuid)
{
	tt_uuid uu;
	/* Use UUID from the config or generate a new one */
	if (!tt_uuid_is_nil(replicaset_uuid))
		uu = *replicaset_uuid;
	else
		tt_uuid_create(&uu);
	/* Save replica set UUID in _schema */
	if (boxk(IPROTO_REPLACE, BOX_SCHEMA_ID, "[%s%s]", "cluster",
		 tt_uuid_str(&uu)))
		diag_raise();
}

/** Tear down box subsystems on shutdown (partially - see gh-584). */
void
box_free(void)
{
	/*
	 * See gh-584 "box_free() is called even if box is not
	 * initialized
	 */
	if (is_box_configured) {
#if 0
		session_free();
		replication_free();
		user_cache_free();
		schema_free();
		module_free();
		tuple_free();
		port_free();
#endif
		sequence_free();
		gc_free();
		engine_shutdown();
		wal_thread_stop();
		identifier_destroy();
	}
	fiber_cond_destroy(&ro_cond);
}

/** Create and register all storage engines (memtx, sysview, vinyl). */
static void
engine_init()
{
	/*
	 * Sic: order is important here, since
	 * memtx must be the first to participate
	 * in checkpoints (in enigne_foreach order),
	 * so it must be registered first.
	 */
	struct memtx_engine *memtx;
	memtx = memtx_engine_new_xc(cfg_gets("memtx_dir"),
				    cfg_geti("force_recovery"),
				    cfg_getd("memtx_memory"),
				    cfg_geti("memtx_min_tuple_size"),
				    cfg_getd("slab_alloc_factor"));
	engine_register((struct engine *)memtx);
	box_set_memtx_max_tuple_size();

	struct sysview_engine *sysview = sysview_engine_new_xc();
	engine_register((struct engine *)sysview);

	struct vinyl_engine *vinyl;
	vinyl = vinyl_engine_new_xc(cfg_gets("vinyl_dir"),
				    cfg_geti64("vinyl_memory"),
				    cfg_geti("vinyl_read_threads"),
				    cfg_geti("vinyl_write_threads"),
				    cfg_geti("force_recovery"));
	engine_register((struct engine *)vinyl);
	box_set_vinyl_max_tuple_size();
	box_set_vinyl_cache();
	box_set_vinyl_timeout();
}

/**
 * Initialize the first replica of a new replica set.
 */
static void
bootstrap_master(const struct tt_uuid *replicaset_uuid)
{
	engine_bootstrap_xc();

	uint32_t replica_id = 1;

	/* Unregister a local replica if it was registered by bootstrap.bin */
	if (boxk(IPROTO_DELETE, BOX_CLUSTER_ID, "[%u]", 1) != 0)
		diag_raise();

	/* Register the first replica in the replica set */
	box_register_replica(replica_id, &INSTANCE_UUID);
	assert(replica_by_uuid(&INSTANCE_UUID)->id == 1);

	/* Register other cluster members */
	replicaset_foreach(replica) {
		if (tt_uuid_is_equal(&replica->uuid, &INSTANCE_UUID))
			continue;
		assert(replica->applier != NULL);
		box_register_replica(++replica_id, &replica->uuid);
		assert(replica->id == replica_id);
	}

	/* Set UUID of a new replica set */
	box_set_replicaset_uuid(replicaset_uuid);
}

/**
 * Bootstrap from the remote master
 * \pre master->applier->state == APPLIER_CONNECTED
 * \post master->applier->state == APPLIER_READY
 *
 * @param[out] start_vclock the vector time of the master
 *                          at the moment of replica bootstrap
 */
static void
bootstrap_from_master(struct replica *master)
{
	struct applier *applier = master->applier;
	assert(applier != NULL);
	applier_resume_to_state(applier, APPLIER_READY, TIMEOUT_INFINITY);
	assert(applier->state == APPLIER_READY);

	say_info("bootstrapping replica from %s",
		 sio_strfaddr(&applier->addr, applier->addr_len));

	/*
	 * Send JOIN request to master
	 * See box_process_join().
	 */
	assert(!tt_uuid_is_nil(&INSTANCE_UUID));
	applier_resume_to_state(applier, APPLIER_INITIAL_JOIN,
				TIMEOUT_INFINITY);

	/*
	 * Process initial data (snapshot or dirty disk data).
	 */
	engine_begin_initial_recovery_xc(NULL);
	applier_resume_to_state(applier, APPLIER_FINAL_JOIN,
				TIMEOUT_INFINITY);

	/*
	 * Process final data (WALs).
	 */
	engine_begin_final_recovery_xc();
	struct recovery_journal journal;
	recovery_journal_create(&journal, &replicaset.vclock);
	journal_set(&journal.base);

	applier_resume_to_state(applier, APPLIER_JOINED, TIMEOUT_INFINITY);
	/* Clear the pointer to journal before it goes out of scope */
	journal_set(NULL);

	/* Finalize the new replica */
	engine_end_recovery_xc();

	/* Switch applier to initial state */
	applier_resume_to_state(applier, APPLIER_READY, TIMEOUT_INFINITY);
	assert(applier->state == APPLIER_READY);
}

/**
 * Bootstrap a new instance either as the first master in a
 * replica set or as a replica of an existing master.
 *
 * @param[out] is_bootstrap_leader  set if this instance is
 *                                  the leader of a new cluster
 */
static void
bootstrap(const struct tt_uuid *replicaset_uuid, bool *is_bootstrap_leader)
{
	/* Use the first replica by URI as a bootstrap leader */
	struct replica *master = replicaset_leader();
	assert(master == NULL || master->applier != NULL);

	if (master != NULL &&
	    !tt_uuid_is_equal(&master->uuid, &INSTANCE_UUID)) {
		bootstrap_from_master(master);
		/* Check replica set UUID */
		if (!tt_uuid_is_nil(replicaset_uuid) &&
		    !tt_uuid_is_equal(replicaset_uuid, &REPLICASET_UUID)) {
			tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH,
				  tt_uuid_str(replicaset_uuid),
				  tt_uuid_str(&REPLICASET_UUID));
		}
	} else {
		bootstrap_master(replicaset_uuid);
		*is_bootstrap_leader = true;
	}
	if (engine_begin_checkpoint() ||
	    engine_commit_checkpoint(&replicaset.vclock))
		panic("failed to create a checkpoint");
}

/** ev callback draining the tx endpoint's cbus message queue. */
static void
tx_prio_cb(struct ev_loop *loop, ev_watcher *watcher, int events)
{
	(void) loop;
	(void) events;
	struct cbus_endpoint *endpoint =
		(struct cbus_endpoint *)watcher->data;
	cbus_process(endpoint);
}

/** One-time process-wide initialization of box subsystems. */
void
box_init(void)
{
	fiber_cond_create(&ro_cond);

	user_cache_init();
	/*
	 * The order is important: to initialize sessions,
	 * we need to access the admin user, which is used
	 * as a default session user when running triggers.
	 */
	session_init();

	if (tuple_init(lua_hash) != 0)
		diag_raise();

	sequence_init();
}

/** True once box_cfg() has completed. */
bool
box_is_configured(void)
{
	return is_box_configured;
}

static inline void
box_cfg_xc(void)
{
	/* Join the cord interconnect as "tx" endpoint. */
	fiber_pool_create(&tx_fiber_pool, "tx", FIBER_POOL_SIZE,
			  FIBER_POOL_IDLE_TIMEOUT);
	/* Add an extra endpoint for WAL wake up/rollback messages.
*/ cbus_endpoint_create(&tx_prio_endpoint, "tx_prio", tx_prio_cb, &tx_prio_endpoint); rmean_box = rmean_new(iproto_type_strs, IPROTO_TYPE_STAT_MAX); rmean_error = rmean_new(rmean_error_strings, RMEAN_ERROR_LAST); gc_init(); engine_init(); if (module_init() != 0) diag_raise(); identifier_init(); schema_init(); replication_init(); port_init(); iproto_init(); wal_thread_start(); title("loading"); struct tt_uuid instance_uuid, replicaset_uuid; box_check_instance_uuid(&instance_uuid); box_check_replicaset_uuid(&replicaset_uuid); box_set_checkpoint_count(); box_set_too_long_threshold(); box_set_replication_timeout(); box_set_replication_connect_timeout(); box_set_replication_connect_quorum(); replication_sync_lag = box_check_replication_sync_lag(); xstream_create(&join_stream, apply_initial_join_row); xstream_create(&subscribe_stream, apply_row); struct vclock last_checkpoint_vclock; int64_t last_checkpoint_lsn = checkpoint_last(&last_checkpoint_vclock); /* * Lock the write ahead log directory to avoid multiple * instances running in the same dir. */ if (path_lock(cfg_gets("wal_dir"), &wal_dir_lock) < 0) diag_raise(); if (wal_dir_lock < 0) { /** * The directory is busy and hot standby mode is off: * refuse to start. In hot standby mode, a busy * WAL dir must contain at least one xlog. */ if (!cfg_geti("hot_standby") || last_checkpoint_lsn < 0) tnt_raise(ClientError, ER_ALREADY_RUNNING, cfg_gets("wal_dir")); } else { /* * Try to bind the port before recovery, to fail * early if the port is busy. In hot standby mode, * the port is most likely busy. 
*/ box_bind(); } bool is_bootstrap_leader = false; if (last_checkpoint_lsn >= 0) { struct wal_stream wal_stream; wal_stream_create(&wal_stream, cfg_geti64("rows_per_wal")); struct recovery *recovery; recovery = recovery_new(cfg_gets("wal_dir"), cfg_geti("force_recovery"), &last_checkpoint_vclock); auto guard = make_scoped_guard([=]{ recovery_delete(recovery); }); /* * recovery->vclock is needed by Vinyl to filter * WAL rows that were dumped before restart. * * XXX: Passing an internal member of the recovery * object to an engine is an ugly hack. Instead we * should introduce Engine::applyWALRow method and * explicitly pass the statement LSN to it. */ engine_begin_initial_recovery_xc(&recovery->vclock); struct memtx_engine *memtx; memtx = (struct memtx_engine *)engine_by_name("memtx"); assert(memtx != NULL); struct recovery_journal journal; recovery_journal_create(&journal, &recovery->vclock); journal_set(&journal.base); /** * We explicitly request memtx to recover its * snapshot as a separate phase since it contains * data for system spaces, and triggers on * recovery of system spaces issue DDL events in * other engines. */ memtx_engine_recover_snapshot_xc(memtx, &last_checkpoint_vclock); engine_begin_final_recovery_xc(); recovery_follow_local(recovery, &wal_stream.base, "hot_standby", cfg_getd("wal_dir_rescan_delay")); title("hot_standby"); assert(!tt_uuid_is_nil(&INSTANCE_UUID)); /* * Leave hot standby mode, if any, only * after acquiring the lock. */ if (wal_dir_lock < 0) { say_info("Entering hot standby mode"); while (true) { if (path_lock(cfg_gets("wal_dir"), &wal_dir_lock)) diag_raise(); if (wal_dir_lock >= 0) break; fiber_sleep(0.1); } box_bind(); } recovery_finalize(recovery, &wal_stream.base); engine_end_recovery_xc(); /* Check replica set and instance UUID. 
*/ if (!tt_uuid_is_nil(&instance_uuid) && !tt_uuid_is_equal(&instance_uuid, &INSTANCE_UUID)) { tnt_raise(ClientError, ER_INSTANCE_UUID_MISMATCH, tt_uuid_str(&instance_uuid), tt_uuid_str(&INSTANCE_UUID)); } if (!tt_uuid_is_nil(&replicaset_uuid) && !tt_uuid_is_equal(&replicaset_uuid, &REPLICASET_UUID)) { tnt_raise(ClientError, ER_REPLICASET_UUID_MISMATCH, tt_uuid_str(&replicaset_uuid), tt_uuid_str(&REPLICASET_UUID)); } /* Clear the pointer to journal before it goes out of scope */ journal_set(NULL); /* * Initialize the replica set vclock from recovery. * The local WAL may contain rows from remote masters, * so we must reflect this in replicaset vclock to * not attempt to apply these rows twice. */ vclock_copy(&replicaset.vclock, &recovery->vclock); /** Begin listening only when the local recovery is complete. */ box_listen(); title("orphan"); /* Wait for the cluster to start up */ box_sync_replication(replication_connect_timeout, false); } else { if (!tt_uuid_is_nil(&instance_uuid)) INSTANCE_UUID = instance_uuid; else tt_uuid_create(&INSTANCE_UUID); /* * Begin listening on the socket to enable * master-master replication leader election. */ box_listen(); title("orphan"); /* * Wait for the cluster to start up. * * Note, when bootstrapping a new instance, we have to * connect to all masters to make sure all replicas * receive the same replica set UUID when a new cluster * is deployed. 
*/ box_sync_replication(TIMEOUT_INFINITY, true); /* Bootstrap a new master */ bootstrap(&replicaset_uuid, &is_bootstrap_leader); } fiber_gc(); /* Check for correct registration of the instance in _cluster */ { struct replica *self = replica_by_uuid(&INSTANCE_UUID); if (self == NULL || self->id == REPLICA_ID_NIL) { tnt_raise(ClientError, ER_UNKNOWN_REPLICA, tt_uuid_str(&INSTANCE_UUID), tt_uuid_str(&REPLICASET_UUID)); } } /* Start WAL writer */ int64_t wal_max_rows = box_check_wal_max_rows(cfg_geti64("rows_per_wal")); int64_t wal_max_size = box_check_wal_max_size(cfg_geti64("wal_max_size")); enum wal_mode wal_mode = box_check_wal_mode(cfg_gets("wal_mode")); wal_init(wal_mode, cfg_gets("wal_dir"), &INSTANCE_UUID, &replicaset.vclock, wal_max_rows, wal_max_size); rmean_cleanup(rmean_box); /* * If this instance is a leader of a newly bootstrapped * cluster, it is uptodate by definition so leave the * 'orphan' mode right away to let it initialize cluster * schema. */ if (is_bootstrap_leader) box_clear_orphan(); /* Follow replica */ replicaset_follow(); fiber_gc(); is_box_configured = true; if (!is_bootstrap_leader) replicaset_sync(); say_info("ready to accept requests"); } void box_cfg(void) { try { box_cfg_xc(); } catch (Exception *e) { e->log(); panic("can't initialize storage: %s", e->get_errmsg()); } } /** * box.coredump() forks to save a core. The entire * server forks in box.cfg{} if background=true. */ void box_atfork() { wal_atfork(); } int box_checkpoint() { /* Signal arrived before box.cfg{} */ if (! 
is_box_configured) return 0; int rc = 0; if (box_checkpoint_is_in_progress) { diag_set(ClientError, ER_CHECKPOINT_IN_PROGRESS); return -1; } box_checkpoint_is_in_progress = true; /* create checkpoint files */ latch_lock(&schema_lock); if ((rc = engine_begin_checkpoint())) goto end; struct vclock vclock; if ((rc = wal_checkpoint(&vclock, true))) { tnt_error(ClientError, ER_CHECKPOINT_ROLLBACK); goto end; } rc = engine_commit_checkpoint(&vclock); end: if (rc) engine_abort_checkpoint(); else gc_run(); latch_unlock(&schema_lock); box_checkpoint_is_in_progress = false; return rc; } int box_backup_start(box_backup_cb cb, void *cb_arg) { if (backup_gc != NULL) { diag_set(ClientError, ER_BACKUP_IN_PROGRESS); return -1; } struct vclock vclock; if (checkpoint_last(&vclock) < 0) { diag_set(ClientError, ER_MISSING_SNAPSHOT); return -1; } backup_gc = gc_consumer_register("backup", vclock_sum(&vclock)); if (backup_gc == NULL) return -1; int rc = engine_backup(&vclock, cb, cb_arg); if (rc != 0) { gc_consumer_unregister(backup_gc); backup_gc = NULL; } return rc; } void box_backup_stop(void) { if (backup_gc != NULL) { gc_consumer_unregister(backup_gc); backup_gc = NULL; } } const char * box_status(void) { return status; } tarantool_1.9.1.26.g63eb81e3c/src/box/errcode.c0000664000000000000000000000311213306560010017343 0ustar rootroot#include "errcode.h" /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "errcode.h" #define ERRCODE_RECORD_MEMBER(s, d) { \ .errstr = #s, \ .errdesc = d \ }, struct errcode_record box_error_codes[box_error_code_MAX] = { ERROR_CODES(ERRCODE_RECORD_MEMBER) }; tarantool_1.9.1.26.g63eb81e3c/src/box/relay.cc0000664000000000000000000004075313306565107017227 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "relay.h" #include "trivia/config.h" #include "trivia/util.h" #include "cbus.h" #include "cfg.h" #include "errinj.h" #include "fiber.h" #include "say.h" #include "coio.h" #include "coio_task.h" #include "engine.h" #include "gc.h" #include "iproto_constants.h" #include "recovery.h" #include "replication.h" #include "trigger.h" #include "vclock.h" #include "version.h" #include "xrow.h" #include "xrow_io.h" #include "xstream.h" #include "wal.h" /** * Cbus message to send status updates from relay to tx thread. */ struct relay_status_msg { /** Parent */ struct cmsg msg; /** Relay instance */ struct relay *relay; /** Replica vclock. */ struct vclock vclock; }; /** * Cbus message to update replica gc state in tx thread. */ struct relay_gc_msg { /** Parent */ struct cmsg msg; /** * Link in the list of pending gc messages, * see relay::pending_gc. */ struct stailq_entry in_pending; /** Relay instance */ struct relay *relay; /** Vclock signature to advance to */ int64_t signature; }; /** State of a replication relay. */ struct relay { /** The thread in which we relay data to the replica. */ struct cord cord; /** Replica connection */ struct ev_io io; /** Request sync */ uint64_t sync; /** Recovery instance to read xlog from the disk */ struct recovery *r; /** Xstream argument to recovery */ struct xstream stream; /** Vclock to stop playing xlogs */ struct vclock stop_vclock; /** Remote replica */ struct replica *replica; /** WAL event watcher. 
*/ struct wal_watcher wal_watcher; /** Set before exiting the relay loop. */ bool exiting; /** Relay reader cond. */ struct fiber_cond reader_cond; /** Relay diagnostics. */ struct diag diag; /** Vclock recieved from replica. */ struct vclock recv_vclock; /** Replicatoin slave version. */ uint32_t version_id; /** * Local vclock at the moment of subscribe, used to check * dataset on the other side and send missing data rows if any. */ struct vclock local_vclock_at_subscribe; /** Relay endpoint */ struct cbus_endpoint endpoint; /** A pipe from 'relay' thread to 'tx' */ struct cpipe tx_pipe; /** A pipe from 'tx' thread to 'relay' */ struct cpipe relay_pipe; /** Status message */ struct relay_status_msg status_msg; /** * List of garbage collection messages awaiting * confirmation from the replica. */ struct stailq pending_gc; /** Time when last row was sent to peer. */ double last_row_tm; struct { /* Align to prevent false-sharing with tx thread */ alignas(CACHELINE_SIZE) /** Known relay vclock. 
*/ struct vclock vclock; } tx; }; const struct vclock * relay_vclock(const struct relay *relay) { return &relay->tx.vclock; } static void relay_send(struct relay *relay, struct xrow_header *packet); static void relay_send_initial_join_row(struct xstream *stream, struct xrow_header *row); static void relay_send_row(struct xstream *stream, struct xrow_header *row); static void relay_create(struct relay *relay, int fd, uint64_t sync, void (*stream_write)(struct xstream *, struct xrow_header *)) { memset(relay, 0, sizeof(*relay)); xstream_create(&relay->stream, stream_write); coio_create(&relay->io, fd); relay->sync = sync; fiber_cond_create(&relay->reader_cond); diag_create(&relay->diag); stailq_create(&relay->pending_gc); } static void relay_destroy(struct relay *relay) { struct relay_gc_msg *gc_msg, *next_gc_msg; stailq_foreach_entry_safe(gc_msg, next_gc_msg, &relay->pending_gc, in_pending) { free(gc_msg); } if (relay->r != NULL) recovery_delete(relay->r); fiber_cond_destroy(&relay->reader_cond); diag_destroy(&relay->diag); TRASH(relay); } static void relay_set_cord_name(int fd) { char name[FIBER_NAME_MAX]; struct sockaddr_storage peer; socklen_t addrlen = sizeof(peer); if (getpeername(fd, ((struct sockaddr*)&peer), &addrlen) == 0) { snprintf(name, sizeof(name), "relay/%s", sio_strfaddr((struct sockaddr *)&peer, addrlen)); } else { snprintf(name, sizeof(name), "relay/"); } cord_set_name(name); } void relay_initial_join(int fd, uint64_t sync, struct vclock *vclock) { struct relay relay; relay_create(&relay, fd, sync, relay_send_initial_join_row); assert(relay.stream.write != NULL); engine_join_xc(vclock, &relay.stream); relay_destroy(&relay); } int relay_final_join_f(va_list ap) { struct relay *relay = va_arg(ap, struct relay *); coio_enable(); relay_set_cord_name(relay->io.fd); /* Send all WALs until stop_vclock */ assert(relay->stream.write != NULL); recover_remaining_wals(relay->r, &relay->stream, &relay->stop_vclock, true); 
assert(vclock_compare(&relay->r->vclock, &relay->stop_vclock) == 0); return 0; } void relay_final_join(int fd, uint64_t sync, struct vclock *start_vclock, struct vclock *stop_vclock) { struct relay relay; relay_create(&relay, fd, sync, relay_send_row); relay.r = recovery_new(cfg_gets("wal_dir"), cfg_geti("force_recovery"), start_vclock); vclock_copy(&relay.stop_vclock, stop_vclock); int rc = cord_costart(&relay.cord, "final_join", relay_final_join_f, &relay); if (rc == 0) rc = cord_cojoin(&relay.cord); relay_destroy(&relay); if (rc != 0) diag_raise(); ERROR_INJECT(ERRINJ_RELAY_FINAL_SLEEP, { while (vclock_compare(stop_vclock, &replicaset.vclock) == 0) fiber_sleep(0.001); }); } /** * The message which updated tx thread with a new vclock has returned back * to the relay. */ static void relay_status_update(struct cmsg *msg) { msg->route = NULL; } /** * Deliver a fresh relay vclock to tx thread. */ static void tx_status_update(struct cmsg *msg) { struct relay_status_msg *status = (struct relay_status_msg *)msg; vclock_copy(&status->relay->tx.vclock, &status->vclock); static const struct cmsg_hop route[] = { {relay_status_update, NULL} }; cmsg_init(msg, route); cpipe_push(&status->relay->relay_pipe, msg); } /** * Update replica gc state in tx thread. */ static void tx_gc_advance(struct cmsg *msg) { struct relay_gc_msg *m = (struct relay_gc_msg *)msg; gc_consumer_advance(m->relay->replica->gc, m->signature); free(m); } static void relay_on_close_log_f(struct trigger *trigger, void * /* event */) { static const struct cmsg_hop route[] = { {tx_gc_advance, NULL} }; struct relay *relay = (struct relay *)trigger->data; struct relay_gc_msg *m = (struct relay_gc_msg *)malloc(sizeof(*m)); if (m == NULL) { say_warn("failed to allocate relay gc message"); return; } cmsg_init(&m->msg, route); m->relay = relay; m->signature = vclock_sum(&relay->r->vclock); /* * Do not invoke garbage collection until the replica * confirms that it has received data stored in the * sent xlog. 
*/ stailq_add_tail_entry(&relay->pending_gc, m, in_pending); } /** * Invoke pending garbage collection requests. * * This function schedules the most recent gc message whose * signature is less than or equal to the given one. Older * messages are discarded as their job will be done by the * scheduled message anyway. */ static inline void relay_schedule_pending_gc(struct relay *relay, int64_t signature) { struct relay_gc_msg *curr, *next, *gc_msg = NULL; stailq_foreach_entry_safe(curr, next, &relay->pending_gc, in_pending) { if (curr->signature > signature) break; stailq_shift(&relay->pending_gc); free(gc_msg); gc_msg = curr; } if (gc_msg != NULL) cpipe_push(&relay->tx_pipe, &gc_msg->msg); } static void relay_process_wal_event(struct wal_watcher *watcher, unsigned events) { struct relay *relay = container_of(watcher, struct relay, wal_watcher); if (relay->exiting) { /* * Do not try to send anything to the replica * if it already closed its socket. */ return; } try { recover_remaining_wals(relay->r, &relay->stream, NULL, (events & WAL_EVENT_ROTATE) != 0); } catch (Exception *e) { e->log(); diag_move(diag_get(), &relay->diag); fiber_cancel(fiber()); } } /* * Relay reader fiber function. * Read xrow encoded vclocks sent by the replica. */ int relay_reader_f(va_list ap) { struct relay *relay = va_arg(ap, struct relay *); struct fiber *relay_f = va_arg(ap, struct fiber *); struct ibuf ibuf; struct ev_io io; coio_create(&io, relay->io.fd); ibuf_create(&ibuf, &cord()->slabc, 1024); try { while (!fiber_is_cancelled()) { struct xrow_header xrow; coio_read_xrow_timeout_xc(&io, &ibuf, &xrow, replication_disconnect_timeout()); /* vclock is followed while decoding, zeroing it. */ vclock_create(&relay->recv_vclock); xrow_decode_vclock_xc(&xrow, &relay->recv_vclock); fiber_cond_signal(&relay->reader_cond); } } catch (Exception *e) { if (diag_is_empty(&relay->diag)) { /* Don't override existing error. 
*/ diag_move(diag_get(), &relay->diag); fiber_cancel(relay_f); } else if (!fiber_is_cancelled()) { /* * There is an relay error and this fiber * fiber has another, log it. */ e->log(); } } ibuf_destroy(&ibuf); return 0; } /** * Send a heartbeat message over a connected relay. */ static void relay_send_heartbeat(struct relay *relay) { struct xrow_header row; xrow_encode_timestamp(&row, instance_id, ev_now(loop())); try { relay_send(relay, &row); } catch (Exception *e) { e->log(); } } /** * A libev callback invoked when a relay client socket is ready * for read. This currently only happens when the client closes * its socket, and we get an EOF. */ static int relay_subscribe_f(va_list ap) { struct relay *relay = va_arg(ap, struct relay *); struct recovery *r = relay->r; coio_enable(); cbus_endpoint_create(&relay->endpoint, cord_name(cord()), fiber_schedule_cb, fiber()); cbus_pair("tx", cord_name(cord()), &relay->tx_pipe, &relay->relay_pipe, NULL, NULL, cbus_process); /* Setup garbage collection trigger. */ struct trigger on_close_log = { RLIST_LINK_INITIALIZER, relay_on_close_log_f, relay, NULL }; trigger_add(&r->on_close_log, &on_close_log); wal_set_watcher(&relay->wal_watcher, cord_name(cord()), relay_process_wal_event, cbus_process); relay_set_cord_name(relay->io.fd); char name[FIBER_NAME_MAX]; snprintf(name, sizeof(name), "%s:%s", fiber()->name, "reader"); struct fiber *reader = fiber_new_xc(name, relay_reader_f); fiber_set_joinable(reader, true); fiber_start(reader, relay, fiber()); /* * If the replica happens to be up to date on subscribe, * don't wait for timeout to happen - send a heartbeat * message right away to update the replication lag as * soon as possible. 
*/ relay_send_heartbeat(relay); while (!fiber_is_cancelled()) { double timeout = replication_timeout; struct errinj *inj = errinj(ERRINJ_RELAY_REPORT_INTERVAL, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam != 0) timeout = inj->dparam; fiber_cond_wait_deadline(&relay->reader_cond, relay->last_row_tm + timeout); /* * The fiber can be woken by IO cancel, by a timeout of * status messaging or by an acknowledge to status message. * Handle cbus messages first. */ cbus_process(&relay->endpoint); /* Check for a heartbeat timeout. */ if (ev_monotonic_now(loop()) - relay->last_row_tm > timeout) relay_send_heartbeat(relay); /* * Check that the vclock has been updated and the previous * status message is delivered */ if (relay->status_msg.msg.route != NULL) continue; struct vclock *send_vclock; if (relay->version_id < version_id(1, 7, 4)) send_vclock = &r->vclock; else send_vclock = &relay->recv_vclock; if (vclock_sum(&relay->status_msg.vclock) == vclock_sum(send_vclock)) continue; static const struct cmsg_hop route[] = { {tx_status_update, NULL} }; cmsg_init(&relay->status_msg.msg, route); vclock_copy(&relay->status_msg.vclock, send_vclock); relay->status_msg.relay = relay; cpipe_push(&relay->tx_pipe, &relay->status_msg.msg); /* Collect xlog files received by the replica. */ relay_schedule_pending_gc(relay, vclock_sum(send_vclock)); } say_crit("exiting the relay loop"); if (!fiber_is_dead(reader)) fiber_cancel(reader); fiber_join(reader); relay->exiting = true; trigger_clear(&on_close_log); wal_clear_watcher(&relay->wal_watcher, cbus_process); cbus_unpair(&relay->tx_pipe, &relay->relay_pipe, NULL, NULL, cbus_process); cbus_endpoint_destroy(&relay->endpoint, cbus_process); if (!diag_is_empty(&relay->diag)) { /* An error has occured while ACKs of xlog reading */ diag_move(&relay->diag, diag_get()); } struct errinj *inj = errinj(ERRINJ_RELAY_EXIT_DELAY, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam > 0) fiber_sleep(inj->dparam); return diag_is_empty(diag_get()) ? 
0: -1; } /** Replication acceptor fiber handler. */ void relay_subscribe(int fd, uint64_t sync, struct replica *replica, struct vclock *replica_clock, uint32_t replica_version_id) { assert(replica->id != REPLICA_ID_NIL); /* Don't allow multiple relays for the same replica */ if (replica->relay != NULL) { tnt_raise(ClientError, ER_CFG, "replication", "duplicate connection with the same replica UUID"); } /* * Register the replica with the garbage collector * unless it has already been registered by initial * join. */ if (replica->gc == NULL) { replica->gc = gc_consumer_register( tt_sprintf("replica %s", tt_uuid_str(&replica->uuid)), vclock_sum(replica_clock)); if (replica->gc == NULL) diag_raise(); } struct relay relay; relay_create(&relay, fd, sync, relay_send_row); relay.r = recovery_new(cfg_gets("wal_dir"), cfg_geti("force_recovery"), replica_clock); vclock_copy(&relay.tx.vclock, replica_clock); relay.version_id = replica_version_id; relay.replica = replica; replica_set_relay(replica, &relay); vclock_copy(&relay.local_vclock_at_subscribe, &replicaset.vclock); int rc = cord_costart(&relay.cord, tt_sprintf("relay_%p", &relay), relay_subscribe_f, &relay); if (rc == 0) rc = cord_cojoin(&relay.cord); replica_clear_relay(replica); relay_destroy(&relay); if (rc != 0) diag_raise(); } static void relay_send(struct relay *relay, struct xrow_header *packet) { packet->sync = relay->sync; relay->last_row_tm = ev_monotonic_now(loop()); coio_write_xrow(&relay->io, packet); fiber_gc(); struct errinj *inj = errinj(ERRINJ_RELAY_TIMEOUT, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam > 0) fiber_sleep(inj->dparam); } static void relay_send_initial_join_row(struct xstream *stream, struct xrow_header *row) { struct relay *relay = container_of(stream, struct relay, stream); relay_send(relay, row); } /** Send a single row to the client. 
*/ static void relay_send_row(struct xstream *stream, struct xrow_header *packet) { struct relay *relay = container_of(stream, struct relay, stream); assert(iproto_type_is_dml(packet->type)); /* * We're feeding a WAL, thus responding to SUBSCRIBE request. * In that case, only send a row if it is not from the same replica * (i.e. don't send replica's own rows back) or if this row is * missing on the other side (i.e. in case of sudden power-loss, * data was not written to WAL, so remote master can't recover * it). In the latter case packet's LSN is less than or equal to * local master's LSN at the moment it received 'SUBSCRIBE' request. */ if (relay->replica == NULL || packet->replica_id != relay->replica->id || packet->lsn <= vclock_get(&relay->local_vclock_at_subscribe, packet->replica_id)) { relay_send(relay, packet); } } tarantool_1.9.1.26.g63eb81e3c/src/box/wal.cc0000664000000000000000000006505413306565107016677 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "wal.h" #include "vclock.h" #include "fiber.h" #include "fio.h" #include "errinj.h" #include "xlog.h" #include "xrow.h" #include "vy_log.h" #include "cbus.h" #include "coio_task.h" #include "replication.h" const char *wal_mode_STRS[] = { "none", "write", "fsync", NULL }; int wal_dir_lock = -1; static int64_t wal_write(struct journal *, struct journal_entry *); static int64_t wal_write_in_wal_mode_none(struct journal *, struct journal_entry *); /* WAL thread. */ struct wal_thread { /** 'wal' thread doing the writes. */ struct cord cord; /** A pipe from 'tx' thread to 'wal' */ struct cpipe wal_pipe; /** Return pipe from 'wal' to tx' */ struct cpipe tx_pipe; }; /* * WAL writer - maintain a Write Ahead Log for every change * in the data state. * * @sic the members are arranged to ensure proper cache alignment, * members used mainly in tx thread go first, wal thread members * following. */ struct wal_writer { struct journal base; /* ----------------- tx ------------------- */ /** * The rollback queue. An accumulator for all requests * that need to be rolled back. Also acts as a valve * in wal_write() so that new requests never enter * the wal-tx bus and are rolled back "on arrival". 
*/ struct stailq rollback; /* ----------------- wal ------------------- */ /** A setting from instance configuration - rows_per_wal */ int64_t wal_max_rows; /** A setting from instance configuration - wal_max_size */ int64_t wal_max_size; /** Another one - wal_mode */ enum wal_mode wal_mode; /** wal_dir, from the configuration file. */ struct xdir wal_dir; /** * The vector clock of the WAL writer. It's a bit behind * the vector clock of the transaction thread, since it * "follows" the tx vector clock. * By "following" we mean this: whenever a transaction * is started in 'tx' thread, it's assigned a tentative * LSN. If the transaction is rolled back, this LSN * is abandoned. Otherwise, after the transaction is written * to the log with this LSN, WAL writer vclock is advanced * with this LSN and LSN becomes "real". */ struct vclock vclock; /** The current WAL file. */ struct xlog current_wal; /** * Used if there was a WAL I/O error and we need to * keep adding all incoming requests to the rollback * queue, until the tx thread has recovered. */ struct cmsg in_rollback; /** * WAL watchers, i.e. threads that should be alerted * whenever there are new records appended to the journal. * Used for replication relays. */ struct rlist watchers; }; struct wal_msg: public cmsg { /** Input queue, on output contains all committed requests. */ struct stailq commit; /** * In case of rollback, contains the requests which must * be rolled back. */ struct stailq rollback; }; /** * Vinyl metadata log writer. */ struct vy_log_writer { /** The metadata log file. 
*/ struct xlog xlog; }; static struct vy_log_writer vy_log_writer; static struct wal_thread wal_thread; static struct wal_writer wal_writer_singleton; enum wal_mode wal_mode() { return wal_writer_singleton.wal_mode; } static void wal_write_to_disk(struct cmsg *msg); static void tx_schedule_commit(struct cmsg *msg); static struct cmsg_hop wal_request_route[] = { {wal_write_to_disk, &wal_thread.tx_pipe}, {tx_schedule_commit, NULL}, }; static void wal_msg_create(struct wal_msg *batch) { cmsg_init(batch, wal_request_route); stailq_create(&batch->commit); stailq_create(&batch->rollback); } static struct wal_msg * wal_msg(struct cmsg *msg) { return msg->route == wal_request_route ? (struct wal_msg *) msg : NULL; } /** Write a request to a log in a single transaction. */ static ssize_t xlog_write_entry(struct xlog *l, struct journal_entry *entry) { /* * Iterate over request rows (tx statements) */ xlog_tx_begin(l); struct xrow_header **row = entry->rows; for (; row < entry->rows + entry->n_rows; row++) { (*row)->tm = ev_now(loop()); if (xlog_write_row(l, *row) < 0) { /* * Rollback all un-written rows */ xlog_tx_rollback(l); return -1; } } return xlog_tx_commit(l); } /** * Invoke fibers waiting for their journal_entry's to be * completed. The fibers are invoked in strict fifo order: * this ensures that, in case of rollback, requests are * rolled back in strict reverse order, producing * a consistent database state. */ static void tx_schedule_queue(struct stailq *queue) { /* * fiber_wakeup() is faster than fiber_call() when there * are many ready fibers. */ struct journal_entry *req; stailq_foreach_entry(req, queue, fifo) fiber_wakeup(req->fiber); } /** * Complete execution of a batch of WAL write requests: * schedule all committed requests, and, should there * be any requests to be rolled back, append them to * the rollback queue. 
*/ static void tx_schedule_commit(struct cmsg *msg) { struct wal_msg *batch = (struct wal_msg *) msg; /* * Move the rollback list to the writer first, since * wal_msg memory disappears after the first * iteration of tx_schedule_queue loop. */ if (! stailq_empty(&batch->rollback)) { struct wal_writer *writer = &wal_writer_singleton; /* Closes the input valve. */ stailq_concat(&writer->rollback, &batch->rollback); } tx_schedule_queue(&batch->commit); } static void tx_schedule_rollback(struct cmsg *msg) { (void) msg; struct wal_writer *writer = &wal_writer_singleton; /* * Perform a cascading abort of all transactions which * depend on the transaction which failed to get written * to the write ahead log. Abort transactions * in reverse order, performing a playback of the * in-memory database state. */ stailq_reverse(&writer->rollback); /* Must not yield. */ tx_schedule_queue(&writer->rollback); stailq_create(&writer->rollback); } /** * Initialize WAL writer context. Even though it's a singleton, * encapsulate the details just in case we may use * more writers in the future. */ static void wal_writer_create(struct wal_writer *writer, enum wal_mode wal_mode, const char *wal_dirname, const struct tt_uuid *instance_uuid, struct vclock *vclock, int64_t wal_max_rows, int64_t wal_max_size) { writer->wal_mode = wal_mode; writer->wal_max_rows = wal_max_rows; writer->wal_max_size = wal_max_size; journal_create(&writer->base, wal_mode == WAL_NONE ? wal_write_in_wal_mode_none : wal_write, NULL); xdir_create(&writer->wal_dir, wal_dirname, XLOG, instance_uuid); xlog_clear(&writer->current_wal); if (wal_mode == WAL_FSYNC) writer->wal_dir.open_wflags |= O_SYNC; stailq_create(&writer->rollback); cmsg_init(&writer->in_rollback, NULL); /* Create and fill writer->vclock. */ vclock_create(&writer->vclock); vclock_copy(&writer->vclock, vclock); rlist_create(&writer->watchers); } /** Destroy a WAL writer structure. 
*/ static void wal_writer_destroy(struct wal_writer *writer) { xdir_destroy(&writer->wal_dir); } /** WAL thread routine. */ static int wal_thread_f(va_list ap); /** Start WAL thread and setup pipes to and from TX. */ void wal_thread_start() { if (cord_costart(&wal_thread.cord, "wal", wal_thread_f, NULL) != 0) panic("failed to start WAL thread"); /* Create a pipe to WAL thread. */ cpipe_create(&wal_thread.wal_pipe, "wal"); cpipe_set_max_input(&wal_thread.wal_pipe, IOV_MAX); } /** * Initialize WAL writer. * * @pre The instance has completed recovery from a snapshot * and/or existing WALs. All WALs opened in read-only * mode are closed. WAL thread has been started. */ void wal_init(enum wal_mode wal_mode, const char *wal_dirname, const struct tt_uuid *instance_uuid, struct vclock *vclock, int64_t wal_max_rows, int64_t wal_max_size) { assert(wal_max_rows > 1); struct wal_writer *writer = &wal_writer_singleton; wal_writer_create(writer, wal_mode, wal_dirname, instance_uuid, vclock, wal_max_rows, wal_max_size); xdir_scan_xc(&writer->wal_dir); journal_set(&writer->base); } /** * Stop WAL thread, wait until it exits, and destroy WAL writer * if it was initialized. Called on shutdown. */ void wal_thread_stop() { cbus_stop_loop(&wal_thread.wal_pipe); if (cord_join(&wal_thread.cord)) { /* We can't recover from this in any reasonable way. */ panic_syserror("WAL writer: thread join failed"); } if (journal_is_initialized(&wal_writer_singleton.base)) wal_writer_destroy(&wal_writer_singleton); } struct wal_checkpoint: public cmsg { struct vclock *vclock; struct fiber *fiber; bool rotate; int res; }; void wal_checkpoint_f(struct cmsg *data) { struct wal_checkpoint *msg = (struct wal_checkpoint *) data; struct wal_writer *writer = &wal_writer_singleton; if (writer->in_rollback.route != NULL) { /* We're rolling back a failed write. */ msg->res = -1; return; } /* * Avoid closing the current WAL if it has no rows (empty). 
*/ if (msg->rotate && xlog_is_open(&writer->current_wal) && vclock_sum(&writer->current_wal.meta.vclock) != vclock_sum(&writer->vclock)) { xlog_close(&writer->current_wal, false); /* * Avoid creating an empty xlog if this is the * last snapshot before shutdown. */ } vclock_copy(msg->vclock, &writer->vclock); } void wal_checkpoint_done_f(struct cmsg *data) { struct wal_checkpoint *msg = (struct wal_checkpoint *) data; fiber_wakeup(msg->fiber); } int wal_checkpoint(struct vclock *vclock, bool rotate) { struct wal_writer *writer = &wal_writer_singleton; if (! stailq_empty(&writer->rollback)) { /* * The writer rollback queue is not empty, * roll back this transaction immediately. * This is to ensure we do not accidentally * commit a transaction which has seen changes * that will be rolled back. */ say_error("Aborting transaction %llu during " "cascading rollback", vclock_sum(&writer->vclock)); return -1; } if (writer->wal_mode == WAL_NONE) { vclock_copy(vclock, &writer->vclock); return 0; } static struct cmsg_hop wal_checkpoint_route[] = { {wal_checkpoint_f, &wal_thread.tx_pipe}, {wal_checkpoint_done_f, NULL}, }; vclock_create(vclock); struct wal_checkpoint msg; cmsg_init(&msg, wal_checkpoint_route); msg.vclock = vclock; msg.fiber = fiber(); msg.rotate = rotate; msg.res = 0; cpipe_push(&wal_thread.wal_pipe, &msg); fiber_set_cancellable(false); fiber_yield(); fiber_set_cancellable(true); return msg.res; } struct wal_gc_msg: public cbus_call_msg { int64_t lsn; }; static int wal_collect_garbage_f(struct cbus_call_msg *data) { int64_t lsn = ((struct wal_gc_msg *)data)->lsn; xdir_collect_garbage(&wal_writer_singleton.wal_dir, lsn, false); return 0; } void wal_collect_garbage(int64_t lsn) { struct wal_writer *writer = &wal_writer_singleton; if (writer->wal_mode == WAL_NONE) return; struct wal_gc_msg msg; msg.lsn = lsn; bool cancellable = fiber_set_cancellable(false); cbus_call(&wal_thread.wal_pipe, &wal_thread.tx_pipe, &msg, wal_collect_garbage_f, NULL, TIMEOUT_INFINITY); 
fiber_set_cancellable(cancellable); } static void wal_notify_watchers(struct wal_writer *writer, unsigned events); /** * If there is no current WAL, try to open it, and close the * previous WAL. We close the previous WAL only after opening * a new one to smoothly move local hot standby and replication * over to the next WAL. * In case of error, we try to close any open WALs. * * @post r->current_wal is in a good shape for writes or is NULL. * @return 0 in case of success, -1 on error. */ static int wal_opt_rotate(struct wal_writer *writer) { ERROR_INJECT_RETURN(ERRINJ_WAL_ROTATE); /* * Close the file *before* we create the new WAL, to * make sure local hot standby/replication can see * EOF in the old WAL before switching to the new * one. */ if (xlog_is_open(&writer->current_wal) && (writer->current_wal.rows >= writer->wal_max_rows || writer->current_wal.offset >= writer->wal_max_size)) { /* * We can not handle xlog_close() * failure in any reasonable way. * A warning is written to the error log. */ xlog_close(&writer->current_wal, false); } if (xlog_is_open(&writer->current_wal)) return 0; struct vclock *vclock = (struct vclock *)malloc(sizeof(*vclock)); if (vclock == NULL) { diag_set(OutOfMemory, sizeof(*vclock), "malloc", "struct vclock"); diag_log(); return -1; } vclock_copy(vclock, &writer->vclock); if (xdir_create_xlog(&writer->wal_dir, &writer->current_wal, &writer->vclock) != 0) { diag_log(); free(vclock); return -1; } xdir_add_vclock(&writer->wal_dir, vclock); wal_notify_watchers(writer, WAL_EVENT_ROTATE); return 0; } static void wal_writer_clear_bus(struct cmsg *msg) { (void) msg; } static void wal_writer_end_rollback(struct cmsg *msg) { (void) msg; struct wal_writer *writer = &wal_writer_singleton; cmsg_init(&writer->in_rollback, NULL); } static void wal_writer_begin_rollback(struct wal_writer *writer) { static struct cmsg_hop rollback_route[4] = { /* * Step 1: clear the bus, so that it contains * no WAL write requests. 
This is achieved as a * side effect of an empty message travelling * through both bus pipes, while writer input * valve is closed by non-empty writer->rollback * list. */ { wal_writer_clear_bus, &wal_thread.wal_pipe }, { wal_writer_clear_bus, &wal_thread.tx_pipe }, /* * Step 2: writer->rollback queue contains all * messages which need to be rolled back, * perform the rollback. */ { tx_schedule_rollback, &wal_thread.wal_pipe }, /* * Step 3: re-open the WAL for writing. */ { wal_writer_end_rollback, NULL } }; /* * Make sure the WAL writer rolls back * all input until rollback mode is off. */ cmsg_init(&writer->in_rollback, rollback_route); cpipe_push(&wal_thread.tx_pipe, &writer->in_rollback); } static void wal_assign_lsn(struct wal_writer *writer, struct xrow_header **row, struct xrow_header **end) { /** Assign LSN to all local rows. */ for ( ; row < end; row++) { if ((*row)->replica_id == 0) { (*row)->lsn = vclock_inc(&writer->vclock, instance_id); (*row)->replica_id = instance_id; } else { vclock_follow(&writer->vclock, (*row)->replica_id, (*row)->lsn); } } } static void wal_write_to_disk(struct cmsg *msg) { struct wal_writer *writer = &wal_writer_singleton; struct wal_msg *wal_msg = (struct wal_msg *) msg; struct errinj *inj = errinj(ERRINJ_WAL_DELAY, ERRINJ_BOOL); while (inj != NULL && inj->bparam) usleep(10); if (writer->in_rollback.route != NULL) { /* We're rolling back a failed write. */ stailq_concat(&wal_msg->rollback, &wal_msg->commit); return; } /* Xlog is only rotated between queue processing */ if (wal_opt_rotate(writer) != 0) { stailq_concat(&wal_msg->rollback, &wal_msg->commit); return wal_writer_begin_rollback(writer); } /* * This code tries to write queued requests (=transactions) using as * few I/O syscalls and memory copies as possible. For this reason * writev(2) and `struct iovec[]` are used (see `struct fio_batch`). * * For each request (=transaction) each request row (=statement) is * added to iov `batch`. 
A row can contain up to XLOG_IOVMAX iovecs. * A request can have an **unlimited** number of rows. Since OS has * a hard coded limit up to `sysconf(_SC_IOV_MAX)` iovecs (usually * 1024), a huge transaction may not fit into a single batch. * Therefore, it is not possible to "atomically" write an entire * transaction using a single writev(2) call. * * Request boundaries and batch boundaries are not connected at all * in this code. Batches flushed to disk as soon as they are full. * In order to guarantee that a transaction is either fully written * to file or isn't written at all, ftruncate(2) is used to shrink * the file to the last fully written request. The absolute position * of request in xlog file is stored inside `struct journal_entry`. */ struct xlog *l = &writer->current_wal; /* * Iterate over requests (transactions) */ struct journal_entry *entry; struct stailq_entry *last_committed = NULL; stailq_foreach_entry(entry, &wal_msg->commit, fifo) { wal_assign_lsn(writer, entry->rows, entry->rows + entry->n_rows); entry->res = vclock_sum(&writer->vclock); int rc = xlog_write_entry(l, entry); if (rc < 0) goto done; if (rc > 0) last_committed = &entry->fifo; /* rc == 0: the write is buffered in xlog_tx */ } if (xlog_flush(l) < 0) goto done; last_committed = stailq_last(&wal_msg->commit); done: struct error *error = diag_last_error(diag_get()); if (error) { /* Until we can pass the error to tx, log it and clear. */ error_log(error); diag_clear(diag_get()); } /* * We need to start rollback from the first request * following the last committed request. If * last_commit_req is NULL, it means we have committed * nothing, and need to start rollback from the first * request. Otherwise we rollback from the first request. */ struct stailq rollback; stailq_cut_tail(&wal_msg->commit, last_committed, &rollback); if (!stailq_empty(&rollback)) { /* Update status of the successfully committed requests. 
*/ stailq_foreach_entry(entry, &rollback, fifo) entry->res = -1; /* Rollback unprocessed requests */ stailq_concat(&wal_msg->rollback, &rollback); wal_writer_begin_rollback(writer); } fiber_gc(); wal_notify_watchers(writer, WAL_EVENT_WRITE); } /** WAL thread main loop. */ static int wal_thread_f(va_list ap) { (void) ap; /** Initialize eio in this thread */ coio_enable(); struct cbus_endpoint endpoint; cbus_endpoint_create(&endpoint, "wal", fiber_schedule_cb, fiber()); /* * Create a pipe to TX thread. Use a high priority * endpoint, to ensure that WAL messages are delivered * even when tx fiber pool is used up by net messages. */ cpipe_create(&wal_thread.tx_pipe, "tx_prio"); cbus_loop(&endpoint); struct wal_writer *writer = &wal_writer_singleton; if (xlog_is_open(&writer->current_wal)) xlog_close(&writer->current_wal, false); if (xlog_is_open(&vy_log_writer.xlog)) xlog_close(&vy_log_writer.xlog, false); cpipe_destroy(&wal_thread.tx_pipe); return 0; } /** * WAL writer main entry point: queue a single request * to be written to disk and wait until this task is completed. */ int64_t wal_write(struct journal *journal, struct journal_entry *entry) { struct wal_writer *writer = (struct wal_writer *) journal; ERROR_INJECT_RETURN(ERRINJ_WAL_IO); if (! stailq_empty(&writer->rollback)) { /* * The writer rollback queue is not empty, * roll back this transaction immediately. * This is to ensure we do not accidentally * commit a transaction which has seen changes * that will be rolled back. 
*/ say_error("Aborting transaction %llu during " "cascading rollback", vclock_sum(&writer->vclock)); return -1; } struct wal_msg *batch; if (!stailq_empty(&wal_thread.wal_pipe.input) && (batch = wal_msg(stailq_first_entry(&wal_thread.wal_pipe.input, struct cmsg, fifo)))) { stailq_add_tail_entry(&batch->commit, entry, fifo); } else { batch = (struct wal_msg *) region_alloc_xc(&fiber()->gc, sizeof(struct wal_msg)); wal_msg_create(batch); /* * Sic: first add a request, then push the batch, * since cpipe_push() may pass the batch to WAL * thread right away. */ stailq_add_tail_entry(&batch->commit, entry, fifo); cpipe_push(&wal_thread.wal_pipe, batch); } wal_thread.wal_pipe.n_input += entry->n_rows * XROW_IOVMAX; cpipe_flush_input(&wal_thread.wal_pipe); /** * It's not safe to spuriously wakeup this fiber * since in that case it will ignore a possible * error from WAL writer and not roll back the * transaction. */ bool cancellable = fiber_set_cancellable(false); fiber_yield(); /* Request was inserted. */ fiber_set_cancellable(cancellable); if (entry->res > 0) { struct xrow_header **last = entry->rows + entry->n_rows - 1; while (last >= entry->rows) { /* * Find last row from local instance id * and promote vclock. */ if ((*last)->replica_id == instance_id) { /* * In master-master configuration, during sudden * power-loss, if the data have not been written * to WAL but have already been sent to others, * they will send the data back. In this case * vclock has already been promoted by applier. 
*/ if (vclock_get(&replicaset.vclock, instance_id) < (*last)->lsn) { vclock_follow(&replicaset.vclock, instance_id, (*last)->lsn); } break; } --last; } } return entry->res; } int64_t wal_write_in_wal_mode_none(struct journal *journal, struct journal_entry *entry) { struct wal_writer *writer = (struct wal_writer *) journal; wal_assign_lsn(writer, entry->rows, entry->rows + entry->n_rows); int64_t old_lsn = vclock_get(&replicaset.vclock, instance_id); int64_t new_lsn = vclock_get(&writer->vclock, instance_id); if (new_lsn > old_lsn) { /* There were local writes, promote vclock. */ vclock_follow(&replicaset.vclock, instance_id, new_lsn); } return vclock_sum(&writer->vclock); } void wal_init_vy_log() { xlog_clear(&vy_log_writer.xlog); } struct wal_write_vy_log_msg: public cbus_call_msg { struct journal_entry *entry; }; static int wal_write_vy_log_f(struct cbus_call_msg *msg) { struct journal_entry *entry = ((struct wal_write_vy_log_msg *)msg)->entry; if (! xlog_is_open(&vy_log_writer.xlog)) { if (vy_log_open(&vy_log_writer.xlog) < 0) return -1; } if (xlog_write_entry(&vy_log_writer.xlog, entry) < 0) return -1; if (xlog_flush(&vy_log_writer.xlog) < 0) return -1; return 0; } int wal_write_vy_log(struct journal_entry *entry) { struct wal_write_vy_log_msg msg; msg.entry= entry; bool cancellable = fiber_set_cancellable(false); int rc = cbus_call(&wal_thread.wal_pipe, &wal_thread.tx_pipe, &msg, wal_write_vy_log_f, NULL, TIMEOUT_INFINITY); fiber_set_cancellable(cancellable); return rc; } static int wal_rotate_vy_log_f(struct cbus_call_msg *msg) { (void) msg; if (xlog_is_open(&vy_log_writer.xlog)) xlog_close(&vy_log_writer.xlog, false); return 0; } void wal_rotate_vy_log() { struct cbus_call_msg msg; bool cancellable = fiber_set_cancellable(false); cbus_call(&wal_thread.wal_pipe, &wal_thread.tx_pipe, &msg, wal_rotate_vy_log_f, NULL, TIMEOUT_INFINITY); fiber_set_cancellable(cancellable); } static void wal_watcher_notify(struct wal_watcher *watcher, unsigned events) { 
assert(!rlist_empty(&watcher->next)); if (watcher->msg.cmsg.route != NULL) { /* * If the notification message is still en route, * mark the watcher to resend it as soon as it * returns to WAL so as not to lose any events. */ watcher->events |= events; return; } watcher->msg.events = events; cmsg_init(&watcher->msg.cmsg, watcher->route); cpipe_push(&watcher->watcher_pipe, &watcher->msg.cmsg); } static void wal_watcher_notify_perform(struct cmsg *cmsg) { struct wal_watcher_msg *msg = (struct wal_watcher_msg *) cmsg; struct wal_watcher *watcher = msg->watcher; unsigned events = msg->events; watcher->cb(watcher, events); } static void wal_watcher_notify_complete(struct cmsg *cmsg) { struct wal_watcher_msg *msg = (struct wal_watcher_msg *) cmsg; struct wal_watcher *watcher = msg->watcher; cmsg->route = NULL; if (rlist_empty(&watcher->next)) { /* The watcher is about to be destroyed. */ return; } if (watcher->events != 0) { /* * Resend the message if we got notified while * it was en route, see wal_watcher_notify(). */ wal_watcher_notify(watcher, watcher->events); watcher->events = 0; } } static void wal_watcher_attach(void *arg) { struct wal_watcher *watcher = (struct wal_watcher *) arg; struct wal_writer *writer = &wal_writer_singleton; assert(rlist_empty(&watcher->next)); rlist_add_tail_entry(&writer->watchers, watcher, next); /* * Notify the watcher right after registering it * so that it can process existing WALs. 
*/ wal_watcher_notify(watcher, WAL_EVENT_ROTATE); } static void wal_watcher_detach(void *arg) { struct wal_watcher *watcher = (struct wal_watcher *) arg; assert(!rlist_empty(&watcher->next)); rlist_del_entry(watcher, next); } void wal_set_watcher(struct wal_watcher *watcher, const char *name, void (*watcher_cb)(struct wal_watcher *, unsigned events), void (*process_cb)(struct cbus_endpoint *)) { assert(journal_is_initialized(&wal_writer_singleton.base)); rlist_create(&watcher->next); watcher->cb = watcher_cb; watcher->msg.watcher = watcher; watcher->msg.events = 0; watcher->msg.cmsg.route = NULL; watcher->events = 0; assert(lengthof(watcher->route) == 2); watcher->route[0] = {wal_watcher_notify_perform, &watcher->wal_pipe}; watcher->route[1] = {wal_watcher_notify_complete, NULL}; cbus_pair("wal", name, &watcher->wal_pipe, &watcher->watcher_pipe, wal_watcher_attach, watcher, process_cb); } void wal_clear_watcher(struct wal_watcher *watcher, void (*process_cb)(struct cbus_endpoint *)) { assert(journal_is_initialized(&wal_writer_singleton.base)); cbus_unpair(&watcher->wal_pipe, &watcher->watcher_pipe, wal_watcher_detach, watcher, process_cb); } static void wal_notify_watchers(struct wal_writer *writer, unsigned events) { struct wal_watcher *watcher; rlist_foreach_entry(watcher, &writer->watchers, next) wal_watcher_notify(watcher, events); } /** * After fork, the WAL writer thread disappears. * Make sure that atexit() handlers in the child do * not try to stop a non-existent thread or write * a second EOF marker to an open file. 
*/ void wal_atfork() { if (xlog_is_open(&wal_writer_singleton.current_wal)) xlog_atfork(&wal_writer_singleton.current_wal); if (xlog_is_open(&vy_log_writer.xlog)) xlog_atfork(&vy_log_writer.xlog); } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_write_iterator.h0000664000000000000000000002456113306565107021535 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_WRITE_STREAM_H #define INCLUDES_TARANTOOL_BOX_VY_WRITE_STREAM_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include "vy_stmt_stream.h" #include "vy_read_view.h" #include #include /** * Iterate over an in-memory index when writing it to disk (dump) * or over a series of sorted runs on disk to create a new sorted * run (compaction). 
* * Background * ---------- * The write iterator merges multiple data sources into one, * ordering statements by key and then by LSN and purging * unnecessary changes. * * The sources supply statements in ascending order of the * key and descending order of LSN (newest changes first). * A heap is used to preserve descending order of LSNs * in the output. * * There may be many statements for the same key, forming * a history. * * The iterator needs to preserve only the statements * which are visible to the active read views, each represented * by a view LSN (VLSN) and purge the rest. * * The list of read views always contains at least the "current" * read view, represented by INT64_MAX. 0 stands for the oldest * possible LSN: * * [0, vlsn1, vlsn2, vlsn3, ... INT64_MAX]. * * The iterator splits a sequence of LSNs for the same key into * a series of histories, one for each read view, and then merges * each history into a single statement: * * -------- * SAME KEY * -------- * 0 VLSN1 VLSN2 ... INT64_MAX * | | | | * | LSN1 ... LSN(i) | LSN(i+1) ... LSN(j) | LSN(j+1) ... LSN(N) | * \________________/ \___________________/ \____________________/ * merge merge merge * * The following optimizations are applicable, all aiming at * purging unnecessary statements from the output. The * optimizations are applied while reading the statements from * the heap, from newest LSN to oldest. * * --------------------------------------------------------------- * Optimization #1: when merging the last level of the LSM tree, * e.g. when doing a major compaction, skip DELETEs from the * output as long as they are older than the oldest read view: * * --------------------------- * SAME KEY, MAJOR COMPACTION * --------------------------- * * 0 VLSN1 ... INT64_MAX * | | | * | LSN1 LSN2 ... DELETE | LSNi LSNi+1 ... 
LSN_N | * \___________________________/ \___________________________/ * skip merge * * Indeed, we don't have to store absent data on disk, including * the statements even older than the pruned delete. * As for all other read views, if a DELETE is visible to a read * view, it has to be preserved. * * --------------------------------------------------------------- * Optimization #2: once we found a REPLACE or DELETE, we can skip * the rest of the stream until the next read view: * * -------- * SAME KEY * -------- * VLSN1 VLSN2 INT64_MAX * | | | * | LSN1 LSN2 ... REPLACE | LSNi ... DELETE ... LSN_N | * \______________/\_______/ \_______/\_________________/ * skip keep skip merge * * --------------------------------------------------------------- * Optimization #3: when compacting runs of a secondary key, skip * statements, which do not update this key. * * -------- * SAME KEY * -------- * VLSN(i) VLSN(i+1) * Masks | | * intersection:| not 0 0 0 not 0 not 0 | * | ANY DELETE REPLACE ANY ... REPLACE | * \______/\_______________/\___________________/ * merge skip merge * * Details: when UPDATE is executed by Tarantool, it is * transformed into DELETE + REPLACE or a single REPLACE. But it * is only necessary to write anything into the secondary key if * such UPDATE changes any field, which is part of the key. * All other UPDATEs can be simply skipped. * * --------------------------------------------------------------- * Optimization #4: use older REPLACE/DELETE to apply UPSERTs and * convert them into a single REPLACE. When compaction includes * the last level, absence of REPLACE or DELETE is equivalent * to a DELETE, and UPSERT can be converted to REPLACE as well. * If REPLACE or DELETE is found in an older read view, it can * be used as well. * * -------- * SAME KEY * -------- * 0 VLSN1 VLSN2 VLSN3 VLSN4 VLSN5 INT64_MAX * | | | | | | | * | | REPLACE | UPSERT | UPSERT | UPSERT | ... 
| * \_____|___^_____|_________|_________|_________|________/ * ^ < < apply * ^ < < apply * ^ < < apply * * Result: * * 0 VLSN1 VLSN2 VLSN3 VLSN4 VLSN5 INT64_MAX * | | | | | | | * | | REPLACE | REPLACE | REPLACE | REPLACE | ... | * \_____|_________|_________|_________|_________|________/ * * See implementation details in * vy_write_iterator_build_read_views. * * --------------------------------------------------------------- * Optimization #5: discard a tautological DELETE statement, i.e. * a statement that was not removed from the history because it * is referenced by read view, but that is preceeded by another * DELETE and hence not needed. * * -------- * SAME KEY * -------- * * VLSN(i) VLSN(i+1) VLSN(i+2) * | | | * | LSN1 LSN2 ... DELETE | LSNi LSNi+1 ... DELETE | * \________________/\_______/ \_________________/\______/ * skip keep skip discard * * --------------------------------------------------------------- * Optimization #6: discard the first DELETE if the oldest * statement for the current key among all sources is an INSERT. * Rationale: if a key's history starts from an INSERT, there is * either no statements for this key in older runs or the latest * statement is a DELETE; in either case, the first DELETE does * not affect the resulting tuple, no matter which read view it * is looked from, and hence can be skipped. * * -------- * SAME KEY * -------- * * 0 VLSN1 INT64_MAX * | | | * | INSERT LSN2 ... LSNi DELETE | LSNi+2 ... LSN_N | * \________________________/\______/ \__________________/ * skip discard merge * * If this optimization is performed, the resulting key's history * will either be empty or start with a REPLACE or INSERT. In the * latter case we convert the first REPLACE to INSERT so that if * the key gets deleted later, we will perform this optimization * again on the next compaction to drop the DELETE. 
* * In order not to trigger this optimization by mistake, we must * also turn the first INSERT in the resulting key's history to a * REPLACE in case the oldest statement among all sources is not * an INSERT. */ struct vy_write_iterator; struct key_def; struct tuple_format; struct tuple; struct vy_mem; struct vy_slice; /** * Open an empty write iterator. To add sources to the iterator * use vy_write_iterator_add_* functions. * @param cmp_def - key definition for tuple compare. * @param format - dormat to allocate new REPLACE and DELETE tuples from vy_run. * @param upsert_format - same as format, but for UPSERT tuples. * @param LSM tree is_primary - set if this iterator is for a primary index. * @param is_last_level - there is no older level than the one we're writing to. * @param read_views - Opened read views. * @return the iterator or NULL on error (diag is set). */ struct vy_stmt_stream * vy_write_iterator_new(const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary, bool is_last_level, struct rlist *read_views); /** * Add a mem as a source to the iterator. * @return 0 on success, -1 on error (diag is set). */ NODISCARD int vy_write_iterator_new_mem(struct vy_stmt_stream *stream, struct vy_mem *mem); /** * Add a run slice as a source to the iterator. * @return 0 on success, -1 on error (diag is set). */ NODISCARD int vy_write_iterator_new_slice(struct vy_stmt_stream *stream, struct vy_slice *slice); #endif /* INCLUDES_TARANTOOL_BOX_VY_WRITE_STREAM_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/coll_def.h0000664000000000000000000000714413306565107017521 0ustar rootroot#ifndef TARANTOOL_BOX_COLL_DEF_H_INCLUDED #define TARANTOOL_BOX_COLL_DEF_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "opt_def.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * The supported collation types */ enum coll_type { COLL_TYPE_ICU = 0, coll_type_MAX, }; extern const char *coll_type_strs[]; /* * ICU collation options. 
See * http://icu-project.org/apiref/icu4c/ucol_8h.html#a583fbe7fc4a850e2fcc692e766d2826c */ /** Settings for simple ICU on/off options */ enum coll_icu_on_off { COLL_ICU_DEFAULT = 0, COLL_ICU_ON, COLL_ICU_OFF, coll_icu_on_off_MAX }; extern const char *coll_icu_on_off_strs[]; /** Alternate handling ICU settings */ enum coll_icu_alternate_handling { COLL_ICU_AH_DEFAULT = 0, COLL_ICU_AH_NON_IGNORABLE, COLL_ICU_AH_SHIFTED, coll_icu_alternate_handling_MAX }; extern const char *coll_icu_alternate_handling_strs[]; /** Case first ICU settings */ enum coll_icu_case_first { COLL_ICU_CF_DEFAULT = 0, COLL_ICU_CF_OFF, COLL_ICU_CF_UPPER_FIRST, COLL_ICU_CF_LOWER_FIRST, coll_icu_case_first_MAX }; extern const char *coll_icu_case_first_strs[]; /** Strength ICU settings */ enum coll_icu_strength { COLL_ICU_STRENGTH_DEFAULT = 0, COLL_ICU_STRENGTH_PRIMARY, COLL_ICU_STRENGTH_SECONDARY, COLL_ICU_STRENGTH_TERTIARY, COLL_ICU_STRENGTH_QUATERNARY, COLL_ICU_STRENGTH_IDENTICAL, coll_icu_strength_MAX }; extern const char *coll_icu_strength_strs[]; /** Collection of ICU settings */ struct coll_icu_def { enum coll_icu_on_off french_collation; enum coll_icu_alternate_handling alternate_handling; enum coll_icu_case_first case_first; enum coll_icu_on_off case_level; enum coll_icu_on_off normalization_mode; enum coll_icu_strength strength; enum coll_icu_on_off numeric_collation; }; /** * Definition of a collation. */ struct coll_def { /** Perconal ID */ uint32_t id; /** Owner ID */ uint32_t owner_id; /** Collation name. */ size_t name_len; const char *name; /** Locale. */ size_t locale_len; const char *locale; /** Collation type. */ enum coll_type type; /** Type specific options. 
*/ struct coll_icu_def icu; }; extern const struct opt_def coll_icu_opts_reg[]; #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_COLL_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_read_set.c0000664000000000000000000001306613306565107020251 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "vy_read_set.h" #include #include #include #include #include "trivia/util.h" #include "tuple.h" #include "vy_index.h" #include "vy_stmt.h" int vy_read_interval_cmpl(const struct vy_read_interval *a, const struct vy_read_interval *b) { assert(a->index == b->index); struct key_def *cmp_def = a->index->cmp_def; int cmp = vy_stmt_compare(a->left, b->left, cmp_def); if (cmp != 0) return cmp; if (a->left_belongs && !b->left_belongs) return -1; if (!a->left_belongs && b->left_belongs) return 1; uint32_t a_parts = tuple_field_count(a->left); uint32_t b_parts = tuple_field_count(b->left); a_parts = MIN(a_parts, cmp_def->part_count); b_parts = MIN(b_parts, cmp_def->part_count); if (a->left_belongs) return a_parts < b_parts ? -1 : a_parts > b_parts; else return a_parts > b_parts ? -1 : a_parts < b_parts; } int vy_read_interval_cmpr(const struct vy_read_interval *a, const struct vy_read_interval *b) { assert(a->index == b->index); struct key_def *cmp_def = a->index->cmp_def; int cmp = vy_stmt_compare(a->right, b->right, cmp_def); if (cmp != 0) return cmp; if (a->right_belongs && !b->right_belongs) return 1; if (!a->right_belongs && b->right_belongs) return -1; uint32_t a_parts = tuple_field_count(a->right); uint32_t b_parts = tuple_field_count(b->right); a_parts = MIN(a_parts, cmp_def->part_count); b_parts = MIN(b_parts, cmp_def->part_count); if (a->right_belongs) return a_parts > b_parts ? -1 : a_parts < b_parts; else return a_parts < b_parts ? 
-1 : a_parts > b_parts; } bool vy_read_interval_should_merge(const struct vy_read_interval *l, const struct vy_read_interval *r) { assert(l->index == r->index); assert(vy_read_interval_cmpl(l, r) <= 0); struct key_def *cmp_def = l->index->cmp_def; int cmp = vy_stmt_compare(l->right, r->left, cmp_def); if (cmp > 0) return true; if (cmp < 0) return false; if (l->right_belongs && r->left_belongs) return true; if (!l->right_belongs && !r->left_belongs) return false; uint32_t l_parts = tuple_field_count(l->right); uint32_t r_parts = tuple_field_count(r->left); l_parts = MIN(l_parts, cmp_def->part_count); r_parts = MIN(r_parts, cmp_def->part_count); if (l->right_belongs) return l_parts <= r_parts; else return l_parts >= r_parts; } struct vy_tx * vy_tx_conflict_iterator_next(struct vy_tx_conflict_iterator *it) { struct vy_read_interval *curr, *left, *right; while ((curr = vy_index_read_set_walk_next(&it->tree_walk, it->tree_dir, &left, &right)) != NULL) { struct key_def *cmp_def = curr->index->cmp_def; const struct vy_read_interval *last = curr->subtree_last; assert(left == NULL || left->index == curr->index); assert(right == NULL || right->index == curr->index); int cmp_right = vy_stmt_compare(it->stmt, last->right, cmp_def); if (cmp_right == 0 && !last->right_belongs) cmp_right = 1; if (cmp_right > 0) { /* * The point is to the right of the rightmost * interval in the subtree so there cannot be * any conflicts in this subtree. */ it->tree_dir = 0; continue; } int cmp_left; if (curr->left == last->right) { /* Optimize comparison out. */ cmp_left = cmp_right; } else { cmp_left = vy_stmt_compare(it->stmt, curr->left, cmp_def); if (cmp_left == 0 && !curr->left_belongs) cmp_left = -1; } if (cmp_left < 0) { /* * The point is to the left of the current interval * so an intersection can only be found in the left * subtree. */ it->tree_dir = RB_WALK_LEFT; } else { /* * Both subtrees can have intervals that contain the * given point. 
*/ it->tree_dir = RB_WALK_LEFT | RB_WALK_RIGHT; } /* * Check if the point is within the current interval. */ if (curr->left == curr->right) { /* Optimize comparison out. */ cmp_right = cmp_left; } else if (curr != last) { cmp_right = vy_stmt_compare(it->stmt, curr->right, cmp_def); if (cmp_right == 0 && !curr->right_belongs) cmp_right = 1; } if (cmp_left >= 0 && cmp_right <= 0) { /* * The point is within the current interval. * Return the conflicting transaction before * continuing tree traversal. */ break; } } return curr != NULL ? curr->tx : NULL; } tarantool_1.9.1.26.g63eb81e3c/src/box/key_def.cc0000664000000000000000000004232613306565107017517 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "key_def.h" #include "tuple_compare.h" #include "tuple_extract_key.h" #include "tuple_hash.h" #include "column_mask.h" #include "schema_def.h" #include "coll_cache.h" static const struct key_part_def key_part_def_default = { 0, field_type_MAX, COLL_NONE, false, }; static int64_t part_type_by_name_wrapper(const char *str, uint32_t len) { return field_type_by_name(str, len); } #define PART_OPT_TYPE "type" #define PART_OPT_FIELD "field" #define PART_OPT_COLLATION "collation" #define PART_OPT_NULLABILITY "is_nullable" const struct opt_def part_def_reg[] = { OPT_DEF_ENUM(PART_OPT_TYPE, field_type, struct key_part_def, type, part_type_by_name_wrapper), OPT_DEF(PART_OPT_FIELD, OPT_UINT32, struct key_part_def, fieldno), OPT_DEF(PART_OPT_COLLATION, OPT_UINT32, struct key_part_def, coll_id), OPT_DEF(PART_OPT_NULLABILITY, OPT_BOOL, struct key_part_def, is_nullable), OPT_END, }; const char *mp_type_strs[] = { /* .MP_NIL = */ "nil", /* .MP_UINT = */ "unsigned", /* .MP_INT = */ "integer", /* .MP_STR = */ "string", /* .MP_BIN = */ "blob", /* .MP_ARRAY = */ "array", /* .MP_MAP = */ "map", /* .MP_BOOL = */ "boolean", /* .MP_FLOAT = */ "float", /* .MP_DOUBLE = */ "double", /* .MP_EXT = */ "extension", }; const uint32_t key_mp_type[] = { /* [FIELD_TYPE_ANY] = */ UINT32_MAX, /* [FIELD_TYPE_UNSIGNED] = */ 1U << MP_UINT, /* [FIELD_TYPE_STRING] = */ 1U << MP_STR, /* [FIELD_TYPE_NUMBER] = */ (1U << MP_UINT) | (1U << MP_INT) | (1U << MP_FLOAT) | (1U << MP_DOUBLE), /* [FIELD_TYPE_INTEGER] = */ (1U << MP_UINT) | (1U << MP_INT), /* [FIELD_TYPE_BOOLEAN] = */ 1U << MP_BOOL, /* [FIELD_TYPE_SCALAR] = */ (1U << MP_UINT) | (1U << MP_INT) | (1U << MP_FLOAT) | (1U << MP_DOUBLE) | (1U << MP_STR) | (1U << MP_BIN) | (1U << MP_BOOL), /* [FIELD_TYPE_ARRAY] = */ 1U << MP_ARRAY, /* [FIELD_TYPE_MAP] = */ (1U << MP_MAP), }; struct key_def * key_def_dup(const struct key_def *src) { size_t sz = key_def_sizeof(src->part_count); struct key_def *res = (struct key_def *)malloc(sz); if (res == NULL) { 
diag_set(OutOfMemory, sz, "malloc", "res"); return NULL; } memcpy(res, src, sz); return res; } void key_def_delete(struct key_def *def) { free(def); } static void key_def_set_cmp(struct key_def *def) { def->tuple_compare = tuple_compare_create(def); def->tuple_compare_with_key = tuple_compare_with_key_create(def); tuple_hash_func_set(def); tuple_extract_key_set(def); } struct key_def * key_def_new(uint32_t part_count) { size_t sz = key_def_sizeof(part_count); /** Use calloc() to zero comparator function pointers. */ struct key_def *key_def = (struct key_def *) calloc(1, sz); if (key_def == NULL) { diag_set(OutOfMemory, sz, "malloc", "struct key_def"); return NULL; } key_def->part_count = part_count; key_def->unique_part_count = part_count; return key_def; } struct key_def * key_def_new_with_parts(struct key_part_def *parts, uint32_t part_count) { struct key_def *def = key_def_new(part_count); if (def == NULL) return NULL; for (uint32_t i = 0; i < part_count; i++) { struct key_part_def *part = &parts[i]; struct coll *coll = NULL; if (part->coll_id != COLL_NONE) { coll = coll_by_id(part->coll_id); if (coll == NULL) { diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, i + 1, "collation was not found by ID"); key_def_delete(def); return NULL; } } key_def_set_part(def, i, part->fieldno, part->type, part->is_nullable, coll); } return def; } void key_def_dump_parts(const struct key_def *def, struct key_part_def *parts) { for (uint32_t i = 0; i < def->part_count; i++) { const struct key_part *part = &def->parts[i]; struct key_part_def *part_def = &parts[i]; part_def->fieldno = part->fieldno; part_def->type = part->type; part_def->is_nullable = part->is_nullable; part_def->coll_id = (part->coll != NULL ? 
part->coll->id : COLL_NONE); } } box_key_def_t * box_key_def_new(uint32_t *fields, uint32_t *types, uint32_t part_count) { struct key_def *key_def = key_def_new(part_count); if (key_def == NULL) return key_def; for (uint32_t item = 0; item < part_count; ++item) { key_def_set_part(key_def, item, fields[item], (enum field_type)types[item], key_part_def_default.is_nullable, NULL); } return key_def; } void box_key_def_delete(box_key_def_t *key_def) { key_def_delete(key_def); } int box_tuple_compare(const box_tuple_t *tuple_a, const box_tuple_t *tuple_b, const box_key_def_t *key_def) { return tuple_compare(tuple_a, tuple_b, key_def); } int box_tuple_compare_with_key(const box_tuple_t *tuple_a, const char *key_b, const box_key_def_t *key_def) { uint32_t part_count = mp_decode_array(&key_b); return tuple_compare_with_key(tuple_a, key_b, part_count, key_def); } int key_part_cmp(const struct key_part *parts1, uint32_t part_count1, const struct key_part *parts2, uint32_t part_count2) { const struct key_part *part1 = parts1; const struct key_part *part2 = parts2; uint32_t part_count = MIN(part_count1, part_count2); const struct key_part *end = parts1 + part_count; for (; part1 != end; part1++, part2++) { if (part1->fieldno != part2->fieldno) return part1->fieldno < part2->fieldno ? -1 : 1; if ((int) part1->type != (int) part2->type) return (int) part1->type < (int) part2->type ? -1 : 1; if (part1->coll != part2->coll) return (uintptr_t) part1->coll < (uintptr_t) part2->coll ? -1 : 1; if (part1->is_nullable != part2->is_nullable) return part1->is_nullable < part2->is_nullable ? -1 : 1; } return part_count1 < part_count2 ? 
-1 : part_count1 > part_count2; } bool key_part_check_compatibility(const struct key_part *old_parts, uint32_t old_part_count, const struct key_part *new_parts, uint32_t new_part_count) { if (new_part_count != old_part_count) return false; for (uint32_t i = 0; i < new_part_count; i++) { const struct key_part *new_part = &new_parts[i]; const struct key_part *old_part = &old_parts[i]; if (old_part->fieldno != new_part->fieldno) return false; if (old_part->coll != new_part->coll) return false; } return true; } void key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno, enum field_type type, bool is_nullable, struct coll *coll) { assert(part_no < def->part_count); assert(type < field_type_MAX); def->is_nullable |= is_nullable; def->parts[part_no].is_nullable = is_nullable; def->parts[part_no].fieldno = fieldno; def->parts[part_no].type = type; def->parts[part_no].coll = coll; column_mask_set_fieldno(&def->column_mask, fieldno); /** * When all parts are set, initialize the tuple * comparator function. */ /* Last part is set, initialize the comparators. */ bool all_parts_set = true; for (uint32_t i = 0; i < def->part_count; i++) { if (def->parts[i].type == FIELD_TYPE_ANY) { all_parts_set = false; break; } } if (all_parts_set) key_def_set_cmp(def); } void key_def_update_optionality(struct key_def *def, uint32_t min_field_count) { def->has_optional_parts = false; for (uint32_t i = 0; i < def->part_count; ++i) { struct key_part *part = &def->parts[i]; def->has_optional_parts |= part->is_nullable && min_field_count < part->fieldno + 1; /* * One optional part is enough to switch to new * comparators. 
*/ if (def->has_optional_parts) break; } key_def_set_cmp(def); } int key_def_snprint_parts(char *buf, int size, const struct key_part_def *parts, uint32_t part_count) { int total = 0; SNPRINT(total, snprintf, buf, size, "["); for (uint32_t i = 0; i < part_count; i++) { const struct key_part_def *part = &parts[i]; assert(part->type < field_type_MAX); SNPRINT(total, snprintf, buf, size, "%d, '%s'", (int)part->fieldno, field_type_strs[part->type]); if (i < part_count - 1) SNPRINT(total, snprintf, buf, size, ", "); } SNPRINT(total, snprintf, buf, size, "]"); return total; } size_t key_def_sizeof_parts(const struct key_part_def *parts, uint32_t part_count) { size_t size = 0; for (uint32_t i = 0; i < part_count; i++) { const struct key_part_def *part = &parts[i]; int count = 2; if (part->coll_id != COLL_NONE) count++; if (part->is_nullable) count++; size += mp_sizeof_map(count); size += mp_sizeof_str(strlen(PART_OPT_FIELD)); size += mp_sizeof_uint(part->fieldno); assert(part->type < field_type_MAX); size += mp_sizeof_str(strlen(PART_OPT_TYPE)); size += mp_sizeof_str(strlen(field_type_strs[part->type])); if (part->coll_id != COLL_NONE) { size += mp_sizeof_str(strlen(PART_OPT_COLLATION)); size += mp_sizeof_uint(part->coll_id); } if (part->is_nullable) { size += mp_sizeof_str(strlen(PART_OPT_NULLABILITY)); size += mp_sizeof_bool(part->is_nullable); } } return size; } char * key_def_encode_parts(char *data, const struct key_part_def *parts, uint32_t part_count) { for (uint32_t i = 0; i < part_count; i++) { const struct key_part_def *part = &parts[i]; int count = 2; if (part->coll_id != COLL_NONE) count++; if (part->is_nullable) count++; data = mp_encode_map(data, count); data = mp_encode_str(data, PART_OPT_FIELD, strlen(PART_OPT_FIELD)); data = mp_encode_uint(data, part->fieldno); data = mp_encode_str(data, PART_OPT_TYPE, strlen(PART_OPT_TYPE)); assert(part->type < field_type_MAX); const char *type_str = field_type_strs[part->type]; data = mp_encode_str(data, type_str, 
strlen(type_str)); if (part->coll_id != COLL_NONE) { data = mp_encode_str(data, PART_OPT_COLLATION, strlen(PART_OPT_COLLATION)); data = mp_encode_uint(data, part->coll_id); } if (part->is_nullable) { data = mp_encode_str(data, PART_OPT_NULLABILITY, strlen(PART_OPT_NULLABILITY)); data = mp_encode_bool(data, part->is_nullable); } } return data; } /** * 1.6.6-1.7.5 * Decode parts array from tuple field and write'em to index_def structure. * Throws a nice error about invalid types, but does not check ranges of * resulting values field_no and field_type * Parts expected to be a sequence of arrays like this: * [NUM, STR, ..][NUM, STR, ..].., */ static int key_def_decode_parts_166(struct key_part_def *parts, uint32_t part_count, const char **data, const struct field_def *fields, uint32_t field_count) { for (uint32_t i = 0; i < part_count; i++) { struct key_part_def *part = &parts[i]; if (mp_typeof(**data) != MP_ARRAY) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "expected an array"); return -1; } uint32_t item_count = mp_decode_array(data); if (item_count < 1) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "expected a non-empty array"); return -1; } if (item_count < 2) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "a field type is missing"); return -1; } if (mp_typeof(**data) != MP_UINT) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "field id must be an integer"); return -1; } part->fieldno = (uint32_t) mp_decode_uint(data); if (mp_typeof(**data) != MP_STR) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "field type must be a string"); return -1; } uint32_t len; const char *str = mp_decode_str(data, &len); for (uint32_t j = 2; j < item_count; j++) mp_next(data); part->type = field_type_by_name(str, len); if (part->type == field_type_MAX) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "unknown field type"); return -1; } part->is_nullable = (part->fieldno < field_count ? 
fields[part->fieldno].is_nullable : key_part_def_default.is_nullable); part->coll_id = COLL_NONE; } return 0; } int key_def_decode_parts(struct key_part_def *parts, uint32_t part_count, const char **data, const struct field_def *fields, uint32_t field_count) { if (mp_typeof(**data) == MP_ARRAY) { return key_def_decode_parts_166(parts, part_count, data, fields, field_count); } for (uint32_t i = 0; i < part_count; i++) { struct key_part_def *part = &parts[i]; if (mp_typeof(**data) != MP_MAP) { diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, i + TUPLE_INDEX_BASE, "index part is expected to be a map"); return -1; } *part = key_part_def_default; if (opts_decode(part, part_def_reg, data, ER_WRONG_INDEX_OPTIONS, i + TUPLE_INDEX_BASE, NULL) != 0) return -1; if (part->type == field_type_MAX) { diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, i + TUPLE_INDEX_BASE, "index part: unknown field type"); return -1; } if (part->coll_id != COLL_NONE && part->type != FIELD_TYPE_STRING && part->type != FIELD_TYPE_SCALAR) { diag_set(ClientError, ER_WRONG_INDEX_OPTIONS, i + 1, "collation is reasonable only for " "string and scalar parts"); return -1; } } return 0; } int key_def_decode_parts_160(struct key_part_def *parts, uint32_t part_count, const char **data, const struct field_def *fields, uint32_t field_count) { for (uint32_t i = 0; i < part_count; i++) { struct key_part_def *part = &parts[i]; part->fieldno = (uint32_t) mp_decode_uint(data); uint32_t len; const char *str = mp_decode_str(data, &len); part->type = field_type_by_name(str, len); if (part->type == field_type_MAX) { diag_set(ClientError, ER_WRONG_INDEX_PARTS, "unknown field type"); return -1; } part->is_nullable = (part->fieldno < field_count ? 
fields[part->fieldno].is_nullable : key_part_def_default.is_nullable); part->coll_id = COLL_NONE; } return 0; } const struct key_part * key_def_find(const struct key_def *key_def, uint32_t fieldno) { const struct key_part *part = key_def->parts; const struct key_part *end = part + key_def->part_count; for (; part != end; part++) { if (part->fieldno == fieldno) return part; } return NULL; } bool key_def_contains(const struct key_def *first, const struct key_def *second) { const struct key_part *part = second->parts; const struct key_part *end = part + second->part_count; for (; part != end; part++) { if (key_def_find(first, part->fieldno) == NULL) return false; } return true; } struct key_def * key_def_merge(const struct key_def *first, const struct key_def *second) { uint32_t new_part_count = first->part_count + second->part_count; /* * Find and remove part duplicates, i.e. parts counted * twice since they are present in both key defs. */ const struct key_part *part = second->parts; const struct key_part *end = part + second->part_count; for (; part != end; part++) { if (key_def_find(first, part->fieldno)) --new_part_count; } struct key_def *new_def; new_def = (struct key_def *)calloc(1, key_def_sizeof(new_part_count)); if (new_def == NULL) { diag_set(OutOfMemory, key_def_sizeof(new_part_count), "malloc", "new_def"); return NULL; } new_def->part_count = new_part_count; new_def->unique_part_count = new_part_count; new_def->is_nullable = first->is_nullable || second->is_nullable; new_def->has_optional_parts = first->has_optional_parts || second->has_optional_parts; /* Write position in the new key def. */ uint32_t pos = 0; /* Append first key def's parts to the new index_def. */ part = first->parts; end = part + first->part_count; for (; part != end; part++) { key_def_set_part(new_def, pos++, part->fieldno, part->type, part->is_nullable, part->coll); } /* Set-append second key def's part to the new key def. 
*/ part = second->parts; end = part + second->part_count; for (; part != end; part++) { if (key_def_find(first, part->fieldno)) continue; key_def_set_part(new_def, pos++, part->fieldno, part->type, part->is_nullable, part->coll); } return new_def; } int key_validate_parts(const struct key_def *key_def, const char *key, uint32_t part_count, bool allow_nullable) { for (uint32_t i = 0; i < part_count; i++) { enum mp_type mp_type = mp_typeof(*key); const struct key_part *part = &key_def->parts[i]; mp_next(&key); if (key_mp_type_validate(part->type, mp_type, ER_KEY_PART_TYPE, i, part->is_nullable && allow_nullable)) return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_convert.h0000664000000000000000000000376513306560010020634 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_CONVERT_H_INCLUDED #define TARANTOOL_BOX_TUPLE_CONVERT_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct obuf; struct tuple; /* Store tuple in the output buffer in iproto format. */ int tuple_to_obuf(const struct tuple *tuple, struct obuf *buf); /** * Convert tuple to yaml string * * \param tuple tuple * \retval NULL in case of error written in diag * \retval pointer to string allocated on fiber()->gc region */ char * tuple_to_yaml(const struct tuple *tuple); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TUPLE_CONVERT_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vinyl.h0000664000000000000000000000541113306565107017106 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VINYL_H #define INCLUDES_TARANTOOL_BOX_VINYL_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #ifdef __cplusplus extern "C" { #endif /* defined(__cplusplus) */ struct info_handler; struct vinyl_engine; struct vinyl_engine * vinyl_engine_new(const char *dir, size_t memory, int read_threads, int write_threads, bool force_recovery); /** * Engine introspection (box.info.vinyl()) */ void vinyl_engine_info(struct vinyl_engine *vinyl, struct info_handler *handler); /** * Update vinyl cache size. */ void vinyl_engine_set_cache(struct vinyl_engine *vinyl, size_t quota); /** * Update max tuple size. */ void vinyl_engine_set_max_tuple_size(struct vinyl_engine *vinyl, size_t max_size); /** * Update query timeout. */ void vinyl_engine_set_timeout(struct vinyl_engine *vinyl, double timeout); /** * Update too_long_threshold. 
*/ void vinyl_engine_set_too_long_threshold(struct vinyl_engine *vinyl, double too_long_threshold); #ifdef __cplusplus } /* extern "C" */ #include "diag.h" static inline struct vinyl_engine * vinyl_engine_new_xc(const char *dir, size_t memory, int read_threads, int write_threads, bool force_recovery) { struct vinyl_engine *vinyl; vinyl = vinyl_engine_new(dir, memory, read_threads, write_threads, force_recovery); if (vinyl == NULL) diag_raise(); return vinyl; } #endif /* defined(__plusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VINYL_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/xrow.h0000664000000000000000000004227613306565107016756 0ustar rootroot#ifndef TARANTOOL_XROW_H_INCLUDED #define TARANTOOL_XROW_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include /* struct iovec */ #include "tt_uuid.h" #include "diag.h" #if defined(__cplusplus) extern "C" { #endif struct vclock; enum { XROW_HEADER_IOVMAX = 1, XROW_BODY_IOVMAX = 2, XROW_IOVMAX = XROW_HEADER_IOVMAX + XROW_BODY_IOVMAX, XROW_HEADER_LEN_MAX = 40, XROW_BODY_LEN_MAX = 128, IPROTO_HEADER_LEN = 28, }; struct xrow_header { /* (!) Please update txn_add_redo() after changing members */ uint32_t type; uint32_t replica_id; uint64_t sync; int64_t lsn; /* LSN must be signed for correct comparison */ double tm; int bodycnt; uint32_t schema_version; struct iovec body[XROW_BODY_IOVMAX]; }; /** * Encode xrow into a binary packet * * @param header xrow * @param[out] out iovec to store encoded packet * @param fixheader_len the number of bytes to reserve for fixheader * * @retval > 0 the number of iovector components used (<= XROW_IOVMAX) * @retval -1 on error (check diag) * * @pre out iovec must have space at least for XROW_IOVMAX members * @post retval <= XROW_IOVMAX */ int xrow_header_encode(const struct xrow_header *header, uint64_t sync, struct iovec *out, size_t fixheader_len); /** * Decode xrow from a binary packet * * @param header[out] xrow to fill * @param pos[inout] the start of a packet * @param end the end of a packet * * @retval 0 on success * @retval -1 on error (check diag) * @post *pos == end on success */ int xrow_header_decode(struct xrow_header *header, const char **pos, const char *end); /** * DML request. 
*/ struct request { /* * Either log row, or network header, or NULL, depending * on where this packet originated from: the write ahead * log/snapshot, client request, or a Lua request. */ struct xrow_header *header; /** * Request type - IPROTO type code */ uint32_t type; uint32_t space_id; uint32_t index_id; uint32_t offset; uint32_t limit; uint32_t iterator; /** Search key. */ const char *key; const char *key_end; /** Insert/replace/upsert tuple or proc argument or update operations. */ const char *tuple; const char *tuple_end; /** Upsert operations. */ const char *ops; const char *ops_end; /** Base field offset for UPDATE/UPSERT, e.g. 0 for C and 1 for Lua. */ int index_base; }; /** * Create a JSON-like string representation of a request. */ const char * request_str(const struct request *request); /** * Decode DML request from a given MessagePack map. * @param row request header. * @param[out] request DML request to decode to. * @param key_map a bit map of keys that are required by the caller, * @sa request_key_map(). * @retval 0 on success * @retval -1 on error */ int xrow_decode_dml(struct xrow_header *xrow, struct request *request, uint64_t key_map); /** * Encode the request fields to iovec using region_alloc(). * @param request request to encode * @param iov[out] iovec to fill * @retval -1 on error, see diag * @retval > 0 the number of iovecs used */ int xrow_encode_dml(const struct request *request, struct iovec *iov); /** * CALL/EVAL request. */ struct call_request { /** Request header */ const struct xrow_header *header; /** Function name for CALL request. MessagePack String. */ const char *name; /** Expression for EVAL request. MessagePack String. */ const char *expr; /** CALL/EVAL parameters. MessagePack Array. */ const char *args; const char *args_end; }; /** * Decode CALL/EVAL request from a given MessagePack map. * @param[out] call_request Request to decode to. * @param type Request type - either CALL or CALL_16 or EVAL. * @param sync Request sync. 
* @param data Request MessagePack encoded body. * @param len @data length. */ int xrow_decode_call(const struct xrow_header *row, struct call_request *request); /** * AUTH request */ struct auth_request { /** MessagePack encoded name of the user to authenticate. */ const char *user_name; /** Auth scramble. @sa scramble.h */ const char *scramble; }; /** * Decode AUTH request from MessagePack. * @param row request header. * @param[out] request Request to decode. * @retval 0 on success * @retval -1 on error */ int xrow_decode_auth(const struct xrow_header *row, struct auth_request *request); /** * Encode AUTH command. * @param[out] Row. * @param salt Salt from IPROTO greeting. * @param salt_len Length of @salt. * @param login User login. * @param login_len Length of @login. * @param password User password. * @param password_len Length of @password. * * @retval 0 Success. * @retval -1 Memory error. */ int xrow_encode_auth(struct xrow_header *row, const char *salt, size_t salt_len, const char *login, size_t login_len, const char *password, size_t password_len); /** * Encode a vote request for master election. * @param row[out] Row to encode into. */ void xrow_encode_request_vote(struct xrow_header *row); /** * Encode SUBSCRIBE command. * @param[out] Row. * @param replicaset_uuid Replica set uuid. * @param instance_uuid Instance uuid. * @param vclock Replication clock. * * @retval 0 Success. * @retval -1 Memory error. */ int xrow_encode_subscribe(struct xrow_header *row, const struct tt_uuid *replicaset_uuid, const struct tt_uuid *instance_uuid, const struct vclock *vclock); /** * Decode SUBSCRIBE command. * @param row Row to decode. * @param[out] replicaset_uuid. * @param[out] instance_uuid. * @param[out] vclock. * @param[out] version_id. * @param[out] read_only. * * @retval 0 Success. * @retval -1 Memory or format error. 
*/ int xrow_decode_subscribe(struct xrow_header *row, struct tt_uuid *replicaset_uuid, struct tt_uuid *instance_uuid, struct vclock *vclock, uint32_t *version_id, bool *read_only); /** * Encode JOIN command. * @param[out] row Row to encode into. * @param instance_uuid. * * @retval 0 Success. * @retval -1 Memory error. */ int xrow_encode_join(struct xrow_header *row, const struct tt_uuid *instance_uuid); /** * Decode JOIN command. * @param row Row to decode. * @param[out] instance_uuid. * * @retval 0 Success. * @retval -1 Memory or format error. */ static inline int xrow_decode_join(struct xrow_header *row, struct tt_uuid *instance_uuid) { return xrow_decode_subscribe(row, NULL, instance_uuid, NULL, NULL, NULL); } /** * Encode end of stream command (a response to JOIN command). * @param row[out] Row to encode into. * @param vclock. * * @retval 0 Success. * @retval -1 Memory error. */ int xrow_encode_vclock(struct xrow_header *row, const struct vclock *vclock); /** * Decode end of stream command (a response to JOIN command). * @param row Row to decode. * @param[out] vclock. * * @retval 0 Success. * @retval -1 Memory or format error. */ static inline int xrow_decode_vclock(struct xrow_header *row, struct vclock *vclock) { return xrow_decode_subscribe(row, NULL, NULL, vclock, NULL, NULL); } /** * Decode peer vclock and access rights (a response to VOTE command). * @param row Row to decode. * @param[out] vclock. * @param[out] read_only. * * @retval 0 Success. * @retval -1 Memory or format error. */ static inline int xrow_decode_request_vote(struct xrow_header *row, struct vclock *vclock, bool *read_only) { return xrow_decode_subscribe(row, NULL, NULL, vclock, NULL, read_only); } /** * Encode a heartbeat message. * @param row[out] Row to encode into. * @param replica_id Instance id. * @param tm Time stamp. */ void xrow_encode_timestamp(struct xrow_header *row, uint32_t replica_id, double tm); /** * Fast encode xrow header using the specified header fields. 
* It is faster than the xrow_header_encode, because uses * the predefined values for all fields of the header, defined * in the struct iproto_header_bin in iproto_port.cc. Because of * it, the implementation is placed in the same * file: iproto_port.cc. * * @param out Previously allocated memory of at least * IPROTO_HEADER_LEN bytes. * @param type IPROTO_OK or iproto error code. * @param sync Sync of the response. Must be the same as the * request sync. * @param schema_version Schema version. * @param body_length Length of the body of the iproto message. * Please, pass it without IPROTO_HEADER_LEN. * @see xrow_header_encode() */ void iproto_header_encode(char *data, uint32_t type, uint64_t sync, uint32_t schema_version, uint32_t body_length); struct obuf; struct obuf_svp; int iproto_prepare_select(struct obuf *buf, struct obuf_svp *svp); /** * Write select header to a preallocated buffer. * This function doesn't throw (and we rely on this in iproto.cc). */ void iproto_reply_select(struct obuf *buf, struct obuf_svp *svp, uint64_t sync, uint32_t schema_version, uint32_t count); /** * Encode iproto header with IPROTO_OK response code. * @param out Encode to. * @param sync Request sync. * @param schema_version. * * @retval 0 Success. * @retval -1 Memory error. */ int iproto_reply_ok(struct obuf *out, uint64_t sync, uint32_t schema_version); /** * Encode iproto header with IPROTO_OK response code * and vclock in the body. * @param out Encode to. * @param sync Request sync. * @param schema_version. * @param vclock. * @param read_only. * * @retval 0 Success. * @retval -1 Memory error. */ int iproto_reply_request_vote(struct obuf *out, uint64_t sync, uint32_t schema_version, const struct vclock *vclock, bool read_only); /** * Write an error packet int output buffer. Doesn't throw if out * of memory */ int iproto_reply_error(struct obuf *out, const struct error *e, uint64_t sync, uint32_t schema_version); /** Write error directly to a socket. 
*/ void iproto_write_error(int fd, const struct error *e, uint32_t schema_version, uint64_t sync); enum { /* Maximal length of protocol name in handshake */ GREETING_PROTOCOL_LEN_MAX = 32, /* Maximal length of salt in handshake */ GREETING_SALT_LEN_MAX = 44, }; /** * The server sends a greeting into a newly established socket, * regardless of the socket protocol. This allows the connected * client identify the protocol, server version and instance uuid. * The greeting also contains a random salt which can be * used to encode a password. */ struct greeting { /** Peer version id. */ uint32_t version_id; uint32_t salt_len; /** Peer protocol - Binary or Console */ char protocol[GREETING_PROTOCOL_LEN_MAX + 1]; /** Peer instance uuid */ struct tt_uuid uuid; /** Random salt. */ char salt[GREETING_SALT_LEN_MAX]; }; /** * \brief Format a text greeting sent by the instance during handshake. * This function encodes greeting for binary protocol (adds "(Binary)" * after version signature). * * \param[out] greetingbuf buffer to store result. Exactly * IPROTO_GREETING_SIZE bytes will be written. * \param version_id instance version_id created by version_id() * \param uuid instance UUID * \param salt random bytes that client should use to sign passwords. * \param salt_len size of \a salt. Up to GREETING_SALT_LEN_MAX bytes. * * \sa greeting_decode() */ void greeting_encode(char *greetingbuf, uint32_t version_id, const struct tt_uuid *uuid, const char *salt, uint32_t salt_len); /** * \brief Parse a text greeting send by the instance during handshake. * This function supports both binary and console protocol. * * \param greetingbuf a text greeting * \param[out] greeting parsed struct greeting. * \retval 0 on success * \retval -1 on failure due to mailformed greeting * * \sa greeting_encode() */ int greeting_decode(const char *greetingbuf, struct greeting *greeting); /** * Encode an xrow record into the specified iovec. * * @param row Record to encode. 
* @param[out] out Encoded record. * * @retval >= 0 Used iovector components. * @retval -1 Error. */ int xrow_to_iovec(const struct xrow_header *row, struct iovec *out); /** * Decode ERROR and set it to diagnostics area. * @param row Encoded error. */ void xrow_decode_error(struct xrow_header *row); #if defined(__cplusplus) } /* extern "C" */ /** @copydoc xrow_header_decode. */ static inline void xrow_header_decode_xc(struct xrow_header *header, const char **pos, const char *end) { if (xrow_header_decode(header, pos, end) < 0) diag_raise(); } /** @copydoc xrow_to_iovec. */ static inline int xrow_to_iovec_xc(const struct xrow_header *row, struct iovec *out) { int rc = xrow_to_iovec(row, out); if (rc < 0) diag_raise(); return rc; } /** @copydoc xrow_decode_error. */ static inline void xrow_decode_error_xc(struct xrow_header *row) { xrow_decode_error(row); diag_raise(); } /** @copydoc xrow_decode_dml. */ static inline void xrow_decode_dml_xc(struct xrow_header *row, struct request *request, uint64_t key_map) { if (xrow_decode_dml(row, request, key_map) != 0) diag_raise(); } /** @copydoc xrow_encode_dml. */ static inline int xrow_encode_dml_xc(const struct request *request, struct iovec *iov) { int iovcnt = xrow_encode_dml(request, iov); if (iovcnt < 0) diag_raise(); return iovcnt; } /** @copydoc xrow_decode_call. */ static inline void xrow_decode_call_xc(const struct xrow_header *row, struct call_request *request) { if (xrow_decode_call(row, request) != 0) diag_raise(); } /** @copydoc xrow_decode_auth. */ static inline void xrow_decode_auth_xc(const struct xrow_header *row, struct auth_request *request) { if (xrow_decode_auth(row, request) != 0) diag_raise(); } /** @copydoc xrow_encode_auth. 
*/ static inline void xrow_encode_auth_xc(struct xrow_header *row, const char *salt, size_t salt_len, const char *login, size_t login_len, const char *password, size_t password_len) { if (xrow_encode_auth(row, salt, salt_len, login, login_len, password, password_len) != 0) diag_raise(); } /** @copydoc xrow_encode_subscribe. */ static inline void xrow_encode_subscribe_xc(struct xrow_header *row, const struct tt_uuid *replicaset_uuid, const struct tt_uuid *instance_uuid, const struct vclock *vclock) { if (xrow_encode_subscribe(row, replicaset_uuid, instance_uuid, vclock) != 0) diag_raise(); } /** @copydoc xrow_decode_subscribe. */ static inline void xrow_decode_subscribe_xc(struct xrow_header *row, struct tt_uuid *replicaset_uuid, struct tt_uuid *instance_uuid, struct vclock *vclock, uint32_t *replica_version_id) { if (xrow_decode_subscribe(row, replicaset_uuid, instance_uuid, vclock, replica_version_id, NULL) != 0) diag_raise(); } /** @copydoc xrow_encode_join. */ static inline void xrow_encode_join_xc(struct xrow_header *row, const struct tt_uuid *instance_uuid) { if (xrow_encode_join(row, instance_uuid) != 0) diag_raise(); } /** @copydoc xrow_decode_join. */ static inline void xrow_decode_join_xc(struct xrow_header *row, struct tt_uuid *instance_uuid) { if (xrow_decode_join(row, instance_uuid) != 0) diag_raise(); } /** @copydoc xrow_encode_vclock. */ static inline void xrow_encode_vclock_xc(struct xrow_header *row, const struct vclock *vclock) { if (xrow_encode_vclock(row, vclock) != 0) diag_raise(); } /** @copydoc xrow_decode_vclock. */ static inline void xrow_decode_vclock_xc(struct xrow_header *row, struct vclock *vclock) { if (xrow_decode_vclock(row, vclock) != 0) diag_raise(); } /** @copydoc xrow_decode_request_vote. */ static inline void xrow_decode_request_vote_xc(struct xrow_header *row, struct vclock *vclock, bool *read_only) { if (xrow_decode_request_vote(row, vclock, read_only) != 0) diag_raise(); } /** @copydoc iproto_reply_ok. 
*/ static inline void iproto_reply_ok_xc(struct obuf *out, uint64_t sync, uint32_t schema_version) { if (iproto_reply_ok(out, sync, schema_version) != 0) diag_raise(); } /** @copydoc iproto_reply_request_vote_xc. */ static inline void iproto_reply_request_vote_xc(struct obuf *out, uint64_t sync, uint32_t schema_version, const struct vclock *vclock, bool read_only) { if (iproto_reply_request_vote(out, sync, schema_version, vclock, read_only) != 0) diag_raise(); } #endif #endif /* TARANTOOL_XROW_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/iterator_type.c0000664000000000000000000000371713306560010020625 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "iterator_type.h" #include "trivia/util.h" const char *iterator_type_strs[] = { /* [ITER_EQ] = */ "EQ", /* [ITER_REQ] = */ "REQ", /* [ITER_ALL] = */ "ALL", /* [ITER_LT] = */ "LT", /* [ITER_LE] = */ "LE", /* [ITER_GE] = */ "GE", /* [ITER_GT] = */ "GT", /* [ITER_BITS_ALL_SET] = */ "BITS_ALL_SET", /* [ITER_BITS_ANY_SET] = */ "BITS_ANY_SET", /* [ITER_BITS_ALL_NOT_SET] = */ "BITS_ALL_NOT_SET", /* [ITER_OVERLAPS] = */ "OVERLAPS", /* [ITER_NEIGHBOR] = */ "NEIGHBOR", }; static_assert(sizeof(iterator_type_strs) / sizeof(const char *) == iterator_type_MAX, "iterator_type_str constants"); tarantool_1.9.1.26.g63eb81e3c/src/box/recovery.h0000664000000000000000000000612613306565107017607 0ustar rootroot#ifndef TARANTOOL_RECOVERY_H_INCLUDED #define TARANTOOL_RECOVERY_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "small/rlist.h" #include "trivia/util.h" #include "third_party/tarantool_ev.h" #include "xlog.h" #include "vclock.h" #include "tt_uuid.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ extern const struct type_info type_XlogGapError; struct xrow_header; struct xstream; struct recovery { struct vclock vclock; /** The WAL cursor we're currently reading/writing from/to. */ struct xlog_cursor cursor; struct xdir wal_dir; /** * This fiber is used in local hot standby mode. * It looks for changes in the wal_dir and applies * them locally. */ struct fiber *watcher; /** List of triggers invoked when the current WAL is closed. */ struct rlist on_close_log; }; struct recovery * recovery_new(const char *wal_dirname, bool force_recovery, struct vclock *vclock); void recovery_delete(struct recovery *r); /* to be called at exit */ void recovery_exit(struct recovery *r); void recovery_follow_local(struct recovery *r, struct xstream *stream, const char *name, ev_tstamp wal_dir_rescan_delay); void recovery_stop_local(struct recovery *r); void recovery_finalize(struct recovery *r, struct xstream *stream); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ /** * Find out if there are new .xlog files since the current * vclock, and read them all up. * * Reading will be stopped on reaching stop_vclock. * Use NULL for boundless recover * * This function will not close r->current_wal if * recovery was successful. 
*/ void recover_remaining_wals(struct recovery *r, struct xstream *stream, struct vclock *stop_vclock, bool scan_dir); #endif /* TARANTOOL_RECOVERY_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/applier.cc0000664000000000000000000005676613306565107017562 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "applier.h" #include #include "xlog.h" #include "fiber.h" #include "fiber_cond.h" #include "coio.h" #include "coio_buf.h" #include "xstream.h" #include "wal.h" #include "xrow.h" #include "replication.h" #include "iproto_constants.h" #include "version.h" #include "trigger.h" #include "xrow_io.h" #include "error.h" #include "session.h" STRS(applier_state, applier_STATE); static inline void applier_set_state(struct applier *applier, enum applier_state state) { applier->state = state; say_debug("=> %s", applier_state_strs[state] + strlen("APPLIER_")); trigger_run_xc(&applier->on_state, applier); } /** * Write a nice error message to log file on SocketError or ClientError * in applier_f(). */ static inline void applier_log_error(struct applier *applier, struct error *e) { uint32_t errcode = box_error_code(e); if (applier->last_logged_errcode == errcode) return; switch (applier->state) { case APPLIER_CONNECT: say_info("can't connect to master"); break; case APPLIER_CONNECTED: case APPLIER_READY: say_info("can't join/subscribe"); break; case APPLIER_AUTH: say_info("failed to authenticate"); break; case APPLIER_SYNC: case APPLIER_FOLLOW: case APPLIER_INITIAL_JOIN: case APPLIER_FINAL_JOIN: say_info("can't read row"); break; default: break; } error_log(e); if (type_cast(SocketError, e) || type_cast(SystemError, e)) say_info("will retry every %.2lf second", replication_reconnect_interval()); applier->last_logged_errcode = errcode; } /* * Fiber function to write vclock to replication master. * To track connection status, replica answers master * with encoded vclock. In addition to DML requests, * master also sends heartbeat messages every * replication_timeout seconds (introduced in 1.7.7). * On such requests replica also responds with vclock. 
*/ static int applier_writer_f(va_list ap) { struct applier *applier = va_arg(ap, struct applier *); struct ev_io io; coio_create(&io, applier->io.fd); while (!fiber_is_cancelled()) { /* * Tarantool >= 1.7.7 sends periodic heartbeat * messages so we don't need to send ACKs every * replication_timeout seconds any more. */ if (applier->version_id >= version_id(1, 7, 7)) fiber_cond_wait_timeout(&applier->writer_cond, TIMEOUT_INFINITY); else fiber_cond_wait_timeout(&applier->writer_cond, replication_timeout); /* Send ACKs only when in FOLLOW mode ,*/ if (applier->state != APPLIER_SYNC && applier->state != APPLIER_FOLLOW) continue; try { struct xrow_header xrow; xrow_encode_vclock(&xrow, &replicaset.vclock); coio_write_xrow(&io, &xrow); } catch (SocketError *e) { /* * There is no point trying to send ACKs if * the master closed its end - we would only * spam the log - so exit immediately. */ if (e->get_errno() == EPIPE) break; /* * Do not exit, if there is a network error, * the reader fiber will reconnect for us * and signal our cond afterwards. */ e->log(); } catch (Exception *e) { /* * Out of memory encoding the message, ignore * and try again after an interval. */ e->log(); } fiber_gc(); } return 0; } /** * Connect to a remote host and authenticate the client. */ void applier_connect(struct applier *applier) { struct ev_io *coio = &applier->io; struct ibuf *ibuf = &applier->ibuf; if (coio->fd >= 0) return; char greetingbuf[IPROTO_GREETING_SIZE]; struct xrow_header row; struct uri *uri = &applier->uri; /* * coio_connect() stores resolved address to \a &applier->addr * on success. &applier->addr_len is a value-result argument which * must be initialized to the size of associated buffer (addrstorage) * before calling coio_connect(). Since coio_connect() performs * DNS resolution under the hood it is theoretically possible that * applier->addr_len will be different even for same uri. 
*/ applier->addr_len = sizeof(applier->addrstorage); applier_set_state(applier, APPLIER_CONNECT); coio_connect(coio, uri, &applier->addr, &applier->addr_len); assert(coio->fd >= 0); coio_readn(coio, greetingbuf, IPROTO_GREETING_SIZE); applier->last_row_time = ev_monotonic_now(loop()); /* Decode instance version and name from greeting */ struct greeting greeting; if (greeting_decode(greetingbuf, &greeting) != 0) tnt_raise(LoggedError, ER_PROTOCOL, "Invalid greeting"); if (strcmp(greeting.protocol, "Binary") != 0) { tnt_raise(LoggedError, ER_PROTOCOL, "Unsupported protocol for replication"); } if (applier->version_id != greeting.version_id) { say_info("remote master is %u.%u.%u at %s\r\n", version_id_major(greeting.version_id), version_id_minor(greeting.version_id), version_id_patch(greeting.version_id), sio_strfaddr(&applier->addr, applier->addr_len)); } /* Save the remote instance version and UUID on connect. */ applier->uuid = greeting.uuid; applier->version_id = greeting.version_id; /* Don't display previous error messages in box.info.replication */ diag_clear(&fiber()->diag); /* * Tarantool >= 1.7.7: send an IPROTO_REQUEST_VOTE message * to fetch the master's vclock before proceeding to "join". * It will be used for leader election on bootstrap. 
*/ if (applier->version_id >= version_id(1, 7, 7)) { xrow_encode_request_vote(&row); coio_write_xrow(coio, &row); coio_read_xrow(coio, ibuf, &row); if (row.type != IPROTO_OK) xrow_decode_error_xc(&row); vclock_create(&applier->vclock); xrow_decode_request_vote_xc(&row, &applier->vclock, &applier->remote_is_ro); } applier_set_state(applier, APPLIER_CONNECTED); /* Detect connection to itself */ if (tt_uuid_is_equal(&applier->uuid, &INSTANCE_UUID)) tnt_raise(ClientError, ER_CONNECTION_TO_SELF); /* Perform authentication if user provided at least login */ if (!uri->login) goto done; /* Authenticate */ applier_set_state(applier, APPLIER_AUTH); xrow_encode_auth_xc(&row, greeting.salt, greeting.salt_len, uri->login, uri->login_len, uri->password, uri->password_len); coio_write_xrow(coio, &row); coio_read_xrow(coio, ibuf, &row); applier->last_row_time = ev_monotonic_now(loop()); if (row.type != IPROTO_OK) xrow_decode_error_xc(&row); /* auth failed */ /* auth succeeded */ say_info("authenticated"); done: applier_set_state(applier, APPLIER_READY); } /** * Execute and process JOIN request (bootstrap the instance). */ static void applier_join(struct applier *applier) { /* Send JOIN request */ struct ev_io *coio = &applier->io; struct ibuf *ibuf = &applier->ibuf; struct xrow_header row; xrow_encode_join_xc(&row, &INSTANCE_UUID); coio_write_xrow(coio, &row); /** * Tarantool < 1.7.0: if JOIN is successful, there is no "OK" * response, but a stream of rows from checkpoint. */ if (applier->version_id >= version_id(1, 7, 0)) { /* Decode JOIN response */ coio_read_xrow(coio, ibuf, &row); if (iproto_type_is_error(row.type)) { xrow_decode_error_xc(&row); /* re-throw error */ } else if (row.type != IPROTO_OK) { tnt_raise(ClientError, ER_UNKNOWN_REQUEST_TYPE, (uint32_t) row.type); } /* * Start vclock. The vclock of the checkpoint * the master is sending to the replica. 
* Used to initialize the replica's initial * vclock in bootstrap_from_master() */ xrow_decode_vclock_xc(&row, &replicaset.vclock); } applier_set_state(applier, APPLIER_INITIAL_JOIN); /* * Receive initial data. */ assert(applier->join_stream != NULL); while (true) { coio_read_xrow(coio, ibuf, &row); applier->last_row_time = ev_monotonic_now(loop()); if (iproto_type_is_dml(row.type)) { xstream_write_xc(applier->join_stream, &row); } else if (row.type == IPROTO_OK) { if (applier->version_id < version_id(1, 7, 0)) { /* * This is the start vclock if the * server is 1.6. Since we have * not initialized replication * vclock yet, do it now. In 1.7+ * this vclock is not used. */ xrow_decode_vclock_xc(&row, &replicaset.vclock); } break; /* end of stream */ } else if (iproto_type_is_error(row.type)) { xrow_decode_error_xc(&row); /* rethrow error */ } else { tnt_raise(ClientError, ER_UNKNOWN_REQUEST_TYPE, (uint32_t) row.type); } } say_info("initial data received"); applier_set_state(applier, APPLIER_FINAL_JOIN); /* * Tarantool < 1.7.0: there is no "final join" stage. * Proceed to "subscribe" and do not finish bootstrap * until replica id is received. */ if (applier->version_id < version_id(1, 7, 0)) return; /* * Receive final data. */ while (true) { coio_read_xrow(coio, ibuf, &row); applier->last_row_time = ev_monotonic_now(loop()); if (iproto_type_is_dml(row.type)) { vclock_follow(&replicaset.vclock, row.replica_id, row.lsn); xstream_write_xc(applier->subscribe_stream, &row); } else if (row.type == IPROTO_OK) { /* * Current vclock. This is not used now, * ignore. */ break; /* end of stream */ } else if (iproto_type_is_error(row.type)) { xrow_decode_error_xc(&row); /* rethrow error */ } else { tnt_raise(ClientError, ER_UNKNOWN_REQUEST_TYPE, (uint32_t) row.type); } } say_info("final data received"); applier_set_state(applier, APPLIER_JOINED); applier_set_state(applier, APPLIER_READY); } /** * Execute and process SUBSCRIBE request (follow updates from a master). 
*/ static void applier_subscribe(struct applier *applier) { assert(applier->subscribe_stream != NULL); /* Send SUBSCRIBE request */ struct ev_io *coio = &applier->io; struct ibuf *ibuf = &applier->ibuf; struct xrow_header row; struct vclock remote_vclock_at_subscribe; xrow_encode_subscribe_xc(&row, &REPLICASET_UUID, &INSTANCE_UUID, &replicaset.vclock); coio_write_xrow(coio, &row); if (applier->state == APPLIER_READY) { /* * Tarantool < 1.7.7 does not send periodic heartbeat * messages so we cannot enable applier synchronization * for it without risking getting stuck in the 'orphan' * mode until a DML operation happens on the master. */ if (applier->version_id >= version_id(1, 7, 7)) applier_set_state(applier, APPLIER_SYNC); else applier_set_state(applier, APPLIER_FOLLOW); } else { /* * Tarantool < 1.7.0 sends replica id during * "subscribe" stage. We can't finish bootstrap * until it is received. */ assert(applier->state == APPLIER_FINAL_JOIN); assert(applier->version_id < version_id(1, 7, 0)); } /* * Read SUBSCRIBE response */ if (applier->version_id >= version_id(1, 6, 7)) { coio_read_xrow(coio, ibuf, &row); if (iproto_type_is_error(row.type)) { xrow_decode_error_xc(&row); /* error */ } else if (row.type != IPROTO_OK) { tnt_raise(ClientError, ER_PROTOCOL, "Invalid response to SUBSCRIBE"); } /* * In case of successful subscribe, the server * responds with its current vclock. */ vclock_create(&remote_vclock_at_subscribe); xrow_decode_vclock_xc(&row, &remote_vclock_at_subscribe); } /** * Tarantool < 1.6.7: * If there is an error in subscribe, it's sent directly * in response to subscribe. If subscribe is successful, * there is no "OK" response, but a stream of rows from * the binary log. 
*/ /* Re-enable warnings after successful execution of SUBSCRIBE */ applier->last_logged_errcode = 0; if (applier->version_id >= version_id(1, 7, 4)) { /* Enable replication ACKs for newer servers */ assert(applier->writer == NULL); char name[FIBER_NAME_MAX]; int pos = snprintf(name, sizeof(name), "applierw/"); uri_format(name + pos, sizeof(name) - pos, &applier->uri, false); applier->writer = fiber_new_xc(name, applier_writer_f); fiber_set_joinable(applier->writer, true); fiber_start(applier->writer, applier); } applier->lag = TIMEOUT_INFINITY; /* * Process a stream of rows from the binary log. */ while (true) { if (applier->state == APPLIER_FINAL_JOIN && instance_id != REPLICA_ID_NIL) { say_info("final data received"); applier_set_state(applier, APPLIER_JOINED); applier_set_state(applier, APPLIER_READY); applier_set_state(applier, APPLIER_FOLLOW); } /* * Stay 'orphan' until appliers catch up with * the remote vclock at the time of SUBSCRIBE * and the lag is less than configured. */ if (applier->state == APPLIER_SYNC && applier->lag <= replication_sync_lag && vclock_compare(&remote_vclock_at_subscribe, &replicaset.vclock) <= 0) { /* Applier is synced, switch to "follow". */ applier_set_state(applier, APPLIER_FOLLOW); } /* * Tarantool < 1.7.7 does not send periodic heartbeat * messages so we can't assume that if we haven't heard * from the master for quite a while the connection is * broken - the master might just be idle. */ if (applier->version_id < version_id(1, 7, 7)) { coio_read_xrow(coio, ibuf, &row); } else { double timeout = replication_disconnect_timeout(); coio_read_xrow_timeout_xc(coio, ibuf, &row, timeout); } if (iproto_type_is_error(row.type)) xrow_decode_error_xc(&row); /* error */ /* Replication request. */ if (row.replica_id == REPLICA_ID_NIL || row.replica_id >= VCLOCK_MAX) { /* * A safety net, this can only occur * if we're fed a strangely broken xlog. 
*/ tnt_raise(ClientError, ER_UNKNOWN_REPLICA, int2str(row.replica_id), tt_uuid_str(&REPLICASET_UUID)); } applier->lag = ev_now(loop()) - row.tm; applier->last_row_time = ev_monotonic_now(loop()); if (vclock_get(&replicaset.vclock, row.replica_id) < row.lsn) { /** * Promote the replica set vclock before * applying the row. If there is an * exception (conflict) applying the row, * the row is skipped when the replication * is resumed. */ vclock_follow(&replicaset.vclock, row.replica_id, row.lsn); struct replica *replica = replica_by_id(row.replica_id); /* * In a full mesh topology, the same set * of changes may arrive via two * concurrently running appliers. Thanks * to vclock_follow() above, the first row * in the set will be skipped - but the * remaining may execute out of order, * when the following xstream_write() * yields on WAL. Hence we need a latch to * strictly order all changes which belong * to the same server id. */ if (replica) latch_lock(&replica->order_latch); else latch_lock(&replicaset.applier.order_latch); int res = xstream_write(applier->subscribe_stream, &row); if (replica) latch_unlock(&replica->order_latch); else latch_unlock(&replicaset.applier.order_latch); if (res != 0) diag_raise(); } if (applier->state == APPLIER_SYNC || applier->state == APPLIER_FOLLOW) fiber_cond_signal(&applier->writer_cond); if (ibuf_used(ibuf) == 0) ibuf_reset(ibuf); fiber_gc(); } } static inline void applier_disconnect(struct applier *applier, enum applier_state state) { applier_set_state(applier, state); if (applier->writer != NULL) { fiber_cancel(applier->writer); fiber_join(applier->writer); applier->writer = NULL; } coio_close(loop(), &applier->io); /* Clear all unparsed input. */ ibuf_reinit(&applier->ibuf); fiber_gc(); } static int applier_f(va_list ap) { struct applier *applier = va_arg(ap, struct applier *); /* * Set correct session type for use in on_replace() * triggers. 
*/ current_session()->type = SESSION_TYPE_APPLIER; /* Re-connect loop */ while (!fiber_is_cancelled()) { try { applier_connect(applier); if (tt_uuid_is_nil(&REPLICASET_UUID)) { /* * Execute JOIN if this is a bootstrap. * The join will pause the applier * until WAL is created. */ applier_join(applier); } applier_subscribe(applier); /* * subscribe() has an infinite loop which * is stoppable only with fiber_cancel(). */ unreachable(); return 0; } catch (ClientError *e) { if (e->errcode() == ER_CONNECTION_TO_SELF && tt_uuid_is_equal(&applier->uuid, &INSTANCE_UUID)) { /* Connection to itself, stop applier */ applier_disconnect(applier, APPLIER_OFF); return 0; } else if (e->errcode() == ER_LOADING) { /* Autobootstrap */ applier_log_error(applier, e); applier_disconnect(applier, APPLIER_LOADING); goto reconnect; } else if (e->errcode() == ER_ACCESS_DENIED) { /* Invalid configuration */ applier_log_error(applier, e); applier_disconnect(applier, APPLIER_DISCONNECTED); goto reconnect; } else if (e->errcode() == ER_SYSTEM) { /* System error from master instance. */ applier_log_error(applier, e); applier_disconnect(applier, APPLIER_DISCONNECTED); goto reconnect; } else if (e->errcode() == ER_CFG) { /* Invalid configuration */ applier_log_error(applier, e); applier_disconnect(applier, APPLIER_DISCONNECTED); goto reconnect; } else { /* Unrecoverable errors */ applier_log_error(applier, e); applier_disconnect(applier, APPLIER_STOPPED); return -1; } } catch (FiberIsCancelled *e) { applier_disconnect(applier, APPLIER_OFF); break; } catch (SocketError *e) { applier_log_error(applier, e); applier_disconnect(applier, APPLIER_DISCONNECTED); goto reconnect; } catch (SystemError *e) { applier_log_error(applier, e); applier_disconnect(applier, APPLIER_DISCONNECTED); goto reconnect; } catch (Exception *e) { applier_log_error(applier, e); applier_disconnect(applier, APPLIER_STOPPED); return -1; } /* Put fiber_sleep() out of catch block. 
* * This is done to avoid the case when two or more * fibers yield inside their try/catch blocks and * throw an exception. Seems like the exception unwinder * uses global state inside the catch block. * * This could lead to incorrect exception processing * and crash the program. * * See: https://github.com/tarantool/tarantool/issues/136 */ reconnect: fiber_sleep(replication_reconnect_interval()); } return 0; } void applier_start(struct applier *applier) { char name[FIBER_NAME_MAX]; assert(applier->reader == NULL); int pos = snprintf(name, sizeof(name), "applier/"); uri_format(name + pos, sizeof(name) - pos, &applier->uri, false); struct fiber *f = fiber_new_xc(name, applier_f); /** * So that we can safely grab the status of the * fiber any time we want. */ fiber_set_joinable(f, true); applier->reader = f; fiber_start(f, applier); } void applier_stop(struct applier *applier) { struct fiber *f = applier->reader; if (f == NULL) return; fiber_cancel(f); fiber_join(f); applier_set_state(applier, APPLIER_OFF); applier->reader = NULL; } struct applier * applier_new(const char *uri, struct xstream *join_stream, struct xstream *subscribe_stream) { struct applier *applier = (struct applier *) calloc(1, sizeof(struct applier)); if (applier == NULL) { diag_set(OutOfMemory, sizeof(*applier), "malloc", "struct applier"); return NULL; } coio_create(&applier->io, -1); ibuf_create(&applier->ibuf, &cord()->slabc, 1024); /* uri_parse() sets pointers to applier->source buffer */ snprintf(applier->source, sizeof(applier->source), "%s", uri); int rc = uri_parse(&applier->uri, applier->source); /* URI checked by box_check_replication() */ assert(rc == 0 && applier->uri.service != NULL); (void) rc; applier->join_stream = join_stream; applier->subscribe_stream = subscribe_stream; applier->last_row_time = ev_monotonic_now(loop()); rlist_create(&applier->on_state); fiber_cond_create(&applier->resume_cond); fiber_cond_create(&applier->writer_cond); return applier; } void applier_delete(struct 
applier *applier) { assert(applier->reader == NULL && applier->writer == NULL); ibuf_destroy(&applier->ibuf); assert(applier->io.fd == -1); trigger_destroy(&applier->on_state); fiber_cond_destroy(&applier->resume_cond); fiber_cond_destroy(&applier->writer_cond); free(applier); } void applier_resume(struct applier *applier) { assert(!fiber_is_dead(applier->reader)); applier->is_paused = false; fiber_cond_signal(&applier->resume_cond); } void applier_pause(struct applier *applier) { /* Sleep until applier_resume() wake us up */ assert(fiber() == applier->reader); assert(!applier->is_paused); applier->is_paused = true; while (applier->is_paused && !fiber_is_cancelled()) fiber_cond_wait(&applier->resume_cond); } struct applier_on_state { struct trigger base; struct applier *applier; enum applier_state desired_state; struct fiber_cond wakeup; }; static void applier_on_state_f(struct trigger *trigger, void *event) { (void) event; struct applier_on_state *on_state = container_of(trigger, struct applier_on_state, base); struct applier *applier = on_state->applier; if (applier->state != APPLIER_OFF && applier->state != APPLIER_STOPPED && applier->state != on_state->desired_state) return; /* Wake up waiter */ fiber_cond_signal(&on_state->wakeup); applier_pause(applier); } static inline void applier_add_on_state(struct applier *applier, struct applier_on_state *trigger, enum applier_state desired_state) { trigger_create(&trigger->base, applier_on_state_f, NULL, NULL); trigger->applier = applier; fiber_cond_create(&trigger->wakeup); trigger->desired_state = desired_state; trigger_add(&applier->on_state, &trigger->base); } static inline void applier_clear_on_state(struct applier_on_state *trigger) { fiber_cond_destroy(&trigger->wakeup); trigger_clear(&trigger->base); } static inline int applier_wait_for_state(struct applier_on_state *trigger, double timeout) { struct applier *applier = trigger->applier; double deadline = ev_monotonic_now(loop()) + timeout; while (applier->state 
!= APPLIER_OFF && applier->state != APPLIER_STOPPED && applier->state != trigger->desired_state) { if (fiber_cond_wait_deadline(&trigger->wakeup, deadline) != 0) return -1; /* ER_TIMEOUT */ } if (applier->state != trigger->desired_state) { assert(applier->state == APPLIER_OFF || applier->state == APPLIER_STOPPED); /* Re-throw the original error */ assert(!diag_is_empty(&applier->reader->diag)); diag_move(&applier->reader->diag, &fiber()->diag); return -1; } return 0; } void applier_resume_to_state(struct applier *applier, enum applier_state state, double timeout) { struct applier_on_state trigger; applier_add_on_state(applier, &trigger, state); applier_resume(applier); int rc = applier_wait_for_state(&trigger, timeout); applier_clear_on_state(&trigger); if (rc != 0) diag_raise(); assert(applier->state == state); } tarantool_1.9.1.26.g63eb81e3c/src/box/engine.c0000664000000000000000000001016113306565107017203 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "engine.h" #include #include #include RLIST_HEAD(engines); /** Register engine instance. */ void engine_register(struct engine *engine) { static int n_engines; rlist_add_tail_entry(&engines, engine, link); engine->id = n_engines++; } /** Find engine by name. */ struct engine * engine_by_name(const char *name) { struct engine *e; engine_foreach(e) { if (strcmp(e->name, name) == 0) return e; } return NULL; } void engine_shutdown(void) { struct engine *engine, *tmp; rlist_foreach_entry_safe(engine, &engines, link, tmp) { engine->vtab->shutdown(engine); } } int engine_bootstrap(void) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->bootstrap(engine) != 0) return -1; } return 0; } int engine_begin_initial_recovery(const struct vclock *recovery_vclock) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->begin_initial_recovery(engine, recovery_vclock) != 0) return -1; } return 0; } int engine_begin_final_recovery(void) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->begin_final_recovery(engine) != 0) return -1; } return 0; } int engine_end_recovery(void) { /* * For all new spaces created after recovery is complete, * when the primary key is added, enable all keys. 
*/ struct engine *engine; engine_foreach(engine) { if (engine->vtab->end_recovery(engine) != 0) return -1; } return 0; } int engine_begin_checkpoint(void) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->begin_checkpoint(engine) < 0) return -1; } return 0; } int engine_commit_checkpoint(struct vclock *vclock) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->wait_checkpoint(engine, vclock) < 0) return -1; } engine_foreach(engine) { engine->vtab->commit_checkpoint(engine, vclock); } return 0; } void engine_abort_checkpoint(void) { struct engine *engine; engine_foreach(engine) engine->vtab->abort_checkpoint(engine); } int engine_collect_garbage(int64_t lsn) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->collect_garbage(engine, lsn) < 0) return -1; } return 0; } int engine_backup(struct vclock *vclock, engine_backup_cb cb, void *cb_arg) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->backup(engine, vclock, cb, cb_arg) < 0) return -1; } return 0; } int engine_join(struct vclock *vclock, struct xstream *stream) { struct engine *engine; engine_foreach(engine) { if (engine->vtab->join(engine, vclock, stream) != 0) return -1; } return 0; } void engine_memory_stat(struct engine_memory_stat *stat) { memset(stat, 0, sizeof(*stat)); struct engine *engine; engine_foreach(engine) engine->vtab->memory_stat(engine, stat); } tarantool_1.9.1.26.g63eb81e3c/src/box/port.c0000664000000000000000000001000313306565107016715 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "port.h" #include "tuple.h" #include "tuple_convert.h" #include #include #include #include #include "errinj.h" static struct mempool port_tuple_entry_pool; int port_tuple_add(struct port *base, struct tuple *tuple) { struct port_tuple *port = port_tuple(base); struct port_tuple_entry *e; if (port->size == 0) { if (tuple_ref(tuple) != 0) return -1; e = &port->first_entry; port->first = port->last = e; } else { e = mempool_alloc(&port_tuple_entry_pool); if (e == NULL) { diag_set(OutOfMemory, sizeof(*e), "mempool_alloc", "e"); return -1; } if (tuple_ref(tuple) != 0) { mempool_free(&port_tuple_entry_pool, e); return -1; } port->last->next = e; port->last = e; } e->tuple = tuple; e->next = NULL; ++port->size; return 0; } void port_tuple_create(struct port *base) { struct port_tuple *port = (struct port_tuple *)base; port->vtab = &port_tuple_vtab; port->size = 0; port->first = NULL; port->last = NULL; } static void port_tuple_destroy(struct port *base) { struct port_tuple *port = port_tuple(base); struct port_tuple_entry *e = port->first; if 
(e == NULL) return; tuple_unref(e->tuple); e = e->next; while (e != NULL) { struct port_tuple_entry *cur = e; e = e->next; tuple_unref(cur->tuple); mempool_free(&port_tuple_entry_pool, cur); } } static int port_tuple_dump_16(struct port *base, struct obuf *out) { struct port_tuple *port = port_tuple(base); struct port_tuple_entry *pe; for (pe = port->first; pe != NULL; pe = pe->next) { if (tuple_to_obuf(pe->tuple, out) != 0) return -1; ERROR_INJECT(ERRINJ_PORT_DUMP, { diag_set(OutOfMemory, tuple_size(pe->tuple), "obuf_dup", "data"); return -1; }); } return port->size; } static int port_tuple_dump(struct port *base, struct obuf *out) { struct port_tuple *port = port_tuple(base); char *size_buf = obuf_alloc(out, mp_sizeof_array(port->size)); if (size_buf == NULL) return -1; mp_encode_array(size_buf, port->size); if (port_tuple_dump_16(base, out) < 0) return -1; return 1; } void port_destroy(struct port *port) { return port->vtab->destroy(port); } int port_dump(struct port *port, struct obuf *out) { return port->vtab->dump(port, out); } int port_dump_16(struct port *port, struct obuf *out) { return port->vtab->dump_16(port, out); } void port_init(void) { mempool_create(&port_tuple_entry_pool, &cord()->slabc, sizeof(struct port_tuple_entry)); } void port_free(void) { mempool_destroy(&port_tuple_entry_pool); } const struct port_vtab port_tuple_vtab = { .dump = port_tuple_dump, .dump_16 = port_tuple_dump_16, .destroy = port_tuple_destroy, }; tarantool_1.9.1.26.g63eb81e3c/src/box/vy_upsert.h0000664000000000000000000000545013306565107020010 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_UPSERT_H #define INCLUDES_TARANTOOL_BOX_VY_UPSERT_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct vy_stat; struct key_def; struct tuple_format; /** * Apply the UPSERT statement to the REPLACE, UPSERT or DELETE statement. * If the second statement is * - REPLACE then update operations of the first one will be applied to the * second and a REPLACE statement will be returned; * * - UPSERT then the new UPSERT will be created with combined operations of both * arguments; * * - DELETE or NULL then the first one will be turned into REPLACE and returned * as the result; * * @param new_stmt An UPSERT statement. * @param old_stmt An REPLACE/DELETE/UPSERT statement or NULL. * @param cmp_def Key definition of an index, with primary parts. * @param format Format for REPLACE/DELETE tuples. * @param upsert_format Format for UPSERT tuples. * @param suppress_error True if ClientErrors must not be written to log. 
* * @retval NULL Memory allocation error. * @retval not NULL Success. */ struct tuple * vy_apply_upsert(const struct tuple *new_stmt, const struct tuple *old_stmt, const struct key_def *cmp_def, struct tuple_format *format, struct tuple_format *upsert_format, bool suppress_error); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_UPSERT_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/opt_def.c0000664000000000000000000001270413306565107017363 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_def.h" #include "msgpuck.h" #include "bit/bit.h" #include "small/region.h" #include "error.h" #include "diag.h" const char *opt_type_strs[] = { /* [OPT_BOOL] = */ "boolean", /* [OPT_UINT32] = */ "unsigned", /* [OPT_INT64] = */ "integer", /* [OPT_FLOAT] = */ "float", /* [OPT_STR] = */ "string", /* [OPT_STRPTR] = */ "string", /* [OPT_ENUM] = */ "enum", }; static int opt_set(void *opts, const struct opt_def *def, const char **val, struct region *region) { int64_t ival; uint64_t uval; double dval; uint32_t str_len; const char *str; char *ptr; char *opt = ((char *) opts) + def->offset; switch (def->type) { case OPT_BOOL: if (mp_typeof(**val) != MP_BOOL) return -1; store_bool(opt, mp_decode_bool(val)); break; case OPT_UINT32: if (mp_typeof(**val) != MP_UINT) return -1; uval = mp_decode_uint(val); if (uval > UINT32_MAX) return -1; store_u32(opt, uval); break; case OPT_INT64: if (mp_read_int64(val, &ival) != 0) return -1; store_u64(opt, ival); break; case OPT_FLOAT: if (mp_read_double(val, &dval) != 0) return -1; store_double(opt, dval); break; case OPT_STR: if (mp_typeof(**val) != MP_STR) return -1; str = mp_decode_str(val, &str_len); str_len = MIN(str_len, def->len - 1); memcpy(opt, str, str_len); opt[str_len] = '\0'; break; case OPT_STRPTR: if (mp_typeof(**val) != MP_STR) return -1; str = mp_decode_str(val, &str_len); if (str_len > 0) { ptr = (char *) region_alloc(region, str_len + 1); if (ptr == NULL) { diag_set(OutOfMemory, str_len + 1, "region", "opt string"); return -1; } memcpy(ptr, str, str_len); ptr[str_len] = '\0'; assert (strlen(ptr) == str_len); } else { ptr = NULL; } *(const char **)opt = ptr; break; case OPT_ENUM: if (mp_typeof(**val) != MP_STR) return -1; str = mp_decode_str(val, &str_len); if (def->to_enum == NULL) { ival = strnindex(def->enum_strs, str, str_len, def->enum_max); } else { ival = def->to_enum(str, str_len); } switch(def->enum_size) { case sizeof(uint8_t): store_u8(opt, (uint8_t)ival); break; case sizeof(uint16_t): 
store_u16(opt, (uint16_t)ival); break; case sizeof(uint32_t): store_u32(opt, (uint32_t)ival); break; case sizeof(uint64_t): store_u64(opt, (uint64_t)ival); break; default: unreachable(); }; break; default: unreachable(); } return 0; } int opts_parse_key(void *opts, const struct opt_def *reg, const char *key, uint32_t key_len, const char **data, uint32_t errcode, uint32_t field_no, struct region *region, bool skip_unknown_options) { char errmsg[DIAG_ERRMSG_MAX]; bool found = false; for (const struct opt_def *def = reg; def->name != NULL; def++) { if (key_len != strlen(def->name) || memcmp(key, def->name, key_len) != 0) continue; if (opt_set(opts, def, data, region) != 0) { snprintf(errmsg, sizeof(errmsg), "'%.*s' must be %s", key_len, key, opt_type_strs[def->type]); diag_set(ClientError, errcode, field_no, errmsg); return -1; } found = true; break; } if (!found) { if (skip_unknown_options) { mp_next(data); } else { snprintf(errmsg, sizeof(errmsg), "unexpected option '%.*s'", key_len, key); diag_set(ClientError, errcode, field_no, errmsg); return -1; } } return 0; } /** * Populate key options from their msgpack-encoded representation * (msgpack map). */ int opts_decode(void *opts, const struct opt_def *reg, const char **map, uint32_t errcode, uint32_t field_no, struct region *region) { assert(mp_typeof(**map) == MP_MAP); /* * The implementation below has O(map_size * reg_size) complexity. * DDL is not performance-critical, so this is not a problem. 
*/ uint32_t map_size = mp_decode_map(map); for (uint32_t i = 0; i < map_size; i++) { if (mp_typeof(**map) != MP_STR) { diag_set(ClientError, errcode, field_no, "key must be a string"); return -1; } uint32_t key_len; const char *key = mp_decode_str(map, &key_len); if (opts_parse_key(opts, reg, key, key_len, map, errcode, field_no, region, false) != 0) return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_tree.h0000664000000000000000000000661613306565107020126 0ustar rootroot#ifndef TARANTOOL_BOX_MEMTX_TREE_H_INCLUDED #define TARANTOOL_BOX_MEMTX_TREE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include "index.h" #include "memtx_engine.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct memtx_engine; /** * Struct that is used as a key in BPS tree definition. */ struct memtx_tree_key_data { /** Sequence of msgpacked search fields */ const char *key; /** Number of msgpacked search fields */ uint32_t part_count; }; /** * BPS tree element vs key comparator. * Defined in header in order to allow compiler to inline it. * @param tuple - tuple to compare. * @param key_data - key to compare with. * @param def - key definition. * @retval 0 if tuple == key in terms of def. * @retval <0 if tuple < key in terms of def. * @retval >0 if tuple > key in terms of def. */ static inline int memtx_tree_compare_key(const struct tuple *tuple, const struct memtx_tree_key_data *key_data, struct key_def *def) { return tuple_compare_with_key(tuple, key_data->key, key_data->part_count, def); } #define BPS_TREE_NAME memtx_tree #define BPS_TREE_BLOCK_SIZE (512) #define BPS_TREE_EXTENT_SIZE MEMTX_EXTENT_SIZE #define BPS_TREE_COMPARE(a, b, arg) tuple_compare(a, b, arg) #define BPS_TREE_COMPARE_KEY(a, b, arg) memtx_tree_compare_key(a, b, arg) #define bps_tree_elem_t struct tuple * #define bps_tree_key_t struct memtx_tree_key_data * #define bps_tree_arg_t struct key_def * #include "salad/bps_tree.h" #undef BPS_TREE_NAME #undef BPS_TREE_BLOCK_SIZE #undef BPS_TREE_EXTENT_SIZE #undef BPS_TREE_COMPARE #undef BPS_TREE_COMPARE_KEY #undef bps_tree_elem_t #undef bps_tree_key_t #undef bps_tree_arg_t struct memtx_tree_index { struct index base; struct memtx_tree tree; struct tuple **build_array; size_t build_array_size, build_array_alloc_size; }; struct memtx_tree_index * memtx_tree_index_new(struct memtx_engine *memtx, struct index_def *def); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_MEMTX_TREE_H_INCLUDED */ 
tarantool_1.9.1.26.g63eb81e3c/src/box/coll_cache.h0000664000000000000000000000447413306565107020031 0ustar rootroot#ifndef TARANTOOL_BOX_COLL_CACHE_H_INCLUDED #define TARANTOOL_BOX_COLL_CACHE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "coll.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * Create global hash tables. * @return - 0 on success, -1 on memory error. */ int coll_cache_init(); /** Delete global hash tables. */ void coll_cache_destroy(); /** * Insert or replace a collation into collation cache. * @param coll - collation to insert/replace. * @param replaced - collation that was replaced. * @return - 0 on success, -1 on memory error. 
*/ int coll_cache_replace(struct coll *coll, struct coll **replaced); /** * Delete a collation from collation cache. * @param coll - collation to delete. */ void coll_cache_delete(const struct coll *coll); /** * Find a collation object by its id. */ struct coll * coll_by_id(uint32_t id); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_COLL_CACHE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vy_scheduler.h0000664000000000000000000001544613306565107020452 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_VY_SCHEDULER_H #define INCLUDES_TARANTOOL_BOX_VY_SCHEDULER_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include "diag.h" #include "fiber_cond.h" #define HEAP_FORWARD_DECLARATION #include "salad/heap.h" #include "salad/stailq.h" #include "tt_pthread.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct cord; struct fiber; struct vy_index; struct vy_run_env; struct vy_scheduler; typedef void (*vy_scheduler_dump_complete_f)(struct vy_scheduler *scheduler, int64_t dump_generation, double dump_duration); struct vy_scheduler { /** Scheduler fiber. */ struct fiber *scheduler_fiber; /** Scheduler event loop. */ struct ev_loop *scheduler_loop; /** Used to wake up the scheduler fiber from TX. */ struct fiber_cond scheduler_cond; /** Used to wake up the scheduler from a worker thread. */ struct ev_async scheduler_async; /** * Array of worker threads used for performing * dump/compaction tasks. */ struct cord *worker_pool; /** Set if the worker threads are running. */ bool is_worker_pool_running; /** Total number of worker threads. */ int worker_pool_size; /** Number worker threads that are currently idle. */ int workers_available; /** Memory pool used for allocating vy_task objects. */ struct mempool task_pool; /** Queue of pending tasks, linked by vy_task::link. */ struct stailq input_queue; /** Queue of processed tasks, linked by vy_task::link. */ struct stailq output_queue; /** * Signaled to wake up a worker when there is * a pending task in the input queue. Also used * to stop worker threads on shutdown. */ pthread_cond_t worker_cond; /** * Mutex protecting input and output queues and * the condition variable used to wake up worker * threads. */ pthread_mutex_t mutex; /** * Heap of indexes, ordered by dump priority, * linked by vy_index::in_dump. */ heap_t dump_heap; /** * Heap of indexes, ordered by compaction priority, * linked by vy_index::in_compact. */ heap_t compact_heap; /** Last error seen by the scheduler. */ struct diag diag; /** * Scheduler timeout. 
Grows exponentially with each * successive failure. Reset on successful task completion. */ double timeout; /** Set if the scheduler is throttled due to errors. */ bool is_throttled; /** Set if checkpoint is in progress. */ bool checkpoint_in_progress; /** * In order to guarantee checkpoint consistency, we must not * dump in-memory trees created after checkpoint was started * so we set this flag instead, which will make the scheduler * schedule a dump as soon as checkpoint is complete. */ bool dump_pending; /** * Current generation of in-memory data. * * New in-memory trees inherit the current generation, while * the scheduler dumps all in-memory trees whose generation * is less. The generation is increased either on checkpoint * or on exceeding the memory quota to force dumping all old * in-memory trees. */ int64_t generation; /** * Generation of in-memory data currently being dumped. * * If @dump_generation < @generation, the scheduler is dumping * in-memory trees created at @dump_generation. When all such * trees have been dumped, it bumps @dump_generation and frees * memory. * * If @dump_generation == @generation, dump have been completed * and the scheduler won't schedule a dump task until @generation * is bumped, which may happen either on exceeding the memory * quota or on checkpoint. * * Throughout the code, a process of dumping all in-memory trees * at @dump_generation is called 'dump round'. */ int64_t dump_generation; /** Number of dump tasks that are currently in progress. */ int dump_task_count; /** Time when the current dump round started. */ double dump_start; /** Signaled on dump round completion. */ struct fiber_cond dump_cond; /** * Function called by the scheduler upon dump round * completion. It is supposed to free memory released * by the dump. */ vy_scheduler_dump_complete_f dump_complete_cb; /** List of read views, see tx_manager::read_views. */ struct rlist *read_views; /** Context needed for writing runs. 
*/ struct vy_run_env *run_env; }; /** * Create a scheduler instance. */ void vy_scheduler_create(struct vy_scheduler *scheduler, int write_threads, vy_scheduler_dump_complete_f dump_complete_cb, struct vy_run_env *run_env, struct rlist *read_views); /** * Destroy a scheduler instance. */ void vy_scheduler_destroy(struct vy_scheduler *scheduler); /** * Add an index to scheduler dump/compaction queues. */ void vy_scheduler_add_index(struct vy_scheduler *, struct vy_index *); /** * Remove an index from scheduler dump/compaction queues. */ void vy_scheduler_remove_index(struct vy_scheduler *, struct vy_index *); /** * Trigger dump of all currently existing in-memory trees. */ void vy_scheduler_trigger_dump(struct vy_scheduler *scheduler); /** * Schedule a checkpoint. Please call vy_scheduler_wait_checkpoint() * after that. */ int vy_scheduler_begin_checkpoint(struct vy_scheduler *); /** * Wait for checkpoint. Please call vy_scheduler_end_checkpoint() * after that. */ int vy_scheduler_wait_checkpoint(struct vy_scheduler *); /** * End checkpoint. Called on both checkpoint commit and abort. */ void vy_scheduler_end_checkpoint(struct vy_scheduler *); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_VY_SCHEDULER_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/iproto_constants.c0000664000000000000000000001424413306565107021354 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "iproto_constants.h" const unsigned char iproto_key_type[IPROTO_KEY_MAX] = { /* {{{ header */ /* 0x00 */ MP_UINT, /* IPROTO_REQUEST_TYPE */ /* 0x01 */ MP_UINT, /* IPROTO_SYNC */ /* 0x02 */ MP_UINT, /* IPROTO_REPLICA_ID */ /* 0x03 */ MP_UINT, /* IPROTO_LSN */ /* 0x04 */ MP_DOUBLE, /* IPROTO_TIMESTAMP */ /* 0x05 */ MP_UINT, /* IPROTO_SCHEMA_VERSION */ /* 0x06 */ MP_UINT, /* IPROTO_SERVER_VERSION */ /* 0x07 */ MP_UINT, /* IPROTO_SERVER_IS_RO */ /* }}} */ /* {{{ unused */ /* 0x08 */ MP_UINT, /* 0x09 */ MP_UINT, /* 0x0a */ MP_UINT, /* 0x0b */ MP_UINT, /* 0x0c */ MP_UINT, /* 0x0d */ MP_UINT, /* 0x0e */ MP_UINT, /* 0x0f */ MP_UINT, /* }}} */ /* {{{ body -- integer keys */ /* 0x10 */ MP_UINT, /* IPROTO_SPACE_ID */ /* 0x11 */ MP_UINT, /* IPROTO_INDEX_ID */ /* 0x12 */ MP_UINT, /* IPROTO_LIMIT */ /* 0x13 */ MP_UINT, /* IPROTO_OFFSET */ /* 0x14 */ MP_UINT, /* IPROTO_ITERATOR */ /* 0x15 */ MP_UINT, /* IPROTO_INDEX_BASE */ /* }}} */ /* {{{ unused */ /* 0x16 */ MP_UINT, /* 0x17 */ MP_UINT, /* 0x18 */ MP_UINT, /* 0x19 */ MP_UINT, /* 0x1a */ MP_UINT, /* 0x1b */ MP_UINT, /* 0x1c */ MP_UINT, /* 0x1d */ MP_UINT, /* 0x1e */ MP_UINT, /* 0x1f */ MP_UINT, /* }}} */ /* {{{ body -- all keys */ /* 0x20 */ MP_ARRAY, /* IPROTO_KEY */ /* 0x21 */ MP_ARRAY, /* IPROTO_TUPLE 
*/ /* 0x22 */ MP_STR, /* IPROTO_FUNCTION_NAME */ /* 0x23 */ MP_STR, /* IPROTO_USER_NAME */ /* 0x24 */ MP_STR, /* IPROTO_INSTANCE_UUID */ /* 0x25 */ MP_STR, /* IPROTO_CLUSTER_UUID */ /* 0x26 */ MP_MAP, /* IPROTO_VCLOCK */ /* 0x27 */ MP_STR, /* IPROTO_EXPR */ /* 0x28 */ MP_ARRAY, /* IPROTO_OPS */ /* }}} */ }; const char *iproto_type_strs[] = { NULL, "SELECT", "INSERT", "REPLACE", "UPDATE", "DELETE", NULL, /* CALL_16 */ "AUTH", "EVAL", "UPSERT", "CALL", NULL, /* reserved */ NULL, /* NOP */ }; #define bit(c) (1ULL< ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tuple_compare.h" #include "tuple.h" #include "trivia/util.h" /* NOINLINE */ #include #include "coll_def.h" /* {{{ tuple_compare */ /* * Compare two tuple fields. * Separate version exists since compare is a very * often used operation, so any performance speed up * in it can have dramatic impact on the overall * performance. 
*/ inline __attribute__((always_inline)) int mp_compare_uint(const char **data_a, const char **data_b); enum mp_class { MP_CLASS_NIL = 0, MP_CLASS_BOOL, MP_CLASS_NUMBER, MP_CLASS_STR, MP_CLASS_BIN, MP_CLASS_ARRAY, MP_CLASS_MAP }; static enum mp_class mp_classes[] = { /* .MP_NIL = */ MP_CLASS_NIL, /* .MP_UINT = */ MP_CLASS_NUMBER, /* .MP_INT = */ MP_CLASS_NUMBER, /* .MP_STR = */ MP_CLASS_STR, /* .MP_BIN = */ MP_CLASS_BIN, /* .MP_ARRAY = */ MP_CLASS_ARRAY, /* .MP_MAP = */ MP_CLASS_MAP, /* .MP_BOOL = */ MP_CLASS_BOOL, /* .MP_FLOAT = */ MP_CLASS_NUMBER, /* .MP_DOUBLE = */ MP_CLASS_NUMBER, /* .MP_BIN = */ MP_CLASS_BIN }; #define COMPARE_RESULT(a, b) (a < b ? -1 : a > b) static enum mp_class mp_classof(enum mp_type type) { return mp_classes[type]; } static int mp_compare_bool(const char *field_a, const char *field_b) { int a_val = mp_decode_bool(&field_a); int b_val = mp_decode_bool(&field_b); return COMPARE_RESULT(a_val, b_val); } static int mp_compare_integer_with_hint(const char *field_a, enum mp_type a_type, const char *field_b, enum mp_type b_type) { assert(mp_classof(a_type) == MP_CLASS_NUMBER); assert(mp_classof(b_type) == MP_CLASS_NUMBER); if (a_type == MP_UINT) { uint64_t a_val = mp_decode_uint(&field_a); if (b_type == MP_UINT) { uint64_t b_val = mp_decode_uint(&field_b); return COMPARE_RESULT(a_val, b_val); } else { int64_t b_val = mp_decode_int(&field_b); if (b_val < 0) return 1; return COMPARE_RESULT(a_val, (uint64_t)b_val); } } else { int64_t a_val = mp_decode_int(&field_a); if (b_type == MP_UINT) { uint64_t b_val = mp_decode_uint(&field_b); if (a_val < 0) return -1; return COMPARE_RESULT((uint64_t)a_val, b_val); } else { int64_t b_val = mp_decode_int(&field_b); return COMPARE_RESULT(a_val, b_val); } } } #define EXP2_53 9007199254740992.0 /* 2.0 ^ 53 */ #define EXP2_64 1.8446744073709552e+19 /* 2.0 ^ 64 */ /* * Compare LHS with RHS, return a value <0, 0 or >0 depending on the * comparison result (strcmp-style). * Normally, K==1. 
If K==-1, the result is inverted (as if LHS and RHS * were swapped). * K is needed to enable tail call optimization in Release build. * NOINLINE attribute was added to avoid aggressive inlining which * resulted in over 2Kb code size for mp_compare_number. */ NOINLINE static int mp_compare_double_uint64(double lhs, uint64_t rhs, int k) { assert(k==1 || k==-1); /* * IEEE double represents 2^N precisely. * The value below is 2^53. If a double exceeds this threshold, * there's no fractional part. Moreover, the "next" float is * 2^53+2, i.e. there's not enough precision to encode even some * "odd" integers. * Note: ">=" is important, see next block. */ if (lhs >= EXP2_53) { /* * The value below is 2^64. * Note: UINT64_MAX is 2^64-1, hence ">=" */ if (lhs >= EXP2_64) return k; /* Within [2^53, 2^64) double->uint64_t is lossless. */ assert((double)(uint64_t)lhs == lhs); return k*COMPARE_RESULT((uint64_t)lhs, rhs); } /* * According to the IEEE 754 the double format is the * following: * +------+----------+----------+ * | sign | exponent | fraction | * +------+----------+----------+ * 1 bit 11 bits 52 bits * If the exponent is 0x7FF, the value is a special one. * Special value can be NaN, +inf and -inf. * If the fraction == 0, the value is inf. Sign depends on * the sign bit. * If the first bit of the fraction is 1, the value is the * quiet NaN, else the signaling NaN. */ if (!isnan(lhs)) { /* * lhs is a number or inf. * If RHS < 2^53, uint64_t->double is lossless. * Otherwize the value may get rounded. It's * unspecified whether it gets rounded up or down, * i.e. the conversion may yield 2^53 for a * RHS > 2^53. Since we've aready ensured that * LHS < 2^53, the result is still correct even if * rounding happens. */ assert(lhs < EXP2_53); assert((uint64_t)(double)rhs == rhs || rhs > (uint64_t)EXP2_53); return k*COMPARE_RESULT(lhs, (double)rhs); } /* * Lhs is NaN. We assume all NaNs to be less than any * number. 
*/ return -k; } static int mp_compare_double_any_int(double lhs, const char *rhs, enum mp_type rhs_type, int k) { if (rhs_type == MP_INT) { int64_t v = mp_decode_int(&rhs); if (v < 0) { return mp_compare_double_uint64(-lhs, (uint64_t)-v, -k); } return mp_compare_double_uint64(lhs, (uint64_t)v, k); } assert(rhs_type == MP_UINT); return mp_compare_double_uint64(lhs, mp_decode_uint(&rhs), k); } static int mp_compare_double_any_number(double lhs, const char *rhs, enum mp_type rhs_type, int k) { double v; if (rhs_type == MP_FLOAT) v = mp_decode_float(&rhs); else if (rhs_type == MP_DOUBLE) v = mp_decode_double(&rhs); else return mp_compare_double_any_int(lhs, rhs, rhs_type, k); int lhs_is_nan = isnan(lhs); int rhs_is_nan = isnan(v); assert(lhs_is_nan == 1 || lhs_is_nan == 0); assert(rhs_is_nan == 1 || rhs_is_nan == 0); if (lhs_is_nan == 0 && rhs_is_nan == 0) { return k * COMPARE_RESULT(lhs, v); } else if (lhs_is_nan != rhs_is_nan) { /* * lhs | lhs_isNaN | rhs | rhs_isNaN | ret * -------+-----------+--------+-----------+----- * NaN | 1 | number | 0 | -1 * number | 0 | NaN | 1 | 1 */ return k * (rhs_is_nan - lhs_is_nan); } /* * Both NaNs. Compare signaling and quiet NaNs by * 'quiet bit'. */ uint64_t lqbit = *((uint64_t *)&lhs) & (uint64_t)0x8000000000000; uint64_t rqbit = *((uint64_t *)&v) & (uint64_t)0x8000000000000; /* * Lets consider the quiet NaN (fraction first bit == 1) * to be bigger than signaling NaN (fraction first * bit == 0). 
*/ return k * COMPARE_RESULT(lqbit, rqbit); } static int mp_compare_number_with_hint(const char *lhs, enum mp_type lhs_type, const char *rhs, enum mp_type rhs_type) { assert(mp_classof(lhs_type) == MP_CLASS_NUMBER); assert(mp_classof(rhs_type) == MP_CLASS_NUMBER); if (rhs_type == MP_FLOAT) { return mp_compare_double_any_number( mp_decode_float(&rhs), lhs, lhs_type, -1 ); } if (rhs_type == MP_DOUBLE) { return mp_compare_double_any_number( mp_decode_double(&rhs), lhs, lhs_type, -1 ); } assert(rhs_type == MP_INT || rhs_type == MP_UINT); if (lhs_type == MP_FLOAT) { return mp_compare_double_any_int( mp_decode_float(&lhs), rhs, rhs_type, 1 ); } if (lhs_type == MP_DOUBLE) { return mp_compare_double_any_int( mp_decode_double(&lhs), rhs, rhs_type, 1 ); } assert(lhs_type == MP_INT || lhs_type == MP_UINT); return mp_compare_integer_with_hint(lhs, lhs_type, rhs, rhs_type); } static inline int mp_compare_number(const char *lhs, const char *rhs) { return mp_compare_number_with_hint(lhs, mp_typeof(*lhs), rhs, mp_typeof(*rhs)); } static inline int mp_compare_str(const char *field_a, const char *field_b) { uint32_t size_a = mp_decode_strl(&field_a); uint32_t size_b = mp_decode_strl(&field_b); int r = memcmp(field_a, field_b, MIN(size_a, size_b)); if (r != 0) return r; return COMPARE_RESULT(size_a, size_b); } static inline int mp_compare_str_coll(const char *field_a, const char *field_b, struct coll *coll) { uint32_t size_a = mp_decode_strl(&field_a); uint32_t size_b = mp_decode_strl(&field_b); return coll->cmp(field_a, size_a, field_b, size_b, coll); } static inline int mp_compare_bin(const char *field_a, const char *field_b) { uint32_t size_a = mp_decode_binl(&field_a); uint32_t size_b = mp_decode_binl(&field_b); int r = memcmp(field_a, field_b, MIN(size_a, size_b)); if (r != 0) return r; return COMPARE_RESULT(size_a, size_b); } typedef int (*mp_compare_f)(const char *, const char *); static mp_compare_f mp_class_comparators[] = { /* .MP_CLASS_NIL = */ NULL, /* .MP_CLASS_BOOL = */ 
mp_compare_bool, /* .MP_CLASS_NUMBER = */ mp_compare_number, /* .MP_CLASS_STR = */ mp_compare_str, /* .MP_CLASS_BIN = */ mp_compare_bin, /* .MP_CLASS_ARRAY = */ NULL, /* .MP_CLASS_MAP = */ NULL, }; static int mp_compare_scalar_with_hint(const char *field_a, enum mp_type a_type, const char *field_b, enum mp_type b_type) { enum mp_class a_class = mp_classof(a_type); enum mp_class b_class = mp_classof(b_type); if (a_class != b_class) return COMPARE_RESULT(a_class, b_class); mp_compare_f cmp = mp_class_comparators[a_class]; assert(cmp != NULL); return cmp(field_a, field_b); } static inline int mp_compare_scalar(const char *field_a, const char *field_b) { return mp_compare_scalar_with_hint(field_a, mp_typeof(*field_a), field_b, mp_typeof(*field_b)); } static inline int mp_compare_scalar_coll(const char *field_a, const char *field_b, struct coll *coll) { enum mp_type type_a = mp_typeof(*field_a); enum mp_type type_b = mp_typeof(*field_b); if (type_a == MP_STR && type_b == MP_STR) return mp_compare_str_coll(field_a, field_b, coll); return mp_compare_scalar_with_hint(field_a, type_a, field_b, type_b); } /** * @brief Compare two fields parts using a type definition * @param field_a field * @param field_b field * @param field_type field type definition * @retval 0 if field_a == field_b * @retval <0 if field_a < field_b * @retval >0 if field_a > field_b */ static int tuple_compare_field(const char *field_a, const char *field_b, int8_t type, struct coll *coll) { switch (type) { case FIELD_TYPE_UNSIGNED: return mp_compare_uint(field_a, field_b); case FIELD_TYPE_STRING: return coll != NULL ? mp_compare_str_coll(field_a, field_b, coll) : mp_compare_str(field_a, field_b); case FIELD_TYPE_INTEGER: return mp_compare_integer_with_hint(field_a, mp_typeof(*field_a), field_b, mp_typeof(*field_b)); case FIELD_TYPE_NUMBER: return mp_compare_number(field_a, field_b); case FIELD_TYPE_BOOLEAN: return mp_compare_bool(field_a, field_b); case FIELD_TYPE_SCALAR: return coll != NULL ? 
mp_compare_scalar_coll(field_a, field_b, coll) : mp_compare_scalar(field_a, field_b); default: unreachable(); return 0; } } static int tuple_compare_field_with_hint(const char *field_a, enum mp_type a_type, const char *field_b, enum mp_type b_type, int8_t type, struct coll *coll) { switch (type) { case FIELD_TYPE_UNSIGNED: return mp_compare_uint(field_a, field_b); case FIELD_TYPE_STRING: return coll != NULL ? mp_compare_str_coll(field_a, field_b, coll) : mp_compare_str(field_a, field_b); case FIELD_TYPE_INTEGER: return mp_compare_integer_with_hint(field_a, a_type, field_b, b_type); case FIELD_TYPE_NUMBER: return mp_compare_number_with_hint(field_a, a_type, field_b, b_type); case FIELD_TYPE_BOOLEAN: return mp_compare_bool(field_a, field_b); case FIELD_TYPE_SCALAR: return coll != NULL ? mp_compare_scalar_coll(field_a, field_b, coll) : mp_compare_scalar_with_hint(field_a, a_type, field_b, b_type); default: unreachable(); return 0; } } template static inline int tuple_compare_slowpath(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_def *key_def) { assert(!has_optional_parts || is_nullable); assert(is_nullable == key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); const struct key_part *part = key_def->parts; const char *tuple_a_raw = tuple_data(tuple_a); const char *tuple_b_raw = tuple_data(tuple_b); if (key_def->part_count == 1 && part->fieldno == 0) { /* * First field can not be optional - empty tuples * can not exist. */ assert(!has_optional_parts); mp_decode_array(&tuple_a_raw); mp_decode_array(&tuple_b_raw); if (! is_nullable) { return tuple_compare_field(tuple_a_raw, tuple_b_raw, part->type, part->coll); } enum mp_type a_type = mp_typeof(*tuple_a_raw); enum mp_type b_type = mp_typeof(*tuple_b_raw); if (a_type == MP_NIL) return b_type == MP_NIL ? 
0 : -1; else if (b_type == MP_NIL) return 1; return tuple_compare_field_with_hint(tuple_a_raw, a_type, tuple_b_raw, b_type, part->type, part->coll); } bool was_null_met = false; const struct tuple_format *format_a = tuple_format(tuple_a); const struct tuple_format *format_b = tuple_format(tuple_b); const uint32_t *field_map_a = tuple_field_map(tuple_a); const uint32_t *field_map_b = tuple_field_map(tuple_b); const struct key_part *end; const char *field_a, *field_b; enum mp_type a_type, b_type; int rc; if (is_nullable) end = part + key_def->unique_part_count; else end = part + key_def->part_count; for (; part < end; part++) { field_a = tuple_field_raw(format_a, tuple_a_raw, field_map_a, part->fieldno); field_b = tuple_field_raw(format_b, tuple_b_raw, field_map_b, part->fieldno); assert(has_optional_parts || (field_a != NULL && field_b != NULL)); if (! is_nullable) { rc = tuple_compare_field(field_a, field_b, part->type, part->coll); if (rc != 0) return rc; else continue; } if (has_optional_parts) { a_type = field_a != NULL ? mp_typeof(*field_a) : MP_NIL; b_type = field_b != NULL ? mp_typeof(*field_b) : MP_NIL; } else { a_type = mp_typeof(*field_a); b_type = mp_typeof(*field_b); } if (a_type == MP_NIL) { if (b_type != MP_NIL) return -1; was_null_met = true; } else if (b_type == MP_NIL) { return 1; } else { rc = tuple_compare_field_with_hint(field_a, a_type, field_b, b_type, part->type, part->coll); if (rc != 0) return rc; } } /* * Do not use full parts set when no NULLs. It allows to * simulate a NULL != NULL logic in secondary keys, * because in them full parts set contains unique primary * key. */ if (!is_nullable || !was_null_met) return 0; /* * Inxex parts are equal and contain NULLs. So use * extended parts only. 
*/ end = key_def->parts + key_def->part_count; for (; part < end; ++part) { field_a = tuple_field_raw(format_a, tuple_a_raw, field_map_a, part->fieldno); field_b = tuple_field_raw(format_b, tuple_b_raw, field_map_b, part->fieldno); /* * Extended parts are primary, and they can not * be absent or be NULLs. */ assert(field_a != NULL && field_b != NULL); rc = tuple_compare_field(field_a, field_b, part->type, part->coll); if (rc != 0) return rc; } return 0; } template static inline int tuple_compare_with_key_slowpath(const struct tuple *tuple, const char *key, uint32_t part_count, const struct key_def *key_def) { assert(!has_optional_parts || is_nullable); assert(is_nullable == key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); assert(key != NULL || part_count == 0); assert(part_count <= key_def->part_count); const struct key_part *part = key_def->parts; const struct tuple_format *format = tuple_format(tuple); const char *tuple_raw = tuple_data(tuple); const uint32_t *field_map = tuple_field_map(tuple); enum mp_type a_type, b_type; if (likely(part_count == 1)) { const char *field; field = tuple_field_raw(format, tuple_raw, field_map, part->fieldno); if (! is_nullable) { return tuple_compare_field(field, key, part->type, part->coll); } if (has_optional_parts) a_type = field != NULL ? mp_typeof(*field) : MP_NIL; else a_type = mp_typeof(*field); b_type = mp_typeof(*key); if (a_type == MP_NIL) { return b_type == MP_NIL ? 0 : -1; } else if (b_type == MP_NIL) { return 1; } else { return tuple_compare_field_with_hint(field, a_type, key, b_type, part->type, part->coll); } } const struct key_part *end = part + part_count; int rc; for (; part < end; ++part, mp_next(&key)) { const char *field; field = tuple_field_raw(format, tuple_raw, field_map, part->fieldno); if (! is_nullable) { rc = tuple_compare_field(field, key, part->type, part->coll); if (rc != 0) return rc; else continue; } if (has_optional_parts) a_type = field != NULL ? 
mp_typeof(*field) : MP_NIL; else a_type = mp_typeof(*field); b_type = mp_typeof(*key); if (a_type == MP_NIL) { if (b_type != MP_NIL) return -1; } else if (b_type == MP_NIL) { return 1; } else { rc = tuple_compare_field_with_hint(field, a_type, key, b_type, part->type, part->coll); if (rc != 0) return rc; } } return 0; } template static inline int key_compare_parts(const char *key_a, const char *key_b, uint32_t part_count, const struct key_def *key_def) { assert(is_nullable == key_def->is_nullable); assert((key_a != NULL && key_b != NULL) || part_count == 0); const struct key_part *part = key_def->parts; if (likely(part_count == 1)) { if (! is_nullable) { return tuple_compare_field(key_a, key_b, part->type, part->coll); } enum mp_type a_type = mp_typeof(*key_a); enum mp_type b_type = mp_typeof(*key_b); if (a_type == MP_NIL) { return b_type == MP_NIL ? 0 : -1; } else if (b_type == MP_NIL) { return 1; } else { return tuple_compare_field_with_hint(key_a, a_type, key_b, b_type, part->type, part->coll); } } const struct key_part *end = part + part_count; int rc; for (; part < end; ++part, mp_next(&key_a), mp_next(&key_b)) { if (! 
is_nullable) { rc = tuple_compare_field(key_a, key_b, part->type, part->coll); if (rc != 0) return rc; else continue; } enum mp_type a_type = mp_typeof(*key_a); enum mp_type b_type = mp_typeof(*key_b); if (a_type == MP_NIL) { if (b_type != MP_NIL) return -1; } else if (b_type == MP_NIL) { return 1; } else { rc = tuple_compare_field_with_hint(key_a, a_type, key_b, b_type, part->type, part->coll); if (rc != 0) return rc; } } return 0; } template static inline int tuple_compare_with_key_sequential(const struct tuple *tuple, const char *key, uint32_t part_count, const struct key_def *key_def) { assert(!has_optional_parts || is_nullable); assert(key_def_is_sequential(key_def)); assert(is_nullable == key_def->is_nullable); assert(has_optional_parts == key_def->has_optional_parts); const char *tuple_key = tuple_data(tuple); uint32_t field_count = mp_decode_array(&tuple_key); uint32_t cmp_part_count; if (has_optional_parts && field_count < part_count) { cmp_part_count = field_count; } else { assert(field_count >= part_count); cmp_part_count = part_count; } int rc = key_compare_parts(tuple_key, key, cmp_part_count, key_def); if (!has_optional_parts || rc != 0) return rc; /* * If some tuple indexed fields are absent, then check * corresponding key fields to be equal to NULL. */ if (field_count < part_count) { /* * Key's and tuple's first field_count fields are * equal, and their bsize too. */ key += tuple->bsize - mp_sizeof_array(field_count); for (uint32_t i = field_count; i < part_count; ++i, mp_next(&key)) { if (mp_typeof(*key) != MP_NIL) return -1; } } return 0; } int key_compare(const char *key_a, const char *key_b, const struct key_def *key_def) { uint32_t part_count_a = mp_decode_array(&key_a); uint32_t part_count_b = mp_decode_array(&key_b); assert(part_count_a <= key_def->part_count); assert(part_count_b <= key_def->part_count); uint32_t part_count = MIN(part_count_a, part_count_b); assert(part_count <= key_def->part_count); if (! 
key_def->is_nullable) { return key_compare_parts(key_a, key_b, part_count, key_def); } else { return key_compare_parts(key_a, key_b, part_count, key_def); } } template static int tuple_compare_sequential(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_def *key_def) { assert(!has_optional_parts || is_nullable); assert(has_optional_parts == key_def->has_optional_parts); assert(key_def_is_sequential(key_def)); assert(is_nullable == key_def->is_nullable); const char *key_a = tuple_data(tuple_a); uint32_t fc_a = mp_decode_array(&key_a); const char *key_b = tuple_data(tuple_b); uint32_t fc_b = mp_decode_array(&key_b); if (!has_optional_parts && !is_nullable) { assert(fc_a >= key_def->part_count); assert(fc_b >= key_def->part_count); return key_compare_parts(key_a, key_b, key_def->part_count, key_def); } bool was_null_met = false; const struct key_part *part = key_def->parts; const struct key_part *end = part + key_def->unique_part_count; int rc; uint32_t i = 0; for (; part < end; ++part, ++i) { enum mp_type a_type, b_type; if (has_optional_parts) { a_type = i >= fc_a ? MP_NIL : mp_typeof(*key_a); b_type = i >= fc_b ? MP_NIL : mp_typeof(*key_b); } else { a_type = mp_typeof(*key_a); b_type = mp_typeof(*key_b); } if (a_type == MP_NIL) { if (b_type != MP_NIL) return -1; was_null_met = true; } else if (b_type == MP_NIL) { return 1; } else { rc = tuple_compare_field_with_hint(key_a, a_type, key_b, b_type, part->type, part->coll); if (rc != 0) return rc; } if (!has_optional_parts || i < fc_a) mp_next(&key_a); if (!has_optional_parts || i < fc_b) mp_next(&key_b); } if (! was_null_met) return 0; end = key_def->parts + key_def->part_count; for (; part < end; ++part, ++i, mp_next(&key_a), mp_next(&key_b)) { /* * If tuples are equal by unique_part_count, then * the rest of parts are a primary key, which can * not be absent or be null. 
*/ assert(i < fc_a && i < fc_b); rc = tuple_compare_field(key_a, key_b, part->type, part->coll); if (rc != 0) return rc; } return 0; } template static inline int field_compare(const char **field_a, const char **field_b); template <> inline int field_compare(const char **field_a, const char **field_b) { return mp_compare_uint(*field_a, *field_b); } template <> inline int field_compare(const char **field_a, const char **field_b) { uint32_t size_a, size_b; size_a = mp_decode_strl(field_a); size_b = mp_decode_strl(field_b); int r = memcmp(*field_a, *field_b, MIN(size_a, size_b)); if (r == 0) r = size_a < size_b ? -1 : size_a > size_b; return r; } template static inline int field_compare_and_next(const char **field_a, const char **field_b); template <> inline int field_compare_and_next(const char **field_a, const char **field_b) { int r = mp_compare_uint(*field_a, *field_b); mp_next(field_a); mp_next(field_b); return r; } template <> inline int field_compare_and_next(const char **field_a, const char **field_b) { uint32_t size_a, size_b; size_a = mp_decode_strl(field_a); size_b = mp_decode_strl(field_b); int r = memcmp(*field_a, *field_b, MIN(size_a, size_b)); if (r == 0) r = size_a < size_b ? -1 : size_a > size_b; *field_a += size_a; *field_b += size_b; return r; } /* Tuple comparator */ namespace /* local symbols */ { template struct FieldCompare { }; /** * Common case. 
*/ template struct FieldCompare { inline static int compare(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct tuple_format *format_a, const struct tuple_format *format_b, const char *field_a, const char *field_b) { int r; /* static if */ if (IDX + 1 == IDX2) { if ((r = field_compare_and_next(&field_a, &field_b)) != 0) return r; } else { if ((r = field_compare(&field_a, &field_b)) != 0) return r; field_a = tuple_field_raw(format_a, tuple_data(tuple_a), tuple_field_map(tuple_a), IDX2); field_b = tuple_field_raw(format_b, tuple_data(tuple_b), tuple_field_map(tuple_b), IDX2); } return FieldCompare:: compare(tuple_a, tuple_b, format_a, format_b, field_a, field_b); } }; template struct FieldCompare { inline static int compare(const struct tuple *, const struct tuple *, const struct tuple_format *, const struct tuple_format *, const char *field_a, const char *field_b) { return field_compare(&field_a, &field_b); } }; /** * header */ template struct TupleCompare { static int compare(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_def *) { struct tuple_format *format_a = tuple_format(tuple_a); struct tuple_format *format_b = tuple_format(tuple_b); const char *field_a, *field_b; field_a = tuple_field_raw(format_a, tuple_data(tuple_a), tuple_field_map(tuple_a), IDX); field_b = tuple_field_raw(format_b, tuple_data(tuple_b), tuple_field_map(tuple_b), IDX); return FieldCompare:: compare(tuple_a, tuple_b, format_a, format_b, field_a, field_b); } }; template struct TupleCompare<0, TYPE, MORE_TYPES...> { static int compare(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_def *) { struct tuple_format *format_a = tuple_format(tuple_a); struct tuple_format *format_b = tuple_format(tuple_b); const char *field_a = tuple_data(tuple_a); const char *field_b = tuple_data(tuple_b); mp_decode_array(&field_a); mp_decode_array(&field_b); return FieldCompare<0, TYPE, MORE_TYPES...>::compare(tuple_a, tuple_b, format_a, 
format_b, field_a, field_b); } }; } /* end of anonymous namespace */ struct comparator_signature { tuple_compare_t f; uint32_t p[64]; }; #define COMPARATOR(...) \ { TupleCompare<__VA_ARGS__>::compare, { __VA_ARGS__, UINT32_MAX } }, /** * field1 no, field1 type, field2 no, field2 type, ... */ static const comparator_signature cmp_arr[] = { COMPARATOR(0, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_STRING) COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_STRING) COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_STRING) COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_UNSIGNED) COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_STRING) COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_STRING) COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_STRING) COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_STRING) }; #undef COMPARATOR tuple_compare_t tuple_compare_create(const struct key_def *def) { if (def->is_nullable) { if (key_def_is_sequential(def)) { if (def->has_optional_parts) return tuple_compare_sequential; else return tuple_compare_sequential; } else if (def->has_optional_parts) { return tuple_compare_slowpath; } else { return tuple_compare_slowpath; } } assert(! 
def->has_optional_parts); if (!key_def_has_collation(def)) { /* Precalculated comparators don't use collation */ for (uint32_t k = 0; k < sizeof(cmp_arr) / sizeof(cmp_arr[0]); k++) { uint32_t i = 0; for (; i < def->part_count; i++) if (def->parts[i].fieldno != cmp_arr[k].p[i * 2] || def->parts[i].type != cmp_arr[k].p[i * 2 + 1]) break; if (i == def->part_count && cmp_arr[k].p[i * 2] == UINT32_MAX) return cmp_arr[k].f; } } if (key_def_is_sequential(def)) return tuple_compare_sequential; else return tuple_compare_slowpath; } /* }}} tuple_compare */ /* {{{ tuple_compare_with_key */ template static inline int field_compare_with_key(const char **field, const char **key); template <> inline int field_compare_with_key(const char **field, const char **key) { return mp_compare_uint(*field, *key); } template <> inline int field_compare_with_key(const char **field, const char **key) { uint32_t size_a, size_b; size_a = mp_decode_strl(field); size_b = mp_decode_strl(key); int r = memcmp(*field, *key, MIN(size_a, size_b)); if (r == 0) r = size_a < size_b ? -1 : size_a > size_b; return r; } template static inline int field_compare_with_key_and_next(const char **field_a, const char **field_b); template <> inline int field_compare_with_key_and_next(const char **field_a, const char **field_b) { int r = mp_compare_uint(*field_a, *field_b); mp_next(field_a); mp_next(field_b); return r; } template <> inline int field_compare_with_key_and_next(const char **field_a, const char **field_b) { uint32_t size_a, size_b; size_a = mp_decode_strl(field_a); size_b = mp_decode_strl(field_b); int r = memcmp(*field_a, *field_b, MIN(size_a, size_b)); if (r == 0) r = size_a < size_b ? 
-1 : size_a > size_b; *field_a += size_a; *field_b += size_b; return r; } /* Tuple with key comparator */ namespace /* local symbols */ { template struct FieldCompareWithKey {}; /** * common */ template struct FieldCompareWithKey { inline static int compare(const struct tuple *tuple, const char *key, uint32_t part_count, const struct key_def *key_def, const struct tuple_format *format, const char *field) { int r; /* static if */ if (IDX + 1 == IDX2) { r = field_compare_with_key_and_next(&field, &key); if (r || part_count == FLD_ID + 1) return r; } else { r = field_compare_with_key(&field, &key); if (r || part_count == FLD_ID + 1) return r; field = tuple_field_raw(format, tuple_data(tuple), tuple_field_map(tuple), IDX2); mp_next(&key); } return FieldCompareWithKey:: compare(tuple, key, part_count, key_def, format, field); } }; template struct FieldCompareWithKey { inline static int compare(const struct tuple *, const char *key, uint32_t, const struct key_def *, const struct tuple_format *, const char *field) { return field_compare_with_key(&field, &key); } }; /** * header */ template struct TupleCompareWithKey { static int compare(const struct tuple *tuple, const char *key, uint32_t part_count, const struct key_def *key_def) { /* Part count can be 0 in wildcard searches. */ if (part_count == 0) return 0; struct tuple_format *format = tuple_format(tuple); const char *field = tuple_field_raw(format, tuple_data(tuple), tuple_field_map(tuple), IDX); return FieldCompareWithKey:: compare(tuple, key, part_count, key_def, format, field); } }; template struct TupleCompareWithKey<0, 0, TYPE, MORE_TYPES...> { static int compare(const struct tuple *tuple, const char *key, uint32_t part_count, const struct key_def *key_def) { /* Part count can be 0 in wildcard searches. 
*/ if (part_count == 0) return 0; struct tuple_format *format = tuple_format(tuple); const char *field = tuple_data(tuple); mp_decode_array(&field); return FieldCompareWithKey<0, 0, TYPE, MORE_TYPES...>:: compare(tuple, key, part_count, key_def, format, field); } }; } /* end of anonymous namespace */ struct comparator_with_key_signature { tuple_compare_with_key_t f; uint32_t p[64]; }; #define KEY_COMPARATOR(...) \ { TupleCompareWithKey<0, __VA_ARGS__>::compare, { __VA_ARGS__ } }, static const comparator_with_key_signature cmp_wk_arr[] = { KEY_COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_UNSIGNED) KEY_COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_UNSIGNED) KEY_COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_UNSIGNED) KEY_COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_UNSIGNED) KEY_COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_STRING) KEY_COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_STRING) KEY_COMPARATOR(0, FIELD_TYPE_UNSIGNED, 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_STRING) KEY_COMPARATOR(0, FIELD_TYPE_STRING , 1, FIELD_TYPE_STRING , 2, FIELD_TYPE_STRING) KEY_COMPARATOR(1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_UNSIGNED) KEY_COMPARATOR(1, FIELD_TYPE_STRING , 2, FIELD_TYPE_UNSIGNED) KEY_COMPARATOR(1, FIELD_TYPE_UNSIGNED, 2, FIELD_TYPE_STRING) KEY_COMPARATOR(1, FIELD_TYPE_STRING , 2, FIELD_TYPE_STRING) }; #undef KEY_COMPARATOR tuple_compare_with_key_t tuple_compare_with_key_create(const struct key_def *def) { if (def->is_nullable) { if (key_def_is_sequential(def)) { if (def->has_optional_parts) { return tuple_compare_with_key_sequential; } else { return tuple_compare_with_key_sequential; } } else if (def->has_optional_parts) { return tuple_compare_with_key_slowpath; } else { return tuple_compare_with_key_slowpath; } } assert(! 
def->has_optional_parts); if (!key_def_has_collation(def)) { /* Precalculated comparators don't use collation */ for (uint32_t k = 0; k < sizeof(cmp_wk_arr) / sizeof(cmp_wk_arr[0]); k++) { uint32_t i = 0; for (; i < def->part_count; i++) { if (def->parts[i].fieldno != cmp_wk_arr[k].p[i * 2] || def->parts[i].type != cmp_wk_arr[k].p[i * 2 + 1]) { break; } } if (i == def->part_count) return cmp_wk_arr[k].f; } } if (key_def_is_sequential(def)) return tuple_compare_with_key_sequential; else return tuple_compare_with_key_slowpath; } /* }}} tuple_compare_with_key */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/0000775000000000000000000000000013306565107016354 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/box/lua/net_box.lua0000664000000000000000000012341413306565107020522 0ustar rootroot-- net_box.lua (internal file) local log = require('log') local ffi = require('ffi') local buffer = require('buffer') local socket = require('socket') local fiber = require('fiber') local msgpack = require('msgpack') local errno = require('errno') local urilib = require('uri') local internal = require('net.box.lib') local trigger = require('internal.trigger') local band = bit.band local max = math.max local fiber_clock = fiber.clock local fiber_self = fiber.self local decode = msgpack.decode_unchecked local table_new = require('table.new') local check_iterator_type = box.internal.check_iterator_type local check_index_arg = box.internal.check_index_arg local check_space_arg = box.internal.check_space_arg local check_primary_index = box.internal.check_primary_index local communicate = internal.communicate local encode_auth = internal.encode_auth local encode_select = internal.encode_select local decode_greeting = internal.decode_greeting local sequence_mt = { __serialize = 'sequence' } local TIMEOUT_INFINITY = 500 * 365 * 86400 local VSPACE_ID = 281 local VINDEX_ID = 289 local IPROTO_STATUS_KEY = 0x00 local IPROTO_ERRNO_MASK = 0x7FFF local IPROTO_SYNC_KEY = 0x01 local 
IPROTO_SCHEMA_VERSION_KEY = 0x05 local IPROTO_DATA_KEY = 0x30 local IPROTO_ERROR_KEY = 0x31 local IPROTO_GREETING_SIZE = 128 -- select errors from box.error local E_UNKNOWN = box.error.UNKNOWN local E_NO_CONNECTION = box.error.NO_CONNECTION local E_TIMEOUT = box.error.TIMEOUT local E_WRONG_SCHEMA_VERSION = box.error.WRONG_SCHEMA_VERSION local E_PROC_LUA = box.error.PROC_LUA -- utility tables local is_final_state = {closed = 1, error = 1} local method_codec = { ping = internal.encode_ping, call_16 = internal.encode_call_16, call_17 = internal.encode_call, eval = internal.encode_eval, insert = internal.encode_insert, replace = internal.encode_replace, delete = internal.encode_delete, update = internal.encode_update, upsert = internal.encode_upsert, select = internal.encode_select, -- inject raw data into connection, used by console and tests inject = function(buf, id, schema_version, bytes) local ptr = buf:reserve(#bytes) ffi.copy(ptr, bytes, #bytes) buf.wpos = ptr + #bytes end } local function next_id(id) return band(id + 1, 0x7FFFFFFF) end -- function create_transport(host, port, user, password, callback) -- -- Transport methods: connect(), close(), perfrom_request(), wait_state() -- -- Basically, *transport* is a TCP connection speaking one of -- Tarantool network protocols. This is a low-level interface. -- Primary features: -- * implements protocols; concurrent perform_request()-s benefit from -- multiplexing support in the protocol; -- * schema-aware (optional) - snoops responses and initiates -- schema reload when a request fails due to schema version mismatch; -- * delivers transport events via the callback. -- -- Transport state machine: -- -- State machine starts in 'initial' state. Connect method changes -- the state to 'connecting' and spawns a worker fiber. Close -- method sets the state to 'closed' and kills the worker. -- If the transport is already in 'error' state close() does nothing. 
-- -- State chart: -- -- initial -> connecting -> active -- \ -- -> auth -> fetch_schema <-> active -- -- (any state, on error) -> error_reconnect -> connecting -> ... -- \ -- -> [error] -- (any_state, but [error]) -> [closed] -- -- -- State change events can be delivered to the transport user via -- the optional 'callback' argument: -- -- The callback functions needs to have the following signature: -- -- callback(event_name, ...) -- -- The following events are delivered, with arguments: -- -- 'state_changed', state, errno, error -- 'handshake', greeting -> nil (accept) / errno, error (reject) -- 'will_fetch_schema' -> true (approve) / false (skip fetch) -- 'did_fetch_schema', schema_version, spaces, indices -- 'reconnect_timeout' -> get reconnect timeout if set and > 0, -- else nil is returned. -- -- Suggestion for callback writers: sleep a few secs before approving -- reconnect. -- local function create_transport(host, port, user, password, callback) -- check / normalize credentials if user == nil and password ~= nil then box.error(E_PROC_LUA, 'net.box: user is not defined') end if user ~= nil and password == nil then password = '' end -- state: current state, only the worker fiber and connect method -- change state local state = 'initial' local last_errno local last_error local state_cond = fiber.cond() -- signaled when the state changes -- requests: requests currently 'in flight', keyed by a request id; -- value refs are weak hence if a client dies unexpectedly, -- GC cleans the mess. Client submits a request and waits on state_cond. -- If the reponse arrives within the timeout, the worker wakes -- client fiber explicitly. Otherwize, wait on state_cond completes and -- the client reports E_TIMEOUT. 
local requests = setmetatable({}, { __mode = 'v' }) local next_request_id = 1 local worker_fiber local connection local send_buf = buffer.ibuf(buffer.READAHEAD) local recv_buf = buffer.ibuf(buffer.READAHEAD) -- STATE SWITCHING -- local function set_state(new_state, new_errno, new_error, schema_version) state = new_state last_errno = new_errno last_error = new_error callback('state_changed', new_state, new_errno, new_error) state_cond:broadcast() if state ~= 'active' then -- cancel all requests but the ones bearing the particular -- schema id; if schema id was omitted or we aren't fetching -- schema, cancel everything if not schema_version or state ~= 'fetch_schema' then schema_version = -1 end local next_id, next_request = next(requests) while next_id do local id, request = next_id, next_request next_id, next_request = next(requests, id) if request.schema_version ~= schema_version then requests[id] = nil -- this marks the request as completed request.errno = new_errno request.response = new_error end end end end -- FYI: [] on a string is valid local function wait_state(target_state, timeout) local deadline = fiber_clock() + (timeout or TIMEOUT_INFINITY) repeat until state == target_state or target_state[state] or is_final_state[state] or not state_cond:wait(max(0, deadline - fiber_clock())) return state == target_state or target_state[state] or false end -- CONNECT/CLOSE -- local protocol_sm local function connect() if state ~= 'initial' then return not is_final_state[state] end set_state('connecting') fiber.create(function() worker_fiber = fiber_self() fiber.name(string.format('%s:%s (net.box)', host, port), {truncate=true}) ::reconnect:: local ok, err = pcall(protocol_sm) if not (ok or is_final_state[state]) then set_state('error', E_UNKNOWN, err) end if connection then connection:close() connection = nil end local timeout = callback('reconnect_timeout') if timeout and state == 'error_reconnect' then fiber.sleep(timeout) timeout = callback('reconnect_timeout') if 
timeout and state == 'error_reconnect' then goto reconnect end end send_buf:recycle() recv_buf:recycle() worker_fiber = nil end) return true end local function close() if not is_final_state[state] then set_state('closed', E_NO_CONNECTION, 'Connection closed') end if worker_fiber then worker_fiber:cancel() worker_fiber = nil end end -- REQUEST/RESPONSE -- local function perform_request(timeout, buffer, method, schema_version, ...) if state ~= 'active' then return last_errno or E_NO_CONNECTION, last_error end local deadline = fiber_clock() + (timeout or TIMEOUT_INFINITY) -- alert worker to notify it of the queued outgoing data; -- if the buffer wasn't empty, assume the worker was already alerted if send_buf:size() == 0 then worker_fiber:wakeup() end local id = next_request_id method_codec[method](send_buf, id, schema_version, ...) next_request_id = next_id(id) local request = table_new(0, 6) -- reserve space for 6 keys request.client = fiber_self() request.method = method request.schema_version = schema_version request.buffer = buffer requests[id] = request repeat local timeout = max(0, deadline - fiber_clock()) if not state_cond:wait(timeout) then requests[id] = nil return E_TIMEOUT, 'Timeout exceeded' end until requests[id] == nil -- i.e. 
completed (beware spurious wakeups) return request.errno, request.response end local function wakeup_client(client) if client:status() ~= 'dead' then client:wakeup() end end local function dispatch_response_iproto(hdr, body_rpos, body_end) local id = hdr[IPROTO_SYNC_KEY] local request = requests[id] if request == nil then -- nobody is waiting for the response return end requests[id] = nil local status = hdr[IPROTO_STATUS_KEY] local body, body_end_check if status ~= 0 then -- Handle errors body, body_end_check = decode(body_rpos) assert(body_end == body_end_check, "invalid xrow length") request.errno = band(status, IPROTO_ERRNO_MASK) request.response = body[IPROTO_ERROR_KEY] wakeup_client(request.client) return end local buffer = request.buffer if buffer ~= nil then -- Copy xrow.body to user-provided buffer local body_len = body_end - body_rpos local wpos = buffer:alloc(body_len) ffi.copy(wpos, body_rpos, body_len) request.response = tonumber(body_len) wakeup_client(request.client) return end -- Decode xrow.body[DATA] to Lua objects body, body_end_check = decode(body_rpos) assert(body_end == body_end_check, "invalid xrow length") request.response = body[IPROTO_DATA_KEY] wakeup_client(request.client) end local function new_request_id() local id = next_request_id; next_request_id = next_id(id) return id end -- IO (WORKER FIBER) -- local function send_and_recv(limit_or_boundary, timeout) return communicate(connection:fd(), send_buf, recv_buf, limit_or_boundary, timeout) end local function send_and_recv_iproto(timeout) local data_len = recv_buf.wpos - recv_buf.rpos local required = 0 if data_len < 5 then required = 5 else -- PWN! 
insufficient input validation local bufpos = recv_buf.rpos local len, rpos = decode(bufpos) required = (rpos - bufpos) + len if data_len >= required then local body_end = rpos + len local hdr, body_rpos = decode(rpos) recv_buf.rpos = body_end return nil, hdr, body_rpos, body_end end end local deadline = fiber_clock() + (timeout or TIMEOUT_INFINITY) local err, extra = send_and_recv(required, timeout) if err then return err, extra end return send_and_recv_iproto(max(0, deadline - fiber_clock())) end local function send_and_recv_console(timeout) local delim = '\n...\n' local err, delim_pos = send_and_recv(delim, timeout) if err then return err, delim_pos else local response = ffi.string(recv_buf.rpos, delim_pos + #delim) recv_buf.rpos = recv_buf.rpos + delim_pos + #delim return nil, response end end -- PROTOCOL STATE MACHINE (WORKER FIBER) -- -- -- The sm is implemented as a collection of functions performing -- tail-recursive calls to each other. Yep, Lua optimizes -- such calls, and yep, this is the canonical way to implement -- a state machine in Lua. local console_sm, iproto_auth_sm, iproto_schema_sm, iproto_sm, error_sm -- -- Protocol_sm is a core function of netbox. It calls all -- other ..._sm() functions, and explicitly or implicitly -- holds Lua referece on a connection object. It means, that -- until it works, the connection can not be garbage -- collected. See gh-3164, where because of reconnect sleeps -- in this function, a connection could not be deleted. 
-- protocol_sm = function () local tm_begin, tm = fiber.clock(), callback('fetch_connect_timeout') connection = socket.tcp_connect(host, port, tm) if connection == nil then return error_sm(E_NO_CONNECTION, errno.strerror(errno())) end local size = IPROTO_GREETING_SIZE local err, msg = send_and_recv(size, tm - (fiber.clock() - tm_begin)) if err then return error_sm(err, msg) end local g = decode_greeting(ffi.string(recv_buf.rpos, size)) recv_buf.rpos = recv_buf.rpos + size if not g then return error_sm(E_NO_CONNECTION, 'Can\'t decode handshake') end err, msg = callback('handshake', g) if err then return error_sm(err, msg) end if g.protocol == 'Lua console' then local setup_delimiter = 'require("console").delimiter("$EOF$")\n' method_codec.inject(send_buf, nil, nil, setup_delimiter) local err, response = send_and_recv_console() if err then return error_sm(err, response) elseif response ~= '---\n...\n' then return error_sm(E_NO_CONNECTION, 'Unexpected response') end local rid = next_request_id set_state('active') return console_sm(rid) elseif g.protocol == 'Binary' then return iproto_auth_sm(g.salt) else return error_sm(E_NO_CONNECTION, 'Unknown protocol: ' .. 
g.protocol) end end console_sm = function(rid) local delim = '\n...\n' local err, response = send_and_recv_console() if err then return error_sm(err, response) else local request = requests[rid] if request == nil then -- nobody is waiting for the response return end requests[rid] = nil request.response = response wakeup_client(request.client) return console_sm(next_id(rid)) end end iproto_auth_sm = function(salt) set_state('auth') if not user or not password then set_state('fetch_schema') return iproto_schema_sm() end encode_auth(send_buf, new_request_id(), nil, user, password, salt) local err, hdr, body_rpos, body_end = send_and_recv_iproto() if err then return error_sm(err, hdr) end if hdr[IPROTO_STATUS_KEY] ~= 0 then local body body, body_end = decode(body_rpos) return error_sm(E_NO_CONNECTION, body[IPROTO_ERROR_KEY]) end set_state('fetch_schema') return iproto_schema_sm(hdr[IPROTO_SCHEMA_VERSION_KEY]) end iproto_schema_sm = function(schema_version) if not callback('will_fetch_schema') then set_state('active') return iproto_sm(schema_version) end local select1_id = new_request_id() local select2_id = new_request_id() local response = {} -- fetch everything from space _vspace, 2 = ITER_ALL encode_select(send_buf, select1_id, nil, VSPACE_ID, 0, 2, 0, 0xFFFFFFFF, nil) -- fetch everything from space _vindex, 2 = ITER_ALL encode_select(send_buf, select2_id, nil, VINDEX_ID, 0, 2, 0, 0xFFFFFFFF, nil) schema_version = nil -- any schema_version will do provided that -- it is consistent across responses repeat local err, hdr, body_rpos, body_end = send_and_recv_iproto() if err then return error_sm(err, hdr) end dispatch_response_iproto(hdr, body_rpos, body_end) local id = hdr[IPROTO_SYNC_KEY] if id == select1_id or id == select2_id then -- response to a schema query we've submitted local status = hdr[IPROTO_STATUS_KEY] local response_schema_version = hdr[IPROTO_SCHEMA_VERSION_KEY] if status ~= 0 then local body body, body_end = decode(body_rpos) return 
error_sm(E_NO_CONNECTION, body[IPROTO_ERROR_KEY]) end if schema_version == nil then schema_version = response_schema_version elseif schema_version ~= response_schema_version then -- schema changed while fetching schema; restart loader return iproto_schema_sm() end local body body, body_end = decode(body_rpos) response[id] = body[IPROTO_DATA_KEY] end until response[select1_id] and response[select2_id] callback('did_fetch_schema', schema_version, response[select1_id], response[select2_id]) set_state('active') return iproto_sm(schema_version) end iproto_sm = function(schema_version) local err, hdr, body_rpos, body_end = send_and_recv_iproto() if err then return error_sm(err, hdr) end dispatch_response_iproto(hdr, body_rpos, body_end) local status = hdr[IPROTO_STATUS_KEY] local response_schema_version = hdr[IPROTO_SCHEMA_VERSION_KEY] if response_schema_version > 0 and response_schema_version ~= schema_version then -- schema_version has been changed - start to load a new version. -- Sic: self.schema_version will be updated only after reload. local body body, body_end = decode(body_rpos) set_state('fetch_schema', E_WRONG_SCHEMA_VERSION, body[IPROTO_ERROR_KEY], response_schema_version) return iproto_schema_sm(schema_version) end return iproto_sm(schema_version) end error_sm = function(err, msg) if connection then connection:close(); connection = nil end send_buf:recycle() recv_buf:recycle() if state ~= 'closed' then if callback('reconnect_timeout') then set_state('error_reconnect', err, msg) else set_state('error', err, msg) end end end return { close = close, connect = connect, wait_state = wait_state, perform_request = perform_request } end -- Wrap create_transport, adding auto-close-on-GC feature. -- All the GC magic is neatly encapsulated! -- The tricky part is the callback: -- * callback (typically) references the transport (indirectly); -- * worker fiber references the callback; -- * fibers are GC roots - i.e. transport is never GC-ed! 
-- We solve the issue by making the worker->callback ref weak. -- Now it is necessary to have a strong ref to callback somewhere or -- it is GC-ed prematurely. We wrap close() method, stashing the -- ref in an upvalue (close() performance doesn't matter much.) local create_transport = function(host, port, user, password, callback) local weak_refs = setmetatable({callback = callback}, {__mode = 'v'}) local function weak_callback(...) local callback = weak_refs.callback if callback then return callback(...) end end local transport = create_transport(host, port, user, password, weak_callback) local transport_close = transport.close local gc_hook = ffi.gc(ffi.new('char[1]'), function() pcall(transport_close) end) transport.close = function() -- dummy gc_hook, callback refs prevent premature GC return transport_close(gc_hook, callback) end return transport end local function parse_connect_params(host_or_uri, ...) -- self? host_or_uri port? opts? local port, opts = ... if type(host_or_uri) == 'table' then host_or_uri, port, opts = ... 
end if type(port) == 'table' then opts = port; port = nil end if opts == nil then opts = {} else local copy = {} for k, v in pairs(opts) do copy[k] = v end opts = copy end local host = host_or_uri if port == nil then local url = urilib.parse(tostring(host)) if url == nil or url.service == nil then box.error(E_PROC_LUA, "usage: connect(uri[, opts] | host, port[, opts])") end host, port = url.host, url.service if opts.user == nil and opts.password == nil then opts.user, opts.password = url.login, url.password end end return host, port, opts end local function remote_serialize(self) return { host = self.host, port = self.port, opts = next(self.opts) and self.opts, state = self.state, error = self.error, protocol = self.protocol, schema_version = self.schema_version, peer_uuid = self.peer_uuid, peer_version_id = self.peer_version_id } end local remote_methods = {} local remote_mt = { __index = remote_methods, __serialize = remote_serialize, __metatable = false } local console_methods = {} local console_mt = { __index = console_methods, __serialize = remote_serialize, __metatable = false } local space_metatable, index_metatable local function connect(...) local host, port, opts = parse_connect_params(...) local user, password = opts.user, opts.password; opts.password = nil local last_reconnect_error local remote = {host = host, port = port, opts = opts, state = 'initial'} local function callback(what, ...) if what == 'state_changed' then local state, errno, err = ... if (remote.state == 'active' or remote.state == 'fetch_schema') and (state == 'error' or state == 'closed' or state == 'error_reconnect') then remote._on_disconnect:run(remote) end if remote.state ~= 'error' and remote.state ~= 'error_reconnect' and state == 'active' then remote._on_connect:run(remote) end remote.state, remote.error = state, err if state == 'error_reconnect' then -- Repeat the same error in verbose log only. -- Else the error clogs the log. See gh-3175. 
if err ~= last_reconnect_error then log.warn('%s:%s: %s', host or "", port or "", err) last_reconnect_error = err else log.verbose('%s:%s: %s', host or "", port or "", err) end end elseif what == 'handshake' then local greeting = ... if not opts.console and greeting.protocol ~= 'Binary' then return E_NO_CONNECTION, 'Unsupported protocol: '..greeting.protocol end remote.protocol = greeting.protocol remote.peer_uuid = greeting.uuid remote.peer_version_id = greeting.version_id elseif what == 'will_fetch_schema' then return not opts.console elseif what == 'fetch_connect_timeout' then return opts.connect_timeout or 10 elseif what == 'did_fetch_schema' then remote:_install_schema(...) elseif what == 'reconnect_timeout' then if type(opts.reconnect_after) == 'number' and opts.reconnect_after > 0 then return opts.reconnect_after end end end if opts.console then setmetatable(remote, console_mt) else setmetatable(remote, remote_mt) -- @deprecated since 1.7.4 remote._deadlines = setmetatable({}, {__mode = 'k'}) remote._space_mt = space_metatable(remote) remote._index_mt = index_metatable(remote) if opts.call_16 then remote.call = remote.call_16 remote.eval = remote.eval_16 end end remote._on_schema_reload = trigger.new("on_schema_reload") remote._on_disconnect = trigger.new("on_disconnect") remote._on_connect = trigger.new("on_connect") remote._transport = create_transport(host, port, user, password, callback) remote._transport.connect() if opts.wait_connected ~= false then remote._transport.wait_state('active', tonumber(opts.wait_connected)) end return remote end local function check_remote_arg(remote, method) if type(remote) ~= 'table' then local fmt = 'Use remote:%s(...) instead of remote.%s(...):' box.error(E_PROC_LUA, string.format(fmt, method, method)) end end local function check_call_args(args) if args ~= nil and type(args) ~= 'table' then error("Use remote:call(func_name, {arg1, arg2, ...}, opts) ".. 
"instead of remote:call(func_name, arg1, arg2, ...)") end end local function check_eval_args(args) if args ~= nil and type(args) ~= 'table' then error("Use remote:eval(expression, {arg1, arg2, ...}, opts) ".. "instead of remote:eval(expression, arg1, arg2, ...)") end end function remote_methods:close() check_remote_arg(self, 'close') self._transport.close() end function remote_methods:on_schema_reload(...) check_remote_arg(self, 'on_schema_reload') return self._on_schema_reload(...) end function remote_methods:on_disconnect(...) check_remote_arg(self, 'on_disconnect') return self._on_disconnect(...) end function remote_methods:on_connect(...) check_remote_arg(self, 'on_connect') return self._on_connect(...) end function remote_methods:is_connected() check_remote_arg(self, 'is_connected') return self.state == 'active' or self.state == 'fetch_schema' end function remote_methods:wait_connected(timeout) check_remote_arg(self, 'wait_connected') return self._transport.wait_state('active', timeout) end function remote_methods:_request(method, opts, ...) local this_fiber = fiber_self() local transport = self._transport local perform_request = transport.perform_request local wait_state = transport.wait_state local deadline = nil if opts and opts.timeout then -- conn.space:request(, { timeout = timeout }) deadline = fiber_clock() + opts.timeout else -- conn:timeout(timeout).space:request() -- @deprecated since 1.7.4 deadline = self._deadlines[this_fiber] end local buffer = opts and opts.buffer local err, res repeat local timeout = deadline and max(0, deadline - fiber_clock()) if self.state ~= 'active' then wait_state('active', timeout) timeout = deadline and max(0, deadline - fiber_clock()) end err, res = perform_request(timeout, buffer, method, self.schema_version, ...) 
if not err and buffer ~= nil then return res -- the length of xrow.body elseif not err then setmetatable(res, sequence_mt) local postproc = method ~= 'eval' and method ~= 'call_17' if postproc then local tnew = box.tuple.new for i, v in pairs(res) do res[i] = tnew(v) end end return res -- decoded xrow.body[DATA] elseif err == E_WRONG_SCHEMA_VERSION then err = nil end until err box.error({code = err, reason = res}) end function remote_methods:ping(opts) check_remote_arg(self, 'ping') local timeout = opts and opts.timeout if timeout == nil then -- conn:timeout(timeout):ping() -- @deprecated since 1.7.4 local deadline = self._deadlines[fiber_self()] timeout = deadline and max(0, deadline - fiber_clock()) or (opts and opts.timeout) end local err = self._transport.perform_request(timeout, nil, 'ping', self.schema_version) return not err or err == E_WRONG_SCHEMA_VERSION end function remote_methods:reload_schema() check_remote_arg(self, 'reload_schema') self:_request('select', nil, VSPACE_ID, 0, box.index.GE, 0, 0xFFFFFFFF, nil) end -- @deprecated since 1.7.4 function remote_methods:call_16(func_name, ...) check_remote_arg(self, 'call') return self:_request('call_16', nil, tostring(func_name), {...}) end function remote_methods:call(func_name, args, opts) check_remote_arg(self, 'call') check_call_args(args) args = args or {} local res = self:_request('call_17', opts, tostring(func_name), args) if type(res) ~= 'table' then return res end return unpack(res) end -- @deprecated since 1.7.4 function remote_methods:eval_16(code, ...) 
check_remote_arg(self, 'eval') return unpack(self:_request('eval', nil, code, {...})) end function remote_methods:eval(code, args, opts) check_remote_arg(self, 'eval') check_eval_args(args) args = args or {} local res = self:_request('eval', opts, code, args) if type(res) ~= 'table' then return res end return unpack(res) end function remote_methods:wait_state(state, timeout) check_remote_arg(self, 'wait_state') if timeout == nil then local deadline = self._deadlines[fiber_self()] timeout = deadline and max(0, deadline - fiber_clock()) end return self._transport.wait_state(state, timeout) end local compat_warning_said = false -- @deprecated since 1.7.4 function remote_methods:timeout(timeout) check_remote_arg(self, 'timeout') if not compat_warning_said then compat_warning_said = true log.warn("netbox:timeout(timeout) is deprecated since 1.7.4, ".. "please use space:(..., {timeout = t}) instead.") end -- Sic: this is broken by design self._deadlines[fiber_self()] = (timeout and fiber_clock() + timeout) return self end function remote_methods:_install_schema(schema_version, spaces, indices) local sl, space_mt, index_mt = {}, self._space_mt, self._index_mt for _, space in pairs(spaces) do local name = space[3] local id = space[1] local engine = space[4] local field_count = space[5] local format = space[7] or {} local s = {} if self.space ~= nil and self.space[id] ~= nil then s = self.space[id] else setmetatable(s, space_mt) end s.id = id s.name = name s.engine = engine s.field_count = field_count s.enabled = true s.index = {} s.temporary = false s._format = format s.connection = self if #space > 5 then local opts = space[6] if type(opts) == 'table' then -- Tarantool >= 1.7.0 s.temporary = not not opts.temporary elseif type(opts) == 'string' then -- Tarantool < 1.7.0 s.temporary = string.match(opts, 'temporary') ~= nil end end sl[id] = s sl[name] = s end for _, index in pairs(indices) do local idx = { space = index[1], id = index[2], name = index[3], type = 
string.upper(index[4]), parts = {}, } local OPTS = 5 local PARTS = 6 if type(index[OPTS]) == 'number' then idx.unique = index[OPTS] == 1 for k = 0, index[PARTS] - 1 do local pktype = index[7 + k * 2 + 1] local pkfield = index[7 + k * 2] local pk = { type = pktype, fieldno = pkfield + 1 } idx.parts[k + 1] = pk end else for k = 1, #index[PARTS] do local pknullable = index[PARTS][k].is_nullable or false local pkcollationid = index[PARTS][k].collation local pktype = index[PARTS][k][2] or index[PARTS][k].type local pkfield = index[PARTS][k][1] or index[PARTS][k].field local pk = { type = pktype, fieldno = pkfield + 1, collation_id = pkcollationid, is_nullable = pknullable } idx.parts[k] = pk end idx.unique = not not index[OPTS].is_unique end if sl[idx.space] ~= nil then sl[idx.space].index[idx.id] = idx sl[idx.space].index[idx.name] = idx idx.space = sl[idx.space] setmetatable(idx, index_mt) end end self.schema_version = schema_version self.space = sl self._on_schema_reload:run(self) end -- console methods console_methods.close = remote_methods.close console_methods.on_schema_reload = remote_methods.on_schema_reload console_methods.on_disconnect = remote_methods.on_disconnect console_methods.on_connect = remote_methods.on_connect console_methods.is_connected = remote_methods.is_connected console_methods.wait_state = remote_methods.wait_state function console_methods:eval(line, timeout) check_remote_arg(self, 'eval') local err, res local transport = self._transport local pr = transport.perform_request if self.state ~= 'active' then local deadline = fiber_clock() + (timeout or TIMEOUT_INFINITY) transport.wait_state('active', timeout) timeout = max(0, deadline - fiber_clock()) end if self.protocol == 'Binary' then local loader = 'return require("console").eval(...)' err, res = pr(timeout, nil, 'eval', nil, loader, {line}) else assert(self.protocol == 'Lua console') err, res = pr(timeout, nil, 'inject', nil, line..'$EOF$\n') end if err then box.error({code = err, reason = 
res}) end return res[1] or res end local function one_tuple(tab) if type(tab) ~= 'table' then return tab elseif tab[1] ~= nil then return tab[1] end end space_metatable = function(remote) local methods = {} function methods:insert(tuple, opts) check_space_arg(self, 'insert') return one_tuple(remote:_request('insert', opts, self.id, tuple)) end function methods:replace(tuple, opts) check_space_arg(self, 'replace') return one_tuple(remote:_request('replace', opts, self.id, tuple)) end function methods:select(key, opts) check_space_arg(self, 'select') return check_primary_index(self):select(key, opts) end function methods:delete(key, opts) check_space_arg(self, 'delete') return check_primary_index(self):delete(key, opts) end function methods:update(key, oplist, opts) check_space_arg(self, 'update') return check_primary_index(self):update(key, oplist, opts) end function methods:upsert(key, oplist, opts) check_space_arg(self, 'upsert') remote:_request('upsert', opts, self.id, key, oplist) return end function methods:get(key, opts) check_space_arg(self, 'get') return check_primary_index(self):get(key, opts) end function methods:format(format) if format == nil then return self._format else box.error(box.error.UNSUPPORTED, "net.box", "setting space format") end end return { __index = methods, __metatable = false } end index_metatable = function(remote) local methods = {} function methods:select(key, opts) check_index_arg(self, 'select') local key_is_nil = (key == nil or (type(key) == 'table' and #key == 0)) local iterator = check_iterator_type(opts, key_is_nil) local offset = tonumber(opts and opts.offset) or 0 local limit = tonumber(opts and opts.limit) or 0xFFFFFFFF return remote:_request('select', opts, self.space.id, self.id, iterator, offset, limit, key) end function methods:get(key, opts) check_index_arg(self, 'get') if opts and opts.buffer then error("index:get() doesn't support `buffer` argument") end local res = remote:_request('select', opts, self.space.id, 
self.id, box.index.EQ, 0, 2, key) if res[2] ~= nil then box.error(box.error.MORE_THAN_ONE_TUPLE) end if res[1] ~= nil then return res[1] end end function methods:min(key, opts) check_index_arg(self, 'min') if opts and opts.buffer then error("index:min() doesn't support `buffer` argument") end local res = remote:_request('select', opts, self.space.id, self.id, box.index.GE, 0, 1, key) return one_tuple(res) end function methods:max(key, opts) check_index_arg(self, 'max') if opts and opts.buffer then error("index:max() doesn't support `buffer` argument") end local res = remote:_request('select', opts, self.space.id, self.id, box.index.LE, 0, 1, key) return one_tuple(res) end function methods:count(key, opts) check_index_arg(self, 'count') if opts and opts.buffer then error("index:count() doesn't support `buffer` argument") end local code = string.format('box.space.%s.index.%s:count', self.space.name, self.name) return remote:_request('call_16', opts, code, { key })[1][1] end function methods:delete(key, opts) check_index_arg(self, 'delete') local res = remote:_request('delete', opts, self.space.id, self.id, key) return one_tuple(res) end function methods:update(key, oplist, opts) check_index_arg(self, 'update') local res = remote:_request('update', opts, self.space.id, self.id, key, oplist) return one_tuple(res) end return { __index = methods, __metatable = false } end local this_module = { create_transport = create_transport, connect = connect, new = connect -- Tarantool < 1.7.1 compatibility } function this_module.timeout(timeout, ...) if type(timeout) == 'table' then timeout = ... end if not timeout then return this_module end local function timed_connect(...) local host, port, opts = parse_connect_params(...) 
if opts.wait_connected ~= false then opts.wait_connected = timeout end return connect(host, port, opts) end return setmetatable({ connect = timed_connect, new = timed_connect }, {__index = this_module}) end local function rollback() if rawget(box, 'rollback') ~= nil then -- roll back local transaction on error box.rollback() end end local function handle_eval_result(status, ...) if not status then rollback() return box.error(E_PROC_LUA, (...)) end return ... end this_module.self = { ping = function() return true end, reload_schema = function() end, close = function() end, timeout = function(self) return self end, wait_connected = function(self) return true end, is_connected = function(self) return true end, call = function(_box, proc_name, args, opts) check_remote_arg(_box, 'call') check_call_args(args) args = args or {} proc_name = tostring(proc_name) local status, proc, obj = pcall(package.loaded['box.internal']. call_loadproc, proc_name) if not status then rollback() return box.error() -- re-throw end local result if obj ~= nil then return handle_eval_result(pcall(proc, obj, unpack(args))) else return handle_eval_result(pcall(proc, unpack(args))) end end, eval = function(_box, expr, args, opts) check_remote_arg(_box, 'eval') check_eval_args(args) args = args or {} local proc, errmsg = loadstring(expr) if not proc then proc, errmsg = loadstring("return "..expr) end if not proc then rollback() return box.error(box.error.PROC_LUA, errmsg) end return handle_eval_result(pcall(proc, unpack(args))) end } setmetatable(this_module.self, { __index = function(self, key) if key == 'space' then -- proxy self.space to box.space return require('box').space end return nil end }) package.loaded['net.box'] = this_module tarantool_1.9.1.26.g63eb81e3c/src/box/lua/xlog.c0000664000000000000000000002124713306565107017477 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "xlog.h" #include #include #include #include #include #include #include #include #include #include #include #include "box/memtx_tuple.h" /* {{{ Helpers */ static uint32_t CTID_STRUCT_XLOG_CURSOR_REF = 0; static const char *xloglib_name = "xlog"; static int lbox_pushcursor(struct lua_State *L, struct xlog_cursor *cur) { struct xlog_cursor **pcur = NULL; pcur = (struct xlog_cursor **)luaL_pushcdata(L, CTID_STRUCT_XLOG_CURSOR_REF); *pcur = cur; return 1; } static struct xlog_cursor * lbox_checkcursor(struct lua_State *L, int narg, const char *src) { uint32_t ctypeid; void *data = NULL; data = (struct xlog_cursor *)luaL_checkcdata(L, narg, &ctypeid); assert(ctypeid == CTID_STRUCT_XLOG_CURSOR_REF); if (ctypeid != (uint32_t )CTID_STRUCT_XLOG_CURSOR_REF) luaL_error(L, "%s: expecting xlog_cursor object", src); return *(struct xlog_cursor **)data; } /* }}} */ /* {{{ Xlog Parser */ /** * Replaces whitespace with underscore for xlog key names, e.g. * "row index offset" => "row_index_offset". */ static void lbox_xlog_pushkey(lua_State *L, const char *key) { luaL_Buffer b; luaL_buffinit(L, &b); for (const char *pos = key; *pos; pos++) luaL_addchar(&b, (*pos != ' ') ? 
*pos : '_'); luaL_pushresult(&b); } static void lbox_xlog_parse_body_kv(struct lua_State *L, int type, const char **beg, const char *end) { if (mp_typeof(**beg) != MP_UINT) luaL_error(L, "Broken type of body key"); uint32_t v = mp_decode_uint(beg); if (iproto_type_is_dml(type) && iproto_key_name(v)) { lbox_xlog_pushkey(L, iproto_key_name(v)); } else if (type == VY_INDEX_RUN_INFO && vy_run_info_key_name(v)) { lbox_xlog_pushkey(L, vy_run_info_key_name(v)); } else if (type == VY_INDEX_PAGE_INFO && vy_page_info_key_name(v)) { lbox_xlog_pushkey(L, vy_page_info_key_name(v)); } else if (type == VY_RUN_ROW_INDEX && vy_row_index_key_name(v)) { lbox_xlog_pushkey(L, vy_row_index_key_name(v)); } else { lua_pushinteger(L, v); /* unknown key */ } if ((v == IPROTO_KEY || v == IPROTO_TUPLE) && (mp_typeof(**beg) == MP_ARRAY)) { /* * Push tuple if possible. */ const char *tuple_beg = *beg; mp_next(beg); struct tuple_format *format = box_tuple_format_default(); struct tuple *tuple = box_tuple_new(format, tuple_beg, *beg); if (tuple == NULL) luaT_error(L); luaT_pushtuple(L, tuple); } else { /* * Push Lua objects */ const char *tmp = *beg; if (mp_check(&tmp, end) != 0) { lua_pushstring(L, ""); } else { luamp_decode(L, luaL_msgpack_default, beg); } } lua_settable(L, -3); } static int lbox_xlog_parse_body(struct lua_State *L, int type, const char *ptr, size_t len) { const char **beg = &ptr; const char *end = ptr + len; if (mp_typeof(**beg) != MP_MAP) return -1; uint32_t size = mp_decode_map(beg); uint32_t i; for (i = 0; i < size && *beg < end; i++) lbox_xlog_parse_body_kv(L, type, beg, end); if (i != size) say_warn("warning: decoded %u values from" " MP_MAP, %u expected", i, size); return 0; } static int lbox_xlog_parser_iterate(struct lua_State *L) { struct xlog_cursor *cur = lbox_checkcursor(L, 1, "xlog:pairs()"); struct xrow_header row; int rc = 0; /* skip all bad read requests */ while (true) { rc = xlog_cursor_next_row(cur, &row); if (rc == 0) break; if (rc < 0) { struct error *e = 
diag_last_error(diag_get()); if (e->type != &type_XlogError) luaT_error(L); } while ((rc = xlog_cursor_next_tx(cur)) < 0) { struct error *e = diag_last_error(diag_get()); if (e->type != &type_XlogError) luaT_error(L); if ((rc = xlog_cursor_find_tx_magic(cur)) < 0) luaT_error(L); if (rc == 1) break; } if (rc == 1) break; } if (rc == 1) return 0; /* EOF */ assert(rc == 0); lua_pushinteger(L, row.lsn); lua_createtable(L, 0, 8); lua_pushstring(L, "HEADER"); lua_createtable(L, 0, 8); lua_pushstring(L, iproto_key_name(IPROTO_REQUEST_TYPE)); const char *typename = iproto_type_name(row.type); if (typename != NULL) { lua_pushstring(L, typename); } else { lua_pushnumber(L, row.type); /* unknown key */ } lua_settable(L, -3); /* type */ if (row.sync != 0) { lbox_xlog_pushkey(L, iproto_key_name(IPROTO_SYNC)); lua_pushinteger(L, row.sync); lua_settable(L, -3); /* sync */ } if (row.lsn != 0) { lbox_xlog_pushkey(L, iproto_key_name(IPROTO_LSN)); lua_pushinteger(L, row.lsn); lua_settable(L, -3); /* lsn */ } if (row.replica_id != 0) { lbox_xlog_pushkey(L, iproto_key_name(IPROTO_REPLICA_ID)); lua_pushinteger(L, row.replica_id); lua_settable(L, -3); /* replica_id */ } if (row.tm != 0) { lbox_xlog_pushkey(L, iproto_key_name(IPROTO_TIMESTAMP)); lua_pushnumber(L, row.tm); lua_settable(L, -3); /* timestamp */ } lua_settable(L, -3); /* HEADER */ assert(row.bodycnt == 1); /* always 1 for read */ lua_pushstring(L, "BODY"); lua_newtable(L); lbox_xlog_parse_body(L, row.type, (char *)row.body[0].iov_base, row.body[0].iov_len); lua_settable(L, -3); /* BODY */ return 2; } /* }}} */ static void lbox_xlog_parser_close(struct xlog_cursor *cur) { if (cur == NULL) return; xlog_cursor_close(cur, false); free(cur); } static int lbox_xlog_parser_gc(struct lua_State *L) { struct xlog_cursor *cur = lbox_checkcursor(L, 1, "xlog:gc()"); lbox_xlog_parser_close(cur); return 0; } static int lbox_xlog_parser_open_pairs(struct lua_State *L) { int args_n = lua_gettop(L); if (args_n != 1 || !lua_isstring(L, 1)) 
luaL_error(L, "Usage: parser.open(log_filename)"); const char *filename = luaL_checkstring(L, 1); /* Construct xlog cursor */ struct xlog_cursor *cur = (struct xlog_cursor *)calloc(1, sizeof(struct xlog_cursor)); if (cur == NULL) { diag_set(OutOfMemory, sizeof(struct xlog_cursor), "malloc", "struct xlog_cursor"); return luaT_error(L); } /* Construct xlog object */ if (xlog_cursor_open(cur, filename) < 0) { return luaT_error(L); } if (strncmp(cur->meta.filetype, "SNAP", 4) != 0 && strncmp(cur->meta.filetype, "XLOG", 4) != 0 && strncmp(cur->meta.filetype, "RUN", 3) != 0 && strncmp(cur->meta.filetype, "INDEX", 5) != 0 && strncmp(cur->meta.filetype, "DATA", 4) != 0 && strncmp(cur->meta.filetype, "VYLOG", 4) != 0) { char buf[1024]; snprintf(buf, sizeof(buf), "'%.*s' file type", (int) strlen(cur->meta.filetype), cur->meta.filetype); diag_set(ClientError, ER_UNSUPPORTED, "xlog reader", buf); xlog_cursor_close(cur, false); free(cur); return luaT_error(L); } /* push iteration function */ lua_pushcclosure(L, &lbox_xlog_parser_iterate, 1); /* push log and set GC */ lbox_pushcursor(L, cur); lua_pushcfunction(L, lbox_xlog_parser_gc); luaL_setcdatagc(L, -2); /* push iterator position */ lua_pushinteger(L, 0); return 3; } static const struct luaL_Reg lbox_xlog_parser_lib [] = { { "pairs", lbox_xlog_parser_open_pairs }, { NULL, NULL } }; void box_lua_xlog_init(struct lua_State *L) { int rc = 0; /* Get CTypeIDs */ rc = luaL_cdef(L, "struct xlog_cursor;"); assert(rc == 0); (void) rc; CTID_STRUCT_XLOG_CURSOR_REF = luaL_ctypeid(L, "struct xlog_cursor&"); assert(CTID_STRUCT_XLOG_CURSOR_REF != 0); luaL_register_module(L, xloglib_name, lbox_xlog_parser_lib); lua_newtable(L); lua_setmetatable(L, -2); lua_pop(L, 1); } /* }}} */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/init.c0000664000000000000000000001406413306565107017470 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "box/lua/init.h" #include #include #include #include "lua/utils.h" /* luaT_error() */ #include "box/box.h" #include "box/txn.h" #include "box/gc.h" #include "box/checkpoint.h" #include "box/vclock.h" #include "box/lua/error.h" #include "box/lua/tuple.h" #include "box/lua/call.h" #include "box/lua/slab.h" #include "box/lua/index.h" #include "box/lua/space.h" #include "box/lua/sequence.h" #include "box/lua/misc.h" #include "box/lua/stat.h" #include "box/lua/info.h" #include "box/lua/ctl.h" #include "box/lua/session.h" #include "box/lua/net_box.h" #include "box/lua/cfg.h" #include "box/lua/xlog.h" #include "box/lua/console.h" extern char session_lua[], tuple_lua[], schema_lua[], load_cfg_lua[], xlog_lua[], checkpoint_daemon_lua[], net_box_lua[], upgrade_lua[], console_lua[]; static const char *lua_sources[] = { "box/session", session_lua, "box/tuple", tuple_lua, "box/schema", schema_lua, "box/checkpoint_daemon", checkpoint_daemon_lua, "box/upgrade", upgrade_lua, "box/net_box", net_box_lua, "box/console", console_lua, "box/load_cfg", load_cfg_lua, "box/xlog", xlog_lua, NULL }; static int lbox_commit(lua_State *L) { if (box_txn_commit() != 0) return luaT_error(L); return 0; } static int lbox_rollback(lua_State *L) { (void)L; if (box_txn_rollback() != 0) return luaT_error(L); return 0; } static int lbox_snapshot(struct lua_State *L) { int ret = box_checkpoint(); if (ret == 0) { lua_pushstring(L, "ok"); return 1; } return luaT_error(L); } static int lbox_gc_info(struct lua_State *L) { int count; const struct vclock *vclock; lua_newtable(L); lua_pushstring(L, "checkpoints"); lua_newtable(L); struct checkpoint_iterator checkpoints; checkpoint_iterator_init(&checkpoints); count = 0; while ((vclock = checkpoint_iterator_next(&checkpoints)) != NULL) { lua_createtable(L, 0, 1); lua_pushstring(L, "signature"); luaL_pushint64(L, vclock_sum(vclock)); lua_settable(L, -3); lua_rawseti(L, -2, ++count); } lua_settable(L, -3); lua_pushstring(L, "consumers"); 
lua_newtable(L); struct gc_consumer_iterator consumers; gc_consumer_iterator_init(&consumers); count = 0; struct gc_consumer *consumer; while ((consumer = gc_consumer_iterator_next(&consumers)) != NULL) { lua_createtable(L, 0, 2); lua_pushstring(L, "name"); lua_pushstring(L, gc_consumer_name(consumer)); lua_settable(L, -3); lua_pushstring(L, "signature"); luaL_pushint64(L, gc_consumer_signature(consumer)); lua_settable(L, -3); lua_rawseti(L, -2, ++count); } lua_settable(L, -3); return 1; } /** Argument passed to lbox_backup_fn(). */ struct lbox_backup_arg { /** Lua state. */ struct lua_State *L; /** Number of files in the resulting table. */ int file_count; }; static int lbox_backup_cb(const char *path, void *cb_arg) { struct lbox_backup_arg *arg = cb_arg; lua_pushinteger(arg->L, ++arg->file_count); lua_pushstring(arg->L, path); lua_settable(arg->L, -3); return 0; } static int lbox_backup_start(struct lua_State *L) { lua_newtable(L); struct lbox_backup_arg arg = { .L = L, }; if (box_backup_start(lbox_backup_cb, &arg) != 0) return luaT_error(L); return 1; } static int lbox_backup_stop(struct lua_State *L) { (void)L; box_backup_stop(); return 0; } static const struct luaL_Reg boxlib[] = { {"commit", lbox_commit}, {"rollback", lbox_rollback}, {"snapshot", lbox_snapshot}, {NULL, NULL} }; static const struct luaL_Reg boxlib_gc[] = { {"info", lbox_gc_info}, {NULL, NULL} }; static const struct luaL_Reg boxlib_backup[] = { {"start", lbox_backup_start}, {"stop", lbox_backup_stop}, {NULL, NULL} }; #include "say.h" void box_lua_init(struct lua_State *L) { /* Use luaL_register() to set _G.box */ luaL_register(L, "box", boxlib); lua_pop(L, 1); luaL_register(L, "box.internal.gc", boxlib_gc); lua_pop(L, 1); luaL_register(L, "box.backup", boxlib_backup); lua_pop(L, 1); box_lua_error_init(L); box_lua_tuple_init(L); box_lua_call_init(L); box_lua_cfg_init(L); box_lua_slab_init(L); box_lua_index_init(L); box_lua_space_init(L); box_lua_sequence_init(L); box_lua_misc_init(L); 
box_lua_info_init(L); box_lua_stat_init(L); box_lua_ctl_init(L); box_lua_session_init(L); box_lua_xlog_init(L); luaopen_net_box(L); lua_pop(L, 1); tarantool_lua_console_init(L); lua_pop(L, 1); /* Load Lua extension */ for (const char **s = lua_sources; *s; s += 2) { const char *modname = *s; const char *modsrc = *(s + 1); const char *modfile = lua_pushfstring(L, "@builtin/%s.lua", modname); if (luaL_loadbuffer(L, modsrc, strlen(modsrc), modfile)) panic("Error loading Lua module %s...: %s", modname, lua_tostring(L, -1)); lua_call(L, 0, 0); lua_pop(L, 1); /* modfile */ } assert(lua_gettop(L) == 0); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/info.c0000664000000000000000000003021213306565107017451 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "box/lua/info.h" #include /* tolower() */ #include #include #include #include "box/applier.h" #include "box/relay.h" #include "box/iproto.h" #include "box/wal.h" #include "box/replication.h" #include "box/info.h" #include "box/engine.h" #include "box/vinyl.h" #include "main.h" #include "version.h" #include "box/box.h" #include "lua/utils.h" #include "fiber.h" static void lbox_pushvclock(struct lua_State *L, const struct vclock *vclock) { lua_createtable(L, 0, vclock_size(vclock)); struct vclock_iterator it; vclock_iterator_init(&it, vclock); vclock_foreach(&it, replica) { lua_pushinteger(L, replica.id); luaL_pushuint64(L, replica.lsn); lua_settable(L, -3); } luaL_setmaphint(L, -1); /* compact flow */ } static void lbox_pushapplier(lua_State *L, struct applier *applier) { lua_newtable(L); /* Get applier state in lower case */ static char status[16]; char *d = status; const char *s = applier_state_strs[applier->state] + strlen("APPLIER_"); assert(strlen(s) < sizeof(status)); while ((*(d++) = tolower(*(s++)))); lua_pushstring(L, "status"); lua_pushstring(L, status); lua_settable(L, -3); if (applier->reader) { lua_pushstring(L, "lag"); lua_pushnumber(L, applier->lag); lua_settable(L, -3); lua_pushstring(L, "idle"); lua_pushnumber(L, ev_monotonic_now(loop()) - applier->last_row_time); lua_settable(L, -3); char name[FIBER_NAME_MAX]; int total = uri_format(name, sizeof(name), &applier->uri, false); 
lua_pushstring(L, "peer"); lua_pushlstring(L, name, total); lua_settable(L, -3); struct error *e = diag_last_error(&applier->reader->diag); if (e != NULL) { lua_pushstring(L, "message"); lua_pushstring(L, e->errmsg); lua_settable(L, -3); } } } static void lbox_pushrelay(lua_State *L, struct relay *relay) { lua_newtable(L); lua_pushstring(L, "vclock"); lbox_pushvclock(L, relay_vclock(relay)); lua_settable(L, -3); } static void lbox_pushreplica(lua_State *L, struct replica *replica) { struct applier *applier = replica->applier; struct relay *relay = replica->relay; /* 16 is used to get the best visual experience in YAML output */ lua_createtable(L, 0, 16); lua_pushstring(L, "id"); lua_pushinteger(L, replica->id); lua_settable(L, -3); lua_pushstring(L, "uuid"); lua_pushstring(L, tt_uuid_str(&replica->uuid)); lua_settable(L, -3); lua_pushstring(L, "lsn"); luaL_pushuint64(L, vclock_get(&replicaset.vclock, replica->id)); lua_settable(L, -3); if (applier != NULL && applier->state != APPLIER_OFF) { lua_pushstring(L, "upstream"); lbox_pushapplier(L, applier); lua_settable(L, -3); } if (relay != NULL) { lua_pushstring(L, "downstream"); lbox_pushrelay(L, relay); lua_settable(L, -3); } } static int lbox_info_replication(struct lua_State *L) { lua_newtable(L); /* box.info.replication */ /* Nice formatting */ lua_newtable(L); /* metatable */ lua_pushliteral(L, "mapping"); lua_setfield(L, -2, "__serialize"); lua_setmetatable(L, -2); replicaset_foreach(replica) { /* Applier hasn't received replica id yet */ if (replica->id == REPLICA_ID_NIL) continue; lbox_pushreplica(L, replica); lua_rawseti(L, -2, replica->id); } return 1; } static int lbox_info_id(struct lua_State *L) { /* * Self can be NULL during bootstrap: entire box.info * bundle becomes available soon after entering box.cfg{} * and replication bootstrap relies on this as it looks * at box.info.status. 
*/ struct replica *self = replica_by_uuid(&INSTANCE_UUID); if (self != NULL && self->id != REPLICA_ID_NIL) { lua_pushinteger(L, self->id); } else { luaL_pushnull(L); } return 1; } static int lbox_info_uuid(struct lua_State *L) { lua_pushlstring(L, tt_uuid_str(&INSTANCE_UUID), UUID_STR_LEN); return 1; } static int lbox_info_lsn(struct lua_State *L) { /* See comments in lbox_info_id */ struct replica *self = replica_by_uuid(&INSTANCE_UUID); if (self != NULL && self->id != REPLICA_ID_NIL) { luaL_pushint64(L, vclock_get(&replicaset.vclock, self->id)); } else { luaL_pushint64(L, -1); } return 1; } static int lbox_info_signature(struct lua_State *L) { luaL_pushint64(L, vclock_sum(&replicaset.vclock)); return 1; } static int lbox_info_ro(struct lua_State *L) { lua_pushboolean(L, box_is_ro()); return 1; } /* * Tarantool 1.6.x compat */ static int lbox_info_server(struct lua_State *L) { lua_createtable(L, 0, 2); lua_pushliteral(L, "id"); lbox_info_id(L); lua_settable(L, -3); lua_pushliteral(L, "uuid"); lbox_info_uuid(L); lua_settable(L, -3); lua_pushliteral(L, "lsn"); lbox_info_lsn(L); lua_settable(L, -3); lua_pushliteral(L, "ro"); lbox_info_ro(L); lua_settable(L, -3); return 1; } static int lbox_info_vclock(struct lua_State *L) { lbox_pushvclock(L, &replicaset.vclock); return 1; } static int lbox_info_status(struct lua_State *L) { lua_pushstring(L, box_status()); return 1; } static int lbox_info_uptime(struct lua_State *L) { lua_pushnumber(L, (unsigned)tarantool_uptime() + 1); return 1; } static int lbox_info_pid(struct lua_State *L) { lua_pushnumber(L, getpid()); return 1; } static int lbox_info_cluster(struct lua_State *L) { lua_createtable(L, 0, 2); lua_pushliteral(L, "uuid"); lua_pushlstring(L, tt_uuid_str(&REPLICASET_UUID), UUID_STR_LEN); lua_settable(L, -3); return 1; } static int lbox_info_memory_call(struct lua_State *L) { struct engine_memory_stat stat; engine_memory_stat(&stat); lua_pushstring(L, "data"); luaL_pushuint64(L, stat.data); lua_settable(L, -3); 
lua_pushstring(L, "index"); luaL_pushuint64(L, stat.index); lua_settable(L, -3); lua_pushstring(L, "cache"); luaL_pushuint64(L, stat.cache); lua_settable(L, -3); lua_pushstring(L, "tx"); luaL_pushuint64(L, stat.tx); lua_settable(L, -3); lua_pushstring(L, "net"); luaL_pushuint64(L, iproto_mem_used()); lua_settable(L, -3); lua_pushstring(L, "lua"); lua_pushinteger(L, G(L)->gc.total); lua_settable(L, -3); return 1; } static int lbox_info_memory(struct lua_State *L) { lua_newtable(L); lua_newtable(L); /* metatable */ lua_pushstring(L, "__call"); lua_pushcfunction(L, lbox_info_memory_call); lua_settable(L, -3); lua_setmetatable(L, -2); return 1; } static void luaT_info_begin(struct info_handler *info) { lua_State *L = (lua_State *) info->ctx; lua_newtable(L); } static void luaT_info_end(struct info_handler *info) { (void) info; } static void luaT_info_begin_table(struct info_handler *info, const char *key) { lua_State *L = (lua_State *) info->ctx; lua_pushstring(L, key); lua_newtable(L); } static void luaT_info_end_table(struct info_handler *info) { lua_State *L = (lua_State *) info->ctx; lua_settable(L, -3); } static void luaT_info_append_double(struct info_handler *info, const char *key, double value) { lua_State *L = (lua_State *) info->ctx; lua_pushstring(L, key); lua_pushnumber(L, value); lua_settable(L, -3); } static void luaT_info_append_int(struct info_handler *info, const char *key, int64_t value) { lua_State *L = (lua_State *) info->ctx; lua_pushstring(L, key); luaL_pushint64(L, value); lua_settable(L, -3); } static void luaT_info_append_str(struct info_handler *info, const char *key, const char *value) { lua_State *L = (lua_State *) info->ctx; lua_pushstring(L, key); lua_pushstring(L, value); lua_settable(L, -3); } void luaT_info_handler_create(struct info_handler *h, struct lua_State *L) { static struct info_handler_vtab lua_vtab = { .begin = luaT_info_begin, .end = luaT_info_end, .begin_table = luaT_info_begin_table, .end_table = luaT_info_end_table, 
.append_int = luaT_info_append_int, .append_str = luaT_info_append_str, .append_double = luaT_info_append_double }; h->vtab = &lua_vtab; h->ctx = L; } static int lbox_info_vinyl_call(struct lua_State *L) { struct info_handler h; luaT_info_handler_create(&h, L); struct vinyl_engine *vinyl; vinyl = (struct vinyl_engine *)engine_by_name("vinyl"); assert(vinyl != NULL); vinyl_engine_info(vinyl, &h); return 1; } static int lbox_info_vinyl(struct lua_State *L) { lua_newtable(L); lua_newtable(L); /* metatable */ lua_pushstring(L, "__call"); lua_pushcfunction(L, lbox_info_vinyl_call); lua_settable(L, -3); lua_setmetatable(L, -2); return 1; } static const struct luaL_Reg lbox_info_dynamic_meta[] = { {"id", lbox_info_id}, {"uuid", lbox_info_uuid}, {"lsn", lbox_info_lsn}, {"signature", lbox_info_signature}, {"vclock", lbox_info_vclock}, {"ro", lbox_info_ro}, {"replication", lbox_info_replication}, {"status", lbox_info_status}, {"uptime", lbox_info_uptime}, {"pid", lbox_info_pid}, {"cluster", lbox_info_cluster}, {"memory", lbox_info_memory}, {"vinyl", lbox_info_vinyl}, {NULL, NULL} }; static const struct luaL_Reg lbox_info_dynamic_meta_v16[] = { {"server", lbox_info_server}, {NULL, NULL} }; /** Evaluate box.info.* function value and push it on the stack. */ static int lbox_info_index(struct lua_State *L) { lua_pushvalue(L, -1); /* dup key */ lua_gettable(L, lua_upvalueindex(1)); /* table[key] */ if (!lua_isfunction(L, -1)) { /* No such key. Leave nil is on the stack. */ return 1; } lua_call(L, 0, 1); lua_remove(L, -2); return 1; } /** Push a bunch of compile-time or start-time constants into a Lua table. */ static void lbox_info_init_static_values(struct lua_State *L) { /* tarantool version */ lua_pushstring(L, "version"); lua_pushstring(L, tarantool_version()); lua_settable(L, -3); } /** * When user invokes box.info(), return a table of key/value * pairs containing the current info. 
*/ static int lbox_info_call(struct lua_State *L) { lua_newtable(L); lbox_info_init_static_values(L); for (int i = 0; lbox_info_dynamic_meta[i].name; i++) { lua_pushstring(L, lbox_info_dynamic_meta[i].name); lbox_info_dynamic_meta[i].func(L); lua_settable(L, -3); } /* Tarantool 1.6.x compat */ lua_newtable(L); lua_newtable(L); for (int i = 0; lbox_info_dynamic_meta_v16[i].name; i++) { lua_pushstring(L, lbox_info_dynamic_meta_v16[i].name); lbox_info_dynamic_meta_v16[i].func(L); lua_settable(L, -3); } lua_setfield(L, -2, "__index"); lua_setmetatable(L, -2); return 1; } /** Initialize box.info package. */ void box_lua_info_init(struct lua_State *L) { static const struct luaL_Reg infolib [] = { {NULL, NULL} }; luaL_register_module(L, "box.info", infolib); lua_newtable(L); /* metatable for info */ lua_pushstring(L, "__index"); lua_newtable(L); /* table for __index */ luaL_register(L, NULL, lbox_info_dynamic_meta); luaL_register(L, NULL, lbox_info_dynamic_meta_v16); lua_pushcclosure(L, lbox_info_index, 1); lua_settable(L, -3); lua_pushstring(L, "__call"); lua_pushcfunction(L, lbox_info_call); lua_settable(L, -3); lua_pushstring(L, "__serialize"); lua_pushcfunction(L, lbox_info_call); lua_settable(L, -3); lua_setmetatable(L, -2); lbox_info_init_static_values(L); lua_pop(L, 1); /* info module */ } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/misc.h0000664000000000000000000000345013306560010017446 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_LUA_MISC_H #define INCLUDES_TARANTOOL_BOX_LUA_MISC_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; char * lbox_encode_tuple_on_gc(struct lua_State *L, int idx, size_t *p_len); void box_lua_misc_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_LUA_MISC_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/xlog.lua0000664000000000000000000000027613306560010020021 0ustar rootrootlocal internal = require('xlog') local fun = require('fun') local function xlog_pairs(...) return fun.wrap(internal.pairs(...)) end package.loaded['xlog'] = { pairs = xlog_pairs, } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/slab.c0000664000000000000000000001711013306565107017441 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include "box/lua/slab.h" #include "lua/utils.h" #include #include #include #include /* internals: lua in box.runtime.info() */ #include "small/small.h" #include "small/quota.h" #include "memory.h" extern struct small_alloc memtx_alloc; extern struct mempool memtx_index_extent_pool; static int small_stats_noop_cb(const struct mempool_stats *stats, void *cb_ctx) { (void) stats; (void) cb_ctx; return 0; } static int small_stats_lua_cb(const struct mempool_stats *stats, void *cb_ctx) { /** Don't publish information about empty slabs. */ if (stats->slabcount == 0) return 0; struct lua_State *L = (struct lua_State *) cb_ctx; /* * Create a Lua table for every slab class. A class is * defined by its item size. */ /** Assign next slab size to the next member of an array. 
*/ lua_pushnumber(L, lua_objlen(L, -1) + 1); lua_newtable(L); /** * This is in fact only to force YaML flow "compact" for this * table. */ luaL_setmaphint(L, -1); lua_pushstring(L, "mem_used"); luaL_pushuint64(L, stats->totals.used); lua_settable(L, -3); lua_pushstring(L, "slab_size"); luaL_pushuint64(L, stats->slabsize); lua_settable(L, -3); lua_pushstring(L, "mem_free"); luaL_pushuint64(L, stats->totals.total - stats->totals.used); lua_settable(L, -3); lua_pushstring(L, "item_size"); luaL_pushuint64(L, stats->objsize); lua_settable(L, -3); lua_pushstring(L, "slab_count"); luaL_pushuint64(L, stats->slabcount); lua_settable(L, -3); lua_pushstring(L, "item_count"); luaL_pushuint64(L, stats->objcount); lua_settable(L, -3); lua_settable(L, -3); return 0; } static int lbox_slab_stats(struct lua_State *L) { struct small_stats totals; lua_newtable(L); /* * List all slabs used for tuples and slabs used for * indexes, with their stats. */ small_stats(&memtx_alloc, &totals, small_stats_lua_cb, L); struct mempool_stats index_stats; mempool_stats(&memtx_index_extent_pool, &index_stats); small_stats_lua_cb(&index_stats, L); return 1; } static int lbox_slab_info(struct lua_State *L) { struct small_stats totals; /* * List all slabs used for tuples and slabs used for * indexes, with their stats. */ lua_newtable(L); small_stats(&memtx_alloc, &totals, small_stats_noop_cb, L); struct mempool_stats index_stats; mempool_stats(&memtx_index_extent_pool, &index_stats); struct slab_arena *tuple_arena = memtx_alloc.cache->arena; struct quota *memtx_quota = tuple_arena->quota; double ratio; char ratio_buf[32]; ratio = 100 * ((double) totals.used / ((double) totals.total + 0.0001)); snprintf(ratio_buf, sizeof(ratio_buf), "%0.2lf%%", ratio); /** How much address space has been already touched */ lua_pushstring(L, "items_size"); luaL_pushuint64(L, totals.total); lua_settable(L, -3); /** * How much of this formatted address space is used for * actual data. 
*/ lua_pushstring(L, "items_used"); luaL_pushuint64(L, totals.used); lua_settable(L, -3); /* * Fragmentation factor for tuples. Don't account indexes, * even if they are fragmented, there is nothing people * can do about it. */ lua_pushstring(L, "items_used_ratio"); lua_pushstring(L, ratio_buf); lua_settable(L, -3); /** How much address space has been already touched * (tuples and indexes) */ lua_pushstring(L, "arena_size"); /* * We could use totals.total + index_stats.total here, * but this would not account for slabs which are sitting * in slab cache or in the arena, available for reuse. * Make sure a simple formula: * items_used_ratio > 0.9 && arena_used_ratio > 0.9 && * quota_used_ratio > 0.9 work as an indicator * for reaching Tarantool memory limit. */ size_t arena_size = tuple_arena->used; luaL_pushuint64(L, arena_size); lua_settable(L, -3); /** * How much of this formatted address space is used for * data (tuples and indexes). */ lua_pushstring(L, "arena_used"); luaL_pushuint64(L, totals.used + index_stats.totals.used); lua_settable(L, -3); ratio = 100 * ((double) (totals.used + index_stats.totals.used) / (double) arena_size); snprintf(ratio_buf, sizeof(ratio_buf), "%0.1lf%%", ratio); lua_pushstring(L, "arena_used_ratio"); lua_pushstring(L, ratio_buf); lua_settable(L, -3); /* * This is pretty much the same as * box.cfg.slab_alloc_arena, but in bytes */ lua_pushstring(L, "quota_size"); luaL_pushuint64(L, quota_total(memtx_quota)); lua_settable(L, -3); /* * How much quota has been booked - reflects the total * size of slabs in various slab caches. */ lua_pushstring(L, "quota_used"); luaL_pushuint64(L, quota_used(memtx_quota)); lua_settable(L, -3); /** * This should be the same as arena_size/arena_used, however, * don't trust totals in the most important monitoring * factor, it's the quota that give you OOM error in the * end of the day. 
*/ ratio = 100 * ((double) quota_used(memtx_quota) / ((double) quota_total(memtx_quota) + 0.0001)); snprintf(ratio_buf, sizeof(ratio_buf), "%0.2lf%%", ratio); lua_pushstring(L, "quota_used_ratio"); lua_pushstring(L, ratio_buf); lua_settable(L, -3); return 1; } static int lbox_runtime_info(struct lua_State *L) { lua_newtable(L); lua_pushstring(L, "used"); luaL_pushuint64(L, runtime.used); lua_settable(L, -3); lua_pushstring(L, "maxalloc"); luaL_pushuint64(L, quota_total(runtime.quota)); lua_settable(L, -3); /* * Lua GC heap size */ lua_pushstring(L, "lua"); lua_pushinteger(L, G(L)->gc.total); lua_settable(L, -3); return 1; } static int lbox_slab_check(MAYBE_UNUSED struct lua_State *L) { slab_cache_check(memtx_alloc.cache); return 0; } /** Initialize box.slab package. */ void box_lua_slab_init(struct lua_State *L) { lua_getfield(L, LUA_GLOBALSINDEX, "box"); lua_pushstring(L, "slab"); lua_newtable(L); lua_pushstring(L, "info"); lua_pushcfunction(L, lbox_slab_info); lua_settable(L, -3); lua_pushstring(L, "stats"); lua_pushcfunction(L, lbox_slab_stats); lua_settable(L, -3); lua_pushstring(L, "check"); lua_pushcfunction(L, lbox_slab_check); lua_settable(L, -3); lua_settable(L, -3); /* box.slab */ lua_pushstring(L, "runtime"); lua_newtable(L); lua_pushstring(L, "info"); lua_pushcfunction(L, lbox_runtime_info); lua_settable(L, -3); lua_settable(L, -3); /* box.runtime */ lua_pop(L, 1); /* box. */ } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/stat.h0000664000000000000000000000326713306560010017474 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_STAT_H #define INCLUDES_TARANTOOL_LUA_STAT_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_stat_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_STAT_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/space.cc0000664000000000000000000003123013306565107017755 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/lua/space.h" #include "box/lua/tuple.h" #include "lua/utils.h" #include "lua/trigger.h" extern "C" { #include #include #include } /* extern "C" */ #include "box/space.h" #include "box/schema.h" #include "box/user_def.h" #include "box/tuple.h" #include "box/txn.h" #include "box/vclock.h" /* VCLOCK_MAX */ #include "box/sequence.h" /** * Trigger function for all spaces */ static int lbox_push_txn_stmt(struct lua_State *L, void *event) { struct txn_stmt *stmt = txn_current_stmt((struct txn *) event); if (stmt->old_tuple) { luaT_pushtuple(L, stmt->old_tuple); } else { lua_pushnil(L); } if (stmt->new_tuple) { luaT_pushtuple(L, stmt->new_tuple); } else { lua_pushnil(L); } /* @todo: maybe the space object has to be here */ lua_pushstring(L, stmt->space->def->name); return 3; } static int lbox_pop_txn_stmt(struct lua_State *L, void *event) { struct txn_stmt *stmt = txn_current_stmt((struct txn *) event); if (lua_gettop(L) < 1) { /* No return value - nothing to do. */ return 0; } struct tuple *result = luaT_istuple(L, 1); if (result == NULL && !lua_isnil(L, 1) && !luaL_isnull(L, 1)) { /* Invalid return value - raise error. */ diag_set(ClientError, ER_BEFORE_REPLACE_RET, lua_typename(L, lua_type(L, 1))); return -1; } /* Update the new tuple. 
*/ if (result != NULL) tuple_ref(result); if (stmt->new_tuple != NULL) tuple_unref(stmt->new_tuple); stmt->new_tuple = result; return 0; } /** * Set/Reset/Get space.on_replace trigger */ static int lbox_space_on_replace(struct lua_State *L) { int top = lua_gettop(L); if (top < 1 || !lua_istable(L, 1)) { luaL_error(L, "usage: space:on_replace(function | nil, [function | nil])"); } lua_getfield(L, 1, "id"); /* Get space id. */ uint32_t id = lua_tonumber(L, lua_gettop(L)); struct space *space = space_cache_find_xc(id); lua_pop(L, 1); return lbox_trigger_reset(L, 3, &space->on_replace, lbox_push_txn_stmt, NULL); } /** * Set/Reset/Get space.before_replace trigger */ static int lbox_space_before_replace(struct lua_State *L) { int top = lua_gettop(L); if (top < 1 || !lua_istable(L, 1)) { luaL_error(L, "usage: space:before_replace(function | nil, [function | nil])"); } lua_getfield(L, 1, "id"); /* Get space id. */ uint32_t id = lua_tonumber(L, lua_gettop(L)); struct space *space = space_cache_find_xc(id); lua_pop(L, 1); return lbox_trigger_reset(L, 3, &space->before_replace, lbox_push_txn_stmt, lbox_pop_txn_stmt); } /** * Make a single space available in Lua, * via box.space[] array. * * @return A new table representing a space on top of the Lua * stack. 
*/ static void lbox_fillspace(struct lua_State *L, struct space *space, int i) { /* space.arity */ lua_pushstring(L, "field_count"); lua_pushnumber(L, space->def->exact_field_count); lua_settable(L, i); /* space.n */ lua_pushstring(L, "id"); lua_pushnumber(L, space_id(space)); lua_settable(L, i); /* space.is_temp */ lua_pushstring(L, "temporary"); lua_pushboolean(L, space_is_temporary(space)); lua_settable(L, i); /* space.name */ lua_pushstring(L, "name"); lua_pushstring(L, space_name(space)); lua_settable(L, i); /* space.engine */ lua_pushstring(L, "engine"); lua_pushstring(L, space->def->engine_name); lua_settable(L, i); lua_pushstring(L, "enabled"); lua_pushboolean(L, space_index(space, 0) != 0); lua_settable(L, i); /* space:on_replace */ lua_pushstring(L, "on_replace"); lua_pushcfunction(L, lbox_space_on_replace); lua_settable(L, i); /* space:before_replace */ lua_pushstring(L, "before_replace"); lua_pushcfunction(L, lbox_space_before_replace); lua_settable(L, i); lua_getfield(L, i, "index"); if (lua_isnil(L, -1)) { lua_pop(L, 1); /* space.index */ lua_pushstring(L, "index"); lua_newtable(L); lua_settable(L, i); /* push space.index */ lua_getfield(L, i, "index"); } else { /* Empty the table. */ lua_pushnil(L); /* first key */ while (lua_next(L, -2) != 0) { lua_pop(L, 1); /* remove the value. */ lua_pushnil(L); /* set the key to nil. */ lua_settable(L, -3); lua_pushnil(L); /* start over. */ } } /* * Fill space.index table with * all defined indexes. 
*/ for (unsigned k = 0; k <= space->index_id_max; k++) { struct index *index = space_index(space, k); if (index == NULL) continue; struct index_def *index_def = index->def; struct index_opts *index_opts = &index_def->opts; lua_pushnumber(L, index_def->iid); lua_newtable(L); /* space.index[k] */ if (index_def->type == HASH || index_def->type == TREE) { lua_pushboolean(L, index_opts->is_unique); lua_setfield(L, -2, "unique"); } else if (index_def->type == RTREE) { lua_pushnumber(L, index_opts->dimension); lua_setfield(L, -2, "dimension"); } lua_pushstring(L, index_type_strs[index_def->type]); lua_setfield(L, -2, "type"); lua_pushnumber(L, index_def->iid); lua_setfield(L, -2, "id"); lua_pushnumber(L, space->def->id); lua_setfield(L, -2, "space_id"); lua_pushstring(L, index_def->name); lua_setfield(L, -2, "name"); lua_pushstring(L, "parts"); lua_newtable(L); for (uint32_t j = 0; j < index_def->key_def->part_count; j++) { lua_pushnumber(L, j + 1); lua_newtable(L); const struct key_part *part = &index_def->key_def->parts[j]; lua_pushstring(L, field_type_strs[part->type]); lua_setfield(L, -2, "type"); lua_pushnumber(L, part->fieldno + TUPLE_INDEX_BASE); lua_setfield(L, -2, "fieldno"); lua_pushboolean(L, part->is_nullable); lua_setfield(L, -2, "is_nullable"); if (part->coll != NULL) { lua_pushstring(L, part->coll->name); lua_setfield(L, -2, "collation"); } lua_settable(L, -3); /* index[k].parts[j] */ } lua_settable(L, -3); /* space.index[k].parts */ if (k == 0 && space->sequence != NULL) { lua_pushnumber(L, space->sequence->def->id); lua_setfield(L, -2, "sequence_id"); } if (space_is_vinyl(space)) { lua_pushstring(L, "options"); lua_newtable(L); lua_pushnumber(L, index_opts->range_size); lua_setfield(L, -2, "range_size"); lua_pushnumber(L, index_opts->page_size); lua_setfield(L, -2, "page_size"); lua_pushnumber(L, index_opts->run_count_per_level); lua_setfield(L, -2, "run_count_per_level"); lua_pushnumber(L, index_opts->run_size_ratio); lua_setfield(L, -2, 
"run_size_ratio"); lua_pushnumber(L, index_opts->bloom_fpr); lua_setfield(L, -2, "bloom_fpr"); lua_settable(L, -3); } lua_settable(L, -3); /* space.index[k] */ lua_rawgeti(L, -1, index_def->iid); lua_setfield(L, -2, index_def->name); } lua_pop(L, 1); /* pop the index field */ lua_getfield(L, LUA_GLOBALSINDEX, "box"); lua_pushstring(L, "schema"); lua_gettable(L, -2); lua_pushstring(L, "space"); lua_gettable(L, -2); lua_pushstring(L, "bless"); lua_gettable(L, -2); lua_pushvalue(L, i); /* space */ lua_call(L, 1, 0); lua_pop(L, 3); /* cleanup stack - box, schema, space */ } /** Export a space to Lua */ static void box_lua_space_new(struct lua_State *L, struct space *space) { lua_getfield(L, LUA_GLOBALSINDEX, "box"); lua_getfield(L, -1, "space"); if (!lua_istable(L, -1)) { lua_pop(L, 1); /* pop nil */ lua_newtable(L); lua_setfield(L, -2, "space"); lua_getfield(L, -1, "space"); } lua_rawgeti(L, -1, space_id(space)); if (lua_isnil(L, -1)) { /* * If the space already exists, modify it, rather * than create a new one -- to not invalidate * Lua variable references to old space outside * the box.space[]. */ lua_pop(L, 1); lua_newtable(L); lua_rawseti(L, -2, space_id(space)); lua_rawgeti(L, -1, space_id(space)); } else { /* Clear the reference to old space by old name. 
*/ lua_getfield(L, -1, "name"); lua_pushnil(L); lua_settable(L, -4); } lbox_fillspace(L, space, lua_gettop(L)); lua_setfield(L, -2, space_name(space)); lua_pop(L, 2); /* box, space */ } /** Delete a given space in Lua */ static void box_lua_space_delete(struct lua_State *L, uint32_t id) { lua_getfield(L, LUA_GLOBALSINDEX, "box"); lua_getfield(L, -1, "space"); lua_rawgeti(L, -1, id); lua_getfield(L, -1, "name"); lua_pushnil(L); lua_rawset(L, -4); lua_pop(L, 1); /* pop space */ lua_pushnil(L); lua_rawseti(L, -2, id); lua_pop(L, 2); /* box, space */ } static void box_lua_space_new_or_delete(struct trigger *trigger, void *event) { struct lua_State *L = (struct lua_State *) trigger->data; struct space *space = (struct space *) event; if (space_by_id(space->def->id) != NULL) { box_lua_space_new(L, space); } else { box_lua_space_delete(L, space->def->id); } } static struct trigger on_alter_space_in_lua = { RLIST_LINK_INITIALIZER, box_lua_space_new_or_delete, NULL, NULL }; void box_lua_space_init(struct lua_State *L) { /* Register the trigger that will push space data to Lua. 
*/ on_alter_space_in_lua.data = L; trigger_add(&on_alter_space, &on_alter_space_in_lua); lua_getfield(L, LUA_GLOBALSINDEX, "box"); lua_newtable(L); lua_setfield(L, -2, "schema"); lua_getfield(L, -1, "schema"); lua_pushnumber(L, BOX_SCHEMA_ID); lua_setfield(L, -2, "SCHEMA_ID"); lua_pushnumber(L, BOX_SPACE_ID); lua_setfield(L, -2, "SPACE_ID"); lua_pushnumber(L, BOX_VSPACE_ID); lua_setfield(L, -2, "VSPACE_ID"); lua_pushnumber(L, BOX_INDEX_ID); lua_setfield(L, -2, "INDEX_ID"); lua_pushnumber(L, BOX_VINDEX_ID); lua_setfield(L, -2, "VINDEX_ID"); lua_pushnumber(L, BOX_USER_ID); lua_setfield(L, -2, "USER_ID"); lua_pushnumber(L, BOX_VUSER_ID); lua_setfield(L, -2, "VUSER_ID"); lua_pushnumber(L, BOX_FUNC_ID); lua_setfield(L, -2, "FUNC_ID"); lua_pushnumber(L, BOX_COLLATION_ID); lua_setfield(L, -2, "COLLATION_ID"); lua_pushnumber(L, BOX_VFUNC_ID); lua_setfield(L, -2, "VFUNC_ID"); lua_pushnumber(L, BOX_PRIV_ID); lua_setfield(L, -2, "PRIV_ID"); lua_pushnumber(L, BOX_VPRIV_ID); lua_setfield(L, -2, "VPRIV_ID"); lua_pushnumber(L, BOX_CLUSTER_ID); lua_setfield(L, -2, "CLUSTER_ID"); lua_pushnumber(L, BOX_TRUNCATE_ID); lua_setfield(L, -2, "TRUNCATE_ID"); lua_pushnumber(L, BOX_SEQUENCE_ID); lua_setfield(L, -2, "SEQUENCE_ID"); lua_pushnumber(L, BOX_SEQUENCE_DATA_ID); lua_setfield(L, -2, "SEQUENCE_DATA_ID"); lua_pushnumber(L, BOX_SPACE_SEQUENCE_ID); lua_setfield(L, -2, "SPACE_SEQUENCE_ID"); lua_pushnumber(L, BOX_SYSTEM_ID_MIN); lua_setfield(L, -2, "SYSTEM_ID_MIN"); lua_pushnumber(L, BOX_SYSTEM_ID_MAX); lua_setfield(L, -2, "SYSTEM_ID_MAX"); lua_pushnumber(L, BOX_SYSTEM_USER_ID_MIN); lua_setfield(L, -2, "SYSTEM_USER_ID_MIN"); lua_pushnumber(L, BOX_SYSTEM_USER_ID_MAX); lua_setfield(L, -2, "SYSTEM_USER_ID_MAX"); lua_pushnumber(L, ADMIN); lua_setfield(L, -2, "ADMIN_ID"); lua_pushnumber(L, GUEST); lua_setfield(L, -2, "GUEST_ID"); lua_pushnumber(L, PUBLIC); lua_setfield(L, -2, "PUBLIC_ROLE_ID"); lua_pushnumber(L, SUPER); lua_setfield(L, -2, "SUPER_ROLE_ID"); lua_pushnumber(L, BOX_INDEX_MAX); 
lua_setfield(L, -2, "INDEX_MAX"); lua_pushnumber(L, BOX_SPACE_MAX); lua_setfield(L, -2, "SPACE_MAX"); lua_pushnumber(L, BOX_FIELD_MAX); lua_setfield(L, -2, "FIELD_MAX"); lua_pushnumber(L, BOX_INDEX_FIELD_MAX); lua_setfield(L, -2, "INDEX_FIELD_MAX"); lua_pushnumber(L, BOX_INDEX_PART_MAX); lua_setfield(L, -2, "INDEX_PART_MAX"); lua_pushnumber(L, BOX_NAME_MAX); lua_setfield(L, -2, "NAME_MAX"); lua_pushnumber(L, FORMAT_ID_MAX); lua_setfield(L, -2, "FORMAT_ID_MAX"); lua_pushnumber(L, VCLOCK_MAX); lua_setfield(L, -2, "REPLICA_MAX"); lua_pop(L, 2); /* box, schema */ } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/console.lua0000664000000000000000000002443413306565107020530 0ustar rootroot-- console.lua -- internal file local internal = require('console') local session_internal = require('box.internal.session') local fiber = require('fiber') local socket = require('socket') local log = require('log') local errno = require('errno') local urilib = require('uri') local yaml = require('yaml') -- admin formatter must be able to encode any Lua variable local formatter = yaml.new() formatter.cfg{ encode_invalid_numbers = true; encode_load_metatables = true; encode_use_tostring = true; encode_invalid_as_nil = true; } local function format(status, ...) -- When storing a nil in a Lua table, there is no way to -- distinguish nil value from no value. This is a trick to -- make sure yaml converter correctly local function wrapnull(v) return v == nil and formatter.NULL or v end local err if status then local count = select('#', ...) if count == 0 then return "---\n...\n" end local res = {} for i=1,count,1 do table.insert(res, wrapnull(select(i, ...))) end -- serializer can raise an exception status, err = pcall(formatter.encode, res) if status then return err else err = 'console: an exception occurred when formatting the output: '.. tostring(err) end else err = wrapnull(...) 
end return formatter.encode({{error = err }}) end -- -- Evaluate command on local instance -- local function local_eval(self, line) if not line then return nil end -- -- Attempt to append 'return ' before the chunk: if the chunk is -- an expression, this pushes results of the expression onto the -- stack. If the chunk is a statement, it won't compile. In that -- case try to run the original string. -- local fun, errmsg = loadstring("return "..line) if not fun then fun, errmsg = loadstring(line) end if not fun then return format(false, errmsg) end return format(pcall(fun)) end local function eval(line) return local_eval(nil, line) end -- -- Evaluate command on remote instance -- local function remote_eval(self, line) if not line or self.remote.state ~= 'active' then local err = self.remote.error self.remote:close() self.remote = nil -- restore local REPL mode self.eval = nil self.prompt = nil self.completion = nil pcall(self.on_client_disconnect, self) return (err and format(false, err)) or '' end -- -- execute line -- local ok, res = pcall(self.remote.eval, self.remote, line) return ok and res or format(false, res) end -- -- Read command from stdin -- local function local_read(self) local buf = "" local prompt = self.prompt while true do local delim = self.delimiter local line = internal.readline({ prompt = prompt.. 
"> ", completion = self.ac and self.completion or nil }) if not line then return nil end buf = buf..line if delim == "" then -- stop once a complete Lua statement is entered local fn, err = loadstring(buf) if fn ~= nil or not string.find(err, " near ''$") then -- valid Lua code or a syntax error not due to -- an incomplete input break end if loadstring('return '..buf) ~= nil then -- certain obscure inputs like '(42\n)' yield the -- same error as incomplete statement break end elseif #buf >= #delim and buf:sub(#buf - #delim + 1) == delim then buf = buf:sub(0, #buf - #delim) break end buf = buf.."\n" prompt = string.rep(' ', #self.prompt) end internal.add_history(buf) if self.history_file then internal.save_history(self.history_file) end return buf end -- -- Print result to stdout -- local function local_print(self, output) if output == nil then self.running = nil return end print(output) end -- -- Read command from connected client console.listen() -- local function client_read(self) local delim = self.delimiter.."\n" local buf = self.client:read(delim) if buf == nil then return nil elseif buf == "" then return nil -- EOF elseif buf == "~.\n" then -- Escape sequence to close current connection (like SSH) return nil end -- remove trailing delimiter return buf:sub(1, -#delim-1) end -- -- Print result to connected client from console.listen() -- local function client_print(self, output) if not self.client then return elseif not output then -- disconnect peer self.client = nil -- socket will be closed by tcp_server() function self.running = nil return end self.client:write(output) end -- -- REPL state -- local repl_mt = { __index = { running = false; delimiter = ""; prompt = "tarantool"; read = local_read; eval = local_eval; print = local_print; completion = internal.completion_handler; ac = true; }; } -- -- REPL = read-eval-print-loop -- local function repl(self) fiber.self().storage.console = self if type(self.on_start) == 'function' then self:on_start() end while 
self.running do local command = self:read() local output = self:eval(command) self:print(output) end fiber.self().storage.console = nil end local function on_start(foo) if foo == nil or type(foo) == 'function' then repl_mt.__index.on_start = foo return end error('Wrong type of on_start hook: ' .. type(foo)) end local function on_client_disconnect(foo) if foo == nil or type(foo) == 'function' then repl_mt.__index.on_client_disconnect = foo return end error('Wrong type of on_client_disconnect hook: ' .. type(foo)) end -- -- Set delimiter -- local function delimiter(delim) local self = fiber.self().storage.console if self == nil then error("console.delimiter(): need existing console") end if delim == nil then return self.delimiter elseif type(delim) == 'string' then self.delimiter = delim else error('invalid delimiter') end end -- -- -- local function ac(yes_no) local self = fiber.self().storage.console if self == nil then error("console.ac(): need existing console") end self.ac = not not yes_no end -- -- Start REPL on stdin -- local started = false local function start() if started then error("console is already started") end started = true local self = setmetatable({ running = true }, repl_mt) local home_dir = os.getenv('HOME') if home_dir then self.history_file = home_dir .. 
'/.tarantool_history' internal.load_history(self.history_file) end session_internal.create(-1, "repl") repl(self) started = false end -- -- Connect to remove instance -- local netbox_connect local function connect(uri, opts) if not netbox_connect then -- workaround the broken loader netbox_connect = require('net.box').connect end opts = opts or {} local self = fiber.self().storage.console if self == nil then error("console.connect() need existing console") end local u if uri then u = urilib.parse(tostring(uri)) end if u == nil or u.service == nil then error('Usage: console.connect("[login:password@][host:]port")') end -- connect to remote host local remote remote = netbox_connect(u.host, u.service, { user = u.login, password = u.password, console = true, connect_timeout = opts.timeout }) remote.host, remote.port = u.host or 'localhost', u.service -- run disconnect trigger if connection failed if not remote:is_connected() then pcall(self.on_client_disconnect, self) error('Connection is not established: '..remote.error) end -- check connection && permissions local ok, res = pcall(remote.eval, remote, 'return true') if not ok then remote:close() pcall(self.on_client_disconnect, self) error(res) end -- override methods self.remote = remote self.eval = remote_eval self.prompt = string.format("%s:%s", self.remote.host, self.remote.port) self.completion = function (str, pos1, pos2) local c = string.format( 'return require("console").completion_handler(%q, %d, %d)', str, pos1, pos2) return yaml.decode(remote:eval(c))[1] end log.info("connected to %s:%s", self.remote.host, self.remote.port) return true end local function client_handler(client, peer) session_internal.create(client:fd(), "console") session_internal.run_on_connect() session_internal.run_on_auth(box.session.user(), true) local state = setmetatable({ running = true; read = client_read; print = client_print; client = client; }, repl_mt) local version = _TARANTOOL:match("([^-]+)-") 
state:print(string.format("%-63s\n%-63s\n", "Tarantool ".. version.." (Lua console)", "type 'help' for interactive help")) repl(state) session_internal.run_on_disconnect() end -- -- Start admin console -- local function listen(uri) local host, port if uri == nil then host = 'unix/' port = '/tmp/tarantool-console.sock' else local u = urilib.parse(tostring(uri)) if u == nil or u.service == nil then error('Usage: console.listen("[host:]port")') end host = u.host port = u.service or 3313 end local s, addr = socket.tcp_server(host, port, { handler = client_handler, name = 'console'}) if not s then error(string.format('failed to create server %s:%s: %s', host, port, errno.strerror())) end return s end package.loaded['console'] = { start = start; eval = eval; delimiter = delimiter; ac = ac; connect = connect; listen = listen; on_start = on_start; on_client_disconnect = on_client_disconnect; completion_handler = internal.completion_handler; } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/index.c0000664000000000000000000002445013306565107017634 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/lua/index.h" #include "lua/utils.h" #include "box/box.h" #include "box/index.h" #include "box/info.h" #include "box/lua/info.h" #include "box/lua/tuple.h" #include "box/lua/misc.h" /* lbox_encode_tuple_on_gc() */ /** {{{ box.index Lua library: access to spaces and indexes */ static int CTID_STRUCT_ITERATOR_REF = 0; static int lbox_insert(lua_State *L) { if (lua_gettop(L) != 2 || !lua_isnumber(L, 1)) return luaL_error(L, "Usage space:insert(tuple)"); uint32_t space_id = lua_tonumber(L, 1); size_t tuple_len; const char *tuple = lbox_encode_tuple_on_gc(L, 2, &tuple_len); struct tuple *result; if (box_insert(space_id, tuple, tuple + tuple_len, &result) != 0) return luaT_error(L); return luaT_pushtupleornil(L, result); } static int lbox_replace(lua_State *L) { if (lua_gettop(L) != 2 || !lua_isnumber(L, 1)) return luaL_error(L, "Usage space:replace(tuple)"); uint32_t space_id = lua_tonumber(L, 1); size_t tuple_len; const char *tuple = lbox_encode_tuple_on_gc(L, 2, &tuple_len); struct tuple *result; if (box_replace(space_id, tuple, tuple + tuple_len, &result) != 0) return luaT_error(L); return luaT_pushtupleornil(L, result); } static int lbox_index_update(lua_State *L) { if (lua_gettop(L) != 4 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2) || (lua_type(L, 3) != LUA_TTABLE && luaT_istuple(L, 3) == NULL) || (lua_type(L, 4) != LUA_TTABLE && luaT_istuple(L, 4) == NULL)) return luaL_error(L, "Usage index:update(key, ops)"); uint32_t space_id = lua_tonumber(L, 
1); uint32_t index_id = lua_tonumber(L, 2); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 3, &key_len); size_t ops_len; const char *ops = lbox_encode_tuple_on_gc(L, 4, &ops_len); struct tuple *result; if (box_update(space_id, index_id, key, key + key_len, ops, ops + ops_len, 1, &result) != 0) return luaT_error(L); return luaT_pushtupleornil(L, result); } static int lbox_upsert(lua_State *L) { if (lua_gettop(L) != 3 || !lua_isnumber(L, 1) || (lua_type(L, 2) != LUA_TTABLE && luaT_istuple(L, 2) == NULL) || (lua_type(L, 3) != LUA_TTABLE && luaT_istuple(L, 3) == NULL)) return luaL_error(L, "Usage space:upsert(tuple_key, ops)"); uint32_t space_id = lua_tonumber(L, 1); size_t tuple_len; const char *tuple = lbox_encode_tuple_on_gc(L, 2, &tuple_len); size_t ops_len; const char *ops = lbox_encode_tuple_on_gc(L, 3, &ops_len); struct tuple *result; if (box_upsert(space_id, 0, tuple, tuple + tuple_len, ops, ops + ops_len, 1, &result) != 0) return luaT_error(L); return luaT_pushtupleornil(L, result); } static int lbox_index_delete(lua_State *L) { if (lua_gettop(L) != 3 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2) || (lua_type(L, 3) != LUA_TTABLE && luaT_istuple(L, 3) == NULL)) return luaL_error(L, "Usage space:delete(key)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 3, &key_len); struct tuple *result; if (box_delete(space_id, index_id, key, key + key_len, &result) != 0) return luaT_error(L); return luaT_pushtupleornil(L, result); } static int lbox_index_random(lua_State *L) { if (lua_gettop(L) != 3 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2) || !lua_isnumber(L, 3)) return luaL_error(L, "Usage index.random(space_id, index_id, rnd)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); uint32_t rnd = lua_tonumber(L, 3); struct tuple *tuple; if (box_index_random(space_id, index_id, rnd, &tuple) != 0) return luaT_error(L); return 
luaT_pushtupleornil(L, tuple); } static int lbox_index_get(lua_State *L) { if (lua_gettop(L) != 3 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2)) return luaL_error(L, "Usage index.get(space_id, index_id, key)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 3, &key_len); struct tuple *tuple; if (box_index_get(space_id, index_id, key, key + key_len, &tuple) != 0) return luaT_error(L); return luaT_pushtupleornil(L, tuple); } static int lbox_index_min(lua_State *L) { if (lua_gettop(L) != 3 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2)) return luaL_error(L, "usage index.min(space_id, index_id, key)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 3, &key_len); struct tuple *tuple; if (box_index_min(space_id, index_id, key, key + key_len, &tuple) != 0) return luaT_error(L); return luaT_pushtupleornil(L, tuple); } static int lbox_index_max(lua_State *L) { if (lua_gettop(L) != 3 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2)) return luaL_error(L, "usage index.max(space_id, index_id, key)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 3, &key_len); struct tuple *tuple; if (box_index_max(space_id, index_id, key, key + key_len, &tuple) != 0) return luaT_error(L); return luaT_pushtupleornil(L, tuple); } static int lbox_index_count(lua_State *L) { if (lua_gettop(L) != 4 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2) || !lua_isnumber(L, 3)) { return luaL_error(L, "usage index.count(space_id, index_id, " "iterator, key)"); } uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); uint32_t iterator = lua_tonumber(L, 3); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 4, &key_len); ssize_t count = box_index_count(space_id, index_id, iterator, key, key + key_len); if 
(count == -1) return luaT_error(L); lua_pushinteger(L, count); return 1; } static void box_index_init_iterator_types(struct lua_State *L, int idx) { for (int i = 0; i < iterator_type_MAX; i++) { lua_pushnumber(L, i); lua_setfield(L, idx, iterator_type_strs[i]); } } /* }}} */ /* {{{ box.index.iterator Lua library: index iterators */ static int lbox_index_iterator(lua_State *L) { if (lua_gettop(L) != 4 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2) || !lua_isnumber(L, 3)) return luaL_error(L, "usage index.iterator(space_id, index_id, type, key)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); uint32_t iterator = lua_tonumber(L, 3); size_t mpkey_len; const char *mpkey = lua_tolstring(L, 4, &mpkey_len); /* Key encoded by Lua */ /* const char *key = lbox_encode_tuple_on_gc(L, 4, key_len); */ struct iterator *it = box_index_iterator(space_id, index_id, iterator, mpkey, mpkey + mpkey_len); if (it == NULL) return luaT_error(L); assert(CTID_STRUCT_ITERATOR_REF != 0); struct iterator **ptr = (struct iterator **) luaL_pushcdata(L, CTID_STRUCT_ITERATOR_REF); *ptr = it; /* NULL handled by Lua, gc also set by Lua */ return 1; } static int lbox_iterator_next(lua_State *L) { /* first argument is key buffer */ if (lua_gettop(L) < 1 || lua_type(L, 1) != LUA_TCDATA) return luaL_error(L, "usage: next(state)"); assert(CTID_STRUCT_ITERATOR_REF != 0); uint32_t ctypeid; void *data = luaL_checkcdata(L, 1, &ctypeid); if (ctypeid != (uint32_t) CTID_STRUCT_ITERATOR_REF) return luaL_error(L, "usage: next(state)"); struct iterator *itr = *(struct iterator **) data; struct tuple *tuple; if (box_iterator_next(itr, &tuple) != 0) return luaT_error(L); return luaT_pushtupleornil(L, tuple); } /** Truncate a given space */ static int lbox_truncate(struct lua_State *L) { uint32_t space_id = luaL_checkinteger(L, 1); if (box_truncate(space_id) != 0) return luaT_error(L); return 0; } /* }}} */ /* {{{ Introspection */ static int lbox_index_info(lua_State *L) { if 
(lua_gettop(L) != 2 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2)) return luaL_error(L, "usage index.info(space_id, index_id)"); uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); struct info_handler info; luaT_info_handler_create(&info, L); if (box_index_info(space_id, index_id, &info) != 0) return luaT_error(L); return 1; } /* }}} */ void box_lua_index_init(struct lua_State *L) { /* Get CTypeIDs */ int rc = luaL_cdef(L, "struct iterator;"); assert(rc == 0); (void) rc; CTID_STRUCT_ITERATOR_REF = luaL_ctypeid(L, "struct iterator&"); assert(CTID_STRUCT_ITERATOR_REF != 0); static const struct luaL_Reg indexlib [] = { {NULL, NULL} }; /* box.index */ luaL_register_module(L, "box.index", indexlib); box_index_init_iterator_types(L, -2); lua_pop(L, 1); static const struct luaL_Reg boxlib_internal[] = { {"insert", lbox_insert}, {"replace", lbox_replace}, {"update", lbox_index_update}, {"upsert", lbox_upsert}, {"delete", lbox_index_delete}, {"random", lbox_index_random}, {"get", lbox_index_get}, {"min", lbox_index_min}, {"max", lbox_index_max}, {"count", lbox_index_count}, {"iterator", lbox_index_iterator}, {"iterator_next", lbox_iterator_next}, {"truncate", lbox_truncate}, {"info", lbox_index_info}, {NULL, NULL} }; luaL_register(L, "box.internal", boxlib_internal); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/schema.lua0000664000000000000000000021741013306565107020324 0ustar rootroot-- schema.lua (internal file) -- local ffi = require('ffi') local msgpack = require('msgpack') local msgpackffi = require('msgpackffi') local fun = require('fun') local log = require('log') local session = box.session local internal = require('box.internal') local function setmap(table) return setmetatable(table, { __serialize = 'map' }) end local builtin = ffi.C -- performance fixup for hot functions local tuple_encode = box.tuple.encode local tuple_bless = box.tuple.bless local is_tuple = box.tuple.is assert(tuple_encode ~= nil and tuple_bless ~= nil 
and is_tuple ~= nil) local INT64_MIN = tonumber64('-9223372036854775808') local INT64_MAX = tonumber64('9223372036854775807') ffi.cdef[[ struct space *space_by_id(uint32_t id); extern uint32_t box_schema_version(); void space_run_triggers(struct space *space, bool yesno); size_t space_bsize(struct space *space); typedef struct tuple box_tuple_t; typedef struct iterator box_iterator_t; /** \cond public */ box_iterator_t * box_index_iterator(uint32_t space_id, uint32_t index_id, int type, const char *key, const char *key_end); int box_iterator_next(box_iterator_t *itr, box_tuple_t **result); void box_iterator_free(box_iterator_t *itr); /** \endcond public */ /** \cond public */ ssize_t box_index_len(uint32_t space_id, uint32_t index_id); ssize_t box_index_bsize(uint32_t space_id, uint32_t index_id); int box_index_random(uint32_t space_id, uint32_t index_id, uint32_t rnd, box_tuple_t **result); int box_index_get(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); int box_index_min(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); int box_index_max(uint32_t space_id, uint32_t index_id, const char *key, const char *key_end, box_tuple_t **result); ssize_t box_index_count(uint32_t space_id, uint32_t index_id, int type, const char *key, const char *key_end); /** \endcond public */ /** \cond public */ int64_t box_txn_id(); int box_txn_begin(); /** \endcond public */ typedef struct txn_savepoint box_txn_savepoint_t; box_txn_savepoint_t * box_txn_savepoint(); int box_txn_rollback_to_savepoint(box_txn_savepoint_t *savepoint); struct port_tuple_entry { struct port_tuple_entry *next; struct tuple *tuple; }; struct port_tuple { const struct port_vtab *vtab; size_t size; struct port_tuple_entry *first; struct port_tuple_entry *last; struct port_tuple_entry first_entry; }; void port_destroy(struct port *port); int box_select(uint32_t space_id, uint32_t index_id, int iterator, uint32_t 
offset, uint32_t limit, const char *key, const char *key_end, struct port *port); void password_prepare(const char *password, int len, char *out, int out_len); ]] local function user_or_role_resolve(user) local _user = box.space[box.schema.USER_ID] local tuple if type(user) == 'string' then tuple = _user.index.name:get{user} else tuple = _user:get{user} end if tuple == nil then return nil end return tuple[1] end local function role_resolve(name_or_id) local _user = box.space[box.schema.USER_ID] local tuple if type(name_or_id) == 'string' then tuple = _user.index.name:get{name_or_id} elseif type(name_or_id) ~= 'nil' then tuple = _user:get{name_or_id} end if tuple == nil or tuple[4] ~= 'role' then return nil else return tuple[1] end end local function user_resolve(name_or_id) local _user = box.space[box.schema.USER_ID] local tuple if type(name_or_id) == 'string' then tuple = _user.index.name:get{name_or_id} elseif type(name_or_id) ~= 'nil' then tuple = _user:get{name_or_id} end if tuple == nil or tuple[4] ~= 'user' then return nil else return tuple[1] end end local function sequence_resolve(name_or_id) local _sequence = box.space[box.schema.SEQUENCE_ID] local tuple if type(name_or_id) == 'string' then tuple = _sequence.index.name:get{name_or_id} elseif type(name_or_id) ~= 'nil' then tuple = _sequence:get{name_or_id} end if tuple ~= nil then return tuple[1], tuple else return nil end end -- Revoke all privileges associated with the given object. local function revoke_object_privs(object_type, object_id) local _priv = box.space[box.schema.PRIV_ID] local privs = _priv.index.object:select{object_type, object_id} for k, tuple in pairs(privs) do local uid = tuple[2] _priv:delete{uid, object_type, object_id} end end -- Same as type(), but returns 'number' if 'param' is -- of type 'cdata' and represents a 64-bit integer. 
local function param_type(param) local t = type(param) if t == 'cdata' and tonumber64(param) ~= nil then t = 'number' end return t end --[[ @brief Common function to check table with parameters (like options) @param table - table with parameters @param template - table with expected types of expected parameters type could be comma separated string with lua types (number, string etc), or 'any' if any type is allowed The function checks following: 1)that parameters table is a table (or nil) 2)all keys in parameters are present in template 3)type of every parameter fits (one of) types described in template Check (2) and (3) could be disabled by adding {, dont_check = , } into parameters table The functions calls box.error(box.error.ILLEGAL_PARAMS, ..) on error @example check_param_table(options, { user = 'string', port = 'string, number', data = 'any'} ) --]] local function check_param_table(table, template) if table == nil then return end if type(table) ~= 'table' then box.error(box.error.ILLEGAL_PARAMS, "options should be a table") end -- just pass {.. dont_check = true, ..} to disable checks below if table.dont_check then return end for k,v in pairs(table) do if template[k] == nil then box.error(box.error.ILLEGAL_PARAMS, "unexpected option '" .. k .. "'") elseif template[k] == 'any' then -- any type is ok elseif (string.find(template[k], ',') == nil) then -- one type if param_type(v) ~= template[k] then box.error(box.error.ILLEGAL_PARAMS, "options parameter '" .. k .. "' should be of type " .. template[k]) end else local good_types = string.gsub(template[k], ' ', '') local haystack = ',' .. good_types .. ',' local needle = ',' .. param_type(v) .. ',' if (string.find(haystack, needle) == nil) then good_types = string.gsub(good_types, ',', ', ') box.error(box.error.ILLEGAL_PARAMS, "options parameter '" .. k .. "' should be one of types: " .. 
template[k]) end end end end --[[ @brief Common function to check type parameter (of function) Calls box.error(box.error.ILLEGAL_PARAMS, ) on error @example: check_param(user, 'user', 'string') --]] local function check_param(param, name, should_be_type) if param_type(param) ~= should_be_type then box.error(box.error.ILLEGAL_PARAMS, name .. " should be a " .. should_be_type) end end --[[ Adds to a table key-value pairs from defaults table that is not present in original table. Returns updated table. If nil is passed instead of table, it's treated as empty table {} For example update_param_table({ type = 'hash', temporary = true }, { type = 'tree', unique = true }) will return table { type = 'hash', temporary = true, unique = true } --]] local function update_param_table(table, defaults) local new_table = {} if defaults ~= nil then for k,v in pairs(defaults) do new_table[k] = v end end if table ~= nil then for k,v in pairs(table) do new_table[k] = v end end return new_table end box.begin = function() if builtin.box_txn_begin() == -1 then box.error() end end box.savepoint = function() local csavepoint = builtin.box_txn_savepoint() if csavepoint == nil then box.error() end return { csavepoint=csavepoint, txn_id=builtin.box_txn_id() } end local savepoint_type = ffi.typeof('box_txn_savepoint_t') local function check_savepoint(savepoint) if savepoint == nil or savepoint.txn_id == nil or savepoint.csavepoint == nil or type(tonumber(savepoint.txn_id)) ~= 'number' or type(savepoint.csavepoint) ~= 'cdata' or not ffi.istype(savepoint_type, savepoint.csavepoint) then error("Usage: box.rollback_to_savepoint(savepoint)") end end box.rollback_to_savepoint = function(savepoint) check_savepoint(savepoint) if savepoint.txn_id ~= builtin.box_txn_id() then box.error(box.error.NO_SUCH_SAVEPOINT) end if builtin.box_txn_rollback_to_savepoint(savepoint.csavepoint) == -1 then box.error() end end local function atomic_tail(status, ...) 
if not status then box.rollback() error((...), 2) end box.commit() return ... end box.atomic = function(fun, ...) box.begin() return atomic_tail(pcall(fun, ...)) end -- box.commit yields, so it's defined as Lua/C binding -- box.rollback yields as well function update_format(format) local result = {} for i, given in ipairs(format) do local field = {} if type(given) ~= "table" then field.name = given else for k, v in pairs(given) do if k == 1 then if given.name then if not given.type then field.type = v else field[1] = v end else field.name = v end elseif k == 2 and not given.type and not given.name then field.type = v else field[k] = v end end end if type(field.name) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "format[" .. i .. "]: name (string) is expected") end if field.type == nil then field.type = 'any' elseif type(field.type) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "format[" .. i .. "]: type must be a string") end table.insert(result, field) end return result end box.schema.space = {} box.schema.space.create = function(name, options) check_param(name, 'name', 'string') local options_template = { if_not_exists = 'boolean', engine = 'string', id = 'number', field_count = 'number', user = 'string, number', format = 'table', temporary = 'boolean', } local options_defaults = { engine = 'memtx', field_count = 0, temporary = false, } check_param_table(options, options_template) options = update_param_table(options, options_defaults) local _space = box.space[box.schema.SPACE_ID] if box.space[name] then if options.if_not_exists then return box.space[name], "not created" else box.error(box.error.SPACE_EXISTS, name) end end local id = options.id if not id then local _schema = box.space._schema local max_id = _schema:update({'max_id'}, {{'+', 2, 1}}) if max_id == nil then id = _space.index.primary:max()[1] if id < box.schema.SYSTEM_ID_MAX then id = box.schema.SYSTEM_ID_MAX end max_id = _schema:insert{'max_id', id + 1} end id = max_id[2] end local uid 
= session.euid() if options.user then uid = user_or_role_resolve(options.user) if uid == nil then box.error(box.error.NO_SUCH_USER, options.user) end end local format = options.format and options.format or {} check_param(format, 'format', 'table') format = update_format(format) -- filter out global parameters from the options array local space_options = setmap({ temporary = options.temporary and true or nil, }) _space:insert{id, uid, name, options.engine, options.field_count, space_options, format} return box.space[id], "created" end -- space format - the metadata about space fields function box.schema.space.format(id, format) local _space = box.space._space check_param(id, 'id', 'number') if format == nil then return _space:get(id)[7] else check_param(format, 'format', 'table') format = update_format(format) _space:update(id, {{'=', 7, format}}) end end box.schema.create_space = box.schema.space.create box.schema.space.drop = function(space_id, space_name, opts) check_param(space_id, 'space_id', 'number') opts = opts or {} check_param_table(opts, { if_exists = 'boolean' }) local _space = box.space[box.schema.SPACE_ID] local _index = box.space[box.schema.INDEX_ID] local _truncate = box.space[box.schema.TRUNCATE_ID] local _space_sequence = box.space[box.schema.SPACE_SEQUENCE_ID] local sequence_tuple = _space_sequence:delete{space_id} if sequence_tuple ~= nil and sequence_tuple[3] == true then -- Delete automatically generated sequence. 
box.schema.sequence.drop(sequence_tuple[2]) end local keys = _index:select(space_id) for i = #keys, 1, -1 do local v = keys[i] _index:delete{v[1], v[2]} end revoke_object_privs('space', space_id) _truncate:delete{space_id} if _space:delete{space_id} == nil then if space_name == nil then space_name = '#'..tostring(space_id) end if not opts.if_exists then box.error(box.error.NO_SUCH_SPACE, space_name) end end end box.schema.space.rename = function(space_id, space_name) check_param(space_id, 'space_id', 'number') check_param(space_name, 'space_name', 'string') local _space = box.space[box.schema.SPACE_ID] _space:update(space_id, {{"=", 3, space_name}}) end box.schema.index = {} local function update_index_parts_1_6_0(parts) local result = {} if #parts % 2 ~= 0 then box.error(box.error.ILLEGAL_PARAMS, "options.parts: expected field_no (number), type (string) pairs") end for i=1,#parts,2 do if type(parts[i]) ~= "number" then box.error(box.error.ILLEGAL_PARAMS, "options.parts: expected field_no (number), type (string) pairs") elseif parts[i] == 0 then -- Lua uses one-based field numbers but _space is zero-based box.error(box.error.ILLEGAL_PARAMS, "invalid index parts: field_no must be one-based") end if type(parts[i + 1]) ~= "string" then box.error(box.error.ILLEGAL_PARAMS, "options.parts: expected field_no (number), type (string) pairs") end table.insert(result, {field = parts[i] - 1, type = parts[i + 1]}) end return result end local function update_index_parts(format, parts) if type(parts) ~= "table" then box.error(box.error.ILLEGAL_PARAMS, "options.parts parameter should be a table") end if #parts == 0 then box.error(box.error.ILLEGAL_PARAMS, "options.parts must have at least one part") end if type(parts[1]) == 'number' and type(parts[2]) == 'string' then return update_index_parts_1_6_0(parts), true end local parts_can_be_simplified = true local result = {} for i=1,#parts do local part = {} if type(parts[i]) ~= "table" then part.field = parts[i] else for k, v in 
pairs(parts[i]) do -- Support {1, 'unsigned', collation='xx'} shortcut if k == 1 or k == 'field' then part.field = v; elseif k == 2 or k == 'type' then part.type = v; elseif k == 'collation' then -- find ID by name local coll = box.space._collation.index.name:get{v} if not coll then coll = box.space._collation.index.name:get{v:lower()} end if not coll then box.error(box.error.ILLEGAL_PARAMS, "options.parts[" .. i .. "]: collation was not found by name '" .. v .. "'") end part[k] = coll[1] parts_can_be_simplified = false elseif k == 'is_nullable' then part[k] = v parts_can_be_simplified = false else part[k] = v parts_can_be_simplified = false end end end if type(part.field) ~= 'number' and type(part.field) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "options.parts[" .. i .. "]: field (name or number) is expected") elseif type(part.field) == 'string' then for k,v in pairs(format) do if v.name == part.field then part.field = k break end end if type(part.field) == 'string' then box.error(box.error.ILLEGAL_PARAMS, "options.parts[" .. i .. "]: field was not found by name '" .. part.field .. "'") end elseif part.field == 0 then box.error(box.error.ILLEGAL_PARAMS, "options.parts[" .. i .. "]: field (number) must be one-based") end local fmt = format[part.field] if part.type == nil then if fmt and fmt.type then part.type = fmt.type else part.type = 'scalar' end elseif type(part.type) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "options.parts[" .. i .. "]: type (string) is expected") end if part.is_nullable == nil then if fmt and fmt.is_nullable then part.is_nullable = true parts_can_be_simplified = false end elseif type(part.is_nullable) ~= 'boolean' then box.error(box.error.ILLEGAL_PARAMS, "options.parts[" .. i .. 
"]: type (boolean) is expected") end part.field = part.field - 1 table.insert(result, part) end return result, parts_can_be_simplified end -- -- Convert index parts into 1.6.6 format if they -- doesn't use collation and is_nullable options -- local function simplify_index_parts(parts) local new_parts = {} for i, part in pairs(parts) do assert(part.collation == nil and part.is_nullable == nil, "part is simple") new_parts[i] = {part.field, part.type} end return new_parts end -- Historically, some properties of an index -- are stored as tuple fields, others in a -- single field containing msgpack map. -- This is the map. local index_options = { unique = 'boolean', dimension = 'number', distance = 'string', run_count_per_level = 'number', run_size_ratio = 'number', range_size = 'number', page_size = 'number', bloom_fpr = 'number', } -- -- check_param_table() template for alter index, -- includes all index options. -- local alter_index_template = { id = 'number', name = 'string', type = 'string', parts = 'table', sequence = 'boolean, number, string', } for k, v in pairs(index_options) do alter_index_template[k] = v end -- -- check_param_table() template for create_index(), includes -- all index options and if_not_exists specifier -- local create_index_template = table.deepcopy(alter_index_template) create_index_template.if_not_exists = "boolean" box.schema.index.create = function(space_id, name, options) check_param(space_id, 'space_id', 'number') check_param(name, 'name', 'string') check_param_table(options, create_index_template) local space = box.space[space_id] if not space then box.error(box.error.NO_SUCH_SPACE, '#'..tostring(space_id)) end local format = space:format() local options_defaults = { type = 'tree', } options = update_param_table(options, options_defaults) local type_dependent_defaults = { rtree = {parts = { 2, 'array' }, unique = false}, bitset = {parts = { 2, 'unsigned' }, unique = false}, other = {parts = { 1, 'unsigned' }, unique = true}, } 
options_defaults = type_dependent_defaults[options.type] or type_dependent_defaults.other if not options.parts then local fieldno = options_defaults.parts[1] if #format >= fieldno then local t = format[fieldno].type if t ~= 'any' then options.parts = {{fieldno, format[fieldno].type}} end end end options = update_param_table(options, options_defaults) if space.engine == 'vinyl' then options_defaults = { page_size = box.cfg.vinyl_page_size, range_size = box.cfg.vinyl_range_size, run_count_per_level = box.cfg.vinyl_run_count_per_level, run_size_ratio = box.cfg.vinyl_run_size_ratio, bloom_fpr = box.cfg.vinyl_bloom_fpr } else options_defaults = {} end options = update_param_table(options, options_defaults) local _index = box.space[box.schema.INDEX_ID] if _index.index.name:get{space_id, name} then if options.if_not_exists then return space.index[name], "not created" else box.error(box.error.INDEX_EXISTS, name) end end local iid = 0 if options.id then iid = options.id else -- max local tuple = _index.index[0] :select(space_id, { limit = 1, iterator = 'LE' })[1] if tuple then local id = tuple[1] if id == space_id then iid = tuple[2] + 1 end end end local parts, parts_can_be_simplified = update_index_parts(format, options.parts) -- create_index() options contains type, parts, etc, -- stored separately. 
Remove these members from index_opts local index_opts = { dimension = options.dimension, unique = options.unique, distance = options.distance, page_size = options.page_size, range_size = options.range_size, run_count_per_level = options.run_count_per_level, run_size_ratio = options.run_size_ratio, bloom_fpr = options.bloom_fpr, } local field_type_aliases = { num = 'unsigned'; -- Deprecated since 1.7.2 uint = 'unsigned'; str = 'string'; int = 'integer'; ['*'] = 'any'; }; for _, part in pairs(parts) do local field_type = part.type:lower() part.type = field_type_aliases[field_type] or field_type if field_type == 'num' then log.warn("field type '%s' is deprecated since Tarantool 1.7, ".. "please use '%s' instead", field_type, part.type) end end local _space_sequence = box.space[box.schema.SPACE_SEQUENCE_ID] local sequence_is_generated = false local sequence = options.sequence or nil -- ignore sequence = false if sequence ~= nil then if iid ~= 0 then box.error(box.error.MODIFY_INDEX, name, space.name, "sequence cannot be used with a secondary key") end if #parts >= 1 and parts[1].type ~= 'integer' and parts[1].type ~= 'unsigned' then box.error(box.error.MODIFY_INDEX, name, space.name, "sequence cannot be used with a non-integer key") end if sequence == true then sequence = box.schema.sequence.create(space.name .. 
'_seq') sequence = sequence.id sequence_is_generated = true else sequence = sequence_resolve(sequence) if sequence == nil then box.error(box.error.NO_SUCH_SEQUENCE, options.sequence) end end end -- save parts in old format if possible if parts_can_be_simplified then parts = simplify_index_parts(parts) end _index:insert{space_id, iid, name, options.type, index_opts, parts} if sequence ~= nil then _space_sequence:insert{space_id, sequence, sequence_is_generated} end return space.index[name] end box.schema.index.drop = function(space_id, index_id) check_param(space_id, 'space_id', 'number') check_param(index_id, 'index_id', 'number') if index_id == 0 then local _space_sequence = box.space[box.schema.SPACE_SEQUENCE_ID] local sequence_tuple = _space_sequence:delete{space_id} if sequence_tuple ~= nil and sequence_tuple[3] == true then -- Delete automatically generated sequence. box.schema.sequence.drop(sequence_tuple[2]) end end local _index = box.space[box.schema.INDEX_ID] _index:delete{space_id, index_id} end box.schema.index.rename = function(space_id, index_id, name) check_param(space_id, 'space_id', 'number') check_param(index_id, 'index_id', 'number') check_param(name, 'name', 'string') local _index = box.space[box.schema.INDEX_ID] _index:update({space_id, index_id}, {{"=", 3, name}}) end box.schema.index.alter = function(space_id, index_id, options) local space = box.space[space_id] if space == nil then box.error(box.error.NO_SUCH_SPACE, '#'..tostring(space_id)) end if space.index[index_id] == nil then box.error(box.error.NO_SUCH_INDEX, index_id, space.name) end if options == nil then return end check_param_table(options, alter_index_template) if type(space_id) ~= "number" then space_id = space.id end if type(index_id) ~= "number" then index_id = space.index[index_id].id end local format = space:format() local _index = box.space[box.schema.INDEX_ID] if options.id ~= nil then local can_update_field = {id = true, name = true, type = true } local can_update = true 
local cant_update_fields = '' for k,v in pairs(options) do if not can_update_field[k] then can_update = false cant_update_fields = cant_update_fields .. ' ' .. k end end if not can_update then box.error(box.error.PROC_LUA, "Don't know how to update both id and" .. cant_update_fields) end local ops = {} local function add_op(value, field_no) if value then table.insert(ops, {'=', field_no, value}) end end add_op(options.id, 2) add_op(options.name, 3) add_op(options.type, 4) _index:update({space_id, index_id}, ops) return end local tuple = _index:get{space_id, index_id } local parts = {} local index_opts = {} local OPTS = 5 local PARTS = 6 if type(tuple[OPTS]) == 'number' then -- old format index_opts.unique = tuple[OPTS] == 1 local part_count = tuple[PARTS] for i = 1, part_count do table.insert(parts, {tuple[2 * i + 4], tuple[2 * i + 5]}); end else -- new format index_opts = tuple[OPTS] parts = tuple[PARTS] end if options.name == nil then options.name = tuple[3] end if options.type == nil then options.type = tuple[4] end for k, t in pairs(index_options) do if options[k] ~= nil then index_opts[k] = options[k] end end if options.parts then local parts_can_be_simplified parts, parts_can_be_simplified = update_index_parts(format, options.parts) -- save parts in old format if possible if parts_can_be_simplified then parts = simplify_index_parts(parts) end end local _space_sequence = box.space[box.schema.SPACE_SEQUENCE_ID] local sequence_is_generated = false local sequence = options.sequence local sequence_tuple if index_id ~= 0 then if sequence then box.error(box.error.MODIFY_INDEX, options.name, space.name, "sequence cannot be used with a secondary key") end -- ignore 'sequence = false' for secondary indexes sequence = nil else sequence_tuple = _space_sequence:get(space_id) if (sequence or (sequence ~= false and sequence_tuple ~= nil)) and #parts >= 1 and (parts[1].type or parts[1][2]) ~= 'integer' and (parts[1].type or parts[1][2]) ~= 'unsigned' then 
box.error(box.error.MODIFY_INDEX, options.name, space.name, "sequence cannot be used with a non-integer key") end end if sequence == true then if sequence_tuple == nil or sequence_tuple[3] == false then sequence = box.schema.sequence.create(space.name .. '_seq') sequence = sequence.id sequence_is_generated = true else -- Space already has an automatically generated sequence. sequence = nil end elseif sequence then sequence = sequence_resolve(sequence) if sequence == nil then box.error(box.error.NO_SUCH_SEQUENCE, options.sequence) end end if sequence == false then _space_sequence:delete(space_id) end _index:replace{space_id, index_id, options.name, options.type, index_opts, parts} if sequence then _space_sequence:replace{space_id, sequence, sequence_is_generated} end if sequence_tuple ~= nil and sequence_tuple[3] == true and sequence_tuple[2] ~= sequence then -- Delete automatically generated sequence. box.schema.sequence.drop(sequence_tuple[2]) end end -- a static box_tuple_t ** instance for calling box_index_* API local ptuple = ffi.new('box_tuple_t *[1]') local function keify(key) if key == nil then return {} elseif type(key) == "table" or is_tuple(key) then return key end return {key} end local iterator_t = ffi.typeof('struct iterator') ffi.metatype(iterator_t, { __tostring = function(iterator) return "" end; }) local iterator_gen = function(param, state) --[[ index:pairs() mostly conforms to the Lua for-in loop conventions and tries to follow the best practices of Lua community. - this generating function is stateless. - *param* should contain **immutable** data needed to fully define an iterator. *param* is opaque for users. Currently it contains keybuf string just to prevent GC from collecting it. In future some other variables like space_id, index_id, sc_version will be stored here. - *state* should contain **immutable** transient state of an iterator. *state* is opaque for users. 
Currently it contains `struct iterator` cdata that is modified during iteration. This is a sad limitation of underlying C API. Moreover, the separation of *param* and *state* is not properly implemented here. These drawbacks can be fixed in future without changing this API. Please check out http://www.lua.org/pil/7.3.html for details. --]] if not ffi.istype(iterator_t, state) then error('usage: next(param, state)') end -- next() modifies state in-place if builtin.box_iterator_next(state, ptuple) ~= 0 then return box.error() -- error elseif ptuple[0] ~= nil then return state, tuple_bless(ptuple[0]) -- new state, value else return nil end end local iterator_gen_luac = function(param, state) local tuple = internal.iterator_next(state) if tuple ~= nil then return state, tuple -- new state, value else return nil end end -- global struct port instance to use by select()/get() local port_tuple = ffi.new('struct port_tuple') local port_tuple_entry_t = ffi.typeof('struct port_tuple_entry') -- Helper function to check space:method() usage local function check_space_arg(space, method) if type(space) ~= 'table' or space.id == nil then local fmt = 'Use space:%s(...) instead of space.%s(...)' error(string.format(fmt, method, method)) end end box.internal.check_space_arg = check_space_arg -- for net.box -- Helper function for nicer error messages -- in some cases when space object is misused -- Takes time so should not be used for DML. local function check_space_exists(space) local s = box.space[space.id] if s == nil then box.error(box.error.NO_SUCH_SPACE, space.name) end end -- Helper function to check index:method() usage local function check_index_arg(index, method) if type(index) ~= 'table' or index.id == nil then local fmt = 'Use index:%s(...) 
instead of index.%s(...)' error(string.format(fmt, method, method)) end end box.internal.check_index_arg = check_index_arg -- for net.box -- Helper function to check that space have primary key and return it local function check_primary_index(space) local pk = space.index[0] if pk == nil then box.error(box.error.NO_SUCH_INDEX, 0, space.name) end return pk end box.internal.check_primary_index = check_primary_index -- for net.box box.internal.schema_version = builtin.box_schema_version local function check_iterator_type(opts, key_is_nil) local itype if opts and opts.iterator then if type(opts.iterator) == "number" then itype = opts.iterator elseif type(opts.iterator) == "string" then itype = box.index[string.upper(opts.iterator)] if itype == nil then box.error(box.error.ITERATOR_TYPE, opts.iterator) end else box.error(box.error.ITERATOR_TYPE, tostring(opts.iterator)) end elseif opts and type(opts) == "string" then itype = box.index[string.upper(opts)] if itype == nil then box.error(box.error.ITERATOR_TYPE, opts) end else -- Use ALL for {} and nil keys and EQ for other keys itype = key_is_nil and box.index.ALL or box.index.EQ end return itype end internal.check_iterator_type = check_iterator_type -- export for net.box function box.schema.space.bless(space) local index_mt = {} -- __len and __index index_mt.len = function(index) check_index_arg(index, 'len') local ret = builtin.box_index_len(index.space_id, index.id) if ret == -1 then box.error() end return tonumber(ret) end -- index.bsize index_mt.bsize = function(index) check_index_arg(index, 'bsize') local ret = builtin.box_index_bsize(index.space_id, index.id) if ret == -1 then box.error() end return tonumber(ret) end index_mt.__len = index_mt.len -- Lua 5.2 compatibility index_mt.__newindex = function(table, index) return error('Attempt to modify a read-only table') end index_mt.__index = index_mt -- min and max index_mt.min_ffi = function(index, key) check_index_arg(index, 'min') local pkey, pkey_end = 
tuple_encode(key) if builtin.box_index_min(index.space_id, index.id, pkey, pkey_end, ptuple) ~= 0 then box.error() -- error elseif ptuple[0] ~= nil then return tuple_bless(ptuple[0]) else return end end index_mt.min_luac = function(index, key) check_index_arg(index, 'min') key = keify(key) return internal.min(index.space_id, index.id, key); end index_mt.max_ffi = function(index, key) check_index_arg(index, 'max') local pkey, pkey_end = tuple_encode(key) if builtin.box_index_max(index.space_id, index.id, pkey, pkey_end, ptuple) ~= 0 then box.error() -- error elseif ptuple[0] ~= nil then return tuple_bless(ptuple[0]) else return end end index_mt.max_luac = function(index, key) check_index_arg(index, 'max') key = keify(key) return internal.max(index.space_id, index.id, key); end index_mt.random_ffi = function(index, rnd) check_index_arg(index, 'random') rnd = rnd or math.random() if builtin.box_index_random(index.space_id, index.id, rnd, ptuple) ~= 0 then box.error() -- error elseif ptuple[0] ~= nil then return tuple_bless(ptuple[0]) else return end end index_mt.random_luac = function(index, rnd) check_index_arg(index, 'random') rnd = rnd or math.random() return internal.random(index.space_id, index.id, rnd); end -- iteration index_mt.pairs_ffi = function(index, key, opts) check_index_arg(index, 'pairs') local pkey, pkey_end = tuple_encode(key) local itype = check_iterator_type(opts, pkey + 1 >= pkey_end); local keybuf = ffi.string(pkey, pkey_end - pkey) local pkeybuf = ffi.cast('const char *', keybuf) local cdata = builtin.box_index_iterator(index.space_id, index.id, itype, pkeybuf, pkeybuf + #keybuf); if cdata == nil then box.error() end return fun.wrap(iterator_gen, keybuf, ffi.gc(cdata, builtin.box_iterator_free)) end index_mt.pairs_luac = function(index, key, opts) check_index_arg(index, 'pairs') key = keify(key) local itype = check_iterator_type(opts, #key == 0); local keymp = msgpack.encode(key) local keybuf = ffi.string(keymp, #keymp) local cdata = 
internal.iterator(index.space_id, index.id, itype, keymp); return fun.wrap(iterator_gen_luac, keybuf, ffi.gc(cdata, builtin.box_iterator_free)) end -- index subtree size index_mt.count_ffi = function(index, key, opts) check_index_arg(index, 'count') local pkey, pkey_end = tuple_encode(key) local itype = check_iterator_type(opts, pkey + 1 >= pkey_end); local count = builtin.box_index_count(index.space_id, index.id, itype, pkey, pkey_end); if count == -1 then box.error() end return tonumber(count) end index_mt.count_luac = function(index, key, opts) check_index_arg(index, 'count') key = keify(key) local itype = check_iterator_type(opts, #key == 0); return internal.count(index.space_id, index.id, itype, key); end index_mt.get_ffi = function(index, key) check_index_arg(index, 'get') local key, key_end = tuple_encode(key) if builtin.box_index_get(index.space_id, index.id, key, key_end, ptuple) ~= 0 then return box.error() -- error elseif ptuple[0] ~= nil then return tuple_bless(ptuple[0]) else return end end index_mt.get_luac = function(index, key) check_index_arg(index, 'get') key = keify(key) return internal.get(index.space_id, index.id, key) end local function check_select_opts(opts, key_is_nil) local offset = 0 local limit = 4294967295 local iterator = check_iterator_type(opts, key_is_nil) if opts ~= nil then if opts.offset ~= nil then offset = opts.offset end if opts.limit ~= nil then limit = opts.limit end end return iterator, offset, limit end index_mt.select_ffi = function(index, key, opts) check_index_arg(index, 'select') local key, key_end = tuple_encode(key) local iterator, offset, limit = check_select_opts(opts, key + 1 >= key_end) local port = ffi.cast('struct port *', port_tuple) if builtin.box_select(index.space_id, index.id, iterator, offset, limit, key, key_end, port) ~= 0 then return box.error() end local ret = {} local entry = port_tuple.first for i=1,tonumber(port_tuple.size),1 do ret[i] = tuple_bless(entry.tuple) entry = entry.next end 
builtin.port_destroy(port); return ret end index_mt.select_luac = function(index, key, opts) check_index_arg(index, 'select') local key = keify(key) local iterator, offset, limit = check_select_opts(opts, #key == 0) return internal.select(index.space_id, index.id, iterator, offset, limit, key) end index_mt.update = function(index, key, ops) check_index_arg(index, 'update') return internal.update(index.space_id, index.id, keify(key), ops); end index_mt.delete = function(index, key) check_index_arg(index, 'delete') return internal.delete(index.space_id, index.id, keify(key)); end index_mt.info = function(index) return internal.info(index.space_id, index.id); end index_mt.drop = function(index) check_index_arg(index, 'drop') return box.schema.index.drop(index.space_id, index.id) end index_mt.rename = function(index, name) check_index_arg(index, 'rename') return box.schema.index.rename(index.space_id, index.id, name) end index_mt.alter = function(index, options) check_index_arg(index, 'alter') if index.id == nil or index.space_id == nil then box.error(box.error.PROC_LUA, "Usage: index:alter{opts}") end return box.schema.index.alter(index.space_id, index.id, options) end -- true if reading operations may yield local read_yields = space.engine == 'vinyl' local read_ops = {'select', 'get', 'min', 'max', 'count', 'random', 'pairs'} for _, op in ipairs(read_ops) do if read_yields then -- use Lua/C implmenetation index_mt[op] = index_mt[op .. "_luac"] else -- use FFI implementation index_mt[op] = index_mt[op .. 
"_ffi"] end end index_mt.__pairs = index_mt.pairs -- Lua 5.2 compatibility index_mt.__ipairs = index_mt.pairs -- Lua 5.2 compatibility -- local space_mt = {} space_mt.len = function(space) check_space_arg(space, 'len') local pk = space.index[0] if pk == nil then return 0 -- empty space without indexes, return 0 end return space.index[0]:len() end space_mt.count = function(space, key, opts) check_space_arg(space, 'count') local pk = space.index[0] if pk == nil then return 0 -- empty space without indexes, return 0 end return pk:count(key, opts) end space_mt.bsize = function(space) check_space_arg(space, 'bsize') local s = builtin.space_by_id(space.id) if s == nil then box.error(box.error.NO_SUCH_SPACE, space.name) end return builtin.space_bsize(s) end space_mt.__newindex = index_mt.__newindex space_mt.get = function(space, key) check_space_arg(space, 'get') return check_primary_index(space):get(key) end space_mt.select = function(space, key, opts) check_space_arg(space, 'select') return check_primary_index(space):select(key, opts) end space_mt.insert = function(space, tuple) check_space_arg(space, 'insert') return internal.insert(space.id, tuple); end space_mt.replace = function(space, tuple) check_space_arg(space, 'replace') return internal.replace(space.id, tuple); end space_mt.put = space_mt.replace; -- put is an alias for replace space_mt.update = function(space, key, ops) check_space_arg(space, 'update') return check_primary_index(space):update(key, ops) end space_mt.upsert = function(space, tuple_key, ops, deprecated) check_space_arg(space, 'upsert') if deprecated ~= nil then local msg = "Error: extra argument in upsert call: " msg = msg .. tostring(deprecated) msg = msg .. ". 
Usage :upsert(tuple, operations)" box.error(box.error.PROC_LUA, msg) end return internal.upsert(space.id, tuple_key, ops); end space_mt.delete = function(space, key) check_space_arg(space, 'delete') return check_primary_index(space):delete(key) end -- Assumes that spaceno has a TREE (NUM) primary key -- inserts a tuple after getting the next value of the -- primary key and returns it back to the user space_mt.auto_increment = function(space, tuple) check_space_arg(space, 'auto_increment') local max_tuple = check_primary_index(space):max() local max = 0 if max_tuple ~= nil then max = max_tuple[1] end table.insert(tuple, 1, max + 1) return space:insert(tuple) end space_mt.pairs = function(space, key, opts) check_space_arg(space, 'pairs') local pk = space.index[0] if pk == nil then -- empty space without indexes, return empty iterator return fun.iter({}) end return pk:pairs(key, opts) end space_mt.__pairs = space_mt.pairs -- Lua 5.2 compatibility space_mt.__ipairs = space_mt.pairs -- Lua 5.2 compatibility space_mt.truncate = function(space) check_space_arg(space, 'truncate') return internal.truncate(space.id) end space_mt.format = function(space, format) check_space_arg(space, 'format') return box.schema.space.format(space.id, format) end space_mt.drop = function(space) check_space_arg(space, 'drop') check_space_exists(space) return box.schema.space.drop(space.id, space.name) end space_mt.rename = function(space, name) check_space_arg(space, 'rename') check_space_exists(space) return box.schema.space.rename(space.id, name) end space_mt.create_index = function(space, name, options) check_space_arg(space, 'create_index') check_space_exists(space) return box.schema.index.create(space.id, name, options) end space_mt.run_triggers = function(space, yesno) check_space_arg(space, 'run_triggers') local s = builtin.space_by_id(space.id) if s == nil then box.error(box.error.NO_SUCH_SPACE, space.name) end builtin.space_run_triggers(s, yesno) end space_mt.__index = space_mt 
setmetatable(space, space_mt) if type(space.index) == 'table' and space.enabled then for j, index in pairs(space.index) do if type(j) == 'number' then setmetatable(index, index_mt) end end end end local sequence_mt = {} sequence_mt.__index = sequence_mt sequence_mt.next = function(self) return internal.sequence.next(self.id) end sequence_mt.set = function(self, value) return internal.sequence.set(self.id, value) end sequence_mt.reset = function(self) return internal.sequence.reset(self.id) end sequence_mt.alter = function(self, opts) box.schema.sequence.alter(self.id, opts) end sequence_mt.drop = function(self) box.schema.sequence.drop(self.id) end local function sequence_tuple_decode(seq, tuple) seq.id, seq.uid, seq.name, seq.step, seq.min, seq.max, seq.start, seq.cache, seq.cycle = tuple:unpack() end local function sequence_new(tuple) local seq = setmetatable({}, sequence_mt) sequence_tuple_decode(seq, tuple) return seq end local function sequence_on_alter(old_tuple, new_tuple) if old_tuple and not new_tuple then local old_name = old_tuple[3] box.sequence[old_name] = nil elseif not old_tuple and new_tuple then local seq = sequence_new(new_tuple) box.sequence[seq.name] = seq else local old_name = old_tuple[3] local seq = box.sequence[old_name] if not seq then seq = sequence_new(seq, new_tuple) else sequence_tuple_decode(seq, new_tuple) end box.sequence[old_name] = nil box.sequence[seq.name] = seq end end box.sequence = {} local function box_sequence_init() -- Install a trigger that will update Lua objects on -- _sequence space modifications. 
internal.sequence.on_alter(sequence_on_alter) end local sequence_options = { step = 'number', min = 'number', max = 'number', start = 'number', cache = 'number', cycle = 'boolean', } local create_sequence_options = table.deepcopy(sequence_options) create_sequence_options.if_not_exists = 'boolean' local alter_sequence_options = table.deepcopy(sequence_options) alter_sequence_options.name = 'string' box.schema.sequence = {} box.schema.sequence.create = function(name, opts) opts = opts or {} check_param(name, 'name', 'string') check_param_table(opts, create_sequence_options) local ascending = not opts.step or opts.step > 0 local options_defaults = { step = 1, min = ascending and 1 or INT64_MIN, max = ascending and INT64_MAX or -1, start = ascending and (opts.min or 1) or (opts.max or -1), cache = 0, cycle = false, } opts = update_param_table(opts, options_defaults) local id = sequence_resolve(name) if id ~= nil then if not opts.if_not_exists then box.error(box.error.SEQUENCE_EXISTS, name) end return box.sequence[name], 'not created' end local _sequence = box.space[box.schema.SEQUENCE_ID] _sequence:auto_increment{session.euid(), name, opts.step, opts.min, opts.max, opts.start, opts.cache, opts.cycle} return box.sequence[name] end box.schema.sequence.alter = function(name, opts) check_param_table(opts, alter_sequence_options) local id, tuple = sequence_resolve(name) if id == nil then box.error(box.error.NO_SUCH_SEQUENCE, name) end if opts == nil then return end local seq = {} sequence_tuple_decode(seq, tuple) opts = update_param_table(opts, seq) local _sequence = box.space[box.schema.SEQUENCE_ID] _sequence:replace{seq.id, seq.uid, opts.name, opts.step, opts.min, opts.max, opts.start, opts.cache, opts.cycle} end box.schema.sequence.drop = function(name, opts) opts = opts or {} check_param_table(opts, {if_exists = 'boolean'}) local id = sequence_resolve(name) if id == nil then if not opts.if_exists then box.error(box.error.NO_SUCH_SEQUENCE, name) end return end 
revoke_object_privs('sequence', id) local _sequence = box.space[box.schema.SEQUENCE_ID] local _sequence_data = box.space[box.schema.SEQUENCE_DATA_ID] _sequence_data:delete{id} _sequence:delete{id} end local function privilege_resolve(privilege) local numeric = 0 if type(privilege) == 'string' then privilege = string.lower(privilege) if string.find(privilege, 'read') then numeric = numeric + 1 end if string.find(privilege, 'write') then numeric = numeric + 2 end if string.find(privilege, 'execute') then numeric = numeric + 4 end if string.find(privilege, 'session') then numeric = numeric + 8 end if string.find(privilege, 'usage') then numeric = numeric + 16 end if string.find(privilege, 'create') then numeric = numeric + 32 end if string.find(privilege, 'drop') then numeric = numeric + 64 end if string.find(privilege, 'alter') then numeric = numeric + 128 end if string.find(privilege, 'reference') then numeric = numeric + 256 end if string.find(privilege, 'trigger') then numeric = numeric + 512 end if string.find(privilege, 'insert') then numeric = numeric + 1024 end if string.find(privilege, 'update') then numeric = numeric + 2048 end if string.find(privilege, 'delete') then numeric = numeric + 4096 end else numeric = privilege end return numeric end local function checked_privilege(privilege, object_type) local priv_hex = privilege_resolve(privilege) if object_type == 'role' and priv_hex ~= 4 then box.error(box.error.UNSUPPORTED_ROLE_PRIV, privilege) end return priv_hex end local function privilege_name(privilege) local names = {} if bit.band(privilege, 1) ~= 0 then table.insert(names, "read") end if bit.band(privilege, 2) ~= 0 then table.insert(names, "write") end if bit.band(privilege, 4) ~= 0 then table.insert(names, "execute") end if bit.band(privilege, 8) ~= 0 then table.insert(names, "session") end if bit.band(privilege, 16) ~= 0 then table.insert(names, "usage") end if bit.band(privilege, 32) ~= 0 then table.insert(names, "create") end if 
bit.band(privilege, 64) ~= 0 then table.insert(names, "drop") end if bit.band(privilege, 128) ~= 0 then table.insert(names, "alter") end if bit.band(privilege, 256) ~= 0 then table.insert(names, "reference") end if bit.band(privilege, 512) ~= 0 then table.insert(names, "trigger") end if bit.band(privilege, 1024) ~= 0 then table.insert(names, "insert") end if bit.band(privilege, 2048) ~= 0 then table.insert(names, "update") end if bit.band(privilege, 4096) ~= 0 then table.insert(names, "delete") end return table.concat(names, ",") end local function object_resolve(object_type, object_name) if object_type == 'universe' then if object_name ~= nil and type(object_name) ~= 'string' and type(object_name) ~= 'number' then box.error(box.error.ILLEGAL_PARAMS, "wrong object name type") end return 0 end if object_type == 'space' then local space = box.space[object_name] if space == nil then box.error(box.error.NO_SUCH_SPACE, object_name) end return space.id end if object_type == 'function' then local _func = box.space[box.schema.FUNC_ID] local func if type(object_name) == 'string' then func = _func.index.name:get{object_name} else func = _func:get{object_name} end if func then return func[1] else box.error(box.error.NO_SUCH_FUNCTION, object_name) end end if object_type == 'sequence' then local seq = sequence_resolve(object_name) if seq == nil then box.error(box.error.NO_SUCH_SEQUENCE, object_name) end return seq end if object_type == 'role' then local _user = box.space[box.schema.USER_ID] local role if type(object_name) == 'string' then role = _user.index.name:get{object_name} else role = _user:get{object_name} end if role and role[4] == 'role' then return role[1] else box.error(box.error.NO_SUCH_ROLE, object_name) end end box.error(box.error.UNKNOWN_SCHEMA_OBJECT, object_type) end local function object_name(object_type, object_id) if object_type == 'universe' then return "" end local space if object_type == 'space' then space = box.space._space elseif object_type == 
'sequence' then space = box.space._sequence elseif object_type == 'function' then space = box.space._func elseif object_type == 'role' or object_type == 'user' then space = box.space._user else box.error(box.error.UNKNOWN_SCHEMA_OBJECT, object_type) end return space:get{object_id}[3] end box.schema.func = {} box.schema.func.create = function(name, opts) opts = opts or {} check_param_table(opts, { setuid = 'boolean', if_not_exists = 'boolean', language = 'string'}) local _func = box.space[box.schema.FUNC_ID] local func = _func.index.name:get{name} if func then if not opts.if_not_exists then box.error(box.error.FUNCTION_EXISTS, name) end return end opts = update_param_table(opts, { setuid = false, language = 'lua'}) opts.language = string.upper(opts.language) opts.setuid = opts.setuid and 1 or 0 _func:auto_increment{session.euid(), name, opts.setuid, opts.language} end box.schema.func.drop = function(name, opts) opts = opts or {} check_param_table(opts, { if_exists = 'boolean' }) local _func = box.space[box.schema.FUNC_ID] local fid local tuple if type(name) == 'string' then tuple = _func.index.name:get{name} else tuple = _func:get{name} end if tuple then fid = tuple[1] end if fid == nil then if not opts.if_exists then box.error(box.error.NO_SUCH_FUNCTION, name) end return end revoke_object_privs('function', fid) _func:delete{fid} end function box.schema.func.exists(name_or_id) local _func = box.space[box.schema.FUNC_ID] local tuple = nil if type(name_or_id) == 'string' then tuple = _func.index.name:get{name_or_id} elseif type(name_or_id) == 'number' then tuple = _func:get{name_or_id} end return tuple ~= nil end box.schema.func.reload = internal.func_reload box.internal.collation = {} box.internal.collation.create = function(name, coll_type, locale, opts) opts = opts or setmap{} if type(name) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "name (first arg) must be a string") end if type(coll_type) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "type 
(second arg) must be a string") end if type(locale) ~= 'string' then box.error(box.error.ILLEGAL_PARAMS, "locale (third arg) must be a string") end if type(opts) ~= 'table' then box.error(box.error.ILLEGAL_PARAMS, "options (fourth arg) must be a table or nil") end local lua_opts = {if_not_exists = opts.if_not_exists } check_param_table(lua_opts, {if_not_exists = 'boolean'}) opts.if_not_exists = nil opts = setmap(opts) local _coll = box.space[box.schema.COLLATION_ID] if lua_opts.if_not_exists then local coll = _coll.index.name:get{name} if coll then return end end _coll:auto_increment{name, session.euid(), coll_type, locale, opts} end box.internal.collation.drop = function(name, opts) opts = opts or {} check_param_table(opts, { if_exists = 'boolean' }) local _coll = box.space[box.schema.COLLATION_ID] if opts.if_exists then local coll = _coll.index.name:get{name} if not coll then return end end _coll.index.name:delete{name} end box.internal.collation.exists = function(name) local _coll = box.space[box.schema.COLLATION_ID] local coll = _coll.index.name:get{name} return not not coll end box.internal.collation.id_by_name = function(name) local _coll = box.space[box.schema.COLLATION_ID] local coll = _coll.index.name:get{name} return coll[1] end box.schema.user = {} box.schema.user.password = function(password) local BUF_SIZE = 128 local buf = ffi.new("char[?]", BUF_SIZE) builtin.password_prepare(password, #password, buf, BUF_SIZE) return ffi.string(buf) end local function chpasswd(uid, new_password) local _user = box.space[box.schema.USER_ID] local auth_mech_list = {} auth_mech_list["chap-sha1"] = box.schema.user.password(new_password) _user:update({uid}, {{"=", 5, auth_mech_list}}) end box.schema.user.passwd = function(name, new_password) if name == nil then box.error(box.error.PROC_LUA, "Usage: box.schema.user.passwd([user,] password)") end if new_password == nil then -- change password for current user new_password = name box.session.su('admin', chpasswd, 
session.uid(), new_password) else -- change password for other user local uid = user_resolve(name) if uid == nil then box.error(box.error.NO_SUCH_USER, name) end return chpasswd(uid, new_password) end end box.schema.user.create = function(name, opts) local uid = user_or_role_resolve(name) opts = opts or {} check_param_table(opts, { password = 'string', if_not_exists = 'boolean' }) if uid then if not opts.if_not_exists then box.error(box.error.USER_EXISTS, name) end return end local auth_mech_list = setmap({}) if opts.password then auth_mech_list["chap-sha1"] = box.schema.user.password(opts.password) end local _user = box.space[box.schema.USER_ID] uid = _user:auto_increment{session.euid(), name, 'user', auth_mech_list}[1] -- grant role 'public' to the user box.schema.user.grant(uid, 'public') -- we have to grant global privileges from setuid function, since -- only admin has the ownership over universe and we don't have -- grant option box.session.su('admin', box.schema.user.grant, uid, 'session,usage', 'universe', nil, {if_not_exists=true}) end box.schema.user.exists = function(name) if user_resolve(name) then return true else return false end end local function grant(uid, name, privilege, object_type, object_name, options) -- From user point of view, role is the same thing -- as a privilege. Allow syntax grant(user, role). 
if object_name == nil and object_type == nil then -- sic: avoid recursion, to not bother with roles -- named 'execute' object_type = 'role' object_name = privilege privilege = 'execute' end local privilege_hex = checked_privilege(privilege, object_type) local oid = object_resolve(object_type, object_name) options = options or {} if options.grantor == nil then options.grantor = session.euid() else options.grantor = user_or_role_resolve(options.grantor) end local _priv = box.space[box.schema.PRIV_ID] -- add the granted privilege to the current set local tuple = _priv:get{uid, object_type, oid} local old_privilege if tuple ~= nil then old_privilege = tuple[5] else old_privilege = 0 end privilege_hex = bit.bor(privilege_hex, old_privilege) -- do not execute a replace if it does not change anything -- XXX bug if we decide to add a grant option: new grantor -- replaces the old one, old grantor is lost if privilege_hex ~= old_privilege then _priv:replace{options.grantor, uid, object_type, oid, privilege_hex} elseif not options.if_not_exists then if object_type == 'role' then box.error(box.error.ROLE_GRANTED, name, object_name) else box.error(box.error.PRIV_GRANTED, name, privilege, object_type, object_name) end end end local function revoke(uid, name, privilege, object_type, object_name, options) -- From user point of view, role is the same thing -- as a privilege. Allow syntax revoke(user, role). 
if object_name == nil and object_type == nil then object_type = 'role' object_name = privilege privilege = 'execute' end local privilege_hex = checked_privilege(privilege, object_type) options = options or {} local oid = object_resolve(object_type, object_name) local _priv = box.space[box.schema.PRIV_ID] local tuple = _priv:get{uid, object_type, oid} -- system privileges of admin and guest can't be revoked if tuple == nil then if options.if_exists then return end if object_type == 'role' then box.error(box.error.ROLE_NOT_GRANTED, name, object_name) else box.error(box.error.PRIV_NOT_GRANTED, name, privilege, object_type, object_name) end end local old_privilege = tuple[5] local grantor = tuple[1] -- sic: -- a user may revoke more than he/she granted -- (erroneous user input) -- privilege_hex = bit.band(old_privilege, bit.bnot(privilege_hex)) if privilege_hex ~= 0 then _priv:replace{grantor, uid, object_type, oid, privilege_hex} else _priv:delete{uid, object_type, oid} end end local function drop(uid, opts) -- recursive delete of user data local _priv = box.space[box.schema.PRIV_ID] local spaces = box.space[box.schema.SPACE_ID].index.owner:select{uid} for k, tuple in pairs(spaces) do box.space[tuple[1]]:drop() end local funcs = box.space[box.schema.FUNC_ID].index.owner:select{uid} for k, tuple in pairs(funcs) do box.schema.func.drop(tuple[1]) end -- if this is a role, revoke this role from whoever it was granted to local grants = _priv.index.object:select{'role', uid} for k, tuple in pairs(grants) do revoke(tuple[2], tuple[2], uid) end local sequences = box.space[box.schema.SEQUENCE_ID].index.owner:select{uid} for k, tuple in pairs(sequences) do box.schema.sequence.drop(tuple[1]) end -- xxx: hack, we have to revoke session and usage privileges -- of a user using a setuid function in absence of create/drop -- privileges and grant option if box.space._user:get{uid}[4] == 'user' then box.session.su('admin', box.schema.user.revoke, uid, 'session,usage', 'universe', nil, 
{if_exists = true}) end local privs = _priv.index.primary:select{uid} for k, tuple in pairs(privs) do revoke(uid, uid, tuple[5], tuple[3], tuple[4]) end box.space[box.schema.USER_ID]:delete{uid} end box.schema.user.grant = function(user_name, ...) local uid = user_resolve(user_name) if uid == nil then box.error(box.error.NO_SUCH_USER, user_name) end return grant(uid, user_name, ...) end box.schema.user.revoke = function(user_name, ...) local uid = user_resolve(user_name) if uid == nil then box.error(box.error.NO_SUCH_USER, user_name) end return revoke(uid, user_name, ...) end box.schema.user.enable = function(user) box.schema.user.grant(user, "session,usage", "universe", nil, {if_not_exists = true}) end box.schema.user.disable = function(user) box.schema.user.revoke(user, "session,usage", "universe", nil, {if_exists = true}) end box.schema.user.drop = function(name, opts) opts = opts or {} check_param_table(opts, { if_exists = 'boolean' }) local uid = user_resolve(name) if uid ~= nil then if uid >= box.schema.SYSTEM_USER_ID_MIN and uid <= box.schema.SYSTEM_USER_ID_MAX then -- gh-1205: box.schema.user.info fails box.error(box.error.DROP_USER, name, "the user or the role is a system") end if uid == box.session.uid() or uid == box.session.euid() then box.error(box.error.DROP_USER, name, "the user is active in the current session") end return drop(uid, opts) end if not opts.if_exists then box.error(box.error.NO_SUCH_USER, name) end return end local function info(id) local _priv = box.space._priv local _user = box.space._priv local privs = {} for _, v in pairs(_priv:select{id}) do table.insert( privs, {privilege_name(v[5]), v[3], object_name(v[3], v[4])} ) end return privs end box.schema.user.info = function(user_name) local uid if user_name == nil then uid = box.session.euid() else uid = user_resolve(user_name) if uid == nil then box.error(box.error.NO_SUCH_USER, user_name) end end return info(uid) end box.schema.role = {} box.schema.role.exists = function(name) if 
role_resolve(name) then return true else return false end end box.schema.role.create = function(name, opts) opts = opts or {} check_param_table(opts, { if_not_exists = 'boolean' }) local uid = user_or_role_resolve(name) if uid then if not opts.if_not_exists then box.error(box.error.ROLE_EXISTS, name) end return end local _user = box.space[box.schema.USER_ID] _user:auto_increment{session.euid(), name, 'role', setmap({})} end box.schema.role.drop = function(name, opts) opts = opts or {} check_param_table(opts, { if_exists = 'boolean' }) local uid = role_resolve(name) if uid == nil then if not opts.if_exists then box.error(box.error.NO_SUCH_ROLE, name) end return end if uid >= box.schema.SYSTEM_USER_ID_MIN and uid <= box.schema.SYSTEM_USER_ID_MAX or uid == box.schema.SUPER_ROLE_ID then -- gh-1205: box.schema.user.info fails box.error(box.error.DROP_USER, name, "the user or the role is a system") end return drop(uid) end function role_check_grant_revoke_of_sys_priv(priv) priv = string.lower(priv) if (type(priv) == 'string' and (priv:match("session") or priv:match("usage"))) or (type(priv) == "number" and (bit.band(priv, 8) ~= 0 or bit.band(priv, 16) ~= 0)) then box.error(box.error.GRANT, "system privilege can not be granted to role") end end box.schema.role.grant = function(user_name, ...) local uid = role_resolve(user_name) if uid == nil then box.error(box.error.NO_SUCH_ROLE, user_name) end role_check_grant_revoke_of_sys_priv(...) return grant(uid, user_name, ...) end box.schema.role.revoke = function(user_name, ...) local uid = role_resolve(user_name) if uid == nil then box.error(box.error.NO_SUCH_ROLE, user_name) end role_check_grant_revoke_of_sys_priv(...) return revoke(uid, user_name, ...) end box.schema.role.info = function(role_name) local rid = role_resolve(role_name) if rid == nil then box.error(box.error.NO_SUCH_ROLE, role_name) end return info(rid) end -- -- once -- box.once = function(key, func, ...) 
if type(key) ~= 'string' or type(func) ~= 'function' then box.error(box.error.ILLEGAL_PARAMS, "Usage: box.once(key, func, ...)") end local key = "once"..key if box.space._schema:get{key} ~= nil then return end box.ctl.wait_rw() box.space._schema:put{key} return func(...) end -- -- nice output when typing box.space in admin console -- box.space = {} local function box_space_mt(tab) local t = {} for k,v in pairs(tab) do -- skip system spaces and views if type(k) == 'string' and #k > 0 and k:sub(1,1) ~= '_' then t[k] = { engine = v.engine, temporary = v.temporary } end end return t end setmetatable(box.space, { __serialize = box_space_mt }) box.internal.schema = {} box.internal.schema.init = function() box_sequence_init() end box.NULL = msgpack.NULL tarantool_1.9.1.26.g63eb81e3c/src/box/lua/tuple.lua0000664000000000000000000002361213306565107020214 0ustar rootroot-- tuple.lua (internal file) local ffi = require('ffi') local yaml = require('yaml') local msgpackffi = require('msgpackffi') local fun = require('fun') local buffer = require('buffer') local internal = require('box.internal') ffi.cdef[[ /** \cond public */ typedef struct tuple_format box_tuple_format_t; box_tuple_format_t * box_tuple_format_default(void); typedef struct tuple box_tuple_t; box_tuple_t * box_tuple_new(box_tuple_format_t *format, const char *data, const char *end); int box_tuple_ref(box_tuple_t *tuple); void box_tuple_unref(box_tuple_t *tuple); uint32_t box_tuple_field_count(const box_tuple_t *tuple); size_t box_tuple_bsize(const box_tuple_t *tuple); ssize_t box_tuple_to_buf(const box_tuple_t *tuple, char *buf, size_t size); box_tuple_format_t * box_tuple_format(const box_tuple_t *tuple); const char * box_tuple_field(const box_tuple_t *tuple, uint32_t i); typedef struct tuple_iterator box_tuple_iterator_t; box_tuple_iterator_t * box_tuple_iterator(box_tuple_t *tuple); void box_tuple_iterator_free(box_tuple_iterator_t *it); uint32_t box_tuple_position(box_tuple_iterator_t *it); void 
box_tuple_rewind(box_tuple_iterator_t *it); const char * box_tuple_seek(box_tuple_iterator_t *it, uint32_t field_no); const char * box_tuple_next(box_tuple_iterator_t *it); /** \endcond public */ box_tuple_t * box_tuple_update(box_tuple_t *tuple, const char *expr, const char *expr_end); box_tuple_t * box_tuple_upsert(box_tuple_t *tuple, const char *expr, const char *expr_end); ]] local builtin = ffi.C local tuple_t = ffi.typeof('box_tuple_t') local const_tuple_ref_t = ffi.typeof('const box_tuple_t&') local is_tuple = function(tuple) return tuple ~= nil and type(tuple) == 'cdata' and ffi.istype(const_tuple_ref_t, tuple) end local encode_fix = msgpackffi.internal.encode_fix local encode_array = msgpackffi.internal.encode_array local encode_r = msgpackffi.internal.encode_r local tuple_encode = function(obj) local tmpbuf = buffer.IBUF_SHARED tmpbuf:reset() if obj == nil then encode_fix(tmpbuf, 0x90, 0) -- empty array elseif is_tuple(obj) then encode_r(tmpbuf, obj, 1) elseif type(obj) == "table" then encode_array(tmpbuf, #obj) local i for i = 1, #obj, 1 do encode_r(tmpbuf, obj[i], 1) end else encode_fix(tmpbuf, 0x90, 1) -- array of one element encode_r(tmpbuf, obj, 1) end return tmpbuf.rpos, tmpbuf.wpos end local tuple_gc = function(tuple) builtin.box_tuple_unref(tuple) end local tuple_bless = function(tuple) -- overflow checked by tuple_bless() in C builtin.box_tuple_ref(tuple) -- must never fail: return ffi.gc(ffi.cast(const_tuple_ref_t, tuple), tuple_gc) end local tuple_check = function(tuple, usage) if not is_tuple(tuple) then error('Usage: ' .. 
usage) end end local tuple_iterator_t = ffi.typeof('box_tuple_iterator_t') local tuple_iterator_ref_t = ffi.typeof('box_tuple_iterator_t &') local function tuple_iterator(tuple) local it = builtin.box_tuple_iterator(tuple) if it == nil then box.error() end return ffi.gc(ffi.cast(tuple_iterator_ref_t, it), builtin.box_tuple_iterator_free) end local function tuple_iterator_next(it, tuple, pos) if pos == nil then pos = 0 elseif type(pos) ~= "number" then error("error: invalid key to 'next'") end local curpos = builtin.box_tuple_position(it) local field if curpos == pos then -- Sequential iteration field = builtin.box_tuple_next(it) else -- Seek builtin.box_tuple_rewind(it) field = builtin.box_tuple_seek(it, pos); end if field == nil then if #tuple == pos then -- No more fields, stop iteration return nil else -- Invalid pos error("error: invalid key to 'next'") end end -- () used to shrink the return stack to one value return pos + 1, (msgpackffi.decode_unchecked(field)) end; -- See http://www.lua.org/manual/5.2/manual.html#pdf-next local function tuple_next(tuple, pos) tuple_check(tuple, "tuple:next(tuple[, pos])") if pos == nil then pos = 0 end local field = builtin.box_tuple_field(tuple, pos) if field == nil then return nil end return pos + 1, (msgpackffi.decode_unchecked(field)) end -- See http://www.lua.org/manual/5.2/manual.html#pdf-ipairs local function tuple_ipairs(tuple, pos) tuple_check(tuple, "tuple:pairs(tuple[, pos])") local it = tuple_iterator(tuple) return fun.wrap(it, tuple, pos) end local function tuple_totable(tuple, i, j) tuple_check(tuple, "tuple:totable([from[, to]])"); local it = tuple_iterator(tuple) builtin.box_tuple_rewind(it) local field if i ~= nil then if i < 1 then error('tuple.totable: invalid second argument') end field = builtin.box_tuple_seek(it, i - 1) else i = 1 field = builtin.box_tuple_next(it) end if j ~= nil then if j <= 0 then error('tuple.totable: invalid third argument') end else j = 4294967295 end local ret = {} while field ~= 
nil and i <= j do local val = msgpackffi.decode_unchecked(field) table.insert(ret, val) i = i + 1 field = builtin.box_tuple_next(it) end return setmetatable(ret, msgpackffi.array_mt) end local function tuple_unpack(tuple, i, j) return unpack(tuple_totable(tuple, i, j)) end local function tuple_find(tuple, offset, val) tuple_check(tuple, "tuple:find([offset, ]val)"); if val == nil then val = offset offset = 0 end local r = tuple:pairs(offset):index(val) return r ~= nil and offset + r or nil end local function tuple_findall(tuple, offset, val) tuple_check(tuple, "tuple:findall([offset, ]val)"); if val == nil then val = offset offset = 0 end return tuple:pairs(offset):indexes(val) :map(function(i) return offset + i end) :totable() end local function tuple_update(tuple, expr) tuple_check(tuple, "tuple:update({ { op, field, arg}+ })"); if type(expr) ~= 'table' then error("Usage: tuple:update({ { op, field, arg}+ })") end local pexpr, pexpr_end = tuple_encode(expr) local tuple = builtin.box_tuple_update(tuple, pexpr, pexpr_end) if tuple == nil then return box.error() end return tuple_bless(tuple) end local function tuple_upsert(tuple, expr) tuple_check(tuple, "tuple:upsert({ { op, field, arg}+ })"); if type(expr) ~= 'table' then error("Usage: tuple:upsert({ { op, field, arg}+ })") end local pexpr, pexpr_end = tuple_encode(expr) local tuple = builtin.box_tuple_upsert(tuple, pexpr, pexpr_end) if tuple == nil then return box.error() end return tuple_bless(tuple) end -- Set encode hooks for msgpackffi local function tuple_to_msgpack(buf, tuple) assert(ffi.istype(tuple_t, tuple)) local bsize = builtin.box_tuple_bsize(tuple) buf:reserve(bsize) builtin.box_tuple_to_buf(tuple, buf.wpos, bsize) buf.wpos = buf.wpos + bsize end local function tuple_bsize(tuple) tuple_check(tuple, "tuple:bsize()"); return tonumber(builtin.box_tuple_bsize(tuple)) end msgpackffi.on_encode(const_tuple_ref_t, tuple_to_msgpack) local function tuple_field_by_name(tuple, name) tuple_check(tuple, 
"tuple['field_name']"); return internal.tuple.tuple_field_by_name(tuple, name) end local methods = { ["next"] = tuple_next; ["ipairs"] = tuple_ipairs; ["pairs"] = tuple_ipairs; -- just alias for ipairs() ["slice"] = internal.tuple.slice; ["transform"] = internal.tuple.transform; ["find"] = tuple_find; ["findall"] = tuple_findall; ["unpack"] = tuple_unpack; ["totable"] = tuple_totable; ["update"] = tuple_update; ["upsert"] = tuple_upsert; ["bsize"] = tuple_bsize; ["tomap"] = internal.tuple.tuple_to_map; } -- Aliases for tuple:methods(). for k, v in pairs(methods) do box.tuple[k] = v end methods["__serialize"] = tuple_totable -- encode hook for msgpack/yaml/json local tuple_field = function(tuple, field_n) local field = builtin.box_tuple_field(tuple, field_n - 1) if field == nil then return nil end -- Use () to shrink stack to the first return value return (msgpackffi.decode_unchecked(field)) end ffi.metatype(tuple_t, { __len = function(tuple) return builtin.box_tuple_field_count(tuple) end; __tostring = internal.tuple.tostring; __index = function(tuple, key) if type(key) == "number" then return tuple_field(tuple, key) elseif type(key) == "string" then -- Try to get a field with a name = key. If it was not -- found (rc ~= 0) then return a method from the -- vtable. If a collision occurred, then fields have -- higher priority. For example, if a tuple T has a -- field with name 'bsize', then T.bsize returns field -- value, not tuple_bsize function. To access hidden -- methods use 'box.tuple.(T, [args...])'. 
local rc, field = tuple_field_by_name(tuple, key) if rc == 0 then return field end end return methods[key] end; __eq = function(tuple_a, tuple_b) -- Two tuple are considered equal if they have same memory address return ffi.cast('void *', tuple_a) == ffi.cast('void *', tuple_b); end; __pairs = tuple_ipairs; -- Lua 5.2 compatibility __ipairs = tuple_ipairs; -- Lua 5.2 compatibility }) ffi.metatype(tuple_iterator_t, { __call = tuple_iterator_next; __tostring = function(it) return "" end; }) -- internal api for box.select and iterators box.tuple.bless = tuple_bless box.tuple.encode = tuple_encode box.tuple.is = is_tuple tarantool_1.9.1.26.g63eb81e3c/src/box/lua/net_box.h0000664000000000000000000000327713306560010020160 0ustar rootroot#ifndef TARANTOOL_LUA_NET_BOX_H_INCLUDED #define TARANTOOL_LUA_NET_BOX_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; int luaopen_net_box(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_NET_BOX_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/session.c0000664000000000000000000002507613306565107020215 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "session.h" #include "lua/utils.h" #include "lua/trigger.h" #include #include #include #include #include "box/box.h" #include "box/session.h" #include "box/user.h" #include "box/schema.h" static const char *sessionlib_name = "box.session"; /* Create session and pin it to fiber */ static int lbox_session_create(struct lua_State *L) { struct session *session = fiber_get_session(fiber()); if (session == NULL) { int fd = luaL_optinteger(L, 1, -1); session = session_create_on_demand(fd); if (session == NULL) return luaT_error(L); } /* If a session already exists, simply reset its type */ session->type = STR2ENUM(session_type, luaL_optstring(L, 2, "console")); lua_pushnumber(L, session->id); return 1; } /** * Return a unique monotonic session * identifier. The identifier can be used * to check whether or not a session is alive. * 0 means there is no session (e.g. * a procedure is running in a detached * fiber). */ static int lbox_session_id(struct lua_State *L) { lua_pushnumber(L, current_session()->id); return 1; } /** * Return session type: one of "binary", "console", * "replication", "background" */ static int lbox_session_type(struct lua_State *L) { lua_pushstring(L, session_type_strs[current_session()->type]); return 1; } /** * Return the id of currently executed request. * Many requests share the same session so this is only * valid at session start. 0 for non-iproto sessions. 
*/ static int lbox_session_sync(struct lua_State *L) { lua_pushnumber(L, current_session()->sync); return 1; } /** * Session effective user id. * Note: user id (effective_user()->uid) * may be different in a setuid function. */ static int lbox_session_euid(struct lua_State *L) { /* * Sic: push effective session user, not the current user, * which may differ inside a setuid function. */ lua_pushnumber(L, effective_user()->uid); return 1; } /** * Session user id. * Note: effective user id * may be different in a setuid function. */ static int lbox_session_uid(struct lua_State *L) { lua_pushnumber(L, current_session()->credentials.uid); return 1; } /** Session authenticated user name. */ static int lbox_session_user(struct lua_State *L) { struct user *user = user_by_id(current_session()->credentials.uid); if (user) lua_pushstring(L, user->def->name); else lua_pushnil(L); return 1; } /** * Session effective name. * Note: effective user name may be different in * a setuid function or in box.session.su() used in sudo * mode. */ static int lbox_session_effective_user(struct lua_State *L) { struct user *user = user_by_id(effective_user()->uid); if (user) lua_pushstring(L, user->def->name); else lua_pushnil(L); return 1; } /** Session user id. 
*/ static int lbox_session_su(struct lua_State *L) { if (!box_is_configured()) luaL_error(L, "Please call box.cfg{} first"); int top = lua_gettop(L); if (top < 1) luaL_error(L, "session.su(): bad arguments"); struct session *session = current_session(); if (session == NULL) luaL_error(L, "session.su(): session does not exist"); struct user *user; if (lua_type(L, 1) == LUA_TSTRING) { size_t len; const char *name = lua_tolstring(L, 1, &len); user = user_find_by_name(name, len); } else { user = user_find(lua_tonumber(L, 1)); } if (user == NULL) luaT_error(L); if (access_check_session(user) < 0) luaT_error(L); if (top == 1) { credentials_init(&session->credentials, user->auth_token, user->def->uid); fiber_set_user(fiber(), &session->credentials); return 0; /* su */ } struct credentials su_credentials; credentials_init(&su_credentials, user->auth_token, user->def->uid); fiber_set_user(fiber(), &su_credentials); /* sudo */ luaL_checktype(L, 2, LUA_TFUNCTION); int error = lua_pcall(L, top - 2, LUA_MULTRET, 0); /* Restore the original credentials. */ fiber_set_user(fiber(), &session->credentials); if (error) luaT_error(L); return lua_gettop(L) - 1; } /** * Check whether or not a session exists. */ static int lbox_session_exists(struct lua_State *L) { if (lua_gettop(L) != 1) luaL_error(L, "session.exists(sid): bad arguments"); uint64_t sid = luaL_checkint64(L, -1); lua_pushboolean(L, session_find(sid) != NULL); return 1; } /** * Check whether or not a session exists. */ static int lbox_session_fd(struct lua_State *L) { if (lua_gettop(L) != 1) luaL_error(L, "session.fd(sid): bad arguments"); uint64_t sid = luaL_checkint64(L, -1); struct session *session = session_find(sid); if (session == NULL) luaL_error(L, "session.fd(): session does not exist"); lua_pushinteger(L, session->fd); return 1; } /** * Pretty print peer name. 
*/ static int lbox_session_peer(struct lua_State *L) { if (lua_gettop(L) > 1) luaL_error(L, "session.peer(sid): bad arguments"); int fd; struct session *session; if (lua_gettop(L) == 1) session = session_find(luaL_checkint(L, 1)); else session = current_session(); if (session == NULL) luaL_error(L, "session.peer(): session does not exist"); fd = session->fd; if (fd < 0) { lua_pushnil(L); /* no associated peer */ return 1; } struct sockaddr_storage addr; socklen_t addrlen = sizeof(addr); if (sio_getpeername(fd, (struct sockaddr *)&addr, &addrlen) < 0) luaL_error(L, "session.peer(): getpeername() failed"); lua_pushstring(L, sio_strfaddr((struct sockaddr *)&addr, addrlen)); return 1; } /** * run on_connect|on_disconnect trigger */ static int lbox_push_on_connect_event(struct lua_State *L, void *event) { (void) L; (void) event; return 0; } static int lbox_push_on_auth_event(struct lua_State *L, void *event) { struct on_auth_trigger_ctx *ctx = (struct on_auth_trigger_ctx *) event; lua_pushstring(L, ctx->username); lua_pushboolean(L, ctx->is_authenticated); return 2; } static int lbox_session_on_connect(struct lua_State *L) { return lbox_trigger_reset(L, 2, &session_on_connect, lbox_push_on_connect_event, NULL); } static int lbox_session_run_on_connect(struct lua_State *L) { struct session *session = current_session(); if (session_run_on_connect_triggers(session) != 0) return luaT_error(L); return 0; } static int lbox_session_on_disconnect(struct lua_State *L) { return lbox_trigger_reset(L, 2, &session_on_disconnect, lbox_push_on_connect_event, NULL); } static int lbox_session_run_on_disconnect(struct lua_State *L) { struct session *session = current_session(); session_run_on_disconnect_triggers(session); (void) L; return 0; } static int lbox_session_on_auth(struct lua_State *L) { return lbox_trigger_reset(L, 2, &session_on_auth, lbox_push_on_auth_event, NULL); } static int lbox_session_run_on_auth(struct lua_State *L) { struct on_auth_trigger_ctx ctx; ctx.username = 
luaL_optstring(L, 1, ""); /* * In earlier versions of tarantool on_auth trigger * was not invoked on authentication failure and the * second argument was missing. */ assert(lua_isboolean(L, 2)); ctx.is_authenticated = lua_toboolean(L, 2); if (session_run_on_auth_triggers(&ctx) != 0) return luaT_error(L); return 0; } static int lbox_push_on_access_denied_event(struct lua_State *L, void *event) { struct on_access_denied_ctx *ctx = (struct on_access_denied_ctx *) event; lua_pushstring(L, ctx->access_type); lua_pushstring(L, ctx->object_type); lua_pushstring(L, ctx->object_name); return 3; } /** * Sets trigger on_access_denied. * For test purposes only. */ static int lbox_session_on_access_denied(struct lua_State *L) { return lbox_trigger_reset(L, 2, &on_access_denied, lbox_push_on_access_denied_event, NULL); } void session_storage_cleanup(int sid) { static int ref = LUA_REFNIL; struct lua_State *L = tarantool_L; int top = lua_gettop(L); if (ref == LUA_REFNIL) { lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); if (!lua_istable(L, -1)) goto exit; lua_getfield(L, -1, "box"); if (!lua_istable(L, -1)) goto exit; lua_getfield(L, -1, "session"); if (!lua_istable(L, -1)) goto exit; lua_getmetatable(L, -1); if (!lua_istable(L, -1)) goto exit; lua_getfield(L, -1, "aggregate_storage"); if (!lua_istable(L, -1)) goto exit; ref = luaL_ref(L, LUA_REGISTRYINDEX); } lua_rawgeti(L, LUA_REGISTRYINDEX, ref); lua_pushnil(L); lua_rawseti(L, -2, sid); exit: lua_settop(L, top); } void box_lua_session_init(struct lua_State *L) { static const struct luaL_Reg session_internal_lib[] = { {"create", lbox_session_create}, {"run_on_connect", lbox_session_run_on_connect}, {"run_on_disconnect", lbox_session_run_on_disconnect}, {"run_on_auth", lbox_session_run_on_auth}, {NULL, NULL} }; luaL_register(L, "box.internal.session", session_internal_lib); lua_pop(L, 1); static const struct luaL_Reg sessionlib[] = { {"id", lbox_session_id}, {"type", lbox_session_type}, {"sync", lbox_session_sync}, {"uid", 
lbox_session_uid}, {"euid", lbox_session_euid}, {"user", lbox_session_user}, {"effective_user", lbox_session_effective_user}, {"su", lbox_session_su}, {"fd", lbox_session_fd}, {"exists", lbox_session_exists}, {"peer", lbox_session_peer}, {"on_connect", lbox_session_on_connect}, {"on_disconnect", lbox_session_on_disconnect}, {"on_auth", lbox_session_on_auth}, {"on_access_denied", lbox_session_on_access_denied}, {NULL, NULL} }; luaL_register_module(L, sessionlib_name, sessionlib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/space.h0000664000000000000000000000332113306560010017603 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_SPACE_H #define INCLUDES_TARANTOOL_LUA_SPACE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_space_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_SPACE_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/init.h0000664000000000000000000000327713306560010017465 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_LUA_INIT_H #define INCLUDES_TARANTOOL_BOX_LUA_INIT_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_LUA_INIT_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/call.h0000664000000000000000000000375213306560010017433 0ustar rootroot#ifndef INCLUDES_TARANTOOL_MOD_BOX_LUA_CALL_H #define INCLUDES_TARANTOOL_MOD_BOX_LUA_CALL_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_call_init(struct lua_State *L); struct port; struct call_request; /** * Invoke a Lua stored procedure from the binary protocol * (implementation of 'CALL' command code). */ int box_lua_call(struct call_request *request, struct port *port); int box_lua_eval(struct call_request *request, struct port *port); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_MOD_BOX_LUA_CALL_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/stat.c0000664000000000000000000000757013306565107017504 0ustar rootroot/* * * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "stat.h" #include #include #include #include #include #include "lua/utils.h" extern struct rmean *rmean_box; extern struct rmean *rmean_error; /** network statistics (iproto & cbus) */ extern struct rmean *rmean_net; extern struct rmean *rmean_tx_wal_bus; static void fill_stat_item(struct lua_State *L, int rps, int64_t total) { lua_pushstring(L, "rps"); lua_pushnumber(L, rps); lua_settable(L, -3); lua_pushstring(L, "total"); lua_pushnumber(L, total); lua_settable(L, -3); } static int set_stat_item(const char *name, int rps, int64_t total, void *cb_ctx) { struct lua_State *L = (struct lua_State *) cb_ctx; lua_pushstring(L, name); lua_newtable(L); fill_stat_item(L, rps, total); lua_settable(L, -3); return 0; } /** * A stat_foreach() callback used to handle access to e.g. * box.stats.DELETE. */ static int seek_stat_item(const char *name, int rps, int64_t total, void *cb_ctx) { struct lua_State *L = (struct lua_State *) cb_ctx; if (strcmp(name, lua_tostring(L, -1)) != 0) return 0; lua_newtable(L); fill_stat_item(L, rps, total); return 1; } static int lbox_stat_index(struct lua_State *L) { luaL_checkstring(L, -1); int res = rmean_foreach(rmean_box, seek_stat_item, L); if (res) return res; return rmean_foreach(rmean_error, seek_stat_item, L); } static int lbox_stat_call(struct lua_State *L) { lua_newtable(L); rmean_foreach(rmean_box, set_stat_item, L); rmean_foreach(rmean_error, set_stat_item, L); return 1; } static int lbox_stat_net_index(struct lua_State *L) { luaL_checkstring(L, -1); return rmean_foreach(rmean_net, seek_stat_item, L); } static int lbox_stat_net_call(struct lua_State *L) { lua_newtable(L); rmean_foreach(rmean_net, set_stat_item, L); return 1; } static const struct luaL_Reg lbox_stat_meta [] = { {"__index", lbox_stat_index}, {"__call", lbox_stat_call}, {NULL, NULL} }; static const struct luaL_Reg lbox_stat_net_meta [] = { {"__index", lbox_stat_net_index}, {"__call", lbox_stat_net_call}, {NULL, NULL} }; /** Initialize box.stat package. 
*/ void box_lua_stat_init(struct lua_State *L) { static const struct luaL_Reg statlib [] = { {NULL, NULL} }; luaL_register_module(L, "box.stat", statlib); lua_newtable(L); luaL_register(L, NULL, lbox_stat_meta); lua_setmetatable(L, -2); lua_pop(L, 1); /* stat module */ luaL_register_module(L, "box.stat.net", statlib); lua_newtable(L); luaL_register(L, NULL, lbox_stat_net_meta); lua_setmetatable(L, -2); lua_pop(L, 1); /* stat net module */ } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/session.h0000664000000000000000000000335413306560010020201 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_SESSION_H #define INCLUDES_TARANTOOL_LUA_SESSION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_session_init(struct lua_State *L); void session_storage_cleanup(int sid); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_SESSION_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/index.h0000664000000000000000000000331113306560010017616 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_LUA_INDEX_H #define INCLUDES_TARANTOOL_BOX_LUA_INDEX_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_index_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_LUA_INDEX_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/error.cc0000664000000000000000000001375413306565107020026 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "box/lua/error.h" extern "C" { #include #include #include } /* extern "C" */ #include #include #include "lua/utils.h" #include "box/error.h" static int luaT_error_raise(lua_State *L) { uint32_t code = 0; const char *reason = NULL; const char *file = ""; unsigned line = 0; lua_Debug info; /* lua_type(L, 1) == LUA_TTABLE - box.error table */ int top = lua_gettop(L); if (top <= 1) { /* re-throw saved exceptions (if any) */ if (box_error_last()) luaT_error(L); return 0; } else if (top >= 2 && lua_type(L, 2) == LUA_TNUMBER) { code = lua_tonumber(L, 2); reason = tnt_errcode_desc(code); if (top > 2) { /* Call string.format(reason, ...) to format message */ lua_getglobal(L, "string"); if (lua_isnil(L, -1)) goto raise; lua_getfield(L, -1, "format"); if (lua_isnil(L, -1)) goto raise; lua_pushstring(L, reason); for (int i = 3; i <= top; i++) lua_pushvalue(L, i); lua_call(L, top - 1, 1); reason = lua_tostring(L, -1); } else if (strchr(reason, '%') != NULL) { /* Missing arguments to format string */ luaL_error(L, "box.error(): bad arguments"); } } else if (top == 2 && lua_istable(L, 2)) { /* A special case that rethrows raw error (used by net.box) */ lua_getfield(L, 2, "code"); code = lua_tonumber(L, -1); lua_pop(L, 1); lua_getfield(L, 2, "reason"); reason = lua_tostring(L, -1); if (reason == NULL) reason = ""; lua_pop(L, 1); } else { luaL_error(L, "box.error(): bad arguments"); } raise: if (lua_getstack(L, 1, &info) && lua_getinfo(L, "Sl", &info)) { if (*info.short_src) { file = info.short_src; } else if (*info.source) { file = info.source; } else { file = "eval"; } line = info.currentline; } say_debug("box.error() at %s:%i", file, line); box_error_set(file, line, code, "%s", reason); luaT_error(L); return 0; } static int luaT_error_last(lua_State *L) { if (lua_gettop(L) >= 1) luaL_error(L, "box.error.last(): bad arguments"); struct error *e = box_error_last(); if (e == NULL) { lua_pushnil(L); return 1; } luaT_pusherror(L, e); return 1; } static int 
luaT_error_clear(lua_State *L) { if (lua_gettop(L) >= 1) luaL_error(L, "box.error.clear(): bad arguments"); box_error_clear(); return 0; } static int lbox_errinj_set(struct lua_State *L) { char *name = (char*)luaL_checkstring(L, 1); struct errinj *errinj; errinj = errinj_by_name(name); if (errinj == NULL) { say_error("%s", name); lua_pushfstring(L, "error: can't find error injection '%s'", name); return 1; } switch (errinj->type) { case ERRINJ_BOOL: errinj->bparam = lua_toboolean(L, 2); break; case ERRINJ_INT: errinj->iparam = luaL_checkint64(L, 2); break; case ERRINJ_DOUBLE: errinj->dparam = lua_tonumber(L, 2); break; default: lua_pushfstring(L, "error: unknown injection type '%s'", name); return 1; } lua_pushstring(L, "ok"); return 1; } static inline int lbox_errinj_cb(struct errinj *e, void *cb_ctx) { struct lua_State *L = (struct lua_State*)cb_ctx; lua_pushstring(L, e->name); lua_newtable(L); lua_pushstring(L, "state"); switch (e->type) { case ERRINJ_BOOL: lua_pushboolean(L, e->bparam); break; case ERRINJ_INT: luaL_pushint64(L, e->iparam); break; case ERRINJ_DOUBLE: lua_pushnumber(L, e->dparam); break; default: unreachable(); } lua_settable(L, -3); lua_settable(L, -3); return 0; } static int lbox_errinj_info(struct lua_State *L) { lua_newtable(L); errinj_foreach(lbox_errinj_cb, L); return 1; } void box_lua_error_init(struct lua_State *L) { static const struct luaL_Reg errorlib[] = { {NULL, NULL} }; luaL_register_module(L, "box.error", errorlib); for (int i = 0; i < box_error_code_MAX; i++) { const char *name = box_error_codes[i].errstr; if (strstr(name, "UNUSED") || strstr(name, "RESERVED")) continue; assert(strncmp(name, "ER_", 3) == 0); lua_pushnumber(L, i); /* cut ER_ prefix from constant */ lua_setfield(L, -2, name + 3); } lua_newtable(L); { lua_pushcfunction(L, luaT_error_raise); lua_setfield(L, -2, "__call"); lua_newtable(L); { lua_pushcfunction(L, luaT_error_last); lua_setfield(L, -2, "last"); } { lua_pushcfunction(L, luaT_error_clear); lua_setfield(L, 
-2, "clear"); } { lua_pushcfunction(L, luaT_error_raise); lua_setfield(L, -2, "raise"); } lua_setfield(L, -2, "__index"); } lua_setmetatable(L, -2); lua_pop(L, 1); static const struct luaL_Reg errinjlib[] = { {"info", lbox_errinj_info}, {"set", lbox_errinj_set}, {NULL, NULL} }; /* box.error.injection is not set by register_module */ luaL_register_module(L, "box.error.injection", errinjlib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/slab.h0000664000000000000000000000326713306560010017442 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_SLAB_H #define INCLUDES_TARANTOOL_LUA_SLAB_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_slab_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_SLAB_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/checkpoint_daemon.lua0000664000000000000000000000745513306565107022544 0ustar rootroot-- checkpoint_daemon.lua (internal file) local log = require 'log' local fiber = require 'fiber' local fio = require 'fio' local yaml = require 'yaml' local errno = require 'errno' local digest = require 'digest' local pickle = require 'pickle' local PREFIX = 'checkpoint_daemon' local daemon = { checkpoint_interval = 0; fiber = nil; control = nil; } -- create snapshot, return true if no errors local function snapshot() log.info("making snapshot...") local s, e = pcall(function() box.snapshot() end) if s then return true end -- don't complain in the log if the snapshot already exists if errno() == errno.EEXIST then return false end log.error("error while creating snapshot: %s", e) return false end -- check filesystem and current time local function process(self) if daemon.checkpoint_interval == nil then return false end if not(daemon.checkpoint_interval > 0) then return false end local checkpoints = box.internal.gc.info().checkpoints local last_checkpoint = checkpoints[#checkpoints] local last_snap = fio.pathjoin(box.cfg.memtx_dir, string.format('%020d.snap', last_checkpoint.signature)) local snstat = fio.stat(last_snap) if snstat == nil then log.error("can't stat %s: %s", last_snap, errno.strerror()) return false end if snstat.mtime + daemon.checkpoint_interval <= fiber.time() then return snapshot() end end local function daemon_fiber(self) fiber.name(PREFIX, {truncate = true}) log.info("started") -- -- Add random offset to the initial period to avoid simultaneous -- snapshotting when multiple instances of tarantool are running -- on the same host. 
-- See https://github.com/tarantool/tarantool/issues/732 -- local random = pickle.unpack('i', digest.urandom(4)) local offset = random % self.checkpoint_interval while true do local period = self.checkpoint_interval + offset -- maintain next_snapshot_time as a self member for testing purposes self.next_snapshot_time = fiber.time() + period log.info("scheduled the next snapshot at %s", os.date("%c", self.next_snapshot_time)) local msg = self.control:get(period) if msg == 'shutdown' then break elseif msg == 'reload' then log.info("reloaded") -- continue elseif msg == nil and box.info.status == 'running' then local s, e = pcall(process, self) if not s then log.error(e) end offset = 0 end end self.next_snapshot_time = nil log.info("stopped") end local function reload(self) if self.checkpoint_interval > 0 then if self.control == nil then -- Start daemon self.control = fiber.channel() self.fiber = fiber.create(daemon_fiber, self) fiber.sleep(0) else -- Reload daemon self.control:put("reload") -- -- channel:put() doesn't block the writer if there -- is a ready reader. Give daemon fiber way so that -- it can execute before reload() returns to the caller. -- fiber.sleep(0) end elseif self.control ~= nil then -- Shutdown daemon self.control:put("shutdown") self.fiber = nil self.control = nil fiber.sleep(0) -- see comment above end end setmetatable(daemon, { __index = { set_checkpoint_interval = function() daemon.checkpoint_interval = box.cfg.checkpoint_interval reload(daemon) return end, } }) if box.internal == nil then box.internal = { [PREFIX] = daemon } else box.internal[PREFIX] = daemon end tarantool_1.9.1.26.g63eb81e3c/src/box/lua/info.h0000664000000000000000000000355313306560010017452 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_INFO_H #define INCLUDES_TARANTOOL_LUA_INFO_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; struct info_handler; void box_lua_info_init(struct lua_State *L); void luaT_info_handler_create(struct info_handler *h, struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_INFO_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/console.h0000664000000000000000000000331213306560010020152 0ustar rootroot#ifndef TARANTOOL_LUA_CONSOLE_H_INCLUDED #define TARANTOOL_LUA_CONSOLE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void tarantool_lua_console_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_CONSOLE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/xlog.h0000664000000000000000000000316113306560010017463 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_XLOG_H #define INCLUDES_TARANTOOL_LUA_XLOG_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ struct lua_State; #ifdef __cplusplus extern "C" { #endif void box_lua_xlog_init(struct lua_State *L); #ifdef __cplusplus } #endif #endif /* INCLUDES_TARANTOOL_LUA_XLOG_PARSER_V12_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/tuple.h0000664000000000000000000000531413306565107017661 0ustar rootroot#ifndef INCLUDES_TARANTOOL_MOD_BOX_LUA_TUPLE_H #define INCLUDES_TARANTOOL_MOD_BOX_LUA_TUPLE_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct tuple; typedef struct tuple box_tuple_t; struct lua_State; struct mpstream; struct luaL_serializer; /** \cond public */ /** * Push a tuple onto the stack. 
* @param L Lua State * @sa luaT_istuple * @throws on OOM */ void luaT_pushtuple(struct lua_State *L, box_tuple_t *tuple); /** * Checks whether argument idx is a tuple * * @param L Lua State * @param idx the stack index * @retval non-NULL argument is tuple * @retval NULL argument is not tuple */ box_tuple_t * luaT_istuple(struct lua_State *L, int idx); /** \endcond public */ static inline int luaT_pushtupleornil(struct lua_State *L, struct tuple *tuple) { if (tuple == NULL) return 0; luaT_pushtuple(L, tuple); return 1; } void luamp_convert_key(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, int index); void luamp_encode_tuple(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, int index); void tuple_to_mpstream(struct tuple *tuple, struct mpstream *stream); void box_lua_tuple_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_MOD_BOX_LUA_TUPLE_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/net_box.c0000664000000000000000000004004513306565107020161 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "net_box.h" #include #include #include /* mp_store_u32() */ #include "scramble.h" #include "box/iproto_constants.h" #include "box/lua/tuple.h" /* luamp_convert_tuple() / luamp_convert_key() */ #include "box/xrow.h" #include "lua/msgpack.h" #include "third_party/base64.h" #include "coio.h" #include "box/errcode.h" #include "lua/fiber.h" #define cfg luaL_msgpack_default static inline size_t netbox_prepare_request(lua_State *L, struct mpstream *stream, uint32_t r_type) { struct ibuf *ibuf = (struct ibuf *) lua_topointer(L, 1); uint64_t sync = luaL_touint64(L, 2); uint64_t schema_version = luaL_touint64(L, 3); mpstream_init(stream, ibuf, ibuf_reserve_cb, ibuf_alloc_cb, luamp_error, L); /* Remember initial size of ibuf (see netbox_encode_request()) */ size_t used = ibuf_used(ibuf); /* Reserve and skip space for fixheader */ size_t fixheader_size = mp_sizeof_uint(UINT32_MAX); mpstream_reserve(stream, fixheader_size); mpstream_advance(stream, fixheader_size); /* encode header */ luamp_encode_map(cfg, stream, 3); luamp_encode_uint(cfg, stream, IPROTO_SYNC); luamp_encode_uint(cfg, stream, sync); luamp_encode_uint(cfg, stream, IPROTO_SCHEMA_VERSION); luamp_encode_uint(cfg, stream, schema_version); luamp_encode_uint(cfg, stream, IPROTO_REQUEST_TYPE); luamp_encode_uint(cfg, stream, r_type); /* Caller should remember how many bytes was used in ibuf */ return used; } static inline void netbox_encode_request(struct mpstream *stream, size_t initial_size) { 
mpstream_flush(stream); struct ibuf *ibuf = (struct ibuf *) stream->ctx; /* * Calculation the start position in ibuf by getting current size * and then substracting initial size. Since we don't touch * ibuf->rpos during encoding this approach should always work * even on realloc or memmove inside ibuf. */ size_t fixheader_size = mp_sizeof_uint(UINT32_MAX); size_t used = ibuf_used(ibuf); assert(initial_size + fixheader_size <= used); size_t total_size = used - initial_size; char *fixheader = ibuf->wpos - total_size; assert(fixheader >= ibuf->rpos); /* patch skipped len */ *(fixheader++) = 0xce; /* fixheader size is not included */ mp_store_u32(fixheader, total_size - fixheader_size); } static int netbox_encode_ping(lua_State *L) { if (lua_gettop(L) < 3) return luaL_error(L, "Usage: netbox.encode_ping(ibuf, sync, " "schema_version)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_PING); netbox_encode_request(&stream, svp); return 0; } static int netbox_encode_auth(lua_State *L) { if (lua_gettop(L) < 6) return luaL_error(L, "Usage: netbox.encode_update(ibuf, sync, " "schema_version, user, password, greeting)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_AUTH); size_t user_len; const char *user = lua_tolstring(L, 4, &user_len); size_t password_len; const char *password = lua_tolstring(L, 5, &password_len); size_t salt_len; const char *salt = lua_tolstring(L, 6, &salt_len); if (salt_len < SCRAMBLE_SIZE) return luaL_error(L, "Invalid salt"); /* Adapted from xrow_encode_auth() */ luamp_encode_map(cfg, &stream, password != NULL ? 
2 : 1); luamp_encode_uint(cfg, &stream, IPROTO_USER_NAME); luamp_encode_str(cfg, &stream, user, user_len); if (password != NULL) { /* password can be omitted */ char scramble[SCRAMBLE_SIZE]; scramble_prepare(scramble, salt, password, password_len); luamp_encode_uint(cfg, &stream, IPROTO_TUPLE); luamp_encode_array(cfg, &stream, 2); luamp_encode_str(cfg, &stream, "chap-sha1", strlen("chap-sha1")); luamp_encode_str(cfg, &stream, scramble, SCRAMBLE_SIZE); } netbox_encode_request(&stream, svp); return 0; } static int netbox_encode_call_impl(lua_State *L, enum iproto_type type) { if (lua_gettop(L) < 5) return luaL_error(L, "Usage: netbox.encode_call(ibuf, sync, " "schema_version, function_name, args)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, type); luamp_encode_map(cfg, &stream, 2); /* encode proc name */ size_t name_len; const char *name = lua_tolstring(L, 4, &name_len); luamp_encode_uint(cfg, &stream, IPROTO_FUNCTION_NAME); luamp_encode_str(cfg, &stream, name, name_len); /* encode args */ luamp_encode_uint(cfg, &stream, IPROTO_TUPLE); luamp_encode_tuple(L, cfg, &stream, 5); netbox_encode_request(&stream, svp); return 0; } static int netbox_encode_call_16(lua_State *L) { return netbox_encode_call_impl(L, IPROTO_CALL_16); } static int netbox_encode_call(lua_State *L) { return netbox_encode_call_impl(L, IPROTO_CALL); } static int netbox_encode_eval(lua_State *L) { if (lua_gettop(L) < 5) return luaL_error(L, "Usage: netbox.encode_eval(ibuf, sync, " "schema_version, expr, args)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_EVAL); luamp_encode_map(cfg, &stream, 2); /* encode expr */ size_t expr_len; const char *expr = lua_tolstring(L, 4, &expr_len); luamp_encode_uint(cfg, &stream, IPROTO_EXPR); luamp_encode_str(cfg, &stream, expr, expr_len); /* encode args */ luamp_encode_uint(cfg, &stream, IPROTO_TUPLE); luamp_encode_tuple(L, cfg, &stream, 5); netbox_encode_request(&stream, svp); return 0; } static int 
netbox_encode_select(lua_State *L) { if (lua_gettop(L) < 9) return luaL_error(L, "Usage netbox.encode_select(ibuf, sync, " "schema_version, space_id, index_id, iterator, " "offset, limit, key)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_SELECT); luamp_encode_map(cfg, &stream, 6); uint32_t space_id = lua_tonumber(L, 4); uint32_t index_id = lua_tonumber(L, 5); int iterator = lua_tointeger(L, 6); uint32_t offset = lua_tonumber(L, 7); uint32_t limit = lua_tonumber(L, 8); /* encode space_id */ luamp_encode_uint(cfg, &stream, IPROTO_SPACE_ID); luamp_encode_uint(cfg, &stream, space_id); /* encode index_id */ luamp_encode_uint(cfg, &stream, IPROTO_INDEX_ID); luamp_encode_uint(cfg, &stream, index_id); /* encode iterator */ luamp_encode_uint(cfg, &stream, IPROTO_ITERATOR); luamp_encode_uint(cfg, &stream, iterator); /* encode offset */ luamp_encode_uint(cfg, &stream, IPROTO_OFFSET); luamp_encode_uint(cfg, &stream, offset); /* encode limit */ luamp_encode_uint(cfg, &stream, IPROTO_LIMIT); luamp_encode_uint(cfg, &stream, limit); /* encode key */ luamp_encode_uint(cfg, &stream, IPROTO_KEY); luamp_convert_key(L, cfg, &stream, 9); netbox_encode_request(&stream, svp); return 0; } static inline int netbox_encode_insert_or_replace(lua_State *L, uint32_t reqtype) { if (lua_gettop(L) < 5) return luaL_error(L, "Usage: netbox.encode_insert(ibuf, sync, " "schema_version, space_id, tuple)"); lua_settop(L, 5); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, reqtype); luamp_encode_map(cfg, &stream, 2); /* encode space_id */ uint32_t space_id = lua_tonumber(L, 4); luamp_encode_uint(cfg, &stream, IPROTO_SPACE_ID); luamp_encode_uint(cfg, &stream, space_id); /* encode args */ luamp_encode_uint(cfg, &stream, IPROTO_TUPLE); luamp_encode_tuple(L, cfg, &stream, 5); netbox_encode_request(&stream, svp); return 0; } static int netbox_encode_insert(lua_State *L) { return netbox_encode_insert_or_replace(L, IPROTO_INSERT); } static int 
netbox_encode_replace(lua_State *L) { return netbox_encode_insert_or_replace(L, IPROTO_REPLACE); } static int netbox_encode_delete(lua_State *L) { if (lua_gettop(L) < 6) return luaL_error(L, "Usage: netbox.encode_delete(ibuf, sync, " "schema_version, space_id, index_id, key)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_DELETE); luamp_encode_map(cfg, &stream, 3); /* encode space_id */ uint32_t space_id = lua_tonumber(L, 4); luamp_encode_uint(cfg, &stream, IPROTO_SPACE_ID); luamp_encode_uint(cfg, &stream, space_id); /* encode space_id */ uint32_t index_id = lua_tonumber(L, 5); luamp_encode_uint(cfg, &stream, IPROTO_INDEX_ID); luamp_encode_uint(cfg, &stream, index_id); /* encode key */ luamp_encode_uint(cfg, &stream, IPROTO_KEY); luamp_convert_key(L, cfg, &stream, 6); netbox_encode_request(&stream, svp); return 0; } static int netbox_encode_update(lua_State *L) { if (lua_gettop(L) < 7) return luaL_error(L, "Usage: netbox.encode_update(ibuf, sync, " "schema_version, space_id, index_id, key, ops)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_UPDATE); luamp_encode_map(cfg, &stream, 5); /* encode space_id */ uint32_t space_id = lua_tonumber(L, 4); luamp_encode_uint(cfg, &stream, IPROTO_SPACE_ID); luamp_encode_uint(cfg, &stream, space_id); /* encode index_id */ uint32_t index_id = lua_tonumber(L, 5); luamp_encode_uint(cfg, &stream, IPROTO_INDEX_ID); luamp_encode_uint(cfg, &stream, index_id); /* encode index_id */ luamp_encode_uint(cfg, &stream, IPROTO_INDEX_BASE); luamp_encode_uint(cfg, &stream, 1); /* encode in reverse order for speedup - see luamp_encode() code */ /* encode ops */ luamp_encode_uint(cfg, &stream, IPROTO_TUPLE); luamp_encode_tuple(L, cfg, &stream, 7); lua_pop(L, 1); /* ops */ /* encode key */ luamp_encode_uint(cfg, &stream, IPROTO_KEY); luamp_convert_key(L, cfg, &stream, 6); netbox_encode_request(&stream, svp); return 0; } static int netbox_encode_upsert(lua_State *L) { if 
(lua_gettop(L) != 6) return luaL_error(L, "Usage: netbox.encode_upsert(ibuf, sync, " "schema_version, space_id, tuple, ops)"); struct mpstream stream; size_t svp = netbox_prepare_request(L, &stream, IPROTO_UPSERT); luamp_encode_map(cfg, &stream, 4); /* encode space_id */ uint32_t space_id = lua_tonumber(L, 4); luamp_encode_uint(cfg, &stream, IPROTO_SPACE_ID); luamp_encode_uint(cfg, &stream, space_id); /* encode index_base */ luamp_encode_uint(cfg, &stream, IPROTO_INDEX_BASE); luamp_encode_uint(cfg, &stream, 1); /* encode in reverse order for speedup - see luamp_encode() code */ /* encode ops */ luamp_encode_uint(cfg, &stream, IPROTO_OPS); luamp_encode_tuple(L, cfg, &stream, 6); lua_pop(L, 1); /* ops */ /* encode tuple */ luamp_encode_uint(cfg, &stream, IPROTO_TUPLE); luamp_encode_tuple(L, cfg, &stream, 5); netbox_encode_request(&stream, svp); return 0; } static int netbox_decode_greeting(lua_State *L) { struct greeting greeting; size_t len; const char *buf = NULL; char uuid_buf[UUID_STR_LEN + 1]; if (lua_isstring(L, 1)) buf = lua_tolstring(L, 1, &len); if (buf == NULL || len != IPROTO_GREETING_SIZE || greeting_decode(buf, &greeting) != 0) { lua_pushboolean(L, 0); lua_pushstring(L, "Invalid greeting"); return 2; } lua_newtable(L); lua_pushinteger(L, greeting.version_id); lua_setfield(L, -2, "version_id"); lua_pushstring(L, greeting.protocol); lua_setfield(L, -2, "protocol"); lua_pushlstring(L, greeting.salt, greeting.salt_len); lua_setfield(L, -2, "salt"); tt_uuid_to_string(&greeting.uuid, uuid_buf); lua_pushstring(L, uuid_buf); lua_setfield(L, -2, "uuid"); return 1; } /** * communicate(fd, send_buf, recv_buf, limit_or_boundary, timeout) * -> errno, error * -> nil, limit/boundary_pos * * The need for this function arises from not wanting to * have more than one watcher for a single fd, and thus issue * redundant epoll_ctl(EPOLLCTL_ADD) for it when doing both * reading and writing. 
* * Instead, this function takes an fd, input and output buffer, * and does sending and receiving on it in a single event loop * interaction. */ static int netbox_communicate(lua_State *L) { uint32_t fd = lua_tonumber(L, 1); const int NETBOX_READAHEAD = 16320; struct ibuf *send_buf = (struct ibuf *) lua_topointer(L, 2); struct ibuf *recv_buf = (struct ibuf *) lua_topointer(L, 3); /* limit or boundary */ size_t limit = SIZE_MAX; const void *boundary = NULL; size_t boundary_len; if (lua_type(L, 4) == LUA_TSTRING) boundary = lua_tolstring(L, 4, &boundary_len); else limit = lua_tonumber(L, 4); /* timeout */ ev_tstamp timeout = TIMEOUT_INFINITY; if (lua_type(L, 5) == LUA_TNUMBER) timeout = lua_tonumber(L, 5); if (timeout < 0) { lua_pushinteger(L, ER_TIMEOUT); lua_pushstring(L, "Timeout exceeded"); return 2; } int revents = COIO_READ; while (true) { /* reader serviced first */ check_limit: if (ibuf_used(recv_buf) >= limit) { lua_pushnil(L); lua_pushinteger(L, (lua_Integer)limit); return 2; } const char *p; if (boundary != NULL && (p = memmem( recv_buf->rpos, ibuf_used(recv_buf), boundary, boundary_len)) != NULL) { lua_pushnil(L); lua_pushinteger(L, (lua_Integer)( p - recv_buf->rpos)); return 2; } while (revents & COIO_READ) { void *p = ibuf_reserve(recv_buf, NETBOX_READAHEAD); if (p == NULL) luaL_error(L, "out of memory"); ssize_t rc = recv( fd, recv_buf->wpos, ibuf_unused(recv_buf), 0); if (rc == 0) { lua_pushinteger(L, ER_NO_CONNECTION); lua_pushstring(L, "Peer closed"); return 2; } if (rc > 0) { recv_buf->wpos += rc; goto check_limit; } else if (errno == EAGAIN || errno == EWOULDBLOCK) revents &= ~COIO_READ; else if (errno != EINTR) goto handle_error; } while ((revents & COIO_WRITE) && ibuf_used(send_buf) != 0) { ssize_t rc = send( fd, send_buf->rpos, ibuf_used(send_buf), 0); if (rc >= 0) send_buf->rpos += rc; else if (errno == EAGAIN || errno == EWOULDBLOCK) revents &= ~COIO_WRITE; else if (errno != EINTR) goto handle_error; } ev_tstamp deadline = 
ev_monotonic_now(loop()) + timeout; revents = coio_wait(fd, EV_READ | (ibuf_used(send_buf) != 0 ? EV_WRITE : 0), timeout); luaL_testcancel(L); timeout = deadline - ev_monotonic_now(loop()); timeout = MAX(0.0, timeout); if (revents == 0 && timeout == 0.0) { lua_pushinteger(L, ER_TIMEOUT); lua_pushstring(L, "Timeout exceeded"); return 2; } } handle_error: lua_pushinteger(L, ER_NO_CONNECTION); lua_pushstring(L, strerror(errno)); return 2; } int luaopen_net_box(struct lua_State *L) { static const luaL_Reg net_box_lib[] = { { "encode_ping", netbox_encode_ping }, { "encode_call_16", netbox_encode_call_16 }, { "encode_call", netbox_encode_call }, { "encode_eval", netbox_encode_eval }, { "encode_select", netbox_encode_select }, { "encode_insert", netbox_encode_insert }, { "encode_replace", netbox_encode_replace }, { "encode_delete", netbox_encode_delete }, { "encode_update", netbox_encode_update }, { "encode_upsert", netbox_encode_upsert }, { "encode_auth", netbox_encode_auth }, { "decode_greeting",netbox_decode_greeting }, { "communicate", netbox_communicate }, { NULL, NULL} }; /* luaL_register_module polutes _G */ lua_newtable(L); luaL_openlib(L, NULL, net_box_lib, 0); lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); lua_pushvalue(L, -2); lua_setfield(L, -2, "net.box.lib"); lua_remove(L, -1); return 1; } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/ctl.c0000664000000000000000000000434113306560010017270 0ustar rootroot/* * Copyright 2010-2018, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/lua/ctl.h" #include #include #include #include #include "lua/utils.h" #include "box/box.h" static int lbox_ctl_wait_ro(struct lua_State *L) { int index = lua_gettop(L); double timeout = TIMEOUT_INFINITY; if (index > 0) timeout = luaL_checknumber(L, 1); if (box_wait_ro(true, timeout) != 0) return luaT_error(L); return 0; } static int lbox_ctl_wait_rw(struct lua_State *L) { int index = lua_gettop(L); double timeout = TIMEOUT_INFINITY; if (index > 0) timeout = luaL_checknumber(L, 1); if (box_wait_ro(false, timeout) != 0) return luaT_error(L); return 0; } static const struct luaL_Reg lbox_ctl_lib[] = { {"wait_ro", lbox_ctl_wait_ro}, {"wait_rw", lbox_ctl_wait_rw}, {NULL, NULL} }; void box_lua_ctl_init(struct lua_State *L) { luaL_register_module(L, "box.ctl", lbox_ctl_lib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/misc.cc0000664000000000000000000000756613306560010017620 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "box/lua/misc.h" #include "fiber.h" /* fiber->gc() */ #include #include "lua/utils.h" #include "lua/msgpack.h" #include "box/box.h" #include "box/port.h" #include "box/lua/tuple.h" /** {{{ Miscellaneous utils **/ char * lbox_encode_tuple_on_gc(lua_State *L, int idx, size_t *p_len) { struct region *gc = &fiber()->gc; size_t used = region_used(gc); struct mpstream stream; mpstream_init(&stream, gc, region_reserve_cb, region_alloc_cb, luamp_error, L); luamp_encode_tuple(L, luaL_msgpack_default, &stream, idx); mpstream_flush(&stream); *p_len = region_used(gc) - used; return (char *) region_join_xc(gc, *p_len); } /* }}} */ /** {{{ Lua/C implementation of index:select(): used only by Vinyl **/ static inline void lbox_port_to_table(lua_State *L, struct port *port_base) { struct port_tuple *port = port_tuple(port_base); lua_createtable(L, port->size, 0); struct port_tuple_entry *entry = port->first; for (int i = 0 ; i < port->size; i++) { luaT_pushtuple(L, entry->tuple); lua_rawseti(L, -2, i + 1); entry = entry->next; } } static int lbox_select(lua_State *L) { if (lua_gettop(L) != 6 || !lua_isnumber(L, 1) || !lua_isnumber(L, 2) || !lua_isnumber(L, 3) || !lua_isnumber(L, 4) || !lua_isnumber(L, 5)) { return luaL_error(L, "Usage index:select(iterator, offset, " "limit, key)"); } uint32_t space_id = lua_tonumber(L, 1); uint32_t index_id = lua_tonumber(L, 2); int iterator = lua_tonumber(L, 3); uint32_t offset = lua_tonumber(L, 4); uint32_t limit = lua_tonumber(L, 5); size_t key_len; const char *key = lbox_encode_tuple_on_gc(L, 6, &key_len); struct port port; if (box_select(space_id, index_id, iterator, offset, limit, key, key + key_len, &port) != 0) { return luaT_error(L); } /* * Lua may raise an exception during allocating table or pushing * tuples. In this case `port' definitely will leak. It is possible to * wrap lbox_port_to_table() to pcall(), but it was too expensive * for this binding according to our benchmarks (~5% decrease). 
* However, we tried to simulate this situation and LuaJIT finalizers * table always crashed the first (can't be fixed with pcall). * https://github.com/tarantool/tarantool/issues/1182 */ lbox_port_to_table(L, &port); port_destroy(&port); return 1; /* lua table with tuples */ } /* }}} */ void box_lua_misc_init(struct lua_State *L) { static const struct luaL_Reg boxlib_internal[] = { {"select", lbox_select}, {NULL, NULL} }; luaL_register(L, "box.internal", boxlib_internal); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/cfg.cc0000664000000000000000000001400013306565107017415 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "cfg.h" #include "exception.h" #include #include "main.h" #include "lua/utils.h" #include "box/box.h" #include "libeio/eio.h" extern "C" { #include } // extern "C" static int lbox_cfg_check(struct lua_State *L) { try { box_check_config(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_load(struct lua_State *L) { try { load_cfg(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_listen(struct lua_State *L) { try { box_bind(); box_listen(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_replication(struct lua_State *L) { try { box_set_replication(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_log_level(struct lua_State *L) { try { box_set_log_level(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_log_format(struct lua_State *L) { try { box_set_log_format(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_readahead(struct lua_State *L) { try { box_set_readahead(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_io_collect_interval(struct lua_State *L) { try { box_set_io_collect_interval(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_too_long_threshold(struct lua_State *L) { try { box_set_too_long_threshold(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_snap_io_rate_limit(struct lua_State *L) { try { box_set_snap_io_rate_limit(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_checkpoint_count(struct lua_State *L) { try { box_set_checkpoint_count(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_read_only(struct lua_State *L) { try { box_set_ro(cfg_geti("read_only") != 0); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_memtx_max_tuple_size(struct lua_State *L) { try { box_set_memtx_max_tuple_size(); } 
catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_vinyl_max_tuple_size(struct lua_State *L) { try { box_set_vinyl_max_tuple_size(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_vinyl_cache(struct lua_State *L) { try { box_set_vinyl_cache(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_vinyl_timeout(struct lua_State *L) { try { box_set_vinyl_timeout(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_worker_pool_threads(struct lua_State *L) { (void) L; eio_set_min_parallel(cfg_geti("worker_pool_threads")); eio_set_max_parallel(cfg_geti("worker_pool_threads")); return 0; } static int lbox_cfg_set_replication_timeout(struct lua_State *L) { try { box_set_replication_timeout(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_replication_connect_timeout(struct lua_State *L) { try { box_set_replication_connect_timeout(); } catch (Exception *) { luaT_error(L); } return 0; } static int lbox_cfg_set_replication_connect_quorum(struct lua_State *L) { try { box_set_replication_connect_quorum(); } catch (Exception *) { luaT_error(L); } return 0; } void box_lua_cfg_init(struct lua_State *L) { static const struct luaL_Reg cfglib_internal[] = { {"cfg_check", lbox_cfg_check}, {"cfg_load", lbox_cfg_load}, {"cfg_set_listen", lbox_cfg_set_listen}, {"cfg_set_replication", lbox_cfg_set_replication}, {"cfg_set_worker_pool_threads", lbox_cfg_set_worker_pool_threads}, {"cfg_set_log_level", lbox_cfg_set_log_level}, {"cfg_set_log_format", lbox_cfg_set_log_format}, {"cfg_set_readahead", lbox_cfg_set_readahead}, {"cfg_set_io_collect_interval", lbox_cfg_set_io_collect_interval}, {"cfg_set_too_long_threshold", lbox_cfg_set_too_long_threshold}, {"cfg_set_snap_io_rate_limit", lbox_cfg_set_snap_io_rate_limit}, {"cfg_set_checkpoint_count", lbox_cfg_set_checkpoint_count}, {"cfg_set_read_only", lbox_cfg_set_read_only}, {"cfg_set_memtx_max_tuple_size", 
lbox_cfg_set_memtx_max_tuple_size}, {"cfg_set_vinyl_max_tuple_size", lbox_cfg_set_vinyl_max_tuple_size}, {"cfg_set_vinyl_cache", lbox_cfg_set_vinyl_cache}, {"cfg_set_vinyl_timeout", lbox_cfg_set_vinyl_timeout}, {"cfg_set_replication_timeout", lbox_cfg_set_replication_timeout}, {"cfg_set_replication_connect_timeout", lbox_cfg_set_replication_connect_timeout}, {"cfg_set_replication_connect_quorum", lbox_cfg_set_replication_connect_quorum}, {NULL, NULL} }; luaL_register(L, "box.internal", cfglib_internal); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/load_cfg.lua0000664000000000000000000003426113306565107020623 0ustar rootroot-- load_cfg.lua - internal file local log = require('log') local json = require('json') local private = require('box.internal') local urilib = require('uri') local math = require('math') -- all available options local default_cfg = { listen = nil, memtx_memory = 256 * 1024 *1024, memtx_min_tuple_size = 16, memtx_max_tuple_size = 1024 * 1024, slab_alloc_factor = 1.05, work_dir = nil, memtx_dir = ".", wal_dir = ".", vinyl_dir = '.', vinyl_memory = 128 * 1024 * 1024, vinyl_cache = 128 * 1024 * 1024, vinyl_max_tuple_size = 1024 * 1024, vinyl_read_threads = 1, vinyl_write_threads = 2, vinyl_timeout = 60, vinyl_run_count_per_level = 2, vinyl_run_size_ratio = 3.5, vinyl_range_size = 1024 * 1024 * 1024, vinyl_page_size = 8 * 1024, vinyl_bloom_fpr = 0.05, log = nil, log_nonblock = true, log_level = 5, log_format = "plain", io_collect_interval = nil, readahead = 16320, snap_io_rate_limit = nil, -- no limit too_long_threshold = 0.5, wal_mode = "write", rows_per_wal = 500000, wal_max_size = 256 * 1024 * 1024, wal_dir_rescan_delay= 2, force_recovery = false, replication = nil, instance_uuid = nil, replicaset_uuid = nil, custom_proc_title = nil, pid_file = nil, background = false, username = nil, coredump = false, read_only = false, hot_standby = false, checkpoint_interval = 3600, checkpoint_count = 2, worker_pool_threads = 4, replication_timeout 
= 1, replication_sync_lag = 10, replication_connect_timeout = 30, replication_connect_quorum = nil, -- connect all } -- types of available options -- could be comma separated lua types or 'any' if any type is allowed local template_cfg = { listen = 'string, number', memtx_memory = 'number', memtx_min_tuple_size = 'number', memtx_max_tuple_size = 'number', slab_alloc_factor = 'number', work_dir = 'string', memtx_dir = 'string', wal_dir = 'string', vinyl_dir = 'string', vinyl_memory = 'number', vinyl_cache = 'number', vinyl_max_tuple_size = 'number', vinyl_read_threads = 'number', vinyl_write_threads = 'number', vinyl_timeout = 'number', vinyl_run_count_per_level = 'number', vinyl_run_size_ratio = 'number', vinyl_range_size = 'number', vinyl_page_size = 'number', vinyl_bloom_fpr = 'number', log = 'string', log_nonblock = 'boolean', log_level = 'number', log_format = 'string', io_collect_interval = 'number', readahead = 'number', snap_io_rate_limit = 'number', too_long_threshold = 'number', wal_mode = 'string', rows_per_wal = 'number', wal_max_size = 'number', wal_dir_rescan_delay= 'number', force_recovery = 'boolean', replication = 'string, number, table', instance_uuid = 'string', replicaset_uuid = 'string', custom_proc_title = 'string', pid_file = 'string', background = 'boolean', username = 'string', coredump = 'boolean', checkpoint_interval = 'number', checkpoint_count = 'number', read_only = 'boolean', hot_standby = 'boolean', worker_pool_threads = 'number', replication_timeout = 'number', replication_sync_lag = 'number', replication_connect_timeout = 'number', replication_connect_quorum = 'number', } local function normalize_uri(port) if port == nil or type(port) == 'table' then return port end return tostring(port); end -- options that require special handling local modify_cfg = { listen = normalize_uri, replication = normalize_uri, } local function purge_password_from_uri(uri) local parsed = urilib.parse(uri) if parsed ~= nil and parsed.password ~= nil then 
return urilib.format(parsed, false) end return uri end local function purge_password_from_uris(uri) if uri == nil then return nil end if type(uri) == 'table' then local new_table = {} for k, v in pairs(uri) do new_table[k] = purge_password_from_uri(v) end return new_table end return purge_password_from_uri(uri) end -- options that require modification for logging local log_cfg_option = { replication = purge_password_from_uris, } -- dynamically settable options local dynamic_cfg = { listen = private.cfg_set_listen, replication = private.cfg_set_replication, log_level = private.cfg_set_log_level, log_format = private.cfg_set_log_format, io_collect_interval = private.cfg_set_io_collect_interval, readahead = private.cfg_set_readahead, too_long_threshold = private.cfg_set_too_long_threshold, snap_io_rate_limit = private.cfg_set_snap_io_rate_limit, read_only = private.cfg_set_read_only, memtx_max_tuple_size = private.cfg_set_memtx_max_tuple_size, vinyl_max_tuple_size = private.cfg_set_vinyl_max_tuple_size, vinyl_cache = private.cfg_set_vinyl_cache, vinyl_timeout = private.cfg_set_vinyl_timeout, checkpoint_count = private.cfg_set_checkpoint_count, checkpoint_interval = private.checkpoint_daemon.set_checkpoint_interval, worker_pool_threads = private.cfg_set_worker_pool_threads, -- do nothing, affects new replicas, which query this value on start wal_dir_rescan_delay = function() end, custom_proc_title = function() require('title').update(box.cfg.custom_proc_title) end, force_recovery = function() end, replication_timeout = private.cfg_set_replication_timeout, replication_connect_timeout = private.cfg_set_replication_connect_timeout, replication_connect_quorum = private.cfg_set_replication_connect_quorum, instance_uuid = function() if box.cfg.instance_uuid ~= box.info.uuid then box.error(box.error.CFG, 'instance_uuid', 'Can\'t change instance uuid') end end, replicaset_uuid = function(new_value) if box.cfg.replicaset_uuid ~= box.info.cluster.uuid then 
box.error(box.error.CFG, 'replicaset_uuid', 'Can\'t change replicaset uuid') end end, } local dynamic_cfg_skip_at_load = { wal_mode = true, listen = true, replication = true, replication_timeout = true, replication_connect_timeout = true, replication_connect_quorum = true, wal_dir_rescan_delay = true, custom_proc_title = true, force_recovery = true, instance_uuid = true, replicaset_uuid = true, } local function convert_gb(size) return math.floor(size * 1024 * 1024 * 1024) end -- Old to new config translation tables local translate_cfg = { snapshot_count = {'checkpoint_count'}, snapshot_period = {'checkpoint_interval'}, slab_alloc_arena = {'memtx_memory', convert_gb}, slab_alloc_minimal = {'memtx_min_tuple_size'}, slab_alloc_maximal = {'memtx_max_tuple_size'}, snap_dir = {'memtx_dir'}, logger = {'log'}, logger_nonblock = {'log_nonblock'}, panic_on_snap_error = {'force_recovery', function (p) return not p end}, panic_on_wal_error = {'force_recovery', function (p) return not p end}, replication_source = {'replication'}, } -- Upgrade old config local function upgrade_cfg(cfg, translate_cfg) if cfg == nil then return {} end local result_cfg = {} for k, v in pairs(cfg) do local translation = translate_cfg[k] if translation ~= nil then log.warn('Deprecated option %s, please use %s instead', k, translation[1]) local new_val if translation[2] == nil then new_val = v else new_val = translation[2](v) end if cfg[translation[1]] ~= nil and cfg[translation[1]] ~= new_val then box.error(box.error.CFG, k, 'can not override a value for a deprecated option') end result_cfg[translation[1]] = new_val else result_cfg[k] = v end end return result_cfg end local function prepare_cfg(cfg, default_cfg, template_cfg, modify_cfg, prefix) if cfg == nil then return {} end if type(cfg) ~= 'table' then error("Error: cfg should be a table") end -- just pass {.. 
dont_check = true, ..} to disable check below if cfg.dont_check then return end local readable_prefix = '' if prefix ~= nil and prefix ~= '' then readable_prefix = prefix .. '.' end local new_cfg = {} for k,v in pairs(cfg) do local readable_name = readable_prefix .. k; if template_cfg[k] == nil then box.error(box.error.CFG, readable_name , "unexpected option") elseif v == "" or v == nil then -- "" and NULL = ffi.cast('void *', 0) set option to default value v = default_cfg[k] elseif template_cfg[k] == 'any' then -- any type is ok elseif type(template_cfg[k]) == 'table' then if type(v) ~= 'table' then box.error(box.error.CFG, readable_name, "should be a table") end v = prepare_cfg(v, default_cfg[k], template_cfg[k], modify_cfg[k], readable_name) elseif (string.find(template_cfg[k], ',') == nil) then -- one type if type(v) ~= template_cfg[k] then box.error(box.error.CFG, readable_name, "should be of type ".. template_cfg[k]) end else local good_types = string.gsub(template_cfg[k], ' ', ''); if (string.find(',' .. good_types .. ',', ',' .. type(v) .. ',') == nil) then good_types = string.gsub(good_types, ',', ', '); box.error(box.error.CFG, readable_name, "should be one of types ".. 
template_cfg[k]) end end if modify_cfg ~= nil and type(modify_cfg[k]) == 'function' then v = modify_cfg[k](v) end new_cfg[k] = v end return new_cfg end local function apply_default_cfg(cfg, default_cfg) for k,v in pairs(default_cfg) do if cfg[k] == nil then cfg[k] = v elseif type(v) == 'table' then apply_default_cfg(cfg[k], v) end end end local function reload_cfg(oldcfg, cfg) cfg = upgrade_cfg(cfg, translate_cfg) local newcfg = prepare_cfg(cfg, default_cfg, template_cfg, modify_cfg) -- iterate over original table because prepare_cfg() may store NILs for key, val in pairs(cfg) do if dynamic_cfg[key] == nil and oldcfg[key] ~= val then box.error(box.error.RELOAD_CFG, key); end end for key in pairs(cfg) do local val = newcfg[key] local oldval = oldcfg[key] if oldval ~= val then rawset(oldcfg, key, val) if not pcall(dynamic_cfg[key]) then rawset(oldcfg, key, oldval) -- revert the old value return box.error() -- re-throw end if log_cfg_option[key] ~= nil then val = log_cfg_option[key](val) end log.info("set '%s' configuration option to %s", key, json.encode(val)) end end if type(box.on_reload_configuration) == 'function' then box.on_reload_configuration() end end local box_cfg_guard_whitelist = { error = true; internal = true; index = true; session = true; tuple = true; runtime = true; NULL = true; }; local box = require('box') -- Move all box members except 'error' to box_configured local box_configured = {} for k, v in pairs(box) do box_configured[k] = v if not box_cfg_guard_whitelist[k] then box[k] = nil end end setmetatable(box, { __index = function(table, index) error(debug.traceback("Please call box.cfg{} first")) error("Please call box.cfg{} first") end }) local function load_cfg(cfg) box.internal.schema.init() cfg = upgrade_cfg(cfg, translate_cfg) cfg = prepare_cfg(cfg, default_cfg, template_cfg, modify_cfg) apply_default_cfg(cfg, default_cfg); -- Save new box.cfg box.cfg = cfg if not pcall(private.cfg_check) then box.cfg = load_cfg -- restore original box.cfg 
return box.error() -- re-throw exception from check_cfg() end -- Restore box members after initial configuration for k, v in pairs(box_configured) do box[k] = v end setmetatable(box, nil) box_configured = nil box.cfg = setmetatable(cfg, { __newindex = function(table, index) error('Attempt to modify a read-only table') end, __call = reload_cfg, }) private.cfg_load() for key, fun in pairs(dynamic_cfg) do local val = cfg[key] if val ~= nil and not dynamic_cfg_skip_at_load[key] then fun() if val ~= default_cfg[key] then log.info("set '%s' configuration option to %s", key, json.encode(val)) end end end if not box.cfg.read_only and not box.cfg.replication then box.schema.upgrade{auto = true} end end box.cfg = load_cfg -- gh-810: -- hack luajit default cpath -- commented out because we fixed luajit to build properly, see -- https://github.com/luajit/luajit/issues/76 -- local format = require('tarantool').build.mod_format -- package.cpath = package.cpath:gsub( -- '?.so', '?.' .. format -- ):gsub('loadall.so', 'loadall.' .. format) tarantool_1.9.1.26.g63eb81e3c/src/box/lua/cfg.h0000664000000000000000000000326413306560010017255 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_CFG_H #define INCLUDES_TARANTOOL_LUA_CFG_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_cfg_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_CFG_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/error.h0000664000000000000000000000327413306560010017650 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_ERROR_H #define INCLUDES_TARANTOOL_LUA_ERROR_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_error_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_ERROR_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/upgrade.lua0000664000000000000000000011361413306565107020514 0ustar rootrootlocal log = require('log') local bit = require('bit') local json = require('json') -- Guest user id - the default user local GUEST = 0 -- Super User ID local ADMIN = 1 -- role 'PUBLIC' is special, it's automatically granted to every user local PUBLIC = 2 -- role 'REPLICATION' local REPLICATION = 3 -- role 'SUPER' -- choose a fancy id to not clash with any existing role or -- user during upgrade local SUPER = 31 -------------------------------------------------------------------------------- -- Utils -------------------------------------------------------------------------------- local function setmap(tab) return setmetatable(tab, { __serialize = 'map' }) end local function ismap(tab) if type(tab) ~= 'table' then return false end local mt = getmetatable(tab) return mt and (mt.__serialize == 'map' or mt.__serialize == 'mapping') end local mkversion = {} mkversion.__index = mkversion setmetatable(mkversion, {__call = function(c, ...) return c.new(...) 
end}) function mkversion.new(major, minor, patch) local self = setmetatable({}, mkversion) self.major = major self.minor = minor self.patch = patch self.id = bit.bor(bit.lshift(bit.bor(bit.lshift(major, 8), minor), 8), patch) return self end function mkversion.__tostring(self) return string.format('%s.%s.%s', self.major, self.minor, self.patch) end function mkversion.__eq(lhs, rhs) return lhs.id == rhs.id end function mkversion.__lt(lhs, rhs) return lhs.id < rhs.id end -- space:truncate() doesn't work with disabled triggers on __index local function truncate(space) local pk = space.index[0] while pk:len() > 0 do local state, t for state, t in pk:pairs() do local key = {} for _k2, parts in ipairs(pk.parts) do table.insert(key, t[parts.fieldno]) end space:delete(key) end end end local function set_system_triggers(val) box.space._space:run_triggers(val) box.space._index:run_triggers(val) box.space._user:run_triggers(val) box.space._func:run_triggers(val) box.space._priv:run_triggers(val) end -------------------------------------------------------------------------------- -- Bootstrap -------------------------------------------------------------------------------- local function erase() truncate(box.space._space) truncate(box.space._index) truncate(box.space._user) truncate(box.space._func) truncate(box.space._priv) truncate(box.space._sequence_data) truncate(box.space._sequence) truncate(box.space._truncate) truncate(box.space._collation) --truncate(box.space._schema) box.space._schema:delete('version') box.space._schema:delete('max_id') end local function initial() -- stick to the following convention: -- prefer user id (owner id) in field #1 -- prefer object name in field #2 -- index on owner id is index #1 -- index on object name is index #2 -- local _schema = box.space[box.schema.SCHEMA_ID] local _space = box.space[box.schema.SPACE_ID] local _index = box.space[box.schema.INDEX_ID] local _func = box.space[box.schema.FUNC_ID] local _user = 
box.space[box.schema.USER_ID] local _priv = box.space[box.schema.PRIV_ID] local _cluster = box.space[box.schema.CLUSTER_ID] local MAP = setmap({}) -- -- _schema -- log.info("create space _schema") _space:insert{_schema.id, ADMIN, '_schema', 'memtx', 0, MAP, {}} log.info("create index primary on _schema") _index:insert{_schema.id, 0, 'primary', 'tree', { unique = true }, {{0, 'str'}}} -- -- _space -- log.info("create space _space") _space:insert{_space.id, ADMIN, '_space', 'memtx', 0, MAP, {}} -- space name is unique log.info("create index primary on _space") _index:insert{_space.id, 0, 'primary', 'tree', { unique = true }, {{0, 'num'}}} log.info("create index owner on _space") _index:insert{_space.id, 1, 'owner', 'tree', {unique = false }, {{1, 'num'}}} log.info("create index index name on _space") _index:insert{_space.id, 2, 'name', 'tree', { unique = true }, {{2, 'str'}}} -- -- _index -- log.info("create space _index") _space:insert{_index.id, ADMIN, '_index', 'memtx', 0, MAP, {}} -- index name is unique within a space log.info("create index primary on _index") _index:insert{_index.id, 0, 'primary', 'tree', {unique = true}, {{0, 'num'}, {1, 'num'}}} log.info("create index name on _index") _index:insert{_index.id, 2, 'name', 'tree', {unique = true}, {{0, 'num'}, {2, 'str'}}} -- -- _func -- log.info("create space _func") _space:insert{_func.id, ADMIN, '_func', 'memtx', 0, MAP, {}} -- function name and id are unique log.info("create index _func:primary") _index:insert{_func.id, 0, 'primary', 'tree', {unique = true}, {{0, 'num'}}} log.info("create index _func:owner") _index:insert{_func.id, 1, 'owner', 'tree', {unique = false}, {{1, 'num'}}} log.info("create index _func:name") _index:insert{_func.id, 2, 'name', 'tree', {unique = true}, {{2, 'str'}}} -- -- _user -- log.info("create space _user") _space:insert{_user.id, ADMIN, '_user', 'memtx', 0, MAP, {}} -- user name and id are unique log.info("create index _func:primary") _index:insert{_user.id, 0, 'primary', 
'tree', {unique = true}, {{0, 'num'}}} log.info("create index _func:owner") _index:insert{_user.id, 1, 'owner', 'tree', {unique = false}, {{1, 'num'}}} log.info("create index _func:name") _index:insert{_user.id, 2, 'name', 'tree', {unique = true}, {{2, 'str'}}} -- -- _priv -- log.info("create space _priv") _space:insert{_priv.id, ADMIN, '_priv', 'memtx', 0, MAP, {}} -- -- space schema is: grantor id, user id, object_type, object_id, privilege -- primary key: user id, object type, object id log.info("create index primary on _priv") _index:insert{_priv.id, 0, 'primary', 'tree', {unique = true}, {{1, 'num'}, {2, 'str'}, {3, 'num'}}} -- owner index - to quickly find all privileges granted by a user log.info("create index owner on _priv") _index:insert{_priv.id, 1, 'owner', 'tree', {unique = false}, {{0, 'num'}}} -- object index - to quickly find all grants on a given object log.info("create index object on _priv") _index:insert{_priv.id, 2, 'object', 'tree', {unique = false}, {{2, 'str'}, {3, 'num'}}} -- -- _cluster -- log.info("create space _cluster") _space:insert{_cluster.id, ADMIN, '_cluster', 'memtx', 0, MAP, {}} -- primary key: node id log.info("create index primary on _cluster") _index:insert{_cluster.id, 0, 'primary', 'tree', {unique = true}, {{0, 'num'}}} -- node uuid key: node uuid log.info("create index uuid on _cluster") _index:insert{_cluster.id, 1, 'uuid', 'tree', {unique = true}, {{1, 'str'}}} -- -- Pre-create user and grants log.info("create user guest") _user:insert{GUEST, ADMIN, 'guest', 'user'} log.info("create user admin") _user:insert{ADMIN, ADMIN, 'admin', 'user'} log.info("create role public") _user:insert{PUBLIC, ADMIN, 'public', 'role'} log.info("grant read,write,execute on universe to admin") _priv:insert{ADMIN, ADMIN, 'universe', 0, 7} -- grant role 'public' to 'guest' log.info("grant role public to guest") _priv:insert{ADMIN, GUEST, 'role', PUBLIC, 4} log.info("set max_id to box.schema.SYSTEM_ID_MAX") _schema:insert{'max_id', 
box.schema.SYSTEM_ID_MAX} log.info("set schema version to 1.6.0") _schema:insert({'version', 1, 6, 0}) end -------------------------------------------------------------------------------- -- Tarantool 1.6.8 -------------------------------------------------------------------------------- local function upgrade_index_options_to_1_6_8() local indexes = {} for _, def in box.space._index:pairs() do if type(def[5]) == 'number' then -- Tarantool < 1.6.5 format local part_count = def[6] local new_def = def:update({{'#', 7, 2 * part_count}}):totable() new_def[5] = setmap({}) new_def[5].unique = def[5] ~= 0 new_def[6] = {} for i=1,part_count,1 do local field_id = def[7 + (i - 1) * 2] local field_type = def[7 + (i - 1) * 2 + 1] table.insert(new_def[6], { field_id, field_type }) end table.insert(indexes, new_def) elseif not ismap(def[5]) then log.error("unexpected index options: %s", json.encode(def[5])) end end for _, new_def in ipairs(indexes) do log.info("alter index %s on %s set options to %s, parts to %s", new_def[3], box.space[new_def[1]].name, json.encode(new_def[5]), json.encode(new_def[6])) box.space._index:replace(new_def) end end local function upgrade_space_options_to_1_6_8() local spaces = {} for _, def in box.space._space:pairs() do local new_def = def:totable() new_def[6] = setmap({}) if def[6] == nil or def[6] == "" then -- Tarantool < 1.6.8 format table.insert(spaces, new_def) elseif def[6] == 'temporary' then -- Tarantool < 1.6.8 format new_def[6].temporary = true table.insert(spaces, new_def) elseif not ismap(def[6]) then log.error("unexpected space options: %s", json.encode(def[6])) end end for _, new_def in ipairs(spaces) do log.info("alter space %s set options to %s", new_def[3], json.encode(new_def[6])) box.space._space:update(new_def[1], {{'=', 6, new_def[6]}}) end end local function upgrade_space_format_to_1_6_8() local space_def = box.space._space:get(box.space._schema.id) if space_def[7] == nil or next(space_def[7]) == nil then local format = {} 
format[1] = {type='str', name='key'} log.info("alter space _schema set format to %s", json.encode(format)) box.space._space:update(box.space._schema.id, {{'=', 7, format}}) end local space_def = box.space._space:get(box.space._space.id) if space_def[7] == nil or next(space_def[7]) == nil then local format = {} format[1] = {name='id', type='num'} format[2] = {name='owner', type='num'} format[3] = {name='name', type='str'} format[4] = {name='engine', type='str'} format[5] = {name='field_count', type='num'} format[6] = {name='flags', type='str'} format[7] = {name='format', type='*'} log.info("alter space _space set format") box.space._space:format(format) end local space_def = box.space._space:get(box.space._index.id) if space_def[7] == nil or next(space_def[7]) == nil or space_def[7][5].name == 'unique' then local format = {} format[1] = {name = 'id', type = 'num'} format[2] = {name = 'iid', type = 'num'} format[3] = {name = 'name', type = 'str'} format[4] = {name = 'type', type = 'str'} format[5] = {name = 'opts', type = 'array'} format[6] = {name = 'parts', type = 'array'} log.info("alter space _index set format") box.space._index:format(format) end local space_def = box.space._space:get(box.space._func.id) if space_def[7] == nil or next(space_def[7]) == nil then local format = {} format[1] = {name='id', type='num'} format[2] = {name='owner', type='num'} format[3] = {name='name', type='str'} format[4] = {name='setuid', type='num'} log.info("alter space _func set format") box.space._func:format(format) end local space_def = box.space._space:get(box.space._user.id) if space_def[7] == nil or next(space_def[7]) == nil then local format = {} format[1] = {name='id', type='num'} format[2] = {name='owner', type='num'} format[3] = {name='name', type='str'} format[4] = {name='type', type='str'} format[5] = {name='auth', type='*'} log.info("alter space _user set format") box.space._user:format(format) end local space_def = box.space._space:get(box.space._priv.id) if 
space_def[7] == nil or next(space_def[7]) == nil then local format = {} format={} format[1] = {name='grantor', type='num'} format[2] = {name='grantee', type='num'} format[3] = {name='object_type', type='str'} format[4] = {name='object_id', type='num'} format[5] = {name='privilege', type='num'} log.info("alter space _priv set format") box.space._priv:format(format) end local space_def = box.space._space:get(box.space._cluster.id) if space_def[7] == nil or next(space_def[7]) == nil then local format = {} format[1] = {name='id', type='num'} format[2] = {name='uuid', type='str'} log.info("alter space _schema set format") box.space._cluster:format(format) end local spaces = {} for _, space_def in box.space._space:pairs() do if space_def[7] == nil then table.insert(spaces, space_def) end end for _, space_def in ipairs(spaces) do log.info("alter space %s set format", space_def[3]) box.space._space:update(space_def[1], {{'=', 7, {}}}) end end local function create_sysview(source_id, target_id) -- -- Create definitions for the system view, and grant -- privileges on system views to 'PUBLIC' role -- local def = box.space._space:get(source_id):totable() def[1] = target_id def[3] = "_v"..def[3]:sub(2) def[4] = 'sysview' local space_def = box.space._space:get(target_id) if space_def == nil then log.info("create view %s...", def[3]) box.space._space:replace(def) elseif json.encode(space_def[7]) ~= json.encode(def[7]) then -- sync box.space._vXXX format with box.space._XXX format log.info("alter space %s set format", def[3]) box.space._space:update(def[1], {{ '=', 7, def[7] }}) end local idefs = {} for _, idef in box.space._index:pairs(source_id, { iterator = 'EQ'}) do idef = idef:totable() idef[1] = target_id table.insert(idefs, idef) end for _, idef in ipairs(idefs) do if box.space._index:get({idef[1], idef[2]}) == nil then log.info("create index %s on %s", idef[3], def[3]) box.space._index:replace(idef) end end -- public can read system views if 
box.space._priv.index.primary:count({PUBLIC, 'space', target_id}) == 0 then log.info("grant read access to 'public' role for %s view", def[3]) box.space._priv:insert({1, PUBLIC, 'space', target_id, 1}) end end local function upgrade_users_to_1_6_8() if box.space._user.index.name:count({'replication'}) == 0 then log.info("create role replication") local RPL_ID = box.space._user:auto_increment{ADMIN, 'replication', 'role'}[1] -- replication can read the entire universe log.info("grant read on universe to replication") box.space._priv:replace{1, RPL_ID, 'universe', 0, 1} -- replication can append to '_cluster' system space log.info("grant write on space _cluster to replication") box.space._priv:replace{1, RPL_ID, 'space', box.space._cluster.id, 2} end if box.space._priv.index.primary:count({ADMIN, 'universe', 0}) == 0 then -- grant admin access to the universe log.info("grant all on universe to admin") box.space._priv:insert{ADMIN, ADMIN, 'universe', 0, 7} end if box.space._func.index.name:count("box.schema.user.info") == 0 then -- create "box.schema.user.info" function log.info('create function "box.schema.user.info" with setuid') box.space._func:auto_increment{ADMIN, 'box.schema.user.info', 1} -- grant 'public' role access to 'box.schema.user.info' function log.info('grant execute on function "box.schema.user.info" to public') box.space._priv:replace{ADMIN, PUBLIC, 'function', 1, 4} end end local function upgrade_priv_to_1_6_8() -- see e5862c387c7151b812810b1a51086b82a7eedcc4 local index_def = box.space._index.index.name:get({312, 'owner'}) local parts = index_def[6] if parts[1][1] == 1 then log.info("fix index owner for _priv") parts = {{0, 'num'}} box.space._index:update({index_def[1], index_def[2]}, {{'=', 6, parts }}) end end local function upgrade_func_to_1_6_8() local funcs = {} for _, def in box.space._func:pairs() do local new_def = def:totable() if new_def[5] == nil then new_def[5] = 'LUA' table.insert(funcs, new_def) end end for _, def in ipairs(funcs) do 
box.space._func:update(def[1], {{ '=', 5, def[5] }}) end end local function upgrade_to_1_6_8() upgrade_index_options_to_1_6_8() upgrade_space_options_to_1_6_8() upgrade_space_format_to_1_6_8() upgrade_users_to_1_6_8() upgrade_priv_to_1_6_8() upgrade_func_to_1_6_8() create_sysview(box.schema.SPACE_ID, box.schema.VSPACE_ID) create_sysview(box.schema.INDEX_ID, box.schema.VINDEX_ID) create_sysview(box.schema.USER_ID, box.schema.VUSER_ID) create_sysview(box.schema.FUNC_ID, box.schema.VFUNC_ID) create_sysview(box.schema.PRIV_ID, box.schema.VPRIV_ID) local max_id = box.space._schema:get('max_id') if max_id == nil then local id = box.space._space.index.primary:max()[1] if id < box.schema.SYSTEM_ID_MAX then id = box.schema.SYSTEM_ID_MAX end log.info("set max_id to %d", id) box.space._schema:insert{'max_id', id} end end -------------------------------------------------------------------------------- -- Tarantool 1.7.1 -------------------------------------------------------------------------------- local function upgrade_users_to_1_7_1() box.schema.user.passwd('guest', '') end local function upgrade_to_1_7_1() upgrade_users_to_1_7_1() end -------------------------------------------------------------------------------- -- Tarantool 1.7.2 -------------------------------------------------------------------------------- local function upgrade_field_types_to_1_7_2() local field_types_v16 = { num = 'unsigned'; int = 'integer'; str = 'string'; }; local indexes = {} for _, deftuple in box.space._index:pairs() do local def = deftuple:totable() local changed = false local parts = def[6] for _, part in pairs(parts) do local field_type = part[2]:lower() part[2] = field_types_v16[field_type] or field_type if field_type ~= part[2] then changed = true end end if changed then table.insert(indexes, def) end end for _, new_def in ipairs(indexes) do log.info("alter index %s on %s set parts to %s", new_def[3], box.space[new_def[1]].name, json.encode(new_def[6])) box.space._index:replace(new_def) 
end end local function upgrade_to_1_7_2() upgrade_field_types_to_1_7_2() end -------------------------------------------------------------------------------- -- Tarantool 1.7.5 -------------------------------------------------------------------------------- local function create_truncate_space() local _truncate = box.space[box.schema.TRUNCATE_ID] log.info("create space _truncate") box.space._space:insert{_truncate.id, ADMIN, '_truncate', 'memtx', 0, setmap({}), {{name = 'id', type = 'unsigned'}, {name = 'count', type = 'unsigned'}}} log.info("create index primary on _truncate") box.space._index:insert{_truncate.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} local _priv = box.space[box.schema.PRIV_ID] _priv:insert{ADMIN, PUBLIC, 'space', _truncate.id, 2} end local function update_existing_users_to_1_7_5() local def_ids_to_update = {} for _, def in box.space._user:pairs() do local new_def = def:totable() if new_def[5] == nil then table.insert(def_ids_to_update, new_def[1]) end end for _, id in ipairs(def_ids_to_update) do box.space._user:update(id, {{'=', 5, setmap({})}}) end end local function update_space_formats_to_1_7_5() local format = {} format[1] = {type='string', name='key'} box.space._schema:format(format) format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='owner', type='unsigned'} format[3] = {name='name', type='string'} format[4] = {name='engine', type='string'} format[5] = {name='field_count', type='unsigned'} format[6] = {name='flags', type='map'} format[7] = {name='format', type='array'} box.space._space:format(format) box.space._vspace:format(format) format = {} format[1] = {name = 'id', type = 'unsigned'} format[2] = {name = 'iid', type = 'unsigned'} format[3] = {name = 'name', type = 'string'} format[4] = {name = 'type', type = 'string'} format[5] = {name = 'opts', type = 'map'} format[6] = {name = 'parts', type = 'array'} box.space._index:format(format) box.space._vindex:format(format) format = {} format[1] = 
{name='id', type='unsigned'} format[2] = {name='owner', type='unsigned'} format[3] = {name='name', type='string'} format[4] = {name='setuid', type='unsigned'} box.space._func:format(format) box.space._vfunc:format(format) format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='owner', type='unsigned'} format[3] = {name='name', type='string'} format[4] = {name='type', type='string'} format[5] = {name='auth', type='map'} box.space._user:format(format) box.space._vuser:format(format) format = {} format[1] = {name='grantor', type='unsigned'} format[2] = {name='grantee', type='unsigned'} format[3] = {name='object_type', type='string'} format[4] = {name='object_id', type='unsigned'} format[5] = {name='privilege', type='unsigned'} box.space._priv:format(format) box.space._vpriv:format(format) format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='uuid', type='string'} box.space._cluster:format(format) end local function upgrade_to_1_7_5() create_truncate_space() update_space_formats_to_1_7_5() update_existing_users_to_1_7_5() end local function initial_1_7_5() -- stick to the following convention: -- prefer user id (owner id) in field #1 -- prefer object name in field #2 -- index on owner id is index #1 -- index on object name is index #2 -- local _schema = box.space[box.schema.SCHEMA_ID] local _space = box.space[box.schema.SPACE_ID] local _index = box.space[box.schema.INDEX_ID] local _func = box.space[box.schema.FUNC_ID] local _user = box.space[box.schema.USER_ID] local _priv = box.space[box.schema.PRIV_ID] local _cluster = box.space[box.schema.CLUSTER_ID] local _truncate = box.space[box.schema.TRUNCATE_ID] local MAP = setmap({}) -- -- _schema -- log.info("create space _schema") local format = {} format[1] = {type='string', name='key'} _space:insert{_schema.id, ADMIN, '_schema', 'memtx', 0, MAP, format} log.info("create index primary on _schema") _index:insert{_schema.id, 0, 'primary', 'tree', { unique = true }, {{0, 'string'}}} -- -- 
_space -- log.info("create space _space") format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='owner', type='unsigned'} format[3] = {name='name', type='string'} format[4] = {name='engine', type='string'} format[5] = {name='field_count', type='unsigned'} format[6] = {name='flags', type='map'} format[7] = {name='format', type='array'} _space:insert{_space.id, ADMIN, '_space', 'memtx', 0, MAP, format} -- space name is unique log.info("create index primary on _space") _index:insert{_space.id, 0, 'primary', 'tree', { unique = true }, {{0, 'unsigned'}}} log.info("create index owner on _space") _index:insert{_space.id, 1, 'owner', 'tree', {unique = false }, {{1, 'unsigned'}}} log.info("create index index name on _space") _index:insert{_space.id, 2, 'name', 'tree', { unique = true }, {{2, 'string'}}} create_sysview(box.schema.SPACE_ID, box.schema.VSPACE_ID) -- -- _index -- log.info("create space _index") format = {} format[1] = {name = 'id', type = 'unsigned'} format[2] = {name = 'iid', type = 'unsigned'} format[3] = {name = 'name', type = 'string'} format[4] = {name = 'type', type = 'string'} format[5] = {name = 'opts', type = 'map'} format[6] = {name = 'parts', type = 'array'} _space:insert{_index.id, ADMIN, '_index', 'memtx', 0, MAP, format} -- index name is unique within a space log.info("create index primary on _index") _index:insert{_index.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}, {1, 'unsigned'}}} log.info("create index name on _index") _index:insert{_index.id, 2, 'name', 'tree', {unique = true}, {{0, 'unsigned'}, {2, 'string'}}} create_sysview(box.schema.INDEX_ID, box.schema.VINDEX_ID) -- -- _func -- log.info("create space _func") format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='owner', type='unsigned'} format[3] = {name='name', type='string'} format[4] = {name='setuid', type='unsigned'} _space:insert{_func.id, ADMIN, '_func', 'memtx', 0, MAP, format} -- function name and id are unique log.info("create 
index _func:primary") _index:insert{_func.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} log.info("create index _func:owner") _index:insert{_func.id, 1, 'owner', 'tree', {unique = false}, {{1, 'unsigned'}}} log.info("create index _func:name") _index:insert{_func.id, 2, 'name', 'tree', {unique = true}, {{2, 'string'}}} create_sysview(box.schema.FUNC_ID, box.schema.VFUNC_ID) -- -- _user -- log.info("create space _user") format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='owner', type='unsigned'} format[3] = {name='name', type='string'} format[4] = {name='type', type='string'} format[5] = {name='auth', type='map'} _space:insert{_user.id, ADMIN, '_user', 'memtx', 0, MAP, format} -- user name and id are unique log.info("create index _func:primary") _index:insert{_user.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} log.info("create index _func:owner") _index:insert{_user.id, 1, 'owner', 'tree', {unique = false}, {{1, 'unsigned'}}} log.info("create index _func:name") _index:insert{_user.id, 2, 'name', 'tree', {unique = true}, {{2, 'string'}}} create_sysview(box.schema.USER_ID, box.schema.VUSER_ID) -- -- _priv -- log.info("create space _priv") format = {} format[1] = {name='grantor', type='unsigned'} format[2] = {name='grantee', type='unsigned'} format[3] = {name='object_type', type='string'} format[4] = {name='object_id', type='unsigned'} format[5] = {name='privilege', type='unsigned'} _space:insert{_priv.id, ADMIN, '_priv', 'memtx', 0, MAP, format} -- user id, object type and object id are unique log.info("create index primary on _priv") _index:insert{_priv.id, 0, 'primary', 'tree', {unique = true}, {{1, 'unsigned'}, {2, 'string'}, {3, 'unsigned'}}} -- owner index - to quickly find all privileges granted by a user log.info("create index owner on _priv") _index:insert{_priv.id, 1, 'owner', 'tree', {unique = false}, {{0, 'unsigned'}}} -- object index - to quickly find all grants on a given object log.info("create index 
object on _priv") _index:insert{_priv.id, 2, 'object', 'tree', {unique = false}, {{2, 'string'}, {3, 'unsigned'}}} create_sysview(box.schema.PRIV_ID, box.schema.VPRIV_ID) -- -- _cluster -- log.info("create space _cluster") format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='uuid', type='string'} _space:insert{_cluster.id, ADMIN, '_cluster', 'memtx', 0, MAP, format} -- primary key: node id log.info("create index primary on _cluster") _index:insert{_cluster.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} -- node uuid key: node uuid log.info("create index uuid on _cluster") _index:insert{_cluster.id, 1, 'uuid', 'tree', {unique = true}, {{1, 'string'}}} -- -- _truncate -- log.info("create space _truncate") format = {} format[1] = {name='id', type='unsigned'} format[2] = {name='count', type='unsigned'} _space:insert{_truncate.id, ADMIN, '_truncate', 'memtx', 0, MAP, format} -- primary key: space id log.info("create index primary on _truncate") _index:insert{_truncate.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} -- -- Create users -- log.info("create user guest") _user:insert{GUEST, ADMIN, 'guest', 'user', MAP} box.schema.user.passwd('guest', '') log.info("create user admin") _user:insert{ADMIN, ADMIN, 'admin', 'user', MAP} log.info("create role public") _user:insert{PUBLIC, ADMIN, 'public', 'role', MAP} log.info("create role replication") _user:insert{REPLICATION, ADMIN, 'replication', 'role', MAP} -- -- Create grants -- log.info("grant read,write,execute on universe to admin") _priv:insert{ADMIN, ADMIN, 'universe', 0, 7} -- grant role 'public' to 'guest' log.info("grant role public to guest") _priv:insert{ADMIN, GUEST, 'role', PUBLIC, 4} -- replication can read the entire universe log.info("grant read on universe to replication") _priv:replace{ADMIN, REPLICATION, 'universe', 0, 1} -- replication can append to '_cluster' system space log.info("grant write on space _cluster to replication") _priv:replace{ADMIN, 
REPLICATION, 'space', _cluster.id, 2} _priv:insert{ADMIN, PUBLIC, 'space', _truncate.id, 2} -- create "box.schema.user.info" function log.info('create function "box.schema.user.info" with setuid') _func:replace{1, ADMIN, 'box.schema.user.info', 1, 'LUA'} -- grant 'public' role access to 'box.schema.user.info' function log.info('grant execute on function "box.schema.user.info" to public') _priv:replace{ADMIN, PUBLIC, 'function', 1, 4} log.info("set max_id to box.schema.SYSTEM_ID_MAX") _schema:insert{'max_id', box.schema.SYSTEM_ID_MAX} log.info("set schema version to 1.7.5") _schema:insert({'version', 1, 7, 5}) end -------------------------------------------------------------------------------- -- Tarantool 1.7.6 local function create_sequence_space() local _space = box.space[box.schema.SPACE_ID] local _index = box.space[box.schema.INDEX_ID] local _sequence = box.space[box.schema.SEQUENCE_ID] local _sequence_data = box.space[box.schema.SEQUENCE_DATA_ID] local _space_sequence = box.space[box.schema.SPACE_SEQUENCE_ID] local MAP = setmap({}) log.info("create space _sequence") _space:insert{_sequence.id, ADMIN, '_sequence', 'memtx', 0, MAP, {{name = 'id', type = 'unsigned'}, {name = 'owner', type = 'unsigned'}, {name = 'name', type = 'string'}, {name = 'step', type = 'integer'}, {name = 'min', type = 'integer'}, {name = 'max', type = 'integer'}, {name = 'start', type = 'integer'}, {name = 'cache', type = 'integer'}, {name = 'cycle', type = 'boolean'}}} log.info("create index _sequence:primary") _index:insert{_sequence.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} log.info("create index _sequence:owner") _index:insert{_sequence.id, 1, 'owner', 'tree', {unique = false}, {{1, 'unsigned'}}} log.info("create index _sequence:name") _index:insert{_sequence.id, 2, 'name', 'tree', {unique = true}, {{2, 'string'}}} log.info("create space _sequence_data") _space:insert{_sequence_data.id, ADMIN, '_sequence_data', 'memtx', 0, MAP, {{name = 'id', type = 'unsigned'}, 
{name = 'value', type = 'integer'}}} log.info("create index primary on _sequence_data") _index:insert{_sequence_data.id, 0, 'primary', 'hash', {unique = true}, {{0, 'unsigned'}}} log.info("create space _space_sequence") _space:insert{_space_sequence.id, ADMIN, '_space_sequence', 'memtx', 0, MAP, {{name = 'id', type = 'unsigned'}, {name = 'sequence_id', type = 'unsigned'}, {name = 'is_generated', type = 'boolean'}}} log.info("create index _space_sequence:primary") _index:insert{_space_sequence.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} log.info("create index _space_sequence:sequence") _index:insert{_space_sequence.id, 1, 'sequence', 'tree', {unique = false}, {{1, 'unsigned'}}} end local function create_collation_space() local _collation = box.space[box.schema.COLLATION_ID] log.info("create space _collation") box.space._space:insert{_collation.id, ADMIN, '_collation', 'memtx', 0, setmap({}), { { name = 'id', type = 'unsigned' }, { name = 'name', type = 'string' }, { name = 'owner', type = 'unsigned' }, { name = 'type', type = 'string' }, { name = 'locale', type = 'string' }, { name = 'opts', type = 'map' } } } log.info("create index primary on _collation") box.space._index:insert{_collation.id, 0, 'primary', 'tree', {unique = true}, {{0, 'unsigned'}}} log.info("create index name on _collation") box.space._index:insert{_collation.id, 1, 'name', 'tree', {unique = true}, {{1, 'string'}}} log.info("create predefined collations") box.space._collation:replace{1, "unicode", ADMIN, "ICU", "", setmap{}} box.space._collation:replace{2, "unicode_ci", ADMIN, "ICU", "", {strength='primary'}} local _priv = box.space[box.schema.PRIV_ID] _priv:insert{ADMIN, PUBLIC, 'space', _collation.id, 2} end local function upgrade_to_1_7_6() create_sequence_space() create_collation_space() -- Trigger space format checking by updating version in _schema. 
end -------------------------------------------------------------------------------- --- Tarantool 1.7.7 -------------------------------------------------------------------------------- local function upgrade_to_1_7_7() local _priv = box.space[box.schema.PRIV_ID] local _user = box.space[box.schema.USER_ID] -- -- grant 'session' and 'usage' to all existing users -- for _, v in _user:pairs() do if v[4] ~= "role" then _priv:upsert({ADMIN, v[1], "universe", 0, 24}, {{"|", 5, 24}}) end end -- -- grant 'create' to all users with 'read' and 'write' -- on the universe, since going forward we will require -- 'create' rather than 'read,write' to be able to create -- objects -- for _, v in _priv.index.object:pairs{'universe'} do if bit.band(v[5], 1) ~= 0 and bit.band(v[5], 2) ~= 0 then _priv:update({v[2], v[3], v[4]}, {{ "|", 5, 32}}) end end -- grant admin all new privileges (session, usage, grant option, -- create, alter, drop and anything that might come up in the future -- _priv:upsert({ADMIN, ADMIN, 'universe', 0, 4294967295}, {{ "|", 5, 4294967295}}) -- -- create role 'super' and grant it all privileges on universe -- _user:replace{SUPER, ADMIN, 'super', 'role', setmap({})} _priv:replace({ADMIN, SUPER, 'universe', 0, 4294967295}) end local function get_version() local version = box.space._schema:get{'version'} if version == nil then error('Missing "version" in box.space._schema') end local major = version[2] local minor = version[3] local patch = version[4] or 0 return mkversion(major, minor, patch) end local function upgrade(options) options = options or {} setmetatable(options, {__index = {auto = false}}) local version = get_version() local handlers = { {version = mkversion(1, 6, 8), func = upgrade_to_1_6_8, auto = false}, {version = mkversion(1, 7, 1), func = upgrade_to_1_7_1, auto = false}, {version = mkversion(1, 7, 2), func = upgrade_to_1_7_2, auto = false}, {version = mkversion(1, 7, 5), func = upgrade_to_1_7_5, auto = true}, {version = mkversion(1, 7, 6), func = 
upgrade_to_1_7_6, auto = false}, {version = mkversion(1, 7, 7), func = upgrade_to_1_7_7, auto = true}, } for _, handler in ipairs(handlers) do if version >= handler.version then goto continue end if options.auto and not handler.auto then log.warn("cannot auto upgrade schema version to %s, " .. "please call box.schema.upgrade() manually", handler.version) return end handler.func() log.info("set schema version to %s", handler.version) box.space._schema:replace({'version', handler.version.major, handler.version.minor, handler.version.patch}) ::continue:: end end local function bootstrap() set_system_triggers(false) local version = get_version() -- Initial() creates a spaces with 1.6.0 format, but 1.7.6 -- checks space formats, that fails initial(). It is because -- bootstrap() is called after box.cfg{}. If box.cfg{} is run -- on 1.7.6, then spaces in the cache contains new 1.7.6 -- formats (gh-2754). Spaces in the cache are not updated on -- erase(), because system triggers are turned off. -- erase current schema erase() -- insert initial schema if version < mkversion(1, 7, 6) then initial() else initial_1_7_5() end -- upgrade schema to the latest version upgrade() set_system_triggers(true) -- save new bootstrap.snap box.snapshot() end box.schema.upgrade = upgrade; box.internal.bootstrap = bootstrap; tarantool_1.9.1.26.g63eb81e3c/src/box/lua/sequence.h0000664000000000000000000000334113306560010020322 0ustar rootroot#ifndef INCLUDES_TARANTOOL_MOD_BOX_LUA_SEQUENCE_H #define INCLUDES_TARANTOOL_MOD_BOX_LUA_SEQUENCE_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_sequence_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_MOD_BOX_LUA_SEQUENCE_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/ctl.h0000664000000000000000000000326613306560010017302 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_CTL_H #define INCLUDES_TARANTOOL_LUA_CTL_H /* * Copyright 2010-2018, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void box_lua_ctl_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_CTL_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/lua/console.c0000664000000000000000000003623213306565107020170 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/lua/console.h" #include "lua/utils.h" #include "lua/fiber.h" #include "fiber.h" #include "coio.h" #include #include #include #include #include #include #include /* * Completion engine (Mike Paul's). * Used internally when collecting completions locally. Also a Lua * wrapper is provided enabling a remote server to compute completions * for a client. */ static char ** lua_rl_complete(lua_State *L, const char *text, int start, int end); /* * Lua state that made the pending readline call. * This Lua state is accessed in readline callbacks. Unfortunately * readline library doesn't allow to pass it as a function argument. * Two concurrent readline() calls never happen. */ static struct lua_State *readline_L; /* * console_completion_handler() * Called by readline to collect plausible completions; * The call stack is as follows: * * - lbox_console_readline * - (loop) rl_callback_read_char * - console_completion_handler * * Delegates to the func selected when the call to lbox_console_readline * was made, e.g. readline({ completion = ... }). */ static char ** console_completion_handler(const char *text, int start, int end) { size_t n, i; char **res; /* * Don't falback to builtin filename completion, ever. */ rl_attempted_completion_over = 1; /* * The lbox_console_readline() frame is still on the top of Lua * stack. We can reach the function arguments. Assuming arg#1 is * the options table. 
*/ lua_getfield(readline_L, 1, "completion"); if (lua_isnil(readline_L, -1)) { lua_pop(readline_L, 1); return NULL; } /* * If the completion func is lbox_console_completion_handler() * /we have it in upvalue #1/ which is a wrapper on top of * lua_rl_complete, call lua_rl_complete func directly. */ if (lua_equal(readline_L, -1, lua_upvalueindex(1))) { lua_pop(readline_L, 1); res = lua_rl_complete(readline_L, text, start, end); goto done; } /* Slow path - arbitrary completion handler. */ lua_pushstring(readline_L, text); lua_pushinteger(readline_L, start); lua_pushinteger(readline_L, end); if (lua_pcall(readline_L, 3, 1, 0) != 0 || !lua_istable(readline_L, -1) || (n = lua_objlen(readline_L, -1)) == 0) { lua_pop(readline_L, 1); return NULL; } res = malloc(sizeof(res[0]) * (n + 1)); if (res == NULL) { lua_pop(readline_L, 1); return NULL; } res[n] = NULL; for (i = 0; i < n; i++) { lua_pushinteger(readline_L, i + 1); lua_gettable(readline_L, -2); res[i] = strdup(lua_tostring(readline_L, -1)); lua_pop(readline_L, 1); } lua_pop(readline_L, 1); done: #if RL_READLINE_VERSION >= 0x0600 rl_completion_suppress_append = 1; #endif return res; } /* * console_push_line() * Readline invokes this callback once the whole line is ready. * The call stack is as follows: * * - lbox_console_readline * - (loop) rl_callback_read_char * - console_push_line * * The callback creates a copy of the line on the Lua stack; this copy * becomes the lbox_console_readline()'s ultimate result. */ static void console_push_line(char *line) { /* XXX pushnil/pushstring may err */ if (line == NULL) lua_pushnil(readline_L); else lua_pushstring(readline_L, line); #ifdef HAVE_GNU_READLINE /* * This is to avoid a stray prompt on the next line with GNU * readline. Interestingly, it botches the terminal when * attempted with libeditline. 
*/ rl_callback_handler_install(NULL, NULL); #endif } /* implements readline() Lua API */ static int lbox_console_readline(struct lua_State *L) { const char *prompt = NULL; int top; int completion = 0; if (lua_gettop(L) > 0) { switch (lua_type(L, 1)) { case LUA_TSTRING: prompt = lua_tostring(L, 1); break; case LUA_TTABLE: lua_getfield(L, 1, "prompt"); prompt = lua_tostring(L, -1); lua_pop(L, 1); lua_getfield(L, 1, "completion"); if (!lua_isnil(L, -1)) completion = 1; lua_pop(L, 1); break; default: luaL_error(L, "readline([prompt])"); } } if (prompt == NULL) prompt = "> "; if (readline_L != NULL) luaL_error(L, "readline(): earlier call didn't complete yet"); readline_L = L; if (completion) { rl_inhibit_completion = 0; rl_attempted_completion_function = console_completion_handler; rl_completer_word_break_characters = "\t\r\n !\"#$%&'()*+,-/;<=>?@[\\]^`{|}~"; rl_completer_quote_characters = "\"'"; #if RL_READLINE_VERSION < 0x0600 rl_completion_append_character = '\0'; #endif } else { rl_inhibit_completion = 1; rl_attempted_completion_function = NULL; } /* * Readline library provides eventloop-friendly API; repeat * until console_push_line() manages to capture the result. */ rl_callback_handler_install(prompt, console_push_line); top = lua_gettop(L); while (top == lua_gettop(L)) { while (coio_wait(STDIN_FILENO, COIO_READ, TIMEOUT_INFINITY) == 0); rl_callback_read_char(); } readline_L = NULL; /* Incidents happen. */ #pragma GCC poison readline_L rl_attempted_completion_function = NULL; luaL_testcancel(L); return 1; } /* C string array to lua table converter */ static int console_completion_helper(struct lua_State *L) { size_t i; char **res = *(char ***)lua_topointer(L, -1); assert(lua_islightuserdata(L, -1)); assert(L != NULL); lua_createtable(L, 0, 0); for (i = 0; res[i]; i++) { lua_pushstring(L, res[i]); lua_rawseti(L, -2, i + 1); } return 1; } /* * completion_handler() Lua API * Exposing completion engine to Lua. 
*/ static int lbox_console_completion_handler(struct lua_State *L) { size_t i; char **res; int st; /* * Prepare for the future pcall; * this may err, hence do it before res is created */ lua_pushcfunction(L, console_completion_helper); lua_pushlightuserdata(L, &res); res = lua_rl_complete(L, lua_tostring(L, 1), lua_tointeger(L, 2), lua_tointeger(L, 3)); if (res == NULL) { return 0; } st = lua_pcall(L, 1, 1, 0); /* free res */ for (i = 0; res[i]; i++) { free(res[i]); } free(res); res = NULL; if (st != 0) { lua_error(L); } return 1; } static int lbox_console_load_history(struct lua_State *L) { if (!lua_isstring(L, 1)) luaL_error(L, "load_history(filename: string)"); read_history(lua_tostring(L, 1)); return 0; } static int lbox_console_save_history(struct lua_State *L) { if (!lua_isstring(L, 1)) luaL_error(L, "save_history(filename: string)"); write_history(lua_tostring(L, 1)); return 0; } static int lbox_console_add_history(struct lua_State *L) { if (lua_gettop(L) < 1 || !lua_isstring(L, 1)) luaL_error(L, "add_history(string)"); const char *s = lua_tostring(L, 1); if (*s) { HIST_ENTRY *hist_ent = history_get(history_length - 1 + history_base); const char *prev_s = hist_ent ? hist_ent->line : ""; if (strcmp(prev_s, s) != 0) add_history(s); } return 0; } void tarantool_lua_console_init(struct lua_State *L) { static const struct luaL_Reg consolelib[] = { {"load_history", lbox_console_load_history}, {"save_history", lbox_console_save_history}, {"add_history", lbox_console_add_history}, {"completion_handler", lbox_console_completion_handler}, {NULL, NULL} }; luaL_register_module(L, "console", consolelib); /* readline() func needs a ref to completion_handler (in upvalue) */ lua_getfield(L, -1, "completion_handler"); lua_pushcclosure(L, lbox_console_readline, 1); lua_setfield(L, -2, "readline"); } /* * Completion engine from "Mike Paul's advanced readline patch". * With minor fixes and code style tweaks. 
*/ #define lua_pushglobaltable(L) lua_pushvalue(L, LUA_GLOBALSINDEX) enum { /* * Suggest a keyword if a prefix of KEYWORD_MATCH_MIN * characters or more was entered. */ KEYWORD_MATCH_MIN = 1, /* * Metatables are consulted recursively when learning items; * avoid infinite metatable loops. */ METATABLE_RECURSION_MAX = 20, /* * Extracting all items matching a given prefix is O(n); * stop once that many items were considered. */ ITEMS_CHECKED_MAX = 500 }; /* goto intentionally omited */ static const char * const lua_rl_keywords[] = { "and", "break", "do", "else", "elseif", "end", "false", "for", "function", "if", "in", "local", "nil", "not", "or", "repeat", "return", "then", "true", "until", "while", NULL }; static int valid_identifier(const char *s) { if (!(isalpha(*s) || *s == '_')) return 0; for (s++; *s; s++) if (!(isalpha(*s) || isdigit(*s) || *s == '_')) return 0; return 1; } /* * Dynamically resizable match list. * Readline consumes argv-style string list; both the list itself and * individual strings should be malloc-ed; readline is responsible for * releasing them once done. Item #0 is the longest common prefix * (inited last). Idx is the last index assigned (i.e. len - 1.) */ typedef struct { char **list; size_t idx, allocated, matchlen; } dmlist; static void lua_rl_dmfree(dmlist *ml) { if (ml->list == NULL) return; /* * Note: item #0 isn't initialized until the very end of * lua_rl_complete, the only function calling dmfree(). */ for (size_t i = 1; i <= ml->idx; i++) { free(ml->list[i]); } free(ml->list); ml->list = NULL; } /* Add prefix + string + suffix to list and compute common prefix. 
*/ static int lua_rl_dmadd(dmlist *ml, const char *p, size_t pn, const char *s, int suf) { char *t = NULL; if (ml->idx+1 >= ml->allocated) { char **new_list; new_list = realloc( ml->list, sizeof(char *)*(ml->allocated += 32)); if (!new_list) return -1; ml->list = new_list; } if (s) { size_t n = strlen(s); if (!(t = (char *)malloc(sizeof(char)*(pn + n + 2)))) return 1; memcpy(t, p, pn); memcpy(t + pn, s, n); n += pn; t[n] = suf; if (suf) t[++n] = '\0'; if (ml->idx == 0) { ml->matchlen = n; } else { size_t i; for (i = 0; i < ml->matchlen && i < n && ml->list[1][i] == t[i]; i++) ; /* Set matchlen to common prefix. */ ml->matchlen = i; } } ml->list[++ml->idx] = t; return 0; } /* Get __index field of metatable of object on top of stack. */ static int lua_rl_getmetaindex(lua_State *L) { if (!lua_getmetatable(L, -1)) { lua_pop(L, 1); return 0; } lua_pushstring(L, "__index"); lua_rawget(L, -2); lua_replace(L, -2); if (lua_isnil(L, -1) || lua_rawequal(L, -1, -2)) { lua_pop(L, 2); return 0; } lua_replace(L, -2); return 1; } /* 1: obj -- val, 0: obj -- */ /* Get field from object on top of stack. Avoid calling metamethods. */ static int lua_rl_getfield(lua_State *L, const char *s, size_t n) { int loop = METATABLE_RECURSION_MAX; do { if (lua_istable(L, -1)) { lua_pushlstring(L, s, n); lua_rawget(L, -2); if (!lua_isnil(L, -1)) { lua_replace(L, -2); return 1; } lua_pop(L, 1); } if (--loop == 0) { lua_pop(L, 1); return 0; } } while (lua_rl_getmetaindex(L)); return 0; } /* 1: obj -- val, 0: obj -- */ static char ** lua_rl_complete(lua_State *L, const char *text, int start, int end) { dmlist ml; const char *s; size_t i, n, dot, items_checked; int loop, savetop, is_method_ref = 0; if (!(text[0] == '\0' || isalpha(text[0]) || text[0] == '_')) return NULL; ml.list = NULL; ml.idx = ml.allocated = ml.matchlen = 0; savetop = lua_gettop(L); lua_pushglobaltable(L); for (n = (size_t)(end-start), i = dot = 0; i < n; i++) { if (text[i] == '.' 
|| text[i] == ':') { is_method_ref = (text[i] == ':'); if (!lua_rl_getfield(L, text+dot, i-dot)) goto error; /* Invalid prefix. */ dot = i+1; /* Points to first char after dot/colon. */ } } /* Add all matches against keywords if there is no dot/colon. */ if (dot == 0) { for (i = 0; (s = lua_rl_keywords[i]) != NULL; i++) { if (n >= KEYWORD_MATCH_MIN && !strncmp(s, text, n) && lua_rl_dmadd(&ml, NULL, 0, s, ' ')) { goto error; } } } /* Add all valid matches from all tables/metatables. */ loop = 0; items_checked = 0; lua_pushglobaltable(L); lua_insert(L, -2); do { if (!lua_istable(L, -1) || (loop != 0 && lua_rawequal(L, -1, -2))) continue; for (lua_pushnil(L); lua_next(L, -2); lua_pop(L, 1)) { /* Beware huge tables */ if (++items_checked > ITEMS_CHECKED_MAX) break; if (lua_type(L, -2) != LUA_TSTRING) continue; s = lua_tostring(L, -2); /* * Only match names starting with '_' * if explicitly requested. */ if (strncmp(s, text+dot, n-dot) || !valid_identifier(s) || (*s == '_' && text[dot] != '_')) continue; int suf = 0; /* Omit suffix by default. */ int type = lua_type(L, -1); switch (type) { case LUA_TTABLE: case LUA_TUSERDATA: /* * For tables and userdata omit a * suffix, since all variants, i.e. * T, T.field, T:method and T() * are likely valid. */ break; case LUA_TFUNCTION: /* * Prepend '(' for a function. This * helps to differentiate functions * visually in completion lists. It is * believed that in interactive console * functions are most often called * rather then assigned to a variable or * passed as a parameter, hence * an ocasional need to delete an * unwanted '(' shouldn't be a burden. */ suf = '('; break; } /* * If completing a method ref, i.e * foo:meth, show functions only. 
*/ if (!is_method_ref || type == LUA_TFUNCTION) { if (lua_rl_dmadd(&ml, text, dot, s, suf)) goto error; } } } while (++loop < METATABLE_RECURSION_MAX && lua_rl_getmetaindex(L)); lua_pop(L, 1); if (ml.idx == 0) { error: lua_rl_dmfree(&ml); lua_settop(L, savetop); return NULL; } else { /* list[0] holds the common prefix of all matches (may * be ""). If there is only one match, list[0] and * list[1] will be the same. */ ml.list[0] = malloc(sizeof(char)*(ml.matchlen+1)); if (!ml.list[0]) goto error; memcpy(ml.list[0], ml.list[1], ml.matchlen); ml.list[0][ml.matchlen] = '\0'; /* Add the NULL list terminator. */ if (lua_rl_dmadd(&ml, NULL, 0, NULL, 0)) goto error; } lua_settop(L, savetop); return ml.list; } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/tuple.c0000664000000000000000000003407013306565107017655 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/lua/tuple.h" #include "lua/utils.h" /* luaT_error() */ #include "lua/msgpack.h" /* luamp_encode_XXX() */ #include "diag.h" /* diag_set() */ #include #include #include #include "box/tuple.h" #include "box/tuple_convert.h" #include "box/errcode.h" #include "box/memtx_tuple.h" /** {{{ box.tuple Lua library * * To avoid extra copying between Lua memory and garbage-collected * tuple memory, provide a Lua userdata object 'box.tuple'. This * object refers to a tuple instance in the slab allocator, and * allows accessing it using Lua primitives (array subscription, * iteration, etc.). When Lua object is garbage-collected, * tuple reference counter in the slab allocator is decreased, * allowing the tuple to be eventually garbage collected in * the slab allocator. 
*/ static const char *tuplelib_name = "box.tuple"; static const char *tuple_iteratorlib_name = "box.tuple.iterator"; extern char tuple_lua[]; /* Lua source */ uint32_t CTID_CONST_STRUCT_TUPLE_REF; static inline box_tuple_t * lua_checktuple(struct lua_State *L, int narg) { struct tuple *tuple = luaT_istuple(L, narg); if (tuple == NULL) { luaL_error(L, "Invalid argument #%d (box.tuple expected, got %s)", narg, lua_typename(L, lua_type(L, narg))); } return tuple; } box_tuple_t * luaT_istuple(struct lua_State *L, int narg) { assert(CTID_CONST_STRUCT_TUPLE_REF != 0); uint32_t ctypeid; void *data; if (lua_type(L, narg) != LUA_TCDATA) return NULL; data = luaL_checkcdata(L, narg, &ctypeid); if (ctypeid != CTID_CONST_STRUCT_TUPLE_REF) return NULL; return *(struct tuple **) data; } static int lbox_tuple_new(lua_State *L) { int argc = lua_gettop(L); if (argc < 1) { lua_newtable(L); /* create an empty tuple */ ++argc; } struct ibuf *buf = tarantool_lua_ibuf; ibuf_reset(buf); struct mpstream stream; mpstream_init(&stream, buf, ibuf_reserve_cb, ibuf_alloc_cb, luamp_error, L); if (argc == 1 && (lua_istable(L, 1) || luaT_istuple(L, 1))) { /* New format: box.tuple.new({1, 2, 3}) */ luamp_encode_tuple(L, luaL_msgpack_default, &stream, 1); } else { /* Backward-compatible format: box.tuple.new(1, 2, 3). 
*/ luamp_encode_array(luaL_msgpack_default, &stream, argc); for (int k = 1; k <= argc; ++k) { luamp_encode(L, luaL_msgpack_default, &stream, k); } } mpstream_flush(&stream); box_tuple_format_t *fmt = box_tuple_format_default(); struct tuple *tuple = box_tuple_new(fmt, buf->buf, buf->buf + ibuf_used(buf)); if (tuple == NULL) return luaT_error(L); /* box_tuple_new() doesn't leak on exception, see public API doc */ luaT_pushtuple(L, tuple); ibuf_reinit(tarantool_lua_ibuf); return 1; } static int lbox_tuple_gc(struct lua_State *L) { struct tuple *tuple = lua_checktuple(L, 1); box_tuple_unref(tuple); return 0; } static int lbox_tuple_slice_wrapper(struct lua_State *L) { box_tuple_iterator_t *it = (box_tuple_iterator_t *) lua_topointer(L, 1); uint32_t start = lua_tonumber(L, 2); uint32_t end = lua_tonumber(L, 3); assert(end >= start); const char *field; uint32_t field_no = start; field = box_tuple_seek(it, start); while (field && field_no < end) { luamp_decode(L, luaL_msgpack_default, &field); ++field_no; field = box_tuple_next(it); } assert(field_no == end); return end - start; } static int lbox_tuple_slice(struct lua_State *L) { struct tuple *tuple = lua_checktuple(L, 1); int argc = lua_gettop(L) - 1; uint32_t start, end; int32_t offset; /* * Prepare the range. The second argument is optional. * If the end is beyond tuple size, adjust it. * If no arguments, or start > end, return an error. 
*/ if (argc == 0 || argc > 2) luaL_error(L, "tuple.slice(): bad arguments"); int32_t field_count = box_tuple_field_count(tuple); offset = lua_tonumber(L, 2); if (offset >= 0 && offset < field_count) { start = offset; } else if (offset < 0 && -offset <= field_count) { start = offset + field_count; } else { return luaL_error(L, "tuple.slice(): start >= field count"); } if (argc == 2) { offset = lua_tonumber(L, 3); if (offset > 0 && offset <= field_count) { end = offset; } else if (offset < 0 && -offset < field_count) { end = offset + field_count; } else { return luaL_error(L, "tuple.slice(): end > field count"); } } else { end = field_count; } if (end <= start) return luaL_error(L, "tuple.slice(): start must be less than end"); box_tuple_iterator_t *it = box_tuple_iterator(tuple); lua_pushcfunction(L, lbox_tuple_slice_wrapper); lua_pushlightuserdata(L, it); lua_pushinteger(L, start); lua_pushinteger(L, end); int rc = luaT_call(L, 3, end - start); box_tuple_iterator_free(it); if (rc != 0) return luaT_error(L); return end - start; } void luamp_convert_key(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, int index) { /* Performs keyfy() logic */ struct tuple *tuple = luaT_istuple(L, index); if (tuple != NULL) return tuple_to_mpstream(tuple, stream); struct luaL_field field; luaL_tofield(L, cfg, index, &field); if (field.type == MP_ARRAY) { lua_pushvalue(L, index); luamp_encode_r(L, cfg, stream, &field, 0); lua_pop(L, 1); } else if (field.type == MP_NIL) { luamp_encode_array(cfg, stream, 0); } else { luamp_encode_array(cfg, stream, 1); lua_pushvalue(L, index); luamp_encode_r(L, cfg, stream, &field, 0); lua_pop(L, 1); } } void luamp_encode_tuple(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, int index) { struct tuple *tuple = luaT_istuple(L, index); if (tuple != NULL) { return tuple_to_mpstream(tuple, stream); } else if (luamp_encode(L, cfg, stream, index) != MP_ARRAY) { diag_set(ClientError, ER_TUPLE_NOT_ARRAY); 
luaT_error(L); } } void tuple_to_mpstream(struct tuple *tuple, struct mpstream *stream) { size_t bsize = box_tuple_bsize(tuple); char *ptr = mpstream_reserve(stream, bsize); box_tuple_to_buf(tuple, ptr, bsize); mpstream_advance(stream, bsize); } /* A MsgPack extensions handler that supports tuples */ static enum mp_type luamp_encode_extension_box(struct lua_State *L, int idx, struct mpstream *stream) { struct tuple *tuple = luaT_istuple(L, idx); if (tuple != NULL) { tuple_to_mpstream(tuple, stream); return MP_ARRAY; } return MP_EXT; } /** * Convert a tuple into lua table. Named fields are stored as * {name = value} pairs. Not named fields are stored as * {1-based_index_in_tuple = value}. */ static int lbox_tuple_to_map(struct lua_State *L) { if (lua_gettop(L) < 1) luaL_error(L, "Usage: tuple:tomap()"); const struct tuple *tuple = lua_checktuple(L, 1); const struct tuple_format *format = tuple_format(tuple); const struct tuple_field *field = &format->fields[0]; const char *pos = tuple_data(tuple); int field_count = (int)mp_decode_array(&pos); int n_named = format->dict->name_count; lua_createtable(L, field_count, n_named); for (int i = 0; i < n_named; ++i, ++field) { /* Access by name. */ const char *name = format->dict->names[i]; lua_pushstring(L, name); luamp_decode(L, luaL_msgpack_default, &pos); lua_rawset(L, -3); /* * Access the same field by an index. There is no * copy for tables - lua optimizes it and uses * references. */ lua_pushstring(L, name); lua_rawget(L, -2); lua_rawseti(L, -2, i + TUPLE_INDEX_BASE); } /* Access for not named fields by index. */ for (int i = n_named; i < field_count; ++i) { luamp_decode(L, luaL_msgpack_default, &pos); lua_rawseti(L, -2, i + TUPLE_INDEX_BASE); } return 1; } /** * Tuple transforming function. * * Remove the fields designated by 'offset' and 'len' from an tuple, * and replace them with the elements of supplied data fields, * if any. * * Function returns newly allocated tuple. * It does not change any parent tuple data. 
*/ static int lbox_tuple_transform(struct lua_State *L) { struct tuple *tuple = lua_checktuple(L, 1); int argc = lua_gettop(L); if (argc < 3) luaL_error(L, "tuple.transform(): bad arguments"); lua_Integer offset = lua_tointeger(L, 2); /* Can be negative and can be > INT_MAX */ lua_Integer len = lua_tointeger(L, 3); lua_Integer field_count = box_tuple_field_count(tuple); /* validate offset and len */ if (offset == 0) { luaL_error(L, "tuple.transform(): offset is out of bound"); } else if (offset < 0) { if (-offset > field_count) luaL_error(L, "tuple.transform(): offset is out of bound"); offset += field_count + 1; } else if (offset > field_count) { offset = field_count + 1; } if (len < 0) luaL_error(L, "tuple.transform(): len is negative"); if (len > field_count + 1 - offset) len = field_count + 1 - offset; assert(offset + len <= field_count + 1); /* * Calculate the number of operations and length of UPDATE expression */ uint32_t op_cnt = 0; if (offset < field_count + 1 && len > 0) op_cnt++; if (argc > 3) op_cnt += argc - 3; if (op_cnt == 0) { /* tuple_update() does not accept an empty operation list. 
*/ luaT_pushtuple(L, tuple); return 1; } struct ibuf *buf = tarantool_lua_ibuf; ibuf_reset(buf); struct mpstream stream; mpstream_init(&stream, buf, ibuf_reserve_cb, ibuf_alloc_cb, luamp_error, L); /* * Prepare UPDATE expression */ luamp_encode_array(luaL_msgpack_default, &stream, op_cnt); if (len > 0) { luamp_encode_array(luaL_msgpack_default, &stream, 3); luamp_encode_str(luaL_msgpack_default, &stream, "#", 1); luamp_encode_uint(luaL_msgpack_default, &stream, offset); luamp_encode_uint(luaL_msgpack_default, &stream, len); } for (int i = argc ; i > 3; i--) { luamp_encode_array(luaL_msgpack_default, &stream, 3); luamp_encode_str(luaL_msgpack_default, &stream, "!", 1); luamp_encode_uint(luaL_msgpack_default, &stream, offset); luamp_encode(L, luaL_msgpack_default, &stream, i); } mpstream_flush(&stream); /* Execute tuple_update */ struct tuple *new_tuple = box_tuple_update(tuple, buf->buf, buf->buf + ibuf_used(buf)); if (tuple == NULL) luaT_error(L); /* box_tuple_update() doesn't leak on exception, see public API doc */ luaT_pushtuple(L, new_tuple); ibuf_reset(buf); return 1; } /** * Find a tuple field using its name. * @param L Lua state. * @param tuple 1-th argument on lua stack, tuple to get field * from. * @param field_name 2-th argument on lua stack, field name to * get. * * @retval If a field was not found, return -1 and nil to lua else * return 0 and decoded field. */ static int lbox_tuple_field_by_name(struct lua_State *L) { struct tuple *tuple = luaT_istuple(L, 1); /* Is checked in Lua wrapper. 
*/ assert(tuple != NULL); assert(lua_isstring(L, 2)); size_t name_len; const char *name = lua_tolstring(L, 2, &name_len); uint32_t name_hash = lua_hashstring(L, 2); const char *field = tuple_field_by_name(tuple, name, name_len, name_hash); if (field == NULL) { lua_pushinteger(L, -1); lua_pushnil(L); return 2; } lua_pushinteger(L, 0); luamp_decode(L, luaL_msgpack_default, &field); return 2; } static int lbox_tuple_to_string(struct lua_State *L) { struct tuple *tuple = lua_checktuple(L, 1); size_t used = region_used(&fiber()->gc); char *res = tuple_to_yaml(tuple); if (res == NULL) { region_truncate(&fiber()->gc, used); return luaT_error(L); } lua_pushstring(L, res); region_truncate(&fiber()->gc, used); return 1; } void luaT_pushtuple(struct lua_State *L, box_tuple_t *tuple) { assert(CTID_CONST_STRUCT_TUPLE_REF != 0); struct tuple **ptr = (struct tuple **) luaL_pushcdata(L, CTID_CONST_STRUCT_TUPLE_REF); *ptr = tuple; /* The order is important - first reference tuple, next set gc */ if (box_tuple_ref(tuple) != 0) { luaT_error(L); return; } lua_pushcfunction(L, lbox_tuple_gc); luaL_setcdatagc(L, -2); } static const struct luaL_Reg lbox_tuple_meta[] = { {"__gc", lbox_tuple_gc}, {"tostring", lbox_tuple_to_string}, {"slice", lbox_tuple_slice}, {"transform", lbox_tuple_transform}, {"tuple_field_by_name", lbox_tuple_field_by_name}, {"tuple_to_map", lbox_tuple_to_map}, {NULL, NULL} }; static const struct luaL_Reg lbox_tuplelib[] = { {"new", lbox_tuple_new}, {NULL, NULL} }; static const struct luaL_Reg lbox_tuple_iterator_meta[] = { {NULL, NULL} }; /* }}} */ void box_lua_tuple_init(struct lua_State *L) { /* export C functions to Lua */ luaL_findtable(L, LUA_GLOBALSINDEX, "box.internal", 1); luaL_newmetatable(L, tuplelib_name); luaL_register(L, NULL, lbox_tuple_meta); lua_setfield(L, -2, "tuple"); lua_pop(L, 1); /* box.internal */ luaL_register_type(L, tuple_iteratorlib_name, lbox_tuple_iterator_meta); luaL_register_module(L, tuplelib_name, lbox_tuplelib); lua_pop(L, 1); 
luamp_set_encode_extension(luamp_encode_extension_box); /* Get CTypeID for `struct tuple' */ int rc = luaL_cdef(L, "struct tuple;"); assert(rc == 0); (void) rc; CTID_CONST_STRUCT_TUPLE_REF = luaL_ctypeid(L, "const struct tuple &"); assert(CTID_CONST_STRUCT_TUPLE_REF != 0); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/call.c0000664000000000000000000003172713306565107017445 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "box/lua/call.h" #include "box/call.h" #include "box/error.h" #include "fiber.h" #include "lua/utils.h" #include "lua/msgpack.h" #include "box/xrow.h" #include "box/port.h" #include "box/lua/tuple.h" #include "small/obuf.h" #include "trivia/util.h" /** * A helper to find a Lua function by name and put it * on top of the stack. */ static int box_lua_find(lua_State *L, const char *name, const char *name_end) { int index = LUA_GLOBALSINDEX; int objstack = 0; const char *start = name, *end; while ((end = (const char *) memchr(start, '.', name_end - start))) { lua_checkstack(L, 3); lua_pushlstring(L, start, end - start); lua_gettable(L, index); if (! lua_istable(L, -1)) { diag_set(ClientError, ER_NO_SUCH_PROC, name_end - name, name); luaT_error(L); } start = end + 1; /* next piece of a.b.c */ index = lua_gettop(L); /* top of the stack */ } /* box.something:method */ if ((end = (const char *) memchr(start, ':', name_end - start))) { lua_checkstack(L, 3); lua_pushlstring(L, start, end - start); lua_gettable(L, index); if (! (lua_istable(L, -1) || lua_islightuserdata(L, -1) || lua_isuserdata(L, -1) )) { diag_set(ClientError, ER_NO_SUCH_PROC, name_end - name, name); luaT_error(L); } start = end + 1; /* next piece of a.b.c */ index = lua_gettop(L); /* top of the stack */ objstack = index; } lua_pushlstring(L, start, name_end - start); lua_gettable(L, index); if (!lua_isfunction(L, -1) && !lua_istable(L, -1)) { /* lua_call or lua_gettable would raise a type error * for us, but our own message is more verbose. */ diag_set(ClientError, ER_NO_SUCH_PROC, name_end - name, name); luaT_error(L); } /* setting stack that it would contain only * the function pointer. 
*/ if (index != LUA_GLOBALSINDEX) { if (objstack == 0) { /* no object, only a function */ lua_replace(L, 1); } else if (objstack == 1) { /* just two values, swap them */ lua_insert(L, -2); } else { /* long path */ lua_insert(L, 1); lua_insert(L, 2); objstack = 1; } lua_settop(L, 1 + objstack); } return 1 + objstack; } /** * A helper to find lua stored procedures for box.call. * box.call iteslf is pure Lua, to avoid issues * with infinite call recursion smashing C * thread stack. */ static int lbox_call_loadproc(struct lua_State *L) { const char *name; size_t name_len; name = lua_tolstring(L, 1, &name_len); return box_lua_find(L, name, name + name_len); } /* * Encode CALL/EVAL result. */ static inline uint32_t luamp_encode_call(lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream) { int count = lua_gettop(L); for (int i = 1; i <= count; ++i) luamp_encode(L, cfg, stream, i); return count; } /* * Encode CALL_16 result. * * To allow clients to understand a complex return from * a procedure, we are compatible with SELECT protocol, * and return the number of return values first, and * then each return value as a tuple. * * The following conversion rules apply: * * If a Lua stack contains at least one scalar, each * value on the stack is converted to a tuple. A stack * containing a single Lua table with scalars is converted to * a tuple with multiple fields. * * If the stack is a Lua table, each member of which is * not scalar, each member of the table is converted to * a tuple. This way very large lists of return values can * be used, since Lua stack size is limited by 8000 elements, * while Lua table size is pretty much unlimited. * * Please read gh-291 carefully before "fixing" this code. 
*/ static inline uint32_t luamp_encode_call_16(lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream) { int nrets = lua_gettop(L); if (nrets == 0) { return 0; } else if (nrets > 1) { /* * Multireturn: * `return 1, box.tuple.new(...), array, 3, ...` */ for (int i = 1; i <= nrets; ++i) { struct luaL_field field; luaL_tofield(L, cfg, i, &field); struct tuple *tuple; if (field.type == MP_EXT && (tuple = luaT_istuple(L, i)) != NULL) { /* `return ..., box.tuple.new(...), ...` */ tuple_to_mpstream(tuple, stream); } else if (field.type != MP_ARRAY) { /* * `return ..., scalar, ... => * ..., { scalar }, ...` */ lua_pushvalue(L, i); luamp_encode_array(cfg, stream, 1); luamp_encode_r(L, cfg, stream, &field, 0); lua_pop(L, 1); } else { /* `return ..., array, ...` */ luamp_encode(L, cfg, stream, i); } } return nrets; } assert(nrets == 1); /* * Inspect the first result */ struct luaL_field root; luaL_tofield(L, cfg, 1, &root); struct tuple *tuple; if (root.type == MP_EXT && (tuple = luaT_istuple(L, 1)) != NULL) { /* `return box.tuple()` */ tuple_to_mpstream(tuple, stream); return 1; } else if (root.type != MP_ARRAY) { /* * `return scalar` * `return map` */ luamp_encode_array(cfg, stream, 1); assert(lua_gettop(L) == 1); luamp_encode_r(L, cfg, stream, &root, 0); return 1; } assert(root.type == MP_ARRAY); if (root.size == 0) { /* `return {}` => `{ box.tuple() }` */ luamp_encode_array(cfg, stream, 0); return 1; } /* `return { tuple, scalar, tuple }` */ assert(root.type == MP_ARRAY && root.size > 0); for (uint32_t t = 1; t <= root.size; t++) { lua_rawgeti(L, 1, t); struct luaL_field field; luaL_tofield(L, cfg, -1, &field); if (field.type == MP_EXT && (tuple = luaT_istuple(L, -1))) { tuple_to_mpstream(tuple, stream); } else if (field.type != MP_ARRAY) { /* The first member of root table is not tuple/array */ if (t == 1) { /* * `return { scalar, ... 
} => * box.tuple.new(scalar, ...)` */ luamp_encode_array(cfg, stream, root.size); /* * Encode the first field of tuple using * existing information from luaL_tofield */ luamp_encode_r(L, cfg, stream, &field, 0); lua_pop(L, 1); assert(lua_gettop(L) == 1); /* Encode remaining fields as usual */ for (uint32_t f = 2; f <= root.size; f++) { lua_rawgeti(L, 1, f); luamp_encode(L, cfg, stream, -1); lua_pop(L, 1); } return 1; } /* * `return { tuple/array, ..., scalar, ... } => * { tuple/array, ..., { scalar }, ... }` */ luamp_encode_array(cfg, stream, 1); luamp_encode_r(L, cfg, stream, &field, 0); } else { /* `return { tuple/array, ..., tuple/array, ... }` */ luamp_encode_r(L, cfg, stream, &field, 0); } lua_pop(L, 1); assert(lua_gettop(L) == 1); } return root.size; } static int execute_lua_call(lua_State *L) { struct call_request *request = (struct call_request *) lua_topointer(L, 1); lua_settop(L, 0); /* clear the stack to simplify the logic below */ const char *name = request->name; uint32_t name_len = mp_decode_strl(&name); int oc = 0; /* how many objects are on stack after box_lua_find */ /* Try to find a function by name in Lua */ oc = box_lua_find(L, name, name + name_len); /* Push the rest of args (a tuple). 
*/ const char *args = request->args; uint32_t arg_count = mp_decode_array(&args); luaL_checkstack(L, arg_count, "call: out of stack"); for (uint32_t i = 0; i < arg_count; i++) luamp_decode(L, luaL_msgpack_default, &args); lua_call(L, arg_count + oc - 1, LUA_MULTRET); return lua_gettop(L); } static int execute_lua_eval(lua_State *L) { struct call_request *request = (struct call_request *) lua_topointer(L, 1); lua_settop(L, 0); /* clear the stack to simplify the logic below */ /* Compile expression */ const char *expr = request->expr; uint32_t expr_len = mp_decode_strl(&expr); if (luaL_loadbuffer(L, expr, expr_len, "=eval")) { diag_set(LuajitError, lua_tostring(L, -1)); luaT_error(L); } /* Unpack arguments */ const char *args = request->args; uint32_t arg_count = mp_decode_array(&args); luaL_checkstack(L, arg_count, "eval: out of stack"); for (uint32_t i = 0; i < arg_count; i++) { luamp_decode(L, luaL_msgpack_default, &args); } /* Call compiled code */ lua_call(L, arg_count, LUA_MULTRET); return lua_gettop(L); } struct encode_lua_call_ctx { /** Buffer to append the call result to. */ struct obuf *out; /** If set, use Tarantool 1.6 output format. */ bool call_16; /** Number of values in the output. */ int count; }; static int encode_lua_call(lua_State *L) { struct encode_lua_call_ctx *ctx = (struct encode_lua_call_ctx *) lua_topointer(L, -1); lua_pop(L, 1); /* * Add all elements from Lua stack to the buffer. * * TODO: forbid explicit yield from __serialize or __index here */ struct mpstream stream; mpstream_init(&stream, ctx->out, obuf_reserve_cb, obuf_alloc_cb, luamp_error, L); struct luaL_serializer *cfg = luaL_msgpack_default; if (ctx->call_16) ctx->count = luamp_encode_call_16(L, cfg, &stream); else ctx->count = luamp_encode_call(L, cfg, &stream); mpstream_flush(&stream); return 0; } /** * Port for storing the result of a Lua CALL/EVAL. */ struct port_lua { const struct port_vtab *vtab; /** Lua state that stores the result. 
*/ struct lua_State *L; /** Reference to L in tarantool_L. */ int ref; }; static_assert(sizeof(struct port_lua) <= sizeof(struct port), "sizeof(struct port_lua) must be <= sizeof(struct port)"); static const struct port_vtab port_lua_vtab; static inline int port_lua_do_dump(struct port *base, bool call_16, struct obuf *out) { struct port_lua *port = (struct port_lua *)base; assert(port->vtab == &port_lua_vtab); struct lua_State *L = port->L; struct encode_lua_call_ctx ctx = { out, call_16, 0 }; lua_pushlightuserdata(L, &ctx); if (luaT_call(L, lua_gettop(L) - 1, 0) != 0) return -1; return ctx.count; } static int port_lua_dump(struct port *base, struct obuf *out) { return port_lua_do_dump(base, false, out); } static int port_lua_dump_16(struct port *base, struct obuf *out) { return port_lua_do_dump(base, true, out); } static void port_lua_destroy(struct port *base) { struct port_lua *port = (struct port_lua *)base; assert(port->vtab == &port_lua_vtab); luaL_unref(tarantool_L, LUA_REGISTRYINDEX, port->ref); } static const struct port_vtab port_lua_vtab = { .dump = port_lua_dump, .dump_16 = port_lua_dump_16, .destroy = port_lua_destroy, }; static inline int box_process_lua(struct call_request *request, struct port *base, lua_CFunction handler) { lua_State *L = lua_newthread(tarantool_L); int coro_ref = luaL_ref(tarantool_L, LUA_REGISTRYINDEX); /* * Push the encoder function first - values returned by * the handler will be passed to it as arguments, see * port_lua_dump(). 
*/ lua_pushcfunction(L, encode_lua_call); lua_pushcfunction(L, handler); lua_pushlightuserdata(L, request); if (luaT_call(L, 1, LUA_MULTRET) != 0) { luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref); return -1; } struct port_lua *port = (struct port_lua *)base; port->vtab = &port_lua_vtab; port->L = L; port->ref = coro_ref; return 0; } int box_lua_call(struct call_request *request, struct port *port) { return box_process_lua(request, port, execute_lua_call); } int box_lua_eval(struct call_request *request, struct port *port) { return box_process_lua(request, port, execute_lua_eval); } static int lbox_func_reload(lua_State *L) { const char *name = luaL_checkstring(L, 1); if (box_func_reload(name) != 0) return luaT_error(L); return 0; } static const struct luaL_Reg boxlib_internal[] = { {"call_loadproc", lbox_call_loadproc}, {"func_reload", lbox_func_reload}, {NULL, NULL} }; void box_lua_call_init(struct lua_State *L) { luaL_register(L, "box.internal", boxlib_internal); lua_pop(L, 1); #if 0 /* Get CTypeID for `struct port *' */ int rc = luaL_cdef(L, "struct port;"); assert(rc == 0); (void) rc; CTID_STRUCT_PORT_PTR = luaL_ctypeid(L, "struct port *"); assert(CTID_CONST_STRUCT_TUPLE_REF != 0); #endif } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/sequence.c0000664000000000000000000000570513306560010020323 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/lua/sequence.h" #include "box/lua/tuple.h" #include "lua/utils.h" #include "lua/trigger.h" #include "diag.h" #include "box/box.h" #include "box/schema.h" #include "box/txn.h" static int lbox_sequence_next(struct lua_State *L) { uint32_t seq_id = luaL_checkinteger(L, 1); int64_t result; if (box_sequence_next(seq_id, &result) != 0) luaT_error(L); luaL_pushint64(L, result); return 1; } static int lbox_sequence_set(struct lua_State *L) { uint32_t seq_id = luaL_checkinteger(L, 1); int64_t value = luaL_checkint64(L, 2); if (box_sequence_set(seq_id, value) != 0) luaT_error(L); return 0; } static int lbox_sequence_reset(struct lua_State *L) { uint32_t seq_id = luaL_checkinteger(L, 1); if (box_sequence_reset(seq_id) != 0) luaT_error(L); return 0; } static int lbox_sequence_push_on_alter_event(struct lua_State *L, void *event) { struct txn_stmt *stmt = (struct txn_stmt *) event; if (stmt->old_tuple) { luaT_pushtuple(L, stmt->old_tuple); } else { lua_pushnil(L); } if (stmt->new_tuple) { luaT_pushtuple(L, stmt->new_tuple); } else { lua_pushnil(L); } return 2; } static int lbox_sequence_on_alter(struct lua_State *L) { return lbox_trigger_reset(L, 2, &on_alter_sequence, lbox_sequence_push_on_alter_event, NULL); } void box_lua_sequence_init(struct 
lua_State *L) { static const struct luaL_Reg sequence_internal_lib[] = { {"next", lbox_sequence_next}, {"set", lbox_sequence_set}, {"reset", lbox_sequence_reset}, {"on_alter", lbox_sequence_on_alter}, {NULL, NULL} }; luaL_register(L, "box.internal.sequence", sequence_internal_lib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/box/lua/session.lua0000664000000000000000000000065713306560010020536 0ustar rootroot-- session.lua local session = box.session setmetatable(session, { __index = function(tbl, idx) if idx ~= 'storage' then return end local sid = session.id() local mt = getmetatable(tbl) if mt.aggregate_storage[ sid ] == nil then mt.aggregate_storage[ sid ] = {} end return mt.aggregate_storage[ sid ] end, aggregate_storage = {} }) tarantool_1.9.1.26.g63eb81e3c/src/box/error.h0000664000000000000000000001460613306560010017070 0ustar rootroot#ifndef TARANTOOL_BOX_ERROR_H_INCLUDED #define TARANTOOL_BOX_ERROR_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "errcode.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct error * BuildClientError(const char *file, unsigned line, uint32_t errcode, ...); struct error * BuildAccessDeniedError(const char *file, unsigned int line, const char *access_type, const char *object_type, const char *object_name, const char *user_name); /** \cond public */ struct error; /** * Error - contains information about error. */ typedef struct error box_error_t; /** * Return the error type, e.g. "ClientError", "SocketError", etc. * \param error * \return not-null string */ const char * box_error_type(const box_error_t *error); /** * Return IPROTO error code * \param error error * \return enum box_error_code */ uint32_t box_error_code(const box_error_t *error); /** * Return the error message * \param error error * \return not-null string */ const char * box_error_message(const box_error_t *error); /** * Get the information about the last API call error. * * The Tarantool error handling works most like libc's errno. All API calls * return -1 or NULL in the event of error. An internal pointer to * box_error_t type is set by API functions to indicate what went wrong. * This value is only significant if API call failed (returned -1 or NULL). * * Successful function can also touch the last error in some * cases. You don't have to clear the last error before calling * API functions. The returned object is valid only until next * call to **any** API function. 
* * You must set the last error using box_error_set() in your stored C * procedures if you want to return a custom error message. * You can re-throw the last API error to IPROTO client by keeping * the current value and returning -1 to Tarantool from your * stored procedure. * * \return last error. */ box_error_t * box_error_last(void); /** * Clear the last error. */ void box_error_clear(void); /** * Set the last error. * * \param code IPROTO error code (enum \link box_error_code \endlink) * \param format (const char * ) - printf()-like format string * \param ... - format arguments * \returns -1 for convention use * * \sa enum box_error_code */ int box_error_set(const char *file, unsigned line, uint32_t code, const char *format, ...); /** * A backward-compatible API define. */ #define box_error_raise(code, format, ...) \ box_error_set(__FILE__, __LINE__, code, format, ##__VA_ARGS__) /** \endcond public */ extern const struct type_info type_ClientError; extern const struct type_info type_XlogError; extern const struct type_info type_AccessDeniedError; #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" struct rmean; extern "C" struct rmean *rmean_error; enum rmean_error_name { RMEAN_ERROR, RMEAN_ERROR_LAST }; extern const char *rmean_error_strings[RMEAN_ERROR_LAST]; class ClientError: public Exception { public: virtual void raise() { throw this; } virtual void log() const; int errcode() const { return m_errcode; } ClientError(const char *file, unsigned line, uint32_t errcode, ...); static uint32_t get_errcode(const struct error *e); /* client errno code */ int m_errcode; protected: ClientError(const type_info *type, const char *file, unsigned line, uint32_t errcode); }; class LoggedError: public ClientError { public: template LoggedError(const char *file, unsigned line, uint32_t errcode, Args ... args) : ClientError(file, line, errcode, args...) 
{ /* TODO: actually calls ClientError::log */ log(); } }; /** * A special type of exception which must be used * for all access denied errors, since it invokes audit triggers. */ class AccessDeniedError: public ClientError { public: AccessDeniedError(const char *file, unsigned int line, const char *access_type, const char *object_type, const char *object_name, const char *user_name); ~AccessDeniedError() { free(m_object_name); } const char * object_type() { return m_object_type; } const char * object_name() { return m_object_name?:"(nil)"; } const char * access_type() { return m_access_type; } private: /** Type of object the required access was denied to */ const char *m_object_type; /** Name of object the required access was denied to */ char *m_object_name; /** Type of declined access */ const char *m_access_type; }; /** * XlogError is raised when there is an error with contents * of the data directory or a log file. A special subclass * of exception is introduced to gracefully skip such errors * in force_recovery = true mode. */ struct XlogError: public Exception { XlogError(const char *file, unsigned line, const char *format, va_list ap) :Exception(&type_XlogError, file, line) { error_vformat_msg(this, format, ap); } XlogError(const struct type_info *type, const char *file, unsigned line) :Exception(type, file, line) { } virtual void raise() { throw this; } }; #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_ERROR_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/sequence.h0000664000000000000000000001013313306560010017536 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_SEQUENCE_H #define INCLUDES_TARANTOOL_BOX_SEQUENCE_H /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include "user_def.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct iterator; /** Sequence metadata. */ struct sequence_def { /** Sequence id. */ uint32_t id; /** Owner of the sequence. */ uint32_t uid; /** * The value added to the sequence at each step. * If it is positive, the sequence is ascending, * otherwise it is descending. */ int64_t step; /** Min sequence value. */ int64_t min; /** Max sequence value. */ int64_t max; /** Initial sequence value. */ int64_t start; /** Number of values to preallocate. Not implemented yet. */ int64_t cache; /** * If this flag is set, the sequence will wrap * upon reaching min or max value by a descending * or ascending sequence respectively. */ bool cycle; /** Sequence name. */ char name[0]; }; /** Sequence object. */ struct sequence { /** Sequence definition. 
*/ struct sequence_def *def; /** Set if the sequence is automatically generated. */ bool is_generated; /** Cached runtime access information. */ struct access access[BOX_USER_MAX]; }; static inline size_t sequence_def_sizeof(uint32_t name_len) { return sizeof(struct sequence_def) + name_len + 1; } /** Init sequence subsystem. */ void sequence_init(void); /** Destroy sequence subsystem. */ void sequence_free(void); /** Reset a sequence. */ void sequence_reset(struct sequence *seq); /** * Set a sequence value. * * Return 0 on success, -1 on memory allocation failure. */ int sequence_set(struct sequence *seq, int64_t value); /** * Update the sequence if the given value is newer than * the last generated value. * * Return 0 on success, -1 on memory allocation failure. */ int sequence_update(struct sequence *seq, int64_t value); /** * Advance a sequence. * * On success, return 0 and assign the next sequence to * @result, otherwise return -1 and set diag. * * The function may fail for two reasons: * - sequence isn't cyclic and has reached its limit * - memory allocation failure */ int sequence_next(struct sequence *seq, int64_t *result); /** * Check whether or not the current user can be granted * access to the sequence. */ int access_check_sequence(struct sequence *seq); /** * Create an iterator over sequence data. * * The iterator creates a snapshot of sequence data and walks * over it, i.e. updates done after the iterator was open are * invisible. Used to make a snapshot of _sequence_data space. */ struct snapshot_iterator * sequence_data_iterator_create(void); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_BOX_SEQUENCE_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/txn.c0000664000000000000000000002642513306565107016561 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/

#include "txn.h"
#include "engine.h"
#include "tuple.h"
#include "journal.h"
#include /* NOTE(review): header name lost in archive extraction -- presumably <fiber.h>; confirm against upstream txn.c. */
#include "xrow.h"

/*
 * WAL write duration (seconds) above which a warning is logged;
 * compared against the measured write time in txn_write_to_wal().
 */
double too_long_threshold;

/* Bind (or, with txn == NULL, unbind) a transaction to a fiber. */
static inline void
fiber_set_txn(struct fiber *fiber, struct txn *txn)
{
	fiber_set_key(fiber, FIBER_KEY_TXN, (void *) txn);
}

/**
 * Attach a redo (WAL) row to a statement. If the request already
 * carries an xrow header (it came over the network), reuse it as
 * is; otherwise encode a fresh row on the fiber region.
 * @retval 0 on success, -1 on OOM or encoding failure (diag set).
 */
static int
txn_add_redo(struct txn_stmt *stmt, struct request *request)
{
	stmt->row = request->header;
	if (request->header != NULL)
		return 0;

	/* Create a redo log row for Lua requests */
	struct xrow_header *row;
	row = region_alloc_object(&fiber()->gc, struct xrow_header);
	if (row == NULL) {
		diag_set(OutOfMemory, sizeof(*row),
			 "region", "struct xrow_header");
		return -1;
	}
	/* Initialize members explicitly to save time on memset() */
	row->type = request->type;
	row->replica_id = 0;
	row->lsn = 0;
	row->sync = 0;
	row->tm = 0;
	row->bodycnt = xrow_encode_dml(request, row->body);
	if (row->bodycnt < 0)
		return -1;
	stmt->row = row;
	return 0;
}

/** Initialize a new stmt object within txn. */
static struct txn_stmt *
txn_stmt_new(struct txn *txn)
{
	struct txn_stmt *stmt;
	stmt = region_alloc_object(&fiber()->gc, struct txn_stmt);
	if (stmt == NULL) {
		diag_set(OutOfMemory, sizeof(*stmt),
			 "region", "struct txn_stmt");
		return NULL;
	}

	/* Initialize members explicitly to save time on memset() */
	stmt->space = NULL;
	stmt->old_tuple = NULL;
	stmt->new_tuple = NULL;
	stmt->engine_savepoint = NULL;
	stmt->row = NULL;

	/* Set the savepoint for statement rollback.
*/
	txn->sub_stmt_begin[txn->in_sub_stmt] = stailq_last(&txn->stmts);
	txn->in_sub_stmt++;

	stailq_add_tail_entry(&txn->stmts, stmt, next);
	return stmt;
}

/**
 * Undo all statements newer than savepoint @svp: cut them off the
 * transaction statement list and roll each back in the engine in
 * reverse order, decrementing the pending WAL row counter for
 * every statement that had a redo row attached.
 */
static void
txn_rollback_to_svp(struct txn *txn, struct stailq_entry *svp)
{
	struct txn_stmt *stmt;
	struct stailq rollback;
	stailq_cut_tail(&txn->stmts, svp, &rollback);
	stailq_reverse(&rollback);
	stailq_foreach_entry(stmt, &rollback, next) {
		engine_rollback_statement(txn->engine, txn, stmt);
		if (stmt->row != NULL) {
			assert(txn->n_rows > 0);
			txn->n_rows--;
			stmt->row = NULL;
		}
		/* A NULL space marks the statement as rolled back. */
		stmt->space = NULL;
	}
}

/**
 * Start a new transaction on the current fiber.
 * @param is_autocommit true for an implicit transaction that must
 *        end together with its single statement (see
 *        txn_commit_stmt()/txn_rollback_stmt()).
 * @retval new txn bound to the fiber, or NULL on OOM (diag set).
 */
struct txn *
txn_begin(bool is_autocommit)
{
	/* Monotonically growing process-local transaction id. */
	static int64_t txn_id = 0;
	assert(! in_txn());
	struct txn *txn = region_alloc_object(&fiber()->gc, struct txn);
	if (txn == NULL) {
		diag_set(OutOfMemory, sizeof(*txn), "region", "struct txn");
		return NULL;
	}
	/* Initialize members explicitly to save time on memset() */
	stailq_create(&txn->stmts);
	txn->n_rows = 0;
	txn->is_autocommit = is_autocommit;
	txn->has_triggers = false;
	txn->in_sub_stmt = 0;
	txn->id = ++txn_id;
	txn->signature = -1;
	txn->engine = NULL;
	txn->engine_tx = NULL;
	/* fiber_on_yield/fiber_on_stop initialized by engine on demand */
	fiber_set_txn(fiber(), txn);
	return txn;
}

/**
 * Bind the transaction to a storage engine on its first statement;
 * only one engine may participate in a multi-statement transaction.
 * @retval 0 on success, -1 on cross-engine use (diag set).
 */
int
txn_begin_in_engine(struct engine *engine, struct txn *txn)
{
	if (txn->engine == NULL) {
		assert(stailq_empty(&txn->stmts));
		txn->engine = engine;
		return engine_begin(engine, txn);
	} else if (txn->engine != engine) {
		/**
		 * Only one engine can be used in
		 * a multi-statement transaction currently.
*/
		diag_set(ClientError, ER_CROSS_ENGINE_TRANSACTION);
		return -1;
	}
	return 0;
}

/**
 * Start a new statement, opening an implicit (autocommit)
 * transaction if none is in progress on this fiber.
 * @retval the transaction on success; NULL on error (diag set) --
 *         an implicit transaction is rolled back on failure.
 */
struct txn *
txn_begin_stmt(struct space *space)
{
	struct txn *txn = in_txn();
	if (txn == NULL) {
		txn = txn_begin(true);
		if (txn == NULL)
			return NULL;
	} else if (txn->in_sub_stmt > TXN_SUB_STMT_MAX) {
		/*
		 * NOTE(review): this guard looks off by one --
		 * txn_stmt_new() indexes sub_stmt_begin[in_sub_stmt],
		 * so if that array has exactly TXN_SUB_STMT_MAX slots
		 * the check should be >=. Confirm the array size in
		 * txn.h before changing.
		 */
		diag_set(ClientError, ER_SUB_STMT_MAX);
		return NULL;
	}
	if (trigger_run(&space->on_stmt_begin, txn) != 0)
		goto fail;

	struct engine *engine = space->engine;
	if (txn_begin_in_engine(engine, txn) != 0)
		goto fail;

	struct txn_stmt *stmt = txn_stmt_new(txn);
	if (stmt == NULL)
		goto fail;

	stmt->space = space;
	if (engine_begin_statement(engine, txn) != 0) {
		txn_rollback_stmt();
		return NULL;
	}
	return txn;
fail:
	if (txn->is_autocommit && txn->in_sub_stmt == 0)
		txn_rollback();
	return NULL;
}

/**
 * End a statement. In autocommit mode, end
 * the current transaction as well.
 */
int
txn_commit_stmt(struct txn *txn, struct request *request)
{
	assert(txn->in_sub_stmt > 0);
	/*
	 * Run on_replace triggers. For now, disallow mutation
	 * of tuples in the trigger.
	 */
	struct txn_stmt *stmt = txn_current_stmt(txn);

	/* Create WAL record for the write requests in non-temporary spaces */
	if (!space_is_temporary(stmt->space)) {
		if (txn_add_redo(stmt, request) != 0)
			goto fail;
		++txn->n_rows;
	}
	/*
	 * If there are triggers, and they are not disabled, and
	 * the statement found any rows, run triggers.
* XXX:
	 * - vinyl doesn't set old/new tuple, so triggers don't
	 *   work for it
	 * - perhaps we should run triggers even for deletes which
	 *   doesn't find any rows
	 */
	if (!rlist_empty(&stmt->space->on_replace) &&
	    stmt->space->run_triggers &&
	    (stmt->old_tuple || stmt->new_tuple)) {
		if (trigger_run(&stmt->space->on_replace, txn) != 0)
			goto fail;
	}
	--txn->in_sub_stmt;
	if (txn->is_autocommit && txn->in_sub_stmt == 0)
		return txn_commit(txn);
	return 0;
fail:
	txn_rollback_stmt();
	return -1;
}

/**
 * Flush all accumulated redo rows of @txn to the write-ahead log
 * as a single journal entry.
 * @retval >= 0  journal signature (vclock sum) on success;
 * @retval -1    on write failure -- the whole transaction is
 *               rolled back (cascading rollback) and diag is set.
 */
static int64_t
txn_write_to_wal(struct txn *txn)
{
	assert(txn->n_rows > 0);

	struct journal_entry *req = journal_entry_new(txn->n_rows);
	if (req == NULL)
		return -1;

	/* Collect redo rows; read-only statements carry none. */
	struct txn_stmt *stmt;
	struct xrow_header **row = req->rows;
	stailq_foreach_entry(stmt, &txn->stmts, next) {
		if (stmt->row == NULL)
			continue; /* A read (e.g. select) request */
		*row++ = stmt->row;
	}
	assert(row == req->rows + req->n_rows);

	ev_tstamp start = ev_monotonic_now(loop());
	int64_t res = journal_write(req);
	ev_tstamp stop = ev_monotonic_now(loop());

	if (res < 0) {
		/* Cascading rollback. */
		txn_rollback(); /* Perform our part of cascading rollback. */
		/*
		 * Move fiber to end of event loop to avoid
		 * execution of any new requests before all
		 * pending rollbacks are processed.
		 */
		fiber_reschedule();
		diag_set(ClientError, ER_WAL_IO);
		diag_log();
	} else if (stop - start > too_long_threshold) {
		say_warn("too long WAL write: %d rows at LSN %lld: %.3f sec",
			 txn->n_rows, res - txn->n_rows + 1, stop - start);
	}
	/*
	 * Use vclock_sum() from WAL writer as transaction signature.
	 */
	return res;
}

/**
 * Commit the transaction: prepare it in the engine, write its
 * redo rows to the WAL, run commit triggers and release the txn.
 * @retval 0 on success, -1 on failure (transaction rolled back).
 */
int
txn_commit(struct txn *txn)
{
	assert(txn == in_txn());

	/* Only an empty transaction may have no engine. */
	assert(stailq_empty(&txn->stmts) || txn->engine);
	/* Do transaction conflict resolving */
	if (txn->engine) {
		if (engine_prepare(txn->engine, txn) != 0)
			goto fail;

		if (txn->n_rows > 0) {
			txn->signature = txn_write_to_wal(txn);
			if (txn->signature < 0)
				goto fail;
		}
		/*
		 * The transaction is in the binary log. No action below
		 * may throw.
In case an error has happened, there is
		 * no other option but terminate.
		 */
		if (txn->has_triggers &&
		    trigger_run(&txn->on_commit, txn) != 0) {
			diag_log();
			unreachable();
			panic("commit trigger failed");
		}
		engine_commit(txn->engine, txn);
	}
	TRASH(txn);
	/** Free volatile txn memory. */
	fiber_gc();
	fiber_set_txn(fiber(), NULL);
	return 0;
fail:
	txn_rollback();
	return -1;
}

/**
 * Roll back the current (sub-)statement; in autocommit mode the
 * whole implicit transaction is rolled back. No-op if no
 * transaction or no open statement.
 */
void
txn_rollback_stmt()
{
	struct txn *txn = in_txn();
	if (txn == NULL || txn->in_sub_stmt == 0)
		return;
	txn->in_sub_stmt--;
	if (txn->is_autocommit && txn->in_sub_stmt == 0)
		return txn_rollback();
	txn_rollback_to_svp(txn, txn->sub_stmt_begin[txn->in_sub_stmt]);
}

/** Roll back and destroy the fiber's transaction, if any. */
void
txn_rollback()
{
	struct txn *txn = in_txn();
	if (txn == NULL)
		return;
	/* Rollback triggers must not throw. */
	if (txn->has_triggers &&
	    trigger_run(&txn->on_rollback, txn) != 0) {
		diag_log();
		unreachable();
		panic("rollback trigger failed");
	}
	if (txn->engine)
		engine_rollback(txn->engine, txn);
	TRASH(txn);
	/** Free volatile txn memory. */
	fiber_gc();
	fiber_set_txn(fiber(), NULL);
}

/**
 * Fail with ER_UNSUPPORTED unless @txn is an autocommit
 * transaction holding at most one statement; @where names the
 * caller in the error message.
 */
int
txn_check_singlestatement(struct txn *txn, const char *where)
{
	if (!txn->is_autocommit ||
	    stailq_last(&txn->stmts) != stailq_first(&txn->stmts)) {
		diag_set(ClientError, ER_UNSUPPORTED,
			 where, "multi-statement transactions");
		return -1;
	}
	return 0;
}

/** Public API: id of the current transaction, or -1 if none. */
int64_t
box_txn_id(void)
{
	struct txn *txn = in_txn();
	if (txn != NULL)
		return txn->id;
	else
		return -1;
}

/** Public API: true if this fiber has an active transaction. */
bool
box_txn()
{
	return in_txn() != NULL;
}

/** Public API: begin an explicit transaction on this fiber. */
int
box_txn_begin()
{
	if (in_txn()) {
		diag_set(ClientError, ER_ACTIVE_TRANSACTION);
		return -1;
	}
	if (txn_begin(false) == NULL)
		return -1;
	return 0;
}

/** Public API: commit the current transaction, if any. */
int
box_txn_commit()
{
	struct txn *txn = in_txn();
	/**
	 * COMMIT is like BEGIN or ROLLBACK
	 * a "transaction-initiating statement".
	 * Do nothing if transaction is not started,
	 * it's the same as BEGIN + COMMIT.
	 */
	if (!
txn)
		return 0;
	if (txn->in_sub_stmt) {
		/* Commit is not allowed from inside a sub-statement. */
		diag_set(ClientError, ER_COMMIT_IN_SUB_STMT);
		return -1;
	}
	return txn_commit(txn);
}

/** Public API: roll back the current transaction, if any. */
int
box_txn_rollback()
{
	struct txn *txn = in_txn();
	if (txn && txn->in_sub_stmt) {
		diag_set(ClientError, ER_ROLLBACK_IN_SUB_STMT);
		return -1;
	}
	txn_rollback(); /* doesn't throw */
	return 0;
}

/**
 * Public API: allocate transaction-lifetime memory on the fiber
 * region, aligned suitably for any of the union's member types.
 * Freed automatically with the rest of the region at txn end.
 */
void *
box_txn_alloc(size_t size)
{
	union natural_align {
		void *p;
		double lf;
		long l;
	};
	return region_aligned_alloc(&fiber()->gc, size,
				    alignof(union natural_align));
}

/**
 * Public API: create a savepoint recording the current last
 * statement and sub-statement depth.
 * @retval savepoint, or NULL when there is no transaction or on
 *         OOM (diag set).
 */
box_txn_savepoint_t *
box_txn_savepoint()
{
	struct txn *txn = in_txn();
	if (txn == NULL) {
		diag_set(ClientError, ER_SAVEPOINT_NO_TRANSACTION);
		return NULL;
	}
	struct txn_savepoint *svp =
		(struct txn_savepoint *) region_alloc_object(&fiber()->gc,
							struct txn_savepoint);
	if (svp == NULL) {
		diag_set(OutOfMemory, sizeof(*svp),
			 "region", "struct txn_savepoint");
		return NULL;
	}
	svp->stmt = stailq_last(&txn->stmts);
	svp->in_sub_stmt = txn->in_sub_stmt;
	return svp;
}

/**
 * Public API: roll back to a previously created savepoint.
 * Fails with ER_NO_SUCH_SAVEPOINT if the statement the savepoint
 * was taken at has itself been rolled back (its space pointer was
 * cleared by txn_rollback_to_svp()) or if the sub-statement depth
 * no longer matches.
 */
int
box_txn_rollback_to_savepoint(box_txn_savepoint_t *svp)
{
	struct txn *txn = in_txn();
	if (txn == NULL) {
		diag_set(ClientError, ER_SAVEPOINT_NO_TRANSACTION);
		return -1;
	}
	struct txn_stmt *stmt = svp->stmt == NULL ? NULL :
		stailq_entry(svp->stmt, struct txn_stmt, next);
	if (stmt != NULL && stmt->space == NULL) {
		/*
		 * The statement at which this savepoint was
		 * created has been rolled back.
		 */
		diag_set(ClientError, ER_NO_SUCH_SAVEPOINT);
		return -1;
	}
	if (svp->in_sub_stmt != txn->in_sub_stmt) {
		diag_set(ClientError, ER_NO_SUCH_SAVEPOINT);
		return -1;
	}
	txn_rollback_to_svp(txn, svp->stmt);
	return 0;
}
tarantool_1.9.1.26.g63eb81e3c/src/box/xrow_io.cc0000664000000000000000000000622213306560010017556 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1.
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/

#include "xrow_io.h"
#include "xrow.h"
#include "coio.h"
#include "coio_buf.h"
#include "error.h"
#include "msgpuck/msgpuck.h"

/**
 * Read one xrow packet from @coio into @in and decode it into
 * @row. Wire format: a MsgPack uint length prefix followed by
 * the encoded header and body. Yields until enough bytes arrive;
 * raises ClientError on invalid MsgPack, or on I/O error.
 */
void
coio_read_xrow(struct ev_io *coio, struct ibuf *in, struct xrow_header *row)
{
	/* Read fixed header */
	if (ibuf_used(in) < 1)
		coio_breadn(coio, in, 1);

	/* Read length */
	if (mp_typeof(*in->rpos) != MP_UINT) {
		tnt_raise(ClientError, ER_INVALID_MSGPACK,
			  "packet length");
	}
	/* mp_check_uint() returns how many more bytes are needed. */
	ssize_t to_read = mp_check_uint(in->rpos, in->wpos);
	if (to_read > 0)
		coio_breadn(coio, in, to_read);

	uint32_t len = mp_decode_uint((const char **) &in->rpos);

	/* Read header and body */
	to_read = len - ibuf_used(in);
	if (to_read > 0)
		coio_breadn(coio, in, to_read);

	xrow_header_decode_xc(row, (const char **) &in->rpos, in->rpos + len);
}

/**
 * Same as coio_read_xrow(), but raises a timeout error if the
 * full packet does not arrive within @timeout; the remaining
 * delay is recomputed after each partial read.
 */
void
coio_read_xrow_timeout_xc(struct ev_io *coio, struct ibuf *in,
			  struct xrow_header *row, ev_tstamp timeout)
{
	ev_tstamp start, delay;
	coio_timeout_init(&start, &delay, timeout);
	/* Read fixed header */
	if (ibuf_used(in) < 1)
		coio_breadn_timeout(coio, in, 1, delay);
	coio_timeout_update(start, &delay);

	/* Read length */
	if (mp_typeof(*in->rpos) != MP_UINT) {
		tnt_raise(ClientError, ER_INVALID_MSGPACK,
			  "packet length");
	}
	ssize_t to_read = mp_check_uint(in->rpos, in->wpos);
	if (to_read > 0)
		coio_breadn_timeout(coio, in, to_read, delay);
	coio_timeout_update(start, &delay);

	uint32_t len = mp_decode_uint((const char **) &in->rpos);

	/* Read header and body */
	to_read = len - ibuf_used(in);
	if (to_read > 0)
		coio_breadn_timeout(coio, in, to_read, delay);
	/*
	 * NOTE(review): unlike the reads above, this last read is
	 * not followed by coio_timeout_update(); presumably
	 * harmless since decoding follows immediately -- confirm.
	 */
	xrow_header_decode_xc(row, (const char **) &in->rpos, in->rpos + len);
}

/** Encode @row into an iovec and write it to @coio (blocking). */
void
coio_write_xrow(struct ev_io *coio, const struct xrow_header *row)
{
	struct iovec iov[XROW_IOVMAX];
	int iovcnt = xrow_to_iovec_xc(row, iov);
	coio_writev(coio, iov, iovcnt, 0);
}
tarantool_1.9.1.26.g63eb81e3c/src/box/field_def.h0000664000000000000000000000574413306565107017653 0ustar rootroot#ifndef TARANTOOL_BOX_FIELD_DEF_H_INCLUDED #define TARANTOOL_BOX_FIELD_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool
AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "opt_def.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** \cond public */ /* * Possible field data types. Can't use STRS/ENUM macros for them, * since there is a mismatch between enum name (STRING) and type * name literal ("STR"). STR is already used as Objective C type. */ enum field_type { FIELD_TYPE_ANY = 0, FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING, FIELD_TYPE_NUMBER, FIELD_TYPE_INTEGER, FIELD_TYPE_BOOLEAN, FIELD_TYPE_SCALAR, FIELD_TYPE_ARRAY, FIELD_TYPE_MAP, field_type_MAX }; /** \endcond public */ extern const char *field_type_strs[]; /** Check if @a type1 can store values of @a type2. 
*/ bool field_type1_contains_type2(enum field_type type1, enum field_type type2); /** * Get field type by name */ enum field_type field_type_by_name(const char *name, size_t len); extern const struct opt_def field_def_reg[]; extern const struct field_def field_def_default; /** * @brief Field definition * Contains information about of one tuple field. */ struct field_def { /** * Field type of an indexed field. * If a field participates in at least one of space indexes * then its type is stored in this member. * If a field does not participate in an index * then UNKNOWN is stored for it. */ enum field_type type; /** 0-terminated field name. */ char *name; /** True, if a field can store NULL. */ bool is_nullable; }; #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_FIELD_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/engine.h0000664000000000000000000002470713306565107017223 0ustar rootroot#ifndef TARANTOOL_BOX_ENGINE_H_INCLUDED #define TARANTOOL_BOX_ENGINE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "diag.h" #include "error.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct engine; struct txn; struct txn_stmt; struct space; struct space_def; struct vclock; struct xstream; extern struct rlist engines; /** * Aggregated memory statistics. Used by box.info.memory(). */ struct engine_memory_stat { /** Size of memory used for storing user data. */ size_t data; /** Size of memory used for indexing user data. */ size_t index; /** Size of memory used for caching user data. */ size_t cache; /** Size of memory used by active transactions. */ size_t tx; }; typedef int engine_backup_cb(const char *path, void *arg); struct engine_vtab { /** Destroy an engine instance. */ void (*shutdown)(struct engine *); /** Allocate a new space instance. */ struct space *(*create_space)(struct engine *engine, struct space_def *def, struct rlist *key_list); /** * Write statements stored in checkpoint @vclock to @stream. */ int (*join)(struct engine *engine, struct vclock *vclock, struct xstream *stream); /** * Begin a new single or multi-statement transaction. * Called on first statement in a transaction, not when * a user said begin(). Effectively it means that * transaction in the engine begins with the first * statement. */ int (*begin)(struct engine *, struct txn *); /** * Begine one statement in existing transaction. 
*/ int (*begin_statement)(struct engine *, struct txn *); /** * Called before a WAL write is made to prepare * a transaction for commit in the engine. */ int (*prepare)(struct engine *, struct txn *); /** * End the transaction in the engine, the transaction * has been successfully written to the WAL. * This method can't throw: if any error happens here, * there is no better option than panic. */ void (*commit)(struct engine *, struct txn *); /* * Called to roll back effects of a statement if an * error happens, e.g., in a trigger. */ void (*rollback_statement)(struct engine *, struct txn *, struct txn_stmt *); /* * Roll back and end the transaction in the engine. */ void (*rollback)(struct engine *, struct txn *); /** * Bootstrap an empty data directory */ int (*bootstrap)(struct engine *); /** * Begin initial recovery from checkpoint or dirty disk data. * On local recovery @recovery_vclock points to the vclock * used for assigning LSNs to statements replayed from WAL. * On remote recovery, it is set to NULL. */ int (*begin_initial_recovery)(struct engine *engine, const struct vclock *recovery_vclock); /** * Notify engine about a start of recovering from WALs * that could be local WALs during local recovery * of WAL catch up durin join on slave side */ int (*begin_final_recovery)(struct engine *); /** * Inform the engine about the end of recovery from the * binary log. */ int (*end_recovery)(struct engine *); /** * Begin a two-phase checkpoint creation in this * engine (snapshot is a memtx idea of a checkpoint). * Must not yield. */ int (*begin_checkpoint)(struct engine *); /** * Wait for a checkpoint to complete. */ int (*wait_checkpoint)(struct engine *, struct vclock *); /** * All engines prepared their checkpoints, * fix up the changes. */ void (*commit_checkpoint)(struct engine *, struct vclock *); /** * An error in one of the engines, abort checkpoint. 
*/ void (*abort_checkpoint)(struct engine *); /** * Remove files that are not needed to recover * from checkpoint with @lsn or newer. * * If this function returns a non-zero value, garbage * collection is aborted, i.e. this method isn't called * for other engines and xlog files aren't deleted. * * Used to abort garbage collection in case memtx engine * fails to delete a snapshot file, because we recover * checkpoint list by scanning the snapshot directory. */ int (*collect_garbage)(struct engine *engine, int64_t lsn); /** * Backup callback. It is supposed to call @cb for each file * that needs to be backed up in order to restore from the * checkpoint @vclock. */ int (*backup)(struct engine *engine, struct vclock *vclock, engine_backup_cb cb, void *cb_arg); /** * Accumulate engine memory statistics. */ void (*memory_stat)(struct engine *, struct engine_memory_stat *); /** * Check definition of a new space for engine-specific * limitations. E.g. not all engines support temporary * tables. */ int (*check_space_def)(struct space_def *); }; struct engine { /** Virtual function table. */ const struct engine_vtab *vtab; /** Engine name. */ const char *name; /** Engine id. */ uint32_t id; /** Used for search for engine by name. */ struct rlist link; }; /** Register engine engine instance. */ void engine_register(struct engine *engine); /** Call a visitor function on every registered engine. */ #define engine_foreach(engine) rlist_foreach_entry(engine, &engines, link) /** Find engine engine by name. */ struct engine * engine_by_name(const char *name); /** Find engine by name and raise error if not found. 
*/ static inline struct engine * engine_find(const char *name) { struct engine *engine = engine_by_name(name); if (engine == NULL) { diag_set(ClientError, ER_NO_SUCH_ENGINE, name); diag_log(); } return engine; } static inline struct space * engine_create_space(struct engine *engine, struct space_def *def, struct rlist *key_list) { return engine->vtab->create_space(engine, def, key_list); } static inline int engine_begin(struct engine *engine, struct txn *txn) { return engine->vtab->begin(engine, txn); } static inline int engine_begin_statement(struct engine *engine, struct txn *txn) { return engine->vtab->begin_statement(engine, txn); } static inline int engine_prepare(struct engine *engine, struct txn *txn) { return engine->vtab->prepare(engine, txn); } static inline void engine_commit(struct engine *engine, struct txn *txn) { engine->vtab->commit(engine, txn); } static inline void engine_rollback_statement(struct engine *engine, struct txn *txn, struct txn_stmt *stmt) { engine->vtab->rollback_statement(engine, txn, stmt); } static inline void engine_rollback(struct engine *engine, struct txn *txn) { engine->vtab->rollback(engine, txn); } static inline int engine_check_space_def(struct engine *engine, struct space_def *def) { return engine->vtab->check_space_def(def); } /** * Shutdown all engine factories. */ void engine_shutdown(void); /** * Initialize an empty data directory */ int engine_bootstrap(void); /** * Called at the start of recovery. */ int engine_begin_initial_recovery(const struct vclock *recovery_vclock); /** * Called in the middle of JOIN stage, * when xlog catch-up process is started */ int engine_begin_final_recovery(void); /** * Called at the end of recovery. * Build secondary keys in all spaces. */ int engine_end_recovery(void); /** * Feed checkpoint data as join events to the replicas. * (called on the master). */ int engine_join(struct vclock *vclock, struct xstream *stream); int engine_begin_checkpoint(void); /** * Create a checkpoint. 
*/ int engine_commit_checkpoint(struct vclock *vclock); void engine_abort_checkpoint(void); int engine_collect_garbage(int64_t lsn); int engine_backup(struct vclock *vclock, engine_backup_cb cb, void *cb_arg); void engine_memory_stat(struct engine_memory_stat *stat); #if defined(__cplusplus) } /* extern "C" */ static inline struct engine * engine_find_xc(const char *name) { struct engine *engine = engine_find(name); if (engine == NULL) diag_raise(); return engine; } static inline struct space * engine_create_space_xc(struct engine *engine, struct space_def *def, struct rlist *key_list) { struct space *space = engine_create_space(engine, def, key_list); if (space == NULL) diag_raise(); return space; } static inline void engine_begin_xc(struct engine *engine, struct txn *txn) { if (engine_begin(engine, txn) != 0) diag_raise(); } static inline void engine_begin_statement_xc(struct engine *engine, struct txn *txn) { if (engine_begin_statement(engine, txn) != 0) diag_raise(); } static inline void engine_prepare_xc(struct engine *engine, struct txn *txn) { if (engine_prepare(engine, txn) != 0) diag_raise(); } static inline void engine_check_space_def_xc(struct engine *engine, struct space_def *def) { if (engine_check_space_def(engine, def) != 0) diag_raise(); } static inline void engine_bootstrap_xc(void) { if (engine_bootstrap() != 0) diag_raise(); } static inline void engine_begin_initial_recovery_xc(const struct vclock *recovery_vclock) { if (engine_begin_initial_recovery(recovery_vclock) != 0) diag_raise(); } static inline void engine_begin_final_recovery_xc(void) { if (engine_begin_final_recovery() != 0) diag_raise(); } static inline void engine_end_recovery_xc(void) { if (engine_end_recovery() != 0) diag_raise(); } static inline void engine_join_xc(struct vclock *vclock, struct xstream *stream) { if (engine_join(vclock, stream) != 0) diag_raise(); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_ENGINE_H_INCLUDED */ 
tarantool_1.9.1.26.g63eb81e3c/src/box/coll_def.c0000664000000000000000000000651413306565107017514 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "coll_def.h" const char *coll_type_strs[] = { "ICU" }; const char *coll_icu_on_off_strs[] = { "DEFAULT", "ON", "OFF" }; const char *coll_icu_alternate_handling_strs[] = { "DEFAULT", "NON_IGNORABLE", "SHIFTED" }; const char *coll_icu_case_first_strs[] = { "DEFAULT", "OFF", "UPPER_FIRST", "LOWER_FIRST" }; const char *coll_icu_strength_strs[] = { "DEFAULT", "PRIMARY", "SECONDARY", "TERTIARY", "QUATERNARY", "IDENTICAL" }; static int64_t icu_on_off_from_str(const char *str, uint32_t len) { return strnindex(coll_icu_on_off_strs + 1, str, len, coll_icu_on_off_MAX - 1) + 1; } static int64_t icu_alternate_handling_from_str(const char *str, uint32_t len) { return strnindex(coll_icu_alternate_handling_strs + 1, str, len, coll_icu_alternate_handling_MAX - 1) + 1; } static int64_t icu_case_first_from_str(const char *str, uint32_t len) { return strnindex(coll_icu_case_first_strs + 1, str, len, coll_icu_case_first_MAX - 1) + 1; } static int64_t icu_strength_from_str(const char *str, uint32_t len) { return strnindex(coll_icu_strength_strs + 1, str, len, coll_icu_strength_MAX - 1) + 1; } const struct opt_def coll_icu_opts_reg[] = { OPT_DEF_ENUM("french_collation", coll_icu_on_off, struct coll_icu_def, french_collation, icu_on_off_from_str), OPT_DEF_ENUM("alternate_handling", coll_icu_alternate_handling, struct coll_icu_def, alternate_handling, icu_alternate_handling_from_str), OPT_DEF_ENUM("case_first", coll_icu_case_first, struct coll_icu_def, case_first, icu_case_first_from_str), OPT_DEF_ENUM("case_level", coll_icu_on_off, struct coll_icu_def, case_level, icu_on_off_from_str), OPT_DEF_ENUM("normalization_mode", coll_icu_on_off, struct coll_icu_def, normalization_mode, icu_on_off_from_str), OPT_DEF_ENUM("strength", coll_icu_strength, struct coll_icu_def, strength, icu_strength_from_str), OPT_DEF_ENUM("numeric_collation", coll_icu_on_off, struct coll_icu_def, numeric_collation, icu_on_off_from_str), OPT_END, }; 
tarantool_1.9.1.26.g63eb81e3c/src/box/space_def.c0000664000000000000000000001452613306565107017660 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "space_def.h" #include "diag.h" #include "error.h" const struct space_opts space_opts_default = { /* .temporary = */ false, }; const struct opt_def space_opts_reg[] = { OPT_DEF("temporary", OPT_BOOL, struct space_opts, temporary), OPT_END, }; /** * Size of the space_def. * @param name_len Length of the space name. * @param field_names_size Size of all names. * @param field_count Space field count. * @param[out] names_offset Offset from the beginning of a def to * a field names memory. * @param[out] fields_offset Offset from the beginning of a def to * a fields array. 
* @retval Size in bytes. */ static inline size_t space_def_sizeof(uint32_t name_len, uint32_t field_names_size, uint32_t field_count, uint32_t *names_offset, uint32_t *fields_offset) { *fields_offset = sizeof(struct space_def) + name_len + 1; *names_offset = *fields_offset + field_count * sizeof(struct field_def); return *names_offset + field_names_size; } struct space_def * space_def_dup(const struct space_def *src) { uint32_t names_offset, fields_offset; uint32_t field_names_size = 0; for (uint32_t i = 0; i < src->field_count; ++i) field_names_size += strlen(src->fields[i].name) + 1; size_t size = space_def_sizeof(strlen(src->name), field_names_size, src->field_count, &names_offset, &fields_offset); struct space_def *ret = (struct space_def *) malloc(size); if (ret == NULL) { diag_set(OutOfMemory, size, "malloc", "ret"); return NULL; } memcpy(ret, src, size); char *name_pos = (char *)ret + names_offset; if (src->field_count > 0) { ret->fields = (struct field_def *)((char *)ret + fields_offset); for (uint32_t i = 0; i < src->field_count; ++i) { ret->fields[i].name = name_pos; name_pos += strlen(name_pos) + 1; } } tuple_dictionary_ref(ret->dict); return ret; } struct space_def * space_def_new(uint32_t id, uint32_t uid, uint32_t exact_field_count, const char *name, uint32_t name_len, const char *engine_name, uint32_t engine_len, const struct space_opts *opts, const struct field_def *fields, uint32_t field_count) { uint32_t field_names_size = 0; for (uint32_t i = 0; i < field_count; ++i) field_names_size += strlen(fields[i].name) + 1; uint32_t names_offset, fields_offset; size_t size = space_def_sizeof(name_len, field_names_size, field_count, &names_offset, &fields_offset); struct space_def *def = (struct space_def *) malloc(size); if (def == NULL) { diag_set(OutOfMemory, size, "malloc", "def"); return NULL; } assert(name_len <= BOX_NAME_MAX); assert(engine_len <= ENGINE_NAME_MAX); def->dict = tuple_dictionary_new(fields, field_count); if (def->dict == NULL) { 
free(def); return NULL; } def->id = id; def->uid = uid; def->exact_field_count = exact_field_count; memcpy(def->name, name, name_len); def->name[name_len] = 0; memcpy(def->engine_name, engine_name, engine_len); def->engine_name[engine_len] = 0; def->opts = *opts; def->field_count = field_count; if (field_count == 0) { def->fields = NULL; } else { char *name_pos = (char *)def + names_offset; def->fields = (struct field_def *)((char *)def + fields_offset); for (uint32_t i = 0; i < field_count; ++i) { def->fields[i].name = name_pos; uint32_t len = strlen(fields[i].name); memcpy(def->fields[i].name, fields[i].name, len); def->fields[i].name[len] = 0; def->fields[i].type = fields[i].type; name_pos += len + 1; def->fields[i].is_nullable = fields[i].is_nullable; } } return def; } int space_def_check_compatibility(const struct space_def *old_def, const struct space_def *new_def, bool is_space_empty) { if (strcmp(new_def->engine_name, old_def->engine_name) != 0) { diag_set(ClientError, ER_ALTER_SPACE, old_def->name, "can not change space engine"); return -1; } if (new_def->id != old_def->id) { diag_set(ClientError, ER_ALTER_SPACE, old_def->name, "space id is immutable"); return -1; } if (is_space_empty) return 0; if (new_def->exact_field_count != 0 && new_def->exact_field_count != old_def->exact_field_count) { diag_set(ClientError, ER_ALTER_SPACE, old_def->name, "can not change field count on a non-empty space"); return -1; } if (new_def->opts.temporary != old_def->opts.temporary) { diag_set(ClientError, ER_ALTER_SPACE, old_def->name, "can not switch temporary flag on a non-empty space"); return -1; } uint32_t field_count = MIN(new_def->field_count, old_def->field_count); for (uint32_t i = 0; i < field_count; ++i) { enum field_type old_type = old_def->fields[i].type; enum field_type new_type = new_def->fields[i].type; if (!field_type1_contains_type2(new_type, old_type) && !field_type1_contains_type2(old_type, new_type)) { const char *msg = tt_sprintf("Can not change a field 
type from "\ "%s to %s on a not empty space", field_type_strs[old_type], field_type_strs[new_type]); diag_set(ClientError, ER_ALTER_SPACE, old_def->name, msg); return -1; } } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/replication.cc0000664000000000000000000005225013306565107020417 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "replication.h" #include /* &cord->slabc */ #include #include #include #include "box.h" #include "gc.h" #include "error.h" #include "vclock.h" /* VCLOCK_MAX */ uint32_t instance_id = REPLICA_ID_NIL; struct tt_uuid INSTANCE_UUID; struct tt_uuid REPLICASET_UUID; double replication_timeout = 1.0; /* seconds */ double replication_connect_timeout = 4.0; /* seconds */ int replication_connect_quorum = REPLICATION_CONNECT_QUORUM_ALL; double replication_sync_lag = 10.0; /* seconds */ struct replicaset replicaset; static int replica_compare_by_uuid(const struct replica *a, const struct replica *b) { return tt_uuid_compare(&a->uuid, &b->uuid); } rb_gen(MAYBE_UNUSED static, replica_hash_, replica_hash_t, struct replica, in_hash, replica_compare_by_uuid); #define replica_hash_foreach_safe(hash, item, next) \ for (item = replica_hash_first(hash); \ item != NULL && ((next = replica_hash_next(hash, item)) || 1); \ item = next) /** * Return the number of replicas that have to be synchronized * in order to form a quorum in the replica set. 
*/ static inline int replicaset_quorum(void) { return MIN(replication_connect_quorum, replicaset.applier.total); } void replication_init(void) { memset(&replicaset, 0, sizeof(replicaset)); mempool_create(&replicaset.pool, &cord()->slabc, sizeof(struct replica)); replica_hash_new(&replicaset.hash); rlist_create(&replicaset.anon); vclock_create(&replicaset.vclock); fiber_cond_create(&replicaset.applier.cond); replicaset.replica_by_id = (struct replica **)calloc(VCLOCK_MAX, sizeof(struct replica *)); latch_create(&replicaset.applier.order_latch); } void replication_free(void) { free(replicaset.replica_by_id); mempool_destroy(&replicaset.pool); fiber_cond_destroy(&replicaset.applier.cond); } void replica_check_id(uint32_t replica_id) { if (replica_id == REPLICA_ID_NIL) tnt_raise(ClientError, ER_REPLICA_ID_IS_RESERVED, (unsigned) replica_id); if (replica_id >= VCLOCK_MAX) tnt_raise(LoggedError, ER_REPLICA_MAX, (unsigned) replica_id); if (replica_id == ::instance_id) tnt_raise(ClientError, ER_LOCAL_INSTANCE_ID_IS_READ_ONLY, (unsigned) replica_id); } /* Return true if replica doesn't have id, relay and applier */ static bool replica_is_orphan(struct replica *replica) { return replica->id == REPLICA_ID_NIL && replica->applier == NULL && replica->relay == NULL; } static void replica_on_applier_state_f(struct trigger *trigger, void *event); static struct replica * replica_new(void) { struct replica *replica = (struct replica *) mempool_alloc(&replicaset.pool); if (replica == NULL) tnt_raise(OutOfMemory, sizeof(*replica), "malloc", "struct replica"); replica->id = 0; replica->uuid = uuid_nil; replica->applier = NULL; replica->relay = NULL; replica->gc = NULL; rlist_create(&replica->in_anon); trigger_create(&replica->on_applier_state, replica_on_applier_state_f, NULL, NULL); replica->applier_sync_state = APPLIER_DISCONNECTED; latch_create(&replica->order_latch); return replica; } static void replica_delete(struct replica *replica) { assert(replica_is_orphan(replica)); if 
(replica->gc != NULL) gc_consumer_unregister(replica->gc); mempool_free(&replicaset.pool, replica); } struct replica * replicaset_add(uint32_t replica_id, const struct tt_uuid *replica_uuid) { assert(!tt_uuid_is_nil(replica_uuid)); assert(replica_id != REPLICA_ID_NIL && replica_id < VCLOCK_MAX); assert(replica_by_uuid(replica_uuid) == NULL); struct replica *replica = replica_new(); replica->uuid = *replica_uuid; replica_hash_insert(&replicaset.hash, replica); replica_set_id(replica, replica_id); return replica; } void replica_set_id(struct replica *replica, uint32_t replica_id) { assert(replica_id < VCLOCK_MAX); assert(replica->id == REPLICA_ID_NIL); /* replica id is read-only */ replica->id = replica_id; if (tt_uuid_is_equal(&INSTANCE_UUID, &replica->uuid)) { /* Assign local replica id */ assert(instance_id == REPLICA_ID_NIL); instance_id = replica_id; } replicaset.replica_by_id[replica_id] = replica; } void replica_clear_id(struct replica *replica) { assert(replica->id != REPLICA_ID_NIL && replica->id != instance_id); /* * Don't remove replicas from vclock here. * The vclock_sum() must always grow, it is a core invariant of * the recovery subsystem. Further attempts to register a replica * with the removed replica_id will re-use LSN from the last value. * Replicas with LSN == 0 also can't not be safely removed. * Some records may arrive later on due to asynchronous nature of * replication. 
*/ replicaset.replica_by_id[replica->id] = NULL; replica->id = REPLICA_ID_NIL; if (replica_is_orphan(replica)) { replica_hash_remove(&replicaset.hash, replica); replica_delete(replica); } } static void replica_set_applier(struct replica *replica, struct applier *applier) { assert(replica->applier == NULL); replica->applier = applier; trigger_add(&replica->applier->on_state, &replica->on_applier_state); } static void replica_clear_applier(struct replica *replica) { assert(replica->applier != NULL); replica->applier = NULL; trigger_clear(&replica->on_applier_state); } static void replica_on_applier_sync(struct replica *replica) { assert(replica->applier_sync_state == APPLIER_CONNECTED); replica->applier_sync_state = APPLIER_SYNC; replicaset.applier.synced++; replicaset_check_quorum(); } static void replica_on_applier_connect(struct replica *replica) { struct applier *applier = replica->applier; assert(tt_uuid_is_nil(&replica->uuid)); assert(!tt_uuid_is_nil(&applier->uuid)); assert(replica->applier_sync_state == APPLIER_DISCONNECTED); replica->uuid = applier->uuid; struct replica *orig = replica_hash_search(&replicaset.hash, replica); if (orig != NULL && orig->applier != NULL) { say_error("duplicate connection to the same replica: " "instance uuid %s, addr1 %s, addr2 %s", tt_uuid_str(&orig->uuid), applier->source, orig->applier->source); fiber_cancel(fiber()); /* * Raise an exception to force the applier * to disconnect. 
*/ fiber_testcancel(); } rlist_del_entry(replica, in_anon); if (orig != NULL) { /* Use existing struct replica */ replica_set_applier(orig, applier); replica_clear_applier(replica); replica_delete(replica); replica = orig; } else { /* Add a new struct replica */ replica_hash_insert(&replicaset.hash, replica); } replica->applier_sync_state = APPLIER_CONNECTED; replicaset.applier.connected++; } static void replica_on_applier_reconnect(struct replica *replica) { struct applier *applier = replica->applier; assert(!tt_uuid_is_nil(&replica->uuid)); assert(!tt_uuid_is_nil(&applier->uuid)); assert(replica->applier_sync_state == APPLIER_LOADING || replica->applier_sync_state == APPLIER_DISCONNECTED); if (replica->applier_sync_state == APPLIER_LOADING) { assert(replicaset.applier.loading > 0); replicaset.applier.loading--; } if (!tt_uuid_is_equal(&replica->uuid, &applier->uuid)) { /* * Master's UUID changed, most likely because it was * rebootstrapped. Try to look up a replica matching * the new UUID and reassign the applier to it. 
*/ struct replica *orig = replica_by_uuid(&applier->uuid); if (orig == NULL) { orig = replica_new(); orig->uuid = applier->uuid; replica_hash_insert(&replicaset.hash, orig); } if (orig->applier != NULL) { tnt_raise(ClientError, ER_CFG, "replication", "duplicate connection to the same replica"); } replica_set_applier(orig, applier); replica_clear_applier(replica); replica->applier_sync_state = APPLIER_DISCONNECTED; replica = orig; } replica->applier_sync_state = APPLIER_CONNECTED; replicaset.applier.connected++; } static void replica_on_applier_disconnect(struct replica *replica) { switch (replica->applier_sync_state) { case APPLIER_SYNC: assert(replicaset.applier.synced > 0); replicaset.applier.synced--; FALLTHROUGH; case APPLIER_CONNECTED: assert(replicaset.applier.connected > 0); replicaset.applier.connected--; break; case APPLIER_DISCONNECTED: break; default: unreachable(); } replica->applier_sync_state = replica->applier->state; if (replica->applier_sync_state == APPLIER_LOADING) replicaset.applier.loading++; } static void replica_on_applier_state_f(struct trigger *trigger, void *event) { (void)event; struct replica *replica = container_of(trigger, struct replica, on_applier_state); switch (replica->applier->state) { case APPLIER_CONNECTED: if (tt_uuid_is_nil(&replica->uuid)) replica_on_applier_connect(replica); else replica_on_applier_reconnect(replica); break; case APPLIER_LOADING: case APPLIER_DISCONNECTED: replica_on_applier_disconnect(replica); break; case APPLIER_FOLLOW: replica_on_applier_sync(replica); break; case APPLIER_OFF: /* * Connection to self, duplicate connection * to the same master, or the applier fiber * has been cancelled. Assume synced. */ replica_on_applier_sync(replica); break; case APPLIER_STOPPED: /* Unrecoverable error. */ replica_on_applier_disconnect(replica); break; default: break; } fiber_cond_signal(&replicaset.applier.cond); } /** * Update the replica set with new "applier" objects * upon reconfiguration of box.cfg.replication. 
*/ static void replicaset_update(struct applier **appliers, int count) { replica_hash_t uniq; memset(&uniq, 0, sizeof(uniq)); replica_hash_new(&uniq); RLIST_HEAD(anon_replicas); struct replica *replica, *next; struct applier *applier; auto uniq_guard = make_scoped_guard([&]{ replica_hash_foreach_safe(&uniq, replica, next) { replica_hash_remove(&uniq, replica); replica_delete(replica); } }); /* Check for duplicate UUID */ for (int i = 0; i < count; i++) { applier = appliers[i]; replica = replica_new(); replica_set_applier(replica, applier); if (applier->state != APPLIER_CONNECTED) { /* * The replica has not received its UUID from * the master yet and thus cannot be added to * the replica set. Instead, add it to the list * of anonymous replicas and setup a trigger * that will insert it into the replica set * when it is finally connected. */ rlist_add_entry(&anon_replicas, replica, in_anon); continue; } assert(!tt_uuid_is_nil(&applier->uuid)); replica->uuid = applier->uuid; if (replica_hash_search(&uniq, replica) != NULL) { tnt_raise(ClientError, ER_CFG, "replication", "duplicate connection to the same replica"); } replica_hash_insert(&uniq, replica); } /* * All invariants and conditions are checked, now it is safe to * apply the new configuration. Nothing can fail after this point. 
*/ /* Prune old appliers */ replicaset_foreach(replica) { if (replica->applier == NULL) continue; applier = replica->applier; replica_clear_applier(replica); replica->applier_sync_state = APPLIER_DISCONNECTED; applier_stop(applier); applier_delete(applier); } rlist_foreach_entry_safe(replica, &replicaset.anon, in_anon, next) { applier = replica->applier; replica_clear_applier(replica); replica_delete(replica); applier_stop(applier); applier_delete(applier); } rlist_create(&replicaset.anon); /* Save new appliers */ replicaset.applier.total = count; replicaset.applier.connected = 0; replicaset.applier.loading = 0; replicaset.applier.synced = 0; replica_hash_foreach_safe(&uniq, replica, next) { replica_hash_remove(&uniq, replica); struct replica *orig = replica_hash_search(&replicaset.hash, replica); if (orig != NULL) { /* Use existing struct replica */ replica_set_applier(orig, replica->applier); replica_clear_applier(replica); replica_delete(replica); replica = orig; } else { /* Add a new struct replica */ replica_hash_insert(&replicaset.hash, replica); } replica->applier_sync_state = APPLIER_CONNECTED; replicaset.applier.connected++; } rlist_swap(&replicaset.anon, &anon_replicas); assert(replica_hash_first(&uniq) == NULL); replica_hash_foreach_safe(&replicaset.hash, replica, next) { if (replica_is_orphan(replica)) { replica_hash_remove(&replicaset.hash, replica); replica_delete(replica); } } } /** * Replica set configuration state, shared among appliers. */ struct replicaset_connect_state { /** Number of successfully connected appliers. */ int connected; /** Number of appliers that failed to connect. */ int failed; /** Signaled when an applier connects or stops. 
*/ struct fiber_cond wakeup; }; struct applier_on_connect { struct trigger base; struct replicaset_connect_state *state; }; static void applier_on_connect_f(struct trigger *trigger, void *event) { struct applier_on_connect *on_connect = container_of(trigger, struct applier_on_connect, base); struct replicaset_connect_state *state = on_connect->state; struct applier *applier = (struct applier *)event; switch (applier->state) { case APPLIER_OFF: case APPLIER_STOPPED: state->failed++; break; case APPLIER_CONNECTED: state->connected++; break; default: return; } fiber_cond_signal(&state->wakeup); applier_pause(applier); } void replicaset_connect(struct applier **appliers, int count, double timeout, bool connect_all) { if (count == 0) { /* Cleanup the replica set. */ replicaset_update(appliers, count); return; } say_verbose("connecting to %d replicas", count); /* * Simultaneously connect to remote peers to receive their UUIDs * and fill the resulting set: * * - create a single control channel; * - register a trigger in each applier to wake up our * fiber via this channel when the remote peer becomes * connected and a UUID is received; * - wait up to CONNECT_TIMEOUT seconds for `count` messages; * - on timeout, raise a CFG error, cancel and destroy * the freshly created appliers (done in a guard); * - an success, unregister the trigger, check the UUID set * for duplicates, fill the result set, return. 
*/ /* Memory for on_state triggers registered in appliers */ struct applier_on_connect triggers[VCLOCK_MAX]; struct replicaset_connect_state state; state.connected = state.failed = 0; fiber_cond_create(&state.wakeup); /* Add triggers and start simulations connection to remote peers */ for (int i = 0; i < count; i++) { struct applier *applier = appliers[i]; struct applier_on_connect *trigger = &triggers[i]; /* Register a trigger to wake us up when peer is connected */ trigger_create(&trigger->base, applier_on_connect_f, NULL, NULL); trigger->state = &state; trigger_add(&applier->on_state, &trigger->base); /* Start background connection */ applier_start(applier); } while (state.connected < count) { double wait_start = ev_monotonic_now(loop()); if (fiber_cond_wait_timeout(&state.wakeup, timeout) != 0) break; if (state.failed > 0 && connect_all) break; timeout -= ev_monotonic_now(loop()) - wait_start; } if (state.connected < count) { say_crit("failed to connect to %d out of %d replicas", count - state.connected, count); /* Timeout or connection failure. */ if (connect_all) goto error; } else { say_verbose("connected to %d replicas", state.connected); } for (int i = 0; i < count; i++) { /* Unregister the temporary trigger used to wake us up */ trigger_clear(&triggers[i].base); /* * Stop appliers that failed to connect. * They will be restarted once we proceed * to 'subscribe', see replicaset_follow(). */ struct applier *applier = appliers[i]; if (applier->state != APPLIER_CONNECTED) applier_stop(applier); } /* Now all the appliers are connected, update the replica set. */ replicaset_update(appliers, count); return; error: /* Destroy appliers */ for (int i = 0; i < count; i++) { trigger_clear(&triggers[i].base); applier_stop(appliers[i]); } /* ignore original error */ tnt_raise(ClientError, ER_CFG, "replication", "failed to connect to one or more replicas"); } void replicaset_follow(void) { if (replicaset.applier.total == 0) { /* * Replication is not configured. 
*/ box_clear_orphan(); return; } struct replica *replica; replicaset_foreach(replica) { /* Resume connected appliers. */ if (replica->applier != NULL) applier_resume(replica->applier); } rlist_foreach_entry(replica, &replicaset.anon, in_anon) { /* Restart appliers that failed to connect. */ applier_start(replica->applier); } if (replicaset_quorum() == 0) { /* * Leaving orphan mode immediately since * replication_connect_quorum is set to 0. */ box_clear_orphan(); } } void replicaset_sync(void) { int quorum = replicaset_quorum(); if (quorum == 0) return; say_verbose("synchronizing with %d replicas", quorum); /* * Wait until all connected replicas synchronize up to * replication_sync_lag */ while (replicaset.applier.synced < quorum && replicaset.applier.connected + replicaset.applier.loading >= quorum) fiber_cond_wait(&replicaset.applier.cond); if (replicaset.applier.synced < quorum) { /* * Not enough replicas connected to form a quorum. * Do not stall configuration, leave the instance * in 'orphan' state. 
*/ say_crit("entering orphan mode"); return; } say_crit("replica set sync complete, quorum of %d " "replicas formed", quorum); } void replicaset_check_quorum(void) { if (replicaset.applier.synced >= replicaset_quorum()) { if (replicaset_quorum() > 0) say_crit("leaving orphan mode"); box_clear_orphan(); } } void replica_set_relay(struct replica *replica, struct relay *relay) { assert(replica->id != REPLICA_ID_NIL); assert(replica->relay == NULL); replica->relay = relay; } void replica_clear_relay(struct replica *replica) { assert(replica->relay != NULL); replica->relay = NULL; if (replica_is_orphan(replica)) { replica_hash_remove(&replicaset.hash, replica); replica_delete(replica); } } struct replica * replicaset_first(void) { return replica_hash_first(&replicaset.hash); } struct replica * replicaset_next(struct replica *replica) { return replica_hash_next(&replicaset.hash, replica); } /** * Compare vclock and read only mode of all connected * replicas and elect a leader. * Initiallly, skip read-only replicas, since they * can not properly act as bootstrap masters (register * new nodes in _cluster table). If there are no read-write * replicas, choose a read-only replica with biggest vclock * as a leader, in hope it will become read-write soon. */ static struct replica * replicaset_round(bool skip_ro) { struct replica *leader = NULL; replicaset_foreach(replica) { if (replica->applier == NULL) continue; /** * While bootstrapping a new cluster, read-only * replicas shouldn't be considered as a leader. * The only exception if there is no read-write * replicas since there is still a possibility * that all replicas exist in cluster table. */ if (skip_ro && replica->applier->remote_is_ro) continue; if (leader == NULL) { leader = replica; continue; } /* * Choose the replica with the most advanced * vclock. If there are two or more replicas * with the same vclock, prefer the one with * the lowest uuid. 
*/ int cmp = vclock_compare(&replica->applier->vclock, &leader->applier->vclock); if (cmp < 0) continue; if (cmp == 0 && tt_uuid_compare(&replica->uuid, &leader->uuid) > 0) continue; leader = replica; } return leader; } struct replica * replicaset_leader(void) { bool skip_ro = true; /** * Two loops, first prefers read-write replicas among others. * Second for backward compatibility, if there is no such * replicas at all. */ struct replica *leader = replicaset_round(skip_ro); if (leader == NULL) { skip_ro = false; leader = replicaset_round(skip_ro); } return leader; } struct replica * replica_by_uuid(const struct tt_uuid *uuid) { struct replica key; key.uuid = *uuid; return replica_hash_search(&replicaset.hash, &key); } struct replica * replica_by_id(uint32_t replica_id) { return replicaset.replica_by_id[replica_id]; } tarantool_1.9.1.26.g63eb81e3c/src/box/vy_stmt.c0000664000000000000000000005017113306565107017450 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "vy_stmt.h" #include #include #include /* struct iovec */ #include /* for refs */ #include "diag.h" #include #include #include "error.h" #include "tuple_format.h" #include "xrow.h" #include "fiber.h" struct tuple_format_vtab vy_tuple_format_vtab = { vy_tuple_delete, }; size_t vy_max_tuple_size = 1024 * 1024; void vy_tuple_delete(struct tuple_format *format, struct tuple *tuple) { say_debug("%s(%p)", __func__, tuple); assert(tuple->refs == 0); /* * Turn off formats referencing in worker threads to avoid * multithread unsafe modifications of a reference * counter. */ if (cord_is_main()) tuple_format_unref(format); #ifndef NDEBUG memset(tuple, '#', tuple_size(tuple)); /* fail early */ #endif free(tuple); } /** * Allocate a vinyl statement object on base of the struct tuple * with malloc() and the reference counter equal to 1. * @param format Format of an index. * @param size Size of the variable part of the statement. It * includes size of MessagePack tuple data and, for * upserts, MessagePack array of operations. * @retval not NULL Success. * @retval NULL Memory error. 
*/ static struct tuple * vy_stmt_alloc(struct tuple_format *format, uint32_t bsize) { uint32_t meta_size = tuple_format_meta_size(format); uint32_t total_size = sizeof(struct vy_stmt) + meta_size + bsize; if (unlikely(total_size > vy_max_tuple_size)) { diag_set(ClientError, ER_VINYL_MAX_TUPLE_SIZE, (unsigned) total_size); error_log(diag_last_error(diag_get())); return NULL; } struct tuple *tuple = malloc(total_size); if (unlikely(tuple == NULL)) { diag_set(OutOfMemory, total_size, "malloc", "struct vy_stmt"); return NULL; } say_debug("vy_stmt_alloc(format = %d %u, bsize = %zu) = %p", format->id, tuple_format_meta_size(format), bsize, tuple); tuple->refs = 1; tuple->format_id = tuple_format_id(format); if (cord_is_main()) tuple_format_ref(format); tuple->bsize = bsize; tuple->data_offset = sizeof(struct vy_stmt) + meta_size;; vy_stmt_set_lsn(tuple, 0); vy_stmt_set_type(tuple, 0); return tuple; } struct tuple * vy_stmt_dup(const struct tuple *stmt, struct tuple_format *format) { /* * We don't use tuple_new() to avoid the initializing of * tuple field map. This map can be simple memcopied from * the original tuple. 
*/ assert((vy_stmt_type(stmt) == IPROTO_UPSERT) == (format->extra_size == sizeof(uint8_t))); struct tuple *res = vy_stmt_alloc(format, stmt->bsize); if (res == NULL) return NULL; assert(tuple_size(res) == tuple_size(stmt)); assert(res->data_offset == stmt->data_offset); memcpy(res, stmt, tuple_size(stmt)); res->refs = 1; res->format_id = tuple_format_id(format); assert(tuple_size(res) == tuple_size(stmt)); return res; } struct tuple * vy_stmt_dup_lsregion(const struct tuple *stmt, struct lsregion *lsregion, int64_t alloc_id) { size_t size = tuple_size(stmt); struct tuple *mem_stmt; mem_stmt = lsregion_alloc(lsregion, size, alloc_id); if (mem_stmt == NULL) { diag_set(OutOfMemory, size, "lsregion_alloc", "mem_stmt"); return NULL; } memcpy(mem_stmt, stmt, size); /* * Region allocated statements can't be referenced or unreferenced * because they are located in monolithic memory region. Referencing has * sense only for separately allocated memory blocks. * The reference count here is set to 0 for an assertion if somebody * will try to unreference this statement. */ mem_stmt->refs = 0; return mem_stmt; } /** * Create the key statement from raw MessagePack data. * @param format Format of an index. * @param key MessagePack data that contain an array of * fields WITHOUT the array header. * @param part_count Count of the key fields that will be saved as * result. * * @retval not NULL Success. * @retval NULL Memory allocation error. */ struct tuple * vy_stmt_new_select(struct tuple_format *format, const char *key, uint32_t part_count) { assert(part_count == 0 || key != NULL); /* Key don't have field map */ assert(format->field_map_size == 0); /* Key doesn't have n_upserts field. 
*/ assert(format->extra_size != sizeof(uint8_t)); /* Calculate key length */ const char *key_end = key; for (uint32_t i = 0; i < part_count; i++) mp_next(&key_end); /* Allocate stmt */ uint32_t key_size = key_end - key; uint32_t bsize = mp_sizeof_array(part_count) + key_size; struct tuple *stmt = vy_stmt_alloc(format, bsize); if (stmt == NULL) return NULL; /* Copy MsgPack data */ char *raw = (char *) stmt + sizeof(struct vy_stmt); char *data = mp_encode_array(raw, part_count); memcpy(data, key, key_size); assert(data + key_size == raw + bsize); vy_stmt_set_type(stmt, IPROTO_SELECT); return stmt; } char * vy_key_dup(const char *key) { assert(mp_typeof(*key) == MP_ARRAY); const char *end = key; mp_next(&end); char *res = malloc(end - key); if (res == NULL) { diag_set(OutOfMemory, end - key, "malloc", "key"); return NULL; } memcpy(res, key, end - key); return res; } /** * Create a statement without type and with reserved space for operations. * Operations can be saved in the space available by @param extra. * For details @sa struct vy_stmt comment. */ static struct tuple * vy_stmt_new_with_ops(struct tuple_format *format, const char *tuple_begin, const char *tuple_end, struct iovec *ops, int op_count, enum iproto_type type) { mp_tuple_assert(tuple_begin, tuple_end); const char *tmp = tuple_begin; uint32_t field_count = mp_decode_array(&tmp); assert(field_count >= format->min_field_count); (void) field_count; size_t ops_size = 0; for (int i = 0; i < op_count; ++i) ops_size += ops[i].iov_len; /* * Allocate stmt. Offsets: one per key part + offset of the * statement end. 
*/ size_t mpsize = (tuple_end - tuple_begin); size_t bsize = mpsize + ops_size; struct tuple *stmt = vy_stmt_alloc(format, bsize); if (stmt == NULL) return NULL; /* Copy MsgPack data */ char *raw = (char *) tuple_data(stmt); char *wpos = raw; memcpy(wpos, tuple_begin, mpsize); wpos += mpsize; for (struct iovec *op = ops, *end = ops + op_count; op != end; ++op) { memcpy(wpos, op->iov_base, op->iov_len); wpos += op->iov_len; } vy_stmt_set_type(stmt, type); /* Calculate offsets for key parts */ if (tuple_init_field_map(format, (uint32_t *) raw, raw)) { tuple_unref(stmt); return NULL; } return stmt; } struct tuple * vy_stmt_new_upsert(struct tuple_format *format, const char *tuple_begin, const char *tuple_end, struct iovec *operations, uint32_t ops_cnt) { /* * UPSERT must have the n_upserts field in the extra * memory. */ assert(format->extra_size == sizeof(uint8_t)); struct tuple *upsert = vy_stmt_new_with_ops(format, tuple_begin, tuple_end, operations, ops_cnt, IPROTO_UPSERT); if (upsert == NULL) return NULL; vy_stmt_set_n_upserts(upsert, 0); return upsert; } struct tuple * vy_stmt_new_replace(struct tuple_format *format, const char *tuple_begin, const char *tuple_end) { /* REPLACE mustn't have n_upserts field. */ assert(format->extra_size != sizeof(uint8_t)); return vy_stmt_new_with_ops(format, tuple_begin, tuple_end, NULL, 0, IPROTO_REPLACE); } struct tuple * vy_stmt_new_insert(struct tuple_format *format, const char *tuple_begin, const char *tuple_end) { /* INSERT mustn't have n_upserts field. */ assert(format->extra_size != sizeof(uint8_t)); return vy_stmt_new_with_ops(format, tuple_begin, tuple_end, NULL, 0, IPROTO_INSERT); } struct tuple * vy_stmt_replace_from_upsert(struct tuple_format *replace_format, const struct tuple *upsert) { /* REPLACE mustn't have n_upserts field. 
*/ assert(replace_format->extra_size == 0); assert(vy_stmt_type(upsert) == IPROTO_UPSERT); /* Get statement size without UPSERT operations */ uint32_t bsize; vy_upsert_data_range(upsert, &bsize); assert(bsize <= upsert->bsize); /* Copy statement data excluding UPSERT operations */ struct tuple_format *format = tuple_format_by_id(upsert->format_id); /* * UPSERT must have the n_upserts field in the extra * memory. */ assert(format->extra_size == sizeof(uint8_t)); /* * In other fields the REPLACE tuple format must equal to * the UPSERT tuple format. */ assert(tuple_format_eq(format, replace_format)); struct tuple *replace = vy_stmt_alloc(replace_format, bsize); if (replace == NULL) return NULL; /* Copy both data and field_map. */ char *dst = (char *)replace + sizeof(struct vy_stmt); char *src = (char *)upsert + sizeof(struct vy_stmt) + format->extra_size; memcpy(dst, src, format->field_map_size + bsize); vy_stmt_set_type(replace, IPROTO_REPLACE); vy_stmt_set_lsn(replace, vy_stmt_lsn(upsert)); return replace; } static struct tuple * vy_stmt_new_surrogate_from_key(const char *key, enum iproto_type type, const struct key_def *cmp_def, struct tuple_format *format) { /** * UPSERT can't be surrogate. Also any not UPSERT tuple * mustn't have the n_upserts field. 
*/ assert(type != IPROTO_UPSERT && format->extra_size != sizeof(uint8_t)); struct region *region = &fiber()->gc; uint32_t field_count = format->index_field_count; struct iovec *iov = region_alloc(region, sizeof(*iov) * field_count); if (iov == NULL) { diag_set(OutOfMemory, sizeof(*iov) * field_count, "region", "iov for surrogate key"); return NULL; } memset(iov, 0, sizeof(*iov) * field_count); uint32_t part_count = mp_decode_array(&key); assert(part_count == cmp_def->part_count); assert(part_count <= field_count); uint32_t nulls_count = field_count - cmp_def->part_count; uint32_t bsize = mp_sizeof_array(field_count) + mp_sizeof_nil() * nulls_count; for (uint32_t i = 0; i < part_count; ++i) { const struct key_part *part = &cmp_def->parts[i]; assert(part->fieldno < field_count); const char *svp = key; iov[part->fieldno].iov_base = (char *) key; mp_next(&key); iov[part->fieldno].iov_len = key - svp; bsize += key - svp; } struct tuple *stmt = vy_stmt_alloc(format, bsize); if (stmt == NULL) return NULL; char *raw = (char *) tuple_data(stmt); uint32_t *field_map = (uint32_t *) raw; char *wpos = mp_encode_array(raw, field_count); for (uint32_t i = 0; i < field_count; ++i) { const struct tuple_field *field = &format->fields[i]; if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL) field_map[field->offset_slot] = wpos - raw; if (iov[i].iov_base == NULL) { wpos = mp_encode_nil(wpos); } else { memcpy(wpos, iov[i].iov_base, iov[i].iov_len); wpos += iov[i].iov_len; } } assert(wpos == raw + bsize); vy_stmt_set_type(stmt, type); return stmt; } struct tuple * vy_stmt_new_surrogate_delete_from_key(const char *key, const struct key_def *cmp_def, struct tuple_format *format) { return vy_stmt_new_surrogate_from_key(key, IPROTO_DELETE, cmp_def, format); } struct tuple * vy_stmt_new_surrogate_delete(struct tuple_format *format, const struct tuple *src) { uint32_t src_size; const char *src_data = tuple_data_range(src, &src_size); uint32_t total_size = src_size + format->field_map_size; /* 
Surrogate tuple uses less memory than the original tuple */ char *data = region_alloc(&fiber()->gc, total_size); if (data == NULL) { diag_set(OutOfMemory, src_size, "region", "tuple"); return NULL; } char *field_map_begin = data + src_size; uint32_t *field_map = (uint32_t *) (data + total_size); const char *src_pos = src_data; uint32_t src_count = mp_decode_array(&src_pos); assert(src_count >= format->min_field_count); uint32_t field_count; if (src_count < format->index_field_count) { field_count = src_count; /* * Nullify field map to be able to detect by 0, * which key fields are absent in tuple_field(). */ memset((char *)field_map - format->field_map_size, 0, format->field_map_size); } else { field_count = format->index_field_count; } char *pos = mp_encode_array(data, field_count); for (uint32_t i = 0; i < field_count; ++i) { const struct tuple_field *field = &format->fields[i]; if (! field->is_key_part) { /* Unindexed field - write NIL. */ assert(i < src_count); pos = mp_encode_nil(pos); mp_next(&src_pos); continue; } /* Indexed field - copy */ const char *src_field = src_pos; mp_next(&src_pos); memcpy(pos, src_field, src_pos - src_field); if (field->offset_slot != TUPLE_OFFSET_SLOT_NIL) field_map[field->offset_slot] = pos - data; pos += src_pos - src_field; } assert(pos <= data + src_size); uint32_t bsize = pos - data; struct tuple *stmt = vy_stmt_alloc(format, bsize); if (stmt == NULL) return NULL; char *stmt_data = (char *) tuple_data(stmt); char *stmt_field_map_begin = stmt_data - format->field_map_size; memcpy(stmt_data, data, bsize); memcpy(stmt_field_map_begin, field_map_begin, format->field_map_size); vy_stmt_set_type(stmt, IPROTO_DELETE); return stmt; } struct tuple * vy_stmt_extract_key(const struct tuple *stmt, const struct key_def *key_def, struct tuple_format *format) { struct region *region = &fiber()->gc; size_t region_svp = region_used(region); const char *key_raw = tuple_extract_key(stmt, key_def, NULL); if (key_raw == NULL) return NULL; 
uint32_t part_count = mp_decode_array(&key_raw); assert(part_count == key_def->part_count); struct tuple *key = vy_stmt_new_select(format, key_raw, part_count); /* Cleanup memory allocated by tuple_extract_key(). */ region_truncate(region, region_svp); return key; } struct tuple * vy_stmt_extract_key_raw(const char *data, const char *data_end, const struct key_def *key_def, struct tuple_format *format) { struct region *region = &fiber()->gc; size_t region_svp = region_used(region); const char *key_raw = tuple_extract_key_raw(data, data_end, key_def, NULL); if (key_raw == NULL) return NULL; uint32_t part_count = mp_decode_array(&key_raw); assert(part_count == key_def->part_count); struct tuple *key = vy_stmt_new_select(format, key_raw, part_count); /* Cleanup memory allocated by tuple_extract_key_raw(). */ region_truncate(region, region_svp); return key; } int vy_stmt_encode_primary(const struct tuple *value, const struct key_def *key_def, uint32_t space_id, struct xrow_header *xrow) { memset(xrow, 0, sizeof(*xrow)); enum iproto_type type = vy_stmt_type(value); xrow->type = type; xrow->lsn = vy_stmt_lsn(value); struct request request; memset(&request, 0, sizeof(request)); request.type = type; request.space_id = space_id; uint32_t size; const char *extracted = NULL; switch (type) { case IPROTO_DELETE: /* extract key */ extracted = tuple_extract_key(value, key_def, &size); if (extracted == NULL) return -1; request.key = extracted; request.key_end = request.key + size; break; case IPROTO_INSERT: case IPROTO_REPLACE: request.tuple = tuple_data_range(value, &size); request.tuple_end = request.tuple + size; break; case IPROTO_UPSERT: request.tuple = vy_upsert_data_range(value, &size); request.tuple_end = request.tuple + size; /* extract operations */ request.ops = vy_stmt_upsert_ops(value, &size); request.ops_end = request.ops + size; break; default: unreachable(); } xrow->bodycnt = xrow_encode_dml(&request, xrow->body); if (xrow->bodycnt < 0) return -1; return 0; } int 
vy_stmt_encode_secondary(const struct tuple *value, const struct key_def *cmp_def, struct xrow_header *xrow) { memset(xrow, 0, sizeof(*xrow)); enum iproto_type type = vy_stmt_type(value); xrow->type = type; xrow->lsn = vy_stmt_lsn(value); struct request request; memset(&request, 0, sizeof(request)); request.type = type; uint32_t size; const char *extracted = tuple_extract_key(value, cmp_def, &size); if (extracted == NULL) return -1; if (type == IPROTO_REPLACE || type == IPROTO_INSERT) { request.tuple = extracted; request.tuple_end = extracted + size; } else { assert(type == IPROTO_DELETE); request.key = extracted; request.key_end = extracted + size; } xrow->bodycnt = xrow_encode_dml(&request, xrow->body); if (xrow->bodycnt < 0) return -1; else return 0; } struct tuple * vy_stmt_decode(struct xrow_header *xrow, const struct key_def *key_def, struct tuple_format *format, struct tuple_format *upsert_format, bool is_primary) { struct request request; uint64_t key_map = dml_request_key_map(xrow->type); key_map &= ~(1ULL << IPROTO_SPACE_ID); /* space_id is optional */ if (xrow_decode_dml(xrow, &request, key_map) != 0) return NULL; struct tuple *stmt = NULL; const char *key; (void) key; struct iovec ops; switch (request.type) { case IPROTO_DELETE: /* extract key */ stmt = vy_stmt_new_surrogate_from_key(request.key, IPROTO_DELETE, key_def, format); break; case IPROTO_INSERT: case IPROTO_REPLACE: if (is_primary) { stmt = vy_stmt_new_with_ops(format, request.tuple, request.tuple_end, NULL, 0, request.type); } else { stmt = vy_stmt_new_surrogate_from_key(request.tuple, request.type, key_def, format); } break; case IPROTO_UPSERT: ops.iov_base = (char *)request.ops; ops.iov_len = request.ops_end - request.ops; stmt = vy_stmt_new_upsert(upsert_format, request.tuple, request.tuple_end, &ops, 1); break; default: /* TODO: report filename. 
*/ diag_set(ClientError, ER_INVALID_RUN_FILE, tt_sprintf("Can't decode statement: " "unknown request type %u", (unsigned)request.type)); return NULL; } if (stmt == NULL) return NULL; /* OOM */ vy_stmt_set_lsn(stmt, xrow->lsn); return stmt; } int vy_stmt_snprint(char *buf, int size, const struct tuple *stmt) { int total = 0; uint32_t mp_size; if (stmt == NULL) { SNPRINT(total, snprintf, buf, size, ""); return total; } SNPRINT(total, snprintf, buf, size, "%s(", iproto_type_name(vy_stmt_type(stmt))); SNPRINT(total, mp_snprint, buf, size, tuple_data(stmt)); if (vy_stmt_type(stmt) == IPROTO_UPSERT) { SNPRINT(total, snprintf, buf, size, ", ops="); SNPRINT(total, mp_snprint, buf, size, vy_stmt_upsert_ops(stmt, &mp_size)); } SNPRINT(total, snprintf, buf, size, ", lsn=%lld)", (long long) vy_stmt_lsn(stmt)); return total; } const char * vy_stmt_str(const struct tuple *stmt) { char *buf = tt_static_buf(); if (vy_stmt_snprint(buf, TT_STATIC_BUF_LEN, stmt) < 0) return ""; return buf; } struct tuple_format * vy_tuple_format_new_with_colmask(struct tuple_format *mem_format) { struct tuple_format *format = tuple_format_dup(mem_format); if (format == NULL) return NULL; /* + size of column mask. */ assert(format->extra_size == 0); format->extra_size = sizeof(uint64_t); return format; } struct tuple_format * vy_tuple_format_new_upsert(struct tuple_format *mem_format) { struct tuple_format *format = tuple_format_dup(mem_format); if (format == NULL) return NULL; /* + size of n_upserts. */ assert(format->extra_size == 0); format->extra_size = sizeof(uint8_t); return format; } tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_hash.c0000664000000000000000000003167613306565107020111 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "memtx_hash.h" #include "say.h" #include "fiber.h" #include "tuple.h" #include "tuple_hash.h" #include "memtx_engine.h" #include "space.h" #include "schema.h" /* space_cache_find() */ #include "errinj.h" #include "third_party/PMurHash.h" #include static inline bool equal(struct tuple *tuple_a, struct tuple *tuple_b, const struct key_def *key_def) { return tuple_compare(tuple_a, tuple_b, key_def) == 0; } static inline bool equal_key(struct tuple *tuple, const char *key, const struct key_def *key_def) { return tuple_compare_with_key(tuple, key, key_def->part_count, key_def) == 0; } #define LIGHT_NAME _index #define LIGHT_DATA_TYPE struct tuple * #define LIGHT_KEY_TYPE const char * #define LIGHT_CMP_ARG_TYPE struct key_def * #define LIGHT_EQUAL(a, b, c) equal(a, b, c) #define LIGHT_EQUAL_KEY(a, b, c) equal_key(a, b, c) #define HASH_INDEX_EXTENT_SIZE MEMTX_EXTENT_SIZE typedef uint32_t hash_t; #include "salad/light.h" /* {{{ MemtxHash Iterators ****************************************/ struct hash_iterator { struct iterator base; /* Must be the first member. */ struct light_index_core *hash_table; struct light_index_iterator iterator; /** Memory pool the iterator was allocated from. */ struct mempool *pool; }; static void hash_iterator_free(struct iterator *iterator) { assert(iterator->free == hash_iterator_free); struct hash_iterator *it = (struct hash_iterator *) iterator; mempool_free(it->pool, it); } static int hash_iterator_ge(struct iterator *ptr, struct tuple **ret) { assert(ptr->free == hash_iterator_free); struct hash_iterator *it = (struct hash_iterator *) ptr; struct tuple **res = light_index_iterator_get_and_next(it->hash_table, &it->iterator); *ret = res != NULL ? 
*res : NULL; return 0; } static int hash_iterator_gt(struct iterator *ptr, struct tuple **ret) { assert(ptr->free == hash_iterator_free); ptr->next = hash_iterator_ge; struct hash_iterator *it = (struct hash_iterator *) ptr; struct tuple **res = light_index_iterator_get_and_next(it->hash_table, &it->iterator); if (res != NULL) res = light_index_iterator_get_and_next(it->hash_table, &it->iterator); *ret = res != NULL ? *res : NULL; return 0; } static int hash_iterator_eq_next(MAYBE_UNUSED struct iterator *it, struct tuple **ret) { *ret = NULL; return 0; } static int hash_iterator_eq(struct iterator *it, struct tuple **ret) { it->next = hash_iterator_eq_next; return hash_iterator_ge(it, ret); } /* }}} */ /* {{{ MemtxHash -- implementation of all hashes. **********************/ static void memtx_hash_index_destroy(struct index *base) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; light_index_destroy(index->hash_table); free(index->hash_table); free(index); } static void memtx_hash_index_update_def(struct index *base) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; index->hash_table->arg = index->base.def->key_def; } static ssize_t memtx_hash_index_size(struct index *base) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; return index->hash_table->count; } static ssize_t memtx_hash_index_bsize(struct index *base) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; return matras_extent_count(&index->hash_table->mtable) * HASH_INDEX_EXTENT_SIZE; } static int memtx_hash_index_random(struct index *base, uint32_t rnd, struct tuple **result) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; struct light_index_core *hash_table = index->hash_table; *result = NULL; if (hash_table->count == 0) return 0; rnd %= (hash_table->table_size); while (!light_index_pos_valid(hash_table, rnd)) { rnd++; rnd %= (hash_table->table_size); } *result = light_index_get(hash_table, rnd); return 0; } static 
ssize_t memtx_hash_index_count(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { if (type == ITER_ALL) return memtx_hash_index_size(base); /* optimization */ return generic_index_count(base, type, key, part_count); } static int memtx_hash_index_get(struct index *base, const char *key, uint32_t part_count, struct tuple **result) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; assert(base->def->opts.is_unique && part_count == base->def->key_def->part_count); (void) part_count; *result = NULL; uint32_t h = key_hash(key, base->def->key_def); uint32_t k = light_index_find_key(index->hash_table, h, key); if (k != light_index_end) *result = light_index_get(index->hash_table, k); return 0; } static int memtx_hash_index_replace(struct index *base, struct tuple *old_tuple, struct tuple *new_tuple, enum dup_replace_mode mode, struct tuple **result) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; struct light_index_core *hash_table = index->hash_table; if (new_tuple) { uint32_t h = tuple_hash(new_tuple, base->def->key_def); struct tuple *dup_tuple = NULL; hash_t pos = light_index_replace(hash_table, h, new_tuple, &dup_tuple); if (pos == light_index_end) pos = light_index_insert(hash_table, h, new_tuple); ERROR_INJECT(ERRINJ_INDEX_ALLOC, { light_index_delete(hash_table, pos); pos = light_index_end; }); if (pos == light_index_end) { diag_set(OutOfMemory, (ssize_t)hash_table->count, "hash_table", "key"); return -1; } uint32_t errcode = replace_check_dup(old_tuple, dup_tuple, mode); if (errcode) { light_index_delete(hash_table, pos); if (dup_tuple) { uint32_t pos = light_index_insert(hash_table, h, dup_tuple); if (pos == light_index_end) { panic("Failed to allocate memory in " "recover of int hash_table"); } } struct space *sp = space_cache_find(base->def->space_id); if (sp != NULL) diag_set(ClientError, errcode, base->def->name, space_name(sp)); return -1; } if (dup_tuple) { *result = dup_tuple; return 0; 
} } if (old_tuple) { uint32_t h = tuple_hash(old_tuple, base->def->key_def); int res = light_index_delete_value(hash_table, h, old_tuple); assert(res == 0); (void) res; } *result = old_tuple; return 0; } static struct iterator * memtx_hash_index_create_iterator(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; struct memtx_engine *memtx = (struct memtx_engine *)base->engine; assert(part_count == 0 || key != NULL); struct hash_iterator *it = mempool_alloc(&memtx->hash_iterator_pool); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct hash_iterator), "memtx_hash_index", "iterator"); return NULL; } iterator_create(&it->base, base); it->pool = &memtx->hash_iterator_pool; it->base.free = hash_iterator_free; it->hash_table = index->hash_table; light_index_iterator_begin(it->hash_table, &it->iterator); switch (type) { case ITER_GT: if (part_count != 0) { light_index_iterator_key(it->hash_table, &it->iterator, key_hash(key, base->def->key_def), key); it->base.next = hash_iterator_gt; } else { light_index_iterator_begin(it->hash_table, &it->iterator); it->base.next = hash_iterator_ge; } break; case ITER_ALL: light_index_iterator_begin(it->hash_table, &it->iterator); it->base.next = hash_iterator_ge; break; case ITER_EQ: assert(part_count > 0); light_index_iterator_key(it->hash_table, &it->iterator, key_hash(key, base->def->key_def), key); it->base.next = hash_iterator_eq; break; default: diag_set(UnsupportedIndexFeature, base->def, "requested iterator type"); mempool_free(&memtx->hash_iterator_pool, it); return NULL; } return (struct iterator *)it; } struct hash_snapshot_iterator { struct snapshot_iterator base; struct light_index_core *hash_table; struct light_index_iterator iterator; }; /** * Destroy read view and free snapshot iterator. * Virtual method of snapshot iterator. * @sa index_vtab::create_snapshot_iterator. 
*/ static void hash_snapshot_iterator_free(struct snapshot_iterator *iterator) { assert(iterator->free == hash_snapshot_iterator_free); struct hash_snapshot_iterator *it = (struct hash_snapshot_iterator *) iterator; light_index_iterator_destroy(it->hash_table, &it->iterator); free(iterator); } /** * Get next tuple from snapshot iterator. * Virtual method of snapshot iterator. * @sa index_vtab::create_snapshot_iterator. */ static const char * hash_snapshot_iterator_next(struct snapshot_iterator *iterator, uint32_t *size) { assert(iterator->free == hash_snapshot_iterator_free); struct hash_snapshot_iterator *it = (struct hash_snapshot_iterator *) iterator; struct tuple **res = light_index_iterator_get_and_next(it->hash_table, &it->iterator); if (res == NULL) return NULL; return tuple_data_range(*res, size); } /** * Create an ALL iterator with personal read view so further * index modifications will not affect the iteration results. * Must be destroyed by iterator->free after usage. */ static struct snapshot_iterator * memtx_hash_index_create_snapshot_iterator(struct index *base) { struct memtx_hash_index *index = (struct memtx_hash_index *)base; struct hash_snapshot_iterator *it = (struct hash_snapshot_iterator *) calloc(1, sizeof(*it)); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct hash_snapshot_iterator), "memtx_hash_index", "iterator"); return NULL; } it->base.next = hash_snapshot_iterator_next; it->base.free = hash_snapshot_iterator_free; it->hash_table = index->hash_table; light_index_iterator_begin(it->hash_table, &it->iterator); light_index_iterator_freeze(it->hash_table, &it->iterator); return (struct snapshot_iterator *) it; } static const struct index_vtab memtx_hash_index_vtab = { /* .destroy = */ memtx_hash_index_destroy, /* .commit_create = */ generic_index_commit_create, /* .commit_drop = */ generic_index_commit_drop, /* .update_def = */ memtx_hash_index_update_def, /* .size = */ memtx_hash_index_size, /* .bsize = */ memtx_hash_index_bsize, /* 
.min = */ generic_index_min, /* .max = */ generic_index_max, /* .random = */ memtx_hash_index_random, /* .count = */ memtx_hash_index_count, /* .get = */ memtx_hash_index_get, /* .replace = */ memtx_hash_index_replace, /* .create_iterator = */ memtx_hash_index_create_iterator, /* .create_snapshot_iterator = */ memtx_hash_index_create_snapshot_iterator, /* .info = */ generic_index_info, /* .begin_build = */ generic_index_begin_build, /* .reserve = */ generic_index_reserve, /* .build_next = */ generic_index_build_next, /* .end_build = */ generic_index_end_build, }; struct memtx_hash_index * memtx_hash_index_new(struct memtx_engine *memtx, struct index_def *def) { memtx_index_arena_init(); if (!mempool_is_initialized(&memtx->hash_iterator_pool)) { mempool_create(&memtx->hash_iterator_pool, cord_slab_cache(), sizeof(struct hash_iterator)); } struct memtx_hash_index *index = (struct memtx_hash_index *)calloc(1, sizeof(*index)); if (index == NULL) { diag_set(OutOfMemory, sizeof(*index), "malloc", "struct memtx_hash_index"); return NULL; } struct light_index_core *hash_table = (struct light_index_core *)malloc(sizeof(*hash_table)); if (hash_table == NULL) { free(index); diag_set(OutOfMemory, sizeof(*hash_table), "malloc", "struct light_index_core"); return NULL; } if (index_create(&index->base, (struct engine *)memtx, &memtx_hash_index_vtab, def) != 0) { free(hash_table); free(index); return NULL; } light_index_create(hash_table, HASH_INDEX_EXTENT_SIZE, memtx_index_extent_alloc, memtx_index_extent_free, NULL, index->base.def->key_def); index->hash_table = hash_table; return index; } /* }}} */ tarantool_1.9.1.26.g63eb81e3c/src/box/column_mask.h0000664000000000000000000000737513306560010020254 0ustar rootroot#ifndef TARANTOOL_BOX_COLUMN_MASK_H_INCLUDED #define TARANTOOL_BOX_COLUMN_MASK_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include /** * Column mask is a bitmask of update operations for one tuple. * Column mask bit 'n' is set if in the corresponding tuple * field 'n' could be changed by an update operation. * This mask is used for update and upsert optimizations, when, * for example, it is necessary to check whether the operation * has changed an indexed field. * * The last bit of the mask stands for fields in range [63, +inf). * If an update operation updates field #63 and greater, then this * last bit of the mask is set. If an * update operations changes many fields ('#' or '!'), then all * fields after and including the target field could be changed - * in such case we set not one bit, but a range of bits. 
*/ #define COLUMN_MASK_FULL UINT64_MAX /** * Set a bit in the bitmask corresponding to a * single changed column. * @param column_mask Mask to update. * @param fieldno Updated fieldno (index base must be 0). */ static inline void column_mask_set_fieldno(uint64_t *column_mask, uint32_t fieldno) { if (fieldno >= 63) /* * @sa column_mask key_def declaration for * details. */ *column_mask |= ((uint64_t) 1) << 63; else *column_mask |= ((uint64_t) 1) << fieldno; } /** * Set bits in a bitmask for a range of changed columns. * @param column_mask Mask to update. * @param first_fieldno_in_range First fieldno of the updated * range. */ static inline void column_mask_set_range(uint64_t *column_mask, uint32_t first_fieldno_in_range) { if (first_fieldno_in_range < 63) { /* * Set all bits by default via COLUMN_MASK_FULL * and then unset bits preceding the operation * field number. Fields corresponding to these * bits will definitely not be changed. */ *column_mask |= COLUMN_MASK_FULL << first_fieldno_in_range; } else { /* A range outside "short" range. */ *column_mask |= ((uint64_t) 1) << 63; } } /** * True if the update operation does not change the key. * @param key_mask Key mask. * @param update_mask Column mask of the update operation. * * @retval true, if the key is not updated. * @retval false, if a key field is possibly updated or column * mask optimization is not applicable. */ static inline bool key_update_can_be_skipped(uint64_t key_mask, uint64_t update_mask) { return (key_mask & update_mask) == 0; } #endif tarantool_1.9.1.26.g63eb81e3c/src/box/sysview_engine.h0000664000000000000000000000407213306560010020771 0ustar rootroot#ifndef TARANTOOL_BOX_SYSVIEW_ENGINE_H_INCLUDED #define TARANTOOL_BOX_SYSVIEW_ENGINE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "engine.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct sysview_engine { struct engine base; /** Memory pool for index iterator. */ struct mempool iterator_pool; }; struct sysview_engine * sysview_engine_new(void); #if defined(__cplusplus) } /* extern "C" */ #include "diag.h" static inline struct sysview_engine * sysview_engine_new_xc(void) { struct sysview_engine *sysview = sysview_engine_new(); if (sysview == NULL) diag_raise(); return sysview; } #endif /* defined(__plusplus) */ #endif /* TARANTOOL_BOX_SYSVIEW_ENGINE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/vinyl.c0000664000000000000000000034666113306565107017120 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "vinyl.h" #include "vy_mem.h" #include "vy_run.h" #include "vy_range.h" #include "vy_index.h" #include "vy_tx.h" #include "vy_cache.h" #include "vy_log.h" #include "vy_upsert.h" #include "vy_write_iterator.h" #include "vy_read_iterator.h" #include "vy_point_lookup.h" #include "vy_quota.h" #include "vy_scheduler.h" #include "vy_stat.h" #include #include #include #include #include #include #include #include #include "coio_task.h" #include "cbus.h" #include "histogram.h" #include "tuple_update.h" #include "txn.h" #include "xrow.h" #include "xlog.h" #include "engine.h" #include "space.h" #include "index.h" #include "xstream.h" #include "info.h" #include "column_mask.h" #include "trigger.h" #include "checkpoint.h" #include "session.h" #include "wal.h" /* wal_mode() */ /** * Yield after iterating over this many objects (e.g. ranges). * Yield more often in debug mode. */ #if defined(NDEBUG) enum { VY_YIELD_LOOPS = 128 }; #else enum { VY_YIELD_LOOPS = 2 }; #endif struct vy_squash_queue; enum vy_status { VINYL_OFFLINE, VINYL_INITIAL_RECOVERY_LOCAL, VINYL_INITIAL_RECOVERY_REMOTE, VINYL_FINAL_RECOVERY_LOCAL, VINYL_FINAL_RECOVERY_REMOTE, VINYL_ONLINE, }; struct vy_env { /** Recovery status */ enum vy_status status; /** TX manager */ struct tx_manager *xm; /** Upsert squash queue */ struct vy_squash_queue *squash_queue; /** Memory pool for index iterator. */ struct mempool iterator_pool; /** Memory quota */ struct vy_quota quota; /** Timer for updating quota watermark. */ ev_timer quota_timer; /** * Amount of quota used since the last * invocation of the quota timer callback. */ size_t quota_use_curr; /** * Quota use rate, in bytes per second. * Calculated as exponentially weighted * moving average of quota_use_curr. */ size_t quota_use_rate; /** * Dump bandwidth is needed for calculating the quota watermark. * The higher the bandwidth, the later we can start dumping w/o * suffering from transaction throttling. 
So we want to be very * conservative about estimating the bandwidth. * * To make sure we don't overestimate it, we maintain a * histogram of all observed measurements and assume the * bandwidth to be equal to the 10th percentile, i.e. the * best result among 10% worst measurements. */ struct histogram *dump_bw; /** Common index environment. */ struct vy_index_env index_env; /** Environment for cache subsystem */ struct vy_cache_env cache_env; /** Environment for run subsystem */ struct vy_run_env run_env; /** Environment for memory subsystem. */ struct vy_mem_env mem_env; /** Scheduler */ struct vy_scheduler scheduler; /** Local recovery context. */ struct vy_recovery *recovery; /** Local recovery vclock. */ const struct vclock *recovery_vclock; /** * LSN to assign to the next statement received during * initial join. * * We can't use original statements' LSNs, because we * send statements not in the chronological order while * the receiving end expects LSNs to grow monotonically * due to the design of the lsregion allocator, which is * used for storing statements in memory. */ int64_t join_lsn; /** Path to the data directory. */ char *path; /** Max size of the memory level. */ size_t memory; /** Max time a transaction may wait for memory. */ double timeout; /** Max number of threads used for reading. */ int read_threads; /** Max number of threads used for writing. */ int write_threads; /** Try to recover corrupted data if set. */ bool force_recovery; }; enum { /** * Time interval between successive updates of * quota watermark and use rate, in seconds. */ VY_QUOTA_UPDATE_INTERVAL = 1, /** * Period of time over which the quota use rate * is averaged, in seconds. */ VY_QUOTA_RATE_AVG_PERIOD = 5, }; static inline int64_t vy_dump_bandwidth(struct vy_env *env) { /* See comment to vy_env::dump_bw. */ return histogram_percentile(env->dump_bw, 10); } struct vinyl_engine { struct engine base; /** Vinyl environment. 
*/
	struct vy_env *env;
};

/** Extract vy_env from an engine object. */
static inline struct vy_env *
vy_env(struct engine *engine)
{
	return ((struct vinyl_engine *)engine)->env;
}

/** Engine-facing index object wrapping the vinyl implementation. */
struct vinyl_index {
	struct index base;
	/** Vinyl index implementation. */
	struct vy_index *db;
};

/** Extract vy_index from an index object. */
struct vy_index *
vy_index(struct index *index)
{
	return ((struct vinyl_index *)index)->db;
}

/** Mask passed to vy_gc(). */
enum {
	/** Delete incomplete runs. */
	VY_GC_INCOMPLETE = 1 << 0,
	/** Delete dropped runs. */
	VY_GC_DROPPED = 1 << 1,
};

/* Garbage-collect run files; defined later in this file. */
static void
vy_gc(struct vy_env *env, struct vy_recovery *recovery,
      unsigned int gc_mask, int64_t gc_lsn);

/** Iterator over a vinyl index, returned to the box iterator API. */
struct vinyl_iterator {
	struct iterator base;
	/** Vinyl environment. */
	struct vy_env *env;
	/** Vinyl index this iterator is for. */
	struct vy_index *index;
	/**
	 * Points either to tx_autocommit for autocommit mode
	 * or to a multi-statement transaction active when the
	 * iterator was created.
	 */
	struct vy_tx *tx;
	/** Search key. */
	struct tuple *key;
	/** Vinyl read iterator. */
	struct vy_read_iterator iterator;
	/**
	 * Built-in transaction created when iterator is opened
	 * in autocommit mode.
	 */
	struct vy_tx tx_autocommit;
	/** Trigger invoked when tx ends to close the iterator. */
	struct trigger on_tx_destroy;
};

static const struct engine_vtab vinyl_engine_vtab;
static const struct space_vtab vinyl_space_vtab;
static const struct index_vtab vinyl_index_vtab;

/**
 * A quick intro into Vinyl cosmology and file format
 * --------------------------------------------------
 * A single vinyl index on disk consists of a set of "range"
 * objects. A range contains a sorted set of index keys;
 * keys in different ranges do not overlap and all ranges of the
 * same index together span the whole key space, for example:
 * (-inf..100), [100..114), [114..304), [304..inf)
 *
 * A sorted set of keys in a range is called a run.
A single * range may contain multiple runs, each run contains changes of * keys in the range over a certain period of time. The periods do * not overlap, while, of course, two runs of the same range may * contain changes of the same key. * All keys in a run are sorted and split between pages of * approximately equal size. The purpose of putting keys into * pages is a quicker key lookup, since (min,max) key of every * page is put into the page index, stored at the beginning of each * run. The page index of an active run is fully cached in RAM. * * All files of an index have the following name pattern: * .{run,index} * and are stored together in the index directory. * * Files that end with '.index' store page index (see vy_run_info) * while '.run' files store vinyl statements. * * is the unique id of this run. Newer runs have greater ids. * * Information about which run id belongs to which range is stored * in vinyl.meta file. */ /** {{{ Introspection */ static void vy_info_append_quota(struct vy_env *env, struct info_handler *h) { struct vy_quota *q = &env->quota; info_table_begin(h, "quota"); info_append_int(h, "used", q->used); info_append_int(h, "limit", q->limit); info_append_int(h, "watermark", q->watermark); info_append_int(h, "use_rate", env->quota_use_rate); info_append_int(h, "dump_bandwidth", vy_dump_bandwidth(env)); info_table_end(h); } static void vy_info_append_cache(struct vy_env *env, struct info_handler *h) { struct vy_cache_env *c = &env->cache_env; info_table_begin(h, "cache"); info_append_int(h, "used", c->mem_used); info_append_int(h, "limit", c->mem_quota); struct mempool_stats mstats; mempool_stats(&c->cache_entry_mempool, &mstats); info_append_int(h, "tuples", mstats.objcount); info_table_end(h); } static void vy_info_append_tx(struct vy_env *env, struct info_handler *h) { struct tx_manager *xm = env->xm; info_table_begin(h, "tx"); info_append_int(h, "commit", xm->stat.commit); info_append_int(h, "rollback", xm->stat.rollback); 
info_append_int(h, "conflict", xm->stat.conflict); struct mempool_stats mstats; mempool_stats(&xm->tx_mempool, &mstats); info_append_int(h, "transactions", mstats.objcount); mempool_stats(&xm->txv_mempool, &mstats); info_append_int(h, "statements", mstats.objcount); mempool_stats(&xm->read_interval_mempool, &mstats); info_append_int(h, "gap_locks", mstats.objcount); mempool_stats(&xm->read_view_mempool, &mstats); info_append_int(h, "read_views", mstats.objcount); info_table_end(h); } void vinyl_engine_info(struct vinyl_engine *vinyl, struct info_handler *h) { struct vy_env *env = vinyl->env; info_begin(h); vy_info_append_quota(env, h); vy_info_append_cache(env, h); vy_info_append_tx(env, h); info_end(h); } static void vy_info_append_stmt_counter(struct info_handler *h, const char *name, const struct vy_stmt_counter *count) { if (name != NULL) info_table_begin(h, name); info_append_int(h, "rows", count->rows); info_append_int(h, "bytes", count->bytes); if (name != NULL) info_table_end(h); } static void vy_info_append_disk_stmt_counter(struct info_handler *h, const char *name, const struct vy_disk_stmt_counter *count) { if (name != NULL) info_table_begin(h, name); info_append_int(h, "rows", count->rows); info_append_int(h, "bytes", count->bytes); info_append_int(h, "bytes_compressed", count->bytes_compressed); info_append_int(h, "pages", count->pages); if (name != NULL) info_table_end(h); } static void vy_info_append_compact_stat(struct info_handler *h, const char *name, const struct vy_compact_stat *stat) { info_table_begin(h, name); info_append_int(h, "count", stat->count); vy_info_append_stmt_counter(h, "in", &stat->in); vy_info_append_stmt_counter(h, "out", &stat->out); info_table_end(h); } static void vinyl_index_info(struct index *base, struct info_handler *h) { char buf[1024]; struct vy_index *index = vy_index(base); struct vy_index_stat *stat = &index->stat; struct vy_cache_stat *cache_stat = &index->cache.stat; info_begin(h); struct vy_stmt_counter count = 
stat->memory.count; vy_stmt_counter_add_disk(&count, &stat->disk.count); vy_info_append_stmt_counter(h, NULL, &count); info_append_int(h, "lookup", stat->lookup); vy_info_append_stmt_counter(h, "get", &stat->get); vy_info_append_stmt_counter(h, "put", &stat->put); info_append_double(h, "latency", latency_get(&stat->latency)); info_table_begin(h, "upsert"); info_append_int(h, "squashed", stat->upsert.squashed); info_append_int(h, "applied", stat->upsert.applied); info_table_end(h); info_table_begin(h, "memory"); vy_info_append_stmt_counter(h, NULL, &stat->memory.count); info_table_begin(h, "iterator"); info_append_int(h, "lookup", stat->memory.iterator.lookup); vy_info_append_stmt_counter(h, "get", &stat->memory.iterator.get); info_table_end(h); info_append_int(h, "index_size", vy_index_mem_tree_size(index)); info_table_end(h); info_table_begin(h, "disk"); vy_info_append_disk_stmt_counter(h, NULL, &stat->disk.count); info_table_begin(h, "iterator"); info_append_int(h, "lookup", stat->disk.iterator.lookup); vy_info_append_stmt_counter(h, "get", &stat->disk.iterator.get); vy_info_append_disk_stmt_counter(h, "read", &stat->disk.iterator.read); info_table_begin(h, "bloom"); info_append_int(h, "hit", stat->disk.iterator.bloom_hit); info_append_int(h, "miss", stat->disk.iterator.bloom_miss); info_table_end(h); info_table_end(h); vy_info_append_compact_stat(h, "dump", &stat->disk.dump); vy_info_append_compact_stat(h, "compact", &stat->disk.compact); info_append_int(h, "index_size", index->page_index_size); info_append_int(h, "bloom_size", index->bloom_size); info_table_end(h); info_table_begin(h, "cache"); vy_info_append_stmt_counter(h, NULL, &cache_stat->count); info_append_int(h, "lookup", cache_stat->lookup); vy_info_append_stmt_counter(h, "get", &cache_stat->get); vy_info_append_stmt_counter(h, "put", &cache_stat->put); vy_info_append_stmt_counter(h, "invalidate", &cache_stat->invalidate); vy_info_append_stmt_counter(h, "evict", &cache_stat->evict); info_append_int(h, 
"index_size", vy_cache_tree_mem_used(&index->cache.cache_tree)); info_table_end(h); info_table_begin(h, "txw"); vy_info_append_stmt_counter(h, NULL, &stat->txw.count); info_table_begin(h, "iterator"); info_append_int(h, "lookup", stat->txw.iterator.lookup); vy_info_append_stmt_counter(h, "get", &stat->txw.iterator.get); info_table_end(h); info_table_end(h); info_append_int(h, "range_count", index->range_count); info_append_int(h, "run_count", index->run_count); info_append_int(h, "run_avg", index->run_count / index->range_count); histogram_snprint(buf, sizeof(buf), index->run_hist); info_append_str(h, "run_histogram", buf); info_end(h); } static void vinyl_engine_memory_stat(struct engine *engine, struct engine_memory_stat *stat) { struct vy_env *env = vy_env(engine); struct mempool_stats mstats; stat->data += lsregion_used(&env->mem_env.allocator) - env->mem_env.tree_extent_size; stat->index += env->mem_env.tree_extent_size; stat->index += env->index_env.bloom_size; stat->index += env->index_env.page_index_size; stat->cache += env->cache_env.mem_used; stat->tx += env->xm->write_set_size + env->xm->read_set_size; mempool_stats(&env->xm->tx_mempool, &mstats); stat->tx += mstats.totals.used; mempool_stats(&env->xm->txv_mempool, &mstats); stat->tx += mstats.totals.used; mempool_stats(&env->xm->read_interval_mempool, &mstats); stat->tx += mstats.totals.used; mempool_stats(&env->xm->read_view_mempool, &mstats); stat->tx += mstats.totals.used; } /** }}} Introspection */ /** * Check if WAL is enabled. * * Vinyl needs to log all operations done on indexes in its own * journal - vylog. If we allowed to use it in conjunction with * wal_mode = 'none', vylog and WAL could get out of sync, which * can result in weird recovery errors. So we forbid DML/DDL * operations in case WAL is disabled. 
*/
static inline int
vinyl_check_wal(struct vy_env *env, const char *what)
{
	/*
	 * DML/DDL is forbidden with wal_mode = 'none' because
	 * vylog and WAL could otherwise get out of sync (see
	 * the comment preceding this function).
	 */
	if (env->status == VINYL_ONLINE && wal_mode() == WAL_NONE) {
		diag_set(ClientError, ER_UNSUPPORTED, "Vinyl",
			 tt_sprintf("%s if wal_mode = 'none'", what));
		return -1;
	}
	return 0;
}

/**
 * Given a space and an index id, return vy_index.
 * If index not found, return NULL and set diag.
 */
static struct vy_index *
vy_index_find(struct space *space, uint32_t iid)
{
	struct index *index = index_find(space, iid);
	if (index == NULL)
		return NULL;
	return vy_index(index);
}

/**
 * Wrapper around vy_index_find() which ensures that
 * the found index is unique.
 */
static struct vy_index *
vy_index_find_unique(struct space *space, uint32_t index_id)
{
	struct vy_index *index = vy_index_find(space, index_id);
	if (index != NULL && !index->opts.is_unique) {
		diag_set(ClientError, ER_MORE_THAN_ONE_TUPLE);
		return NULL;
	}
	return index;
}

/** Engine-level check of space definition options. */
static int
vinyl_engine_check_space_def(struct space_def *def)
{
	/* Vinyl spaces are always persistent. */
	if (def->opts.temporary) {
		diag_set(ClientError, ER_ALTER_SPACE, def->name,
			 "engine does not support temporary flag");
		return -1;
	}
	return 0;
}

/** Allocate and initialize a vinyl space object. */
static struct space *
vinyl_engine_create_space(struct engine *engine, struct space_def *def,
			  struct rlist *key_list)
{
	struct space *space = malloc(sizeof(*space));
	if (space == NULL) {
		diag_set(OutOfMemory, sizeof(*space),
			 "malloc", "struct space");
		return NULL;
	}

	/* Create a format from key and field definitions.
*/ int key_count = 0; struct index_def *index_def; rlist_foreach_entry(index_def, key_list, link) key_count++; struct key_def **keys = region_alloc(&fiber()->gc, sizeof(*keys) * key_count); if (keys == NULL) { free(space); return NULL; } key_count = 0; rlist_foreach_entry(index_def, key_list, link) keys[key_count++] = index_def->key_def; struct tuple_format *format = tuple_format_new(&vy_tuple_format_vtab, keys, key_count, 0, def->fields, def->field_count, def->dict); if (format == NULL) { free(space); return NULL; } format->exact_field_count = def->exact_field_count; tuple_format_ref(format); if (space_create(space, engine, &vinyl_space_vtab, def, key_list, format) != 0) { tuple_format_unref(format); free(space); return NULL; } /* Format is now referenced by the space. */ tuple_format_unref(format); return space; } static void vinyl_space_destroy(struct space *space) { free(space); } static int vinyl_space_check_index_def(struct space *space, struct index_def *index_def) { if (index_def->type != TREE) { diag_set(ClientError, ER_INDEX_TYPE, index_def->name, space_name(space)); return -1; } if (index_def->key_def->is_nullable && index_def->iid == 0) { diag_set(ClientError, ER_NULLABLE_PRIMARY, space_name(space)); return -1; } /* Check that there are no ANY, ARRAY, MAP parts */ for (uint32_t i = 0; i < index_def->key_def->part_count; i++) { struct key_part *part = &index_def->key_def->parts[i]; if (part->type <= FIELD_TYPE_ANY || part->type >= FIELD_TYPE_ARRAY) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), tt_sprintf("field type '%s' is not supported", field_type_strs[part->type])); return -1; } } return 0; } static struct index * vinyl_space_create_index(struct space *space, struct index_def *index_def) { assert(index_def->type == TREE); struct vinyl_engine *vinyl = (struct vinyl_engine *)space->engine; struct vinyl_index *index = calloc(1, sizeof(*index)); if (index == NULL) { diag_set(OutOfMemory, sizeof(*index), "malloc", "struct 
vinyl_index"); return NULL; } struct vy_env *env = vinyl->env; struct vy_index *pk = NULL; if (index_def->iid > 0) { pk = vy_index(space_index(space, 0)); assert(pk != NULL); } struct vy_index *db = vy_index_new(&env->index_env, &env->cache_env, &env->mem_env, index_def, space->format, pk); if (db == NULL) { free(index); return NULL; } if (index_create(&index->base, (struct engine *)vinyl, &vinyl_index_vtab, index_def) != 0) { vy_index_delete(db); free(index); return NULL; } index->db = db; return &index->base; } static void vinyl_index_destroy(struct index *base) { struct vy_index *index = vy_index(base); /* * There still may be a task scheduled for this index * so postpone actual deletion until the last reference * is gone. */ vy_index_unref(index); free(base); } /** * Detect whether we already have non-garbage index files, * and open an existing index if that's the case. Otherwise, * create a new index. Take the current recovery status into * account. */ static int vy_index_open(struct vy_env *env, struct vy_index *index) { /* Ensure vinyl data directory exists. */ if (access(env->path, F_OK) != 0) { diag_set(SystemError, "can not access vinyl data directory"); return -1; } int rc; switch (env->status) { case VINYL_ONLINE: /* * The recovery is complete, simply * create a new index. */ rc = vy_index_create(index); if (rc == 0) { /* Make sure reader threads are up and running. */ vy_run_env_enable_coio(&env->run_env, env->read_threads); } break; case VINYL_INITIAL_RECOVERY_REMOTE: case VINYL_FINAL_RECOVERY_REMOTE: /* * Remote recovery. The index files do not * exist locally, and we should create the * index directory from scratch. */ rc = vy_index_create(index); break; case VINYL_INITIAL_RECOVERY_LOCAL: case VINYL_FINAL_RECOVERY_LOCAL: /* * Local WAL replay or recovery from snapshot. * In either case the index directory should * have already been created, so try to load * the index files from it. 
*/ rc = vy_index_recover(index, env->recovery, &env->run_env, vclock_sum(env->recovery_vclock), env->status == VINYL_INITIAL_RECOVERY_LOCAL, env->force_recovery); break; default: unreachable(); } return rc; } static void vinyl_index_commit_create(struct index *base, int64_t lsn) { struct vy_env *env = vy_env(base->engine); struct vy_index *index = vy_index(base); if (env->status == VINYL_INITIAL_RECOVERY_LOCAL || env->status == VINYL_FINAL_RECOVERY_LOCAL) { /* * Normally, if this is local recovery, the index * should have been logged before restart. There's * one exception though - we could've failed to log * index due to a vylog write error, in which case * the index isn't in the recovery context and we * need to retry to log it now. */ if (index->commit_lsn >= 0) { vy_scheduler_add_index(&env->scheduler, index); return; } } if (env->status == VINYL_INITIAL_RECOVERY_REMOTE) { /* * Records received during initial join do not * have LSNs so we use a fake one to identify * the index in vylog. */ lsn = ++env->join_lsn; } /* * Backward compatibility fixup: historically, we used * box.info.signature for LSN of index creation, which * lags behind the LSN of the record that created the * index by 1. So for legacy indexes use the LSN from * index options. */ if (index->opts.lsn != 0) lsn = index->opts.lsn; index->commit_lsn = lsn; assert(index->range_count == 1); struct vy_range *range = vy_range_tree_first(index->tree); /* * Since it's too late to fail now, in case of vylog write * failure we leave the records we attempted to write in * the log buffer so that they are flushed along with the * next write request. If they don't get flushed before * the instance is shut down, we will replay them on local * recovery. 
*/ vy_log_tx_begin(); vy_log_create_index(index->commit_lsn, index->id, index->space_id, index->key_def); vy_log_insert_range(index->commit_lsn, range->id, NULL, NULL); vy_log_tx_try_commit(); /* * After we committed the index in the log, we can schedule * a task for it. */ vy_scheduler_add_index(&env->scheduler, index); } /* * Delete all runs, ranges, and slices of a given index * from the metadata log. */ static void vy_log_index_prune(struct vy_index *index, int64_t gc_lsn) { int loops = 0; for (struct vy_range *range = vy_range_tree_first(index->tree); range != NULL; range = vy_range_tree_next(index->tree, range)) { struct vy_slice *slice; rlist_foreach_entry(slice, &range->slices, in_range) vy_log_delete_slice(slice->id); vy_log_delete_range(range->id); if (++loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); } struct vy_run *run; rlist_foreach_entry(run, &index->runs, in_index) { vy_log_drop_run(run->id, gc_lsn); if (++loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); } } static void vinyl_index_commit_drop(struct index *base) { struct vy_env *env = vy_env(base->engine); struct vy_index *index = vy_index(base); vy_scheduler_remove_index(&env->scheduler, index); /* * We can't abort here, because the index drop request has * already been written to WAL. So if we fail to write the * change to the metadata log, we leave it in the log buffer, * to be flushed along with the next transaction. If it is * not flushed before the instance is shut down, we replay it * on local recovery from WAL. 
*/
	if (env->status == VINYL_FINAL_RECOVERY_LOCAL && index->is_dropped)
		return;
	index->is_dropped = true;
	vy_log_tx_begin();
	/* Discard all runs, ranges and slices of the dropped index. */
	vy_log_index_prune(index, checkpoint_last(NULL));
	vy_log_drop_index(index->commit_lsn);
	vy_log_tx_try_commit();
}

/** System spaces are never backed by the vinyl engine. */
static void
vinyl_init_system_space(struct space *space)
{
	(void)space;
	unreachable();
}

/**
 * Prepare phase of space truncation: set up indexes of the new
 * (emptied) space, either by swapping in already-recovered data
 * or by creating fresh range trees.
 */
static int
vinyl_space_prepare_truncate(struct space *old_space,
			     struct space *new_space)
{
	struct vy_env *env = vy_env(old_space->engine);

	if (vinyl_check_wal(env, "DDL") != 0)
		return -1;

	assert(old_space->index_count == new_space->index_count);
	uint32_t index_count = new_space->index_count;
	if (index_count == 0)
		return 0;

	struct vy_index *pk = vy_index(old_space->index[0]);

	/*
	 * On local recovery, we need to handle the following
	 * scenarios:
	 *
	 * - Space truncation was successfully logged before restart.
	 *   In this case indexes of the old space contain data added
	 *   after truncation (recovered by vy_index_recover()) and
	 *   hence we just need to swap contents between old and new
	 *   spaces.
	 *
	 * - We failed to log space truncation before restart.
	 *   In this case we have to replay space truncation the
	 *   same way we handle it during normal operation.
	 *
	 * See also vy_commit_truncate_space().
	 */
	bool truncate_done = (env->status == VINYL_FINAL_RECOVERY_LOCAL &&
			      pk->truncate_count > old_space->truncate_count);

	for (uint32_t i = 0; i < index_count; i++) {
		struct vy_index *old_index = vy_index(old_space->index[i]);
		struct vy_index *new_index = vy_index(new_space->index[i]);

		new_index->commit_lsn = old_index->commit_lsn;

		if (truncate_done) {
			/*
			 * We are replaying truncate from WAL and the
			 * old space already contains data added after
			 * truncate (recovered from vylog). Avoid
			 * reloading the space content from vylog,
			 * simply swap the contents of old and new
			 * spaces instead.
*/ vy_index_swap(old_index, new_index); new_index->is_dropped = old_index->is_dropped; new_index->truncate_count = old_index->truncate_count; vy_scheduler_remove_index(&env->scheduler, old_index); vy_scheduler_add_index(&env->scheduler, new_index); continue; } if (vy_index_init_range_tree(new_index) != 0) return -1; new_index->truncate_count = new_space->truncate_count; } return 0; } static void vinyl_space_commit_truncate(struct space *old_space, struct space *new_space) { struct vy_env *env = vy_env(old_space->engine); assert(old_space->index_count == new_space->index_count); uint32_t index_count = new_space->index_count; if (index_count == 0) return; struct vy_index *pk = vy_index(old_space->index[0]); /* * See the comment in vy_prepare_truncate_space(). */ if (env->status == VINYL_FINAL_RECOVERY_LOCAL && pk->truncate_count > old_space->truncate_count) return; /* * Mark old indexes as dropped and remove them from the scheduler. * After this point no task can be scheduled or completed for any * of them (only aborted). */ for (uint32_t i = 0; i < index_count; i++) { struct vy_index *index = vy_index(old_space->index[i]); index->is_dropped = true; vy_scheduler_remove_index(&env->scheduler, index); } /* * Log change in metadata. * * Since we can't fail here, in case of vylog write failure * we leave records we failed to write in vylog buffer so * that they get flushed along with the next write. If they * don't, we will replay them during WAL recovery. 
*/ vy_log_tx_begin(); int64_t gc_lsn = checkpoint_last(NULL); for (uint32_t i = 0; i < index_count; i++) { struct vy_index *old_index = vy_index(old_space->index[i]); struct vy_index *new_index = vy_index(new_space->index[i]); struct vy_range *range = vy_range_tree_first(new_index->tree); assert(!new_index->is_dropped); assert(new_index->truncate_count == new_space->truncate_count); assert(new_index->range_count == 1); vy_log_index_prune(old_index, gc_lsn); vy_log_insert_range(new_index->commit_lsn, range->id, NULL, NULL); vy_log_truncate_index(new_index->commit_lsn, new_index->truncate_count); } vy_log_tx_try_commit(); /* * After we committed space truncation in the metadata log, * we can make new indexes eligible for dump and compaction. */ for (uint32_t i = 0; i < index_count; i++) { struct vy_index *index = vy_index(new_space->index[i]); vy_scheduler_add_index(&env->scheduler, index); } } static int vinyl_space_prepare_alter(struct space *old_space, struct space *new_space) { struct vy_env *env = vy_env(old_space->engine); if (vinyl_check_wal(env, "DDL") != 0) return -1; /* * The space with no indexes can contain no rows. * Allow alter. */ if (old_space->index_count == 0) return 0; struct vy_index *pk = vy_index(old_space->index[0]); /* * During WAL recovery, the space may be not empty. But we * open existing indexes, not creating new ones. Allow * alter. */ if (env->status != VINYL_ONLINE) return 0; /* * Regardless of the space emptyness, key definition of an * existing index can not be changed, because key * definition is already in vylog. See #3169. */ if (old_space->index_count == new_space->index_count) { /* Check index_defs to be unchanged. */ for (uint32_t i = 0; i < old_space->index_count; ++i) { struct index_def *old_def, *new_def; old_def = space_index_def(old_space, i); new_def = space_index_def(new_space, i); /* * We do not support a full rebuild in * vinyl yet. 
*/ if (index_def_change_requires_rebuild(old_def, new_def) || key_part_cmp(old_def->key_def->parts, old_def->key_def->part_count, new_def->key_def->parts, new_def->key_def->part_count) != 0) { diag_set(ClientError, ER_UNSUPPORTED, "Vinyl", "changing the definition of an index"); return -1; } } } if (pk->stat.disk.count.rows == 0 && pk->stat.memory.count.rows == 0) return 0; /* * Since space format is not persisted in vylog, it can be * altered on non-empty space to some state, compatible * with the old one. */ if (space_def_check_compatibility(old_space->def, new_space->def, false) != 0) return -1; if (old_space->index_count < new_space->index_count) { diag_set(ClientError, ER_UNSUPPORTED, "Vinyl", "adding an index to a non-empty space"); return -1; } if (! tuple_format1_can_store_format2_tuples(new_space->format, old_space->format)) { diag_set(ClientError, ER_UNSUPPORTED, "Vinyl", "changing space format of a non-empty space"); return -1; } return 0; } static int vinyl_space_check_format(struct space *new_space, struct space *old_space) { (void)new_space; struct vy_env *env = vy_env(old_space->engine); /* @sa vy_prepare_alter_space for checks below. */ if (old_space->index_count == 0) return 0; struct vy_index *pk = vy_index(old_space->index[0]); if (env->status != VINYL_ONLINE) return 0; if (pk->stat.disk.count.rows == 0 && pk->stat.memory.count.rows == 0) return 0; diag_set(ClientError, ER_UNSUPPORTED, "Vinyl", "adding new fields to a non-empty space"); return -1; } static void vinyl_space_commit_alter(struct space *old_space, struct space *new_space) { (void)old_space; if (new_space == NULL || new_space->index_count == 0) return; /* space drop */ struct tuple_format *new_format = new_space->format; struct vy_index *pk = vy_index(new_space->index[0]); struct index_def *new_index_def = space_index_def(new_space, 0); assert(pk->pk == NULL); /* Update the format with column mask. 
*/ struct tuple_format *format = vy_tuple_format_new_with_colmask(new_format); if (format == NULL) goto fail; /* Update the upsert format. */ struct tuple_format *upsert_format = vy_tuple_format_new_upsert(new_format); if (upsert_format == NULL) { tuple_format_delete(format); goto fail; } /* Set possibly changed opts. */ pk->opts = new_index_def->opts; pk->check_is_unique = true; /* Set new formats. */ tuple_format_unref(pk->disk_format); tuple_format_unref(pk->mem_format); tuple_format_unref(pk->upsert_format); tuple_format_unref(pk->mem_format_with_colmask); pk->disk_format = new_format; tuple_format_ref(new_format); pk->upsert_format = upsert_format; tuple_format_ref(upsert_format); pk->mem_format_with_colmask = format; tuple_format_ref(format); pk->mem_format = new_format; tuple_format_ref(new_format); vy_index_validate_formats(pk); key_def_update_optionality(pk->key_def, new_format->min_field_count); key_def_update_optionality(pk->cmp_def, new_format->min_field_count); for (uint32_t i = 1; i < new_space->index_count; ++i) { struct vy_index *index = vy_index(new_space->index[i]); vy_index_unref(index->pk); vy_index_ref(pk); index->pk = pk; new_index_def = space_index_def(new_space, i); index->opts = new_index_def->opts; index->check_is_unique = index->opts.is_unique; tuple_format_unref(index->mem_format_with_colmask); tuple_format_unref(index->mem_format); tuple_format_unref(index->upsert_format); index->mem_format_with_colmask = pk->mem_format_with_colmask; index->mem_format = pk->mem_format; index->upsert_format = pk->upsert_format; tuple_format_ref(index->mem_format_with_colmask); tuple_format_ref(index->mem_format); tuple_format_ref(index->upsert_format); key_def_update_optionality(index->key_def, new_format->min_field_count); key_def_update_optionality(index->cmp_def, new_format->min_field_count); vy_index_validate_formats(index); } /* * Check if there are unique indexes that are contained * by other unique indexes. 
For them, we can skip check * for duplicates on INSERT. Prefer indexes with higher * ids for uniqueness check optimization as they are * likelier to have a "colder" cache. */ for (int i = new_space->index_count - 1; i >= 0; i--) { struct vy_index *index = vy_index(new_space->index[i]); if (!index->check_is_unique) continue; for (int j = 0; j < (int)new_space->index_count; j++) { struct vy_index *other = vy_index(new_space->index[j]); if (other != index && other->check_is_unique && key_def_contains(index->key_def, other->key_def)) { index->check_is_unique = false; break; } } } return; fail: /* FIXME: space_vtab::commit_alter() must not fail. */ diag_log(); unreachable(); panic("failed to alter space"); } static int vinyl_space_add_primary_key(struct space *space) { return vy_index_open(vy_env(space->engine), vy_index(space->index[0])); } static void vinyl_space_drop_primary_key(struct space *space) { (void)space; } static int vinyl_space_build_secondary_key(struct space *old_space, struct space *new_space, struct index *new_index) { (void)old_space; (void)new_space; /* * Unlike Memtx, Vinyl does not need building of a secondary index. * This is true because of two things: * 1) Vinyl does not support alter of non-empty spaces * 2) During recovery a Vinyl index already has all needed data on disk. * And there are 3 cases: * I. The secondary index is added in snapshot. Then Vinyl was * snapshotted too and all necessary for that moment data is on disk. * II. The secondary index is added in WAL. That means that vinyl * space had no data at that point and had nothing to build. The * index actually could contain recovered data, but it will handle it * by itself during WAL recovery. * III. Vinyl is online. The space is definitely empty and there's * nothing to build. * * When we start to implement alter of non-empty vinyl spaces, it * seems that we should call here: * Engine::buildSecondaryKey(old_space, new_space, new_index_arg); * but aware of three cases mentioned above. 
*/
	return vy_index_open(vy_env(new_index->engine),
			     vy_index(new_index));
}

/** Return the size of user data stored in the space, in bytes. */
static size_t
vinyl_space_bsize(struct space *space)
{
	/*
	 * Return the sum size of user data this space
	 * accommodates. Since full tuples are stored in
	 * primary indexes, it is basically the size of
	 * binary data stored in this space's primary index.
	 */
	struct index *pk_base = space_index(space, 0);
	if (pk_base == NULL)
		return 0;
	struct vy_index *pk = vy_index(pk_base);
	return pk->stat.memory.count.bytes + pk->stat.disk.count.bytes;
}

/** Estimate the number of tuples in the index. */
static ssize_t
vinyl_index_size(struct index *base)
{
	/*
	 * Return the total number of statements in the index.
	 * Note, it may be greater than the number of tuples
	 * actually stored in the space, but it should be a
	 * fairly good estimate.
	 */
	struct vy_index *index = vy_index(base);
	return index->stat.memory.count.rows + index->stat.disk.count.rows;
}

/** Return the memory/disk cost of maintaining the index, in bytes. */
static ssize_t
vinyl_index_bsize(struct index *base)
{
	/*
	 * Return the cost of indexing user data. For both
	 * primary and secondary indexes, this includes the
	 * size of page index, bloom filter, and memory tree
	 * extents. For secondary indexes, we also add the
	 * total size of statements stored on disk, because
	 * they are only needed for building the index.
	 */
	struct vy_index *index = vy_index(base);
	ssize_t bsize = vy_index_mem_tree_size(index) +
		index->page_index_size + index->bloom_size;
	if (index->id > 0)
		bsize += index->stat.disk.count.bytes;
	return bsize;
}

/* {{{ Public API of transaction control: start/end transaction,
 * read, write data in the context of a transaction.
 */

/**
 * Check if a request has already been committed to an index.
 *
 * If we're recovering the WAL, it may happen so that this
 * particular run was dumped after the checkpoint, and we're
 * replaying records already present in the database. In this
 * case avoid overwriting a newer version with an older one.
 *
 * If the index is going to be dropped or truncated on WAL
 * recovery, there's no point in replaying statements for it,
 * either.
 */
static inline bool
vy_is_committed_one(struct vy_env *env, struct space *space,
		    struct vy_index *index)
{
	/* Only relevant while replaying the local WAL. */
	if (likely(env->status != VINYL_FINAL_RECOVERY_LOCAL))
		return false;
	/* The index is going to be dropped - skip the statement. */
	if (index->is_dropped)
		return true;
	/* The index is going to be truncated - skip the statement. */
	if (index->truncate_count > space->truncate_count)
		return true;
	/*
	 * The statement is already on disk: it was dumped after
	 * the checkpoint we are recovering from.
	 */
	if (vclock_sum(env->recovery_vclock) <= index->dump_lsn)
		return true;
	return false;
}

/**
 * Check if a request has already been committed to a space, i.e.
 * to every index of the space. See also vy_is_committed_one().
 */
static inline bool
vy_is_committed(struct vy_env *env, struct space *space)
{
	if (likely(env->status != VINYL_FINAL_RECOVERY_LOCAL))
		return false;
	for (uint32_t iid = 0; iid < space->index_count; iid++) {
		struct vy_index *index = vy_index(space->index[iid]);
		if (!vy_is_committed_one(env, space, index))
			return false;
	}
	return true;
}

/**
 * Get a vinyl tuple from the index by the key.
 * @param index Index to search in.
 * @param tx Current transaction.
 * @param rv Read view.
 * @param key Key statement.
 * @param[out] result The found tuple is stored here. Must be
 *             unreferenced after usage.
 *
 * @retval 0 Success.
 * @retval -1 Memory error or read error.
 */
static inline int
vy_index_get(struct vy_index *index, struct vy_tx *tx,
	     const struct vy_read_view **rv,
	     struct tuple *key, struct tuple **result)
{
	/*
	 * tx can be NULL, for example, if a user calls
	 * space.index.get({key}).
	 */
	assert(tx == NULL || tx->state == VINYL_TX_READY);
	if (tuple_field_count(key) >= index->cmp_def->part_count) {
		/* The key is full - use the cheaper point lookup. */
		if (tx != NULL && vy_tx_track_point(tx, index, key) != 0)
			return -1;
		return vy_point_lookup(index, tx, rv, key, result);
	}
	/* Partial key - fall back to a range iterator with ITER_EQ. */
	struct vy_read_iterator itr;
	vy_read_iterator_open(&itr, index, tx, ITER_EQ, key, rv);
	int rc = vy_read_iterator_next(&itr, result);
	if (*result != NULL)
		tuple_ref(*result);
	vy_read_iterator_close(&itr);
	return rc;
}

/**
 * Check if the index contains the key. If true, then set
 * a duplicate key error in the diagnostics area.
 * @param env Vinyl environment.
 * @param tx Current transaction.
 * @param space Target space.
 * @param index Index in which to search.
 * @param key Key statement.
 *
 * @retval 0 Success, the key isn't found.
 * @retval -1 Memory error or the key is found.
 */
static inline int
vy_check_is_unique(struct vy_env *env, struct vy_tx *tx,
		   struct space *space, struct vy_index *index,
		   struct tuple *key)
{
	struct tuple *found;
	/*
	 * During recovery we apply rows that were successfully
	 * applied before restart so no conflict is possible.
	 */
	if (env->status != VINYL_ONLINE)
		return 0;
	if (vy_index_get(index, tx, vy_tx_read_view(tx), key, &found))
		return -1;
	if (found) {
		/* Duplicate: report it and drop our reference. */
		tuple_unref(found);
		diag_set(ClientError, ER_TUPLE_FOUND,
			 index_name_by_id(space, index->id),
			 space_name(space));
		return -1;
	}
	return 0;
}

/**
 * Insert a tuple in a primary index.
 * @param env Vinyl environment.
 * @param tx Current transaction.
 * @param space Target space.
 * @param pk Primary vinyl index.
 * @param stmt Tuple to insert.
 *
 * @retval 0 Success.
 * @retval -1 Memory error or duplicate key error.
 */
static inline int
vy_insert_primary(struct vy_env *env, struct vy_tx *tx, struct space *space,
		  struct vy_index *pk, struct tuple *stmt)
{
	assert(vy_stmt_type(stmt) == IPROTO_INSERT);
	assert(tx != NULL && tx->state == VINYL_TX_READY);
	assert(pk->id == 0);
	/*
	 * A primary index is always unique and the new tuple must not
	 * conflict with existing tuples.
	 */
	if (pk->check_is_unique &&
	    vy_check_is_unique(env, tx, space, pk, stmt) != 0)
		return -1;
	return vy_tx_set(tx, pk, stmt);
}

/**
 * Insert a tuple in a secondary index.
 * @param env Vinyl environment.
 * @param tx Current transaction.
 * @param space Target space.
 * @param index Secondary index.
 * @param stmt Tuple to replace.
 *
 * @retval 0 Success.
 * @retval -1 Memory error or duplicate key error.
 */
static int
vy_insert_secondary(struct vy_env *env, struct vy_tx *tx, struct space *space,
		    struct vy_index *index, struct tuple *stmt)
{
	assert(vy_stmt_type(stmt) == IPROTO_INSERT ||
	       vy_stmt_type(stmt) == IPROTO_REPLACE);
	assert(tx != NULL && tx->state == VINYL_TX_READY);
	assert(index->id > 0);
	/*
	 * If the index is unique then the new tuple must not
	 * conflict with existing tuples. If the index is not
	 * unique a conflict is impossible. The check may also be
	 * skipped when the statement does not touch the indexed
	 * fields (column mask) or when a nullable key contains
	 * NULL (NULLs never conflict).
	 */
	if (index->check_is_unique &&
	    !key_update_can_be_skipped(index->key_def->column_mask,
				       vy_stmt_column_mask(stmt)) &&
	    (!index->key_def->is_nullable ||
	     !vy_tuple_key_contains_null(stmt, index->key_def))) {
		struct tuple *key = vy_stmt_extract_key(stmt, index->key_def,
							index->env->key_format);
		if (key == NULL)
			return -1;
		int rc = vy_check_is_unique(env, tx, space, index, key);
		tuple_unref(key);
		if (rc != 0)
			return -1;
	}
	/*
	 * We must always append the statement to transaction write set
	 * of each index, even if operation itself does not update
	 * the index, e.g. it's an UPDATE, to ensure we read our
	 * own writes.
	 */
	return vy_tx_set(tx, index, stmt);
}

/**
 * Execute REPLACE in a space with a single index, possibly with
 * lookup for an old tuple if the space has at least one
 * on_replace trigger.
 * @param env Vinyl environment.
 * @param tx Current transaction.
 * @param space Space in which replace.
 * @param request Request with the tuple data.
 * @param stmt Statement for triggers is filled with old
 *             statement.
 *
 * @retval 0 Success.
 * @retval -1 Memory error OR duplicate key error OR the primary
 *            index is not found OR a tuple reference increment
 *            error.
*/ static inline int vy_replace_one(struct vy_env *env, struct vy_tx *tx, struct space *space, struct request *request, struct txn_stmt *stmt) { (void)env; assert(tx != NULL && tx->state == VINYL_TX_READY); struct vy_index *pk = vy_index(space->index[0]); assert(pk->id == 0); if (tuple_validate_raw(pk->mem_format, request->tuple)) return -1; struct tuple *new_tuple = vy_stmt_new_replace(pk->mem_format, request->tuple, request->tuple_end); if (new_tuple == NULL) return -1; /** * If the space has triggers, then we need to fetch the * old tuple to pass it to the trigger. */ if (stmt != NULL && !rlist_empty(&space->on_replace)) { if (vy_index_get(pk, tx, vy_tx_read_view(tx), new_tuple, &stmt->old_tuple) != 0) goto error_unref; } if (vy_tx_set(tx, pk, new_tuple)) goto error_unref; if (stmt != NULL) stmt->new_tuple = new_tuple; else tuple_unref(new_tuple); return 0; error_unref: tuple_unref(new_tuple); return -1; } /** * Execute REPLACE in a space with multiple indexes and lookup for * an old tuple, that should has been set in \p stmt->old_tuple if * the space has at least one on_replace trigger. * @param env Vinyl environment. * @param tx Current transaction. * @param space Vinyl space. * @param request Request with the tuple data. * @param stmt Statement for triggers filled with old * statement. * * @retval 0 Success * @retval -1 Memory error OR duplicate key error OR the primary * index is not found OR a tuple reference increment * error. */ static inline int vy_replace_impl(struct vy_env *env, struct vy_tx *tx, struct space *space, struct request *request, struct txn_stmt *stmt) { assert(tx != NULL && tx->state == VINYL_TX_READY); struct tuple *old_stmt = NULL; struct tuple *new_stmt = NULL; struct tuple *delete = NULL; struct vy_index *pk = vy_index_find(space, 0); if (pk == NULL) /* space has no primary key */ return -1; /* Primary key is dumped last. 
*/ assert(!vy_is_committed_one(env, space, pk)); assert(pk->id == 0); if (tuple_validate_raw(pk->mem_format, request->tuple)) return -1; new_stmt = vy_stmt_new_replace(pk->mem_format, request->tuple, request->tuple_end); if (new_stmt == NULL) return -1; /* Get full tuple from the primary index. */ if (vy_index_get(pk, tx, vy_tx_read_view(tx), new_stmt, &old_stmt) != 0) goto error; if (old_stmt == NULL) { /* * We can turn REPLACE into INSERT if the new key * does not have history. */ vy_stmt_set_type(new_stmt, IPROTO_INSERT); } /* * Replace in the primary index without explicit deletion * of the old tuple. */ if (vy_tx_set(tx, pk, new_stmt) != 0) goto error; if (space->index_count > 1 && old_stmt != NULL) { delete = vy_stmt_new_surrogate_delete(pk->mem_format, old_stmt); if (delete == NULL) goto error; } /* Update secondary keys, avoid duplicates. */ for (uint32_t iid = 1; iid < space->index_count; ++iid) { struct vy_index *index; index = vy_index(space->index[iid]); if (vy_is_committed_one(env, space, index)) continue; /* * Delete goes first, so if old and new keys * fully match, there is no look up beyond the * transaction index. */ if (old_stmt != NULL) { if (vy_tx_set(tx, index, delete) != 0) goto error; } if (vy_insert_secondary(env, tx, space, index, new_stmt) != 0) goto error; } if (delete != NULL) tuple_unref(delete); /* * The old tuple is used if there is an on_replace * trigger. */ if (stmt != NULL) { stmt->new_tuple = new_stmt; stmt->old_tuple = old_stmt; } return 0; error: if (delete != NULL) tuple_unref(delete); if (old_stmt != NULL) tuple_unref(old_stmt); if (new_stmt != NULL) tuple_unref(new_stmt); return -1; } /** * Check that the key can be used for search in a unique index. * @param index Index for checking. * @param key MessagePack'ed data, the array without a * header. * @param part_count Part count of the key. * * @retval 0 The key is valid. * @retval -1 The key is not valid, the appropriate error is set * in the diagnostics area. 
*/ static inline int vy_unique_key_validate(struct vy_index *index, const char *key, uint32_t part_count) { assert(index->opts.is_unique); assert(key != NULL || part_count == 0); /* * The index contains tuples with concatenation of * secondary and primary key fields, while the key * supplied by the user only contains the secondary key * fields. Use the correct key def to validate the key. * The key can be used to look up in the index since the * supplied key parts uniquely identify the tuple, as long * as the index is unique. */ uint32_t original_part_count = index->key_def->part_count; if (original_part_count != part_count) { diag_set(ClientError, ER_EXACT_MATCH, original_part_count, part_count); return -1; } return key_validate_parts(index->cmp_def, key, part_count, false); } /** * Find a tuple in the primary index by the key of the specified * index. * @param index Index for which the key is specified. Can be * both primary and secondary. * @param tx Current transaction. * @param rv Read view. * @param key_raw MessagePack'ed data, the array without a * header. * @param part_count Count of parts in the key. * @param[out] result The found statement is stored here. Must be * unreferenced after usage. * * @retval 0 Success. * @retval -1 Memory error. */ static inline int vy_index_full_by_key(struct vy_index *index, struct vy_tx *tx, const struct vy_read_view **rv, const char *key_raw, uint32_t part_count, struct tuple **result) { int rc; struct tuple *key = vy_stmt_new_select(index->env->key_format, key_raw, part_count); if (key == NULL) return -1; struct tuple *found; rc = vy_index_get(index, tx, rv, key, &found); tuple_unref(key); if (rc != 0) return -1; if (index->id == 0 || found == NULL) { *result = found; return 0; } /* * No need in vy_tx_track() as the tuple is already * tracked in the secondary index. */ rc = vy_point_lookup(index->pk, tx, rv, found, result); tuple_unref(found); return rc; } /** * Delete the tuple from all indexes of the vinyl space. 
 * @param env Vinyl environment.
 * @param tx Current transaction.
 * @param space Vinyl space.
 * @param tuple Tuple to delete.
 *
 * @retval 0 Success
 * @retval -1 Memory error or the index is not found.
 */
static inline int
vy_delete_impl(struct vy_env *env, struct vy_tx *tx, struct space *space,
	       const struct tuple *tuple)
{
	struct vy_index *pk = vy_index_find(space, 0);
	if (pk == NULL)
		return -1;
	/* Primary key is dumped last. */
	assert(!vy_is_committed_one(env, space, pk));
	struct tuple *delete =
		vy_stmt_new_surrogate_delete(pk->mem_format, tuple);
	if (delete == NULL)
		return -1;
	/* Delete from the primary index first. */
	if (vy_tx_set(tx, pk, delete) != 0)
		goto error;
	/* Then delete from secondary indexes. */
	struct vy_index *index;
	for (uint32_t i = 1; i < space->index_count; ++i) {
		index = vy_index(space->index[i]);
		if (vy_is_committed_one(env, space, index))
			continue;
		if (vy_tx_set(tx, index, delete) != 0)
			goto error;
	}
	tuple_unref(delete);
	return 0;
error:
	tuple_unref(delete);
	return -1;
}

/**
 * Execute DELETE in a vinyl space.
 * @param env Vinyl environment.
 * @param tx Current transaction.
 * @param stmt Statement for triggers filled with deleted
 *             statement.
 * @param space Vinyl space.
 * @param request Request with the tuple data.
 *
 * @retval 0 Success
 * @retval -1 Memory error OR the index is not found OR a tuple
 *            reference increment error.
 */
static int
vy_delete(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt,
	  struct space *space, struct request *request)
{
	/* Nothing to do if the statement was already replayed. */
	if (vy_is_committed(env, space))
		return 0;
	struct vy_index *pk = vy_index_find(space, 0);
	if (pk == NULL)
		return -1;
	struct vy_index *index = vy_index_find_unique(space,
						      request->index_id);
	if (index == NULL)
		return -1;
	bool has_secondary = space->index_count > 1;
	const char *key = request->key;
	uint32_t part_count = mp_decode_array(&key);
	if (vy_unique_key_validate(index, key, part_count))
		return -1;
	/*
	 * There are two cases when we need to get the full tuple
	 * before deletion.
* - if the space has on_replace triggers and need to pass * to them the old tuple. * * - if the space has one or more secondary indexes, then * we need to extract secondary keys from the old tuple * and pass them to indexes for deletion. */ if (has_secondary || !rlist_empty(&space->on_replace)) { if (vy_index_full_by_key(index, tx, vy_tx_read_view(tx), key, part_count, &stmt->old_tuple) != 0) return -1; if (stmt->old_tuple == NULL) return 0; } if (has_secondary) { assert(stmt->old_tuple != NULL); return vy_delete_impl(env, tx, space, stmt->old_tuple); } else { /* Primary is the single index in the space. */ assert(index->id == 0); struct tuple *delete = vy_stmt_new_surrogate_delete_from_key(request->key, pk->key_def, pk->mem_format); if (delete == NULL) return -1; int rc = vy_tx_set(tx, pk, delete); tuple_unref(delete); return rc; } } /** * We do not allow changes of the primary key during update. * * The syntax of update operation allows the user to update the * primary key of a tuple, which is prohibited, to avoid funny * effects during replication. * * @param pk Primary index. * @param index_name Name of the index which was updated - it may * be not the primary index. * @param old_tuple The tuple before update. * @param new_tuple The tuple after update. * @param column_mask Bitmask of the update operation. * * @retval 0 Success, the primary key is not modified in the new * tuple. * @retval -1 Attempt to modify the primary key. */ static inline int vy_check_update(struct space *space, const struct vy_index *pk, const struct tuple *old_tuple, const struct tuple *new_tuple, uint64_t column_mask) { if (!key_update_can_be_skipped(pk->key_def->column_mask, column_mask) && vy_tuple_compare(old_tuple, new_tuple, pk->key_def) != 0) { diag_set(ClientError, ER_CANT_UPDATE_PRIMARY_KEY, index_name_by_id(space, pk->id), space_name(space)); return -1; } return 0; } /** * Execute UPDATE in a vinyl space. * @param env Vinyl environment. * @param tx Current transaction. 
* @param stmt Statement for triggers filled with old and new * statements. * @param space Vinyl space. * @param request Request with the tuple data. * * @retval 0 Success * @retval -1 Memory error OR the index is not found OR a tuple * reference increment error. */ static int vy_update(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt, struct space *space, struct request *request) { assert(tx != NULL && tx->state == VINYL_TX_READY); if (vy_is_committed(env, space)) return 0; struct vy_index *index = vy_index_find_unique(space, request->index_id); if (index == NULL) return -1; const char *key = request->key; uint32_t part_count = mp_decode_array(&key); if (vy_unique_key_validate(index, key, part_count)) return -1; if (vy_index_full_by_key(index, tx, vy_tx_read_view(tx), key, part_count, &stmt->old_tuple) != 0) return -1; /* Nothing to update. */ if (stmt->old_tuple == NULL) return 0; /* Apply update operations. */ struct vy_index *pk = vy_index(space->index[0]); assert(pk != NULL); assert(pk->id == 0); /* Primary key is dumped last. */ assert(!vy_is_committed_one(env, space, pk)); uint64_t column_mask = 0; const char *new_tuple, *new_tuple_end; uint32_t new_size, old_size; const char *old_tuple = tuple_data_range(stmt->old_tuple, &old_size); const char *old_tuple_end = old_tuple + old_size; new_tuple = tuple_update_execute(region_aligned_alloc_cb, &fiber()->gc, request->tuple, request->tuple_end, old_tuple, old_tuple_end, &new_size, request->index_base, &column_mask); if (new_tuple == NULL) return -1; new_tuple_end = new_tuple + new_size; /* * Check that the new tuple matches the space format and * the primary key was not modified. 
*/ if (tuple_validate_raw(pk->mem_format, new_tuple)) return -1; struct tuple_format *mask_format = pk->mem_format_with_colmask; if (space->index_count == 1) { stmt->new_tuple = vy_stmt_new_replace(pk->mem_format, new_tuple, new_tuple_end); if (stmt->new_tuple == NULL) return -1; } else { stmt->new_tuple = vy_stmt_new_replace(mask_format, new_tuple, new_tuple_end); if (stmt->new_tuple == NULL) return -1; vy_stmt_set_column_mask(stmt->new_tuple, column_mask); } if (vy_check_update(space, pk, stmt->old_tuple, stmt->new_tuple, column_mask) != 0) return -1; /* * In the primary index the tuple can be replaced without * the old tuple deletion. */ if (vy_tx_set(tx, pk, stmt->new_tuple) != 0) return -1; if (space->index_count == 1) return 0; struct tuple *delete = vy_stmt_new_surrogate_delete(mask_format, stmt->old_tuple); if (delete == NULL) return -1; vy_stmt_set_column_mask(delete, column_mask); for (uint32_t i = 1; i < space->index_count; ++i) { index = vy_index(space->index[i]); if (vy_is_committed_one(env, space, index)) continue; if (vy_tx_set(tx, index, delete) != 0) goto error; if (vy_insert_secondary(env, tx, space, index, stmt->new_tuple)) goto error; } tuple_unref(delete); return 0; error: tuple_unref(delete); return -1; } /** * Insert the tuple in the space without checking duplicates in * the primary index. * @param env Vinyl environment. * @param tx Current transaction. * @param space Space in which insert. * @param stmt Tuple to upsert. * * @retval 0 Success. * @retval -1 Memory error or a secondary index duplicate error. 
*/ static int vy_insert_first_upsert(struct vy_env *env, struct vy_tx *tx, struct space *space, struct tuple *stmt) { assert(tx != NULL && tx->state == VINYL_TX_READY); assert(space->index_count > 0); assert(vy_stmt_type(stmt) == IPROTO_INSERT); struct vy_index *pk = vy_index(space->index[0]); assert(pk->id == 0); if (vy_tx_set(tx, pk, stmt) != 0) return -1; struct vy_index *index; for (uint32_t i = 1; i < space->index_count; ++i) { index = vy_index(space->index[i]); if (vy_insert_secondary(env, tx, space, index, stmt) != 0) return -1; } return 0; } /** * Insert UPSERT into the write set of the transaction. * @param tx Transaction which deletes. * @param index Index in which \p tx deletes. * @param tuple MessagePack array. * @param tuple_end End of the tuple. * @param expr MessagePack array of update operations. * @param expr_end End of the \p expr. * * @retval 0 Success. * @retval -1 Memory error. */ static int vy_index_upsert(struct vy_tx *tx, struct vy_index *index, const char *tuple, const char *tuple_end, const char *expr, const char *expr_end) { assert(tx == NULL || tx->state == VINYL_TX_READY); struct tuple *vystmt; struct iovec operations[1]; operations[0].iov_base = (void *)expr; operations[0].iov_len = expr_end - expr; vystmt = vy_stmt_new_upsert(index->upsert_format, tuple, tuple_end, operations, 1); if (vystmt == NULL) return -1; assert(vy_stmt_type(vystmt) == IPROTO_UPSERT); int rc = vy_tx_set(tx, index, vystmt); tuple_unref(vystmt); return rc; } static int request_normalize_ops(struct request *request) { assert(request->type == IPROTO_UPSERT || request->type == IPROTO_UPDATE); assert(request->index_base != 0); char *ops; ssize_t ops_len = request->ops_end - request->ops; ops = (char *)region_alloc(&fiber()->gc, ops_len); if (ops == NULL) return -1; char *ops_end = ops; const char *pos = request->ops; int op_cnt = mp_decode_array(&pos); ops_end = mp_encode_array(ops_end, op_cnt); int op_no = 0; for (op_no = 0; op_no < op_cnt; ++op_no) { int op_len = 
mp_decode_array(&pos); ops_end = mp_encode_array(ops_end, op_len); uint32_t op_name_len; const char *op_name = mp_decode_str(&pos, &op_name_len); ops_end = mp_encode_str(ops_end, op_name, op_name_len); int field_no; if (mp_typeof(*pos) == MP_INT) { field_no = mp_decode_int(&pos); ops_end = mp_encode_int(ops_end, field_no); } else { field_no = mp_decode_uint(&pos); field_no -= request->index_base; ops_end = mp_encode_uint(ops_end, field_no); } if (*op_name == ':') { /** * splice op adjust string pos and copy * 2 additional arguments */ int str_pos; if (mp_typeof(*pos) == MP_INT) { str_pos = mp_decode_int(&pos); ops_end = mp_encode_int(ops_end, str_pos); } else { str_pos = mp_decode_uint(&pos); str_pos -= request->index_base; ops_end = mp_encode_uint(ops_end, str_pos); } const char *arg = pos; mp_next(&pos); memcpy(ops_end, arg, pos - arg); ops_end += pos - arg; } const char *arg = pos; mp_next(&pos); memcpy(ops_end, arg, pos - arg); ops_end += pos - arg; } request->ops = (const char *)ops; request->ops_end = (const char *)ops_end; request->index_base = 0; /* Clear the header to ensure it's rebuilt at commit. */ request->header = NULL; return 0; } /** * Execute UPSERT in a vinyl space. * @param env Vinyl environment. * @param tx Current transaction. * @param stmt Statement for triggers filled with old and new * statements. * @param space Vinyl space. * @param request Request with the tuple data and update * operations. * * @retval 0 Success * @retval -1 Memory error OR the index is not found OR a tuple * reference increment error. */ static int vy_upsert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt, struct space *space, struct request *request) { assert(tx != NULL && tx->state == VINYL_TX_READY); if (vy_is_committed(env, space)) return 0; /* Check update operations. 
*/ if (tuple_update_check_ops(region_aligned_alloc_cb, &fiber()->gc, request->ops, request->ops_end, request->index_base)) { return -1; } if (request->index_base != 0) { if (request_normalize_ops(request)) return -1; } assert(request->index_base == 0); const char *tuple = request->tuple; const char *tuple_end = request->tuple_end; const char *ops = request->ops; const char *ops_end = request->ops_end; struct vy_index *pk = vy_index_find(space, 0); if (pk == NULL) return -1; /* Primary key is dumped last. */ assert(!vy_is_committed_one(env, space, pk)); if (tuple_validate_raw(pk->mem_format, tuple)) return -1; if (space->index_count == 1 && rlist_empty(&space->on_replace)) return vy_index_upsert(tx, pk, tuple, tuple_end, ops, ops_end); const char *old_tuple, *old_tuple_end; const char *new_tuple, *new_tuple_end; uint32_t new_size; uint64_t column_mask; /* * There are two cases when need to get the old tuple * before upsert: * - if the space has one or more on_repace triggers; * * - if the space has one or more secondary indexes: then * we need to extract secondary keys from the old tuple * to delete old tuples from secondary indexes. */ /* Find the old tuple using the primary key. */ struct tuple *key = vy_stmt_extract_key_raw(tuple, tuple_end, pk->key_def, pk->env->key_format); if (key == NULL) return -1; int rc = vy_index_get(pk, tx, vy_tx_read_view(tx), key, &stmt->old_tuple); tuple_unref(key); if (rc != 0) return -1; /* * If the old tuple was not found then UPSERT * turns into INSERT. */ if (stmt->old_tuple == NULL) { stmt->new_tuple = vy_stmt_new_insert(pk->mem_format, tuple, tuple_end); if (stmt->new_tuple == NULL) return -1; return vy_insert_first_upsert(env, tx, space, stmt->new_tuple); } uint32_t old_size; old_tuple = tuple_data_range(stmt->old_tuple, &old_size); old_tuple_end = old_tuple + old_size; /* Apply upsert operations to the old tuple. 
*/ new_tuple = tuple_upsert_execute(region_aligned_alloc_cb, &fiber()->gc, ops, ops_end, old_tuple, old_tuple_end, &new_size, 0, false, &column_mask); if (new_tuple == NULL) return -1; /* * Check that the new tuple matched the space * format and the primary key was not modified. */ if (tuple_validate_raw(pk->mem_format, new_tuple)) return -1; new_tuple_end = new_tuple + new_size; struct tuple_format *mask_format = pk->mem_format_with_colmask; if (space->index_count == 1) { stmt->new_tuple = vy_stmt_new_replace(pk->mem_format, new_tuple, new_tuple_end); if (stmt->new_tuple == NULL) return -1; } else { stmt->new_tuple = vy_stmt_new_replace(mask_format, new_tuple, new_tuple_end); if (stmt->new_tuple == NULL) return -1; vy_stmt_set_column_mask(stmt->new_tuple, column_mask); } if (vy_check_update(space, pk, stmt->old_tuple, stmt->new_tuple, column_mask) != 0) { diag_log(); /* * Upsert is skipped, to match the semantics of * vy_index_upsert(). */ return 0; } if (vy_tx_set(tx, pk, stmt->new_tuple)) return -1; if (space->index_count == 1) return 0; /* Replace in secondary indexes works as delete insert. */ struct vy_index *index; struct tuple *delete = vy_stmt_new_surrogate_delete(mask_format, stmt->old_tuple); if (delete == NULL) return -1; vy_stmt_set_column_mask(delete, column_mask); for (uint32_t i = 1; i < space->index_count; ++i) { index = vy_index(space->index[i]); if (vy_is_committed_one(env, space, index)) continue; if (vy_tx_set(tx, index, delete) != 0) goto error; if (vy_insert_secondary(env, tx, space, index, stmt->new_tuple) != 0) goto error; } tuple_unref(delete); return 0; error: tuple_unref(delete); return -1; } /** * Execute INSERT in a vinyl space. * @param env Vinyl environment. * @param tx Current transaction. * @param stmt Statement for triggers filled with the new * statement. * @param space Vinyl space. * @param request Request with the tuple data and update * operations. 
* * @retval 0 Success * @retval -1 Memory error OR duplicate error OR the primary * index is not found */ static int vy_insert(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt, struct space *space, struct request *request) { assert(stmt != NULL); struct vy_index *pk = vy_index_find(space, 0); if (pk == NULL) /* The space hasn't the primary index. */ return -1; assert(pk->id == 0); /* Primary key is dumped last. */ assert(!vy_is_committed_one(env, space, pk)); if (tuple_validate_raw(pk->mem_format, request->tuple)) return -1; /* First insert into the primary index. */ stmt->new_tuple = vy_stmt_new_insert(pk->mem_format, request->tuple, request->tuple_end); if (stmt->new_tuple == NULL) return -1; if (vy_insert_primary(env, tx, space, pk, stmt->new_tuple) != 0) return -1; for (uint32_t iid = 1; iid < space->index_count; ++iid) { struct vy_index *index = vy_index(space->index[iid]); if (vy_is_committed_one(env, space, index)) continue; if (vy_insert_secondary(env, tx, space, index, stmt->new_tuple) != 0) return -1; } return 0; } /** * Execute REPLACE in a vinyl space. * @param env Vinyl environment. * @param tx Current transaction. * @param stmt Statement for triggers filled with old * statement. * @param space Vinyl space. * @param request Request with the tuple data. * * @retval 0 Success * @retval -1 Memory error OR duplicate key error OR the primary * index is not found OR a tuple reference increment * error. */ static int vy_replace(struct vy_env *env, struct vy_tx *tx, struct txn_stmt *stmt, struct space *space, struct request *request) { if (vy_is_committed(env, space)) return 0; if (request->type == IPROTO_INSERT) return vy_insert(env, tx, stmt, space, request); if (space->index_count == 1) { /* Replace in a space with a single index. */ return vy_replace_one(env, tx, space, request, stmt); } else { /* Replace in a space with secondary indexes. 
*/ return vy_replace_impl(env, tx, space, request, stmt); } } static int vinyl_space_execute_replace(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { assert(request->index_id == 0); struct vy_env *env = vy_env(space->engine); struct vy_tx *tx = txn->engine_tx; struct txn_stmt *stmt = txn_current_stmt(txn); if (vy_replace(env, tx, stmt, space, request)) return -1; *result = stmt->new_tuple; return 0; } static int vinyl_space_execute_delete(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { struct vy_env *env = vy_env(space->engine); struct vy_tx *tx = txn->engine_tx; struct txn_stmt *stmt = txn_current_stmt(txn); if (vy_delete(env, tx, stmt, space, request)) return -1; /* * Delete may or may not set stmt->old_tuple, * but we always return NULL. */ *result = NULL; return 0; } static int vinyl_space_execute_update(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { struct vy_env *env = vy_env(space->engine); struct vy_tx *tx = txn->engine_tx; struct txn_stmt *stmt = txn_current_stmt(txn); if (vy_update(env, tx, stmt, space, request) != 0) return -1; *result = stmt->new_tuple; return 0; } static int vinyl_space_execute_upsert(struct space *space, struct txn *txn, struct request *request) { struct vy_env *env = vy_env(space->engine); struct vy_tx *tx = txn->engine_tx; struct txn_stmt *stmt = txn_current_stmt(txn); return vy_upsert(env, tx, stmt, space, request); } static inline void txn_stmt_unref_tuples(struct txn_stmt *stmt) { if (stmt->old_tuple) tuple_unref(stmt->old_tuple); if (stmt->new_tuple) tuple_unref(stmt->new_tuple); stmt->old_tuple = NULL; stmt->new_tuple = NULL; } static void txn_on_stop(struct trigger *trigger, void *event) { (void)trigger; (void)event; txn_rollback(); } static int vinyl_engine_begin(struct engine *engine, struct txn *txn) { struct vy_env *env = vy_env(engine); assert(txn->engine_tx == NULL); txn->engine_tx = 
vy_tx_begin(env->xm); if (txn->engine_tx == NULL) return -1; if (!txn->is_autocommit) { trigger_create(&txn->fiber_on_stop, txn_on_stop, NULL, NULL); trigger_add(&fiber()->on_stop, &txn->fiber_on_stop); } return 0; } static int vinyl_engine_prepare(struct engine *engine, struct txn *txn) { struct vy_env *env = vy_env(engine); struct vy_tx *tx = txn->engine_tx; assert(tx != NULL); if (tx->write_size > 0 && vinyl_check_wal(env, "DML") != 0) return -1; /* * The configured memory limit will never allow us to commit * this transaction. Fail early. */ if (tx->write_size > env->quota.limit) { diag_set(OutOfMemory, tx->write_size, "lsregion", "vinyl transaction"); return -1; } /* * Do not abort join/subscribe on quota timeout - replication * is asynchronous anyway and there's box.info.replication * available for the admin to track the lag so let the applier * wait as long as necessary for memory dump to complete. */ double timeout = (current_session()->type != SESSION_TYPE_APPLIER ? env->timeout : TIMEOUT_INFINITY); /* * Reserve quota needed by the transaction before allocating * memory. Since this may yield, which opens a time window for * the transaction to be sent to read view or aborted, we call * it before checking for conflicts. */ if (vy_quota_use(&env->quota, tx->write_size, timeout) != 0) { diag_set(ClientError, ER_VY_QUOTA_TIMEOUT); return -1; } size_t mem_used_before = lsregion_used(&env->mem_env.allocator); int rc = vy_tx_prepare(tx); size_t mem_used_after = lsregion_used(&env->mem_env.allocator); assert(mem_used_after >= mem_used_before); size_t write_size = mem_used_after - mem_used_before; /* * Insertion of a statement into an in-memory tree can trigger * an allocation of a new tree block. This should not normally * result in a noticeable excess of the memory limit, because * most memory is occupied by statements anyway, but we need to * adjust the quota accordingly in this case. 
* * The actual allocation size can also be less than reservation * if a statement is allocated from an lsregion slab allocated * by a previous transaction. Take this into account, too. */ if (write_size >= tx->write_size) vy_quota_force_use(&env->quota, write_size - tx->write_size); else vy_quota_release(&env->quota, tx->write_size - write_size); if (rc != 0) return -1; env->quota_use_curr += write_size; return 0; } static void vinyl_engine_commit(struct engine *engine, struct txn *txn) { struct vy_env *env = vy_env(engine); struct vy_tx *tx = txn->engine_tx; assert(tx != NULL); /* * vy_tx_commit() may trigger an upsert squash. * If there is no memory for a created statement, * it silently fails. But if it succeeds, we * need to account the memory in the quota. */ size_t mem_used_before = lsregion_used(&env->mem_env.allocator); vy_tx_commit(tx, txn->signature); size_t mem_used_after = lsregion_used(&env->mem_env.allocator); assert(mem_used_after >= mem_used_before); /* We can't abort the transaction at this point, use force. 
*/ vy_quota_force_use(&env->quota, mem_used_after - mem_used_before); struct txn_stmt *stmt; stailq_foreach_entry(stmt, &txn->stmts, next) txn_stmt_unref_tuples(stmt); txn->engine_tx = NULL; if (!txn->is_autocommit) trigger_clear(&txn->fiber_on_stop); } static void vinyl_engine_rollback(struct engine *engine, struct txn *txn) { (void)engine; struct vy_tx *tx = txn->engine_tx; if (tx == NULL) return; vy_tx_rollback(tx); struct txn_stmt *stmt; stailq_foreach_entry(stmt, &txn->stmts, next) txn_stmt_unref_tuples(stmt); txn->engine_tx = NULL; if (!txn->is_autocommit) trigger_clear(&txn->fiber_on_stop); } static int vinyl_engine_begin_statement(struct engine *engine, struct txn *txn) { (void)engine; struct vy_tx *tx = txn->engine_tx; struct txn_stmt *stmt = txn_current_stmt(txn); assert(tx != NULL); stmt->engine_savepoint = vy_tx_savepoint(tx); return 0; } static void vinyl_engine_rollback_statement(struct engine *engine, struct txn *txn, struct txn_stmt *stmt) { (void)engine; struct vy_tx *tx = txn->engine_tx; assert(tx != NULL); vy_tx_rollback_to_savepoint(tx, stmt->engine_savepoint); txn_stmt_unref_tuples(stmt); } /* }}} Public API of transaction control */ /** {{{ Environment */ static void vy_env_quota_timer_cb(ev_loop *loop, ev_timer *timer, int events) { (void)loop; (void)events; struct vy_env *e = timer->data; /* * Update the quota use rate with the new measurement. */ const double weight = 1 - exp(-VY_QUOTA_UPDATE_INTERVAL / (double)VY_QUOTA_RATE_AVG_PERIOD); e->quota_use_rate = (1 - weight) * e->quota_use_rate + weight * e->quota_use_curr / VY_QUOTA_UPDATE_INTERVAL; e->quota_use_curr = 0; /* * Due to log structured nature of the lsregion allocator, * which is used for allocating statements, we cannot free * memory in chunks, only all at once. Therefore we should * configure the watermark so that by the time we hit the * limit, all memory have been dumped, i.e. 
* * limit - watermark watermark * ----------------- = -------------- * quota_use_rate dump_bandwidth */ int64_t dump_bandwidth = vy_dump_bandwidth(e); size_t watermark = ((double)e->quota.limit * dump_bandwidth / (dump_bandwidth + e->quota_use_rate + 1)); vy_quota_set_watermark(&e->quota, watermark); } static void vy_env_quota_exceeded_cb(struct vy_quota *quota) { struct vy_env *env = container_of(quota, struct vy_env, quota); /* * The scheduler must be disabled during local recovery so as * not to distort data stored on disk. Not that we really need * it anyway, because the memory footprint is limited by the * memory limit from the previous run. * * On the contrary, remote recovery does require the scheduler * to be up and running, because the amount of data received * when bootstrapping from a remote master is only limited by * its disk size, which can exceed the size of available * memory by orders of magnitude. */ assert(env->status != VINYL_INITIAL_RECOVERY_LOCAL && env->status != VINYL_FINAL_RECOVERY_LOCAL); if (lsregion_used(&env->mem_env.allocator) == 0) { /* * The memory limit has been exceeded, but there's * nothing to dump. This may happen if all available * quota has been consumed by pending transactions. * There's nothing we can do about that. */ return; } vy_scheduler_trigger_dump(&env->scheduler); } static void vy_env_dump_complete_cb(struct vy_scheduler *scheduler, int64_t dump_generation, double dump_duration) { struct vy_env *env = container_of(scheduler, struct vy_env, scheduler); /* Free memory and release quota. 
*/ struct lsregion *allocator = &env->mem_env.allocator; struct vy_quota *quota = &env->quota; size_t mem_used_before = lsregion_used(allocator); lsregion_gc(allocator, dump_generation); size_t mem_used_after = lsregion_used(allocator); assert(mem_used_after <= mem_used_before); size_t mem_dumped = mem_used_before - mem_used_after; vy_quota_release(quota, mem_dumped); say_info("dumped %zu bytes in %.1f sec", mem_dumped, dump_duration); /* Account dump bandwidth. */ if (dump_duration > 0) histogram_collect(env->dump_bw, mem_dumped / dump_duration); } static struct vy_squash_queue * vy_squash_queue_new(void); static void vy_squash_queue_delete(struct vy_squash_queue *q); static void vy_squash_schedule(struct vy_index *index, struct tuple *stmt, void /* struct vy_env */ *arg); static struct vy_env * vy_env_new(const char *path, size_t memory, int read_threads, int write_threads, bool force_recovery) { enum { KB = 1000, MB = 1000 * 1000 }; static int64_t dump_bandwidth_buckets[] = { 100 * KB, 200 * KB, 300 * KB, 400 * KB, 500 * KB, 1 * MB, 2 * MB, 3 * MB, 4 * MB, 5 * MB, 10 * MB, 20 * MB, 30 * MB, 40 * MB, 50 * MB, 60 * MB, 70 * MB, 80 * MB, 90 * MB, 100 * MB, 110 * MB, 120 * MB, 130 * MB, 140 * MB, 150 * MB, 160 * MB, 170 * MB, 180 * MB, 190 * MB, 200 * MB, 220 * MB, 240 * MB, 260 * MB, 280 * MB, 300 * MB, 320 * MB, 340 * MB, 360 * MB, 380 * MB, 400 * MB, 450 * MB, 500 * MB, 550 * MB, 600 * MB, 650 * MB, 700 * MB, 750 * MB, 800 * MB, 850 * MB, 900 * MB, 950 * MB, 1000 * MB, }; struct vy_env *e = malloc(sizeof(*e)); if (unlikely(e == NULL)) { diag_set(OutOfMemory, sizeof(*e), "malloc", "struct vy_env"); return NULL; } memset(e, 0, sizeof(*e)); e->status = VINYL_OFFLINE; e->memory = memory; e->timeout = TIMEOUT_INFINITY; e->read_threads = read_threads; e->write_threads = write_threads; e->force_recovery = force_recovery; e->path = strdup(path); if (e->path == NULL) { diag_set(OutOfMemory, strlen(path), "malloc", "env->path"); goto error_path; } e->dump_bw = 
histogram_new(dump_bandwidth_buckets, lengthof(dump_bandwidth_buckets)); if (e->dump_bw == NULL) { diag_set(OutOfMemory, 0, "histogram_new", "dump bandwidth histogram"); goto error_dump_bw; } /* * Until we dump anything, assume bandwidth to be 10 MB/s, * which should be fine for initial guess. */ histogram_collect(e->dump_bw, 10 * MB); e->xm = tx_manager_new(); if (e->xm == NULL) goto error_xm; e->squash_queue = vy_squash_queue_new(); if (e->squash_queue == NULL) goto error_squash_queue; vy_mem_env_create(&e->mem_env, e->memory); vy_scheduler_create(&e->scheduler, e->write_threads, vy_env_dump_complete_cb, &e->run_env, &e->xm->read_views); if (vy_index_env_create(&e->index_env, e->path, &e->scheduler.generation, vy_squash_schedule, e) != 0) goto error_index_env; struct slab_cache *slab_cache = cord_slab_cache(); mempool_create(&e->iterator_pool, slab_cache, sizeof(struct vinyl_iterator)); vy_quota_create(&e->quota, vy_env_quota_exceeded_cb); ev_timer_init(&e->quota_timer, vy_env_quota_timer_cb, 0, VY_QUOTA_UPDATE_INTERVAL); e->quota_timer.data = e; ev_timer_start(loop(), &e->quota_timer); vy_cache_env_create(&e->cache_env, slab_cache); vy_run_env_create(&e->run_env); vy_log_init(e->path); return e; error_index_env: vy_mem_env_destroy(&e->mem_env); vy_scheduler_destroy(&e->scheduler); vy_squash_queue_delete(e->squash_queue); error_squash_queue: tx_manager_delete(e->xm); error_xm: histogram_delete(e->dump_bw); error_dump_bw: free(e->path); error_path: free(e); return NULL; } static void vy_env_delete(struct vy_env *e) { ev_timer_stop(loop(), &e->quota_timer); vy_scheduler_destroy(&e->scheduler); vy_squash_queue_delete(e->squash_queue); tx_manager_delete(e->xm); free(e->path); histogram_delete(e->dump_bw); mempool_destroy(&e->iterator_pool); vy_run_env_destroy(&e->run_env); vy_index_env_destroy(&e->index_env); vy_mem_env_destroy(&e->mem_env); vy_cache_env_destroy(&e->cache_env); vy_quota_destroy(&e->quota); if (e->recovery != NULL) vy_recovery_delete(e->recovery); 
vy_log_free(); TRASH(e); free(e); } struct vinyl_engine * vinyl_engine_new(const char *dir, size_t memory, int read_threads, int write_threads, bool force_recovery) { struct vinyl_engine *vinyl = calloc(1, sizeof(*vinyl)); if (vinyl == NULL) { diag_set(OutOfMemory, sizeof(*vinyl), "malloc", "struct vinyl_engine"); return NULL; } vinyl->env = vy_env_new(dir, memory, read_threads, write_threads, force_recovery); if (vinyl->env == NULL) { free(vinyl); return NULL; } vinyl->base.vtab = &vinyl_engine_vtab; vinyl->base.name = "vinyl"; return vinyl; } static void vinyl_engine_shutdown(struct engine *engine) { struct vinyl_engine *vinyl = (struct vinyl_engine *)engine; vy_env_delete(vinyl->env); free(vinyl); } void vinyl_engine_set_cache(struct vinyl_engine *vinyl, size_t quota) { vy_cache_env_set_quota(&vinyl->env->cache_env, quota); } void vinyl_engine_set_max_tuple_size(struct vinyl_engine *vinyl, size_t max_size) { (void)vinyl; vy_max_tuple_size = max_size; } void vinyl_engine_set_timeout(struct vinyl_engine *vinyl, double timeout) { vinyl->env->timeout = timeout; } void vinyl_engine_set_too_long_threshold(struct vinyl_engine *vinyl, double too_long_threshold) { vinyl->env->quota.too_long_threshold = too_long_threshold; vinyl->env->index_env.too_long_threshold = too_long_threshold; } /** }}} Environment */ /* {{{ Checkpoint */ static int vinyl_engine_begin_checkpoint(struct engine *engine) { struct vy_env *env = vy_env(engine); assert(env->status == VINYL_ONLINE); /* * The scheduler starts worker threads upon the first wakeup. * To avoid starting the threads for nothing, do not wake it * up if Vinyl is not used. 
*/ if (lsregion_used(&env->mem_env.allocator) == 0) return 0; if (vy_scheduler_begin_checkpoint(&env->scheduler) != 0) return -1; return 0; } static int vinyl_engine_wait_checkpoint(struct engine *engine, struct vclock *vclock) { struct vy_env *env = vy_env(engine); assert(env->status == VINYL_ONLINE); if (vy_scheduler_wait_checkpoint(&env->scheduler) != 0) return -1; if (vy_log_rotate(vclock) != 0) return -1; return 0; } static void vinyl_engine_commit_checkpoint(struct engine *engine, struct vclock *vclock) { (void)vclock; struct vy_env *env = vy_env(engine); assert(env->status == VINYL_ONLINE); vy_scheduler_end_checkpoint(&env->scheduler); } static void vinyl_engine_abort_checkpoint(struct engine *engine) { struct vy_env *env = vy_env(engine); assert(env->status == VINYL_ONLINE); vy_scheduler_end_checkpoint(&env->scheduler); } /* }}} Checkpoint */ /** {{{ Recovery */ static int vinyl_engine_bootstrap(struct engine *engine) { struct vy_env *e = vy_env(engine); assert(e->status == VINYL_OFFLINE); if (vy_log_bootstrap() != 0) return -1; vy_quota_set_limit(&e->quota, e->memory); e->status = VINYL_ONLINE; return 0; } static int vinyl_engine_begin_initial_recovery(struct engine *engine, const struct vclock *recovery_vclock) { struct vy_env *e = vy_env(engine); assert(e->status == VINYL_OFFLINE); if (recovery_vclock != NULL) { e->xm->lsn = vclock_sum(recovery_vclock); e->recovery_vclock = recovery_vclock; e->recovery = vy_log_begin_recovery(recovery_vclock); if (e->recovery == NULL) return -1; e->status = VINYL_INITIAL_RECOVERY_LOCAL; } else { if (vy_log_bootstrap() != 0) return -1; vy_quota_set_limit(&e->quota, e->memory); e->status = VINYL_INITIAL_RECOVERY_REMOTE; } return 0; } static int vinyl_engine_begin_final_recovery(struct engine *engine) { struct vy_env *e = vy_env(engine); switch (e->status) { case VINYL_INITIAL_RECOVERY_LOCAL: e->status = VINYL_FINAL_RECOVERY_LOCAL; break; case VINYL_INITIAL_RECOVERY_REMOTE: e->status = VINYL_FINAL_RECOVERY_REMOTE; break; 
default: unreachable(); } return 0; } static int vinyl_engine_end_recovery(struct engine *engine) { struct vy_env *e = vy_env(engine); switch (e->status) { case VINYL_FINAL_RECOVERY_LOCAL: if (vy_log_end_recovery() != 0) return -1; /* * If the instance is shut down while a dump or * compaction task is in progress, we'll get an * unfinished run file on disk, i.e. a run file * which was either not written to the end or not * inserted into a range. We need to delete such * runs on recovery. */ vy_gc(e, e->recovery, VY_GC_INCOMPLETE, INT64_MAX); vy_recovery_delete(e->recovery); e->recovery = NULL; e->recovery_vclock = NULL; e->status = VINYL_ONLINE; vy_quota_set_limit(&e->quota, e->memory); break; case VINYL_FINAL_RECOVERY_REMOTE: e->status = VINYL_ONLINE; break; default: unreachable(); } /* * Do not start reader threads if no Vinyl index was * recovered. The threads will be started lazily upon * the first index creation, see vy_index_open(). */ if (e->index_env.index_count > 0) vy_run_env_enable_coio(&e->run_env, e->read_threads); return 0; } /** }}} Recovery */ /** {{{ Replication */ /** Relay context, passed to all relay functions. */ struct vy_join_ctx { /** Environment. */ struct vy_env *env; /** Stream to relay statements to. */ struct xstream *stream; /** Pipe to the relay thread. */ struct cpipe relay_pipe; /** Pipe to the tx thread. */ struct cpipe tx_pipe; /** * Cbus message, used for calling functions * on behalf of the relay thread. */ struct cbus_call_msg cmsg; /** ID of the space currently being relayed. */ uint32_t space_id; /** Ordinal number of the index. */ uint32_t index_id; /** * Index key definition, as defined by the user. * We only send the primary key, so the definition * provided by the user is correct for compare. */ struct key_def *key_def; /** Index format used for REPLACE and DELETE statements. */ struct tuple_format *format; /** Index format used for UPSERT statements. 
*/ struct tuple_format *upsert_format; /** * Write iterator for merging runs before sending * them to the replica. */ struct vy_stmt_stream *wi; /** * List of run slices of the current range, linked by * vy_slice::in_join. The newer a slice the closer it * is to the head of the list. */ struct rlist slices; }; static int vy_send_range_f(struct cbus_call_msg *cmsg) { struct vy_join_ctx *ctx = container_of(cmsg, struct vy_join_ctx, cmsg); struct tuple *stmt; int rc = ctx->wi->iface->start(ctx->wi); if (rc != 0) goto err; while ((rc = ctx->wi->iface->next(ctx->wi, &stmt)) == 0 && stmt != NULL) { struct xrow_header xrow; rc = vy_stmt_encode_primary(stmt, ctx->key_def, ctx->space_id, &xrow); if (rc != 0) break; /* * Reset the LSN as the replica will ignore it * anyway - see comment to vy_env::join_lsn. */ xrow.lsn = 0; rc = xstream_write(ctx->stream, &xrow); if (rc != 0) break; fiber_gc(); } err: ctx->wi->iface->stop(ctx->wi); fiber_gc(); return rc; } /** * Merge and send all runs from the given relay context. * On success, delete runs. */ static int vy_send_range(struct vy_join_ctx *ctx) { if (rlist_empty(&ctx->slices)) return 0; /* nothing to do */ int rc = -1; struct rlist fake_read_views; rlist_create(&fake_read_views); ctx->wi = vy_write_iterator_new(ctx->key_def, ctx->format, ctx->upsert_format, true, true, &fake_read_views); if (ctx->wi == NULL) goto out; struct vy_slice *slice; rlist_foreach_entry(slice, &ctx->slices, in_join) { if (vy_write_iterator_new_slice(ctx->wi, slice) != 0) goto out_delete_wi; } /* Do the actual work from the relay thread. 
*/ bool cancellable = fiber_set_cancellable(false); rc = cbus_call(&ctx->relay_pipe, &ctx->tx_pipe, &ctx->cmsg, vy_send_range_f, NULL, TIMEOUT_INFINITY); fiber_set_cancellable(cancellable); struct vy_slice *tmp; rlist_foreach_entry_safe(slice, &ctx->slices, in_join, tmp) vy_slice_delete(slice); rlist_create(&ctx->slices); out_delete_wi: ctx->wi->iface->close(ctx->wi); ctx->wi = NULL; out: return rc; } /** Relay callback, passed to vy_recovery_iterate(). */ static int vy_join_cb(const struct vy_log_record *record, void *arg) { struct vy_join_ctx *ctx = arg; if (record->type == VY_LOG_CREATE_INDEX || record->type == VY_LOG_INSERT_RANGE) { /* * All runs of the current range have been recovered, * so send them to the replica. */ if (vy_send_range(ctx) != 0) return -1; } if (record->type == VY_LOG_CREATE_INDEX) { ctx->space_id = record->space_id; ctx->index_id = record->index_id; if (ctx->key_def != NULL) key_def_delete(ctx->key_def); ctx->key_def = key_def_new_with_parts(record->key_parts, record->key_part_count); if (ctx->key_def == NULL) return -1; if (ctx->format != NULL) tuple_format_unref(ctx->format); ctx->format = tuple_format_new(&vy_tuple_format_vtab, &ctx->key_def, 1, 0, NULL, 0, NULL); if (ctx->format == NULL) return -1; tuple_format_ref(ctx->format); if (ctx->upsert_format != NULL) tuple_format_unref(ctx->upsert_format); ctx->upsert_format = vy_tuple_format_new_upsert(ctx->format); if (ctx->upsert_format == NULL) return -1; tuple_format_ref(ctx->upsert_format); } /* * We are only interested in the primary index. * Secondary keys will be rebuilt on the destination. 
*/ if (ctx->index_id != 0) return 0; if (record->type == VY_LOG_INSERT_SLICE) { struct tuple_format *key_format = ctx->env->index_env.key_format; struct tuple *begin = NULL, *end = NULL; bool success = false; struct vy_run *run = vy_run_new(&ctx->env->run_env, record->run_id); if (run == NULL) goto done_slice; if (vy_run_recover(run, ctx->env->path, ctx->space_id, ctx->index_id) != 0) goto done_slice; if (record->begin != NULL) { begin = vy_key_from_msgpack(key_format, record->begin); if (begin == NULL) goto done_slice; } if (record->end != NULL) { end = vy_key_from_msgpack(key_format, record->end); if (end == NULL) goto done_slice; } struct vy_slice *slice = vy_slice_new(record->slice_id, run, begin, end, ctx->key_def); if (slice == NULL) goto done_slice; rlist_add_entry(&ctx->slices, slice, in_join); success = true; done_slice: if (run != NULL) vy_run_unref(run); if (begin != NULL) tuple_unref(begin); if (end != NULL) tuple_unref(end); if (!success) return -1; } return 0; } /** Relay cord function. */ static int vy_join_f(va_list ap) { struct vy_join_ctx *ctx = va_arg(ap, struct vy_join_ctx *); coio_enable(); cpipe_create(&ctx->tx_pipe, "tx"); struct cbus_endpoint endpoint; cbus_endpoint_create(&endpoint, cord_name(cord()), fiber_schedule_cb, fiber()); cbus_loop(&endpoint); cbus_endpoint_destroy(&endpoint, cbus_process); cpipe_destroy(&ctx->tx_pipe); return 0; } static int vinyl_engine_join(struct engine *engine, struct vclock *vclock, struct xstream *stream) { struct vy_env *env = vy_env(engine); int rc = -1; /* Allocate the relay context. */ struct vy_join_ctx *ctx = malloc(sizeof(*ctx)); if (ctx == NULL) { diag_set(OutOfMemory, PATH_MAX, "malloc", "struct vy_join_ctx"); goto out; } memset(ctx, 0, sizeof(*ctx)); ctx->env = env; ctx->stream = stream; rlist_create(&ctx->slices); /* Start the relay cord. 
*/ char name[FIBER_NAME_MAX]; snprintf(name, sizeof(name), "initial_join_%p", stream); struct cord cord; if (cord_costart(&cord, name, vy_join_f, ctx) != 0) goto out_free_ctx; cpipe_create(&ctx->relay_pipe, name); /* * Load the recovery context from the given point in time. * Send all runs stored in it to the replica. */ struct vy_recovery *recovery; recovery = vy_recovery_new(vclock_sum(vclock), true); if (recovery == NULL) { say_error("failed to recover vylog to join a replica"); goto out_join_cord; } rc = vy_recovery_iterate(recovery, vy_join_cb, ctx); vy_recovery_delete(recovery); /* Send the last range. */ if (rc == 0) rc = vy_send_range(ctx); /* Cleanup. */ if (ctx->key_def != NULL) key_def_delete(ctx->key_def); if (ctx->format != NULL) tuple_format_unref(ctx->format); if (ctx->upsert_format != NULL) tuple_format_unref(ctx->upsert_format); struct vy_slice *slice, *tmp; rlist_foreach_entry_safe(slice, &ctx->slices, in_join, tmp) vy_slice_delete(slice); out_join_cord: cbus_stop_loop(&ctx->relay_pipe); cpipe_destroy(&ctx->relay_pipe); if (cord_cojoin(&cord) != 0) rc = -1; out_free_ctx: free(ctx); out: return rc; } static int vinyl_space_apply_initial_join_row(struct space *space, struct request *request) { assert(request->header != NULL); struct vy_env *env = vy_env(space->engine); struct vy_tx *tx = vy_tx_begin(env->xm); if (tx == NULL) return -1; struct txn_stmt stmt; memset(&stmt, 0, sizeof(stmt)); int rc = -1; switch (request->type) { case IPROTO_INSERT: case IPROTO_REPLACE: rc = vy_replace(env, tx, &stmt, space, request); break; case IPROTO_UPSERT: rc = vy_upsert(env, tx, &stmt, space, request); break; case IPROTO_DELETE: rc = vy_delete(env, tx, &stmt, space, request); break; default: diag_set(ClientError, ER_UNKNOWN_REQUEST_TYPE, request->type); break; } if (rc != 0) { vy_tx_rollback(tx); return -1; } /* * Account memory quota, see vinyl_engine_prepare() * and vinyl_engine_commit() for more details about * quota accounting. 
*/ size_t reserved = tx->write_size; if (vy_quota_use(&env->quota, reserved, TIMEOUT_INFINITY) != 0) unreachable(); size_t mem_used_before = lsregion_used(&env->mem_env.allocator); rc = vy_tx_prepare(tx); if (rc == 0) vy_tx_commit(tx, ++env->join_lsn); else vy_tx_rollback(tx); txn_stmt_unref_tuples(&stmt); size_t mem_used_after = lsregion_used(&env->mem_env.allocator); assert(mem_used_after >= mem_used_before); size_t used = mem_used_after - mem_used_before; if (used >= reserved) vy_quota_force_use(&env->quota, used - reserved); else vy_quota_release(&env->quota, reserved - used); return rc; } /* }}} Replication */ /* {{{ Garbage collection */ /** Argument passed to vy_gc_cb(). */ struct vy_gc_arg { /** Vinyl environment. */ struct vy_env *env; /** * Specifies what kinds of runs to delete. * See VY_GC_*. */ unsigned int gc_mask; /** LSN of the oldest checkpoint to save. */ int64_t gc_lsn; /** * ID of the current space and index. * Needed for file name formatting. */ uint32_t space_id; uint32_t index_id; /** Number of times the callback has been called. */ int loops; }; /** * Garbage collection callback, passed to vy_recovery_iterate(). * * Given a record encoding information about a vinyl run, try to * delete the corresponding files. On success, write a "forget" record * to the log so that all information about the run is deleted on the * next log rotation. */ static int vy_gc_cb(const struct vy_log_record *record, void *cb_arg) { struct vy_gc_arg *arg = cb_arg; switch (record->type) { case VY_LOG_CREATE_INDEX: arg->space_id = record->space_id; arg->index_id = record->index_id; goto out; case VY_LOG_PREPARE_RUN: if ((arg->gc_mask & VY_GC_INCOMPLETE) == 0) goto out; break; case VY_LOG_DROP_RUN: if ((arg->gc_mask & VY_GC_DROPPED) == 0 || record->gc_lsn >= arg->gc_lsn) goto out; break; default: goto out; } /* Try to delete files. */ if (vy_run_remove_files(arg->env->path, arg->space_id, arg->index_id, record->run_id) != 0) goto out; /* Forget the run on success. 
*/ vy_log_tx_begin(); vy_log_forget_run(record->run_id); /* * Leave the record in the vylog buffer on disk error. * If we fail to flush it before restart, we will retry * to delete the run file next time garbage collection * is invoked, which is harmless. */ vy_log_tx_try_commit(); out: if (++arg->loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); return 0; } /** Delete unused run files, see vy_gc_arg for more details. */ static void vy_gc(struct vy_env *env, struct vy_recovery *recovery, unsigned int gc_mask, int64_t gc_lsn) { struct vy_gc_arg arg = { .env = env, .gc_mask = gc_mask, .gc_lsn = gc_lsn, }; vy_recovery_iterate(recovery, vy_gc_cb, &arg); } static int vinyl_engine_collect_garbage(struct engine *engine, int64_t lsn) { struct vy_env *env = vy_env(engine); /* Cleanup old metadata log files. */ vy_log_collect_garbage(lsn); /* Cleanup run files. */ int64_t signature = checkpoint_last(NULL); struct vy_recovery *recovery = vy_recovery_new(signature, false); if (recovery == NULL) { say_error("failed to recover vylog for garbage collection"); return 0; } vy_gc(env, recovery, VY_GC_DROPPED, lsn); vy_recovery_delete(recovery); return 0; } /* }}} Garbage collection */ /* {{{ Backup */ /** Argument passed to vy_backup_cb(). */ struct vy_backup_arg { /** Vinyl environment. */ struct vy_env *env; /** Backup callback. */ int (*cb)(const char *, void *); /** Argument passed to @cb. */ void *cb_arg; /** * ID of the current space and index. * Needed for file name formatting. */ uint32_t space_id; uint32_t index_id; /** Number of times the callback has been called. */ int loops; }; /** Backup callback, passed to vy_recovery_iterate(). 
*/ static int vy_backup_cb(const struct vy_log_record *record, void *cb_arg) { struct vy_backup_arg *arg = cb_arg; if (record->type == VY_LOG_CREATE_INDEX) { arg->space_id = record->space_id; arg->index_id = record->index_id; } if (record->type != VY_LOG_CREATE_RUN || record->is_dropped) goto out; char path[PATH_MAX]; for (int type = 0; type < vy_file_MAX; type++) { vy_run_snprint_path(path, sizeof(path), arg->env->path, arg->space_id, arg->index_id, record->run_id, type); if (arg->cb(path, arg->cb_arg) != 0) return -1; } out: if (++arg->loops % VY_YIELD_LOOPS == 0) fiber_sleep(0); return 0; } static int vinyl_engine_backup(struct engine *engine, struct vclock *vclock, engine_backup_cb cb, void *cb_arg) { struct vy_env *env = vy_env(engine); /* Backup the metadata log. */ const char *path = vy_log_backup_path(vclock); if (path == NULL) return 0; /* vinyl not used */ if (cb(path, cb_arg) != 0) return -1; /* Backup run files. */ struct vy_recovery *recovery; recovery = vy_recovery_new(vclock_sum(vclock), true); if (recovery == NULL) { say_error("failed to recover vylog for backup"); return -1; } struct vy_backup_arg arg = { .env = env, .cb = cb, .cb_arg = cb_arg, }; int rc = vy_recovery_iterate(recovery, vy_backup_cb, &arg); vy_recovery_delete(recovery); return rc; } /* }}} Backup */ /** * This structure represents a request to squash a sequence of * UPSERT statements by inserting the resulting REPLACE statement * after them. */ struct vy_squash { /** Next in vy_squash_queue->queue. */ struct stailq_entry next; /** Vinyl environment. */ struct vy_env *env; /** Index this request is for. */ struct vy_index *index; /** Key to squash upserts for. */ struct tuple *stmt; }; struct vy_squash_queue { /** Fiber doing background upsert squashing. */ struct fiber *fiber; /** Used to wake up the fiber to process more requests. */ struct fiber_cond cond; /** Queue of vy_squash objects to be processed. */ struct stailq queue; /** Mempool for struct vy_squash. 
*/ struct mempool pool; }; static struct vy_squash * vy_squash_new(struct mempool *pool, struct vy_env *env, struct vy_index *index, struct tuple *stmt) { struct vy_squash *squash; squash = mempool_alloc(pool); if (squash == NULL) return NULL; squash->env = env; vy_index_ref(index); squash->index = index; tuple_ref(stmt); squash->stmt = stmt; return squash; } static void vy_squash_delete(struct mempool *pool, struct vy_squash *squash) { vy_index_unref(squash->index); tuple_unref(squash->stmt); mempool_free(pool, squash); } static int vy_squash_process(struct vy_squash *squash) { struct errinj *inj = errinj(ERRINJ_VY_SQUASH_TIMEOUT, ERRINJ_DOUBLE); if (inj != NULL && inj->dparam > 0) fiber_sleep(inj->dparam); struct vy_index *index = squash->index; struct vy_env *env = squash->env; /* * vy_apply_upsert() is used for primary key only, * so this is the same as index->key_def */ struct key_def *def = index->cmp_def; /* Upserts enabled only in the primary index. */ assert(index->id == 0); /* * Use the committed read view to avoid squashing * prepared, but not committed statements. */ struct tuple *result; if (vy_point_lookup(index, NULL, &env->xm->p_committed_read_view, squash->stmt, &result) != 0) return -1; if (result == NULL) return 0; /* * While we were reading on-disk runs, new statements could * have been inserted into the in-memory tree. Apply them to * the result. */ struct vy_mem *mem = index->mem; struct tree_mem_key tree_key = { .stmt = result, .lsn = vy_stmt_lsn(result), }; struct vy_mem_tree_iterator mem_itr = vy_mem_tree_lower_bound(&mem->tree, &tree_key, NULL); if (vy_mem_tree_iterator_is_invalid(&mem_itr)) { /* * The in-memory tree we are squashing an upsert * for was dumped, nothing to do. */ tuple_unref(result); return 0; } /** * Algorithm of the squashing. * Assume, during building the non-UPSERT statement * 'result' in the mem some new UPSERTs were inserted, and * some of them were commited, while the other were just * prepared. 
And lets UPSERT_THRESHOLD to be equal to 3, * for example. * Mem * -------------------------------------+ * UPSERT, lsn = 1, n_ups = 0 | * UPSERT, lsn = 2, n_ups = 1 | Commited * UPSERT, lsn = 3, n_ups = 2 | * -------------------------------------+ * UPSERT, lsn = MAX, n_ups = 3 | * UPSERT, lsn = MAX + 1, n_ups = 4 | Prepared * UPSERT, lsn = MAX + 2, n_ups = 5 | * -------------------------------------+ * In such a case the UPSERT statements with * lsns = {1, 2, 3} are squashed. But now the n_upsert * values in the prepared statements are not correct. * If we will not update values, then the * vy_index_commit_upsert will not be able to squash them. * * So after squashing it is necessary to update n_upsert * value in the prepared statements: * Mem * -------------------------------------+ * UPSERT, lsn = 1, n_ups = 0 | * UPSERT, lsn = 2, n_ups = 1 | Commited * REPLACE, lsn = 3 | * -------------------------------------+ * UPSERT, lsn = MAX, n_ups = 0 !!! | * UPSERT, lsn = MAX + 1, n_ups = 1 !!! | Prepared * UPSERT, lsn = MAX + 2, n_ups = 2 !!! | * -------------------------------------+ */ vy_mem_tree_iterator_prev(&mem->tree, &mem_itr); const struct tuple *mem_stmt; int64_t stmt_lsn; /* * According to the described algorithm, squash the * commited UPSERTs at first. */ while (!vy_mem_tree_iterator_is_invalid(&mem_itr)) { mem_stmt = *vy_mem_tree_iterator_get_elem(&mem->tree, &mem_itr); stmt_lsn = vy_stmt_lsn(mem_stmt); if (vy_tuple_compare(result, mem_stmt, def) != 0) break; /** * Leave alone prepared statements; they will be handled * in vy_range_commit_stmt. */ if (stmt_lsn >= MAX_LSN) break; if (vy_stmt_type(mem_stmt) != IPROTO_UPSERT) { /** * Somebody inserted non-upsert statement, * squashing is useless. 
*/ tuple_unref(result); return 0; } assert(index->id == 0); struct tuple *applied = vy_apply_upsert(mem_stmt, result, def, mem->format, mem->upsert_format, true); index->stat.upsert.applied++; tuple_unref(result); if (applied == NULL) return -1; result = applied; /** * In normal cases we get a result with the same lsn as * in mem_stmt. * But if there are buggy upserts that do wrong things, * they are ignored and the result has lower lsn. * We should fix the lsn in any case to replace * exactly mem_stmt in general and the buggy upsert * in particular. */ vy_stmt_set_lsn(result, stmt_lsn); vy_mem_tree_iterator_prev(&mem->tree, &mem_itr); } /* * The second step of the algorithm above is updating of * n_upsert values of the prepared UPSERTs. */ if (stmt_lsn >= MAX_LSN) { uint8_t n_upserts = 0; while (!vy_mem_tree_iterator_is_invalid(&mem_itr)) { mem_stmt = *vy_mem_tree_iterator_get_elem(&mem->tree, &mem_itr); if (vy_tuple_compare(result, mem_stmt, def) != 0 || vy_stmt_type(mem_stmt) != IPROTO_UPSERT) break; assert(vy_stmt_lsn(mem_stmt) >= MAX_LSN); vy_stmt_set_n_upserts((struct tuple *)mem_stmt, n_upserts); if (n_upserts <= VY_UPSERT_THRESHOLD) ++n_upserts; vy_mem_tree_iterator_prev(&mem->tree, &mem_itr); } } index->stat.upsert.squashed++; /* * Insert the resulting REPLACE statement to the mem * and adjust the quota. */ size_t mem_used_before = lsregion_used(&env->mem_env.allocator); const struct tuple *region_stmt = NULL; int rc = vy_index_set(index, mem, result, ®ion_stmt); tuple_unref(result); size_t mem_used_after = lsregion_used(&env->mem_env.allocator); assert(mem_used_after >= mem_used_before); if (rc == 0) { /* * We don't modify the resulting statement, * so there's no need in invalidating the cache. 
*/ vy_mem_commit_stmt(mem, region_stmt); vy_quota_force_use(&env->quota, mem_used_after - mem_used_before); } return rc; } static struct vy_squash_queue * vy_squash_queue_new(void) { struct vy_squash_queue *sq = malloc(sizeof(*sq)); if (sq == NULL) { diag_set(OutOfMemory, sizeof(*sq), "malloc", "sq"); return NULL; } sq->fiber = NULL; fiber_cond_create(&sq->cond); stailq_create(&sq->queue); mempool_create(&sq->pool, cord_slab_cache(), sizeof(struct vy_squash)); return sq; } static void vy_squash_queue_delete(struct vy_squash_queue *sq) { if (sq->fiber != NULL) { sq->fiber = NULL; /* Sic: fiber_cancel() can't be used here */ fiber_cond_signal(&sq->cond); } struct vy_squash *squash, *next; stailq_foreach_entry_safe(squash, next, &sq->queue, next) vy_squash_delete(&sq->pool, squash); free(sq); } static int vy_squash_queue_f(va_list va) { struct vy_squash_queue *sq = va_arg(va, struct vy_squash_queue *); while (sq->fiber != NULL) { if (stailq_empty(&sq->queue)) { fiber_cond_wait(&sq->cond); continue; } struct vy_squash *squash; squash = stailq_shift_entry(&sq->queue, struct vy_squash, next); if (vy_squash_process(squash) != 0) diag_log(); vy_squash_delete(&sq->pool, squash); } return 0; } /* * For a given UPSERT statement, insert the resulting REPLACE * statement after it. Done in a background fiber. */ static void vy_squash_schedule(struct vy_index *index, struct tuple *stmt, void *arg) { struct vy_env *env = arg; struct vy_squash_queue *sq = env->squash_queue; say_verbose("%s: schedule upsert optimization for %s", vy_index_name(index), vy_stmt_str(stmt)); /* Start the upsert squashing fiber on demand. 
*/ if (sq->fiber == NULL) { sq->fiber = fiber_new("vinyl.squash_queue", vy_squash_queue_f); if (sq->fiber == NULL) goto fail; fiber_start(sq->fiber, sq); } struct vy_squash *squash = vy_squash_new(&sq->pool, env, index, stmt); if (squash == NULL) goto fail; stailq_add_tail_entry(&sq->queue, squash, next); fiber_cond_signal(&sq->cond); return; fail: diag_log(); diag_clear(diag_get()); } /* {{{ Cursor */ static void vinyl_iterator_on_tx_destroy(struct trigger *trigger, void *event) { (void)event; struct vinyl_iterator *it = container_of(trigger, struct vinyl_iterator, on_tx_destroy); it->tx = NULL; } static int vinyl_iterator_last(struct iterator *base, struct tuple **ret) { (void)base; *ret = NULL; return 0; } static void vinyl_iterator_close(struct vinyl_iterator *it) { vy_read_iterator_close(&it->iterator); vy_index_unref(it->index); it->index = NULL; tuple_unref(it->key); it->key = NULL; if (it->tx == &it->tx_autocommit) { /* * Rollback the automatic transaction. * Use vy_tx_destroy() so as not to spoil * the statistics of rollbacks issued by * user transactions. */ vy_tx_destroy(it->tx); } else { trigger_clear(&it->on_tx_destroy); } it->tx = NULL; it->base.next = vinyl_iterator_last; } static int vinyl_iterator_primary_next(struct iterator *base, struct tuple **ret) { assert(base->next = vinyl_iterator_primary_next); struct vinyl_iterator *it = (struct vinyl_iterator *)base; assert(it->index->id == 0); struct tuple *tuple; if (it->tx == NULL) { diag_set(ClientError, ER_CURSOR_NO_TRANSACTION); goto fail; } if (it->tx->state == VINYL_TX_ABORT || it->tx->read_view->is_aborted) { diag_set(ClientError, ER_READ_VIEW_ABORTED); goto fail; } if (vy_read_iterator_next(&it->iterator, &tuple) != 0) goto fail; if (tuple == NULL) { /* EOF. Close the iterator immediately. 
*/ vinyl_iterator_close(it); *ret = NULL; return 0; } *ret = tuple_bless(tuple); if (*ret != NULL) return 0; fail: vinyl_iterator_close(it); return -1; } static int vinyl_iterator_secondary_next(struct iterator *base, struct tuple **ret) { assert(base->next = vinyl_iterator_secondary_next); struct vinyl_iterator *it = (struct vinyl_iterator *)base; assert(it->index->id > 0); struct tuple *tuple; next: if (it->tx == NULL) { diag_set(ClientError, ER_CURSOR_NO_TRANSACTION); goto fail; } if (it->tx->state == VINYL_TX_ABORT || it->tx->read_view->is_aborted) { diag_set(ClientError, ER_READ_VIEW_ABORTED); goto fail; } if (vy_read_iterator_next(&it->iterator, &tuple) != 0) goto fail; if (tuple == NULL) { /* EOF. Close the iterator immediately. */ vinyl_iterator_close(it); *ret = NULL; return 0; } #ifndef NDEBUG struct errinj *delay = errinj(ERRINJ_VY_DELAY_PK_LOOKUP, ERRINJ_BOOL); if (delay && delay->bparam) { while (delay->bparam) fiber_sleep(0.01); } #endif /* * Get the full tuple from the primary index. * Note, there's no need in vy_tx_track() as the * tuple is already tracked in the secondary index. */ struct tuple *full_tuple; if (vy_point_lookup(it->index->pk, it->tx, vy_tx_read_view(it->tx), tuple, &full_tuple) != 0) goto fail; if (full_tuple == NULL) { /* * All indexes of a space must be consistent, i.e. * if a tuple is present in one index, it must be * present in all other indexes as well, so we can * get here only if there's a bug somewhere in vinyl. * Don't abort as core dump won't really help us in * this case. Just warn the user and proceed to the * next tuple. 
*/ say_warn("%s: key %s missing in primary index", vy_index_name(it->index), vy_stmt_str(tuple)); goto next; } *ret = tuple_bless(full_tuple); tuple_unref(full_tuple); if (*ret != NULL) return 0; fail: vinyl_iterator_close(it); return -1; } static void vinyl_iterator_free(struct iterator *base) { assert(base->free == vinyl_iterator_free); struct vinyl_iterator *it = (struct vinyl_iterator *)base; if (base->next != vinyl_iterator_last) vinyl_iterator_close(it); mempool_free(&it->env->iterator_pool, it); } static struct iterator * vinyl_index_create_iterator(struct index *base, enum iterator_type type, const char *key, uint32_t part_count) { struct vy_index *index = vy_index(base); struct vy_env *env = vy_env(base->engine); if (type > ITER_GT) { diag_set(UnsupportedIndexFeature, base->def, "requested iterator type"); return NULL; } struct vinyl_iterator *it = mempool_alloc(&env->iterator_pool); if (it == NULL) { diag_set(OutOfMemory, sizeof(struct vinyl_iterator), "mempool", "struct vinyl_iterator"); return NULL; } it->key = vy_stmt_new_select(index->env->key_format, key, part_count); if (it->key == NULL) { mempool_free(&env->iterator_pool, it); return NULL; } iterator_create(&it->base, base); if (index->id == 0) it->base.next = vinyl_iterator_primary_next; else it->base.next = vinyl_iterator_secondary_next; it->base.free = vinyl_iterator_free; it->env = env; it->index = index; vy_index_ref(index); struct vy_tx *tx = in_txn() ? in_txn()->engine_tx : NULL; assert(tx == NULL || tx->state == VINYL_TX_READY); if (tx != NULL) { /* * Register a trigger that will abort this iterator * when the transaction ends. 
*/ trigger_create(&it->on_tx_destroy, vinyl_iterator_on_tx_destroy, NULL, NULL); trigger_add(&tx->on_destroy, &it->on_tx_destroy); } else { tx = &it->tx_autocommit; vy_tx_create(env->xm, tx); } it->tx = tx; vy_read_iterator_open(&it->iterator, index, tx, type, it->key, (const struct vy_read_view **)&tx->read_view); return (struct iterator *)it; } static int vinyl_index_get(struct index *base, const char *key, uint32_t part_count, struct tuple **ret) { assert(base->def->opts.is_unique); assert(base->def->key_def->part_count == part_count); struct vy_index *index = vy_index(base); struct vy_env *env = vy_env(base->engine); struct vy_tx *tx = in_txn() ? in_txn()->engine_tx : NULL; const struct vy_read_view **rv = (tx != NULL ? vy_tx_read_view(tx) : &env->xm->p_global_read_view); struct tuple *tuple; if (vy_index_full_by_key(index, tx, rv, key, part_count, &tuple) != 0) return -1; if (tuple != NULL) { *ret = tuple_bless(tuple); tuple_unref(tuple); return *ret == NULL ? -1 : 0; } *ret = NULL; return 0; } /*** }}} Cursor */ static const struct engine_vtab vinyl_engine_vtab = { /* .shutdown = */ vinyl_engine_shutdown, /* .create_space = */ vinyl_engine_create_space, /* .join = */ vinyl_engine_join, /* .begin = */ vinyl_engine_begin, /* .begin_statement = */ vinyl_engine_begin_statement, /* .prepare = */ vinyl_engine_prepare, /* .commit = */ vinyl_engine_commit, /* .rollback_statement = */ vinyl_engine_rollback_statement, /* .rollback = */ vinyl_engine_rollback, /* .bootstrap = */ vinyl_engine_bootstrap, /* .begin_initial_recovery = */ vinyl_engine_begin_initial_recovery, /* .begin_final_recovery = */ vinyl_engine_begin_final_recovery, /* .end_recovery = */ vinyl_engine_end_recovery, /* .begin_checkpoint = */ vinyl_engine_begin_checkpoint, /* .wait_checkpoint = */ vinyl_engine_wait_checkpoint, /* .commit_checkpoint = */ vinyl_engine_commit_checkpoint, /* .abort_checkpoint = */ vinyl_engine_abort_checkpoint, /* .collect_garbage = */ vinyl_engine_collect_garbage, /* .backup 
= */ vinyl_engine_backup, /* .memory_stat = */ vinyl_engine_memory_stat, /* .check_space_def = */ vinyl_engine_check_space_def, }; static const struct space_vtab vinyl_space_vtab = { /* .destroy = */ vinyl_space_destroy, /* .bsize = */ vinyl_space_bsize, /* .apply_initial_join_row = */ vinyl_space_apply_initial_join_row, /* .execute_replace = */ vinyl_space_execute_replace, /* .execute_delete = */ vinyl_space_execute_delete, /* .execute_update = */ vinyl_space_execute_update, /* .execute_upsert = */ vinyl_space_execute_upsert, /* .init_system_space = */ vinyl_init_system_space, /* .check_index_def = */ vinyl_space_check_index_def, /* .create_index = */ vinyl_space_create_index, /* .add_primary_key = */ vinyl_space_add_primary_key, /* .drop_primary_key = */ vinyl_space_drop_primary_key, /* .check_format = */ vinyl_space_check_format, /* .build_secondary_key = */ vinyl_space_build_secondary_key, /* .prepare_truncate = */ vinyl_space_prepare_truncate, /* .commit_truncate = */ vinyl_space_commit_truncate, /* .prepare_alter = */ vinyl_space_prepare_alter, /* .commit_alter = */ vinyl_space_commit_alter, }; static const struct index_vtab vinyl_index_vtab = { /* .destroy = */ vinyl_index_destroy, /* .commit_create = */ vinyl_index_commit_create, /* .commit_drop = */ vinyl_index_commit_drop, /* .update_def = */ generic_index_update_def, /* .size = */ vinyl_index_size, /* .bsize = */ vinyl_index_bsize, /* .min = */ generic_index_min, /* .max = */ generic_index_max, /* .random = */ generic_index_random, /* .count = */ generic_index_count, /* .get = */ vinyl_index_get, /* .replace = */ generic_index_replace, /* .create_iterator = */ vinyl_index_create_iterator, /* .create_snapshot_iterator = */ generic_index_create_snapshot_iterator, /* .info = */ vinyl_index_info, /* .begin_build = */ generic_index_begin_build, /* .reserve = */ generic_index_reserve, /* .build_next = */ generic_index_build_next, /* .end_build = */ generic_index_end_build, }; 
tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_extract_key.h0000664000000000000000000000350413306560010021465 0ustar rootroot#ifndef TARANTOOL_BOX_TUPLE_EXTRACT_KEY_H_INCLUDED #define TARANTOOL_BOX_TUPLE_EXTRACT_KEY_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct key_def; /** * Initialize key extraction functions in the key_def * @param key_def key definition */ void tuple_extract_key_set(struct key_def *key_def); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_TUPLE_EXTRACT_KEY_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/tuple.c0000664000000000000000000002676413306565107017107 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "tuple.h" #include "trivia/util.h" #include "memory.h" #include "fiber.h" #include "tt_uuid.h" #include "small/quota.h" #include "small/small.h" #include "tuple_update.h" #include "coll_cache.h" static struct mempool tuple_iterator_pool; static struct small_alloc runtime_alloc; enum { /** Lowest allowed slab_alloc_minimal */ OBJSIZE_MIN = 16, }; static const double ALLOC_FACTOR = 1.05; /** * Last tuple returned by public C API * \sa tuple_bless() */ struct tuple *box_tuple_last; struct tuple_format *tuple_format_runtime; static void runtime_tuple_delete(struct tuple_format *format, struct tuple *tuple); /** A virtual method table for tuple_format_runtime */ static struct tuple_format_vtab tuple_format_runtime_vtab = { runtime_tuple_delete, }; struct tuple * tuple_new(struct tuple_format *format, const char *data, const char *end) { assert(format->vtab.destroy == tuple_format_runtime_vtab.destroy); mp_tuple_assert(data, end); size_t data_len = end - data; size_t meta_size = tuple_format_meta_size(format); size_t total = sizeof(struct tuple) + meta_size + data_len; struct tuple *tuple = (struct tuple *) smalloc(&runtime_alloc, total); if (tuple == NULL) { diag_set(OutOfMemory, (unsigned) total, "malloc", "tuple"); return NULL; } tuple->refs = 0; tuple->bsize = data_len; tuple->format_id = tuple_format_id(format); tuple_format_ref(format); tuple->data_offset = sizeof(struct tuple) + meta_size; char *raw = (char *) tuple + tuple->data_offset; uint32_t *field_map = (uint32_t *) raw; memcpy(raw, data, data_len); if (tuple_init_field_map(format, field_map, raw)) { runtime_tuple_delete(format, tuple); return NULL; } say_debug("%s(%zu) = %p", __func__, data_len, tuple); return tuple; } static void runtime_tuple_delete(struct tuple_format *format, struct tuple *tuple) { assert(format->vtab.destroy == tuple_format_runtime_vtab.destroy); say_debug("%s(%p)", __func__, tuple); assert(tuple->refs == 0); size_t total = sizeof(struct tuple) + 
tuple_format_meta_size(format) + tuple->bsize; tuple_format_unref(format); smfree(&runtime_alloc, tuple, total); } int tuple_validate_raw(struct tuple_format *format, const char *tuple) { if (format->field_count == 0) return 0; /* Nothing to check */ /* Check to see if the tuple has a sufficient number of fields. */ uint32_t field_count = mp_decode_array(&tuple); if (format->exact_field_count > 0 && format->exact_field_count != field_count) { diag_set(ClientError, ER_EXACT_FIELD_COUNT, (unsigned) field_count, (unsigned) format->exact_field_count); return -1; } if (unlikely(field_count < format->min_field_count)) { diag_set(ClientError, ER_MIN_FIELD_COUNT, (unsigned) field_count, (unsigned) format->min_field_count); return -1; } /* Check field types */ struct tuple_field *field = &format->fields[0]; uint32_t i = 0; uint32_t defined_field_count = MIN(field_count, format->field_count); for (; i < defined_field_count; ++i, ++field) { if (key_mp_type_validate(field->type, mp_typeof(*tuple), ER_FIELD_TYPE, i + TUPLE_INDEX_BASE, field->is_nullable)) return -1; mp_next(&tuple); } return 0; } /** * Incremented on every snapshot and is used to distinguish tuples * which were created after start of a snapshot (these tuples can * be freed right away, since they are not used for snapshot) or * before start of a snapshot (these tuples can be freed only * after the snapshot has finished, otherwise it'll write bad data * to the snapshot file). 
*/ const char * tuple_seek(struct tuple_iterator *it, uint32_t fieldno) { const char *field = tuple_field(it->tuple, fieldno); if (likely(field != NULL)) { it->pos = field; it->fieldno = fieldno; return tuple_next(it); } else { it->pos = it->end; it->fieldno = tuple_field_count(it->tuple); return NULL; } } const char * tuple_next(struct tuple_iterator *it) { if (it->pos < it->end) { const char *field = it->pos; mp_next(&it->pos); assert(it->pos <= it->end); it->fieldno++; return field; } return NULL; } int tuple_init(field_name_hash_f hash) { field_name_hash = hash; /* * Create a format for runtime tuples */ tuple_format_runtime = tuple_format_new(&tuple_format_runtime_vtab, NULL, 0, 0, NULL, 0, NULL); if (tuple_format_runtime == NULL) return -1; /* Make sure this one stays around. */ tuple_format_ref(tuple_format_runtime); small_alloc_create(&runtime_alloc, &cord()->slabc, OBJSIZE_MIN, ALLOC_FACTOR); mempool_create(&tuple_iterator_pool, &cord()->slabc, sizeof(struct tuple_iterator)); box_tuple_last = NULL; if (coll_cache_init() != 0) return -1; return 0; } void tuple_arena_create(struct slab_arena *arena, struct quota *quota, uint64_t arena_max_size, uint32_t slab_size, const char *arena_name) { /* * Ensure that quota is a multiple of slab_size, to * have accurate value of quota_used_ratio. 
*/ size_t prealloc = small_align(arena_max_size, slab_size); say_info("mapping %zu bytes for %s tuple arena...", prealloc, arena_name); if (slab_arena_create(arena, quota, prealloc, slab_size, MAP_PRIVATE) != 0) { if (errno == ENOMEM) { panic("failed to preallocate %zu bytes: Cannot "\ "allocate memory, check option '%s_memory' in box.cfg(..)", prealloc, arena_name); } else { panic_syserror("failed to preallocate %zu bytes for %s"\ " tuple arena", prealloc, arena_name); } } } void tuple_arena_destroy(struct slab_arena *arena) { slab_arena_destroy(arena); } void tuple_free(void) { /* Unref last tuple returned by public C API */ if (box_tuple_last != NULL) { tuple_unref(box_tuple_last); box_tuple_last = NULL; } mempool_destroy(&tuple_iterator_pool); small_alloc_destroy(&runtime_alloc); tuple_format_free(); coll_cache_destroy(); } box_tuple_format_t * box_tuple_format_default(void) { return tuple_format_runtime; } box_tuple_format_t * box_tuple_format_new(struct key_def **keys, uint16_t key_count) { box_tuple_format_t *format = tuple_format_new(&tuple_format_runtime_vtab, keys, key_count, 0, NULL, 0, NULL); if (format != NULL) tuple_format_ref(format); return format; } int box_tuple_ref(box_tuple_t *tuple) { assert(tuple != NULL); return tuple_ref(tuple); } void box_tuple_unref(box_tuple_t *tuple) { assert(tuple != NULL); return tuple_unref(tuple); } uint32_t box_tuple_field_count(const box_tuple_t *tuple) { assert(tuple != NULL); return tuple_field_count(tuple); } size_t box_tuple_bsize(const box_tuple_t *tuple) { assert(tuple != NULL); return tuple->bsize; } ssize_t tuple_to_buf(const struct tuple *tuple, char *buf, size_t size) { uint32_t bsize; const char *data = tuple_data_range(tuple, &bsize); if (likely(bsize <= size)) { memcpy(buf, data, bsize); } return bsize; } ssize_t box_tuple_to_buf(const box_tuple_t *tuple, char *buf, size_t size) { assert(tuple != NULL); return tuple_to_buf(tuple, buf, size); } box_tuple_format_t * box_tuple_format(const box_tuple_t 
*tuple) { assert(tuple != NULL); return tuple_format(tuple); } const char * box_tuple_field(const box_tuple_t *tuple, uint32_t fieldno) { assert(tuple != NULL); return tuple_field(tuple, fieldno); } typedef struct tuple_iterator box_tuple_iterator_t; box_tuple_iterator_t * box_tuple_iterator(box_tuple_t *tuple) { assert(tuple != NULL); struct tuple_iterator *it = (struct tuple_iterator *) mempool_alloc(&tuple_iterator_pool); if (it == NULL) { diag_set(OutOfMemory, tuple_iterator_pool.objsize, "mempool", "new slab"); return NULL; } if (tuple_ref(tuple) != 0) { mempool_free(&tuple_iterator_pool, it); return NULL; } tuple_rewind(it, tuple); return it; } void box_tuple_iterator_free(box_tuple_iterator_t *it) { tuple_unref(it->tuple); mempool_free(&tuple_iterator_pool, it); } uint32_t box_tuple_position(box_tuple_iterator_t *it) { return it->fieldno; } void box_tuple_rewind(box_tuple_iterator_t *it) { tuple_rewind(it, it->tuple); } const char * box_tuple_seek(box_tuple_iterator_t *it, uint32_t fieldno) { return tuple_seek(it, fieldno); } const char * box_tuple_next(box_tuple_iterator_t *it) { return tuple_next(it); } box_tuple_t * box_tuple_update(const box_tuple_t *tuple, const char *expr, const char *expr_end) { struct tuple_format *format = tuple_format_runtime; uint32_t new_size = 0, bsize; const char *old_data = tuple_data_range(tuple, &bsize); struct region *region = &fiber()->gc; size_t used = region_used(region); const char *new_data = tuple_update_execute(region_aligned_alloc_cb, region, expr, expr_end, old_data, old_data + bsize, &new_size, 1, NULL); if (new_data == NULL) { region_truncate(region, used); return NULL; } struct tuple *ret = tuple_new(format, new_data, new_data + new_size); region_truncate(region, used); if (ret != NULL) return tuple_bless(ret); return NULL; } box_tuple_t * box_tuple_upsert(const box_tuple_t *tuple, const char *expr, const char *expr_end) { struct tuple_format *format = tuple_format_runtime; uint32_t new_size = 0, bsize; const 
char *old_data = tuple_data_range(tuple, &bsize); struct region *region = &fiber()->gc; size_t used = region_used(region); const char *new_data = tuple_upsert_execute(region_aligned_alloc_cb, region, expr, expr_end, old_data, old_data + bsize, &new_size, 1, false, NULL); if (new_data == NULL) { region_truncate(region, used); return NULL; } struct tuple *ret = tuple_new(format, new_data, new_data + new_size); region_truncate(region, used); if (ret != NULL) return tuple_bless(ret); return NULL; } box_tuple_t * box_tuple_new(box_tuple_format_t *format, const char *data, const char *end) { struct tuple *ret = tuple_new(format, data, end); if (ret == NULL) return NULL; /* Can't fail on zero refs. */ return tuple_bless(ret); } int tuple_snprint(char *buf, int size, const struct tuple *tuple) { int total = 0; if (tuple == NULL) { SNPRINT(total, snprintf, buf, size, ""); return total; } SNPRINT(total, mp_snprint, buf, size, tuple_data(tuple)); return total; } const char * tuple_str(const struct tuple *tuple) { char *buf = tt_static_buf(); if (tuple_snprint(buf, TT_STATIC_BUF_LEN, tuple) < 0) return ""; return buf; } tarantool_1.9.1.26.g63eb81e3c/src/box/call.c0000664000000000000000000001502413306560010016640 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "box/call.h" #include "lua/call.h" #include "schema.h" #include "session.h" #include "func.h" #include "port.h" #include "box.h" #include "txn.h" #include "xrow.h" #include "iproto_constants.h" #include "rmean.h" #include "small/obuf.h" /** * Find a function by name and check "EXECUTE" permissions. * * @param name function name * @param name_len length of @a name * @param[out] funcp function object * Sic: *pfunc == NULL means that perhaps the user has a global * "EXECUTE" privilege, so no specific grant to a function. * * @retval -1 on access denied * @retval 0 on success */ static inline int access_check_func(const char *name, uint32_t name_len, struct func **funcp) { struct func *func = func_by_name(name, name_len); struct credentials *credentials = effective_user(); /* * If the user has universal access, don't bother with checks. * No special check for ADMIN user is necessary * since ADMIN has universal access. */ if ((credentials->universal_access & (PRIV_X | PRIV_U)) == (PRIV_X | PRIV_U)) { *funcp = func; return 0; } user_access_t access = PRIV_X | PRIV_U; user_access_t func_access = access & ~credentials->universal_access; if (func == NULL || /* Check for missing Usage access, ignore owner rights. 
*/ func_access & PRIV_U || /* Check for missing specific access, respect owner rights. */ (func->def->uid != credentials->uid && func_access & ~func->access[credentials->auth_token].effective)) { /* Access violation, report error. */ struct user *user = user_find(credentials->uid); if (user != NULL) { if (!(access & credentials->universal_access)) { diag_set(AccessDeniedError, priv_name(PRIV_U), schema_object_name(SC_UNIVERSE), "", user->def->name); } else { diag_set(AccessDeniedError, priv_name(PRIV_X), schema_object_name(SC_FUNCTION), tt_cstr(name, name_len), user->def->name); } } return -1; } *funcp = func; return 0; } static int box_c_call(struct func *func, struct call_request *request, struct port *port) { assert(func != NULL && func->def->language == FUNC_LANGUAGE_C); /* Create a call context */ port_tuple_create(port); box_function_ctx_t ctx = { port }; /* Clear all previous errors */ diag_clear(&fiber()->diag); assert(!in_txn()); /* transaction is not started */ /* Call function from the shared library */ int rc = func_call(func, &ctx, request->args, request->args_end); func = NULL; /* May be deleted by DDL */ if (rc != 0) { if (diag_last_error(&fiber()->diag) == NULL) { /* Stored procedure forget to set diag */ diag_set(ClientError, ER_PROC_C, "unknown error"); } port_destroy(port); return -1; } return 0; } int box_func_reload(const char *name) { size_t name_len = strlen(name); struct func *func = NULL; if ((access_check_func(name, name_len, &func)) != 0) return -1; if (func == NULL) { diag_set(ClientError, ER_NO_SUCH_FUNCTION, name); return -1; } if (func->def->language != FUNC_LANGUAGE_C || func->func == NULL) return 0; /* Nothing to do */ if (func_reload(func) == 0) return 0; return -1; } int box_process_call(struct call_request *request, struct port *port) { rmean_collect(rmean_box, IPROTO_CALL, 1); /** * Find the function definition and check access. 
*/ const char *name = request->name; assert(name != NULL); uint32_t name_len = mp_decode_strl(&name); struct func *func = NULL; /** * Sic: func == NULL means that perhaps the user has a global * "EXECUTE" privilege, so no specific grant to a function. */ if (access_check_func(name, name_len, &func) != 0) return -1; /* permission denied */ /** * Change the current user id if the function is * a set-definer-uid one. If the function is not * defined, it's obviously not a setuid one. */ struct credentials *orig_credentials = NULL; if (func && func->def->setuid) { orig_credentials = effective_user(); /* Remember and change the current user id. */ if (func->owner_credentials.auth_token >= BOX_USER_MAX) { /* * Fill the cache upon first access, since * when func is created, no user may * be around to fill it (recovery of * system spaces from a snapshot). */ struct user *owner = user_find(func->def->uid); if (owner == NULL) return -1; credentials_init(&func->owner_credentials, owner->auth_token, owner->def->uid); } fiber_set_user(fiber(), &func->owner_credentials); } int rc; if (func && func->def->language == FUNC_LANGUAGE_C) { rc = box_c_call(func, request, port); } else { rc = box_lua_call(request, port); } /* Restore the original user */ if (orig_credentials) fiber_set_user(fiber(), orig_credentials); if (rc != 0) { txn_rollback(); return -1; } if (in_txn()) { diag_set(ClientError, ER_FUNCTION_TX_ACTIVE); txn_rollback(); return -1; } return 0; } int box_process_eval(struct call_request *request, struct port *port) { rmean_collect(rmean_box, IPROTO_EVAL, 1); /* Check permissions */ if (access_check_universe(PRIV_X) != 0) return -1; if (box_lua_eval(request, port) != 0) { txn_rollback(); return -1; } if (in_txn()) { diag_set(ClientError, ER_FUNCTION_TX_ACTIVE); txn_rollback(); return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/replication.h0000664000000000000000000002534213306565107020263 0ustar rootroot#ifndef INCLUDES_BOX_REPLICATION_H #define 
INCLUDES_BOX_REPLICATION_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "tt_uuid.h" #include "trigger.h" #include #define RB_COMPACT 1 #include /* replicaset_t */ #include #include "applier.h" #include #include "fiber_cond.h" #include "vclock.h" #include "latch.h" /** * @module replication - global state of multi-master * replicated database. * * Right now we only support asynchronous master-master * replication. * * Each replica set has a globally unique identifier. Each * replica in the replica set is identified as well. * A replica which is part of one replica set can not join * another replica set. * * Replica set and instance identifiers are stored in a system * space _cluster on all replicas. 
The instance identifier * is also stored in each snapshot header, this is how * the instance knows which instance id in the _cluster space * is its own id. * * Replica set and instance identifiers are globally unique * (UUID, universally unique identifiers). In addition * to these unique but long identifiers, a short integer id * is used for pervasive replica identification in a replication * stream, a snapshot, or internal data structures. * The mapping between 16-byte globally unique id and * 4 byte replica set local id is stored in _cluster space. When * a replica joins the replica set, it sends its globally unique * identifier to one of the masters, and gets its replica set * local identifier as part of the reply to the JOIN request * (in fact, it gets it as a REPLACE request in _cluster * system space along with the rest of the replication * stream). * * Replica set state on each replica is represented by a * table like below: * * ---------------------------------- * | replica id | confirmed lsn | * ---------------------------------- * | 1 | 1258 | <-- changes of the first replica * ---------------------------------- * | 2 | 1292 | <-- changes of the local instance * ---------------------------------- * * This table is called in the code "vector clock". * and is implemented in @file vclock.h */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct gc_consumer; static const int REPLICATION_CONNECT_QUORUM_ALL = INT_MAX; /** * Network timeout. Determines how often master and slave exchange * heartbeat messages. Set by box.cfg.replication_timeout. */ extern double replication_timeout; /** * Maximal time box.cfg() may wait for connections to all configured * replicas to be established. If box.cfg() fails to connect to all * replicas within the timeout, it will either leave the instance in * the orphan mode (recovery) or fail (bootstrap, reconfiguration). 
*/ extern double replication_connect_timeout; /** * Minimal number of replicas to sync for this instance to switch * to the write mode. If set to REPLICATION_CONNECT_QUORUM_ALL, * wait for all configured masters. */ extern int replication_connect_quorum; /** * Switch applier from "sync" to "follow" as soon as the replication * lag is less than the value of the following variable. */ extern double replication_sync_lag; /** * Wait for the given period of time before trying to reconnect * to a master. */ static inline double replication_reconnect_interval(void) { return replication_timeout; } /** * Disconnect a replica if no heartbeat message has been * received from it within the given period. */ static inline double replication_disconnect_timeout(void) { return replication_timeout * 4; } void replication_init(void); void replication_free(void); /** Instance id vclock identifier. */ extern uint32_t instance_id; /** UUID of the instance. */ extern struct tt_uuid INSTANCE_UUID; /** UUID of the replica set. */ extern struct tt_uuid REPLICASET_UUID; typedef rb_tree(struct replica) replica_hash_t; /** * Replica set state. * * A replica set is a set of appliers and their matching * relays, usually connected in full mesh. */ struct replicaset { /** Memory pool for struct replica allocations. */ struct mempool pool; /** Hash of replicas indexed by UUID. */ replica_hash_t hash; /** * List of replicas that haven't received a UUID. * It contains both replicas that are still trying * to connect and those that failed to connect. */ struct rlist anon; /** * TX thread local vclock reflecting the state * of the cluster as maintained by appliers. */ struct vclock vclock; /** Applier state. */ struct { /** * Total number of replicas with attached * appliers. */ int total; /** * Number of appliers that have successfully * connected and received their UUIDs. */ int connected; /** * Number of appliers that are disconnected, * because replica is loading. 
 */
			int loading;
			/**
			 * Number of appliers that have successfully
			 * synchronized and hence contribute to the
			 * quorum.
			 */
			int synced;
			/**
			 * Signaled whenever an applier changes its
			 * state.
			 */
			struct fiber_cond cond;
			/*
			 * The latch is used to order replication requests
			 * running on behalf of all dead replicas
			 * (replicas which have a server id but don't have
			 * struct replica object).
			 */
			struct latch order_latch;
	} applier;
	/** Map of all known replica_id's to corresponding replica's. */
	struct replica **replica_by_id;
};
extern struct replicaset replicaset;

/**
 * Summary information about a replica in the replica set.
 */
struct replica {
	/** Link in replicaset::hash. */
	rb_node(struct replica) in_hash;
	/**
	 * Replica UUID, or nil if the applier has not received
	 * it from the master yet.
	 */
	struct tt_uuid uuid;
	/**
	 * Replica ID or nil if the replica has not been
	 * registered in the _cluster space yet.
	 */
	uint32_t id;
	/** Applier fiber. */
	struct applier *applier;
	/** Relay thread. */
	struct relay *relay;
	/** Garbage collection state associated with the replica. */
	struct gc_consumer *gc;
	/** Link in the anon_replicas list. */
	struct rlist in_anon;
	/**
	 * Trigger invoked when the applier changes its state.
	 */
	struct trigger on_applier_state;
	/**
	 * During initial connect or reconnect we require applier
	 * to sync with the master before the replica can leave
	 * read-only mode. This enum reflects the state of the
	 * state machine for applier sync. Technically it is a
	 * subset of the applier state machine, but since it's
	 * much simpler and is used for a different purpose
	 * (achieving replication connect quorum), we keep it
	 * separate from applier.
	 */
	enum applier_state applier_sync_state;
	/* The latch is used to order replication requests. */
	struct latch order_latch;
};

enum {
	/**
	 * Reserved id used for local requests, checkpoint rows
	 * and in cases where id is unknown.
*/ REPLICA_ID_NIL = 0, }; /** * Find a replica by UUID */ struct replica * replica_by_uuid(const struct tt_uuid *uuid); /** * Find a replica by ID */ struct replica * replica_by_id(uint32_t replica_id); /** * Return the replica set leader. */ struct replica * replicaset_leader(void); struct replica * replicaset_first(void); struct replica * replicaset_next(struct replica *replica); #define replicaset_foreach(var) \ for (struct replica *var = replicaset_first(); \ var != NULL; var = replicaset_next(var)) /** * Set numeric replica-set-local id of remote replica. * table. Add replica to the replica set vclock with LSN = 0. */ void replica_set_id(struct replica *replica, uint32_t id); /* * Clear the numeric replica-set-local id of a replica. * * The replica is removed from the replication vector clock. */ void replica_clear_id(struct replica *replica); /** * Register \a relay of a \a replica. * \pre a replica can have only one relay * \pre replica->id != REPLICA_ID_NIL */ void replica_set_relay(struct replica *replica, struct relay *relay); /** * Unregister \a relay from the \a replica. */ void replica_clear_relay(struct replica *replica); #if defined(__cplusplus) } /* extern "C" */ void replica_check_id(uint32_t replica_id); /** * Register the universally unique identifier of a remote replica and * a matching replica-set-local identifier in the _cluster registry. * Called from on_replace_dd_cluster() when a remote master joins the * replica set. */ struct replica * replicaset_add(uint32_t replica_id, const struct tt_uuid *instance_uuid); /** * Try to connect appliers to remote peers and receive UUID. * Appliers that did not connect will connect asynchronously. * On success, update the replica set with new appliers. * \post appliers are connected to remote hosts and paused. * Use replicaset_follow() to resume appliers. 
* * \param appliers the array of appliers * \param count size of appliers array * \param timeout connection timeout * \param connect_all if this flag is set, fail unless all * appliers have successfully connected */ void replicaset_connect(struct applier **appliers, int count, double timeout, bool connect_all); /** * Resume all appliers registered with the replica set. */ void replicaset_follow(void); /** * Wait until a replication quorum is formed. * Return immediately if a quorum cannot be * formed because of errors. */ void replicaset_sync(void); /** * Check if a replication quorum has been formed and * switch the server to the write mode if so. */ void replicaset_check_quorum(void); #endif /* defined(__cplusplus) */ #endif tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_bitset.c0000664000000000000000000003524213306565107020451 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "memtx_bitset.h" #include #include #include "trivia/util.h" #include "fiber.h" #include "tuple.h" #include "memtx_engine.h" #ifndef OLD_GOOD_BITSET #include "small/matras.h" struct bitset_hash_entry { struct tuple *tuple; uint32_t id; }; #define mh_int_t uint32_t #define mh_arg_t int #if UINTPTR_MAX == 0xffffffff #define mh_hash_key(a, arg) ((uintptr_t)(a)) #else #define mh_hash_key(a, arg) ((uint32_t)(((uintptr_t)(a)) >> 33 ^ ((uintptr_t)(a)) ^ ((uintptr_t)(a)) << 11)) #endif #define mh_hash(a, arg) mh_hash_key((a)->tuple, arg) #define mh_cmp(a, b, arg) ((a)->tuple != (b)->tuple) #define mh_cmp_key(a, b, arg) ((a) != (b)->tuple) #define mh_node_t struct bitset_hash_entry #define mh_key_t struct tuple * #define mh_name _bitset_index #define MH_SOURCE 1 #include enum { SPARE_ID_END = 0xFFFFFFFF }; static int memtx_bitset_index_register_tuple(struct memtx_bitset_index *index, struct tuple *tuple) { uint32_t id; struct tuple **place; if (index->spare_id != SPARE_ID_END) { id = index->spare_id; void *mem = matras_get(index->id_to_tuple, id); index->spare_id = *(uint32_t *)mem; place = (struct tuple **)mem; } else { place = (struct tuple **)matras_alloc(index->id_to_tuple, &id); } *place = tuple; struct bitset_hash_entry entry; entry.id = id; entry.tuple = tuple; uint32_t pos = mh_bitset_index_put(index->tuple_to_id, &entry, 0, 0); if (pos == mh_end(index->tuple_to_id)) { *(uint32_t *)tuple = index->spare_id; index->spare_id = id; diag_set(OutOfMemory, 
(ssize_t) pos, "hash", "key"); return -1; } return 0; } static void memtx_bitset_index_unregister_tuple(struct memtx_bitset_index *index, struct tuple *tuple) { uint32_t k = mh_bitset_index_find(index->tuple_to_id, tuple, 0); struct bitset_hash_entry *e = mh_bitset_index_node(index->tuple_to_id, k); void *mem = matras_get(index->id_to_tuple, e->id); *(uint32_t *)mem = index->spare_id; index->spare_id = e->id; mh_bitset_index_del(index->tuple_to_id, k, 0); } static uint32_t memtx_bitset_index_tuple_to_value(struct memtx_bitset_index *index, struct tuple *tuple) { uint32_t k = mh_bitset_index_find(index->tuple_to_id, tuple, 0); struct bitset_hash_entry *e = mh_bitset_index_node(index->tuple_to_id, k); return e->id; } static struct tuple * memtx_bitset_index_value_to_tuple(struct memtx_bitset_index *index, uint32_t value) { void *mem = matras_get(index->id_to_tuple, value); return *(struct tuple **)mem; } #else /* #ifndef OLD_GOOD_BITSET */ static inline struct tuple * value_to_tuple(size_t value); static inline size_t tuple_to_value(struct tuple *tuple) { /* * @todo small_ptr_compress() is broken * https://github.com/tarantool/tarantool/issues/49 */ /* size_t value = small_ptr_compress(tuple); */ size_t value = (intptr_t) tuple >> 2; assert(value_to_tuple(value) == tuple); return value; } static inline struct tuple * value_to_tuple(size_t value) { /* return (struct tuple *) salloc_ptr_from_index(value); */ return (struct tuple *) (value << 2); } #endif /* #ifndef OLD_GOOD_BITSET */ struct bitset_index_iterator { struct iterator base; /* Must be the first member. */ struct bitset_iterator bitset_it; #ifndef OLD_GOOD_BITSET struct memtx_bitset_index *bitset_index; #endif /* #ifndef OLD_GOOD_BITSET */ /** Memory pool the iterator was allocated from. 
 */
	struct mempool *pool;
};

/** Downcast a generic iterator to the bitset iterator. */
static struct bitset_index_iterator *
bitset_index_iterator(struct iterator *it)
{
	return (struct bitset_index_iterator *) it;
}

/** Destroy the wrapped bitset iterator and return @a iterator to its pool. */
static void
bitset_index_iterator_free(struct iterator *iterator)
{
	assert(iterator->free == bitset_index_iterator_free);
	struct bitset_index_iterator *it = bitset_index_iterator(iterator);

	bitset_iterator_destroy(&it->bitset_it);
	mempool_free(it->pool, it);
}

/**
 * Advance the iterator: translate the next bitset value back into a
 * tuple pointer; *ret = NULL when exhausted. Always returns 0.
 */
static int
bitset_index_iterator_next(struct iterator *iterator, struct tuple **ret)
{
	assert(iterator->free == bitset_index_iterator_free);
	struct bitset_index_iterator *it = bitset_index_iterator(iterator);

	size_t value = bitset_iterator_next(&it->bitset_it);
	if (value == SIZE_MAX) {
		/* No more set bits: end of iteration. */
		*ret = NULL;
		return 0;
	}

#ifndef OLD_GOOD_BITSET
	*ret = memtx_bitset_index_value_to_tuple(it->bitset_index, value);
#else /* #ifndef OLD_GOOD_BITSET */
	*ret = value_to_tuple(value);
#endif /* #ifndef OLD_GOOD_BITSET */
	return 0;
}

/** Free the bitset index together with its tuple<->id maps. */
static void
memtx_bitset_index_destroy(struct index *base)
{
	struct memtx_bitset_index *index = (struct memtx_bitset_index *)base;
	bitset_index_destroy(&index->index);
#ifndef OLD_GOOD_BITSET
	mh_bitset_index_delete(index->tuple_to_id);
	matras_destroy(index->id_to_tuple);
	free(index->id_to_tuple);
#endif /* #ifndef OLD_GOOD_BITSET */
	free(index);
}

/** Number of tuples in the index. */
static ssize_t
memtx_bitset_index_size(struct index *base)
{
	struct memtx_bitset_index *index = (struct memtx_bitset_index *)base;
	return bitset_index_size(&index->index);
}

/** Memory footprint of the index, including the auxiliary id maps. */
static ssize_t
memtx_bitset_index_bsize(struct index *base)
{
	struct memtx_bitset_index *index = (struct memtx_bitset_index *)base;
	size_t result = 0;
	result += bitset_index_bsize(&index->index);
#ifndef OLD_GOOD_BITSET
	result += matras_extent_count(index->id_to_tuple) * MEMTX_EXTENT_SIZE;
	result += mh_bitset_index_memsize(index->tuple_to_id);
#endif /* #ifndef OLD_GOOD_BITSET */
	return result;
}

/**
 * Extract the raw bitset key bytes from a MsgPack-encoded field.
 * NOTE: for MP_UINT the returned pointer aliases a static buffer,
 * so the function is not reentrant.
 */
static inline const char *
make_key(const char *field, uint32_t *key_len)
{
	static uint64_t u64key;
	switch (mp_typeof(*field)) {
	case
	MP_UINT:
		u64key = mp_decode_uint(&field);
		*key_len = sizeof(uint64_t);
		return (const char *) &u64key;
		break;
	case MP_STR:
		return mp_decode_str(&field, key_len);
		break;
	default:
		/* Unsupported key type: must be rejected earlier. */
		*key_len = 0;
		unreachable();
		return NULL;
	}
}

/**
 * Remove @a old_tuple from and/or insert @a new_tuple into the
 * bitset. The index is always non-unique, so @a mode is ignored;
 * *result is set to the replaced tuple or left NULL.
 * @retval 0 on success, -1 on OOM (diag is set).
 */
static int
memtx_bitset_index_replace(struct index *base, struct tuple *old_tuple,
			   struct tuple *new_tuple, enum dup_replace_mode mode,
			   struct tuple **result)
{
	struct memtx_bitset_index *index = (struct memtx_bitset_index *)base;

	assert(!base->def->opts.is_unique);
	assert(old_tuple != NULL || new_tuple != NULL);
	(void) mode;

	*result = NULL;

	if (old_tuple != NULL) {
#ifndef OLD_GOOD_BITSET
		uint32_t value = memtx_bitset_index_tuple_to_value(index, old_tuple);
#else /* #ifndef OLD_GOOD_BITSET */
		size_t value = tuple_to_value(old_tuple);
#endif /* #ifndef OLD_GOOD_BITSET */
		if (bitset_index_contains_value(&index->index,
						(size_t)value)) {
			*result = old_tuple;

			assert(old_tuple != new_tuple);
			bitset_index_remove_value(&index->index, value);
#ifndef OLD_GOOD_BITSET
			memtx_bitset_index_unregister_tuple(index, old_tuple);
#endif /* #ifndef OLD_GOOD_BITSET */
		}
	}

	if (new_tuple != NULL) {
		const char *field;
		field = tuple_field(new_tuple,
				    base->def->key_def->parts[0].fieldno);
		uint32_t key_len;
		const void *key = make_key(field, &key_len);
#ifndef OLD_GOOD_BITSET
		/* Assign an id first; roll back on insertion failure. */
		if (memtx_bitset_index_register_tuple(index, new_tuple) != 0)
			return -1;
		uint32_t value = memtx_bitset_index_tuple_to_value(index, new_tuple);
#else /* #ifndef OLD_GOOD_BITSET */
		uint32_t value = tuple_to_value(new_tuple);
#endif /* #ifndef OLD_GOOD_BITSET */
		if (bitset_index_insert(&index->index, key, key_len,
					value) < 0) {
#ifndef OLD_GOOD_BITSET
			memtx_bitset_index_unregister_tuple(index, new_tuple);
#endif /* #ifndef OLD_GOOD_BITSET */
			diag_set(OutOfMemory, 0, "memtx_bitset_index", "insert");
			return -1;
		}
	}
	return 0;
}

/**
 * Allocate an iterator from the engine's pool and build the bitset
 * expression matching @a type / @a key.
 * @retval iterator, or NULL on OOM / unsupported type (diag is set).
 */
static struct iterator *
memtx_bitset_index_create_iterator(struct index *base, enum iterator_type type,
				   const char *key, uint32_t part_count)
{
	struct memtx_bitset_index *index =
		(struct memtx_bitset_index *)base;
	struct memtx_engine *memtx = (struct memtx_engine *)base->engine;

	assert(part_count == 0 || key != NULL);
	(void) part_count;

	struct bitset_index_iterator *it;
	it = mempool_alloc(&memtx->bitset_iterator_pool);
	if (!it) {
		diag_set(OutOfMemory, sizeof(*it),
			 "memtx_bitset_index", "iterator");
		return NULL;
	}

	iterator_create(&it->base, base);
	it->pool = &memtx->bitset_iterator_pool;
	it->base.next = bitset_index_iterator_next;
	it->base.free = bitset_index_iterator_free;

	bitset_iterator_create(&it->bitset_it, realloc);
#ifndef OLD_GOOD_BITSET
	it->bitset_index = index;
#endif

	const void *bitset_key = NULL;
	uint32_t bitset_key_size = 0;

	if (type != ITER_ALL) {
		assert(part_count == 1);
		bitset_key = make_key(key, &bitset_key_size);
	}

	struct bitset_expr expr;
	bitset_expr_create(&expr, realloc);

	int rc = 0;
	/* Translate the iterator type into a bitset expression. */
	switch (type) {
	case ITER_ALL:
		rc = bitset_index_expr_all(&expr);
		break;
	case ITER_EQ:
		rc = bitset_index_expr_equals(&expr, bitset_key,
					      bitset_key_size);
		break;
	case ITER_BITS_ALL_SET:
		rc = bitset_index_expr_all_set(&expr, bitset_key,
					       bitset_key_size);
		break;
	case ITER_BITS_ALL_NOT_SET:
		rc = bitset_index_expr_all_not_set(&expr, bitset_key,
						   bitset_key_size);
		break;
	case ITER_BITS_ANY_SET:
		rc = bitset_index_expr_any_set(&expr, bitset_key,
					       bitset_key_size);
		break;
	default:
		diag_set(UnsupportedIndexFeature, base->def,
			 "requested iterator type");
		goto fail;
	}

	if (rc != 0) {
		diag_set(OutOfMemory, 0, "memtx_bitset_index",
			 "iterator expression");
		goto fail;
	}

	if (bitset_index_init_iterator(&index->index, &it->bitset_it,
				       &expr) != 0) {
		diag_set(OutOfMemory, 0, "memtx_bitset_index",
			 "iterator state");
		goto fail;
	}

	bitset_expr_destroy(&expr);
	return (struct iterator *)it;
fail:
	bitset_expr_destroy(&expr);
	mempool_free(&memtx->bitset_iterator_pool, it);
	return NULL;
}

/**
 * Count tuples matching @a type / @a key, with fast paths for empty
 * and single-bit keys; otherwise falls back to a generic scan.
 */
static ssize_t
memtx_bitset_index_count(struct index *base, enum iterator_type type,
			 const char *key, uint32_t part_count)
{
	struct memtx_bitset_index *index = (struct memtx_bitset_index
*)base;

	if (type == ITER_ALL)
		return bitset_index_size(&index->index);

	assert(part_count == 1); /* checked by key_validate() */
	uint32_t bitset_key_size = 0;
	const void *bitset_key = make_key(key, &bitset_key_size);
	struct bit_iterator bit_it;
	size_t bit;
	if (type == ITER_BITS_ANY_SET) {
		/**
		 * Optimization: for an empty key return 0.
		 */
		bit_iterator_init(&bit_it, bitset_key, bitset_key_size, true);
		bit = bit_iterator_next(&bit_it);
		if (bit == SIZE_MAX)
			return 0;
		/**
		 * Optimization: for a single bit key use
		 * bitset_index_count().
		 */
		if (bit_iterator_next(&bit_it) == SIZE_MAX)
			return bitset_index_count(&index->index, bit);
	} else if (type == ITER_BITS_ALL_SET) {
		/**
		 * Optimization: for an empty key return 0.
		 */
		bit_iterator_init(&bit_it, bitset_key, bitset_key_size, true);
		bit = bit_iterator_next(&bit_it);
		if (bit == SIZE_MAX)
			return 0;
		/**
		 * Optimization: for a single bit key use
		 * bitset_index_count().
		 */
		if (bit_iterator_next(&bit_it) == SIZE_MAX)
			return bitset_index_count(&index->index, bit);
	} else if (type == ITER_BITS_ALL_NOT_SET) {
		/**
		 * Optimization: for an empty key return the number of items
		 * in the index.
		 */
		bit_iterator_init(&bit_it, bitset_key, bitset_key_size, true);
		bit = bit_iterator_next(&bit_it);
		if (bit == SIZE_MAX)
			return bitset_index_size(&index->index);
		/**
		 * Optimization: for the single bit key use
		 * bitset_index_count().
		 */
		if (bit_iterator_next(&bit_it) == SIZE_MAX)
			return bitset_index_size(&index->index) -
			       bitset_index_count(&index->index, bit);
	}

	/* Call generic method */
	return generic_index_count(base, type, key, part_count);
}

/** Virtual method table of the memtx bitset index. */
static const struct index_vtab memtx_bitset_index_vtab = {
	/* .destroy = */ memtx_bitset_index_destroy,
	/* .commit_create = */ generic_index_commit_create,
	/* .commit_drop = */ generic_index_commit_drop,
	/* .update_def = */ generic_index_update_def,
	/* .size = */ memtx_bitset_index_size,
	/* .bsize = */ memtx_bitset_index_bsize,
	/* .min = */ generic_index_min,
	/* .max = */ generic_index_max,
	/* .random = */ generic_index_random,
	/* .count = */ memtx_bitset_index_count,
	/* .get = */ generic_index_get,
	/* .replace = */ memtx_bitset_index_replace,
	/* .create_iterator = */ memtx_bitset_index_create_iterator,
	/* .create_snapshot_iterator = */
		generic_index_create_snapshot_iterator,
	/* .info = */ generic_index_info,
	/* .begin_build = */ generic_index_begin_build,
	/* .reserve = */ generic_index_reserve,
	/* .build_next = */ generic_index_build_next,
	/* .end_build = */ generic_index_end_build,
};

/**
 * Create a new bitset index. Panics on allocation failure of the
 * auxiliary id maps; returns NULL (diag set) on other OOM.
 */
struct memtx_bitset_index *
memtx_bitset_index_new(struct memtx_engine *memtx, struct index_def *def)
{
	assert(!def->opts.is_unique);

	memtx_index_arena_init();

	/* Lazily create the shared iterator pool on first use. */
	if (!mempool_is_initialized(&memtx->bitset_iterator_pool)) {
		mempool_create(&memtx->bitset_iterator_pool,
			       cord_slab_cache(),
			       sizeof(struct bitset_index_iterator));
	}

	struct memtx_bitset_index *index =
		(struct memtx_bitset_index *)calloc(1, sizeof(*index));
	if (index == NULL) {
		diag_set(OutOfMemory, sizeof(*index),
			 "malloc", "struct memtx_bitset_index");
		return NULL;
	}
	if (index_create(&index->base, (struct engine *)memtx,
			 &memtx_bitset_index_vtab, def) != 0) {
		free(index);
		return NULL;
	}

#ifndef OLD_GOOD_BITSET
	index->spare_id = SPARE_ID_END;
	index->id_to_tuple = (struct matras *)malloc(sizeof(*index->id_to_tuple));
	if (index->id_to_tuple == NULL)
		panic("failed to allocate memtx bitset index");
matras_create(index->id_to_tuple, MEMTX_EXTENT_SIZE, sizeof(struct tuple *), memtx_index_extent_alloc, memtx_index_extent_free, NULL); index->tuple_to_id = mh_bitset_index_new(); if (index->tuple_to_id == NULL) panic("failed to allocate memtx bitset index"); #endif /* #ifndef OLD_GOOD_BITSET */ bitset_index_create(&index->index, realloc); return index; } tarantool_1.9.1.26.g63eb81e3c/src/box/tuple_hash.cc0000664000000000000000000002514313306565107020243 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "tuple_hash.h" #include "third_party/PMurHash.h" #include "coll.h" /* Tuple and key hasher */ namespace { enum { HASH_SEED = 13U }; template static inline uint32_t field_hash(uint32_t *ph, uint32_t *pcarry, const char **field) { /* * (!) All fields, except TYPE_STRING hashed **including** MsgPack format * identifier (e.g. 0xcc). This was done **intentionally** * for performance reasons. Please follow MsgPack specification * and pack all your numbers to the most compact representation. * If you still want to add support for broken MsgPack, * please don't forget to patch tuple_compare_field(). */ const char *f = *field; uint32_t size; mp_next(field); size = *field - f; /* calculate the size of field */ assert(size < INT32_MAX); PMurHash32_Process(ph, pcarry, f, size); return size; } template <> inline uint32_t field_hash(uint32_t *ph, uint32_t *pcarry, const char **pfield) { /* * (!) MP_STR fields hashed **excluding** MsgPack format * indentifier. We have to do that to keep compatibility * with old third-party MsgPack (spec-old.md) implementations. 
* \sa https://github.com/tarantool/tarantool/issues/522 */ uint32_t size; const char *f = mp_decode_str(pfield, &size); assert(size < INT32_MAX); PMurHash32_Process(ph, pcarry, f, size); return size; } template struct KeyFieldHash {}; template struct KeyFieldHash { static void hash(uint32_t *ph, uint32_t *pcarry, const char **pfield, uint32_t *ptotal_size) { *ptotal_size += field_hash(ph, pcarry, pfield); KeyFieldHash:: hash(ph, pcarry, pfield, ptotal_size); } }; template struct KeyFieldHash { static void hash(uint32_t *ph, uint32_t *pcarry, const char **pfield, uint32_t *ptotal_size) { *ptotal_size += field_hash(ph, pcarry, pfield); } }; template struct KeyHash { static uint32_t hash(const char *key, const struct key_def *) { uint32_t h = HASH_SEED; uint32_t carry = 0; uint32_t total_size = 0; KeyFieldHash::hash(&h, &carry, &key, &total_size); return PMurHash32_Result(h, carry, total_size); } }; template <> struct KeyHash { static uint32_t hash(const char *key, const struct key_def *key_def) { uint64_t val = mp_decode_uint(&key); (void) key_def; if (likely(val <= UINT32_MAX)) return val; return ((uint32_t)((val)>>33^(val)^(val)<<11)); } }; template struct TupleFieldHash { }; template struct TupleFieldHash { static void hash(const char **pfield, uint32_t *ph, uint32_t *pcarry, uint32_t *ptotal_size) { *ptotal_size += field_hash(ph, pcarry, pfield); TupleFieldHash:: hash(pfield, ph, pcarry, ptotal_size); } }; template struct TupleFieldHash { static void hash(const char **pfield, uint32_t *ph, uint32_t *pcarry, uint32_t *ptotal_size) { *ptotal_size += field_hash(ph, pcarry, pfield); } }; template struct TupleHash { static uint32_t hash(const struct tuple *tuple, const struct key_def *key_def) { uint32_t h = HASH_SEED; uint32_t carry = 0; uint32_t total_size = 0; const char *field = tuple_field(tuple, key_def->parts->fieldno); TupleFieldHash:: hash(&field, &h, &carry, &total_size); return PMurHash32_Result(h, carry, total_size); } }; template <> struct TupleHash { 
static uint32_t hash(const struct tuple *tuple, const struct key_def *key_def) { const char *field = tuple_field(tuple, key_def->parts->fieldno); uint64_t val = mp_decode_uint(&field); if (likely(val <= UINT32_MAX)) return val; return ((uint32_t)((val)>>33^(val)^(val)<<11)); } }; }; /* namespace { */ #define HASHER(...) \ { KeyHash<__VA_ARGS__>::hash, TupleHash<__VA_ARGS__>::hash, \ { __VA_ARGS__, UINT32_MAX } }, struct hasher_signature { key_hash_t kf; tuple_hash_t tf; uint32_t p[64]; }; /** * field1 type, field2 type, ... */ static const hasher_signature hash_arr[] = { HASHER(FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_STRING) HASHER(FIELD_TYPE_UNSIGNED, FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_STRING , FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING) HASHER(FIELD_TYPE_STRING , FIELD_TYPE_STRING) HASHER(FIELD_TYPE_UNSIGNED, FIELD_TYPE_UNSIGNED, FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_STRING , FIELD_TYPE_UNSIGNED, FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING , FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_STRING , FIELD_TYPE_STRING , FIELD_TYPE_UNSIGNED) HASHER(FIELD_TYPE_UNSIGNED, FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING) HASHER(FIELD_TYPE_STRING , FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING) HASHER(FIELD_TYPE_UNSIGNED, FIELD_TYPE_STRING , FIELD_TYPE_STRING) HASHER(FIELD_TYPE_STRING , FIELD_TYPE_STRING , FIELD_TYPE_STRING) }; #undef HASHER template uint32_t tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def); uint32_t key_hash_slowpath(const char *key, const struct key_def *key_def); void tuple_hash_func_set(struct key_def *key_def) { if (key_def->is_nullable) goto slowpath; /* * Check that key_def defines sequential a key without holes * starting from **arbitrary** field. 
*/ for (uint32_t i = 1; i < key_def->part_count; i++) { if (key_def->parts[i - 1].fieldno + 1 != key_def->parts[i].fieldno) goto slowpath; } if (key_def_has_collation(key_def)) { /* Precalculated comparators don't use collation */ goto slowpath; } /* * Try to find pre-generated tuple_hash() and key_hash() * implementations */ for (uint32_t k = 0; k < sizeof(hash_arr) / sizeof(hash_arr[0]); k++) { uint32_t i = 0; for (; i < key_def->part_count; i++) { if (key_def->parts[i].type != hash_arr[k].p[i]) { break; } } if (i == key_def->part_count && hash_arr[k].p[i] == UINT32_MAX){ key_def->tuple_hash = hash_arr[k].tf; key_def->key_hash = hash_arr[k].kf; return; } } slowpath: if (key_def->has_optional_parts) key_def->tuple_hash = tuple_hash_slowpath; else key_def->tuple_hash = tuple_hash_slowpath; key_def->key_hash = key_hash_slowpath; } static uint32_t tuple_hash_field(uint32_t *ph1, uint32_t *pcarry, const char **field, struct coll *coll) { const char *f = *field; uint32_t size; switch (mp_typeof(**field)) { case MP_STR: /* * (!) MP_STR fields hashed **excluding** MsgPack format * indentifier. We have to do that to keep compatibility * with old third-party MsgPack (spec-old.md) implementations. * \sa https://github.com/tarantool/tarantool/issues/522 */ f = mp_decode_str(field, &size); if (coll != NULL) return coll->hash(f, size, ph1, pcarry, coll); break; default: mp_next(field); size = *field - f; /* calculate the size of field */ /* * (!) All other fields hashed **including** MsgPack format * identifier (e.g. 0xcc). This was done **intentionally** * for performance reasons. Please follow MsgPack specification * and pack all your numbers to the most compact representation. * If you still want to add support for broken MsgPack, * please don't forget to patch tuple_compare_field(). 
*/ break; } assert(size < INT32_MAX); PMurHash32_Process(ph1, pcarry, f, size); return size; } static inline uint32_t tuple_hash_null(uint32_t *ph1, uint32_t *pcarry) { assert(mp_sizeof_nil() == 1); const char null = 0xc0; PMurHash32_Process(ph1, pcarry, &null, 1); return mp_sizeof_nil(); } template uint32_t tuple_hash_slowpath(const struct tuple *tuple, const struct key_def *key_def) { assert(has_optional_parts == key_def->has_optional_parts); uint32_t h = HASH_SEED; uint32_t carry = 0; uint32_t total_size = 0; uint32_t prev_fieldno = key_def->parts[0].fieldno; const char *field = tuple_field(tuple, key_def->parts[0].fieldno); const char *end = (char *)tuple + tuple_size(tuple); if (has_optional_parts && field == NULL) { total_size += tuple_hash_null(&h, &carry); } else { total_size += tuple_hash_field(&h, &carry, &field, key_def->parts[0].coll); } for (uint32_t part_id = 1; part_id < key_def->part_count; part_id++) { /* If parts of key_def are not sequential we need to call * tuple_field. 
Otherwise, tuple is hashed sequentially without * need of tuple_field */ if (prev_fieldno + 1 != key_def->parts[part_id].fieldno) { field = tuple_field(tuple, key_def->parts[part_id].fieldno); } if (has_optional_parts && (field == NULL || field >= end)) { total_size += tuple_hash_null(&h, &carry); } else { total_size += tuple_hash_field(&h, &carry, &field, key_def->parts[part_id].coll); } prev_fieldno = key_def->parts[part_id].fieldno; } return PMurHash32_Result(h, carry, total_size); } uint32_t key_hash_slowpath(const char *key, const struct key_def *key_def) { uint32_t h = HASH_SEED; uint32_t carry = 0; uint32_t total_size = 0; for (const struct key_part *part = key_def->parts; part < key_def->parts + key_def->part_count; part++) { total_size += tuple_hash_field(&h, &carry, &key, part->coll); } return PMurHash32_Result(h, carry, total_size); } tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_tuple.cc0000664000000000000000000001326713306565107020456 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "memtx_tuple.h" #include "small/small.h" #include "small/region.h" #include "small/quota.h" #include "fiber.h" #include "box.h" struct memtx_tuple { /* * sic: the header of the tuple is used * to store a free list pointer in smfree_delayed. * Please don't change it without understanding * how smfree_delayed and snapshotting COW works. */ /** Snapshot generation version. */ uint32_t version; struct tuple base; }; /** Memtx slab arena */ extern struct slab_arena memtx_arena; /* defined in memtx_engine.cc */ /* Memtx slab_cache for tuples */ static struct slab_cache memtx_slab_cache; /** Common quota for memtx tuples and indexes */ static struct quota memtx_quota; /** Memtx tuple allocator */ struct small_alloc memtx_alloc; /* used box box.slab.info() */ /* The maximal allowed tuple size, box.cfg.memtx_max_tuple_size */ size_t memtx_max_tuple_size = 1 * 1024 * 1024; /* set dynamically */ uint32_t snapshot_version; enum { /** Lowest allowed slab_alloc_minimal */ OBJSIZE_MIN = 16, SLAB_SIZE = 16 * 1024 * 1024, }; void memtx_tuple_init(uint64_t tuple_arena_max_size, uint32_t objsize_min, float alloc_factor) { /* Apply lowest allowed objsize bounds */ if (objsize_min < OBJSIZE_MIN) objsize_min = OBJSIZE_MIN; /** Preallocate entire quota. 
*/ quota_init(&memtx_quota, tuple_arena_max_size); tuple_arena_create(&memtx_arena, &memtx_quota, tuple_arena_max_size, SLAB_SIZE, "memtx"); slab_cache_create(&memtx_slab_cache, &memtx_arena); small_alloc_create(&memtx_alloc, &memtx_slab_cache, objsize_min, alloc_factor); } void memtx_tuple_free(void) { } struct tuple_format_vtab memtx_tuple_format_vtab = { memtx_tuple_delete, }; struct tuple * memtx_tuple_new(struct tuple_format *format, const char *data, const char *end) { assert(mp_typeof(*data) == MP_ARRAY); size_t tuple_len = end - data; size_t meta_size = tuple_format_meta_size(format); size_t total = sizeof(struct memtx_tuple) + meta_size + tuple_len; ERROR_INJECT(ERRINJ_TUPLE_ALLOC, do { diag_set(OutOfMemory, (unsigned) total, "slab allocator", "memtx_tuple"); return NULL; } while(false); ); if (unlikely(total > memtx_max_tuple_size)) { diag_set(ClientError, ER_MEMTX_MAX_TUPLE_SIZE, (unsigned) total); error_log(diag_last_error(diag_get())); return NULL; } struct memtx_tuple *memtx_tuple = (struct memtx_tuple *) smalloc(&memtx_alloc, total); /** * Use a nothrow version and throw an exception here, * to throw an instance of ClientError. Apart from being * more nice to the user, ClientErrors are ignored in * force_recovery=true mode, allowing us to start * with lower arena than necessary in the circumstances * of disaster recovery. */ if (memtx_tuple == NULL) { diag_set(OutOfMemory, (unsigned) total, "slab allocator", "memtx_tuple"); return NULL; } struct tuple *tuple = &memtx_tuple->base; tuple->refs = 0; memtx_tuple->version = snapshot_version; assert(tuple_len <= UINT32_MAX); /* bsize is UINT32_MAX */ tuple->bsize = tuple_len; tuple->format_id = tuple_format_id(format); tuple_format_ref(format); /* * Data offset is calculated from the begin of the struct * tuple base, not from memtx_tuple, because the struct * tuple is not the first field of the memtx_tuple. 
*/ tuple->data_offset = sizeof(struct tuple) + meta_size; char *raw = (char *) tuple + tuple->data_offset; uint32_t *field_map = (uint32_t *) raw; memcpy(raw, data, tuple_len); if (tuple_init_field_map(format, field_map, raw)) { memtx_tuple_delete(format, tuple); return NULL; } say_debug("%s(%zu) = %p", __func__, tuple_len, memtx_tuple); return tuple; } void memtx_tuple_delete(struct tuple_format *format, struct tuple *tuple) { say_debug("%s(%p)", __func__, tuple); assert(tuple->refs == 0); size_t total = sizeof(struct memtx_tuple) + tuple_format_meta_size(format) + tuple->bsize; tuple_format_unref(format); struct memtx_tuple *memtx_tuple = container_of(tuple, struct memtx_tuple, base); if (memtx_alloc.free_mode != SMALL_DELAYED_FREE || memtx_tuple->version == snapshot_version) smfree(&memtx_alloc, memtx_tuple, total); else smfree_delayed(&memtx_alloc, memtx_tuple, total); } void memtx_tuple_begin_snapshot() { snapshot_version++; small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, true); } void memtx_tuple_end_snapshot() { small_alloc_setopt(&memtx_alloc, SMALL_DELAYED_FREE_MODE, false); } tarantool_1.9.1.26.g63eb81e3c/src/box/xstream.cc0000664000000000000000000000305413306560010017553 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "xstream.h" #include "exception.h" int xstream_write(struct xstream *stream, struct xrow_header *row) { try { stream->write(stream, row); } catch (Exception *e) { return -1; } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/box/recovery.cc0000664000000000000000000003336013306565107017745 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "recovery.h" #include "small/rlist.h" #include "scoped_guard.h" #include "trigger.h" #include "fiber.h" #include "xlog.h" #include "xrow.h" #include "xstream.h" #include "wal.h" /* wal_watcher */ #include "replication.h" #include "session.h" #include "coio_file.h" #include "error.h" /* * Recovery subsystem * ------------------ * * A facade of the recovery subsystem is struct recovery. * * Depending on the actual task being performed the recovery * can be in a different state. * * Let's enumerate all possible distinct states of recovery: * * IR - initial recovery, initiated right after server start: * reading data from a checkpoint and existing WALs * and restoring the in-memory state * IRR - initial replication relay mode, reading data from * existing WALs (xlogs) and sending it to the client. 
* * HS - standby mode, entered once all existing WALs are read: * following the WAL directory for all changes done by the master * and updating the in-memory state * RR - replication relay, following the WAL directory for all * changes done by the master and sending them to the * replica * * The following state transitions are possible/supported: * * recovery_init() -> IR | IRR # recover() * IR -> HS # recovery_follow_local() * IRR -> RR # recovery_follow_local() */ const struct type_info type_XlogGapError = make_type("XlogGapError", &type_XlogError); struct XlogGapError: public XlogError { /** Used by BuildXlogGapError() */ XlogGapError(const char *file, unsigned line, const struct vclock *from, const struct vclock *to) :XlogError(&type_XlogGapError, file, line) { char *s_from = vclock_to_string(from); char *s_to = vclock_to_string(to); snprintf(errmsg, sizeof(errmsg), "Missing .xlog file between LSN %lld %s and %lld %s", (long long) vclock_sum(from), s_from ? s_from : "", (long long) vclock_sum(to), s_to ? s_to : ""); free(s_from); free(s_to); } virtual void raise() { throw this; } }; /* {{{ Initial recovery */ /** * Throws an exception in case of error. */ struct recovery * recovery_new(const char *wal_dirname, bool force_recovery, struct vclock *vclock) { struct recovery *r = (struct recovery *) calloc(1, sizeof(*r)); if (r == NULL) { tnt_raise(OutOfMemory, sizeof(*r), "malloc", "struct recovery"); } auto guard = make_scoped_guard([=]{ free(r); }); xdir_create(&r->wal_dir, wal_dirname, XLOG, &INSTANCE_UUID); r->wal_dir.force_recovery = force_recovery; vclock_copy(&r->vclock, vclock); /** * Avoid scanning WAL dir before we recovered * the snapshot and know instance UUID - this will * make sure the scan skips files with wrong * UUID, see replication/cluster.test for * details. 
*/ xdir_check_xc(&r->wal_dir); r->watcher = NULL; rlist_create(&r->on_close_log); guard.is_active = false; return r; } static inline void recovery_close_log(struct recovery *r) { if (!xlog_cursor_is_open(&r->cursor)) return; if (xlog_cursor_is_eof(&r->cursor)) { say_info("done `%s'", r->cursor.name); } else { say_warn("file `%s` wasn't correctly closed", r->cursor.name); } xlog_cursor_close(&r->cursor, false); trigger_run_xc(&r->on_close_log, NULL); } void recovery_delete(struct recovery *r) { recovery_stop_local(r); trigger_destroy(&r->on_close_log); xdir_destroy(&r->wal_dir); if (xlog_cursor_is_open(&r->cursor)) { /* * Possible if shutting down a replication * relay or if error during startup. */ xlog_cursor_close(&r->cursor, false); } free(r); } void recovery_exit(struct recovery *r) { /* Avoid fibers, there is no event loop */ r->watcher = NULL; recovery_delete(r); } /** * Read all rows in a file starting from the last position. * Advance the position. If end of file is reached, * set l.eof_read. * The reading will be stopped on reaching stop_vclock. * Use NULL for boundless recover */ static void recover_xlog(struct recovery *r, struct xstream *stream, struct vclock *stop_vclock) { struct xrow_header row; uint64_t row_count = 0; while (xlog_cursor_next_xc(&r->cursor, &row, r->wal_dir.force_recovery) == 0) { /* * Read the next row from xlog file. * * xlog_cursor_next_xc() returns 1 when * it can not read more rows. This doesn't mean * the file is fully read: it's fully read only * when EOF marker has been read, see i.eof_read */ if (stop_vclock != NULL && r->vclock.signature >= stop_vclock->signature) return; int64_t current_lsn = vclock_get(&r->vclock, row.replica_id); if (row.lsn <= current_lsn) continue; /* already applied, skip */ /* * All rows in xlog files have an assigned * replica id. 
*/ assert(row.replica_id != 0); /* * We can promote the vclock either before or * after xstream_write(): it only makes any impact * in case of forced recovery, when we skip the * failed row anyway. */ vclock_follow(&r->vclock, row.replica_id, row.lsn); if (xstream_write(stream, &row) == 0) { ++row_count; if (row_count % 100000 == 0) say_info("%.1fM rows processed", row_count / 1000000.); } else { say_error("can't apply row: "); diag_log(); if (!r->wal_dir.force_recovery) diag_raise(); } } } /** * Find out if there are new .xlog files since the current * LSN, and read them all up. * * Reading will be stopped on reaching recovery * vclock signature > to_checkpoint (after playing to_checkpoint record) * use NULL for boundless recover * * This function will not close r->current_wal if * recovery was successful. */ void recover_remaining_wals(struct recovery *r, struct xstream *stream, struct vclock *stop_vclock, bool scan_dir) { struct vclock *clock; if (scan_dir) xdir_scan_xc(&r->wal_dir); if (xlog_cursor_is_open(&r->cursor)) { /* If there's a WAL open, recover from it first. */ assert(!xlog_cursor_is_eof(&r->cursor)); clock = vclockset_search(&r->wal_dir.index, &r->cursor.meta.vclock); if (clock != NULL) goto recover_current_wal; /* * The current WAL has disappeared under our feet - * assume anything can happen in production and go on. */ say_error("file `%s' was deleted under our feet", r->cursor.name); } for (clock = vclockset_match(&r->wal_dir.index, &r->vclock); clock != NULL; clock = vclockset_next(&r->wal_dir.index, clock)) { if (stop_vclock != NULL && clock->signature >= stop_vclock->signature) { break; } if (xlog_cursor_is_eof(&r->cursor) && vclock_sum(&r->cursor.meta.vclock) >= vclock_sum(clock)) { /* * If we reached EOF while reading last xlog, * we don't need to rescan it. */ continue; } if (vclock_compare(clock, &r->vclock) > 0) { /** * The best clock we could find is * greater or is incomparable with the * current state of recovery. 
*/ XlogGapError *e = tnt_error(XlogGapError, &r->vclock, clock); if (!r->wal_dir.force_recovery) throw e; e->log(); /* Ignore missing WALs */ say_warn("ignoring a gap in LSN"); } recovery_close_log(r); xdir_open_cursor_xc(&r->wal_dir, vclock_sum(clock), &r->cursor); say_info("recover from `%s'", r->cursor.name); recover_current_wal: recover_xlog(r, stream, stop_vclock); } if (xlog_cursor_is_eof(&r->cursor)) recovery_close_log(r); if (stop_vclock != NULL && vclock_compare(&r->vclock, stop_vclock) != 0) tnt_raise(XlogGapError, &r->vclock, stop_vclock); region_free(&fiber()->gc); } void recovery_finalize(struct recovery *r, struct xstream *stream) { recovery_stop_local(r); recover_remaining_wals(r, stream, NULL, true); recovery_close_log(r); /* * Check if next xlog exists. If it's true this xlog is * corrupted and we should rename it (to avoid getting * problem on the next xlog write with the same name). * Possible reasons are: * - last xlog has corrupted rows * - last xlog has corrupted header * - last xlog has zero size */ char *name = xdir_format_filename(&r->wal_dir, vclock_sum(&r->vclock), NONE); if (access(name, F_OK) == 0) { say_info("rename corrupted xlog %s", name); char to[PATH_MAX]; snprintf(to, sizeof(to), "%s.corrupted", name); if (rename(name, to) != 0) { tnt_raise(SystemError, "%s: can't rename corrupted xlog", name); } } } /* }}} */ /* {{{ Local recovery: support of hot standby and replication relay */ /** * Implements a subscription to WAL updates via fs events. * Any change to the WAL dir itself or a change in the XLOG * file triggers a wakeup. The WAL dir path is set in the * constructor. XLOG file path is set with set_log_path(). 
*/ class WalSubscription { public: struct fiber *f; unsigned events; struct ev_stat dir_stat; struct ev_stat file_stat; char dir_path[PATH_MAX]; char file_path[PATH_MAX]; static void dir_stat_cb(struct ev_loop *, struct ev_stat *stat, int) { ((WalSubscription *)stat->data)->wakeup(WAL_EVENT_ROTATE); } static void file_stat_cb(struct ev_loop *, struct ev_stat *stat, int) { ((WalSubscription *)stat->data)->wakeup(WAL_EVENT_WRITE); } void wakeup(unsigned events) { this->events |= events; if (f->flags & FIBER_IS_CANCELLABLE) fiber_wakeup(f); } WalSubscription(const char *wal_dir) { f = fiber(); events = 0; if ((size_t)snprintf(dir_path, sizeof(dir_path), "%s", wal_dir) >= sizeof(dir_path)) { panic("path too long: %s", wal_dir); } ev_stat_init(&dir_stat, dir_stat_cb, "", 0.0); ev_stat_init(&file_stat, file_stat_cb, "", 0.0); dir_stat.data = this; file_stat.data = this; ev_stat_set(&dir_stat, dir_path, 0.0); ev_stat_start(loop(), &dir_stat); } ~WalSubscription() { ev_stat_stop(loop(), &file_stat); ev_stat_stop(loop(), &dir_stat); } void set_log_path(const char *path) { /* * Avoid toggling ev_stat if the path didn't change. * Note: .file_path valid iff file_stat is active. */ if (path && ev_is_active(&file_stat) && strcmp(file_path, path) == 0) { return; } ev_stat_stop(loop(), &file_stat); if (path == NULL) return; if ((size_t)snprintf(file_path, sizeof(file_path), "%s", path) >= sizeof(file_path)) { panic("path too long: %s", path); } ev_stat_set(&file_stat, file_path, 0.0); ev_stat_start(loop(), &file_stat); } }; static int hot_standby_f(va_list ap) { struct recovery *r = va_arg(ap, struct recovery *); struct xstream *stream = va_arg(ap, struct xstream *); bool scan_dir = true; ev_tstamp wal_dir_rescan_delay = va_arg(ap, ev_tstamp); fiber_set_user(fiber(), &admin_credentials); WalSubscription subscription(r->wal_dir.dirname); while (! fiber_is_cancelled()) { /* * Recover until there is no new stuff which appeared in * the log dir while recovery was running. 
* * Use vclock signature to represent the current wal * since the xlog object itself may be freed in * recover_remaining_rows(). */ int64_t start, end; do { start = vclock_sum(&r->vclock); recover_remaining_wals(r, stream, NULL, scan_dir); end = vclock_sum(&r->vclock); /* * Continue, given there's been progress *and* there is a * chance new WALs have appeared since. * Sic: end * is < start (is 0) if someone deleted all logs * on the filesystem. */ } while (end > start && !xlog_cursor_is_open(&r->cursor)); subscription.set_log_path(xlog_cursor_is_open(&r->cursor) ? r->cursor.name : NULL); bool timed_out = false; if (subscription.events == 0) { /** * Allow an immediate wakeup/break loop * from recovery_stop_local(). */ fiber_set_cancellable(true); timed_out = fiber_yield_timeout(wal_dir_rescan_delay); fiber_set_cancellable(false); } scan_dir = timed_out || (subscription.events & WAL_EVENT_ROTATE) != 0; subscription.events = 0; } return 0; } void recovery_follow_local(struct recovery *r, struct xstream *stream, const char *name, ev_tstamp wal_dir_rescan_delay) { /* * Scan wal_dir and recover all existing at the moment xlogs. * Blocks until finished. */ recover_remaining_wals(r, stream, NULL, true); /* * Start 'hot_standby' background fiber to follow xlog changes. * It will pick up from the position of the currently open * xlog. */ assert(r->watcher == NULL); r->watcher = fiber_new_xc(name, hot_standby_f); fiber_set_joinable(r->watcher, true); fiber_start(r->watcher, r, stream, wal_dir_rescan_delay); } void recovery_stop_local(struct recovery *r) { if (r->watcher) { struct fiber *f = r->watcher; r->watcher = NULL; fiber_cancel(f); if (fiber_join(f) != 0) diag_raise(); } } /* }}} */ tarantool_1.9.1.26.g63eb81e3c/src/box/gc.h0000664000000000000000000000716313306560010016330 0ustar rootroot#ifndef TARANTOOL_BOX_GC_H_INCLUDED #define TARANTOOL_BOX_GC_H_INCLUDED /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct gc_consumer; /** * Initialize the garbage collection state. */ void gc_init(void); /** * Destroy the garbage collection state. */ void gc_free(void); /** * Invoke garbage collection in order to remove files left * from old checkpoints. The number of checkpoints saved by * this function is specified by box.cfg.checkpoint_count. */ void gc_run(void); /** * Update the checkpoint_count configuration option and * rerun garbage collection. */ void gc_set_checkpoint_count(int checkpoint_count); /** * Register a consumer. * * This will stop garbage collection of objects newer than * @signature until the consumer is unregistered or advanced. 
* @name is a human-readable name of the consumer, it will * be used for reporting the consumer to the user. * * Returns a pointer to the new consumer object or NULL on * memory allocation failure. */ struct gc_consumer * gc_consumer_register(const char *name, int64_t signature); /** * Unregister a consumer and invoke garbage collection * if needed. */ void gc_consumer_unregister(struct gc_consumer *consumer); /** * Advance the vclock signature tracked by a consumer and * invoke garbage collection if needed. */ void gc_consumer_advance(struct gc_consumer *consumer, int64_t signature); /** Return the name of a consumer. */ const char * gc_consumer_name(const struct gc_consumer *consumer); /** Return the signature a consumer tracks. */ int64_t gc_consumer_signature(const struct gc_consumer *consumer); /** * Iterator over registered consumers. The iterator is valid * as long as the caller doesn't yield. */ struct gc_consumer_iterator { struct gc_consumer *curr; }; /** Init an iterator over consumers. */ static inline void gc_consumer_iterator_init(struct gc_consumer_iterator *it) { it->curr = NULL; } /** * Iterate to the next registered consumer. Return a pointer * to the next consumer object or NULL if there is no more * consumers. */ struct gc_consumer * gc_consumer_iterator_next(struct gc_consumer_iterator *it); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_GC_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/index_def.c0000664000000000000000000001705113306565107017670 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "index_def.h" #include "schema_def.h" #include "identifier.h" const char *index_type_strs[] = { "HASH", "TREE", "BITSET", "RTREE" }; const char *rtree_index_distance_type_strs[] = { "EUCLID", "MANHATTAN" }; const struct index_opts index_opts_default = { /* .unique = */ true, /* .dimension = */ 2, /* .distance = */ RTREE_INDEX_DISTANCE_TYPE_EUCLID, /* .range_size = */ 1073741824, /* .page_size = */ 8192, /* .run_count_per_level = */ 2, /* .run_size_ratio = */ 3.5, /* .bloom_fpr = */ 0.05, /* .lsn = */ 0, }; const struct opt_def index_opts_reg[] = { OPT_DEF("unique", OPT_BOOL, struct index_opts, is_unique), OPT_DEF("dimension", OPT_INT64, struct index_opts, dimension), OPT_DEF_ENUM("distance", rtree_index_distance_type, struct index_opts, distance, NULL), OPT_DEF("range_size", OPT_INT64, struct index_opts, range_size), OPT_DEF("page_size", OPT_INT64, struct index_opts, page_size), OPT_DEF("run_count_per_level", OPT_INT64, struct index_opts, run_count_per_level), OPT_DEF("run_size_ratio", OPT_FLOAT, struct index_opts, run_size_ratio), 
OPT_DEF("bloom_fpr", OPT_FLOAT, struct index_opts, bloom_fpr), OPT_DEF("lsn", OPT_INT64, struct index_opts, lsn), OPT_END, }; struct index_def * index_def_new(uint32_t space_id, uint32_t iid, const char *name, uint32_t name_len, enum index_type type, const struct index_opts *opts, struct key_def *key_def, struct key_def *pk_def) { assert(name_len <= BOX_NAME_MAX); /* Use calloc to make index_def_delete() safe at all times. */ struct index_def *def = (struct index_def *) calloc(1, sizeof(*def)); if (def == NULL) { diag_set(OutOfMemory, sizeof(*def), "malloc", "struct index_def"); return NULL; } def->name = strndup(name, name_len); if (def->name == NULL) { index_def_delete(def); diag_set(OutOfMemory, name_len + 1, "malloc", "index_def name"); return NULL; } if (identifier_check(def->name, name_len)) { index_def_delete(def); return NULL; } def->key_def = key_def_dup(key_def); if (pk_def != NULL) { def->cmp_def = key_def_merge(key_def, pk_def); if (! opts->is_unique) { def->cmp_def->unique_part_count = def->cmp_def->part_count; } else { def->cmp_def->unique_part_count = def->key_def->part_count; } } else { def->cmp_def = key_def_dup(key_def); } if (def->key_def == NULL || def->cmp_def == NULL) { index_def_delete(def); return NULL; } def->type = type; def->space_id = space_id; def->iid = iid; def->opts = *opts; return def; } struct index_def * index_def_dup(const struct index_def *def) { struct index_def *dup = (struct index_def *) malloc(sizeof(*dup)); if (dup == NULL) { diag_set(OutOfMemory, sizeof(*dup), "malloc", "struct index_def"); return NULL; } *dup = *def; dup->name = strdup(def->name); if (dup->name == NULL) { free(dup); diag_set(OutOfMemory, strlen(def->name) + 1, "malloc", "index_def name"); return NULL; } dup->key_def = key_def_dup(def->key_def); dup->cmp_def = key_def_dup(def->cmp_def); if (dup->key_def == NULL || dup->cmp_def == NULL) { index_def_delete(dup); return NULL; } rlist_create(&dup->link); return dup; } /** Free a key definition. 
*/ void index_def_delete(struct index_def *index_def) { free(index_def->name); if (index_def->key_def) { TRASH(index_def->key_def); free(index_def->key_def); } if (index_def->cmp_def) { TRASH(index_def->cmp_def); free(index_def->cmp_def); } TRASH(index_def); free(index_def); } bool index_def_change_requires_rebuild(const struct index_def *old_index_def, const struct index_def *new_index_def) { if (old_index_def->iid != new_index_def->iid || old_index_def->type != new_index_def->type || (!old_index_def->opts.is_unique && new_index_def->opts.is_unique) || !key_part_check_compatibility(old_index_def->key_def->parts, old_index_def->key_def->part_count, new_index_def->key_def->parts, new_index_def->key_def->part_count)) { return true; } if (old_index_def->type == RTREE) { if (old_index_def->opts.dimension != new_index_def->opts.dimension || old_index_def->opts.distance != new_index_def->opts.distance) return true; } return false; } int index_def_cmp(const struct index_def *key1, const struct index_def *key2) { assert(key1->space_id == key2->space_id); if (key1->iid != key2->iid) return key1->iid < key2->iid ? -1 : 1; if (strcmp(key1->name, key2->name)) return strcmp(key1->name, key2->name); if (key1->type != key2->type) return (int) key1->type < (int) key2->type ? 
-1 : 1; if (index_opts_cmp(&key1->opts, &key2->opts)) return index_opts_cmp(&key1->opts, &key2->opts); return key_part_cmp(key1->key_def->parts, key1->key_def->part_count, key2->key_def->parts, key2->key_def->part_count); } bool index_def_is_valid(struct index_def *index_def, const char *space_name) { if (index_def->iid >= BOX_INDEX_MAX) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "index id too big"); return false; } if (index_def->iid == 0 && index_def->opts.is_unique == false) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "primary key must be unique"); return false; } if (index_def->key_def->part_count == 0) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "part count must be positive"); return false; } if (index_def->key_def->part_count > BOX_INDEX_PART_MAX) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "too many key parts"); return false; } for (uint32_t i = 0; i < index_def->key_def->part_count; i++) { assert(index_def->key_def->parts[i].type < field_type_MAX); if (index_def->key_def->parts[i].fieldno > BOX_INDEX_FIELD_MAX) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "field no is too big"); return false; } for (uint32_t j = 0; j < i; j++) { /* * Courtesy to a user who could have made * a typo. */ if (index_def->key_def->parts[i].fieldno == index_def->key_def->parts[j].fieldno) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name, "same key part is indexed twice"); return false; } } } return true; } tarantool_1.9.1.26.g63eb81e3c/src/box/sequence.c0000664000000000000000000002216113306560010017535 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "sequence.h" #include #include #include #include #include #include #include "diag.h" #include "error.h" #include "errcode.h" #include "fiber.h" #include "index.h" #include "schema.h" #include "session.h" #include "trivia/util.h" #include "third_party/PMurHash.h" enum { SEQUENCE_HASH_SEED = 13U, SEQUENCE_DATA_EXTENT_SIZE = 512, }; /** Sequence state. */ struct sequence_data { /** Sequence id. */ uint32_t id; /** Sequence value. 
*/ int64_t value; }; static inline bool sequence_data_equal(struct sequence_data data1, struct sequence_data data2) { return data1.id == data2.id; } static inline bool sequence_data_equal_key(struct sequence_data data, uint32_t id) { return data.id == id; } #define LIGHT_NAME _sequence #define LIGHT_DATA_TYPE struct sequence_data #define LIGHT_KEY_TYPE uint32_t #define LIGHT_CMP_ARG_TYPE int #define LIGHT_EQUAL(a, b, c) sequence_data_equal(a, b) #define LIGHT_EQUAL_KEY(a, b, c) sequence_data_equal_key(a, b) #include "salad/light.h" static struct light_sequence_core sequence_data_index; static struct mempool sequence_data_extent_pool; static void * sequence_data_extent_alloc(void *ctx) { (void)ctx; void *ret = mempool_alloc(&sequence_data_extent_pool); if (ret == NULL) diag_set(OutOfMemory, SEQUENCE_DATA_EXTENT_SIZE, "mempool", "sequence_data_extent"); return ret; } static void sequence_data_extent_free(void *ctx, void *extent) { (void)ctx; mempool_free(&sequence_data_extent_pool, extent); } static inline uint32_t sequence_hash(uint32_t id) { return PMurHash32(SEQUENCE_HASH_SEED, &id, sizeof(id)); } void sequence_init(void) { mempool_create(&sequence_data_extent_pool, &cord()->slabc, SEQUENCE_DATA_EXTENT_SIZE); light_sequence_create(&sequence_data_index, SEQUENCE_DATA_EXTENT_SIZE, sequence_data_extent_alloc, sequence_data_extent_free, NULL, 0); } void sequence_free(void) { light_sequence_destroy(&sequence_data_index); mempool_destroy(&sequence_data_extent_pool); } void sequence_reset(struct sequence *seq) { uint32_t key = seq->def->id; uint32_t hash = sequence_hash(key); uint32_t pos = light_sequence_find_key(&sequence_data_index, hash, key); if (pos != light_sequence_end) light_sequence_delete(&sequence_data_index, pos); } int sequence_set(struct sequence *seq, int64_t value) { uint32_t key = seq->def->id; uint32_t hash = sequence_hash(key); struct sequence_data new_data, old_data; new_data.id = key; new_data.value = value; if 
(light_sequence_replace(&sequence_data_index, hash, new_data, &old_data) != light_sequence_end) return 0; if (light_sequence_insert(&sequence_data_index, hash, new_data) != light_sequence_end) return 0; return -1; } int sequence_update(struct sequence *seq, int64_t value) { uint32_t key = seq->def->id; uint32_t hash = sequence_hash(key); uint32_t pos = light_sequence_find_key(&sequence_data_index, hash, key); struct sequence_data new_data, data; new_data.id = key; new_data.value = value; if (pos != light_sequence_end) { data = light_sequence_get(&sequence_data_index, pos); if ((seq->def->step > 0 && value > data.value) || (seq->def->step < 0 && value < data.value)) { if (light_sequence_replace(&sequence_data_index, hash, new_data, &data) == light_sequence_end) unreachable(); } } else { if (light_sequence_insert(&sequence_data_index, hash, new_data) == light_sequence_end) return -1; } return 0; } int sequence_next(struct sequence *seq, int64_t *result) { int64_t value; struct sequence_def *def = seq->def; struct sequence_data new_data, old_data; uint32_t key = seq->def->id; uint32_t hash = sequence_hash(key); uint32_t pos = light_sequence_find_key(&sequence_data_index, hash, key); if (pos == light_sequence_end) { new_data.id = key; new_data.value = def->start; if (light_sequence_insert(&sequence_data_index, hash, new_data) == light_sequence_end) return -1; *result = def->start; return 0; } old_data = light_sequence_get(&sequence_data_index, pos); value = old_data.value; if (def->step > 0) { if (value < def->min) { value = def->min; goto done; } if (value >= 0 && def->step > INT64_MAX - value) goto overflow; value += def->step; if (value > def->max) goto overflow; } else { assert(def->step < 0); if (value > def->max) { value = def->max; goto done; } if (value < 0 && def->step < INT64_MIN - value) goto overflow; value += def->step; if (value < def->min) goto overflow; } done: assert(value >= def->min && value <= def->max); new_data.id = key; new_data.value = value; if 
(light_sequence_replace(&sequence_data_index, hash, new_data, &old_data) == light_sequence_end) unreachable(); *result = value; return 0; overflow: if (!def->cycle) { diag_set(ClientError, ER_SEQUENCE_OVERFLOW, def->name); return -1; } value = def->step > 0 ? def->min : def->max; goto done; } int access_check_sequence(struct sequence *seq) { struct credentials *cr = effective_user(); /* * If the user has universal access, don't bother with checks. * No special check for ADMIN user is necessary since ADMIN has * universal access. */ user_access_t access = PRIV_U | PRIV_W; user_access_t sequence_access = access & ~cr->universal_access; if (sequence_access && /* Check for missing Usage access, ignore owner rights. */ (sequence_access & PRIV_U || /* Check for missing specific access, respect owner rights. */ (seq->def->uid != cr->uid && sequence_access & ~seq->access[cr->auth_token].effective))) { /* Access violation, report error. */ struct user *user = user_find(cr->uid); if (user != NULL) { if (!(cr->universal_access & PRIV_U)) { diag_set(AccessDeniedError, priv_name(PRIV_U), schema_object_name(SC_UNIVERSE), "", user->def->name); } else { diag_set(AccessDeniedError, priv_name(access), schema_object_name(SC_SEQUENCE), seq->def->name, user->def->name); } } return -1; } return 0; } struct sequence_data_iterator { struct snapshot_iterator base; /** Iterator over the data index. */ struct light_sequence_iterator iter; /** Last tuple returned by the iterator. 
*/ char tuple[0]; }; #define SEQUENCE_TUPLE_BUF_SIZE (mp_sizeof_array(2) + \ 2 * mp_sizeof_uint(UINT64_MAX)) static const char * sequence_data_iterator_next(struct snapshot_iterator *base, uint32_t *size) { struct sequence_data_iterator *iter = (struct sequence_data_iterator *)base; struct sequence_data *data = light_sequence_iterator_get_and_next(&sequence_data_index, &iter->iter); if (data == NULL) return NULL; char *buf_end = iter->tuple; buf_end = mp_encode_array(buf_end, 2); buf_end = mp_encode_uint(buf_end, data->id); buf_end = (data->value >= 0 ? mp_encode_uint(buf_end, data->value) : mp_encode_int(buf_end, data->value)); assert(buf_end <= iter->tuple + SEQUENCE_TUPLE_BUF_SIZE); *size = buf_end - iter->tuple; return iter->tuple; } static void sequence_data_iterator_free(struct snapshot_iterator *base) { struct sequence_data_iterator *iter = (struct sequence_data_iterator *)base; light_sequence_iterator_destroy(&sequence_data_index, &iter->iter); TRASH(iter); free(iter); } struct snapshot_iterator * sequence_data_iterator_create(void) { struct sequence_data_iterator *iter = calloc(1, sizeof(*iter) + SEQUENCE_TUPLE_BUF_SIZE); if (iter == NULL) { diag_set(OutOfMemory, sizeof(*iter) + SEQUENCE_TUPLE_BUF_SIZE, "malloc", "sequence_data_iterator"); return NULL; } iter->base.free = sequence_data_iterator_free; iter->base.next = sequence_data_iterator_next; light_sequence_iterator_begin(&sequence_data_index, &iter->iter); light_sequence_iterator_freeze(&sequence_data_index, &iter->iter); return &iter->base; } tarantool_1.9.1.26.g63eb81e3c/src/box/iproto_constants.h0000664000000000000000000002325513306565107021363 0ustar rootroot#ifndef TARANTOOL_IPROTO_CONSTANTS_H_INCLUDED #define TARANTOOL_IPROTO_CONSTANTS_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #if defined(__cplusplus) extern "C" { #endif enum { /** Maximal iproto package body length (2GiB) */ IPROTO_BODY_LEN_MAX = 2147483648UL, /* Maximal length of text handshake (greeting) */ IPROTO_GREETING_SIZE = 128, /** marker + len + prev crc32 + cur crc32 + (padding) */ XLOG_FIXHEADER_SIZE = 19 }; enum iproto_key { IPROTO_REQUEST_TYPE = 0x00, IPROTO_SYNC = 0x01, /* Replication keys (header) */ IPROTO_REPLICA_ID = 0x02, IPROTO_LSN = 0x03, IPROTO_TIMESTAMP = 0x04, IPROTO_SCHEMA_VERSION = 0x05, IPROTO_SERVER_VERSION = 0x06, IPROTO_SERVER_IS_RO = 0x07, /* Leave a gap for other keys in the header. 
*/ IPROTO_SPACE_ID = 0x10, IPROTO_INDEX_ID = 0x11, IPROTO_LIMIT = 0x12, IPROTO_OFFSET = 0x13, IPROTO_ITERATOR = 0x14, IPROTO_INDEX_BASE = 0x15, /* Leave a gap between integer values and other keys */ IPROTO_KEY = 0x20, IPROTO_TUPLE = 0x21, IPROTO_FUNCTION_NAME = 0x22, IPROTO_USER_NAME = 0x23, /* Replication keys (body) */ IPROTO_INSTANCE_UUID = 0x24, IPROTO_CLUSTER_UUID = 0x25, IPROTO_VCLOCK = 0x26, IPROTO_EXPR = 0x27, /* EVAL */ IPROTO_OPS = 0x28, /* UPSERT but not UPDATE ops, because of legacy */ /* Leave a gap between request keys and response keys */ IPROTO_DATA = 0x30, IPROTO_ERROR = 0x31, IPROTO_KEY_MAX }; #define bit(c) (1ULL<= IPROTO_KEY_MAX) return NULL; return iproto_key_strs[key]; } /** A data manipulation request. */ static inline bool iproto_type_is_dml(uint32_t type) { return (type >= IPROTO_SELECT && type <= IPROTO_DELETE) || type == IPROTO_UPSERT || type == IPROTO_NOP; } /** * Returns a map of mandatory members of IPROTO DML request. * @param type iproto type. */ static inline uint64_t dml_request_key_map(uint32_t type) { /** Advanced requests don't have a defined key map. */ assert(iproto_type_is_dml(type)); extern const uint64_t iproto_body_key_map[]; return iproto_body_key_map[type]; } /** * A read only request, CALL is included since it * may be read-only, and there are separate checks * for all database requests issues from CALL. */ static inline bool iproto_type_is_select(uint32_t type) { return type <= IPROTO_SELECT || type == IPROTO_CALL || type == IPROTO_EVAL; } /** A common request with a mandatory and simple body (key, tuple, ops) */ static inline bool iproto_type_is_request(uint32_t type) { return type > IPROTO_OK && type <= IPROTO_TYPE_STAT_MAX; } /** * The request is "synchronous": no other requests * on this connection should be taken before this one * ends. */ static inline bool iproto_type_is_sync(uint32_t type) { return type == IPROTO_JOIN || type == IPROTO_SUBSCRIBE; } /** This is an error. 
*/ static inline bool iproto_type_is_error(uint32_t type) { return (type & IPROTO_TYPE_ERROR) != 0; } /** The snapshot row metadata repeats the structure of REPLACE request. */ struct PACKED request_replace_body { uint8_t m_body; uint8_t k_space_id; uint8_t m_space_id; uint32_t v_space_id; uint8_t k_tuple; }; /** * Xrow keys for Vinyl run information. * @sa struct vy_run_info. */ enum vy_run_info_key { /** Min key in the run. */ VY_RUN_INFO_MIN_KEY = 1, /** Max key in the run. */ VY_RUN_INFO_MAX_KEY = 2, /** Min LSN over all statements in the run. */ VY_RUN_INFO_MIN_LSN = 3, /** Max LSN over all statements in the run. */ VY_RUN_INFO_MAX_LSN = 4, /** Number of pages in the run. */ VY_RUN_INFO_PAGE_COUNT = 5, /** Bloom filter for keys. */ VY_RUN_INFO_BLOOM = 6, /** The last key in this enum + 1 */ VY_RUN_INFO_KEY_MAX }; /** * Return vy_run_info key name by @a key code. * @param key key */ static inline const char * vy_run_info_key_name(enum vy_run_info_key key) { if (key <= 0 || key >= VY_RUN_INFO_KEY_MAX) return NULL; extern const char *vy_run_info_key_strs[]; return vy_run_info_key_strs[key]; } /** * Xrow keys for Vinyl page information. * @sa struct vy_run_info. */ enum vy_page_info_key { /** Offset of page data in the run file. */ VY_PAGE_INFO_OFFSET = 1, /** Size of page data in the run file. */ VY_PAGE_INFO_SIZE = 2, /** Size of page data in memory, i.e. unpacked. */ VY_PAGE_INFO_UNPACKED_SIZE = 3, /* Number of statements in the page. */ VY_PAGE_INFO_ROW_COUNT = 4, /* Minimal key stored in the page. */ VY_PAGE_INFO_MIN_KEY = 5, /** Offset of the row index in the page. */ VY_PAGE_INFO_ROW_INDEX_OFFSET = 6, /** The last key in this enum + 1 */ VY_PAGE_INFO_KEY_MAX }; /** * Return vy_page_info key name by @a key code. 
* @param key key */ static inline const char * vy_page_info_key_name(enum vy_page_info_key key) { if (key <= 0 || key >= VY_PAGE_INFO_KEY_MAX) return NULL; extern const char *vy_page_info_key_strs[]; return vy_page_info_key_strs[key]; } /** * Xrow keys for Vinyl row index. * @sa struct vy_page_info. */ enum vy_row_index_key { /** Array of row offsets. */ VY_ROW_INDEX_DATA = 1, /** The last key in this enum + 1 */ VY_ROW_INDEX_KEY_MAX }; /** * Return vy_page_info key name by @a key code. * @param key key */ static inline const char * vy_row_index_key_name(enum vy_row_index_key key) { if (key <= 0 || key >= VY_ROW_INDEX_KEY_MAX) return NULL; extern const char *vy_row_index_key_strs[]; return vy_row_index_key_strs[key]; } #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* TARANTOOL_IPROTO_CONSTANTS_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/box/port.h0000664000000000000000000001076013306565107016734 0ustar rootroot#ifndef INCLUDES_TARANTOOL_BOX_PORT_H #define INCLUDES_TARANTOOL_BOX_PORT_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct tuple; struct obuf; /** * A single port represents a destination of box_process output. * One such destination can be a Lua stack, or the binary * protocol. * An instance of a port is usually short lived, as it is created * for every server request. State of the instance is represented * by the tuples added to it. E.g.: * * struct port port; * port_tuple_create(&port); * for (tuple in tuples) * port_tuple_add(tuple); * * port_dump(&port, obuf); * port_destroy(&port); * * Beginning with Tarantool 1.5, tuple can have different internal * structure and port_tuple_add() requires a double * dispatch: first, by the type of the port the tuple is being * added to, second, by the type of the tuple format, since the * format defines the internal structure of the tuple. */ struct port; struct port_vtab { /** * Dump the content of a port to an output buffer. * On success returns number of entries dumped. * On failure sets diag and returns -1. */ int (*dump)(struct port *port, struct obuf *out); /** * Same as dump(), but use the legacy Tarantool 1.6 * format. */ int (*dump_16)(struct port *port, struct obuf *out); /** * Destroy a port and release associated resources. */ void (*destroy)(struct port *port); }; /** * Abstract port instance. It is supposed to be converted to * a concrete port realization, e.g. port_tuple. */ struct port { /** Virtual method table. 
*/ const struct port_vtab *vtab; /** * Implementation dependent content. Needed to declare * an abstract port instance on stack. */ char pad[48]; }; struct port_tuple_entry { struct port_tuple_entry *next; struct tuple *tuple; }; /** * Port implementation used for storing tuples. */ struct port_tuple { const struct port_vtab *vtab; int size; struct port_tuple_entry *first; struct port_tuple_entry *last; struct port_tuple_entry first_entry; }; static_assert(sizeof(struct port_tuple) <= sizeof(struct port), "sizeof(struct port_tuple) must be <= sizeof(struct port)"); extern const struct port_vtab port_tuple_vtab; /** * Convert an abstract port instance to a tuple port. */ static inline struct port_tuple * port_tuple(struct port *port) { assert(port->vtab == &port_tuple_vtab); return (struct port_tuple *)port; } /** * Create a port for storing tuples. */ void port_tuple_create(struct port *port); /** * Append a tuple to a port. */ int port_tuple_add(struct port *port, struct tuple *tuple); /** * Destroy an abstract port instance. */ void port_destroy(struct port *port); /** * Dump an abstract port instance to an output buffer. * Return number of entries dumped on success, -1 on error. */ int port_dump(struct port *port, struct obuf *out); /** * Same as port_dump(), but use the legacy Tarantool 1.6 * format. */ int port_dump_16(struct port *port, struct obuf *out); void port_init(void); void port_free(void); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined __cplusplus */ #endif /* INCLUDES_TARANTOOL_BOX_PORT_H */ tarantool_1.9.1.26.g63eb81e3c/src/box/memtx_space.c0000664000000000000000000007110313306565107020246 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "memtx_space.h" #include "space.h" #include "iproto_constants.h" #include "txn.h" #include "tuple_update.h" #include "xrow.h" #include "memtx_hash.h" #include "memtx_tree.h" #include "memtx_rtree.h" #include "memtx_bitset.h" #include "memtx_tuple.h" #include "column_mask.h" #include "sequence.h" static void memtx_space_destroy(struct space *space) { free(space); } static size_t memtx_space_bsize(struct space *space) { struct memtx_space *memtx_space = (struct memtx_space *)space; return memtx_space->bsize; } /* {{{ DML */ void memtx_space_update_bsize(struct space *space, const struct tuple *old_tuple, const struct tuple *new_tuple) { struct memtx_space *memtx_space = (struct memtx_space *)space; ssize_t old_bsize = old_tuple ? box_tuple_bsize(old_tuple) : 0; ssize_t new_bsize = new_tuple ? 
box_tuple_bsize(new_tuple) : 0; assert((ssize_t)memtx_space->bsize + new_bsize - old_bsize >= 0); memtx_space->bsize += new_bsize - old_bsize; } /** * A version of space_replace for a space which has * no indexes (is not yet fully built). */ int memtx_space_replace_no_keys(struct space *space, struct txn_stmt *stmt, enum dup_replace_mode mode) { (void)stmt; (void)mode; struct index *index = index_find(space, 0); assert(index == NULL); /* not reached. */ (void) index; return -1; } enum { /** * This number is calculated based on the * max (realistic) number of insertions * a deletion from a B-tree or an R-tree * can lead to, and, as a result, the max * number of new block allocations. */ RESERVE_EXTENTS_BEFORE_DELETE = 8, RESERVE_EXTENTS_BEFORE_REPLACE = 16 }; /** * A short-cut version of replace() used during bulk load * from snapshot. */ int memtx_space_replace_build_next(struct space *space, struct txn_stmt *stmt, enum dup_replace_mode mode) { assert(stmt->old_tuple == NULL && mode == DUP_INSERT); (void) mode; if (stmt->old_tuple) { /* * Called from txn_rollback() In practice * is impossible: all possible checks for tuple * validity are done before the space is changed, * and WAL is off, so this part can't fail. */ panic("Failed to commit transaction when loading " "from snapshot"); } if (index_build_next(space->index[0], stmt->new_tuple) != 0) return -1; stmt->engine_savepoint = stmt; memtx_space_update_bsize(space, NULL, stmt->new_tuple); return 0; } /** * A short-cut version of replace() used when loading * data from XLOG files. */ int memtx_space_replace_primary_key(struct space *space, struct txn_stmt *stmt, enum dup_replace_mode mode) { if (index_replace(space->index[0], stmt->old_tuple, stmt->new_tuple, mode, &stmt->old_tuple) != 0) return -1; stmt->engine_savepoint = stmt; memtx_space_update_bsize(space, stmt->old_tuple, stmt->new_tuple); return 0; } /** * @brief A single method to handle REPLACE, DELETE and UPDATE. 
* * @param space space * @param old_tuple the tuple that should be removed (can be NULL) * @param new_tuple the tuple that should be inserted (can be NULL) * @param mode dup_replace_mode, used only if new_tuple is not * NULL and old_tuple is NULL, and only for the * primary key. * * For DELETE, new_tuple must be NULL. old_tuple must be * previously found in the primary key. * * For REPLACE, old_tuple must be NULL. The additional * argument dup_replace_mode further defines how REPLACE * should proceed. * * For UPDATE, both old_tuple and new_tuple must be given, * where old_tuple must be previously found in the primary key. * * Let's consider these three cases in detail: * * 1. DELETE, old_tuple is not NULL, new_tuple is NULL * The effect is that old_tuple is removed from all * indexes. dup_replace_mode is ignored. * * 2. REPLACE, old_tuple is NULL, new_tuple is not NULL, * has one simple sub-case and two with further * ramifications: * * A. dup_replace_mode is DUP_INSERT. Attempts to insert the * new tuple into all indexes. If *any* of the unique indexes * has a duplicate key, deletion is aborted, all of its * effects are removed, and an error is thrown. * * B. dup_replace_mode is DUP_REPLACE. It means an existing * tuple has to be replaced with the new one. To do it, tries * to find a tuple with a duplicate key in the primary index. * If the tuple is not found, throws an error. Otherwise, * replaces the old tuple with a new one in the primary key. * Continues on to secondary keys, but if there is any * secondary key, which has a duplicate tuple, but one which * is different from the duplicate found in the primary key, * aborts, puts everything back, throws an exception. 
* * For example, if there is a space with 3 unique keys and * two tuples { 1, 2, 3 } and { 3, 1, 2 }: * * This REPLACE/DUP_REPLACE is OK: { 1, 5, 5 } * This REPLACE/DUP_REPLACE is not OK: { 2, 2, 2 } (there * is no tuple with key '2' in the primary key) * This REPLACE/DUP_REPLACE is not OK: { 1, 1, 1 } (there * is a conflicting tuple in the secondary unique key). * * C. dup_replace_mode is DUP_REPLACE_OR_INSERT. If * there is a duplicate tuple in the primary key, behaves the * same way as DUP_REPLACE, otherwise behaves the same way as * DUP_INSERT. * * 3. UPDATE has to delete the old tuple and insert a new one. * dup_replace_mode is ignored. * Note that old_tuple primary key doesn't have to match * new_tuple primary key, thus a duplicate can be found. * For this reason, and since there can be duplicates in * other indexes, UPDATE is the same as DELETE + * REPLACE/DUP_INSERT. * * @return old_tuple. DELETE, UPDATE and REPLACE/DUP_REPLACE * always produce an old tuple. REPLACE/DUP_INSERT always returns * NULL. REPLACE/DUP_REPLACE_OR_INSERT may or may not find * a duplicate. * * The method is all-or-nothing in all cases. Changes are either * applied to all indexes, or nothing applied at all. * * Note, that even in case of REPLACE, dup_replace_mode only * affects the primary key, for secondary keys it's always * DUP_INSERT. * * The call never removes more than one tuple: if * old_tuple is given, dup_replace_mode is ignored. * Otherwise, it's taken into account only for the * primary key. */ int memtx_space_replace_all_keys(struct space *space, struct txn_stmt *stmt, enum dup_replace_mode mode) { struct tuple *old_tuple = stmt->old_tuple; struct tuple *new_tuple = stmt->new_tuple; /* * Ensure we have enough slack memory to guarantee * successful statement-level rollback. */ if (memtx_index_extent_reserve(new_tuple ? 
RESERVE_EXTENTS_BEFORE_REPLACE : RESERVE_EXTENTS_BEFORE_DELETE) != 0) return -1; uint32_t i = 0; /* Update the primary key */ struct index *pk = index_find(space, 0); if (pk == NULL) return -1; assert(pk->def->opts.is_unique); /* * If old_tuple is not NULL, the index has to * find and delete it, or return an error. */ if (index_replace(pk, old_tuple, new_tuple, mode, &old_tuple) != 0) return -1; assert(old_tuple || new_tuple); /* Update secondary keys. */ for (i++; i < space->index_count; i++) { struct tuple *unused; struct index *index = space->index[i]; if (index_replace(index, old_tuple, new_tuple, DUP_INSERT, &unused) != 0) goto rollback; } stmt->old_tuple = old_tuple; stmt->engine_savepoint = stmt; memtx_space_update_bsize(space, old_tuple, new_tuple); return 0; rollback: for (; i > 0; i--) { struct tuple *unused; struct index *index = space->index[i - 1]; /* Rollback must not fail. */ if (index_replace(index, new_tuple, old_tuple, DUP_INSERT, &unused) != 0) { diag_log(); unreachable(); panic("failed to rollback change"); } } return -1; } static inline enum dup_replace_mode dup_replace_mode(uint32_t op) { return op == IPROTO_INSERT ? 
DUP_INSERT : DUP_REPLACE_OR_INSERT; } static int memtx_space_apply_initial_join_row(struct space *space, struct request *request) { struct memtx_space *memtx_space = (struct memtx_space *)space; if (request->type != IPROTO_INSERT) { diag_set(ClientError, ER_UNKNOWN_REQUEST_TYPE, request->type); return -1; } request->header->replica_id = 0; struct txn *txn = txn_begin_stmt(space); if (txn == NULL) return -1; struct txn_stmt *stmt = txn_current_stmt(txn); stmt->new_tuple = memtx_tuple_new(space->format, request->tuple, request->tuple_end); if (stmt->new_tuple == NULL) goto rollback; tuple_ref(stmt->new_tuple); if (memtx_space->replace(space, stmt, DUP_INSERT) != 0) goto rollback; return txn_commit_stmt(txn, request); rollback: say_error("rollback: %s", diag_last_error(diag_get())->errmsg); txn_rollback_stmt(); return -1; } static int memtx_space_execute_replace(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { struct memtx_space *memtx_space = (struct memtx_space *)space; struct txn_stmt *stmt = txn_current_stmt(txn); enum dup_replace_mode mode = dup_replace_mode(request->type); stmt->new_tuple = memtx_tuple_new(space->format, request->tuple, request->tuple_end); if (stmt->new_tuple == NULL) return -1; tuple_ref(stmt->new_tuple); if (memtx_space->replace(space, stmt, mode) != 0) return -1; /** The new tuple is referenced by the primary key. */ *result = stmt->new_tuple; return 0; } static int memtx_space_execute_delete(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { struct memtx_space *memtx_space = (struct memtx_space *)space; struct txn_stmt *stmt = txn_current_stmt(txn); /* Try to find the tuple by unique key. 
*/ struct index *pk = index_find_unique(space, request->index_id); if (pk == NULL) return -1; const char *key = request->key; uint32_t part_count = mp_decode_array(&key); if (exact_key_validate(pk->def->key_def, key, part_count) != 0) return -1; if (index_get(pk, key, part_count, &stmt->old_tuple) != 0) return -1; if (stmt->old_tuple != NULL && memtx_space->replace(space, stmt, DUP_REPLACE_OR_INSERT) != 0) return -1; *result = stmt->old_tuple; return 0; } static int memtx_space_execute_update(struct space *space, struct txn *txn, struct request *request, struct tuple **result) { struct memtx_space *memtx_space = (struct memtx_space *)space; struct txn_stmt *stmt = txn_current_stmt(txn); /* Try to find the tuple by unique key. */ struct index *pk = index_find_unique(space, request->index_id); if (pk == NULL) return -1; const char *key = request->key; uint32_t part_count = mp_decode_array(&key); if (exact_key_validate(pk->def->key_def, key, part_count) != 0) return -1; if (index_get(pk, key, part_count, &stmt->old_tuple) != 0) return -1; if (stmt->old_tuple == NULL) { *result = NULL; return 0; } /* Update the tuple; legacy, request ops are in request->tuple */ uint32_t new_size = 0, bsize; const char *old_data = tuple_data_range(stmt->old_tuple, &bsize); const char *new_data = tuple_update_execute(region_aligned_alloc_cb, &fiber()->gc, request->tuple, request->tuple_end, old_data, old_data + bsize, &new_size, request->index_base, NULL); if (new_data == NULL) return -1; stmt->new_tuple = memtx_tuple_new(space->format, new_data, new_data + new_size); if (stmt->new_tuple == NULL) return -1; tuple_ref(stmt->new_tuple); if (stmt->old_tuple != NULL && memtx_space->replace(space, stmt, DUP_REPLACE) != 0) return -1; *result = stmt->new_tuple; return 0; } static int memtx_space_execute_upsert(struct space *space, struct txn *txn, struct request *request) { struct memtx_space *memtx_space = (struct memtx_space *)space; struct txn_stmt *stmt = txn_current_stmt(txn); /* * Check 
all tuple fields: we should produce an error on * malformed tuple even if upsert turns into an update. */ if (tuple_validate_raw(space->format, request->tuple)) return -1; struct index *index = index_find_unique(space, 0); if (index == NULL) return -1; uint32_t part_count = index->def->key_def->part_count; /* Extract the primary key from tuple. */ const char *key = tuple_extract_key_raw(request->tuple, request->tuple_end, index->def->key_def, NULL); if (key == NULL) return -1; /* Cut array header */ mp_decode_array(&key); /* Try to find the tuple by primary key. */ if (index_get(index, key, part_count, &stmt->old_tuple) != 0) return -1; if (stmt->old_tuple == NULL) { /** * Old tuple was not found. A write optimized * engine may only know this after commit, so * some errors which happen on this branch would * only make it to the error log in it. * To provide identical semantics, we should not throw * anything. However, considering the kind of * error which may occur, throwing it won't break * cross-engine compatibility: * - update ops are checked before commit * - OOM may happen at any time * - duplicate key has to be checked by * write-optimized engine before commit, so if * we get it here, it's also OK to throw it * @sa https://github.com/tarantool/tarantool/issues/1156 */ if (tuple_update_check_ops(region_aligned_alloc_cb, &fiber()->gc, request->ops, request->ops_end, request->index_base)) { return -1; } stmt->new_tuple = memtx_tuple_new(space->format, request->tuple, request->tuple_end); if (stmt->new_tuple == NULL) return -1; tuple_ref(stmt->new_tuple); } else { uint32_t new_size = 0, bsize; const char *old_data = tuple_data_range(stmt->old_tuple, &bsize); /* * Update the tuple. * tuple_upsert_execute() fails on totally wrong * tuple ops, but ignores ops that not suitable * for the tuple. 
*/ uint64_t column_mask = COLUMN_MASK_FULL; const char *new_data = tuple_upsert_execute(region_aligned_alloc_cb, &fiber()->gc, request->ops, request->ops_end, old_data, old_data + bsize, &new_size, request->index_base, false, &column_mask); if (new_data == NULL) return -1; stmt->new_tuple = memtx_tuple_new(space->format, new_data, new_data + new_size); if (stmt->new_tuple == NULL) return -1; tuple_ref(stmt->new_tuple); struct index *pk = space->index[0]; if (!key_update_can_be_skipped(pk->def->key_def->column_mask, column_mask) && tuple_compare(stmt->old_tuple, stmt->new_tuple, pk->def->key_def) != 0) { /* Primary key is changed: log error and do nothing. */ diag_set(ClientError, ER_CANT_UPDATE_PRIMARY_KEY, pk->def->name, space_name(space)); diag_log(); tuple_unref(stmt->new_tuple); stmt->old_tuple = NULL; stmt->new_tuple = NULL; } } /* * It's OK to use DUP_REPLACE_OR_INSERT: we don't risk * inserting a new tuple if the old one exists, since * we checked this case explicitly and skipped the upsert * above. */ if (stmt->new_tuple != NULL && memtx_space->replace(space, stmt, DUP_REPLACE_OR_INSERT) != 0) return -1; /* Return nothing: UPSERT does not return data. */ return 0; } /* }}} DML */ /* {{{ DDL */ static int memtx_space_check_index_def(struct space *space, struct index_def *index_def) { if (index_def->key_def->is_nullable) { if (index_def->iid == 0) { diag_set(ClientError, ER_NULLABLE_PRIMARY, space_name(space)); return -1; } if (index_def->type != TREE) { diag_set(ClientError, ER_UNSUPPORTED, index_type_strs[index_def->type], "nullable parts"); return -1; } } switch (index_def->type) { case HASH: if (! index_def->opts.is_unique) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "HASH index must be unique"); return -1; } break; case TREE: /* TREE index has no limitations. 
*/ break; case RTREE: if (index_def->key_def->part_count != 1) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "RTREE index key can not be multipart"); return -1; } if (index_def->opts.is_unique) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "RTREE index can not be unique"); return -1; } if (index_def->key_def->parts[0].type != FIELD_TYPE_ARRAY) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "RTREE index field type must be ARRAY"); return -1; } /* no furter checks of parts needed */ return 0; case BITSET: if (index_def->key_def->part_count != 1) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "BITSET index key can not be multipart"); return -1; } if (index_def->opts.is_unique) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "BITSET can not be unique"); return -1; } if (index_def->key_def->parts[0].type != FIELD_TYPE_UNSIGNED && index_def->key_def->parts[0].type != FIELD_TYPE_STRING) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), "BITSET index field type must be NUM or STR"); return -1; } /* no furter checks of parts needed */ return 0; default: diag_set(ClientError, ER_INDEX_TYPE, index_def->name, space_name(space)); return -1; } /* Only HASH and TREE indexes checks parts there */ /* Check that there are no ANY, ARRAY, MAP parts */ for (uint32_t i = 0; i < index_def->key_def->part_count; i++) { struct key_part *part = &index_def->key_def->parts[i]; if (part->type <= FIELD_TYPE_ANY || part->type >= FIELD_TYPE_ARRAY) { diag_set(ClientError, ER_MODIFY_INDEX, index_def->name, space_name(space), tt_sprintf("field type '%s' is not supported", field_type_strs[part->type])); return -1; } } return 0; } static struct snapshot_iterator * sequence_data_index_create_snapshot_iterator(struct index *index) { (void)index; return sequence_data_iterator_create(); } static struct index * 
sequence_data_index_new(struct memtx_engine *memtx, struct index_def *def) { struct memtx_hash_index *index = memtx_hash_index_new(memtx, def); if (index == NULL) return NULL; static struct index_vtab vtab; static bool vtab_initialized; if (!vtab_initialized) { vtab = *index->base.vtab; vtab.create_snapshot_iterator = sequence_data_index_create_snapshot_iterator; vtab_initialized = true; } index->base.vtab = &vtab; return &index->base; } static struct index * memtx_space_create_index(struct space *space, struct index_def *index_def) { struct memtx_engine *memtx = (struct memtx_engine *)space->engine; if (space->def->id == BOX_SEQUENCE_DATA_ID) { /* * The content of _sequence_data is not updated * when a sequence is used for auto increment in * a space. To make sure all sequence values are * written to snapshot, use a special snapshot * iterator that walks over the sequence cache. */ return sequence_data_index_new(memtx, index_def); } switch (index_def->type) { case HASH: return (struct index *)memtx_hash_index_new(memtx, index_def); case TREE: return (struct index *)memtx_tree_index_new(memtx, index_def); case RTREE: return (struct index *)memtx_rtree_index_new(memtx, index_def); case BITSET: return (struct index *)memtx_bitset_index_new(memtx, index_def); default: unreachable(); return NULL; } } /** * Replicate engine state in a newly created space. * This function is invoked when executing a replace into _index * space originating either from a snapshot or from the binary * log. It brings the newly created space up to date with the * engine recovery state: if the event comes from the snapshot, * then the primary key is not built, otherwise it's created * right away. 
*/ static void memtx_space_do_add_primary_key(struct space *space, enum memtx_recovery_state state) { struct memtx_space *memtx_space = (struct memtx_space *)space; switch (state) { case MEMTX_INITIALIZED: panic("can't create a new space before snapshot recovery"); break; case MEMTX_INITIAL_RECOVERY: index_begin_build(space->index[0]); memtx_space->replace = memtx_space_replace_build_next; break; case MEMTX_FINAL_RECOVERY: index_begin_build(space->index[0]); index_end_build(space->index[0]); memtx_space->replace = memtx_space_replace_primary_key; break; case MEMTX_OK: index_begin_build(space->index[0]); index_end_build(space->index[0]); memtx_space->replace = memtx_space_replace_all_keys; break; } } static int memtx_space_add_primary_key(struct space *space) { struct memtx_engine *memtx = (struct memtx_engine *)space->engine; memtx_space_do_add_primary_key(space, memtx->state); return 0; } static int memtx_space_check_format(struct space *new_space, struct space *old_space) { if (old_space->index_count == 0) return 0; struct index *pk = old_space->index[0]; if (index_size(pk) == 0) return 0; struct iterator *it = index_create_iterator(pk, ITER_ALL, NULL, 0); if (it == NULL) return -1; int rc; struct tuple *tuple; while ((rc = iterator_next(it, &tuple)) == 0 && tuple != NULL) { /* * Check that the tuple is OK according to the * new format. */ rc = tuple_validate(new_space->format, tuple); if (rc != 0) break; } iterator_delete(it); return rc; } static void memtx_space_drop_primary_key(struct space *space) { struct memtx_space *memtx_space = (struct memtx_space *)space; memtx_space->replace = memtx_space_replace_no_keys; } static void memtx_init_system_space(struct space *space) { memtx_space_do_add_primary_key(space, MEMTX_OK); } static int memtx_space_build_secondary_key(struct space *old_space, struct space *new_space, struct index *new_index) { /** * If it's a secondary key, and we're not building them * yet (i.e. it's snapshot recovery for memtx), do nothing. 
*/ if (new_index->def->iid != 0) { struct memtx_space *memtx_space; memtx_space = (struct memtx_space *)new_space; if (!(memtx_space->replace == memtx_space_replace_all_keys)) return 0; } struct index *pk = index_find(old_space, 0); if (pk == NULL) return -1; struct errinj *inj = errinj(ERRINJ_BUILD_SECONDARY, ERRINJ_INT); if (inj != NULL && inj->iparam == (int)new_index->def->iid) { diag_set(ClientError, ER_INJECTION, "buildSecondaryKey"); return -1; } /* Now deal with any kind of add index during normal operation. */ struct iterator *it = index_create_iterator(pk, ITER_ALL, NULL, 0); if (it == NULL) return -1; /* * The index has to be built tuple by tuple, since * there is no guarantee that all tuples satisfy * new index' constraints. If any tuple can not be * added to the index (insufficient number of fields, * etc., the build is aborted. */ /* Build the new index. */ int rc; struct tuple *tuple; while ((rc = iterator_next(it, &tuple)) == 0 && tuple != NULL) { /* * Check that the tuple is OK according to the * new format. */ rc = tuple_validate(new_space->format, tuple); if (rc != 0) break; /* * @todo: better message if there is a duplicate. */ struct tuple *old_tuple; rc = index_replace(new_index, NULL, tuple, DUP_INSERT, &old_tuple); if (rc != 0) break; assert(old_tuple == NULL); /* Guaranteed by DUP_INSERT. 
*/ (void) old_tuple; } iterator_delete(it); return rc; } static int memtx_space_prepare_truncate(struct space *old_space, struct space *new_space) { struct memtx_space *old_memtx_space = (struct memtx_space *)old_space; struct memtx_space *new_memtx_space = (struct memtx_space *)new_space; new_memtx_space->replace = old_memtx_space->replace; return 0; } static void memtx_space_prune(struct space *space) { struct index *index = space_index(space, 0); if (index == NULL) return; struct iterator *it = index_create_iterator(index, ITER_ALL, NULL, 0); if (it == NULL) goto fail; int rc; struct tuple *tuple; while ((rc = iterator_next(it, &tuple)) == 0 && tuple != NULL) tuple_unref(tuple); iterator_delete(it); if (rc == 0) return; fail: /* * This function is called from space_vtab::commit_alter() * or commit_truncate(), which do not tolerate failures, * so we have no other choice but panic here. Good news is * memtx iterators do not fail so we should not normally * get here. */ diag_log(); unreachable(); panic("failed to prune space"); } static void memtx_space_commit_truncate(struct space *old_space, struct space *new_space) { (void)new_space; memtx_space_prune(old_space); } static int memtx_space_prepare_alter(struct space *old_space, struct space *new_space) { struct memtx_space *old_memtx_space = (struct memtx_space *)old_space; struct memtx_space *new_memtx_space = (struct memtx_space *)new_space; new_memtx_space->replace = old_memtx_space->replace; bool is_empty = old_space->index_count == 0 || index_size(old_space->index[0]) == 0; return space_def_check_compatibility(old_space->def, new_space->def, is_empty); } static void memtx_space_commit_alter(struct space *old_space, struct space *new_space) { struct memtx_space *old_memtx_space = (struct memtx_space *)old_space; struct memtx_space *new_memtx_space = (struct memtx_space *)new_space; /* Delete all tuples when the last index is dropped. 
*/ if (new_space->index_count == 0) memtx_space_prune(old_space); else new_memtx_space->bsize = old_memtx_space->bsize; } /* }}} DDL */ static const struct space_vtab memtx_space_vtab = { /* .destroy = */ memtx_space_destroy, /* .bsize = */ memtx_space_bsize, /* .apply_initial_join_row = */ memtx_space_apply_initial_join_row, /* .execute_replace = */ memtx_space_execute_replace, /* .execute_delete = */ memtx_space_execute_delete, /* .execute_update = */ memtx_space_execute_update, /* .execute_upsert = */ memtx_space_execute_upsert, /* .init_system_space = */ memtx_init_system_space, /* .check_index_def = */ memtx_space_check_index_def, /* .create_index = */ memtx_space_create_index, /* .add_primary_key = */ memtx_space_add_primary_key, /* .drop_primary_key = */ memtx_space_drop_primary_key, /* .check_format = */ memtx_space_check_format, /* .build_secondary_key = */ memtx_space_build_secondary_key, /* .prepare_truncate = */ memtx_space_prepare_truncate, /* .commit_truncate = */ memtx_space_commit_truncate, /* .prepare_alter = */ memtx_space_prepare_alter, /* .commit_alter = */ memtx_space_commit_alter, }; struct space * memtx_space_new(struct memtx_engine *memtx, struct space_def *def, struct rlist *key_list) { struct memtx_space *memtx_space = malloc(sizeof(*memtx_space)); if (memtx_space == NULL) { diag_set(OutOfMemory, sizeof(*memtx_space), "malloc", "struct memtx_space"); return NULL; } /* Create a format from key and field definitions. 
*/ int key_count = 0; struct index_def *index_def; rlist_foreach_entry(index_def, key_list, link) key_count++; struct key_def **keys = region_alloc(&fiber()->gc, sizeof(*keys) * key_count); if (keys == NULL) { free(memtx_space); return NULL; } key_count = 0; rlist_foreach_entry(index_def, key_list, link) keys[key_count++] = index_def->key_def; struct tuple_format *format = tuple_format_new(&memtx_tuple_format_vtab, keys, key_count, 0, def->fields, def->field_count, def->dict); if (format == NULL) { free(memtx_space); return NULL; } format->exact_field_count = def->exact_field_count; tuple_format_ref(format); if (space_create((struct space *)memtx_space, (struct engine *)memtx, &memtx_space_vtab, def, key_list, format) != 0) { tuple_format_unref(format); free(memtx_space); return NULL; } /* Format is now referenced by the space. */ tuple_format_unref(format); memtx_space->bsize = 0; memtx_space->replace = memtx_space_replace_no_keys; return (struct space *)memtx_space; } tarantool_1.9.1.26.g63eb81e3c/src/box/schema.cc0000664000000000000000000003735413306565107017356 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "schema.h" #include "func.h" #include "sequence.h" #include "tuple.h" #include "assoc.h" #include "alter.h" #include "scoped_guard.h" #include "version.h" #include "user.h" #include /** * @module Data Dictionary * * The data dictionary is responsible for storage and caching * of system metadata, such as information about existing * spaces, indexes, tuple formats. Space and index metadata * is called in dedicated spaces, _space and _index respectively. * The contents of these spaces is fully cached in a cache of * struct space objects. * * struct space is an in-memory instance representing a single * space with its metadata, space data, and methods to manage * it. */ /** All existing spaces. 
*/ static struct mh_i32ptr_t *spaces; static struct mh_i32ptr_t *funcs; static struct mh_strnptr_t *funcs_by_name; static struct mh_i32ptr_t *sequences; uint32_t schema_version = 0; uint32_t dd_version_id = version_id(1, 6, 4); struct rlist on_alter_space = RLIST_HEAD_INITIALIZER(on_alter_space); struct rlist on_alter_sequence = RLIST_HEAD_INITIALIZER(on_alter_sequence); /** * Lock of scheme modification */ struct latch schema_lock = LATCH_INITIALIZER(schema_lock); bool space_is_system(struct space *space) { return space->def->id > BOX_SYSTEM_ID_MIN && space->def->id < BOX_SYSTEM_ID_MAX; } /** Return space by its number */ struct space * space_by_id(uint32_t id) { mh_int_t space = mh_i32ptr_find(spaces, id, NULL); if (space == mh_end(spaces)) return NULL; return (struct space *) mh_i32ptr_node(spaces, space)->val; } /** Return current schema version */ uint32_t box_schema_version() { return schema_version; } /** * Visit all spaces and apply 'func'. */ int space_foreach(int (*func)(struct space *sp, void *udata), void *udata) { mh_int_t i; struct space *space; char key[6]; assert(mp_sizeof_uint(BOX_SYSTEM_ID_MIN) <= sizeof(key)); mp_encode_uint(key, BOX_SYSTEM_ID_MIN); /* * Make sure we always visit system spaces first, * in order from lowest space id to the highest.. * This is essential for correctly recovery from the * snapshot, and harmless otherwise. */ space = space_by_id(BOX_SPACE_ID); struct index *pk = space ? space_index(space, 0) : NULL; if (pk) { struct iterator *it = index_create_iterator(pk, ITER_GE, key, 1); if (it == NULL) return -1; int rc; struct tuple *tuple; while ((rc = iterator_next(it, &tuple)) == 0 && tuple != NULL) { uint32_t id; if (tuple_field_u32(tuple, BOX_SPACE_FIELD_ID, &id) != 0) continue; space = space_cache_find(id); if (space == NULL) continue; if (! 
space_is_system(space)) break; rc = func(space, udata); if (rc != 0) break; } iterator_delete(it); if (rc != 0) return -1; } mh_foreach(spaces, i) { space = (struct space *) mh_i32ptr_node(spaces, i)->val; if (space_is_system(space)) continue; if (func(space, udata) != 0) return -1; } return 0; } /** Delete a space from the space cache and Lua. */ struct space * space_cache_delete(uint32_t id) { mh_int_t k = mh_i32ptr_find(spaces, id, NULL); assert(k != mh_end(spaces)); struct space *space = (struct space *)mh_i32ptr_node(spaces, k)->val; mh_i32ptr_del(spaces, k, NULL); schema_version++; return space; } /** * Update the space in the space cache and in Lua. Returns * the old space instance, if any, or NULL if it's a new space. */ struct space * space_cache_replace(struct space *space) { const struct mh_i32ptr_node_t node = { space_id(space), space }; struct mh_i32ptr_node_t old, *p_old = &old; mh_int_t k = mh_i32ptr_put(spaces, &node, &p_old, NULL); if (k == mh_end(spaces)) { panic_syserror("Out of memory for the data " "dictionary cache."); } schema_version++; return p_old ? (struct space *) p_old->val : NULL; } /** A wrapper around space_new() for data dictionary spaces. 
*/ static void sc_space_new(uint32_t id, const char *name, struct key_def *key_def, struct trigger *replace_trigger, struct trigger *stmt_begin_trigger) { struct index_def *index_def = index_def_new(id, /* space id */ 0 /* index id */, "primary", /* name */ strlen("primary"), TREE /* index type */, &index_opts_default, key_def, NULL); if (index_def == NULL) diag_raise(); auto index_def_guard = make_scoped_guard([=] { index_def_delete(index_def); }); struct space_def *def = space_def_new_xc(id, ADMIN, 0, name, strlen(name), "memtx", strlen("memtx"), &space_opts_default, NULL, 0); auto def_guard = make_scoped_guard([=] { space_def_delete(def); }); struct rlist key_list; rlist_create(&key_list); rlist_add_entry(&key_list, index_def, link); struct space *space = space_new_xc(def, &key_list); (void) space_cache_replace(space); if (replace_trigger) trigger_add(&space->on_replace, replace_trigger); if (stmt_begin_trigger) trigger_add(&space->on_stmt_begin, stmt_begin_trigger); /* * Data dictionary spaces are fully built since: * - they contain data right from the start * - they are fully operable already during recovery * - if there is a record in the snapshot which mandates * addition of a new index to a system space, this * index is built tuple-by-tuple, not in bulk, which * ensures validation of tuples when starting from * a snapshot of older version. 
*/ init_system_space(space); trigger_run_xc(&on_alter_space, space); } uint32_t schema_find_id(uint32_t system_space_id, uint32_t index_id, const char *name, uint32_t len) { if (len > BOX_NAME_MAX) return BOX_ID_NIL; struct space *space = space_cache_find_xc(system_space_id); struct index *index = index_find_system_xc(space, index_id); uint32_t size = mp_sizeof_str(len); struct region *region = &fiber()->gc; uint32_t used = region_used(region); char *key = (char *) region_alloc_xc(region, size); auto guard = make_scoped_guard([=] { region_truncate(region, used); }); mp_encode_str(key, name, len); struct iterator *it = index_create_iterator_xc(index, ITER_EQ, key, 1); IteratorGuard iter_guard(it); struct tuple *tuple = iterator_next_xc(it); if (tuple) { /* id is always field #1 */ return tuple_field_u32_xc(tuple, 0); } return BOX_ID_NIL; } /** * Initialize a prototype for the two mandatory data * dictionary spaces and create a cache entry for them. * When restoring data from the snapshot these spaces * will get altered automatically to their actual format. */ void schema_init() { /* Initialize the space cache. */ spaces = mh_i32ptr_new(); funcs = mh_i32ptr_new(); funcs_by_name = mh_strnptr_new(); sequences = mh_i32ptr_new(); /* * Create surrogate space objects for the mandatory system * spaces (the primal eggs from which we get all the * chicken). Their definitions will be overwritten by the * data in the snapshot, and they will thus be * *re-created* during recovery. Note, the index type * must be TREE and space identifiers must be the smallest * one to ensure that these spaces are always recovered * (and re-created) first. 
*/ /* _schema - key/value space with schema description */ struct key_def *key_def = key_def_new(1); /* part count */ if (key_def == NULL) diag_raise(); auto key_def_guard = make_scoped_guard([&] { key_def_delete(key_def); }); key_def_set_part(key_def, 0 /* part no */, 0 /* field no */, FIELD_TYPE_STRING, false, NULL); sc_space_new(BOX_SCHEMA_ID, "_schema", key_def, &on_replace_schema, NULL); /* _space - home for all spaces. */ key_def_set_part(key_def, 0 /* part no */, 0 /* field no */, FIELD_TYPE_UNSIGNED, false, NULL); /* _collation - collation description. */ sc_space_new(BOX_COLLATION_ID, "_collation", key_def, &on_replace_collation, NULL); sc_space_new(BOX_SPACE_ID, "_space", key_def, &alter_space_on_replace_space, &on_stmt_begin_space); /* _truncate - auxiliary space for triggering space truncation. */ sc_space_new(BOX_TRUNCATE_ID, "_truncate", key_def, &on_replace_truncate, &on_stmt_begin_truncate); /* _sequence - definition of all sequence objects. */ sc_space_new(BOX_SEQUENCE_ID, "_sequence", key_def, &on_replace_sequence, NULL); /* _sequence_data - current sequence value. */ sc_space_new(BOX_SEQUENCE_DATA_ID, "_sequence_data", key_def, &on_replace_sequence_data, NULL); /* _space_seq - association space <-> sequence. */ sc_space_new(BOX_SPACE_SEQUENCE_ID, "_space_sequence", key_def, &on_replace_space_sequence, NULL); /* _user - all existing users */ sc_space_new(BOX_USER_ID, "_user", key_def, &on_replace_user, NULL); /* _func - all executable objects on which one can have grants */ sc_space_new(BOX_FUNC_ID, "_func", key_def, &on_replace_func, NULL); /* * _priv - association user <-> object * The real index is defined in the snapshot. */ sc_space_new(BOX_PRIV_ID, "_priv", key_def, &on_replace_priv, NULL); /* * _cluster - association instance uuid <-> instance id * The real index is defined in the snapshot. 
*/ sc_space_new(BOX_CLUSTER_ID, "_cluster", key_def, &on_replace_cluster, NULL); key_def_delete(key_def); key_def = key_def_new(2); /* part count */ if (key_def == NULL) diag_raise(); /* space no */ key_def_set_part(key_def, 0 /* part no */, 0 /* field no */, FIELD_TYPE_UNSIGNED, false, NULL); /* index no */ key_def_set_part(key_def, 1 /* part no */, 1 /* field no */, FIELD_TYPE_UNSIGNED, false, NULL); sc_space_new(BOX_INDEX_ID, "_index", key_def, &alter_space_on_replace_index, &on_stmt_begin_index); } void schema_free(void) { if (spaces == NULL) return; while (mh_size(spaces) > 0) { mh_int_t i = mh_first(spaces); struct space *space = (struct space *) mh_i32ptr_node(spaces, i)->val; space_cache_delete(space_id(space)); space_delete(space); } mh_i32ptr_delete(spaces); while (mh_size(funcs) > 0) { mh_int_t i = mh_first(funcs); struct func *func = ((struct func *) mh_i32ptr_node(funcs, i)->val); func_cache_delete(func->def->fid); } mh_i32ptr_delete(funcs); while (mh_size(sequences) > 0) { mh_int_t i = mh_first(sequences); struct sequence *seq = ((struct sequence *) mh_i32ptr_node(sequences, i)->val); sequence_cache_delete(seq->def->id); } mh_i32ptr_delete(sequences); } void func_cache_replace(struct func_def *def) { struct func *old = func_by_id(def->fid); if (old) { func_update(old, def); return; } if (mh_size(funcs) >= BOX_FUNCTION_MAX) tnt_raise(ClientError, ER_FUNCTION_MAX, BOX_FUNCTION_MAX); struct func *func = func_new(def); if (func == NULL) { error: panic_syserror("Out of memory for the data " "dictionary cache (stored function)."); } const struct mh_i32ptr_node_t node = { def->fid, func }; mh_int_t k1 = mh_i32ptr_put(funcs, &node, NULL, NULL); if (k1 == mh_end(funcs)) { func->def = NULL; func_delete(func); goto error; } size_t def_name_len = strlen(func->def->name); uint32_t name_hash = mh_strn_hash(func->def->name, def_name_len); const struct mh_strnptr_node_t strnode = { func->def->name, def_name_len, name_hash, func }; mh_int_t k2 = 
mh_strnptr_put(funcs_by_name, &strnode, NULL, NULL); if (k2 == mh_end(funcs_by_name)) { mh_i32ptr_del(funcs, k1, NULL); func->def = NULL; func_delete(func); goto error; } } void func_cache_delete(uint32_t fid) { mh_int_t k = mh_i32ptr_find(funcs, fid, NULL); if (k == mh_end(funcs)) return; struct func *func = (struct func *) mh_i32ptr_node(funcs, k)->val; mh_i32ptr_del(funcs, k, NULL); k = mh_strnptr_find_inp(funcs_by_name, func->def->name, strlen(func->def->name)); if (k != mh_end(funcs)) mh_strnptr_del(funcs_by_name, k, NULL); func_delete(func); } struct func * func_by_id(uint32_t fid) { mh_int_t func = mh_i32ptr_find(funcs, fid, NULL); if (func == mh_end(funcs)) return NULL; return (struct func *) mh_i32ptr_node(funcs, func)->val; } struct func * func_by_name(const char *name, uint32_t name_len) { mh_int_t func = mh_strnptr_find_inp(funcs_by_name, name, name_len); if (func == mh_end(funcs_by_name)) return NULL; return (struct func *) mh_strnptr_node(funcs_by_name, func)->val; } bool schema_find_grants(const char *type, uint32_t id) { struct space *priv = space_cache_find_xc(BOX_PRIV_ID); /** "object" index */ struct index *index = index_find_system_xc(priv, 2); /* * +10 = max(mp_sizeof_uint32) + * max(mp_sizeof_strl(uint32)). 
*/ char key[GRANT_NAME_MAX + 10]; assert(strlen(type) <= GRANT_NAME_MAX); mp_encode_uint(mp_encode_str(key, type, strlen(type)), id); struct iterator *it = index_create_iterator_xc(index, ITER_EQ, key, 2); IteratorGuard iter_guard(it); return iterator_next_xc(it); } struct sequence * sequence_by_id(uint32_t id) { mh_int_t k = mh_i32ptr_find(sequences, id, NULL); if (k == mh_end(sequences)) return NULL; return (struct sequence *) mh_i32ptr_node(sequences, k)->val; } struct sequence * sequence_cache_find(uint32_t id) { struct sequence *seq = sequence_by_id(id); if (seq == NULL) tnt_raise(ClientError, ER_NO_SUCH_SEQUENCE, int2str(id)); return seq; } void sequence_cache_replace(struct sequence_def *def) { struct sequence *seq = sequence_by_id(def->id); if (seq == NULL) { /* Create a new sequence. */ seq = (struct sequence *) calloc(1, sizeof(*seq)); if (seq == NULL) goto error; struct mh_i32ptr_node_t node = { def->id, seq }; if (mh_i32ptr_put(sequences, &node, NULL, NULL) == mh_end(sequences)) goto error; } else { /* Update an existing sequence. */ free(seq->def); } seq->def = def; return; error: panic_syserror("Out of memory for the data " "dictionary cache (sequence)."); } void sequence_cache_delete(uint32_t id) { struct sequence *seq = sequence_by_id(id); if (seq != NULL) { /* Delete sequence data. 
*/ sequence_reset(seq); mh_i32ptr_del(sequences, seq->def->id, NULL); free(seq->def); TRASH(seq); free(seq); } } const char * schema_find_name(enum schema_object_type type, uint32_t object_id) { switch (type) { case SC_UNIVERSE: return ""; case SC_SPACE: { struct space *space = space_by_id(object_id); if (space == NULL) break; return space->def->name; } case SC_FUNCTION: { struct func *func = func_by_id(object_id); if (func == NULL) break; return func->def->name; } case SC_SEQUENCE: { struct sequence *seq = sequence_by_id(object_id); if (seq == NULL) break; return seq->def->name; } case SC_ROLE: case SC_USER: { struct user *role = user_by_id(object_id); if (role == NULL) break; return role->def->name; } default: break; } assert(false); return "(nil)"; } tarantool_1.9.1.26.g63eb81e3c/src/box/key_def.h0000664000000000000000000003777313306565107017373 0ustar rootroot#ifndef TARANTOOL_BOX_KEY_DEF_H_INCLUDED #define TARANTOOL_BOX_KEY_DEF_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/util.h" #include "error.h" #include "diag.h" #include #include #include "field_def.h" #include "coll.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /* MsgPack type names */ extern const char *mp_type_strs[]; struct key_part_def { /** Tuple field index for this part. */ uint32_t fieldno; /** Type of the tuple field. */ enum field_type type; /** Collation ID for string comparison. */ uint32_t coll_id; /** True if a key part can store NULLs. */ bool is_nullable; }; /** * Set key_part_def.coll_id to COLL_NONE if * the field does not have a collation. */ #define COLL_NONE UINT32_MAX /** Descriptor of a single part in a multipart key. */ struct key_part { /** Tuple field index for this part */ uint32_t fieldno; /** Type of the tuple field */ enum field_type type; /** Collation definition for string comparison */ struct coll *coll; /** True if a part can store NULLs. 
*/ bool is_nullable; }; struct key_def; struct tuple; /** @copydoc tuple_compare_with_key() */ typedef int (*tuple_compare_with_key_t)(const struct tuple *tuple_a, const char *key, uint32_t part_count, const struct key_def *key_def); /** @copydoc tuple_compare() */ typedef int (*tuple_compare_t)(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_def *key_def); /** @copydoc tuple_extract_key() */ typedef char *(*tuple_extract_key_t)(const struct tuple *tuple, const struct key_def *key_def, uint32_t *key_size); /** @copydoc tuple_extract_key_raw() */ typedef char *(*tuple_extract_key_raw_t)(const char *data, const char *data_end, const struct key_def *key_def, uint32_t *key_size); /** @copydoc tuple_hash() */ typedef uint32_t (*tuple_hash_t)(const struct tuple *tuple, const struct key_def *key_def); /** @copydoc key_hash() */ typedef uint32_t (*key_hash_t)(const char *key, const struct key_def *key_def); /* Definition of a multipart key. */ struct key_def { /** @see tuple_compare() */ tuple_compare_t tuple_compare; /** @see tuple_compare_with_key() */ tuple_compare_with_key_t tuple_compare_with_key; /** @see tuple_extract_key() */ tuple_extract_key_t tuple_extract_key; /** @see tuple_extract_key_raw() */ tuple_extract_key_raw_t tuple_extract_key_raw; /** @see tuple_hash() */ tuple_hash_t tuple_hash; /** @see key_hash() */ key_hash_t key_hash; /** * Minimal part count which always is unique. For example, * if a secondary index is unique, then * unique_part_count == secondary index part count. But if * a secondary index is not unique, then * unique_part_count == part count of a merged key_def. */ uint32_t unique_part_count; /** True, if at least one part can store NULL. */ bool is_nullable; /** * True, if some key parts can be absent in a tuple. These * fields assumed to be MP_NIL. */ bool has_optional_parts; /** Key fields mask. @sa column_mask.h for details. */ uint64_t column_mask; /** The size of the 'parts' array. 
*/ uint32_t part_count; /** Description of parts of a multipart index. */ struct key_part parts[]; }; /** * Duplicate key_def. * @param src Original key_def. * * @retval not NULL Duplicate of src. * @retval NULL Memory error. */ struct key_def * key_def_dup(const struct key_def *src); /** * Delete @a key_def. * @param def Key_def to delete. */ void key_def_delete(struct key_def *def); /** \cond public */ typedef struct key_def box_key_def_t; typedef struct tuple box_tuple_t; /** * Create key definition with key fields with passed typed on passed positions. * May be used for tuple format creation and/or tuple comparison. * * \param fields array with key field identifiers * \param types array with key field types (see enum field_type) * \param part_count the number of key fields * \returns a new key definition object */ box_key_def_t * box_key_def_new(uint32_t *fields, uint32_t *types, uint32_t part_count); /** * Delete key definition * * \param key_def key definition to delete */ void box_key_def_delete(box_key_def_t *key_def); /** * Compare tuples using the key definition. * @param tuple_a first tuple * @param tuple_b second tuple * @param key_def key definition * @retval 0 if key_fields(tuple_a) == key_fields(tuple_b) * @retval <0 if key_fields(tuple_a) < key_fields(tuple_b) * @retval >0 if key_fields(tuple_a) > key_fields(tuple_b) */ int box_tuple_compare(const box_tuple_t *tuple_a, const box_tuple_t *tuple_b, const box_key_def_t *key_def); /** * @brief Compare tuple with key using the key definition. 
* @param tuple tuple * @param key key with MessagePack array header * @param key_def key definition * * @retval 0 if key_fields(tuple) == parts(key) * @retval <0 if key_fields(tuple) < parts(key) * @retval >0 if key_fields(tuple) > parts(key) */ int box_tuple_compare_with_key(const box_tuple_t *tuple_a, const char *key_b, const box_key_def_t *key_def); /** \endcond public */ static inline size_t key_def_sizeof(uint32_t part_count) { return sizeof(struct key_def) + sizeof(struct key_part) * part_count; } /** * Allocate a new key_def with the given part count. */ struct key_def * key_def_new(uint32_t part_count); /** * Allocate a new key_def with the given part count * and initialize its parts. */ struct key_def * key_def_new_with_parts(struct key_part_def *parts, uint32_t part_count); /** * Dump part definitions of the given key def. */ void key_def_dump_parts(const struct key_def *def, struct key_part_def *parts); /** * Set a single key part in a key def. * @pre part_no < part_count */ void key_def_set_part(struct key_def *def, uint32_t part_no, uint32_t fieldno, enum field_type type, bool is_nullable, struct coll *coll); /** * Update 'has_optional_parts' of @a key_def with correspondence * to @a min_field_count. * @param def Key definition to update. * @param min_field_count Minimal field count. All parts out of * this value are optional. */ void key_def_update_optionality(struct key_def *def, uint32_t min_field_count); /** * An snprint-style function to print a key definition. */ int key_def_snprint_parts(char *buf, int size, const struct key_part_def *parts, uint32_t part_count); /** * Return size of key parts array when encoded in MsgPack. * See also key_def_encode_parts(). */ size_t key_def_sizeof_parts(const struct key_part_def *parts, uint32_t part_count); /** * Encode key parts array in MsgPack and return a pointer following * the end of encoded data. 
*/ char * key_def_encode_parts(char *data, const struct key_part_def *parts, uint32_t part_count); /** * 1.6.6+ * Decode parts array from tuple field and write'em to index_def structure. * Throws a nice error about invalid types, but does not check ranges of * resulting values field_no and field_type * Parts expected to be a sequence of arrays like this: * [NUM, STR, ..][NUM, STR, ..].., * OR * {field=NUM, type=STR, ..}{field=NUM, type=STR, ..}.., */ int key_def_decode_parts(struct key_part_def *parts, uint32_t part_count, const char **data, const struct field_def *fields, uint32_t field_count); /** * 1.6.0-1.6.5 * TODO: Remove it in newer version, find all 1.6.5- * Decode parts array from tuple fieldw and write'em to index_def structure. * Does not check anything since tuple must be validated before * Parts expected to be a sequence of 2 * arrays values this: * NUM, STR, NUM, STR, .., */ int key_def_decode_parts_160(struct key_part_def *parts, uint32_t part_count, const char **data, const struct field_def *fields, uint32_t field_count); /** * Returns the part in index_def->parts for the specified fieldno. * If fieldno is not in index_def->parts returns NULL. */ const struct key_part * key_def_find(const struct key_def *key_def, uint32_t fieldno); /** * Check if key definition @a first contains all parts of * key definition @a second. * @retval true if @a first is a superset of @a second * @retval false otherwise */ bool key_def_contains(const struct key_def *first, const struct key_def *second); /** * Allocate a new key_def with a set union of key parts from * first and second key defs. Parts of the new key_def consist * of the first key_def's parts and those parts of the second * key_def that were not among the first parts. * @retval not NULL Ok. * @retval NULL Memory error. */ struct key_def * key_def_merge(const struct key_def *first, const struct key_def *second); /* * Check that parts of the key match with the key definition. * @param key_def Key definition. 
* @param key MessagePack'ed data for matching. * @param part_count Field count in the key. * @param allow_nullable True if nullable parts are allowed. * * @retval 0 The key is valid. * @retval -1 The key is invalid. */ int key_validate_parts(const struct key_def *key_def, const char *key, uint32_t part_count, bool allow_nullable); /** * Return true if @a index_def defines a sequential key without * holes starting from the first field. In other words, for all * key parts index_def->parts[part_id].fieldno == part_id. * @param index_def index_def * @retval true index_def is sequential * @retval false otherwise */ static inline bool key_def_is_sequential(const struct key_def *key_def) { for (uint32_t part_id = 0; part_id < key_def->part_count; part_id++) { if (key_def->parts[part_id].fieldno != part_id) return false; } return true; } /** * Return true if @a key_def defines has fields that requires * special collation comparison. * @param key_def key_def * @retval true if the key_def has collation fields * @retval false otherwise */ static inline bool key_def_has_collation(const struct key_def *key_def) { for (uint32_t part_id = 0; part_id < key_def->part_count; part_id++) { if (key_def->parts[part_id].coll != NULL) return true; } return false; } /** A helper table for key_mp_type_validate */ extern const uint32_t key_mp_type[]; /** * @brief Checks if \a field_type (MsgPack) is compatible \a type (KeyDef). * @param type KeyDef type * @param field_type MsgPack type * @param field_no - a field number (is used to store an error message) * * @retval 0 mp_type is valid. * @retval -1 mp_type is invalid. 
*/ static inline int key_mp_type_validate(enum field_type key_type, enum mp_type mp_type, int err, uint32_t field_no, bool is_nullable) { assert(key_type < field_type_MAX); assert((size_t) mp_type < CHAR_BIT * sizeof(*key_mp_type)); uint32_t mask = key_mp_type[key_type] | (is_nullable * (1U << MP_NIL)); if (unlikely((mask & (1U << mp_type)) == 0)) { diag_set(ClientError, err, field_no, field_type_strs[key_type]); return -1; } return 0; } /** * Compare two key part arrays. * * One key part is considered to be greater than the other if: * - its fieldno is greater * - given the same fieldno, NUM < STRING * * A key part array is considered greater than the other if all * its key parts are greater, or, all common key parts are equal * but there are additional parts in the bigger array. */ int key_part_cmp(const struct key_part *parts1, uint32_t part_count1, const struct key_part *parts2, uint32_t part_count2); /** * Find out whether alteration of an index has changed it * substantially enough to warrant a rebuild or not. For example, * change of index id is not a substantial change, whereas change * of index type or incompatible change of key parts requires * a rebuild. */ bool key_part_check_compatibility(const struct key_part *old_parts, uint32_t old_part_count, const struct key_part *new_parts, uint32_t new_part_count); /** * Extract key from tuple by given key definition and return * buffer allocated on box_txn_alloc with this key. This function * has O(n) complexity, where n is the number of key parts. 
* @param tuple - tuple from which need to extract key * @param key_def - definition of key that need to extract * @param key_size - here will be size of extracted key * * @retval not NULL Success * @retval NULL Memory allocation error */ static inline char * tuple_extract_key(const struct tuple *tuple, const struct key_def *key_def, uint32_t *key_size) { return key_def->tuple_extract_key(tuple, key_def, key_size); } /** * Extract key from raw msgpuck by given key definition and return * buffer allocated on box_txn_alloc with this key. * This function has O(n*m) complexity, where n is the number of key parts * and m is the tuple size. * @param data - msgpuck data from which need to extract key * @param data_end - pointer at the end of data * @param key_def - definition of key that need to extract * @param key_size - here will be size of extracted key * * @retval not NULL Success * @retval NULL Memory allocation error */ static inline char * tuple_extract_key_raw(const char *data, const char *data_end, const struct key_def *key_def, uint32_t *key_size) { return key_def->tuple_extract_key_raw(data, data_end, key_def, key_size); } /** * Compare keys using the key definition. * @param key_a key parts with MessagePack array header * @param part_count_a the number of parts in the key_a * @param key_b key_parts with MessagePack array header * @param part_count_b the number of parts in the key_b * @param key_def key definition * * @retval 0 if key_a == key_b * @retval <0 if key_a < key_b * @retval >0 if key_a > key_b */ int key_compare(const char *key_a, const char *key_b, const struct key_def *key_def); /** * Compare tuples using the key definition. 
* @param tuple_a first tuple * @param tuple_b second tuple * @param key_def key definition * @retval 0 if key_fields(tuple_a) == key_fields(tuple_b) * @retval <0 if key_fields(tuple_a) < key_fields(tuple_b) * @retval >0 if key_fields(tuple_a) > key_fields(tuple_b) */ static inline int tuple_compare(const struct tuple *tuple_a, const struct tuple *tuple_b, const struct key_def *key_def) { return key_def->tuple_compare(tuple_a, tuple_b, key_def); } /** * @brief Compare tuple with key using the key definition. * @param tuple tuple * @param key key parts without MessagePack array header * @param part_count the number of parts in @a key * @param key_def key definition * * @retval 0 if key_fields(tuple) == parts(key) * @retval <0 if key_fields(tuple) < parts(key) * @retval >0 if key_fields(tuple) > parts(key) */ static inline int tuple_compare_with_key(const struct tuple *tuple, const char *key, uint32_t part_count, const struct key_def *key_def) { return key_def->tuple_compare_with_key(tuple, key, part_count, key_def); } #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BOX_KEY_DEF_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/evio.cc0000664000000000000000000002430213306565107016255 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "evio.h" #include "uri.h" #include "scoped_guard.h" #include #include #include #include #include #include #include static void evio_setsockopt_server(int fd, int family, int type); /** Note: this function does not throw. */ void evio_close(ev_loop *loop, struct ev_io *evio) { /* Stop I/O events. Safe to do even if not started. */ ev_io_stop(loop, evio); /* Close the socket. */ close(evio->fd); /* Make sure evio_has_fd() returns a proper value. */ evio->fd = -1; } /** * Create an endpoint for communication. * Set socket as non-block and apply protocol specific options. */ void evio_socket(struct ev_io *coio, int domain, int type, int protocol) { assert(coio->fd == -1); /* Don't leak fd if setsockopt fails. */ coio->fd = sio_socket(domain, type, protocol); evio_setsockopt_client(coio->fd, domain, type); } static void evio_setsockopt_keepalive(int fd) { int on = 1; /* * SO_KEEPALIVE to ensure connections don't hang * around for too long when a link goes away. */ sio_setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)); #ifdef __linux__ /* * On Linux, we are able to fine-tune keepalive * intervals. Set smaller defaults, since the system-wide * defaults are in days. 
*/ int keepcnt = 5; sio_setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keepcnt, sizeof(int)); int keepidle = 30; sio_setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle, sizeof(int)); int keepintvl = 60; sio_setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &keepintvl, sizeof(int)); #endif } /** Set common client socket options. */ void evio_setsockopt_client(int fd, int family, int type) { int on = 1; /* In case this throws, the socket is not leaked. */ sio_setfl(fd, O_NONBLOCK, on); if (type == SOCK_STREAM && family != AF_UNIX) { /* * SO_KEEPALIVE to ensure connections don't hang * around for too long when a link goes away. */ evio_setsockopt_keepalive(fd); /* * Lower latency is more important than higher * bandwidth, and we usually write entire * request/response in a single syscall. */ sio_setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); } } /** Set options for server sockets. */ static void evio_setsockopt_server(int fd, int family, int type) { int on = 1; /* In case this throws, the socket is not leaked. */ sio_setfl(fd, O_NONBLOCK, on); /* Allow reuse local adresses. */ sio_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); /* Send all buffered messages on socket before take * control out from close(2) or shutdown(2). */ struct linger linger = { 0, 0 }; sio_setsockopt(fd, SOL_SOCKET, SO_LINGER, &linger, sizeof(linger)); if (type == SOCK_STREAM && family != AF_UNIX) evio_setsockopt_keepalive(fd); } static inline const char * evio_service_name(struct evio_service *service) { return service->name; } /** * A callback invoked by libev when acceptor socket is ready. * Accept the socket, initialize it and pass to the on_accept * callback. */ static void evio_service_accept_cb(ev_loop * /* loop */, ev_io *watcher, int /* revents */) { struct evio_service *service = (struct evio_service *) watcher->data; while (1) { /* * Accept all pending connections from backlog during event * loop iteration. Significally speed up acceptor with enabled * io_collect_interval. 
*/ int fd = -1; try { struct sockaddr_storage addr; socklen_t addrlen = sizeof(addr); fd = sio_accept(service->ev.fd, (struct sockaddr *)&addr, &addrlen); if (fd < 0) /* EAGAIN, EWOULDLOCK, EINTR */ return; /* set common client socket options */ evio_setsockopt_client(fd, service->addr.sa_family, SOCK_STREAM); /* * Invoke the callback and pass it the accepted * socket. */ service->on_accept(service, fd, (struct sockaddr *)&addr, addrlen); } catch (Exception *e) { if (fd >= 0) close(fd); e->log(); return; } } } /* * Check if the unix socket which we file to create exists and * no one is listening on it. Unlink the file if it's the case. */ static bool evio_service_reuse_addr(struct evio_service *service) { if ((service->addr.sa_family != AF_UNIX) || (errno != EADDRINUSE)) return false; int save_errno = errno; int cl_fd = sio_socket(service->addr.sa_family, SOCK_STREAM, 0); if (connect(cl_fd, &service->addr, service->addr_len) == 0) goto err; if (errno != ECONNREFUSED) goto err; if (unlink(((struct sockaddr_un *)(&service->addr))->sun_path)) goto err; close(cl_fd); return true; err: errno = save_errno; close(cl_fd); return false; } /** * Try to bind on the configured port. * * Throws an exception if error. */ static void evio_service_bind_addr(struct evio_service *service) { say_debug("%s: binding to %s...", evio_service_name(service), sio_strfaddr(&service->addr, service->addr_len)); /* Create a socket. */ int fd = sio_socket(service->addr.sa_family, SOCK_STREAM, IPPROTO_TCP); auto fd_guard = make_scoped_guard([=]{ close(fd); }); evio_setsockopt_server(fd, service->addr.sa_family, SOCK_STREAM); if (sio_bind(fd, &service->addr, service->addr_len)) { assert(errno == EADDRINUSE); if (!evio_service_reuse_addr(service) || sio_bind(fd, &service->addr, service->addr_len)) { tnt_raise(SocketError, fd, "bind"); } } say_info("%s: bound to %s", evio_service_name(service), sio_strfaddr(&service->addr, service->addr_len)); /* Register the socket in the event loop. 
*/ ev_io_set(&service->ev, fd, EV_READ); fd_guard.is_active = false; } /** * Listen on bounded port. * * @retval 0 for success */ void evio_service_listen(struct evio_service *service) { say_debug("%s: listening on %s...", evio_service_name(service), sio_strfaddr(&service->addr, service->addr_len)); int fd = service->ev.fd; if (sio_listen(fd)) { /* raise for addr in use to */ tnt_raise(SocketError, fd, "listen"); } ev_io_start(service->loop, &service->ev); } void evio_service_init(ev_loop *loop, struct evio_service *service, const char *name, void (*on_accept)(struct evio_service *, int, struct sockaddr *, socklen_t), void *on_accept_param) { memset(service, 0, sizeof(struct evio_service)); snprintf(service->name, sizeof(service->name), "%s", name); service->loop = loop; service->on_accept = on_accept; service->on_accept_param = on_accept_param; /* * Initialize libev objects to be able to detect if they * are active or not in evio_service_stop(). */ ev_init(&service->ev, evio_service_accept_cb); ev_io_set(&service->ev, -1, 0); service->ev.data = service; } /** * Try to bind. */ void evio_service_bind(struct evio_service *service, const char *uri) { struct uri u; if (uri_parse(&u, uri) || u.service == NULL) tnt_raise(SocketError, -1, "invalid uri for bind: %s", uri); snprintf(service->serv, sizeof(service->serv), "%.*s", (int) u.service_len, u.service); if (u.host != NULL && strncmp(u.host, "*", u.host_len) != 0) { snprintf(service->host, sizeof(service->host), "%.*s", (int) u.host_len, u.host); } /* else { service->host[0] = '\0'; } */ assert(! 
ev_is_active(&service->ev)); if (strcmp(service->host, URI_HOST_UNIX) == 0) { /* UNIX domain socket */ struct sockaddr_un *un = (struct sockaddr_un *) &service->addr; service->addr_len = sizeof(*un); snprintf(un->sun_path, sizeof(un->sun_path), "%s", service->serv); un->sun_family = AF_UNIX; return evio_service_bind_addr(service); } /* IP socket */ struct addrinfo hints, *res; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_PASSIVE|AI_ADDRCONFIG; /* make no difference between empty string and NULL for host */ if (getaddrinfo(*service->host ? service->host : NULL, service->serv, &hints, &res) != 0 || res == NULL) tnt_raise(SocketError, -1, "can't resolve uri for bind"); auto addrinfo_guard = make_scoped_guard([=]{ freeaddrinfo(res); }); for (struct addrinfo *ai = res; ai != NULL; ai = ai->ai_next) { memcpy(&service->addr, ai->ai_addr, ai->ai_addrlen); service->addr_len = ai->ai_addrlen; try { return evio_service_bind_addr(service); } catch (SocketError *e) { say_error("%s: failed to bind on %s: %s", evio_service_name(service), sio_strfaddr(ai->ai_addr, ai->ai_addrlen), e->get_errmsg()); /* ignore */ } } tnt_raise(SocketError, -1, "%s: failed to bind", evio_service_name(service)); } /** It's safe to stop a service which is not started yet. */ void evio_service_stop(struct evio_service *service) { say_info("%s: stopped", evio_service_name(service)); if (ev_is_active(&service->ev)) { ev_io_stop(service->loop, &service->ev); } if (service->ev.fd >= 0) { close(service->ev.fd); ev_io_set(&service->ev, -1, 0); if (service->addr.sa_family == AF_UNIX) { unlink(((struct sockaddr_un *) &service->addr)->sun_path); } } } tarantool_1.9.1.26.g63eb81e3c/src/pickle.c0000664000000000000000000000257513306560010016413 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "pickle.h" tarantool_1.9.1.26.g63eb81e3c/src/lua/0000775000000000000000000000000013306565107015564 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/src/lua/table.lua0000664000000000000000000000401513306560010017342 0ustar rootrootlocal function table_deepcopy_internal(orig, cyclic) cyclic = cyclic or {} local copy = orig if type(orig) == 'table' then local mt, copy_function = getmetatable(orig), nil if mt then copy_function = mt.__copy end if copy_function == nil then copy = {} if cyclic[orig] ~= nil then copy = cyclic[orig] else cyclic[orig] = copy for orig_key, orig_value in pairs(orig) do local key = table_deepcopy_internal(orig_key, cyclic) copy[key] = table_deepcopy_internal(orig_value, cyclic) end if mt ~= nil then setmetatable(copy, mt) end end else copy = copy_function(orig) end end return copy end --- Deepcopy lua table (all levels) -- Supports __copy metamethod for copying custom tables with metatables -- @function deepcopy -- @table inp original table -- @shallow[opt] sep flag for shallow copy -- @returns table (copy) local function table_deepcopy(orig) return table_deepcopy_internal(orig, nil) end --- Copy any table (only top level) -- Supports __copy metamethod for copying custom tables with metatables -- @function copy -- @table inp original table -- @shallow[opt] sep flag for shallow copy -- @returns table (copy) local function table_shallowcopy(orig) local copy = orig if type(orig) == 'table' then local mt, copy_function = getmetatable(orig), nil if mt then copy_function = mt.__copy end if copy_function == nil then copy = {} for orig_key, orig_value in pairs(orig) do copy[orig_key] = orig_value end if mt ~= nil then setmetatable(copy, mt) end else copy = copy_function(orig) end end return copy end -- table library extension local table = require('table') table.copy = table_shallowcopy table.deepcopy = table_deepcopy tarantool_1.9.1.26.g63eb81e3c/src/lua/trigger.c0000664000000000000000000001404513306560010017363 0ustar rootroot/* * Copyright 
2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/trigger.h" #include "lua/utils.h" #include #include struct lbox_trigger { struct trigger base; /** A reference to Lua trigger function. */ int ref; /* * A pointer to a C function which pushes the * event data to Lua stack as arguments of the * Lua trigger. */ lbox_push_event_f push_event; /** * A pointer to a C function which is called * upon successful execution of the trigger * callback. 
*/ lbox_pop_event_f pop_event; }; static void lbox_trigger_destroy(struct trigger *ptr) { if (tarantool_L) { struct lbox_trigger *trigger = (struct lbox_trigger *) ptr; luaL_unref(tarantool_L, LUA_REGISTRYINDEX, trigger->ref); } free(ptr); } static void lbox_trigger_run(struct trigger *ptr, void *event) { struct lbox_trigger *trigger = (struct lbox_trigger *) ptr; /* * Create a new coro and reference it. Remove it * from tarantool_L stack, which is a) scarce * b) can be used by other triggers while this * trigger yields, so when it's time to clean * up the coro, we wouldn't know which stack position * it is on. * * XXX: lua_newthread() may throw if out of memory, * this needs to be wrapped with lua_pcall() as well. * Don't, since it's a stupid overhead on every trigger * invocation, and in future we plan to hack into Lua * C API to fix this. */ struct lua_State *L = lua_newthread(tarantool_L); int coro_ref = luaL_ref(tarantool_L, LUA_REGISTRYINDEX); lua_rawgeti(L, LUA_REGISTRYINDEX, trigger->ref); int top = trigger->push_event(L, event); if (luaT_call(L, top, LUA_MULTRET)) { luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref); diag_raise(); } if (trigger->pop_event != NULL && trigger->pop_event(L, event) != 0) { luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref); diag_raise(); } luaL_unref(tarantool_L, LUA_REGISTRYINDEX, coro_ref); } static struct lbox_trigger * lbox_trigger_find(struct lua_State *L, int index, struct rlist *list) { struct lbox_trigger *trigger; /** Find the old trigger, if any. 
*/ rlist_foreach_entry(trigger, list, base.link) { if (trigger->base.run == lbox_trigger_run) { lua_rawgeti(L, LUA_REGISTRYINDEX, trigger->ref); bool found = lua_equal(L, index, lua_gettop(L)); lua_pop(L, 1); if (found) return trigger; } } return NULL; } static int lbox_list_all_triggers(struct lua_State *L, struct rlist *list) { struct lbox_trigger *trigger; int count = 1; lua_newtable(L); rlist_foreach_entry_reverse(trigger, list, base.link) { if (trigger->base.run == lbox_trigger_run) { lua_rawgeti(L, LUA_REGISTRYINDEX, trigger->ref); lua_rawseti(L, -2, count); count++; } } return 1; } static void lbox_trigger_check_input(struct lua_State *L, int top) { assert(lua_checkstack(L, top)); /* Push optional arguments. */ while (lua_gettop(L) < top) lua_pushnil(L); /* * (nil, function) is OK, deletes the trigger * (function, nil), is OK, adds the trigger * (function, function) is OK, replaces the trigger * no arguments is OK, lists all trigger * anything else is error. */ if ((lua_isnil(L, top) && lua_isnil(L, top - 1)) || (lua_isfunction(L, top) && lua_isnil(L, top - 1)) || (lua_isnil(L, top) && lua_isfunction(L, top - 1)) || (lua_isfunction(L, top) && lua_isfunction(L, top - 1))) return; luaL_error(L, "trigger reset: incorrect arguments"); } int lbox_trigger_reset(struct lua_State *L, int top, struct rlist *list, lbox_push_event_f push_event, lbox_pop_event_f pop_event) { /** * If the stack is empty, pushes nils for optional * arguments */ lbox_trigger_check_input(L, top); /* If no args - return triggers table */ if (lua_isnil(L, top) && lua_isnil(L, top - 1)) return lbox_list_all_triggers(L, list); struct lbox_trigger *trg = lbox_trigger_find(L, top, list); if (trg) { luaL_unref(L, LUA_REGISTRYINDEX, trg->ref); } else if (lua_isfunction(L, top)) { return luaL_error(L, "trigger reset: Trigger is not found"); } /* * During update of a trigger, we must preserve its * relative position in the list. 
*/ if (lua_isfunction(L, top - 1)) { if (trg == NULL) { trg = (struct lbox_trigger *) malloc(sizeof(*trg)); if (trg == NULL) luaL_error(L, "failed to allocate trigger"); trg->base.run = lbox_trigger_run; trg->base.data = NULL; trg->base.destroy = lbox_trigger_destroy; trg->ref = LUA_NOREF; trg->push_event = push_event; trg->pop_event = pop_event; trigger_add(list, &trg->base); } /* * Make the new trigger occupy the top * slot of the Lua stack. */ lua_pop(L, 1); /* Reference. */ trg->ref = luaL_ref(L, LUA_REGISTRYINDEX); lua_rawgeti(L, LUA_REGISTRYINDEX, trg->ref); return 1; } else if (trg) { trigger_clear(&trg->base); free(trg); } return 0; } tarantool_1.9.1.26.g63eb81e3c/src/lua/init.c0000664000000000000000000004076313306565107016705 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/init.h" #include "lua/utils.h" #include "main.h" #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__) #include #endif #include #include #include #include #include #include #include "version.h" #include "backtrace.h" #include "coio.h" #include "lua/fiber.h" #include "lua/fiber_cond.h" #include "lua/fiber_channel.h" #include "lua/errno.h" #include "lua/socket.h" #include "lua/utils.h" #include "third_party/lua-cjson/lua_cjson.h" #include "third_party/lua-yaml/lyaml.h" #include "lua/msgpack.h" #include "lua/pickle.h" #include "lua/fio.h" #include "lua/httpc.h" #include "digest.h" #include #include #include #include /** * The single Lua state of the transaction processor (tx) thread. 
*/ struct lua_State *tarantool_L; static struct ibuf tarantool_lua_ibuf_body; struct ibuf *tarantool_lua_ibuf = &tarantool_lua_ibuf_body; /** * The fiber running the startup Lua script */ struct fiber *script_fiber; bool start_loop = true; /* contents of src/lua/ files */ extern char strict_lua[], uuid_lua[], msgpackffi_lua[], fun_lua[], crypto_lua[], digest_lua[], init_lua[], buffer_lua[], errno_lua[], fiber_lua[], httpc_lua[], log_lua[], uri_lua[], socket_lua[], help_lua[], help_en_US_lua[], tap_lua[], fio_lua[], argparse_lua[], iconv_lua[], /* jit.* library */ vmdef_lua[], bc_lua[], bcsave_lua[], dis_x86_lua[], dis_x64_lua[], dump_lua[], csv_lua[], v_lua[], clock_lua[], title_lua[], env_lua[], pwd_lua[], table_lua[], trigger_lua[], string_lua[], p_lua[], /* LuaJIT 2.1 profiler */ zone_lua[] /* LuaJIT 2.1 profiler */; static const char *lua_modules[] = { /* Make it first to affect load of all other modules */ "strict", strict_lua, "fun", fun_lua, "tarantool", init_lua, "errno", errno_lua, "fiber", fiber_lua, "env", env_lua, "string", string_lua, "table", table_lua, "buffer", buffer_lua, "msgpackffi", msgpackffi_lua, "crypto", crypto_lua, "digest", digest_lua, "uuid", uuid_lua, "log", log_lua, "uri", uri_lua, "fio", fio_lua, "csv", csv_lua, "clock", clock_lua, "socket", socket_lua, "title", title_lua, "tap", tap_lua, "help.en_US", help_en_US_lua, "help", help_lua, "internal.argparse", argparse_lua, "internal.trigger", trigger_lua, "pwd", pwd_lua, "http.client", httpc_lua, "iconv", iconv_lua, /* jit.* library */ "jit.vmdef", vmdef_lua, "jit.bc", bc_lua, "jit.bcsave", bcsave_lua, "jit.dis_x86", dis_x86_lua, "jit.dis_x64", dis_x64_lua, "jit.dump", dump_lua, "jit.v", v_lua, /* Profiler */ "jit.p", p_lua, "jit.zone", zone_lua, NULL }; /* * {{{ box Lua library: common functions */ /** * Convert lua number or string to lua cdata 64bit number. 
*/ static int lbox_tonumber64(struct lua_State *L) { luaL_checkany(L, 1); int base = luaL_optint(L, 2, -1); luaL_argcheck(L, (2 <= base && base <= 36) || base == -1, 2, "base out of range"); switch (lua_type(L, 1)) { case LUA_TNUMBER: base = (base == -1 ? 10 : base); if (base != 10) return luaL_argerror(L, 1, "string expected"); lua_settop(L, 1); /* return original value as is */ return 1; case LUA_TSTRING: { size_t argl = 0; const char *arg = luaL_checklstring(L, 1, &argl); /* Trim whitespaces at begin/end */ while (argl > 0 && isspace(arg[argl - 1])) { argl--; } while (isspace(*arg)) { arg++; argl--; } /* * Check if we're parsing custom format: * 1) '0x' or '0X' trim in case of base == 16 or base == -1 * 2) '0b' or '0B' trim in case of base == 2 or base == -1 * 3) '-' for negative numbers */ char negative = 0; if (arg[0] == '-') { arg++; argl--; negative = 1; } if (argl > 2 && arg[0] == '0') { if ((arg[1] == 'x' || arg[1] == 'X') && (base == 16 || base == -1)) { base = 16; arg += 2; argl -= 2; } else if ((arg[1] == 'b' || arg[1] == 'B') && (base == 2 || base == -1)) { base = 2; arg += 2; argl -= 2; } } base = (base == -1 ? 10 : base); errno = 0; char *arge; unsigned long long result = strtoull(arg, &arge, base); if (errno == 0 && arge == arg + argl) { if (argl == 0) { lua_pushnil(L); } else if (negative) { luaL_pushint64(L, -1 * (long long )result); } else { luaL_pushuint64(L, result); } return 1; } break; } /* LUA_TSTRING */ case LUA_TCDATA: { base = (base == -1 ? 10 : base); if (base != 10) return luaL_argerror(L, 1, "string expected"); uint32_t ctypeid = 0; luaL_checkcdata(L, 1, &ctypeid); if (ctypeid >= CTID_INT8 && ctypeid <= CTID_DOUBLE) { lua_pushvalue(L, 1); return 1; } break; } /* LUA_TCDATA */ } lua_pushnil(L); return 1; } /* }}} */ /** * Original LuaJIT/Lua logic: * * 1) If environment variable 'envname' is empty, it uses only * 2) Otherwise: * - If it contains ';;', then ';;' is replaced with ';'';' * - Otherwise is uses only what's inside this value. 
**/ static void tarantool_lua_pushpath_env(struct lua_State *L, const char *envname) { const char *path = getenv(envname); if (path != NULL) { const char *def = lua_tostring(L, -1); path = luaL_gsub(L, path, ";;", ";\1;"); luaL_gsub(L, path, "\1", def); lua_remove(L, -2); lua_remove(L, -2); } } /** * Prepend the variable list of arguments to the Lua * package search path */ static void tarantool_lua_setpaths(struct lua_State *L) { const char *home = getenv("HOME"); char cwd[PATH_MAX] = {'\0'}; getcwd(cwd, sizeof(cwd)); lua_getglobal(L, "package"); int top = lua_gettop(L); if (home != NULL) { lua_pushstring(L, home); lua_pushliteral(L, "/.luarocks/share/lua/5.1/?.lua;"); lua_pushstring(L, home); lua_pushliteral(L, "/.luarocks/share/lua/5.1/?/init.lua;"); lua_pushstring(L, home); lua_pushliteral(L, "/.luarocks/share/lua/?.lua;"); lua_pushstring(L, home); lua_pushliteral(L, "/.luarocks/share/lua/?/init.lua;"); } lua_pushliteral(L, MODULE_LUAPATH ";"); /* overwrite standard paths */ lua_concat(L, lua_gettop(L) - top); tarantool_lua_pushpath_env(L, "LUA_PATH"); lua_setfield(L, top, "path"); if (home != NULL) { lua_pushstring(L, home); lua_pushliteral(L, "/.luarocks/lib/lua/5.1/?" MODULE_LIBSUFFIX ";"); lua_pushstring(L, home); lua_pushliteral(L, "/.luarocks/lib/lua/?" 
MODULE_LIBSUFFIX ";"); } lua_pushliteral(L, MODULE_LIBPATH ";"); /* overwrite standard paths */ lua_concat(L, lua_gettop(L) - top); tarantool_lua_pushpath_env(L, "LUA_CPATH"); lua_setfield(L, top, "cpath"); assert(lua_gettop(L) == top); lua_pop(L, 1); /* package */ } static int tarantool_panic_handler(lua_State *L) { const char *problem = lua_tostring(L, -1); #ifdef ENABLE_BACKTRACE print_backtrace(); #endif say_crit("%s", problem); int level = 1; lua_Debug ar; while (lua_getstack(L, level++, &ar) == 1) { if (lua_getinfo(L, "nSl", &ar) == 0) break; say_crit("#%d %s (%s), %s:%d", level, ar.name, ar.namewhat, ar.short_src, ar.currentline); } return 1; } static int luaopen_tarantool(lua_State *L) { /* Set _G._TARANTOOL (like _VERSION) */ lua_pushstring(L, tarantool_version()); lua_setfield(L, LUA_GLOBALSINDEX, "_TARANTOOL"); static const struct luaL_Reg initlib[] = { {NULL, NULL} }; luaL_register_module(L, "tarantool", initlib); /* version */ lua_pushstring(L, tarantool_version()); lua_setfield(L, -2, "version"); /* build */ lua_pushstring(L, "build"); lua_newtable(L); /* build.target */ lua_pushstring(L, "target"); lua_pushstring(L, BUILD_INFO); lua_settable(L, -3); /* build.options */ lua_pushstring(L, "options"); lua_pushstring(L, BUILD_OPTIONS); lua_settable(L, -3); /* build.compiler */ lua_pushstring(L, "compiler"); lua_pushstring(L, COMPILER_INFO); lua_settable(L, -3); /* build.mod_format */ lua_pushstring(L, "mod_format"); lua_pushstring(L, TARANTOOL_LIBEXT); lua_settable(L, -3); /* build.flags */ lua_pushstring(L, "flags"); lua_pushstring(L, TARANTOOL_C_FLAGS); lua_settable(L, -3); lua_settable(L, -3); /* box.info.build */ return 1; } void tarantool_lua_init(const char *tarantool_bin, int argc, char **argv) { lua_State *L = luaL_newstate(); if (L == NULL) { panic("failed to initialize Lua"); } ibuf_create(tarantool_lua_ibuf, tarantool_lua_slab_cache(), 16000); luaL_openlibs(L); tarantool_lua_setpaths(L); /* Initialize ffi to enable 
luaL_pushcdata/luaL_checkcdata functions */ luaL_loadstring(L, "return require('ffi')"); lua_call(L, 0, 0); lua_register(L, "tonumber64", lbox_tonumber64); tarantool_lua_utils_init(L); tarantool_lua_fiber_init(L); tarantool_lua_fiber_cond_init(L); tarantool_lua_fiber_channel_init(L); tarantool_lua_errno_init(L); tarantool_lua_fio_init(L); tarantool_lua_socket_init(L); tarantool_lua_pickle_init(L); tarantool_lua_digest_init(L); luaopen_http_client_driver(L); lua_pop(L, 1); luaopen_msgpack(L); lua_pop(L, 1); luaopen_yaml(L); lua_pop(L, 1); luaopen_json(L); lua_pop(L, 1); #if defined(HAVE_GNU_READLINE) /* * Disable libreadline signals handlers. All signals are handled in * main thread by libev watchers. */ rl_catch_signals = 0; rl_catch_sigwinch = 0; #endif lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); for (const char **s = lua_modules; *s; s += 2) { const char *modname = *s; const char *modsrc = *(s + 1); const char *modfile = lua_pushfstring(L, "@builtin/%s.lua", modname); if (luaL_loadbuffer(L, modsrc, strlen(modsrc), modfile)) panic("Error loading Lua module %s...: %s", modname, lua_tostring(L, -1)); lua_pushstring(L, modname); lua_call(L, 1, 1); if (!lua_isnil(L, -1)) { lua_setfield(L, -3, modname); /* package.loaded.modname = t */ } else { lua_pop(L, 1); /* nil */ } lua_pop(L, 1); /* chunkname */ } lua_pop(L, 1); /* _LOADED */ luaopen_tarantool(L); lua_pop(L, 1); lua_newtable(L); lua_pushinteger(L, -1); lua_pushstring(L, tarantool_bin); lua_settable(L, -3); for (int i = 0; i < argc; i++) { lua_pushinteger(L, i); lua_pushstring(L, argv[i]); lua_settable(L, -3); } lua_setfield(L, LUA_GLOBALSINDEX, "arg"); #ifdef NDEBUG /* Unload strict after boot in release mode */ if (luaL_dostring(L, "require('strict').off()") != 0) panic("Failed to unload 'strict' Lua module"); #endif /* NDEBUG */ lua_atpanic(L, tarantool_panic_handler); /* clear possible left-overs of init */ lua_settop(L, 0); tarantool_L = L; } char *history = NULL; struct slab_cache * 
tarantool_lua_slab_cache() { return &cord()->slabc; } /** * Push argument and call a function on the top of Lua stack */ static void lua_main(lua_State *L, int argc, char **argv) { assert(lua_isfunction(L, -1)); lua_checkstack(L, argc - 1); for (int i = 1; i < argc; i++) lua_pushstring(L, argv[i]); if (luaT_call(L, lua_gettop(L) - 1, 0) != 0) { struct error *e = diag_last_error(&fiber()->diag); panic("%s", e->errmsg); } /* clear the stack from return values. */ lua_settop(L, 0); } /** * Execute start-up script. */ static int run_script_f(va_list ap) { struct lua_State *L = va_arg(ap, struct lua_State *); const char *path = va_arg(ap, const char *); bool interactive = va_arg(ap, int); int optc = va_arg(ap, int); char **optv = va_arg(ap, char **); int argc = va_arg(ap, int); char **argv = va_arg(ap, char **); struct diag *diag = &fiber()->diag; /* * Load libraries and execute chunks passed by -l and -e * command line options */ for (int i = 0; i < optc; i += 2) { assert(optv[i][0] == '-' && optv[i][2] == '\0'); switch (optv[i][1]) { case 'l': /* * Load library */ lua_getglobal(L, "require"); lua_pushstring(L, optv[i + 1]); if (luaT_call(L, 1, 1) != 0) { struct error *e = diag_last_error(diag); panic("%s", e->errmsg); } /* Non-standard: set name = require('name') */ lua_setglobal(L, optv[i + 1]); lua_settop(L, 0); break; case 'e': /* * Execute chunk */ if (luaL_loadbuffer(L, optv[i + 1], strlen(optv[i + 1]), "=(command line)") != 0) { panic("%s", lua_tostring(L, -1)); } if (luaT_call(L, 0, 0) != 0) { struct error *e = diag_last_error(diag); panic("%s", e->errmsg); } lua_settop(L, 0); break; default: unreachable(); /* checked by getopt() in main() */ } } /* * Return control to tarantool_lua_run_script. * tarantool_lua_run_script then will start an auxiliary event * loop and re-schedule this fiber. */ fiber_sleep(0.0); if (path && strcmp(path, "-") != 0 && access(path, F_OK) == 0) { /* Execute script. 
*/ if (luaL_loadfile(L, path) != 0) panic("%s", lua_tostring(L, -1)); lua_main(L, argc, argv); } else if (!isatty(STDIN_FILENO) || (path && strcmp(path, "-") == 0)) { /* Execute stdin */ if (luaL_loadfile(L, NULL) != 0) panic("%s", lua_tostring(L, -1)); lua_main(L, argc, argv); } else { interactive = true; } /* * Start interactive mode when it was explicitly requested * by "-i" option or stdin is TTY or there are no script. */ if (interactive) { say_crit("%s %s\ntype 'help' for interactive help", tarantool_package(), tarantool_version()); /* get console.start from package.loaded */ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); lua_getfield(L, -1, "console"); lua_getfield(L, -1, "start"); lua_remove(L, -2); /* remove package.loaded.console */ lua_remove(L, -2); /* remove package.loaded */ start_loop = false; lua_main(L, argc, argv); } /* * Lua script finished. Stop the auxiliary event loop and * return control back to tarantool_lua_run_script. */ ev_break(loop(), EVBREAK_ALL); return 0; } void tarantool_lua_run_script(char *path, bool interactive, int optc, char **optv, int argc, char **argv) { const char *title = path ? basename(path) : "interactive"; /* * init script can call box.fiber.yield (including implicitly via * box.insert, box.update, etc...), but box.fiber.yield() today, * when called from 'sched' fiber crashes the server. * To work this problem around we must run init script in * a separate fiber. */ script_fiber = fiber_new(title, run_script_f); if (script_fiber == NULL) panic("%s", diag_last_error(diag_get())->errmsg); fiber_start(script_fiber, tarantool_L, path, interactive, optc, optv, argc, argv); /* * Run an auxiliary event loop to re-schedule run_script fiber. * When this fiber finishes, it will call ev_break to stop the loop. */ ev_run(loop(), 0); /* The fiber running the startup script has ended. */ script_fiber = NULL; } void tarantool_lua_free() { /* * Some part of the start script panicked, and called * exit(). 
The call stack in this case leads us back to * luaL_call() in run_script(). Trying to free a Lua state * from within luaL_call() is not the smartest idea (@sa * gh-612). */ if (script_fiber) return; /* * Got to be done prior to anything else, since GC * handlers can refer to other subsystems (e.g. fibers). */ if (tarantool_L) { /* collects garbage, invoking userdata gc */ lua_close(tarantool_L); } tarantool_L = NULL; #if 0 /* Temporarily moved to tarantool_free(), tarantool_lua_free() not * being called due to cleanup order issues */ if (isatty(STDIN_FILENO)) { /* * Restore terminal state. Doesn't hurt if exiting not * due to a signal. */ rl_cleanup_after_signal(); } #endif } tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber_cond.h0000664000000000000000000000330213306560010020011 0ustar rootroot#ifndef TARANTOOL_LUA_FIBER_COND_H_INCLUDED #define TARANTOOL_LUA_FIBER_COND_H_INCLUDED 1 /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void tarantool_lua_fiber_cond_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_FIBER_COND_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber_channel.c0000664000000000000000000001745413306560010020506 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/fiber_channel.h" #include "lua/fiber.h" #include #include #include #include /* Help CC understand control flow better, prevent warnings about * uninitialized variables. */ NORETURN int luaL_error(lua_State *L, const char *fmt, ...); #include "lua/utils.h" #include #include static const char channel_typename[] = "fiber.channel"; static int luaT_fiber_channel(struct lua_State *L) { lua_Integer size = 0; if (lua_isnoneornil(L, 1)) { size = 0; } else if (lua_isnumber(L, 1)) { size = lua_tointeger(L, -1); if (size < 0) luaL_error(L, "fiber.channel(size): negative size"); } else { luaL_error(L, "fiber.channel(size): bad arguments"); } struct fiber_channel *ch = (struct fiber_channel *) lua_newuserdata(L, fiber_channel_memsize(size)); if (ch == NULL) luaL_error(L, "fiber.channel: not enough memory"); fiber_channel_create(ch, size); luaL_getmetatable(L, channel_typename); lua_setmetatable(L, -2); return 1; } static inline struct fiber_channel * luaT_checkfiberchannel(struct lua_State *L, int index, const char *source) { assert(index > 0); if (index > lua_gettop(L)) luaL_error(L, "usage: %s", source); /* Note: checkudata errs on mismatch, no point in checking res */ return (struct fiber_channel *) luaL_checkudata(L, index, channel_typename); } static int luaT_fiber_channel_gc(struct lua_State *L) { struct fiber_channel *ch = (struct fiber_channel *) luaL_checkudata(L, -1, channel_typename); if (ch) fiber_channel_destroy(ch); return 0; } static int 
luaT_fiber_channel_is_full(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:is_full()"); lua_pushboolean(L, fiber_channel_is_full(ch)); return 1; } static int luaT_fiber_channel_is_empty(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:is_empty()"); lua_pushboolean(L, fiber_channel_is_empty(ch)); return 1; } static void lua_ipc_value_destroy(struct ipc_msg *base) { struct ipc_value *value = (struct ipc_value *) base; luaL_unref(tarantool_L, LUA_REGISTRYINDEX, value->i); ipc_value_delete(base); } static int luaT_fiber_channel_put(struct lua_State *L) { static const char usage[] = "channel:put(var [, timeout])"; int rc = -1; struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, usage); ev_tstamp timeout; /* val */ if (lua_gettop(L) < 2) luaL_error(L, "usage: %s", usage); /* timeout (optional) */ if (lua_isnoneornil(L, 3)) { timeout = TIMEOUT_INFINITY; } else if (lua_isnumber(L, 3)) { timeout = lua_tonumber(L, 3); if (timeout < 0) luaL_error(L, "usage: %s", usage); } else { luaL_error(L, "usage: %s", usage); } struct ipc_value *value = ipc_value_new(); if (value == NULL) goto end; value->base.destroy = lua_ipc_value_destroy; lua_pushvalue(L, 2); value->i = luaL_ref(L, LUA_REGISTRYINDEX); rc = fiber_channel_put_msg_timeout(ch, &value->base, timeout); if (rc) { value->base.destroy(&value->base); #if 0 /* Treat everything except timeout as error. 
*/ if (!type_cast(TimedOut, diag_last_error(&fiber()->diag))) diag_raise(); #else luaL_testcancel(L); #endif } end: lua_pushboolean(L, rc == 0); return 1; } static int luaT_fiber_channel_get(struct lua_State *L) { static const char usage[] = "channel:get([timeout])"; struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, usage); ev_tstamp timeout; /* timeout (optional) */ if (lua_isnoneornil(L, 2)) { timeout = TIMEOUT_INFINITY; } else if (lua_isnumber(L, 2)) { timeout = lua_tonumber(L, 2); if (timeout < 0) luaL_error(L, "usage: %s", usage); } else { luaL_error(L, "usage: %s", usage); } struct ipc_value *value; if (fiber_channel_get_msg_timeout(ch, (struct ipc_msg **) &value, timeout)) { #if 0 /* Treat everything except timeout as error. */ if (!type_cast(TimedOut, diag_last_error(&fiber()->diag))) diag_raise(); #else luaL_testcancel(L); #endif lua_pushnil(L); return 1; } lua_rawgeti(L, LUA_REGISTRYINDEX, value->i); value->base.destroy(&value->base); return 1; } static int luaT_fiber_channel_has_readers(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:has_readers()"); lua_pushboolean(L, fiber_channel_has_readers(ch)); return 1; } static int luaT_fiber_channel_has_writers(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:has_writers()"); lua_pushboolean(L, fiber_channel_has_writers(ch)); return 1; } static int luaT_fiber_channel_size(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:size()"); lua_pushinteger(L, fiber_channel_size(ch)); return 1; } static int luaT_fiber_channel_count(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:count()"); lua_pushinteger(L, fiber_channel_count(ch)); return 1; } static int luaT_fiber_channel_close(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:close()"); /* Shutdown the channel for writing and wakeup waiters */ fiber_channel_close(ch); 
return 0; } static int luaT_fiber_channel_is_closed(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, "channel:is_closed()"); lua_pushboolean(L, fiber_channel_is_closed(ch)); return 1; } static int luaT_fiber_channel_to_string(struct lua_State *L) { struct fiber_channel *ch = luaT_checkfiberchannel(L, 1, ""); if (fiber_channel_is_closed(ch)) { lua_pushstring(L, "channel: closed"); } else { lua_pushfstring(L, "channel: %d", (int)fiber_channel_count(ch)); } return 1; } void tarantool_lua_fiber_channel_init(struct lua_State *L) { static const struct luaL_Reg channel_meta[] = { {"__gc", luaT_fiber_channel_gc}, {"__tostring", luaT_fiber_channel_to_string}, {"is_full", luaT_fiber_channel_is_full}, {"is_empty", luaT_fiber_channel_is_empty}, {"put", luaT_fiber_channel_put}, {"get", luaT_fiber_channel_get}, {"has_readers", luaT_fiber_channel_has_readers}, {"has_writers", luaT_fiber_channel_has_writers}, {"count", luaT_fiber_channel_count}, {"size", luaT_fiber_channel_size}, {"close", luaT_fiber_channel_close}, {"is_closed", luaT_fiber_channel_is_closed}, {NULL, NULL} }; luaL_register_type(L, channel_typename, channel_meta); static const struct luaL_Reg ipc_lib[] = { {"channel", luaT_fiber_channel}, {NULL, NULL} }; luaL_register_module(L, "fiber", ipc_lib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/lua/trigger.h0000664000000000000000000000701313306560010017365 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_TRIGGER_H #define INCLUDES_TARANTOOL_LUA_TRIGGER_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; /** * The job of lbox_push_event_f is to push trigger arguments * to Lua stack. */ typedef int (*lbox_push_event_f)(struct lua_State *L, void *event); /** * If not NULL, lbox_pop_event_f will be called after successful * execution of the trigger callback. It can be used to parse the * return value of the trigger callback and update the 'event' * accordingly. If this function returns a non-zero value, an * error will be raised for the caller. */ typedef int (*lbox_pop_event_f)(struct lua_State *L, void *event); /** * Create a Lua trigger, replace an existing one, * or delete a trigger. * * The function accepts a Lua stack with at least * one argument. * * The argument at the top of the stack defines the old value * of the trigger, which serves as a search key if the trigger * needs to be updated. If it is not present or is nil, a new * trigger is created. 
* The argument just below the top must reference a Lua function * or closure for which the trigger needs to be set. * If argument below the top is nil, but argument at the top is an * existing trigger, it's erased. * * An existing trigger is searched on the 'list' by checking * trigger->data of all triggers on the list which have the same * trigger->run function as passed in in 'run' argument. * * When a new trigger is set, the function passed in the first * value on Lua stack is referenced, and the reference is saved * in trigger->data (if an old trigger is found it's current Lua * function is first dereferenced, the reference is destroyed). * * @param top defines the top of the stack. If the actual * lua_gettop(L) is less than 'top', the stack is filled * with nils (this allows the second argument to be * optional). */ int lbox_trigger_reset(struct lua_State *L, int top, struct rlist *list, lbox_push_event_f push_f, lbox_pop_event_f pop_f); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_TRIGGER_H */ tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber_channel.h0000664000000000000000000000334113306560010020501 0ustar rootroot#ifndef TARANTOOL_LUA_FIBER_CHANNEL_H_INCLUDED #define TARANTOOL_LUA_FIBER_CHANNEL_H_INCLUDED /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void tarantool_lua_fiber_channel_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_FIBER_CHANNEL_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber_cond.c0000664000000000000000000000713113306560010020010 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/fiber_cond.h" #include "third_party/tarantool_ev.h" #include "lua/utils.h" #include "fiber.h" #include static const char cond_typename[] = "fiber.cond"; static int luaT_fiber_cond_new(struct lua_State *L) { struct fiber_cond *e = lua_newuserdata(L, sizeof(*e)); if (e == NULL) luaL_error(L, "fiber.cond: not enough memory"); fiber_cond_create(e); luaL_getmetatable(L, cond_typename); lua_setmetatable(L, -2); return 1; } static inline struct fiber_cond * luaT_checkfibercond(struct lua_State *L, int index, const char *source) { if (index > lua_gettop(L)) luaL_error(L, "usage: %s", source); return (struct fiber_cond *)luaL_checkudata(L, index, cond_typename); } static int luaT_fiber_cond_gc(struct lua_State *L) { fiber_cond_destroy(luaT_checkfibercond(L, 1, "cond:destroy()")); return 0; } static int luaT_fiber_cond_signal(struct lua_State *L) { fiber_cond_signal(luaT_checkfibercond(L, 1, "cond:signal()")); return 0; } static int luaT_fiber_cond_broadcast(struct lua_State *L) { fiber_cond_broadcast(luaT_checkfibercond(L, 1, "cond:broadcast()")); return 0; } static int luaT_fiber_cond_wait(struct lua_State *L) { static const char usage[] = "cond:wait([timeout])"; int rc; struct fiber_cond *e = luaT_checkfibercond(L, 1, usage); ev_tstamp timeout = TIMEOUT_INFINITY; if (!lua_isnoneornil(L, 2)) { if (!lua_isnumber(L, 2) || (timeout = lua_tonumber(L, 2)) < .0) { luaL_error(L, "usage: %s", usage); } } rc = fiber_cond_wait_timeout(e, timeout); if (rc 
!= 0) luaL_testcancel(L); lua_pushboolean(L, rc == 0); return 1; } static int luaT_fiber_cond_tostring(struct lua_State *L) { struct fiber_cond *cond = luaT_checkfibercond(L, 1, ""); (void)cond; lua_pushstring(L, "cond"); return 1; } void tarantool_lua_fiber_cond_init(struct lua_State *L) { static const struct luaL_Reg cond_meta[] = { {"__gc", luaT_fiber_cond_gc}, {"__tostring", luaT_fiber_cond_tostring}, {"signal", luaT_fiber_cond_signal}, {"broadcast", luaT_fiber_cond_broadcast}, {"wait", luaT_fiber_cond_wait}, {NULL, NULL} }; luaL_register_type(L, cond_typename, cond_meta); static const struct luaL_Reg cond_lib[] = { {"cond", luaT_fiber_cond_new}, {NULL, NULL} }; luaL_register_module(L, "fiber", cond_lib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/lua/digest.h0000664000000000000000000000335413306560010017205 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_DIGEST_H #define INCLUDES_TARANTOOL_LUA_DIGEST_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #if defined(__cplusplus) extern "C" { #endif unsigned char * SHA1internal(const unsigned char *d, size_t n, unsigned char *md); struct lua_State; void tarantool_lua_digest_init(struct lua_State *L); #if defined(__cplusplus) } #endif #endif /* INCLUDES_TARANTOOL_LUA_DIGEST_H */ tarantool_1.9.1.26.g63eb81e3c/src/lua/fio.h0000664000000000000000000000327113306560010016501 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_FIO_H #define INCLUDES_TARANTOOL_LUA_FIO_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void tarantool_lua_fio_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_FIO_H */ tarantool_1.9.1.26.g63eb81e3c/src/lua/errno.c0000664000000000000000000001346413306560010017051 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "errno.h" #include #include #include #include #include #include "lua/utils.h" extern char errno_lua[]; void tarantool_lua_errno_init(struct lua_State *L) { static const struct { char name[32]; int value; } elist[] = { #ifdef E2BIG { "E2BIG", E2BIG }, #endif #ifdef EACCES { "EACCES", EACCES }, #endif #ifdef EADDRINUSE { "EADDRINUSE", EADDRINUSE }, #endif #ifdef EADDRNOTAVAIL { "EADDRNOTAVAIL", EADDRNOTAVAIL }, #endif #ifdef EAFNOSUPPORT { "EAFNOSUPPORT", EAFNOSUPPORT }, #endif #ifdef EAGAIN { "EAGAIN", EAGAIN }, #endif #ifdef EALREADY { "EALREADY", EALREADY }, #endif #ifdef EBADF { "EBADF", EBADF }, #endif #ifdef EBADMSG { "EBADMSG", EBADMSG }, #endif #ifdef EBUSY { "EBUSY", EBUSY }, #endif #ifdef ECANCELED { "ECANCELED", ECANCELED }, #endif #ifdef ECHILD { "ECHILD", ECHILD }, #endif #ifdef ECONNABORTED { "ECONNABORTED", ECONNABORTED }, #endif #ifdef ECONNREFUSED { "ECONNREFUSED", ECONNREFUSED }, #endif #ifdef ECONNRESET { "ECONNRESET", ECONNRESET }, #endif #ifdef EDEADLK { "EDEADLK", EDEADLK }, #endif #ifdef EDESTADDRREQ { "EDESTADDRREQ", EDESTADDRREQ }, #endif #ifdef EDOM { "EDOM", EDOM }, #endif #ifdef EDQUOT { "EDQUOT", EDQUOT }, #endif #ifdef EEXIST { "EEXIST", EEXIST }, #endif #ifdef EFAULT { "EFAULT", EFAULT }, #endif #ifdef EFBIG { "EFBIG", EFBIG }, #endif #ifdef EHOSTUNREACH { "EHOSTUNREACH", EHOSTUNREACH }, #endif #ifdef EIDRM { "EIDRM", EIDRM }, #endif #ifdef EILSEQ { "EILSEQ", EILSEQ }, #endif #ifdef EINPROGRESS { "EINPROGRESS", EINPROGRESS 
}, #endif #ifdef EINTR { "EINTR", EINTR }, #endif #ifdef EINVAL { "EINVAL", EINVAL }, #endif #ifdef EIO { "EIO", EIO }, #endif #ifdef EISCONN { "EISCONN", EISCONN }, #endif #ifdef EISDIR { "EISDIR", EISDIR }, #endif #ifdef ELOOP { "ELOOP", ELOOP }, #endif #ifdef EMFILE { "EMFILE", EMFILE }, #endif #ifdef EMLINK { "EMLINK", EMLINK }, #endif #ifdef EMSGSIZE { "EMSGSIZE", EMSGSIZE }, #endif #ifdef EMULTIHOP { "EMULTIHOP", EMULTIHOP }, #endif #ifdef ENAMETOOLONG { "ENAMETOOLONG", ENAMETOOLONG }, #endif #ifdef ENETDOWN { "ENETDOWN", ENETDOWN }, #endif #ifdef ENETRESET { "ENETRESET", ENETRESET }, #endif #ifdef ENETUNREACH { "ENETUNREACH", ENETUNREACH }, #endif #ifdef ENFILE { "ENFILE", ENFILE }, #endif #ifdef ENOBUFS { "ENOBUFS", ENOBUFS }, #endif #ifdef ENODATA { "ENODATA", ENODATA }, #endif #ifdef ENODEV { "ENODEV", ENODEV }, #endif #ifdef ENOENT { "ENOENT", ENOENT }, #endif #ifdef ENOEXEC { "ENOEXEC", ENOEXEC }, #endif #ifdef ENOLCK { "ENOLCK", ENOLCK }, #endif #ifdef ENOLINK { "ENOLINK", ENOLINK }, #endif #ifdef ENOMEM { "ENOMEM", ENOMEM }, #endif #ifdef ENOMSG { "ENOMSG", ENOMSG }, #endif #ifdef ENOPROTOOPT { "ENOPROTOOPT", ENOPROTOOPT }, #endif #ifdef ENOSPC { "ENOSPC", ENOSPC }, #endif #ifdef ENOSR { "ENOSR", ENOSR }, #endif #ifdef ENOSTR { "ENOSTR", ENOSTR }, #endif #ifdef ENOSYS { "ENOSYS", ENOSYS }, #endif #ifdef ENOTCONN { "ENOTCONN", ENOTCONN }, #endif #ifdef ENOTDIR { "ENOTDIR", ENOTDIR }, #endif #ifdef ENOTEMPTY { "ENOTEMPTY", ENOTEMPTY }, #endif #ifdef ENOTSOCK { "ENOTSOCK", ENOTSOCK }, #endif #ifdef ENOTSUP { "ENOTSUP", ENOTSUP }, #endif #ifdef ENOTTY { "ENOTTY", ENOTTY }, #endif #ifdef ENXIO { "ENXIO", ENXIO }, #endif #ifdef EOPNOTSUPP { "EOPNOTSUPP", EOPNOTSUPP }, #endif #ifdef EOVERFLOW { "EOVERFLOW", EOVERFLOW }, #endif #ifdef EPERM { "EPERM", EPERM }, #endif #ifdef EPIPE { "EPIPE", EPIPE }, #endif #ifdef EPROTO { "EPROTO", EPROTO }, #endif #ifdef EPROTONOSUPPORT { "EPROTONOSUPPORT", EPROTONOSUPPORT }, #endif #ifdef EPROTOTYPE { "EPROTOTYPE", 
EPROTOTYPE }, #endif #ifdef ERANGE { "ERANGE", ERANGE }, #endif #ifdef EROFS { "EROFS", EROFS }, #endif #ifdef ESPIPE { "ESPIPE", ESPIPE }, #endif #ifdef ESRCH { "ESRCH", ESRCH }, #endif #ifdef ESTALE { "ESTALE", ESTALE }, #endif #ifdef ETIME { "ETIME", ETIME }, #endif #ifdef ETIMEDOUT { "ETIMEDOUT", ETIMEDOUT }, #endif #ifdef ETXTBSY { "ETXTBSY", ETXTBSY }, #endif #ifdef EWOULDBLOCK { "EWOULDBLOCK", EWOULDBLOCK }, #endif #ifdef EXDEV { "EXDEV", EXDEV }, #endif { "", 0 } }; static const luaL_Reg errnolib[] = { { NULL, NULL} }; luaL_register_module(L, "errno", errnolib); for (int i = 0; elist[i].name[0]; i++) { lua_pushstring(L, elist[i].name); lua_pushinteger(L, elist[i].value); lua_rawset(L, -3); } lua_pop(L, -1); } tarantool_1.9.1.26.g63eb81e3c/src/lua/msgpack.h0000664000000000000000000001273313306560010017354 0ustar rootroot#ifndef TARANTOOL_LUA_MSGPACK_H_INCLUDED #define TARANTOOL_LUA_MSGPACK_H_INCLUDED /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include "utils.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #include struct luaL_serializer; /** * Default instance of msgpack serializer (msgpack = require('msgpack')). * This instance is used by all box's Lua/C API bindings (e.g. space:replace()). * All changes made by msgpack.cfg{} function are also affect box's bindings * (this is a feature). */ extern struct luaL_serializer *luaL_msgpack_default; /** * A streaming API so that it's possible to encode to any output * stream. */ /** * Ask the allocator to reserve at least size bytes. It can reserve * more, and update *size with the new size. */ typedef void *(*luamp_reserve_f)(void *ctx, size_t *size); /** Actually use the bytes. */ typedef void *(*luamp_alloc_f)(void *ctx, size_t size); /** Actually use the bytes. */ typedef void (*luamp_error_f)(void *error_ctx); struct mpstream { /** * When pos >= end, or required size doesn't fit in * pos..end range alloc() is called to advance the stream * and reserve() to get a new chunk. 
*/ char *buf, *pos, *end; void *ctx; luamp_reserve_f reserve; luamp_alloc_f alloc; luamp_error_f error; void *error_ctx; }; /** * luaL_error() */ void luamp_error(void *); void mpstream_init(struct mpstream *stream, void *ctx, luamp_reserve_f reserve, luamp_alloc_f alloc, luamp_error_f error, void *error_ctx); void mpstream_reset(struct mpstream *stream); void mpstream_reserve_slow(struct mpstream *stream, size_t size); static inline void mpstream_flush(struct mpstream *stream) { stream->alloc(stream->ctx, stream->pos - stream->buf); stream->buf = stream->pos; } static inline char * mpstream_reserve(struct mpstream *stream, size_t size) { if (stream->pos + size > stream->end) mpstream_reserve_slow(stream, size); return stream->pos; } static inline void mpstream_advance(struct mpstream *stream, size_t size) { assert(stream->pos + size <= stream->end); stream->pos += size; } enum { LUAMP_ALLOC_FACTOR = 256 }; void luamp_encode_array(struct luaL_serializer *cfg, struct mpstream *stream, uint32_t size); void luamp_encode_map(struct luaL_serializer *cfg, struct mpstream *stream, uint32_t size); void luamp_encode_uint(struct luaL_serializer *cfg, struct mpstream *stream, uint64_t num); void luamp_encode_int(struct luaL_serializer *cfg, struct mpstream *stream, int64_t num); void luamp_encode_float(struct luaL_serializer *cfg, struct mpstream *stream, float num); void luamp_encode_double(struct luaL_serializer *cfg, struct mpstream *stream, double num); void luamp_encode_str(struct luaL_serializer *cfg, struct mpstream *stream, const char *str, uint32_t len); void luamp_encode_nil(struct luaL_serializer *cfg, struct mpstream *stream); void luamp_encode_bool(struct luaL_serializer *cfg, struct mpstream *stream, bool val); /* low-level function needed for execute_lua_call() */ enum mp_type luamp_encode_r(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, struct luaL_field *field, int level); enum mp_type luamp_encode(struct lua_State *L, struct 
luaL_serializer *cfg, struct mpstream *stream, int index); void luamp_decode(struct lua_State *L, struct luaL_serializer *cfg, const char **data); typedef enum mp_type (*luamp_encode_extension_f)(struct lua_State *, int, struct mpstream *); /** * @brief Set a callback that executed by encoder on unsupported Lua type * @param handler callback */ void luamp_set_encode_extension(luamp_encode_extension_f handler); typedef void (*luamp_decode_extension_f)(struct lua_State *L, const char **data); /** * @brief Set a callback that executed by decode on unsupported extension * @param handler callback */ void luamp_set_decode_extension(luamp_decode_extension_f handler); /** * @brief Lua/C API exports * @param L Lua stack * @return 1 */ LUALIB_API int luaopen_msgpack(lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_MSGPACK_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/httpc.c0000664000000000000000000002254213306560010017043 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /** * Unique name for userdata metatables */ #define DRIVER_LUA_UDATA_NAME "httpc" #include #include "src/httpc.h" #include "say.h" #include "lua/utils.h" #include "lua/httpc.h" #include "src/fiber.h" /** Internal util functions * {{{ */ static inline struct httpc_env* luaT_httpc_checkenv(lua_State *L) { return (struct httpc_env *) luaL_checkudata(L, 1, DRIVER_LUA_UDATA_NAME); } static inline void lua_add_key_u64(lua_State *L, const char *key, uint64_t value) { lua_pushstring(L, key); lua_pushinteger(L, value); lua_settable(L, -3); } static void parse_headers(lua_State *L, char *buffer, size_t len) { struct http_parser parser; char *end_buf = buffer + len; lua_pushstring(L, "headers"); lua_newtable(L); while (true) { int rc = http_parse_header_line(&parser, &buffer, end_buf); if (rc == HTTP_PARSE_INVALID) { continue; } if (rc == HTTP_PARSE_DONE) { break; } if (rc == HTTP_PARSE_OK) { lua_pushlstring(L, parser.header_name, parser.header_name_idx); /* check value of header, if exists */ lua_pushlstring(L, parser.header_name, parser.header_name_idx); lua_gettable(L, -3); int value_len = parser.header_value_end - parser.header_value_start; /* table of values to handle duplicates*/ if (lua_isnil(L, -1)) { lua_pop(L, 1); lua_newtable(L); lua_pushinteger(L, 1); lua_pushlstring(L, parser.header_value_start, value_len); lua_settable(L, -3); } else if (lua_istable(L, -1)) { lua_pushinteger(L, lua_objlen(L, -1) + 1); lua_pushlstring(L, parser.header_value_start, 
value_len); lua_settable(L, -3); } /*headers[parser.header] = {value}*/ lua_settable(L, -3); } } /* headers */ lua_settable(L, -3); lua_pushstring(L, "proto"); lua_newtable(L); lua_pushinteger(L, 1); lua_pushinteger(L, (parser.http_major > 0) ? parser.http_major: 0); lua_settable(L, -3); lua_pushinteger(L, 2); lua_pushinteger(L, (parser.http_minor > 0) ? parser.http_minor: 0); lua_settable(L, -3); /* proto */ lua_settable(L, -3); } /* }}} */ /** lib Lua API {{{ */ static int luaT_httpc_request(lua_State *L) { struct httpc_env *ctx = luaT_httpc_checkenv(L); if (ctx == NULL) return luaL_error(L, "can't get httpc environment"); const char *method = luaL_checkstring(L, 2); const char *url = luaL_checkstring(L, 3); struct httpc_request *req = httpc_request_new(ctx, method, url); if (req == NULL) return luaT_error(L); double timeout = TIMEOUT_INFINITY; if (lua_isstring(L, 4)) { size_t len = 0; const char *body = lua_tolstring(L, 4, &len); if (len > 0 && httpc_set_body(req, body, len) != 0) { httpc_request_delete(req); return luaT_error(L); } } else if (!lua_isnil(L, 4)) { httpc_request_delete(req); return luaL_error(L, "fourth argument must be a string"); } if (!lua_istable(L, 5)) { httpc_request_delete(req); return luaL_error(L, "fifth argument must be a table"); } lua_getfield(L, 5, "headers"); if (!lua_isnil(L, -1)) { lua_pushnil(L); while (lua_next(L, -2) != 0) { if (httpc_set_header(req, "%s: %s", lua_tostring(L, -2), lua_tostring(L, -1)) < 0) { httpc_request_delete(req); return luaT_error(L); } lua_pop(L, 1); } } lua_pop(L, 1); lua_getfield(L, 5, "ca_path"); if (!lua_isnil(L, -1)) httpc_set_ca_path(req, lua_tostring(L, -1)); lua_pop(L, 1); lua_getfield(L, 5, "ca_file"); if (!lua_isnil(L, -1)) httpc_set_ca_file(req, lua_tostring(L, -1)); lua_pop(L, 1); lua_getfield(L, 5, "unix_socket"); if (!lua_isnil(L, -1)) { if(httpc_set_unix_socket(req, lua_tostring(L, -1))) { httpc_request_delete(req); return luaT_error(L); } } lua_pop(L, 1); lua_getfield(L, 5, "verify_host"); 
if (!lua_isnil(L, -1)) httpc_set_verify_host(req, lua_toboolean(L, -1) == 1 ? 2 : 0); lua_pop(L, 1); lua_getfield(L, 5, "verify_peer"); if (!lua_isnil(L, -1)) httpc_set_verify_peer(req, lua_toboolean(L, -1)); lua_pop(L, 1); lua_getfield(L, 5, "ssl_key"); if (!lua_isnil(L, -1)) httpc_set_ssl_key(req, lua_tostring(L, -1)); lua_pop(L, 1); lua_getfield(L, 5, "ssl_cert"); if (!lua_isnil(L, -1)) httpc_set_ssl_cert(req, lua_tostring(L, -1)); lua_pop(L, 1); long keepalive_idle = 0; long keepalive_interval = 0; lua_getfield(L, 5, "keepalive_idle"); if (!lua_isnil(L, -1)) keepalive_idle = (long) lua_tonumber(L, -1); lua_pop(L, 1); lua_getfield(L, 5, "keepalive_interval"); if (!lua_isnil(L, -1)) keepalive_interval = (long) lua_tonumber(L, -1); lua_pop(L, 1); if (httpc_set_keepalive(req, keepalive_idle, keepalive_interval) < 0) { httpc_request_delete(req); return luaT_error(L); } lua_getfield(L, 5, "low_speed_limit"); if (!lua_isnil(L, -1)) httpc_set_low_speed_limit(req, (long) lua_tonumber(L, -1)); lua_pop(L, 1); lua_getfield(L, 5, "low_speed_time"); if (!lua_isnil(L, -1)) httpc_set_low_speed_time(req, (long) lua_tonumber(L, -1)); lua_pop(L, 1); lua_getfield(L, 5, "timeout"); if (!lua_isnil(L, -1)) timeout = lua_tonumber(L, -1); lua_pop(L, 1); lua_getfield(L, 5, "verbose"); if (!lua_isnil(L, -1) && lua_isboolean(L, -1)) httpc_set_verbose(req, true); lua_pop(L, 1); if (httpc_execute(req, timeout) != 0) { httpc_request_delete(req); return luaT_error(L); } lua_newtable(L); lua_pushstring(L, "status"); lua_pushinteger(L, req->status); lua_settable(L, -3); lua_pushstring(L, "reason"); lua_pushstring(L, req->reason); lua_settable(L, -3); size_t headers_len = region_used(&req->resp_headers); if (headers_len > 0) { char *headers = region_join(&req->resp_headers, headers_len); if (headers == NULL) { diag_set(OutOfMemory, headers_len, "region", "headers"); httpc_request_delete(req); return luaT_error(L); } parse_headers(L, headers, headers_len); } size_t body_len = 
region_used(&req->resp_body); if (body_len > 0) { char *body = region_join(&req->resp_body, body_len); if (body == NULL) { diag_set(OutOfMemory, body_len, "region", "body"); httpc_request_delete(req); return luaT_error(L); } lua_pushstring(L, "body"); lua_pushlstring(L, body, body_len); lua_settable(L, -3); } /* clean up */ httpc_request_delete(req); return 1; } static int luaT_httpc_stat(lua_State *L) { struct httpc_env *ctx = luaT_httpc_checkenv(L); if (ctx == NULL) return luaL_error(L, "can't get httpc environment"); lua_newtable(L); lua_add_key_u64(L, "active_requests", (uint64_t) ctx->curl_env.stat.active_requests); lua_add_key_u64(L, "sockets_added", (uint64_t) ctx->curl_env.stat.sockets_added); lua_add_key_u64(L, "sockets_deleted", (uint64_t) ctx->curl_env.stat.sockets_deleted); lua_add_key_u64(L, "total_requests", ctx->stat.total_requests); lua_add_key_u64(L, "http_200_responses", ctx->stat.http_200_responses); lua_add_key_u64(L, "http_other_responses", ctx->stat.http_other_responses); lua_add_key_u64(L, "failed_requests", (uint64_t) ctx->stat.failed_requests); return 1; } static int luaT_httpc_new(lua_State *L) { struct httpc_env *ctx = (struct httpc_env *) lua_newuserdata(L, sizeof(struct httpc_env)); if (ctx == NULL) return luaL_error(L, "lua_newuserdata failed: httpc_env"); long max_conns = luaL_checklong(L, 1); if (httpc_env_create(ctx, max_conns) != 0) return luaT_error(L); luaL_getmetatable(L, DRIVER_LUA_UDATA_NAME); lua_setmetatable(L, -2); return 1; } static int luaT_httpc_cleanup(lua_State *L) { httpc_env_destroy(luaT_httpc_checkenv(L)); /** remove all methods operating on ctx */ lua_newtable(L); lua_setmetatable(L, -2); lua_pushboolean(L, true); lua_pushinteger(L, 0); return 2; } /* * }}} */ /* * Lists of exporting: object and/or functions to the Lua */ static const struct luaL_Reg Module[] = { {"new", luaT_httpc_new}, {NULL, NULL} }; static const struct luaL_Reg Client[] = { {"request", luaT_httpc_request}, {"stat", luaT_httpc_stat}, {"__gc", 
luaT_httpc_cleanup}, {NULL, NULL} }; /* * Lib initializer */ LUA_API int luaopen_http_client_driver(lua_State *L) { luaL_register_type(L, DRIVER_LUA_UDATA_NAME, Client); luaL_register_module(L, "http.client", Module); return 1; } tarantool_1.9.1.26.g63eb81e3c/src/lua/errno.h0000664000000000000000000000316013306560010017046 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef TARANTOOL_LUA_ERRNO_H_INCLUDED #define TARANTOOL_LUA_ERRNO_H_INCLUDED #ifdef __cplusplus extern "C" { #endif struct lua_State; void tarantool_lua_errno_init(struct lua_State *L); #ifdef __cplusplus } #endif #endif /* TARANTOOL_LUA_ERRNO_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/uuid.lua0000664000000000000000000000713313306560010017225 0ustar rootroot-- uuid.lua (internal file) local ffi = require("ffi") local builtin = ffi.C ffi.cdef[[ struct tt_uuid { uint32_t time_low; uint16_t time_mid; uint16_t time_hi_and_version; uint8_t clock_seq_hi_and_reserved; uint8_t clock_seq_low; uint8_t node[6]; }; void tt_uuid_create(struct tt_uuid *uu); int tt_uuid_from_string(const char *in, struct tt_uuid *uu); void tt_uuid_to_string(const struct tt_uuid *uu, char *out); void tt_uuid_bswap(struct tt_uuid *uu); bool tt_uuid_is_nil(const struct tt_uuid *uu); bool tt_uuid_is_equal(const struct tt_uuid *lhs, const struct tt_uuid *rhs); char * tt_uuid_str(const struct tt_uuid *uu); extern const struct tt_uuid uuid_nil; ]] local uuid_t = ffi.typeof('struct tt_uuid') local UUID_STR_LEN = 36 local UUID_LEN = ffi.sizeof(uuid_t) local uuidbuf = ffi.new(uuid_t) local uuid_tostring = function(uu) if not ffi.istype(uuid_t, uu) then return error('Usage: uuid:str()') end return ffi.string(builtin.tt_uuid_str(uu), UUID_STR_LEN) end local uuid_fromstr = function(str) if type(str) ~= 'string' then error("fromstr(str)") end local uu = ffi.new(uuid_t) local rc = builtin.tt_uuid_from_string(str, uu) if rc ~= 0 then return nil end return uu end local need_bswap = function(order) if order == nil or order == 'l' or order == 'h' or order == 'host' then return false elseif order == 'b' or order == 'n' or order == 'network' then return true else error('invalid byteorder, valid is l, b, h, n') end end local uuid_tobin = function(uu, byteorder) if not ffi.istype(uuid_t, uu) then return error('Usage: uuid:bin([byteorder])') end if need_bswap(byteorder) then if uu ~= uuidbuf then 
ffi.copy(uuidbuf, uu, UUID_LEN) end builtin.tt_uuid_bswap(uuidbuf) return ffi.string(ffi.cast('char *', uuidbuf), UUID_LEN) end return ffi.string(ffi.cast('char *', uu), UUID_LEN) end local uuid_frombin = function(bin, byteorder) if type(bin) ~= 'string' or #bin ~= UUID_LEN then error("frombin(bin, [byteorder])") end local uu = ffi.new(uuid_t) ffi.copy(uu, bin, UUID_LEN) if need_bswap(byteorder) then builtin.tt_uuid_bswap(uu) end return uu end local uuid_isnil = function(uu) if not ffi.istype(uuid_t, uu) then return error('Usage: uuid:isnil()') end return builtin.tt_uuid_is_nil(uu) end local uuid_eq = function(lhs, rhs) if not ffi.istype(uuid_t, rhs) then return false end if not ffi.istype(uuid_t, lhs) then return error('Usage: uuid == var') end return builtin.tt_uuid_is_equal(lhs, rhs) end local uuid_new = function() local uu = ffi.new(uuid_t) builtin.tt_uuid_create(uu) return uu end local uuid_new_bin = function(byteorder) builtin.tt_uuid_create(uuidbuf) return uuid_tobin(uuidbuf, byteorder) end local uuid_new_str = function() builtin.tt_uuid_create(uuidbuf) return uuid_tostring(uuidbuf) end local uuid_mt = { __tostring = uuid_tostring; __eq = uuid_eq; __index = { isnil = uuid_isnil; bin = uuid_tobin; -- binary host byteorder str = uuid_tostring; -- RFC4122 string } } ffi.metatype(uuid_t, uuid_mt) return setmetatable({ NULL = builtin.uuid_nil; new = uuid_new; fromstr = uuid_fromstr; frombin = uuid_frombin; bin = uuid_new_bin; -- optimized shortcut for new():bin() str = uuid_new_str; -- optimized shortcut for new():str() }, { __call = uuid_new; -- shortcut for new() }) tarantool_1.9.1.26.g63eb81e3c/src/lua/utils.h0000664000000000000000000003556313306565107017111 0ustar rootroot#ifndef TARANTOOL_LUA_UTILS_H_INCLUDED #define TARANTOOL_LUA_UTILS_H_INCLUDED /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include /* modf, isfinite */ #include /* enum mp_type */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #include #include /* luaL_error */ #include #include #include #include #include #include #include #include struct lua_State; struct ibuf; struct error; /** * Single global lua_State shared by core and modules. * Created with tarantool_lua_init(). * const char *msg = lua_tostring(L, -1); * snprintf(m_errmsg, sizeof(m_errmsg), "%s", msg ? msg : ""); */ extern struct lua_State *tarantool_L; extern struct ibuf *tarantool_lua_ibuf; /** \cond public */ /** * @brief Push cdata of given \a ctypeid onto the stack. * CTypeID must be used from FFI at least once. Allocated memory returned * uninitialized. 
Only numbers and pointers are supported. * @param L Lua State * @param ctypeid FFI's CTypeID of this cdata * @sa luaL_checkcdata * @return memory associated with this cdata */ LUA_API void * luaL_pushcdata(struct lua_State *L, uint32_t ctypeid); /** * @brief Checks whether the function argument idx is a cdata * @param L Lua State * @param idx stack index * @param ctypeid FFI's CTypeID of this cdata * @sa luaL_pushcdata * @return memory associated with this cdata */ LUA_API void * luaL_checkcdata(struct lua_State *L, int idx, uint32_t *ctypeid); /** * @brief Sets finalizer function on a cdata object. * Equivalent to call ffi.gc(obj, function). * Finalizer function must be on the top of the stack. * @param L Lua State * @param idx object */ LUA_API void luaL_setcdatagc(struct lua_State *L, int idx); /** * @brief Return CTypeID (FFI) of given СDATA type * @param L Lua State * @param ctypename С type name as string (e.g. "struct request" or "uint32_t") * @sa luaL_pushcdata * @sa luaL_checkcdata * @return CTypeID */ LUA_API uint32_t luaL_ctypeid(struct lua_State *L, const char *ctypename); /** * @brief Declare symbols for FFI * @param L Lua State * @param ctypename C definitions, e.g "struct stat" * @sa ffi.cdef(def) * @retval 0 on success * @retval LUA_ERRRUN, LUA_ERRMEM, LUA_ERRERR otherwise */ LUA_API int luaL_cdef(struct lua_State *L, const char *ctypename); /** \endcond public */ static inline lua_Integer luaL_arrlen(struct lua_State *L, int idx) { lua_Integer max = 0; lua_pushnil(L); while (lua_next(L, idx)) { lua_pop(L, 1); /* pop the value */ if (lua_type(L, -1) != LUA_TNUMBER) continue; lua_Number k = lua_tonumber(L, -1); if (k <= max || floor(k) != k) continue; max = k; } return max; } static inline lua_Integer luaL_maplen(struct lua_State *L, int idx) { lua_Integer size = 0; lua_pushnil(L); while (lua_next(L, idx)) { lua_pop(L, 1); /* pop the value */ size++; } return size; } /** * Common configuration options for Lua serializers (MsgPack, YAML, JSON) */ 
struct luaL_serializer { /** * luaL_tofield tries to classify table into one of four kinds * during encoding: * * + map - at least one table index is not unsigned integer. * + regular array - all array indexes are available. * + sparse array - at least one array index is missing. * + excessively sparse arrat - the number of values missing * exceeds the configured ratio. * * An array is excessively sparse when **all** the following * conditions are met: * * + encode_sparse_ratio > 0. * + max(table) > encode_sparse_safe. * + max(table) > count(table) * encode_sparse_ratio. * * luaL_tofield will never consider an array to be excessively sparse * when encode_sparse_ratio = 0. The encode_sparse_safe limit ensures * that small Lua arrays are always encoded as sparse arrays. * By default, attempting to encode an excessively sparse array will * generate an error. If encode_sparse_convert is set to true, * excessively sparse arrays will be handled as maps. * * This conversion logic is modeled after Mark Pulford's CJSON module. * @sa http://www.kyne.com.au/~mark/software/lua-cjson-manual.html */ int encode_sparse_convert; /** @see encode_sparse_convert */ int encode_sparse_ratio; /** @see encode_sparse_convert */ int encode_sparse_safe; /** Max recursion depth for encoding (MsgPack, CJSON only) */ int encode_max_depth; /** Enables encoding of NaN and Inf numbers */ int encode_invalid_numbers; /** Floating point numbers precision (YAML, CJSON only) */ int encode_number_precision; /** * Enables __serialize meta-value checking: * * + 'seq', 'sequence', 'array' - table encoded as an array * + 'map', 'mappping' - table encoded as a map. * 'seq' or 'map' also enable flow (compact) mode for YAML serializer * (flow="[1,2,3]" vs block=" - 1\n - 2\n - 3\n"). * + function - the meta-method is called to unpack serializable * representation of table, cdata or userdata objects. 
*/ int encode_load_metatables; /** Enables tostring() usage for unknown types */ int encode_use_tostring; /** Use NULL for all unrecognizable types */ int encode_invalid_as_nil; /** Enables decoding NaN and Inf numbers */ int decode_invalid_numbers; /** Save __serialize meta-value for decoded arrays and maps */ int decode_save_metatables; /** Max recursion depts for decoding (CJSON only) */ int decode_max_depth; /** Enable support for compact represenation (internal, YAML-only). */ int has_compact; }; extern int luaL_nil_ref; extern int luaL_map_metatable_ref; extern int luaL_array_metatable_ref; #define LUAL_SERIALIZER "serializer" #define LUAL_SERIALIZE "__serialize" struct luaL_serializer * luaL_newserializer(struct lua_State *L, const char *modname, const luaL_Reg *reg); static inline struct luaL_serializer * luaL_checkserializer(struct lua_State *L) { return (struct luaL_serializer *) luaL_checkudata(L, lua_upvalueindex(1), LUAL_SERIALIZER); } /** A single value on the Lua stack. */ struct luaL_field { union { struct { const char *data; uint32_t len; } sval; int64_t ival; double dval; float fval; bool bval; /* Array or map. */ uint32_t size; }; enum mp_type type; bool compact; /* a flag used by YAML serializer */ }; /** * @brief Convert a value from the Lua stack to a lua_field structure. * This function is designed for use with Lua bindings and data * serialization functions (YAML, MsgPack, JSON, etc.). 
* * Conversion rules: * - LUA_TNUMBER when is integer and >= 0 -> UINT * - LUA_TNUMBER when is integer and < 0 -> INT * - LUA_TNUMBER when is not integer -> DOUBLE * - LUA_TBOOLEAN -> BOOL * - LUA_TSTRING -> STRING * - LUA_TNIL -> NIL * - LUA_TTABLE when is array -> ARRAY * - LUA_TTABLE when is not array -> MAP * - LUA_TUSERDATA, LUA_TLIGHTUSERDATA, CTID_P_VOID when == NULL -> NIL * - CTID_INT*, CTID_CCHAR when >= 0 -> UINT * - CTID_INT*, CTID_CCHAR when < 0 -> INT * - CTID_FLOAT -> FLOAT * - CTID_DOUBLE -> DOUBLE * - CTID_BOOL -> BOOL * - otherwise -> EXT * * ARRAY vs MAP recognition works based on encode_sparse_convert, * encode_sparse_ratio, encode_sparse_safe and encode_load_metatables config * parameters (see above). Tables are not saved to lua_field structure and * should be processed manually, according to returned type and size value. * * This function doesn't try to unpack unknown types and simple returns MP_EXT. * The caller can use luaL_tofield() for basic conversion, then invoke internal * hooks(if available) and then call luaL_checkfield(), which will try to * unpack cdata/userdata objects or raise and error. * * @param L stack * @param cfg configuration * @param index stack index * @param field conversion result */ void luaL_tofield(struct lua_State *L, struct luaL_serializer *cfg, int index, struct luaL_field *field); /** * @brief Try to convert userdata/cdata values using defined conversion logic. * Must be used only after lua_tofield(). * * @param L stack * @param cfg configuration * @param idx stack index * @param field conversion result */ void luaL_convertfield(struct lua_State *L, struct luaL_serializer *cfg, int idx, struct luaL_field *field); /** * @brief A wrapper for luaL_tofield() and luaL_convertfield() that * tries to convert value or raise an error. 
* @param L stack * @param cfg configuration * @param idx stack index * @param field conversion result * @sa lua_tofield() * @sa luaL_convertfield() * * Common conversion order for tables: * size/count detection -> (sparse array checking) -> (__serialize) * * Common conversion order for userdata/cdata objects: * (internal trigger) -> (__serialize) -> (tostring) -> (nil) -> exception * * Common conversion order for other types: * (tostring) -> (nil) -> exception */ static inline void luaL_checkfield(struct lua_State *L, struct luaL_serializer *cfg, int idx, struct luaL_field *field) { luaL_tofield(L, cfg, idx, field); if (field->type != MP_EXT) return; luaL_convertfield(L, cfg, idx, field); } void luaL_register_type(struct lua_State *L, const char *type_name, const struct luaL_Reg *methods); void luaL_register_module(struct lua_State *L, const char *modname, const struct luaL_Reg *methods); /** \cond public */ /** * Push uint64_t onto the stack * * @param L is a Lua State * @param val is a value to push */ LUA_API void luaL_pushuint64(struct lua_State *L, uint64_t val); /** * Push int64_t onto the stack * * @param L is a Lua State * @param val is a value to push */ LUA_API void luaL_pushint64(struct lua_State *L, int64_t val); /** * Checks whether the argument idx is a uint64 or a convertable string and * returns this number. * \throws error if the argument can't be converted. */ LUA_API uint64_t luaL_checkuint64(struct lua_State *L, int idx); /** * Checks whether the argument idx is a int64 or a convertable string and * returns this number. * \throws error if the argument can't be converted. */ LUA_API int64_t luaL_checkint64(struct lua_State *L, int idx); /** * Checks whether the argument idx is a uint64 or a convertable string and * returns this number. * \return the converted number or 0 of argument can't be converted. 
*/ LUA_API uint64_t luaL_touint64(struct lua_State *L, int idx); /** * Checks whether the argument idx is a int64 or a convertable string and * returns this number. * \return the converted number or 0 of argument can't be converted. */ LUA_API int64_t luaL_toint64(struct lua_State *L, int idx); /** * Re-throws the last Tarantool error as a Lua object. * \sa lua_error() * \sa box_error_last() */ LUA_API int luaT_error(lua_State *L); /** * Like lua_call(), but with the proper support of Tarantool errors. * \sa lua_call() */ LUA_API int luaT_call(lua_State *L, int nargs, int nreturns); /** * Like lua_cpcall(), but with the proper support of Tarantool errors. * \sa lua_cpcall() */ LUA_API int luaT_cpcall(lua_State *L, lua_CFunction func, void *ud); /** * Get global Lua state used by Tarantool */ LUA_API lua_State * luaT_state(void); /** \endcond public */ void luaT_pusherror(struct lua_State *L, struct error *e); struct error * luaL_iserror(struct lua_State *L, int narg); /** * Push Lua Table with __serialize = 'map' hint onto the stack. * Tables with __serialize hint are properly handled by all serializers. * @param L stack * @param idx index in the stack */ static inline void luaL_setmaphint(struct lua_State *L, int idx) { if (idx < 0) idx = lua_gettop(L) + idx + 1; assert(lua_type(L, idx) == LUA_TTABLE); lua_rawgeti(L, LUA_REGISTRYINDEX, luaL_map_metatable_ref); lua_setmetatable(L, idx); } /** * Push Lua Table with __serialize = 'seq' hint onto the stack. * Tables with __serialize hint are properly handled by all serializers. * @param L stack * @param idx index in the stack */ static inline void luaL_setarrayhint(struct lua_State *L, int idx) { if (idx < 0) idx = lua_gettop(L) + idx + 1; assert(lua_type(L, idx) == LUA_TTABLE); lua_rawgeti(L, LUA_REGISTRYINDEX, luaL_array_metatable_ref); lua_setmetatable(L, idx); } /** * Push ffi's NULL (cdata: NULL) onto the stack. * Can be used as replacement of nil in Lua tables. 
* @param L stack */ static inline void luaL_pushnull(struct lua_State *L) { lua_rawgeti(L, LUA_REGISTRYINDEX, luaL_nil_ref); } /** * Return true if the value at Lua stack is ffi's NULL * (cdata: NULL). * @param L stack * @param idx stack index */ static inline bool luaL_isnull(struct lua_State *L, int idx) { if (lua_type(L, idx) == LUA_TCDATA) { GCcdata *cd = cdataV(L->base + idx - 1); return cd->ctypeid == CTID_P_VOID && *(void **)cdataptr(cd) == NULL; } return false; } static inline void luaL_checkfinite(struct lua_State *L, struct luaL_serializer *cfg, lua_Number number) { if (!cfg->decode_invalid_numbers && !isfinite(number)) luaL_error(L, "number must not be NaN or Inf"); } int tarantool_lua_utils_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #include "exception.h" #include static inline void luaT_call_xc(lua_State *L, int nargs, int nreturns) { if (luaT_call(L, nargs, nreturns) != 0) diag_raise(); } /** * Make a reference to an object on top of the Lua stack and * release it at the end of the scope. */ struct LuarefGuard { int ref; bool is_active; explicit LuarefGuard(int ref_arg) { ref = ref_arg; is_active = true; } explicit LuarefGuard(struct lua_State *L) { ref = luaL_ref(L, LUA_REGISTRYINDEX); is_active = true; } ~LuarefGuard() { if (is_active) luaL_unref(tarantool_L, LUA_REGISTRYINDEX, ref); } }; #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_UTILS_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/httpc.lua0000664000000000000000000003006213306560010017376 0ustar rootroot-- -- Copyright (C) 2016-2017 Tarantool AUTHORS: please see AUTHORS file. -- -- Redistribution and use in source and binary forms, with or -- without modification, are permitted provided that the following -- conditions are met: -- -- 1. Redistributions of source code must retain the above -- copyright notice, this list of conditions and the -- following disclaimer. -- -- 2. 
Redistributions in binary form must reproduce the above -- copyright notice, this list of conditions and the following -- disclaimer in the documentation and/or other materials -- provided with the distribution. -- -- THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND -- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -- TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL -- OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -- BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -- THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -- SUCH DAMAGE. -- local fiber = require('fiber') local driver = package.loaded.http.client package.loaded.http = nil local curl_mt -- -- - create a new curl instance. -- -- Parameters: -- -- max_connectionss - Maximum number of entries in the connection cache */ -- -- Returns: -- curl object or raise error() -- local http_new = function(opts) opts = opts or {} opts.max_connections = opts.max_connections or 5 local curl = driver.new(opts.max_connections) return setmetatable({ curl = curl, }, curl_mt ) end local check_args_fmt = 'Use client:%s(...) instead of client.%s(...):' local function check_args(self, method) if type(self) ~= 'table' then error(check_args_fmt:format(method, method), 2) end end -- -- RFC2616: http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 -- -- Multiple message-header fields with the same field-name MAY be present -- in a message if and only if the entire field-value for that header field -- is defined as a comma-separated list [i.e., #(values)]. 
It MUST be possible -- to combine the multiple header fields into one "field-name: field-value" -- pair, without changing the semantics of the message, by appending each -- subsequent field-value to the first, each separated by a comma. The order -- in which header fields with the same field-name are received is therefore -- significant to the interpretation of the combined field value, and thus -- a proxy MUST NOT change the order of these field values when a message -- is forwarded. -- -- Tarantool implementation concatenates all headers by default except -- the blacklist below. -- local special_headers = { ["age"] = true, ["authorization"] = true, ["content-length"] = true, ["content-type"] = true, ["etag"] = true, ["expires"] = true, ["from"] = true, ["host"] = true, ["if-modified-since"] = true, ["if-unmodified-since"] = true, ["last-modified"] = true, ["location"] = true, ["max-forwards"] = true, ["proxy-authorization"] = true, ["referer"] = true, ["retry-after"] = true, ["user-agent"] = true, } local special_characters = { ['('] = true, [')'] = true, ['['] = true, [']'] = true, ['<'] = true, ['>'] = true, ['>'] = true, ['@'] = true, [','] = true, [';'] = true, [':'] = true, ['\\'] = true, ['\"'] = true, ['/'] = true, ['?'] = true, ['='] = true, ['{'] = true, ['}'] = true, [' '] = true, ['\t'] = true } local option_keys = { ["Expires"] = true, ["Max-Age"] = true, ["Domain"] = true, ["Path"] = true, ["Secure"] = true, ["HttpOnly"] = true, ["SameSite"] = true, } --local function process_set_cookies(value, result) -- local key_start, value_start -- local key, val -- local symbols = value:gmatch('.') -- local options = {} -- local cur = 0 -- for v in symbols do -- cur = cur + 1 -- if v == ' ' or v == '\t' then -- goto continue -- end -- key_start = cur -- -- parse cookie name -- while not special_characters[v] do -- if v == nil then -- return -- end -- v = symbols() -- cur = cur + 1 -- end -- key = value:sub(key_start, cur) -- if not v or v ~= '=' then -- -- 
invalid header -- return -- end -- while v == ' ' do -- v = symbols() -- cur = cur + 1 -- end -- -- if v == nil then -- return -- end -- -- while v and v ~= ';' do -- if v == nil then -- break -- end -- v = symbols() -- cur = cur + 1 -- end -- -- result[key] = {val, options} -- ::continue:: -- end --end local function process_cookie(cookie, result) local vals = cookie:split(';') local val = vals[1]:split('=') if #val < 2 then return end val[1] = string.strip(val[1]) for c in val[1]:gmatch('.') do if special_characters[c] then return end end local options = {} table.remove(vals, 1) for _, opt in pairs(vals) do local splitted = opt:split('=') splitted = string.strip(splitted[1]) if option_keys[splitted] then table.insert(options, string.strip(opt)) end end result[val[1]] = {string.strip(val[2]), options} end local function process_cookies(cookies) local result = {} for _, val in pairs(cookies) do process_cookie(val, result) end return result end local function process_headers(headers) for header, value in pairs(headers) do if type(value) == 'table' then if special_headers[header] then headers[header] = value[1] else headers[header] = table.concat(value, ',') end end end return headers end -- -- This function does HTTP request -- -- Parameters: -- -- method - HTTP method, like GET, POST, PUT and so on -- url - HTTP url, like https://tarantool.org/doc -- body - this parameter is optional, you may use it for passing -- options - this is a table of options. -- data to a server. Like 'My text string!' 
-- -- ca_path - a path to ssl certificate dir; -- -- ca_file - a path to ssl certificate file; -- -- verify_host - set on/off verification of the certificate's name (CN) -- against host; -- -- verify_peer - set on/off verification of the peer's SSL certificate; -- -- ssl_key - set path to the file with private key for TLS and SSL client -- certificate; -- -- ssl_cert - set path to the file with SSL client certificate; -- -- headers - a table of HTTP headers; -- -- keepalive_idle & keepalive_interval - -- non-universal keepalive knobs (Linux, AIX, HP-UX, more); -- -- low_speed_time & low_speed_limit - -- If the download receives less than -- "low speed limit" bytes/second -- during "low speed time" seconds, -- the operations is aborted. -- You could i.e if you have -- a pretty high speed connection, abort if -- it is less than 2000 bytes/sec -- during 20 seconds; -- -- timeout - Time-out the read operation and -- waiting for the curl api request -- after this amount of seconds; -- -- verbose - set on/off verbose mode -- -- Returns: -- { -- status=NUMBER, -- reason=ERRMSG -- body=STRING, -- headers=STRING, -- errmsg=STRING -- } -- -- Raises error() on invalid arguments and OOM -- curl_mt = { __index = { -- -- see above -- request = function(self, method, url, body, opts) if not method or not url then error('request(method, url [, options]])') end local resp = self.curl:request(method, url, body, opts or {}) if resp and resp.headers then if resp.headers['set-cookie'] ~= nil then resp.cookies = process_cookies(resp.headers['set-cookie']) end resp.headers = process_headers(resp.headers) end return resp end, -- -- - see -- get = function(self, url, options) check_args(self, 'get') return self:request('GET', url, nil, options) end, -- -- - see -- post = function(self, url, body, options) check_args(self, 'post') return self:request('POST', url, body, options) end, -- -- - see -- put = function(self, url, body, options) check_args(self, 'put') return self:request('PUT', 
url, body, options) end, -- -- - see -- patch = function(self, url, body, options) check_args(self, 'patch') return self:request('PATCH', url, body, options) end, -- -- see -- options = function(self, url, options) check_args(self, 'options') return self:request('OPTIONS', url, nil, options) end, -- -- see -- head = function(self, url, options) check_args(self, 'head') return self:request('HEAD', url, nil, options) end, -- -- see -- delete = function(self, url, options) check_args(self, 'delete') return self:request('DELETE', url, nil, options) end, -- -- see -- trace = function(self, url, options) check_args(self, 'trace') return self:request('TRACE', url, nil, options) end, -- -- see -- connect = function(self, url, options) check_args(self, 'connect') return self:request('CONNECT', url, nil, options) end, -- -- - this function returns a table with many values of statistic. -- -- Returns { -- -- active_requests - this is number of currently executing requests -- -- sockets_added - -- this is a total number of added sockets into libev loop -- -- sockets_deleted - -- this is a total number of deleted sockets from libev -- loop -- -- total_requests - this is a total number of requests -- -- http_200_responses - -- this is a total number of requests which have -- returned a code HTTP 200 -- -- http_other_responses - -- this is a total number of requests which have -- requests not a HTTP 200 -- -- failed_requests - this is a total number of requests which have -- failed (included systeme erros, curl errors, HTTP -- errors and so on) -- } -- or error() -- stat = function(self) return self.curl:stat() end, }, } -- -- Export -- local http_default = http_new() local this_module = { new = http_new, } local function http_default_wrap(fname) return function(...) return http_default[fname](http_default, ...) 
end end for _, name in ipairs({ 'get', 'delete', 'trace', 'options', 'head', 'connect', 'post', 'put', 'patch', 'request'}) do this_module[name] = http_default_wrap(name) end return this_module tarantool_1.9.1.26.g63eb81e3c/src/lua/crypto.lua0000664000000000000000000002775213306560010017610 0ustar rootroot-- crypto.lua (internal file) local ffi = require('ffi') local buffer = require('buffer') ffi.cdef[[ int tnt_openssl_init(void); /* from openssl/err.h */ unsigned long ERR_get_error(void); char *ERR_error_string(unsigned long e, char *buf); /* from openssl/evp.h */ typedef void ENGINE; typedef struct {} EVP_MD_CTX; typedef struct {} EVP_MD; EVP_MD_CTX *tnt_EVP_MD_CTX_new(void); void tnt_EVP_MD_CTX_free(EVP_MD_CTX *ctx); int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt); int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s); const EVP_MD *EVP_get_digestbyname(const char *name); typedef struct {} HMAC_CTX; HMAC_CTX *tnt_HMAC_CTX_new(void); void tnt_HMAC_CTX_free(HMAC_CTX *ctx); int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl); int HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len); int HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len); typedef struct {} EVP_CIPHER_CTX; typedef struct {} EVP_CIPHER; EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(); void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx); int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl, const unsigned char *key, const unsigned char *iv, int enc); int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, const unsigned char *in, int inl); int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl); int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *ctx); int tnt_EVP_CIPHER_iv_length(const EVP_CIPHER *cipher); int tnt_EVP_CIPHER_key_length(const EVP_CIPHER *cipher); int 
EVP_CIPHER_block_size(const EVP_CIPHER *cipher); const EVP_CIPHER *EVP_get_cipherbyname(const char *name); ]] ffi.C.tnt_openssl_init(); local function openssl_err_str() return ffi.string(ffi.C.ERR_error_string(ffi.C.ERR_get_error(), nil)) end local digests = {} for class, name in pairs({ md2 = 'MD2', md4 = 'MD4', md5 = 'MD5', sha = 'SHA', sha1 = 'SHA1', sha224 = 'SHA224', sha256 = 'SHA256', sha384 = 'SHA384', sha512 = 'SHA512', dss = 'DSS', dss1 = 'DSS1', mdc2 = 'MDC2', ripemd160 = 'RIPEMD160'}) do local digest = ffi.C.EVP_get_digestbyname(class) if digest ~= nil then digests[class] = digest end end local digest_mt = {} local function digest_gc(ctx) ffi.C.tnt_EVP_MD_CTX_free(ctx) end local function digest_new(digest) local ctx = ffi.C.tnt_EVP_MD_CTX_new() if ctx == nil then return error('Can\'t create digest ctx: ' .. openssl_err_str()) end ffi.gc(ctx, digest_gc) local self = setmetatable({ ctx = ctx, digest = digest, buf = buffer.ibuf(64), initialized = false, outl = ffi.new('int[1]') }, digest_mt) self:init() return self end local function digest_init(self) if self.ctx == nil then return error('Digest context isn\'t usable') end if ffi.C.EVP_DigestInit_ex(self.ctx, self.digest, nil) ~= 1 then return error('Can\'t init digest: ' .. openssl_err_str()) end self.initialized = true end local function digest_update(self, input) if not self.initialized then return error('Digest not initialized') end if ffi.C.EVP_DigestUpdate(self.ctx, input, input:len()) ~= 1 then return error('Can\'t update digest: ' .. openssl_err_str()) end end local function digest_final(self) if not self.initialized then return error('Digest not initialized') end self.initialized = false if ffi.C.EVP_DigestFinal_ex(self.ctx, self.buf.wpos, self.outl) ~= 1 then return error('Can\'t finalize digest: ' .. 
openssl_err_str()) end return ffi.string(self.buf.wpos, self.outl[0]) end local function digest_free(self) ffi.C.tnt_EVP_MD_CTX_free(self.ctx) ffi.gc(self.ctx, nil) self.ctx = nil self.initialized = false end digest_mt = { __index = { init = digest_init, update = digest_update, result = digest_final, free = digest_free } } local hmacs = digests local hmac_mt = {} local function hmac_gc(ctx) ffi.C.tnt_HMAC_CTX_free(ctx) end local function hmac_new(digest, key) if key == nil then return error('Key should be specified for HMAC operations') end local ctx = ffi.C.tnt_HMAC_CTX_new() if ctx == nil then return error('Can\'t create HMAC ctx: ' .. openssl_err_str()) end ffi.gc(ctx, hmac_gc) local self = setmetatable({ ctx = ctx, digest = digest, buf = buffer.ibuf(64), initialized = false, outl = ffi.new('int[1]') }, hmac_mt) self:init(key) return self end local function hmac_init(self, key) if self.ctx == nil then return error('HMAC context isn\'t usable') end if ffi.C.HMAC_Init_ex(self.ctx, key, key:len(), self.digest, nil) ~= 1 then return error('Can\'t init HMAC: ' .. openssl_err_str()) end self.initialized = true end local function hmac_update(self, input) if not self.initialized then return error('HMAC not initialized') end if ffi.C.HMAC_Update(self.ctx, input, input:len()) ~= 1 then return error('Can\'t update HMAC: ' .. openssl_err_str()) end end local function hmac_final(self) if not self.initialized then return error('HMAC not initialized') end self.initialized = false if ffi.C.HMAC_Final(self.ctx, self.buf.wpos, self.outl) ~= 1 then return error('Can\'t finalize HMAC: ' .. 
openssl_err_str()) end return ffi.string(self.buf.wpos, self.outl[0]) end local function hmac_free(self) ffi.C.tnt_HMAC_CTX_free(self.ctx) ffi.gc(self.ctx, nil) self.ctx = nil self.initialized = false end hmac_mt = { __index = { init = hmac_init, update = hmac_update, result = hmac_final, free = hmac_free } } local ciphers = {} for algo, algo_name in pairs({des = 'DES', aes128 = 'AES-128', aes192 = 'AES-192', aes256 = 'AES-256'}) do local algo_api = {} for mode, mode_name in pairs({cfb = 'CFB', ofb = 'OFB', cbc = 'CBC', ecb = 'ECB'}) do local cipher = ffi.C.EVP_get_cipherbyname(algo_name .. '-' .. mode_name) if cipher ~= nil then algo_api[mode] = cipher end end if algo_api ~= {} then ciphers[algo] = algo_api end end local cipher_mt = {} local function cipher_gc(ctx) ffi.C.EVP_CIPHER_CTX_free(ctx) end local function cipher_new(cipher, key, iv, direction) if key == nil or key:len() ~= ffi.C.tnt_EVP_CIPHER_key_length(cipher) then return error('Key length should be equal to cipher key length (' .. tostring(ffi.C.tnt_EVP_CIPHER_key_length(cipher)) .. ' bytes)') end if iv == nil or iv:len() ~= ffi.C.tnt_EVP_CIPHER_iv_length(cipher) then return error('Initial vector length should be equal to cipher iv length (' .. tostring(ffi.C.tnt_EVP_CIPHER_iv_length(cipher)) .. ' bytes)') end local ctx = ffi.C.EVP_CIPHER_CTX_new() if ctx == nil then return error('Can\'t create cipher ctx: ' .. openssl_err_str()) end ffi.gc(ctx, cipher_gc) local self = setmetatable({ ctx = ctx, cipher = cipher, block_size = ffi.C.EVP_CIPHER_block_size(cipher), direction = direction, buf = buffer.ibuf(), initialized = false, outl = ffi.new('int[1]') }, cipher_mt) self:init(key, iv) return self end local function cipher_init(self, key, iv) if self.ctx == nil then return error('Cipher context isn\'t usable') end if ffi.C.EVP_CipherInit_ex(self.ctx, self.cipher, nil, key, iv, self.direction) ~= 1 then return error('Can\'t init cipher:' .. 
openssl_err_str()) end self.initialized = true end local function cipher_update(self, input) if not self.initialized then return error('Cipher not initialized') end if type(input) ~= 'string' then error("Usage: cipher:update(string)") end local wpos = self.buf:reserve(input:len() + self.block_size - 1) if ffi.C.EVP_CipherUpdate(self.ctx, wpos, self.outl, input, input:len()) ~= 1 then return error('Can\'t update cipher:' .. openssl_err_str()) end return ffi.string(wpos, self.outl[0]) end local function cipher_final(self) if not self.initialized then return error('Cipher not initialized') end self.initialized = false local wpos = self.buf:reserve(self.block_size - 1) if ffi.C.EVP_CipherFinal_ex(self.ctx, wpos, self.outl) ~= 1 then return error('Can\'t finalize cipher:' .. openssl_err_str()) end self.initialized = false return ffi.string(wpos, self.outl[0]) end local function cipher_free(self) ffi.C.EVP_CIPHER_CTX_free(self.ctx) ffi.gc(self.ctx, nil) self.ctx = nil self.initialized = false self.buf:reset() end cipher_mt = { __index = { init = cipher_init, update = cipher_update, result = cipher_final, free = cipher_free } } local digest_api = {} for class, digest in pairs(digests) do digest_api[class] = setmetatable({ new = function () return digest_new(digest) end }, { __call = function (self, str) if type(str) ~= 'string' then error("Usage: digest."..class.."(string)") end local ctx = digest_new(digest) ctx:update(str) local res = ctx:result() ctx:free() return res end }) end digest_api = setmetatable(digest_api, {__index = function(self, digest) return error('Digest method "' .. digest .. 
'" is not supported') end }) local hmac_api = {} for class, digest in pairs(hmacs) do hmac_api[class] = setmetatable({ new = function (key) return hmac_new(digest, key) end }, { __call = function (self, key, str) if type(str) ~= 'string' then error("Usage: hmac."..class.."(key, string)") end local ctx = hmac_new(digest, key) ctx:update(str) local res = ctx:result() ctx:free() return res end }) hmac_api[class .. '_hex'] = function (key, str) if type(str) ~= 'string' then error("Usage: hmac."..class.."_hex(key, string)") end return string.hex(hmac_api[class](key, str)) end end hmac_api = setmetatable(hmac_api, {__index = function(self, digest) return error('HMAC method "' .. digest .. '" is not supported') end }) local function cipher_mode_error(self, mode) error('Cipher mode ' .. mode .. ' is not supported') end local cipher_api = {} for class, subclass in pairs(ciphers) do local class_api = {} for subclass, cipher in pairs(subclass) do class_api[subclass] = {} for direction, param in pairs({encrypt = 1, decrypt = 0}) do class_api[subclass][direction] = setmetatable({ new = function (key, iv) return cipher_new(cipher, key, iv, param) end }, { __call = function (self, str, key, iv) local ctx = cipher_new(cipher, key, iv, param) local res = ctx:update(str) res = res .. ctx:result() ctx:free() return res end }) end end class_api = setmetatable(class_api, {__index = cipher_mode_error}) if class_api ~= {} then cipher_api[class] = class_api end end cipher_api = setmetatable(cipher_api, {__index = function(self, cipher) return error('Cipher method "' .. cipher .. 
'" is not supported') end }) return { digest = digest_api, hmac = hmac_api, cipher = cipher_api, } tarantool_1.9.1.26.g63eb81e3c/src/lua/crypto.c0000664000000000000000000000301713306560010017235 0ustar rootroot#include #include #include #include /* Helper function for openssl init */ int tnt_openssl_init() { #if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) OpenSSL_add_all_digests(); OpenSSL_add_all_ciphers(); ERR_load_crypto_strings(); #else OPENSSL_init_crypto(0, NULL); OPENSSL_init_ssl(0, NULL); #endif return 0; } /* Helper functions for tarantool crypto api */ int tnt_EVP_CIPHER_key_length(const EVP_CIPHER *cipher) { return EVP_CIPHER_key_length(cipher); } int tnt_EVP_CIPHER_iv_length(const EVP_CIPHER *cipher) { return EVP_CIPHER_iv_length(cipher); } EVP_MD_CTX *tnt_EVP_MD_CTX_new(void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) return EVP_MD_CTX_create(); #else return EVP_MD_CTX_new(); #endif }; void tnt_EVP_MD_CTX_free(EVP_MD_CTX *ctx) { #if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) return EVP_MD_CTX_destroy(ctx); #else return EVP_MD_CTX_free(ctx); #endif } HMAC_CTX *tnt_HMAC_CTX_new(void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) HMAC_CTX *ctx = (HMAC_CTX *)OPENSSL_malloc(sizeof(HMAC_CTX)); if(!ctx){ return NULL; } HMAC_CTX_init(ctx); return ctx; #else return HMAC_CTX_new(); #endif } void tnt_HMAC_CTX_free(HMAC_CTX *ctx) { #if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) HMAC_cleanup(ctx); /* Remove key from memory */ OPENSSL_free(ctx); #else HMAC_CTX_free(ctx); #endif } tarantool_1.9.1.26.g63eb81e3c/src/lua/httpc.h0000664000000000000000000000303013306560010017037 0ustar rootroot#ifndef TARANTOOL_LUA_HTTPC_H_INCLUDED #define TARANTOOL_LUA_HTTPC_H_INCLUDED 1 /* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ struct lua_State; int luaopen_http_client_driver(struct lua_State *L); #endif /* TARANTOOL_LUA_HTTPC_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/strict.lua0000664000000000000000000000254013306560010017564 0ustar rootroot-- strict.lua -- checks uses of undeclared global variables -- All global variables must be 'declared' through a regular assignment -- (even assigning nil will do) in a main chunk before being used -- anywhere or assigned to inside a function. 
-- local getinfo, error, rawset, rawget = debug.getinfo, error, rawset, rawget local mt = {} mt.__declared = {} local function what () local d = getinfo(3, "S") return d and d.what or "C" end mt.__newindex = function (t, n, v) if not mt.__declared[n] then local w = what() if w ~= "main" and w ~= "C" then error("assign to undeclared variable '"..n.."'", 2) end mt.__declared[n] = true end rawset(t, n, v) end mt.__index = function (t, n) if not mt.__declared[n] and what() ~= "C" then error("variable '"..n.."' is not declared", 2) end return rawget(t, n) end local function off() mt.__declared = {} local m = getmetatable(_G) if m == nil then return end if m == mt then setmetatable(_G, nil) else m.__newindex = nil m.__index = nil end end local function on() local m = getmetatable(_G) if m == mt then return end if m == nil then setmetatable(_G, mt) else m.__newindex = mt.__newindex m.__index = mt.__index end end on() return { on = on, off = off, } tarantool_1.9.1.26.g63eb81e3c/src/lua/init.h0000664000000000000000000000552613306560010016674 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_H #define INCLUDES_TARANTOOL_LUA_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; struct luaL_Reg; extern bool start_loop; extern struct lua_State *tarantool_L; /** * Create an instance of Lua interpreter and load it with * Tarantool modules. Creates a Lua state, imports global * Tarantool modules, then calls box_lua_init(), which performs * module-specific imports. The created state can be freed as any * other, with lua_close(). * * @return L on success, 0 if out of memory */ void tarantool_lua_init(const char *tarantool_bin, int argc, char **argv); /** Free Lua subsystem resources. */ void tarantool_lua_free(); /** * This function exists because lua_tostring does not use * __tostring metamethod, and this metamethod has to be used * if we want to print Lua userdata correctly. 
*/ const char * tarantool_lua_tostring(struct lua_State *L, int index); /** * Load and execute start-up file * * @param interactive force interactive mode * @param argc argc the number of command line arguments * @param argv argv command line arguments */ void tarantool_lua_run_script(char *path, bool force_interactive, int optc, char **optv, int argc, char **argv); extern char *history; struct slab_cache * tarantool_lua_slab_cache(); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_LUA_H */ tarantool_1.9.1.26.g63eb81e3c/src/lua/pwd.lua0000664000000000000000000001254713306560010017056 0ustar rootrootlocal ffi = require('ffi') local errno = require('errno') -- GID_T, UID_T and TIME_T are, essentially, `integer types`. -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/types.h.html ffi.cdef[[ typedef int uid_t; typedef int gid_t; typedef long time_t; ]] -- POSIX demands to have three fields in struct group: -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/grp.h.html -- char *gr_name The name of the group. -- gid_t gr_gid Numerical group ID. -- char **gr_mem Pointer to a null-terminated array of character pointers to -- member names. -- -- So we'll extract only them. ffi.cdef[[ struct group { char *gr_name; /* group name */ char *gr_passwd; /* group password */ gid_t gr_gid; /* group id */ char **gr_mem; /* group members */ }; ]] -- POSIX demands to have five fields in struct group: -- char *pw_name User's login name. -- uid_t pw_uid Numerical user ID. -- gid_t pw_gid Numerical group ID. -- char *pw_dir Initial working directory. -- char *pw_shell Program to use as shell. -- -- So we'll extract only them. 
if ffi.os == 'OSX' or ffi.os == 'BSD' then ffi.cdef[[ struct passwd { char *pw_name; /* user name */ char *pw_passwd; /* encrypted password */ uid_t pw_uid; /* user uid */ gid_t pw_gid; /* user gid */ time_t pw_change; /* password change time */ char *pw_class; /* user access class */ char *pw_gecos; /* Honeywell login info */ char *pw_dir; /* home directory */ char *pw_shell; /* default shell */ time_t pw_expire; /* account expiration */ int pw_fields; /* internal: fields filled in */ }; ]] else ffi.cdef[[ struct passwd { char *pw_name; /* username */ char *pw_passwd; /* user password */ int pw_uid; /* user ID */ int pw_gid; /* group ID */ char *pw_gecos; /* user information */ char *pw_dir; /* home directory */ char *pw_shell; /* shell program */ }; ]] end ffi.cdef[[ uid_t getuid(); struct passwd *getpwuid(uid_t uid); struct passwd *getpwnam(const char *login); void endpwent(); struct passwd *getpwent(); void setpwent(); gid_t getgid(); struct group *getgrgid(gid_t gid); struct group *getgrnam(const char *group); struct group *getgrent(); void endgrent(); void setgrent(); ]] local function _getpw(uid) local pw = nil errno(0) if type(uid) == 'number' then pw = ffi.C.getpwuid(uid) elseif type(uid) == 'string' then pw = ffi.C.getpwnam(uid) else error("Bad type of uid (expected 'string'/'number')", 2) end return pw end local function _getgr(gid) local gr = nil errno(0) if type(gid) == 'number' then gr = ffi.C.getgrgid(gid) elseif type(gid) == 'string' then gr = ffi.C.getgrnam(gid) else error("Bad type of gid (expected 'string'/'number')", 2) end return gr end local pwgr_errstr = "get%s* failed [errno %d]: %s" local function getgr(gid) if gid == nil then gid = tonumber(ffi.C.getgid()) end local gr = _getgr(gid) if gr == nil then if errno() ~= 0 then error(pwgr_errstr:format('pw', errno(), errno.strerror()), 2) end return nil end local gr_mem, group_members = gr.gr_mem, {} local i = 0 while true do local member = gr_mem[i] if member == nil then break end 
table.insert(group_members, ffi.string(member)) i = i + 1 end local group = { id = tonumber(gr.gr_gid), name = ffi.string(gr.gr_name), members = group_members, } return group end local function getpw(uid) if uid == nil then uid = tonumber(ffi.C.getuid()) end local pw = _getpw(uid) if pw == nil then if errno() ~= 0 then error(pwgr_errstr:format('pw', errno(), errno.strerror()), 2) end return nil end local user = { name = ffi.string(pw.pw_name), id = tonumber(pw.pw_uid), group = getgr(pw.pw_gid), workdir = ffi.string(pw.pw_dir), shell = ffi.string(pw.pw_shell), } return user end local function getpwall() errno(0) ffi.C.setpwent() if errno() ~= 0 then return nil end local pws = {} while true do local pw = ffi.C.getpwent() if pw == nil then break end table.insert(pws, getpw(pw.pw_uid)) end ffi.C.endpwent() if errno() ~= 0 then return nil end return pws end local function getgrall() errno(0) ffi.C.setgrent() if errno() ~= 0 then return nil end local grs = {} while true do local gr = ffi.C.getgrent() if gr == nil then break end table.insert(grs, getpw(gr.gr_gid)) end ffi.C.endgrent() if errno() ~= 0 then return nil end return grs end return { getpw = getpw, getgr = getgr, getpwall = getpwall, getgrall = getgrall, } tarantool_1.9.1.26.g63eb81e3c/src/lua/log.lua0000664000000000000000000000673113306560010017043 0ustar rootroot-- log.lua -- local ffi = require('ffi') ffi.cdef[[ typedef void (*sayfunc_t)(int level, const char *filename, int line, const char *error, const char *format, ...); void say_set_log_level(int new_level); void say_set_log_format(enum say_format format); extern sayfunc_t _say; extern struct ev_loop; extern struct ev_signal; extern void say_logrotate(struct ev_loop *, struct ev_signal *, int); enum say_level { S_FATAL, S_SYSERROR, S_ERROR, S_CRIT, S_WARN, S_INFO, S_VERBOSE, S_DEBUG }; enum say_format { SF_PLAIN, SF_JSON }; pid_t log_pid; extern int log_level; extern int log_format; ]] local S_WARN = ffi.C.S_WARN local S_INFO = ffi.C.S_INFO local 
S_VERBOSE = ffi.C.S_VERBOSE local S_DEBUG = ffi.C.S_DEBUG local S_ERROR = ffi.C.S_ERROR local json = require("json").new() json.cfg{ encode_invalid_numbers = true, encode_load_metatables = true, encode_use_tostring = true, encode_invalid_as_nil = true, } local special_fields = { "file", "level", "pid", "line", "cord_name", "fiber_name", "fiber_id", "error_msg" } local function say(level, fmt, ...) if ffi.C.log_level < level then -- don't waste cycles on debug.getinfo() return end local type_fmt = type(fmt) local format = "%s" if select('#', ...) ~= 0 then local stat stat, fmt = pcall(string.format, fmt, ...) if not stat then error(fmt, 3) end elseif type_fmt == 'table' then -- ignore internal keys for _, field in ipairs(special_fields) do fmt[field] = nil end fmt = json.encode(fmt) if ffi.C.log_format == ffi.C.SF_JSON then -- indicate that message is already encoded in JSON format = "json" end elseif type_fmt ~= 'string' then fmt = tostring(fmt) end local debug = require('debug') local frame = debug.getinfo(3, "Sl") local line, file = 0, 'eval' if type(frame) == 'table' then line = frame.currentline or 0 file = frame.short_src or frame.src or 'eval' end ffi.C._say(level, file, line, nil, format, fmt) end local function say_closure(lvl) return function (fmt, ...) say(lvl, fmt, ...) 
end end local function log_rotate() ffi.C.say_logrotate(nil, nil, 0) end local function log_level(level) return ffi.C.say_set_log_level(level) end local function log_format(format_name) if format_name == "json" then ffi.C.say_set_log_format(ffi.C.SF_JSON) elseif format_name == "plain" then ffi.C.say_set_log_format(ffi.C.SF_PLAIN) else error("log_format: expected 'json' or 'plain'") end end local function log_pid() return tonumber(ffi.C.log_pid) end local compat_warning_said = false local compat_v16 = { logger_pid = function() if not compat_warning_said then compat_warning_said = true say(S_WARN, 'logger_pid() is deprecated, please use pid() instead') end return log_pid() end; } return setmetatable({ warn = say_closure(S_WARN); info = say_closure(S_INFO); verbose = say_closure(S_VERBOSE); debug = say_closure(S_DEBUG); error = say_closure(S_ERROR); rotate = log_rotate; pid = log_pid; level = log_level; log_format = log_format; }, { __index = compat_v16; }) tarantool_1.9.1.26.g63eb81e3c/src/lua/env.lua0000664000000000000000000000155213306560010017046 0ustar rootrootlocal ffi = require('ffi') local os = require('os') local errno = require('errno') ffi.cdef[[ extern char **environ; int setenv(const char *name, const char *value, int overwrite); int unsetenv(const char *name); ]] local environ = ffi.C.environ os.environ = function() if not environ then return nil end local r = {} local i = 0 while environ[i] ~= nil do local e = ffi.string(environ[i]) local eq = e:find('=') if eq then r[e:sub(1, eq - 1)] = e:sub(eq + 1) end i = i + 1 end return r end os.setenv = function(key, value) local rv = nil if value ~= nil then rv = ffi.C.setenv(key, value, 1) else rv = ffi.C.unsetenv(key) end if rv == -1 then error(string.format('error %d: %s', errno(), errno.errstring())) end end tarantool_1.9.1.26.g63eb81e3c/src/lua/string.lua0000664000000000000000000002621313306560010017565 0ustar rootrootlocal ffi = require('ffi') ffi.cdef[[ const char * memmem(const char *haystack, size_t 
haystack_len, const char *needle, size_t needle_len); int memcmp(const char *mem1, const char *mem2, size_t num); int isspace(int c); ]] local c_char_ptr = ffi.typeof('const char *') local memcmp = ffi.C.memcmp local memmem = ffi.C.memmem local isspace = ffi.C.isspace local err_string_arg = "bad argument #%d to '%s' (%s expected, got %s)" local function string_split_empty(inp, maxsplit) local p = c_char_ptr(inp) local p_end = p + #inp local rv = {} while true do -- skip the leading whitespaces while p < p_end and isspace(p[0]) ~= 0 do p = p + 1 end if p == p_end then break end if maxsplit <= 0 then table.insert(rv, ffi.string(p, p_end - p)) break end local chunk = p -- skip all non-whitespace characters while p < p_end and isspace(p[0]) == 0 do p = p + 1 end assert((p - chunk) > 0) table.insert(rv, ffi.string(chunk, p - chunk)) maxsplit = maxsplit - 1 end return rv end local function string_split_internal(inp, sep, maxsplit) local p = c_char_ptr(inp) local p_end = p + #inp local sep_len = #sep if sep_len == 0 then error(err_string_arg:format(2, 'string.split', 'non-empty string', "empty string"), 3) end local rv = {} while true do assert(p <= p_end) if maxsplit <= 0 or p == p_end then table.insert(rv, ffi.string(p, p_end - p)) break end local chunk = p p = memmem(p, p_end - p, sep, sep_len) if p == nil then table.insert(rv, ffi.string(chunk, p_end - chunk)) break end table.insert(rv, ffi.string(chunk, p - chunk)) p = p + sep_len maxsplit = maxsplit - 1 end return rv end local function string_split(inp, sep, max) if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.split', 'string', type(inp)), 2) end if sep ~= nil and type(sep) ~= 'string' then error(err_string_arg:format(2, 'string.split', 'string', type(sep)), 2) end if max ~= nil and (type(max) ~= 'number' or max < 0) then error(err_string_arg:format(3, 'string.split', 'positive integer', type(max)), 2) end max = max or 0xffffffff if not sep then return string_split_empty(inp, max) end return 
string_split_internal(inp, sep, max) end --- Left-justify string in a field of given width. -- Append "width - len(inp)" chars to given string. Input is never trucated. -- @function ljust -- @string inp the string -- @int width at least bytes to be returned -- @string[opt] char char of length 1 to fill with (" " by default) -- @returns result string local function string_ljust(inp, width, char) if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.ljust', 'string', type(inp)), 2) end if type(width) ~= 'number' or width < 0 then error(err_string_arg:format(2, 'string.ljust', 'positive integer', type(width)), 2) end if char ~= nil and (type(char) ~= 'string' or #char ~= 1) then error(err_string_arg:format(3, 'string.ljust', 'char', type(char)), 2) end char = char or " " local delta = width - #inp if delta < 0 then return inp end return inp .. char:rep(delta) end --- Right-justify string in a field of given width. -- Prepend "width - len(inp)" chars to given string. Input is never trucated. -- @function rjust -- @string inp the string -- @int width at least bytes to be returned -- @string[opt] char char of length 1 to fill with (" " by default) -- @returns result string local function string_rjust(inp, width, char) if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.rjust', 'string', type(inp)), 2) end if type(width) ~= 'number' or width < 0 then error(err_string_arg:format(2, 'string.rjust', 'positive integer', type(width)), 2) end if char ~= nil and (type(char) ~= 'string' or #char ~= 1) then error(err_string_arg:format(3, 'string.rjust', 'char', type(char)), 2) end char = char or " " local delta = width - #inp if delta < 0 then return inp end return char:rep(delta) .. inp end --- Center string in a field of given width. -- Prepend and append "(width - len(inp))/2" chars to given string. -- Input is never trucated. 
-- @function center -- @string inp the string -- @int width at least bytes to be returned -- @string[opt] char char of length 1 to fill with (" " by default) -- @returns result string local function string_center(inp, width, char) if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.center', 'string', type(inp)), 2) end if type(width) ~= 'number' or width < 0 then error(err_string_arg:format(2, 'string.center', 'positive integer', type(width)), 2) end if char ~= nil and (type(char) ~= 'string' or #char ~= 1) then error(err_string_arg:format(3, 'string.center', 'char', type(char)), 2) end char = char or " " local delta = width - #inp if delta < 0 then return inp end local pad_left = math.floor(delta / 2) local pad_right = delta - pad_left return char:rep(pad_left) .. inp .. char:rep(pad_right) end -- For now the best way to check, that string starts with sequence -- (with patterns disabled) is to cut line and check strings for equality --- Check that string (or substring) starts with given string -- Optionally restricting the matching with the given offsets -- @function startswith -- @string inp original string -- @string head the substring to check against -- @int[opt] _start start index of matching boundary -- @int[opt] _end end index of matching boundary -- @returns boolean local function string_startswith(inp, head, _start, _end) if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.startswith', 'string', type(inp)), 2) end if type(head) ~= 'string' then error(err_string_arg:format(2, 'string.startswith', 'string', type(head)), 2) end if _start ~= nil and type(_start) ~= 'number' then error(err_string_arg:format(3, 'string.startswith', 'integer', type(_start)), 2) end if _end ~= nil and type(_end) ~= 'number' then error(err_string_arg:format(4, 'string.startswith', 'integer', type(_end)), 2) end -- prepare input arguments (move negative values [offset from the end] to -- positive ones and/or assign default values) local head_len, 
inp_len = #head, #inp if _start == nil then _start = 1 elseif _start < 0 then _start = inp_len + _start + 1 if _start < 0 then _start = 0 end end if _end == nil or _end > inp_len then _end = inp_len elseif _end < 0 then _end = inp_len + _end + 1 if _end < 0 then _end = 0 end end -- check for degenerate case (interval lesser than input) if head_len == 0 then return true elseif _end - _start + 1 < head_len or _start > _end then return false end _start = _start - 1 _end = _start + head_len - 1 return memcmp(c_char_ptr(inp) + _start, c_char_ptr(head), head_len) == 0 end --- Check that string (or substring) ends with given string -- Optionally restricting the matching with the given offsets -- @function endswith -- @string inp original string -- @string tail the substring to check against -- @int[opt] _start start index of matching boundary -- @int[opt] _end end index of matching boundary -- @returns boolean local function string_endswith(inp, tail, _start, _end) local tail_len, inp_len = #tail, #inp if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.endswith', 'string', type(inp)), 2) end if type(tail) ~= 'string' then error(err_string_arg:format(2, 'string.endswith', 'string', type(inp)), 2) end if _start ~= nil and type(_start) ~= 'number' then error(err_string_arg:format(3, 'string.endswith', 'integer', type(inp)), 2) end if _end ~= nil and type(_end) ~= 'number' then error(err_string_arg:format(4, 'string.endswith', 'integer', type(inp)), 2) end -- prepare input arguments (move negative values [offset from the end] to -- positive ones and/or assign default values) if _start == nil then _start = 1 elseif _start < 0 then _start = inp_len + _start + 1 if _start < 0 then _start = 0 end end if _end == nil or _end > inp_len then _end = inp_len elseif _end < 0 then _end = inp_len + _end + 1 if _end < 0 then _end = 0 end end -- check for degenerate case (interval lesser than input) if tail_len == 0 then return true elseif _end - _start + 1 < tail_len or 
_start > _end then return false end _start = _end - tail_len return memcmp(c_char_ptr(inp) + _start, c_char_ptr(tail), tail_len) == 0 end local function string_hex(inp) if type(inp) ~= 'string' then error(err_string_arg:format(1, 'string.hex', 'string', type(inp)), 2) end local len = inp:len() * 2 local res = ffi.new('char[?]', len + 1) local uinp = ffi.cast('const unsigned char *', inp) for i = 0, inp:len() - 1 do ffi.C.snprintf(res + i * 2, 3, "%02x", ffi.cast('unsigned', uinp[i])) end return ffi.string(res, len) end local function string_strip(inp) if type(inp) ~= 'string' then error(err_string_arg:format(1, "string.strip", 'string', type(inp)), 2) end return (string.gsub(inp, "^%s*(.-)%s*$", "%1")) end local function string_lstrip(inp) if type(inp) ~= 'string' then error(err_string_arg:format(1, "string.lstrip", 'string', type(inp)), 2) end return (string.gsub(inp, "^%s*(.-)", "%1")) end local function string_rstrip(inp) if type(inp) ~= 'string' then error(err_string_arg:format(1, "string.rstrip", 'string', type(inp)), 2) end return (string.gsub(inp, "(.-)%s*$", "%1")) end -- It'll automatically set string methods, too. 
local string = require('string') string.split = string_split string.ljust = string_ljust string.rjust = string_rjust string.center = string_center string.startswith = string_startswith string.endswith = string_endswith string.hex = string_hex string.strip = string_strip string.lstrip = string_lstrip string.rstrip = string_rstrip tarantool_1.9.1.26.g63eb81e3c/src/lua/socket.lua0000664000000000000000000011761713306560010017560 0ustar rootroot-- socket.lua (internal file) local TIMEOUT_INFINITY = 500 * 365 * 86400 local LIMIT_INFINITY = 2147483647 local ffi = require('ffi') local boxerrno = require('errno') local internal = require('socket') local fiber = require('fiber') local fio = require('fio') local log = require('log') local buffer = require('buffer') local format = string.format ffi.cdef[[ struct gc_socket { const int fd; }; typedef uint32_t socklen_t; typedef ptrdiff_t ssize_t; int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen); int bind(int sockfd, const struct sockaddr *addr, socklen_t addrlen); ssize_t write(int fd, const char *octets, size_t len); ssize_t read(int fd, void *buf, size_t count); int listen(int fd, int backlog); int socket(int domain, int type, int protocol); int coio_close(int s); int shutdown(int s, int how); ssize_t send(int sockfd, const void *buf, size_t len, int flags); ssize_t recv(int s, void *buf, size_t len, int flags); int accept(int s, void *addr, void *addrlen); ssize_t sendto(int sockfd, const void *buf, size_t len, int flags, const struct sockaddr *dest_addr, socklen_t addrlen); int lbox_socket_local_resolve(const char *host, const char *port, struct sockaddr *addr, socklen_t *socklen); int lbox_socket_nonblock(int fd, int mode); int setsockopt(int s, int level, int iname, const void *opt, size_t optlen); int getsockopt(int s, int level, int iname, void *ptr, size_t *optlen); typedef struct { int active; int timeout; } linger_t; struct protoent { char *p_name; /* official protocol name */ char **p_aliases; 
/* alias list */ int p_proto; /* protocol number */ }; struct protoent *getprotobyname(const char *name); void *memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen); ]] local gc_socket_t = ffi.metatype(ffi.typeof('struct gc_socket'), { __gc = function (socket) if socket.fd < 0 then return end if ffi.C.coio_close(socket.fd) ~= 0 then log.error("socket: failed to close fd=%d on gc: %s", socket.fd, boxerrno.strerror()) end end }) local socket_mt local function check_socket(socket) local gc_socket = type(socket) == 'table' and socket._gc_socket if ffi.istype(gc_socket_t, gc_socket) then local fd = gc_socket.fd if fd >= 0 then return fd else error("attempt to use closed socket") end else local msg = "Usage: socket:method()" if socket ~= nil then msg = msg .. ", called with non-socket" end error(msg) end end local function make_socket(fd) local socket = { _gc_socket = ffi.new(gc_socket_t, { fd = fd }) } return setmetatable(socket, socket_mt) end local gc_socket_sentinel = ffi.new(gc_socket_t, { fd = -1 }) local function socket_close(socket) local fd = check_socket(socket) socket._errno = nil local r = ffi.C.coio_close(fd) -- .fd is const to prevent tampering ffi.copy(socket._gc_socket, gc_socket_sentinel, ffi.sizeof(gc_socket_t)) if r ~= 0 then socket._errno = boxerrno() return false end return true end local soname_mt = { __tostring = function(si) if si.host == nil and si.port == nil then return '' end if si.host == nil then return format('%s:%s', '0', tostring(si.port)) end if si.port == nil then return format('%s:%', tostring(si.host), 0) end return format('%s:%s', tostring(si.host), tostring(si.port)) end } local function socket_name(self) local fd = check_socket(self) local aka = internal.name(fd) if aka == nil then self._errno = boxerrno() return nil end self._errno = nil setmetatable(aka, soname_mt) return aka end local function socket_peer(self) local fd = check_socket(self) local peer = internal.peer(fd) if peer == nil then 
self._errno = boxerrno() return nil end self._errno = nil setmetatable(peer, soname_mt) return peer end local function socket_fd(self) return check_socket(self) end local function get_ivalue(table, key) if type(key) == 'number' then return key end return table[key] end local function get_iflags(table, flags) if flags == nil then return 0 end local res = 0 if type(flags) ~= 'table' then flags = { flags } end for i, f in pairs(flags) do if table[f] == nil then return nil end res = bit.bor(res, table[f]) end return res end local function getprotobyname(name) if type(name) == 'number' then return name elseif type(name) ~= 'string' then boxerrno(boxerrno.EINVAL) return nil end local num = internal.protocols[name] if num ~= nil then return num end local p = ffi.C.getprotobyname(name) if p == nil then boxerrno(boxerrno.EPROTOTYPE) return nil end num = p.p_proto -- update cache internal.protocols[name] = num return num end local function socket_errno(self) check_socket(self) if self['_errno'] == nil then return 0 else return self['_errno'] end end local function socket_error(self) check_socket(self) if self['_errno'] == nil then return nil else return boxerrno.strerror(self._errno) end end -- addrbuf is equivalent to struct sockaddr_storage local addrbuf = ffi.new('char[128]') -- enough to fit any address local addr = ffi.cast('struct sockaddr *', addrbuf) local addr_len = ffi.new('socklen_t[1]') local function socket_sysconnect(self, host, port) local fd = check_socket(self) self._errno = nil host = tostring(host) port = tostring(port) addr_len[0] = ffi.sizeof(addrbuf) local res = ffi.C.lbox_socket_local_resolve(host, port, addr, addr_len) if res == 0 then res = ffi.C.connect(fd, addr, addr_len[0]); if res == 0 then return true end end self._errno = boxerrno() return false end local function syswrite(self, charptr, size) local fd = check_socket(self) self._errno = nil local done = ffi.C.write(fd, charptr, size) if done < 0 then self._errno = boxerrno() return nil end 
return tonumber(done) end local function socket_syswrite(self, arg1, arg2) -- TODO: ffi.istype('char *', arg1) doesn't work for ffi.new('char[256]') if type(arg1) == 'cdata' and arg2 ~= nil then return syswrite(self, arg1, arg2) elseif type(arg1) == 'string' then return syswrite(self, arg1, #arg1) else error('Usage: socket:syswrite(data) or socket:syswrite(const char *, size)') end end local function sysread(self, charptr, size) local fd = check_socket(self) self._errno = nil local res = ffi.C.read(fd, charptr, size) if res < 0 then self._errno = boxerrno() return nil end return tonumber(res) end local function socket_sysread(self, arg1, arg2) -- TODO: ffi.istype('char *', arg1) doesn't work for ffi.new('char[256]') if type(arg1) == 'cdata' and arg2 ~= nil then return sysread(self, arg1, arg2) end local size = arg1 or buffer.READAHEAD local buf = buffer.IBUF_SHARED buf:reset() local p = buf:alloc(size) local res = sysread(self, p, size) if res then local str = ffi.string(p, res) buf:recycle() return str else buf:recycle() return res end end local function socket_nonblock(self, nb) local fd = check_socket(self) self._errno = nil local res if nb == nil then res = ffi.C.lbox_socket_nonblock(fd, 0x80) elseif nb then res = ffi.C.lbox_socket_nonblock(fd, 1) else res = ffi.C.lbox_socket_nonblock(fd, 0) end if res < 0 then self._errno = boxerrno() return nil end if res == 1 then return true else return false end end local function do_wait(self, what, timeout) local fd = check_socket(self) self._errno = nil timeout = timeout or TIMEOUT_INFINITY local res = internal.iowait(fd, what, timeout) if res == 0 then self._errno = boxerrno.ETIMEDOUT return 0 end return res end local function socket_readable(self, timeout) return do_wait(self, 1, timeout) ~= 0 end local function socket_writable(self, timeout) return do_wait(self, 2, timeout) ~= 0 end local function socket_wait(self, timeout) return do_wait(self, 'RW', timeout) end local function socket_listen(self, backlog) local fd = 
check_socket(self) self._errno = nil if backlog == nil then backlog = 256 end local res = ffi.C.listen(fd, backlog) if res < 0 then self._errno = boxerrno() return false end return true end local function socket_bind(self, host, port) local fd = check_socket(self) self._errno = nil host = tostring(host) port = tostring(port) addr_len[0] = ffi.sizeof(addrbuf) local res = ffi.C.lbox_socket_local_resolve(host, port, addr, addr_len) if res == 0 then res = ffi.C.bind(fd, addr, addr_len[0]); end if res == 0 then return true end self._errno = boxerrno() return false end local function socket_shutdown(self, how) local fd = check_socket(self) local hvariants = { ['R'] = 0, ['READ'] = 0, ['receive'] = 0, ['W'] = 1, ['WRITE'] = 1, ['send'] = 1, ['RW'] = 2, ['READ_WRITE'] = 2, ["both"] = 2, [0] = 0, [1] = 1, [2] = 2 } local ihow = hvariants[how] if ihow == nil then ihow = 2 end self._errno = nil if ffi.C.shutdown(fd, ihow) < 0 then self._errno = boxerrno() return false end return true end local function getsol(level) if type(level) == 'number' then return level elseif type(level) ~= 'string' then boxerrno(boxerrno.EINVAL) return nil elseif level == 'SOL_SOCKET' or level == 'socket' then return internal.SOL_SOCKET end level = (level:match('IPPROTO_([A-Z]*)') or level:match('SOL_([A-Z]*)') or level):lower() level = getprotobyname(level) if level == nil then return nil end return level end local function socket_setsockopt(self, level, name, value) local fd = check_socket(self) level = getsol(level) if level == nil then self._errno = boxerrno() return false end local info = get_ivalue(internal.SO_OPT[level] or {}, name) if info == nil then error(format("Unknown socket option name: %s", tostring(name))) end if not info.rw then error(format("Socket option %s is read only", name)) end self._errno = nil if type(value) == 'boolean' then if value then value = 1 else value = 0 end end if info.type == 1 then local value = ffi.new("int[1]", value) local res = ffi.C.setsockopt(fd, level, 
info.iname, value, ffi.sizeof('int')) if res < 0 then self._errno = boxerrno() return false end return true end if info.type == 2 then local res = ffi.C.setsockopt(fd, level, info.iname, value, ffi.sizeof('size_t')) if res < 0 then self._errno = boxerrno() return false end return true end if name == 'SO_LINGER' then error("Use s:linger(active[, timeout])") end error(format("Unsupported socket option: %s", name)) end local function socket_getsockopt(self, level, name) local fd = check_socket(self) level = getsol(level) if level == nil then self._errno = boxerrno() return false end local info = get_ivalue(internal.SO_OPT[level] or {}, name) if info == nil then error(format("Unknown socket option name: %s", tostring(name))) end self._errno = nil if info.type == 1 then local value = ffi.new("int[1]", 0) local len = ffi.new("size_t[1]", ffi.sizeof('int')) local res = ffi.C.getsockopt(fd, level, info.iname, value, len) if res < 0 then self._errno = boxerrno() return nil end if len[0] ~= 4 then error(format("Internal error: unexpected optlen: %d", len[0])) end return tonumber(value[0]) end if info.type == 2 then local value = ffi.new("char[256]", { 0 }) local len = ffi.new("size_t[1]", 256) local res = ffi.C.getsockopt(fd, level, info.iname, value, len) if res < 0 then self._errno = boxerrno() return nil end return ffi.string(value, tonumber(len[0])) end if name == 'SO_LINGER' then error("Use s:linger()") end error(format("Unsupported socket option: %s", name)) end local function socket_linger(self, active, timeout) local fd = check_socket(self) local level = internal.SOL_SOCKET local info = internal.SO_OPT[level].SO_LINGER self._errno = nil if active == nil then local value = ffi.new("linger_t[1]") local len = ffi.new("size_t[1]", 2 * ffi.sizeof('int')) local res = ffi.C.getsockopt(fd, level, info.iname, value, len) if res < 0 then self._errno = boxerrno() return nil end if value[0].active ~= 0 then active = true else active = false end return active, value[0].timeout 
end if timeout == nil then timeout = 0 end local iactive if active then iactive = 1 else iactive = 0 end local value = ffi.new("linger_t[1]", { { active = iactive, timeout = timeout } }) local len = 2 * ffi.sizeof('int') local res = ffi.C.setsockopt(fd, level, info.iname, value, len) if res < 0 then self._errno = boxerrno() return nil end return active, timeout end local function socket_accept(self) local server_fd = check_socket(self) self._errno = nil local client_fd, from = internal.accept(server_fd) if client_fd == nil then self._errno = boxerrno() return nil end local client = make_socket(client_fd) if not client:nonblock(true) then client:close() return end return client, from end local errno_is_transient = { [boxerrno.EAGAIN] = true; [boxerrno.EWOULDBLOCK] = true; [boxerrno.EINTR] = true; } local errno_is_fatal = { [boxerrno.EBADF] = true; [boxerrno.EINVAL] = true; [boxerrno.EOPNOTSUPP] = true; [boxerrno.ENOTSOCK] = true; } local function check_limit(self, limit) if self.rbuf:size() >= limit then return limit end return nil end local function check_delimiter(self, limit, eols) if limit == 0 then return 0 end local rbuf = self.rbuf if rbuf:size() == 0 then return nil end local shortest for i, eol in ipairs(eols) do local data = ffi.C.memmem(rbuf.rpos, rbuf:size(), eol, #eol) if data ~= nil then local len = ffi.cast('char *', data) - rbuf.rpos + #eol if shortest == nil or shortest > len then shortest = len end end end if shortest ~= nil and shortest <= limit then return shortest elseif limit <= rbuf:size() then return limit end return nil end local function read(self, limit, timeout, check, ...) assert(limit >= 0) limit = math.min(limit, LIMIT_INFINITY) local rbuf = self.rbuf if rbuf == nil then rbuf = buffer.ibuf() self.rbuf = rbuf end local len = check(self, limit, ...) 
if len ~= nil then self._errno = nil local data = ffi.string(rbuf.rpos, len) rbuf.rpos = rbuf.rpos + len return data end while timeout > 0 do local started = fiber.clock() assert(rbuf:size() < limit) local to_read = math.min(limit - rbuf:size(), buffer.READAHEAD) local data = rbuf:reserve(to_read) assert(rbuf:unused() >= to_read) local res = sysread(self, data, rbuf:unused()) if res == 0 then -- eof self._errno = nil local len = rbuf:size() local data = ffi.string(rbuf.rpos, len) rbuf.rpos = rbuf.rpos + len return data elseif res ~= nil then rbuf.wpos = rbuf.wpos + res local len = check(self, limit, ...) if len ~= nil then self._errno = nil local data = ffi.string(rbuf.rpos, len) rbuf.rpos = rbuf.rpos + len return data end elseif not errno_is_transient[self._errno] then return nil end if not socket_readable(self, timeout) then return nil end if timeout <= 0 then break end timeout = timeout - ( fiber.clock() - started ) end self._errno = boxerrno.ETIMEDOUT return nil end local function socket_read(self, opts, timeout) check_socket(self) timeout = timeout or TIMEOUT_INFINITY if type(opts) == 'number' then return read(self, opts, timeout, check_limit) elseif type(opts) == 'string' then return read(self, LIMIT_INFINITY, timeout, check_delimiter, { opts }) elseif type(opts) == 'table' then local chunk = opts.chunk or opts.size or LIMIT_INFINITY local delimiter = opts.delimiter or opts.line if delimiter == nil then return read(self, chunk, timeout, check_limit) elseif type(delimiter) == 'string' then return read(self, chunk, timeout, check_delimiter, { delimiter }) elseif type(delimiter) == 'table' then return read(self, chunk, timeout, check_delimiter, delimiter) end end error('Usage: s:read(delimiter|chunk|{delimiter = x, chunk = x}, timeout)') end local function socket_write(self, octets, timeout) check_socket(self) if timeout == nil then timeout = TIMEOUT_INFINITY end local s = ffi.cast('const char *', octets) local p = s local e = s + #octets if p == e then return 0 
end local started = fiber.clock() while true do local written = syswrite(self, p, e - p) if written == 0 then return p - s -- eof elseif written ~= nil then p = p + written assert(p <= e) if p == e then return e - s end elseif not errno_is_transient[self._errno] then return nil end timeout = timeout - (fiber.clock() - started) if timeout <= 0 or not socket_writable(self, timeout) then break end end end local function socket_send(self, octets, flags) local fd = check_socket(self) local iflags = get_iflags(internal.SEND_FLAGS, flags) self._errno = nil local res = ffi.C.send(fd, octets, string.len(octets), iflags) if res < 0 then self._errno = boxerrno() return nil end return tonumber(res) end local function socket_recv(self, size, flags) local fd = check_socket(self) local iflags = get_iflags(internal.SEND_FLAGS, flags) if iflags == nil then self._errno = boxerrno.EINVAL return nil end size = size or 512 self._errno = nil local buf = ffi.new("char[?]", size) local res = ffi.C.recv(fd, buf, size, iflags) if res == -1 then self._errno = boxerrno() return nil end return ffi.string(buf, res) end local function socket_recvfrom(self, size, flags) local fd = check_socket(self) local iflags = get_iflags(internal.SEND_FLAGS, flags) if iflags == nil then self._errno = boxerrno.EINVAL return nil end size = size or 512 self._errno = nil local res, from = internal.recvfrom(fd, size, iflags) if res == nil then self._errno = boxerrno() return nil end return res, from end local function socket_sendto(self, host, port, octets, flags) local fd = check_socket(self) local iflags = get_iflags(internal.SEND_FLAGS, flags) if iflags == nil then self._errno = boxerrno.EINVAL return nil end self._errno = nil if octets == nil or octets == '' then return true end host = tostring(host) port = tostring(port) octets = tostring(octets) addr_len[0] = ffi.sizeof(addrbuf) local res = ffi.C.lbox_socket_local_resolve(host, port, addr, addr_len) if res == 0 then res = ffi.C.sendto(fd, octets, 
string.len(octets), iflags, addr, addr_len[0]) end if res < 0 then self._errno = boxerrno() return nil end return tonumber(res) end local function socket_new(domain, stype, proto) local idomain = get_ivalue(internal.DOMAIN, domain) if idomain == nil then boxerrno(boxerrno.EINVAL) return nil end local itype = get_ivalue(internal.SO_TYPE, stype) if itype == nil then boxerrno(boxerrno.EINVAL) return nil end local iproto = getprotobyname(proto) if iproto == nil then return nil end local fd = ffi.C.socket(idomain, itype, iproto) if fd >= 0 then local socket = make_socket(fd) if not socket:nonblock(true) then socket:close() else return socket end end end local function getaddrinfo(host, port, timeout, opts) if type(timeout) == 'table' and opts == nil then opts = timeout timeout = TIMEOUT_INFINITY elseif timeout == nil then timeout = TIMEOUT_INFINITY end if port == nil then port = 0 end local ga_opts = {} if opts ~= nil then if opts.type ~= nil then local itype = get_ivalue(internal.SO_TYPE, opts.type) if itype == nil then boxerrno(boxerrno.EINVAL) return nil end ga_opts.type = itype end if opts.family ~= nil then local ifamily = get_ivalue(internal.DOMAIN, opts.family) if ifamily == nil then boxerrno(boxerrno.EINVAL) return nil end ga_opts.family = ifamily end if opts.protocol ~= nil then local p = getprotobyname(opts.protocol) if p == nil then return nil end ga_opts.protocol = p end if opts.flags ~= nil then ga_opts.flags = get_iflags(internal.AI_FLAGS, opts.flags) if ga_opts.flags == nil then boxerrno(boxerrno.EINVAL) return nil end end end return internal.getaddrinfo(host, port, timeout, ga_opts) end -- tcp connector local function socket_tcp_connect(s, address, port, timeout) local res = socket_sysconnect(s, address, port) if res then -- Even through the socket is nonblocking, if the server to which we -- are connecting is on the same host, the connect is normally -- established immediately when we call connect (Stevens UNP). 
return true end if s._errno ~= boxerrno.EINPROGRESS then return nil end -- Wait until the connection is established or ultimately fails. -- In either condition the socket becomes writable. To tell these -- conditions appart SO_ERROR must be consulted (man connect). if socket_writable(s, timeout) then s._errno = socket_getsockopt(s, 'SOL_SOCKET', 'SO_ERROR') else s._errno = boxerrno.ETIMEDOUT end if s._errno ~= 0 then return nil end -- Connected return true end local function tcp_connect(host, port, timeout) if host == 'unix/' then local s = socket_new('AF_UNIX', 'SOCK_STREAM', 0) if not s then -- Address family is not supported by the host return nil end if not socket_tcp_connect(s, host, port, timeout) then local save_errno = s._errno s:close() boxerrno(save_errno) return nil end boxerrno(0) return s end local timeout = timeout or TIMEOUT_INFINITY local stop = fiber.clock() + timeout local dns = getaddrinfo(host, port, timeout, { type = 'SOCK_STREAM', protocol = 'tcp' }) if dns == nil or #dns == 0 then boxerrno(boxerrno.EINVAL) return nil end for i, remote in pairs(dns) do timeout = stop - fiber.clock() if timeout <= 0 then boxerrno(boxerrno.ETIMEDOUT) return nil end local s = socket_new(remote.family, remote.type, remote.protocol) if s then if socket_tcp_connect(s, remote.host, remote.port, timeout) then boxerrno(0) return s end local save_errno = s:errno() s:close() boxerrno(save_errno) end end -- errno is set by socket_tcp_connect() return nil end local function tcp_server_handler(server, sc, from) fiber.name(format("%s/%s:%s", server.name, from.host, from.port), {truncate = true}) local status, message = pcall(server.handler, sc, from) sc:shutdown() sc:close() if not status then error(message) end end local function tcp_server_loop(server, s, addr) fiber.name(format("%s/%s:%s", server.name, addr.host, addr.port), {truncate = true}) log.info("started") while socket_readable(s) do local sc, from = socket_accept(s) if sc == nil then local errno = s._errno if not 
errno_is_transient[errno] then log.error('accept(%s) failed: %s', tostring(s), socket_error(s)) end if errno_is_fatal[errno] then break end else fiber.create(tcp_server_handler, server, sc, from) end end -- Socket was closed if addr.family == 'AF_UNIX' and addr.port then fio.unlink(addr.port) -- remove unix socket end log.info("stopped") end local function tcp_server_usage() error('Usage: socket.tcp_server(host, port, handler | opts)') end local function tcp_server_bind_addr(s, addr) if socket_bind(s, addr.host, addr.port) then return true end if addr.family ~= 'AF_UNIX' then return false end if boxerrno() ~= boxerrno.EADDRINUSE then return false end local save_errno = boxerrno() local sc = tcp_connect(addr.host, addr.port) if sc ~= nil then sc:close() boxerrno(save_errno) return false end if boxerrno() ~= boxerrno.ECONNREFUSED then boxerrno(save_errno) return false end log.info("tcp_server: remove dead UNIX socket: %s", addr.port) if not fio.unlink(addr.port) then log.warn("tcp_server: %s", boxerrno.strerror()) boxerrno(save_errno) return false end return socket_bind(s, addr.host, addr.port) end local function tcp_server_bind(host, port, prepare, timeout) timeout = timeout and tonumber(timeout) or TIMEOUT_INFINITY local dns if host == 'unix/' then dns = {{host = host, port = port, family = 'AF_UNIX', protocol = 0, type = 'SOCK_STREAM' }} else dns = getaddrinfo(host, port, timeout, { type = 'SOCK_STREAM', flags = 'AI_PASSIVE'}) if dns == nil then return nil end end for _, addr in ipairs(dns) do local s = socket_new(addr.family, addr.type, addr.protocol) if s ~= nil then local backlog if prepare then backlog = prepare(s) else socket_setsockopt(s, 'SOL_SOCKET', 'SO_REUSEADDR', 1) -- ignore error end if not tcp_server_bind_addr(s, addr) or not s:listen(backlog) then local save_errno = boxerrno() socket_close(s) boxerrno(save_errno) return nil end return s, addr end end -- DNS resolved successfully, but addresss family is not supported boxerrno(boxerrno.EAFNOSUPPORT) 
return nil end local function tcp_server(host, port, opts, timeout) local server = {} if type(opts) == 'function' then server.handler = opts elseif type(opts) == 'table' then if type(opts.handler) ~='function' or (opts.prepare ~= nil and type(opts.prepare) ~= 'function') then tcp_server_usage() end for k, v in pairs(opts) do server[k] = v end else tcp_server_usage() end server.name = server.name or 'server' local s, addr = tcp_server_bind(host, port, server.prepare, timeout) if not s then return nil end fiber.create(tcp_server_loop, server, s, addr) return s, addr end socket_mt = { __index = { close = socket_close; errno = socket_errno; error = socket_error; sysconnect = socket_sysconnect; syswrite = socket_syswrite; sysread = socket_sysread; nonblock = socket_nonblock; readable = socket_readable; writable = socket_writable; wait = socket_wait; listen = socket_listen; bind = socket_bind; shutdown = socket_shutdown; setsockopt = socket_setsockopt; getsockopt = socket_getsockopt; linger = socket_linger; accept = socket_accept; read = socket_read; write = socket_write; send = socket_send; recv = socket_recv; recvfrom = socket_recvfrom; sendto = socket_sendto; name = socket_name; peer = socket_peer; fd = socket_fd; }; __tostring = function(self) local fd = check_socket(self) local save_errno = self._errno local name = format("fd %d", fd) local aka = socket_name(self) if aka ~= nil then name = format("%s, aka %s:%s", name, aka.host, aka.port) end local peer = socket_peer(self) if peer ~= nil then name = format("%s, peer %s:%s", name, peer.host, peer.port) end self._errno = save_errno return name end, __serialize = function(self) -- Allow YAML, MsgPack and JSON to dump objects with sockets local fd = check_socket(self) return { fd = fd, peer = socket_peer(self), name = socket_name(self) } end } -------------------------------------------------------------------------------- -- Lua Socket Emulation 
-------------------------------------------------------------------------------- local lsocket_tcp_mt local lsocket_tcp_server_mt local lsocket_tcp_client_mt -- -- TCP Master Socket -- local function lsocket_tcp_tostring(self) local fd = check_socket(self) return string.format("tcp{master}: fd=%d", fd) end local function lsocket_tcp_close(self) if not socket_close(self) then return nil, socket_error(self) end return 1 end local function lsocket_tcp_getsockname(self) local aka = socket_name(self) if aka == nil then return nil, socket_error(self) end return aka.host, tostring(aka.port), aka.family:match("AF_(.*)"):lower() end local function lsocket_tcp_getpeername(self) local peer = socket_peer(self) if peer == nil then return nil, socket_error(self) end return peer.host, tostring(peer.port), peer.family:match("AF_(.*)"):lower() end local function lsocket_tcp_settimeout(self, value, mode) check_socket(self) self.timeout = value -- mode is effectively ignored return 1 end local function lsocket_tcp_setoption(self, option, value) local r if option == 'reuseaddr' then r = socket_setsockopt(self, 'socket', 'SO_REUSEADDR', value) elseif option == 'keepalive' then r = socket_setsockopt(self, 'socket', 'SO_KEEPALIVE', value) elseif option == 'linger' then value = type(value) == 'table' and value.on or value -- Sic: value.timeout is ignored r = socket_linger(self, value) elseif option == 'tcp-nodelay' then r = socket_setsockopt(self, 'tcp', 'TCP_NODELAY', value) else error(format("Unknown socket option name: %s", tostring(option))) end if not r then return nil, socket_error(self) end return 1 end local function lsocket_tcp_bind(self, address, port) if not socket_bind(self, address, port) then return nil, socket_error(self) end return 1 end local function lsocket_tcp_listen(self, backlog) if not socket_listen(self, backlog) then return nil, socket_error(self) end setmetatable(self, lsocket_tcp_server_mt) return 1 end local function lsocket_tcp_connect(self, host, port) 
check_socket(self) local deadline = fiber.clock() + (self.timeout or TIMEOUT_INFINITY) -- This function is broken by design local ga_opts = { family = 'AF_INET', type = 'SOCK_STREAM' } local timeout = deadline - fiber.clock() local dns = getaddrinfo(host, port, timeout, ga_opts) if dns == nil or #dns == 0 then self._errno = boxerrno.EINVAL return nil, socket_error(self) end for _, remote in ipairs(dns) do timeout = deadline - fiber.clock() if socket_tcp_connect(self, remote.host, remote.port, timeout) then return 1 end end return nil, socket_error(self) end lsocket_tcp_mt = { __index = { close = lsocket_tcp_close; getsockname = lsocket_tcp_getsockname; getpeername = lsocket_tcp_getpeername; settimeout = lsocket_tcp_settimeout; setoption = lsocket_tcp_setoption; bind = lsocket_tcp_bind; listen = lsocket_tcp_listen; connect = lsocket_tcp_connect; }; __tostring = lsocket_tcp_tostring; __serialize = lsocket_tcp_tostring; }; -- -- TCP Server Socket -- local function lsocket_tcp_server_tostring(self) local fd = check_socket(self) return string.format("tcp{server}: fd=%d", fd) end local function lsocket_tcp_accept(self) check_socket(self) local deadline = fiber.clock() + (self.timeout or TIMEOUT_INFINITY) repeat local client = socket_accept(self) if client then setmetatable(client, lsocket_tcp_client_mt) return client end local errno = socket_errno(self) if not errno_is_transient[errno] then break end until not socket_readable(self, deadline - fiber.clock()) return nil, socket_error(self) end lsocket_tcp_server_mt = { __index = { close = lsocket_tcp_close; getsockname = lsocket_tcp_getsockname; getpeername = lsocket_tcp_getpeername; settimeout = lsocket_tcp_settimeout; setoption = lsocket_tcp_setoption; accept = lsocket_tcp_accept; }; __tostring = lsocket_tcp_server_tostring; __serialize = lsocket_tcp_server_tostring; }; -- -- TCP Client Socket -- local function lsocket_tcp_client_tostring(self) local fd = check_socket(self) return string.format("tcp{client}: fd=%d", fd) 
end local function lsocket_tcp_receive(self, pattern, prefix) check_socket(self) prefix = prefix or '' local timeout = self.timeout or TIMEOUT_INFINITY local data if type(pattern) == 'number' then data = read(self, pattern, timeout, check_limit) if data == nil then return nil, socket_error(self) elseif #data < pattern then -- eof return nil, 'closed', prefix..data else return prefix..data end elseif pattern == "*l" or pattern == nil then data = read(self, LIMIT_INFINITY, timeout, check_delimiter, {"\n"}) if data == nil then return nil, socket_error(self) elseif #data > 0 and data:byte(#data) == 10 then -- remove '\n' return prefix..data:sub(1, #data - 1) else -- eof return nil, 'closed', prefix..data end elseif pattern == "*a" then local result = { prefix } local deadline = fiber.clock() + (self.timeout or TIMEOUT_INFINITY) repeat local data = socket_sysread(self) if data == nil then if not errno_is_transient[self._errno] then return nil, socket_error(self) end elseif data == '' then break else table.insert(result, data) end until not socket_readable(self, deadline - fiber.clock()) if #result == 1 then return nil, 'closed', table.concat(result) end return table.concat(result) else error("Usage: socket:receive(pattern, [, prefix])") end end local function lsocket_tcp_send(self, data, i, j) if i ~= nil then data = string.sub(data, i, j) end local sent = socket_write(self, data, self.timeout) if not sent then return nil, socket_error(self) end return (i or 1) + sent - 1 end local function lsocket_tcp_shutdown(self, how) if not socket_shutdown(self, how) then return nil, socket_error(self) end return 1 end lsocket_tcp_client_mt = { __index = { close = lsocket_tcp_close; getsockname = lsocket_tcp_getsockname; getpeername = lsocket_tcp_getpeername; settimeout = lsocket_tcp_settimeout; setoption = lsocket_tcp_setoption; receive = lsocket_tcp_receive; send = lsocket_tcp_send; shutdown = lsocket_tcp_shutdown; }; __tostring = lsocket_tcp_client_tostring; __serialize = 
lsocket_tcp_client_tostring; }; -- -- Unconnected tcp socket (tcp{master}) should not have receive() and -- send methods according to LuaSocket documentation[1]. Unfortunally, -- original implementation is buggy and doesn't match the documentation. -- Some modules (e.g. MobDebug) rely on this bug and attempt to invoke -- receive()/send() on unconnected sockets. -- [1]: http://w3.impa.br/~diego/software/luasocket/tcp.html -- lsocket_tcp_mt.__index.receive = lsocket_tcp_receive; lsocket_tcp_mt.__index.send = lsocket_tcp_send; -- -- TCP Constructor and Shortcuts -- local function lsocket_tcp() local s = socket_new('AF_INET', 'SOCK_STREAM', 'tcp') if not s then return nil, socket_error(self) end return setmetatable(s, lsocket_tcp_mt) end local function lsocket_connect(host, port) if host == nil or port == nil then error("Usage: luasocket.connect(host, port)") end local s = tcp_connect(host, port) if not s then return nil, boxerrno.strerror() end setmetatable(s, lsocket_tcp_client_mt) return s end local function lsocket_bind(host, port, backlog) if host == nil or port == nil then error("Usage: luasocket.bind(host, port [, backlog])") end local function prepare(s) return backlog end local s = tcp_server_bind(host, port, prepare) if not s then return nil, boxerrno.strerror() end return setmetatable(s, lsocket_tcp_server_mt) end -------------------------------------------------------------------------------- -- Module Definition -------------------------------------------------------------------------------- return setmetatable({ getaddrinfo = getaddrinfo, tcp_connect = tcp_connect, tcp_server = tcp_server, iowait = internal.iowait, internal = internal, }, { __call = function(self, ...) return socket_new(...) end; __index = { tcp = lsocket_tcp; connect = lsocket_connect; bind = lsocket_bind; } }) tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber.c0000664000000000000000000003575313306565107017034 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. 
* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/fiber.h" #include #include "lua/utils.h" #include "backtrace.h" #include #include #include void luaL_testcancel(struct lua_State *L) { if (fiber_is_cancelled()) { diag_set(FiberIsCancelled); luaT_error(L); } } /* {{{ fiber Lua library: access to Tarantool fibers * * Each fiber can be running, suspended or dead. * When a fiber is created (fiber.create()) it's * running. * * All fibers are part of the fiber registry, fiber. * This registry can be searched either by * fiber id (fid), which is numeric, or by fiber name, * which is a string. If there is more than one * fiber with the given name, the first fiber that * matches is returned. * * Once fiber chunk is done or calls "return", * the fiber is considered dead. 
Its carcass is put into * fiber pool, and can be reused when another fiber is * created. * * A runaway fiber can be stopped with fiber.cancel(). * fiber.cancel(), however, is advisory -- it works * only if the runaway fiber is calling fiber.testcancel() * once in a while. Most box.* hooks, such as box.delete() * or box.update(), are calling fiber.testcancel(). * * Thus a runaway fiber can really only become cuckoo * if it does a lot of computations and doesn't check * whether it's been cancelled (just don't do that). * * The other potential problem comes from * fibers which never get scheduled, because are subscribed * to or get no events. Such morphing fibers can be killed * with fiber.cancel(), since fiber.cancel() * sends an asynchronous wakeup event to the fiber. */ static const char *fiberlib_name = "fiber"; /** * @pre: stack top contains a table * @post: sets table field specified by name of the table on top * of the stack to a weak kv table and pops that weak table. */ static void lbox_create_weak_table(struct lua_State *L, const char *name) { lua_newtable(L); /* and a metatable */ lua_newtable(L); /* weak keys and values */ lua_pushstring(L, "kv"); /* pops 'kv' */ lua_setfield(L, -2, "__mode"); /* pops the metatable */ lua_setmetatable(L, -2); /* assigns and pops table */ lua_setfield(L, -2, name); /* gets memoize back. */ lua_getfield(L, -1, name); assert(! lua_isnil(L, -1)); } /** * Push a userdata for the given fiber onto Lua stack. */ static void lbox_pushfiber(struct lua_State *L, int fid) { /* * Use 'memoize' pattern and keep a single userdata for * the given fiber. This is important to not run __gc * twice for a copy of an attached fiber -- __gc should * not remove attached fiber's coro prematurely. 
*/ luaL_getmetatable(L, fiberlib_name); lua_getfield(L, -1, "memoize"); if (lua_isnil(L, -1)) { /* first access - instantiate memoize */ /* pop the nil */ lua_pop(L, 1); /* create memoize table */ lbox_create_weak_table(L, "memoize"); } /* Find out whether the fiber is already in the memoize table. */ lua_pushinteger(L, fid); lua_gettable(L, -2); if (lua_isnil(L, -1)) { /* no userdata for fiber created so far */ /* pop the nil */ lua_pop(L, 1); /* push the key back */ lua_pushinteger(L, fid); /* create a new userdata */ int *ptr = (int *) lua_newuserdata(L, sizeof(int)); *ptr = fid; luaL_getmetatable(L, fiberlib_name); lua_setmetatable(L, -2); /* memoize it */ lua_settable(L, -3); lua_pushinteger(L, fid); /* get it back */ lua_gettable(L, -2); } } static struct fiber * lbox_checkfiber(struct lua_State *L, int index) { uint32_t fid; if (lua_type(L, index) == LUA_TNUMBER) { fid = lua_tonumber(L, index); } else { fid = *(uint32_t *) luaL_checkudata(L, index, fiberlib_name); } struct fiber *f = fiber_find(fid); if (f == NULL) luaL_error(L, "the fiber is dead"); return f; } static int lbox_fiber_id(struct lua_State *L) { uint32_t fid; if (lua_gettop(L) == 0) fid = fiber()->fid; else fid = *(uint32_t *) luaL_checkudata(L, 1, fiberlib_name); lua_pushinteger(L, fid); return 1; } #ifdef ENABLE_BACKTRACE static int fiber_backtrace_cb(int frameno, void *frameret, const char *func, size_t offset, void *cb_ctx) { char buf[512]; int l = snprintf(buf, sizeof(buf), "#%-2d %p in ", frameno, frameret); if (func) snprintf(buf + l, sizeof(buf) - l, "%s+%zu", func, offset); else snprintf(buf + l, sizeof(buf) - l, "?"); struct lua_State *L = (struct lua_State*)cb_ctx; lua_pushnumber(L, frameno + 1); lua_pushstring(L, buf); lua_settable(L, -3); return 0; } #endif static int lbox_fiber_statof(struct fiber *f, void *cb_ctx, bool backtrace) { struct lua_State *L = (struct lua_State *) cb_ctx; lua_pushinteger(L, f->fid); lua_newtable(L); lua_pushliteral(L, "name"); lua_pushstring(L, 
fiber_name(f)); lua_settable(L, -3); lua_pushstring(L, "fid"); lua_pushnumber(L, f->fid); lua_settable(L, -3); lua_pushstring(L, "csw"); lua_pushnumber(L, f->csw); lua_settable(L, -3); lua_pushliteral(L, "memory"); lua_newtable(L); lua_pushstring(L, "used"); lua_pushnumber(L, region_used(&f->gc)); lua_settable(L, -3); lua_pushstring(L, "total"); lua_pushnumber(L, region_total(&f->gc) + f->stack_size + sizeof(struct fiber)); lua_settable(L, -3); lua_settable(L, -3); if (backtrace) { #ifdef ENABLE_BACKTRACE lua_pushstring(L, "backtrace"); lua_newtable(L); if (f != fiber()) backtrace_foreach(fiber_backtrace_cb, &f->ctx, L); lua_settable(L, -3); #endif /* ENABLE_BACKTRACE */ } lua_settable(L, -3); return 0; } #ifdef ENABLE_BACKTRACE static int lbox_fiber_statof_bt(struct fiber *f, void *cb_ctx) { return lbox_fiber_statof(f, cb_ctx, true); } #endif static int lbox_fiber_statof_nobt(struct fiber *f, void *cb_ctx) { return lbox_fiber_statof(f, cb_ctx, false); } /** * Return fiber statistics. */ static int lbox_fiber_info(struct lua_State *L) { #ifdef ENABLE_BACKTRACE bool do_backtrace = true; if (lua_istable(L, 1)) { lua_pushstring(L, "backtrace"); lua_gettable(L, 1); if (lua_isnil(L, -1)){ lua_pop(L, 1); lua_pushstring(L, "bt"); lua_gettable(L, 1); } if (!lua_isnil(L, -1)) do_backtrace = lua_toboolean(L, -1); lua_pop(L, 1); } if (do_backtrace) { lua_newtable(L); fiber_stat(lbox_fiber_statof_bt, L); } else #endif /* ENABLE_BACKTRACE */ { lua_newtable(L); fiber_stat(lbox_fiber_statof_nobt, L); } lua_createtable(L, 0, 1); lua_pushliteral(L, "mapping"); /* YAML will use block mode */ lua_setfield(L, -2, LUAL_SERIALIZE); lua_setmetatable(L, -2); return 1; } static int lua_fiber_run_f(va_list ap) { int result; int coro_ref = va_arg(ap, int); struct lua_State *L = va_arg(ap, struct lua_State *); result = luaT_call(L, lua_gettop(L) - 1, 0); /* Destroy local storage */ int storage_ref = (int)(intptr_t) fiber_get_key(fiber(), FIBER_KEY_LUA_STORAGE); if (storage_ref > 0) 
luaL_unref(L, LUA_REGISTRYINDEX, storage_ref); luaL_unref(L, LUA_REGISTRYINDEX, coro_ref); return result; } /** * Create, resume and detach a fiber * given the function and its arguments. */ static int lbox_fiber_create(struct lua_State *L) { if (lua_gettop(L) < 1 || !lua_isfunction(L, 1)) luaL_error(L, "fiber.create(function, ...): bad arguments"); if (fiber_checkstack()) luaL_error(L, "fiber.create(): out of fiber stack"); struct lua_State *child_L = lua_newthread(L); int coro_ref = luaL_ref(L, LUA_REGISTRYINDEX); struct fiber *f = fiber_new("lua", lua_fiber_run_f); if (f == NULL) { luaL_unref(L, LUA_REGISTRYINDEX, coro_ref); luaT_error(L); } /* Move the arguments to the new coro */ lua_xmove(L, child_L, lua_gettop(L)); /* XXX: 'fiber' is leaked if this throws a Lua error. */ lbox_pushfiber(L, f->fid); fiber_start(f, coro_ref, child_L); return 1; } /** * Get fiber status. * This follows the rules of Lua coroutine.status() function: * Returns the status of fibier, as a string: * - "running", if the fiber is running (that is, it called status); * - "suspended", if the fiber is suspended in a call to yield(), * or if it has not started running yet; * - "dead" if the fiber has finished its body function, or if it * has stopped with an error. */ static int lbox_fiber_status(struct lua_State *L) { struct fiber *f; if (lua_gettop(L)) { uint32_t fid = *(uint32_t *) luaL_checkudata(L, 1, fiberlib_name); f = fiber_find(fid); } else { f = fiber(); } const char *status; if (f == NULL || f->fid == 0) { /* This fiber is dead. */ status = "dead"; } else if (f == fiber()) { /* The fiber is the current running fiber. */ status = "running"; } else { /* None of the above: must be suspended. */ status = "suspended"; } lua_pushstring(L, status); return 1; } /** * Get or set fiber name. * With no arguments, gets or sets the current fiber * name. It's also possible to get/set the name of * another fiber. * Last argument can be a map with a single key: * {truncate = boolean}. 
If truncate is true, then a new fiber * name is truncated to a max possible fiber name length. * If truncate is false (or was not specified), then too long * new name raise error. */ static int lbox_fiber_name(struct lua_State *L) { struct fiber *f = fiber(); int name_index; int opts_index; int top = lua_gettop(L); if (lua_type(L, 1) == LUA_TUSERDATA) { f = lbox_checkfiber(L, 1); name_index = 2; opts_index = 3; } else { name_index = 1; opts_index = 2; } if (top == name_index || top == opts_index) { /* Set name. */ const char *name = luaL_checkstring(L, name_index); int name_len = strlen(name); if (top == opts_index && lua_istable(L, opts_index)) { lua_getfield(L, opts_index, "truncate"); /* Truncate the name if needed. */ if (lua_isboolean(L, -1) && lua_toboolean(L, -1) && name_len > FIBER_NAME_MAX) name_len = FIBER_NAME_MAX; lua_pop(L, 1); } if (name_len > FIBER_NAME_MAX) luaL_error(L, "Fiber name is too long"); fiber_set_name(f, name); return 0; } else { lua_pushstring(L, fiber_name(f)); return 1; } } static int lbox_fiber_storage(struct lua_State *L) { struct fiber *f = lbox_checkfiber(L, 1); int storage_ref = (int)(intptr_t) fiber_get_key(f, FIBER_KEY_LUA_STORAGE); if (storage_ref <= 0) { lua_newtable(L); /* create local storage on demand */ storage_ref = luaL_ref(L, LUA_REGISTRYINDEX); fiber_set_key(f, FIBER_KEY_LUA_STORAGE, (void *)(intptr_t) storage_ref); } lua_rawgeti(L, LUA_REGISTRYINDEX, storage_ref); return 1; } static int lbox_fiber_index(struct lua_State *L) { if (lua_gettop(L) < 2) return 0; if (lua_isstring(L, 2) && strcmp(lua_tostring(L, 2), "storage") == 0) return lbox_fiber_storage(L); /* Get value from metatable */ lua_getmetatable(L, 1); lua_pushvalue(L, 2); lua_gettable(L, -2); return 1; } /** * Yield to the sched fiber and sleep. * @param[in] amount of time to sleep (double) * * Only the current fiber can be made to sleep. */ static int lbox_fiber_sleep(struct lua_State *L) { if (! 
lua_isnumber(L, 1) || lua_gettop(L) != 1) luaL_error(L, "fiber.sleep(delay): bad arguments"); double delay = lua_tonumber(L, 1); fiber_sleep(delay); luaL_testcancel(L); return 0; } static int lbox_fiber_yield(struct lua_State *L) { fiber_sleep(0); luaL_testcancel(L); return 0; } static int lbox_fiber_self(struct lua_State *L) { lbox_pushfiber(L, fiber()->fid); return 1; } static int lbox_fiber_find(struct lua_State *L) { if (lua_gettop(L) != 1) luaL_error(L, "fiber.find(id): bad arguments"); int fid = lua_tonumber(L, -1); struct fiber *f = fiber_find(fid); if (f) lbox_pushfiber(L, f->fid); else lua_pushnil(L); return 1; } /** * Running and suspended fibers can be cancelled. * Zombie fibers can't. */ static int lbox_fiber_cancel(struct lua_State *L) { struct fiber *f = lbox_checkfiber(L, 1); fiber_cancel(f); /* * Check if we're ourselves cancelled. * This also implements cancel for the case when * f == fiber(). */ luaL_testcancel(L); return 0; } static int lbox_fiber_serialize(struct lua_State *L) { struct fiber *f = lbox_checkfiber(L, 1); lua_createtable(L, 0, 1); lua_pushinteger(L, f->fid); lua_setfield(L, -2, "id"); lua_pushstring(L, fiber_name(f)); lua_setfield(L, -2, "name"); lbox_fiber_status(L); lua_setfield(L, -2, "status"); return 1; } static int lbox_fiber_tostring(struct lua_State *L) { char buf[20]; struct fiber *f = lbox_checkfiber(L, 1); snprintf(buf, sizeof(buf), "fiber: %d", f->fid); lua_pushstring(L, buf); return 1; } /** * Check if this current fiber has been cancelled and * throw an exception if this is the case. */ static int lbox_fiber_testcancel(struct lua_State *L) { if (lua_gettop(L) != 0) luaL_error(L, "fiber.testcancel(): bad arguments"); luaL_testcancel(L); return 0; } static int lbox_fiber_wakeup(struct lua_State *L) { struct fiber *f = lbox_checkfiber(L, 1); /* * It's unsafe to wakeup fibers which don't expect * it. 
*/ if (f->flags & FIBER_IS_CANCELLABLE) fiber_wakeup(f); return 0; } static const struct luaL_Reg lbox_fiber_meta [] = { {"id", lbox_fiber_id}, {"name", lbox_fiber_name}, {"cancel", lbox_fiber_cancel}, {"status", lbox_fiber_status}, {"testcancel", lbox_fiber_testcancel}, {"__serialize", lbox_fiber_serialize}, {"__tostring", lbox_fiber_tostring}, {"wakeup", lbox_fiber_wakeup}, {"__index", lbox_fiber_index}, {NULL, NULL} }; static const struct luaL_Reg fiberlib[] = { {"info", lbox_fiber_info}, {"sleep", lbox_fiber_sleep}, {"yield", lbox_fiber_yield}, {"self", lbox_fiber_self}, {"id", lbox_fiber_id}, {"find", lbox_fiber_find}, {"kill", lbox_fiber_cancel}, {"wakeup", lbox_fiber_wakeup}, {"cancel", lbox_fiber_cancel}, {"testcancel", lbox_fiber_testcancel}, {"create", lbox_fiber_create}, {"status", lbox_fiber_status}, {"name", lbox_fiber_name}, {NULL, NULL} }; void tarantool_lua_fiber_init(struct lua_State *L) { luaL_register_module(L, fiberlib_name, fiberlib); lua_pop(L, 1); luaL_register_type(L, fiberlib_name, lbox_fiber_meta); } /* * }}} */ tarantool_1.9.1.26.g63eb81e3c/src/lua/digest.c0000664000000000000000000000602113306560010017172 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include "utils.h" #define PBKDF2_MAX_DIGEST_SIZE 128 unsigned char * SHA1internal(const unsigned char *d, size_t n, unsigned char *md) { static __thread unsigned char result[20]; SHA1_CTX ctx; SHA1Init(&ctx); SHA1Update(&ctx, d, n); SHA1Final(result, &ctx); if (md) memcpy(md, result, 20); return result; } static ssize_t digest_pbkdf2_f(va_list ap) { char *password = va_arg(ap, char *); size_t password_size = va_arg(ap, size_t); const unsigned char *salt = va_arg(ap, unsigned char *); size_t salt_size = va_arg(ap, size_t); unsigned char *digest = va_arg(ap, unsigned char *); int num_iterations = va_arg(ap, int); int digest_len = va_arg(ap, int); if (PKCS5_PBKDF2_HMAC(password, password_size, salt, salt_size, num_iterations, EVP_sha256(), digest_len, digest) == 0) { return -1; } return 0; } int lua_pbkdf2(lua_State *L) { const char *password = lua_tostring(L, 1); const char *salt = lua_tostring(L, 2); int num_iterations = lua_tointeger(L, 3); int digest_len = lua_tointeger(L, 4); unsigned char digest[PBKDF2_MAX_DIGEST_SIZE]; if (coio_call(digest_pbkdf2_f, password, strlen(password), salt, strlen(salt), digest, num_iterations, digest_len) < 0) { lua_pushnil(L); return 1; } lua_pushlstring(L, (char *) digest, digest_len); return 1; } void tarantool_lua_digest_init(struct lua_State *L) { static const struct luaL_Reg lua_digest_methods [] = { {"pbkdf2", lua_pbkdf2}, {NULL, NULL} }; 
luaL_register_module(L, "digest", lua_digest_methods); lua_pop(L, 1); }; tarantool_1.9.1.26.g63eb81e3c/src/lua/iconv.lua0000664000000000000000000000651413306560010017377 0ustar rootrootlocal ffi = require('ffi') local errno = require('errno') local buffer = require('buffer') ffi.cdef[[ typedef struct iconv *iconv_t; iconv_t iconv_open(const char *tocode, const char *fromcode); void iconv_close(iconv_t cd); size_t iconv(iconv_t cd, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft); /* * add prefix 'lib' under FreeBSD */ iconv_t libiconv_open(const char *tocode, const char *fromcode); void libiconv_close(iconv_t cd); size_t libiconv(iconv_t cd, const char **inbuf, size_t *inbytesleft, char **outbuf, size_t *outbytesleft); ]] local iconv_t = ffi.typeof('struct iconv') local char_ptr_arr_t = ffi.typeof('char *[1]') local cchar_ptr_arr_t = ffi.typeof('const char *[1]') local cchar_ptr_t = ffi.typeof('const char *') local size_t_arr_t = ffi.typeof('size_t [1]') local _iconv_open local _iconv_close local _iconv -- To fix #3073, BSD iconv implementation is not fully -- compatible with iconv, so use external iconv.so lib if jit.os == 'BSD' then _iconv_open = ffi.C.libiconv_open _iconv_close = ffi.C.libiconv_close _iconv = ffi.C.libiconv else _iconv_open = ffi.C.iconv_open _iconv_close = ffi.C.iconv_close _iconv = ffi.C.iconv end local E2BIG = errno['E2BIG'] local EINVAL = errno['EINVAL'] local EILSEQ = errno['EILSEQ'] local BUF_SIZE = 64 local conv_rv_error = ffi.cast('void *', -1) local function iconv_convert(iconv, data) if not ffi.istype(iconv_t, iconv) then error("Usage: iconv:convert(data: string)") end local data_len = data:len() local data_ptr = cchar_ptr_arr_t(cchar_ptr_t(data)) local data_left = size_t_arr_t(data_len) -- prepare at lease BUF_SIZE and at most data_len bytes in shared buffer local output_len = data_len >= BUF_SIZE and data_len or BUF_SIZE local buf = buffer.IBUF_SHARED; local buf_ptr = char_ptr_arr_t() local buf_left = 
size_t_arr_t() buf:reset() while data_left[0] > 0 do buf_ptr[0] = buf:reserve(output_len) buf_left[0] = buf:unused() local res = _iconv(iconv, data_ptr, data_left, buf_ptr, buf_left) if res == ffi.cast('size_t', -1) and errno() ~= E2BIG then _iconv(iconv, nil, nil, nil, nil) if errno() == EINVAL then error('Invalid multibyte sequence') end if errno() == EILSEQ then error('Incomplete multibyte sequence') end error('Unknown conversion error: ' .. errno.strerror()) end buf:alloc(buf:unused() - buf_left[0]) end -- iconv function sets cd's conversion state to the initial state _iconv(iconv, nil, nil, nil, nil) local result = ffi.string(buf.rpos, buf:size()) buf:reset() return result end local iconv_mt = { __call = iconv_convert, __gc = _iconv_close, __tostring = function(iconv) return string.format("iconv: %p", iconv) end } ffi.metatype(iconv_t, iconv_mt) local function iconv_new(to, from) if type(to) ~= 'string' or type(from) ~= 'string' then error('Usage: iconv.new("CP1251", "KOI8-R")') end local iconv = _iconv_open(to, from) if iconv == conv_rv_error then error('iconv: '..errno.strerror()) end return iconv; end return { new = iconv_new, } tarantool_1.9.1.26.g63eb81e3c/src/lua/digest.lua0000664000000000000000000001663313306560010017543 0ustar rootroot-- digest.lua (internal file) local ffi = require('ffi') local crypto = require('crypto') local bit = require('bit') ffi.cdef[[ /* internal implementation */ unsigned char *SHA1internal(const unsigned char *d, size_t n, unsigned char *md); /* from libc */ int snprintf(char *str, size_t size, const char *format, ...); typedef uint32_t (*crc32_func)(uint32_t crc, const unsigned char *buf, unsigned int len); extern int32_t guava(int64_t state, int32_t buckets); extern crc32_func crc32_calc; /* base64 */ int base64_bufsize(int binsize, int options); int base64_decode(const char *in_base64, int in_len, char *out_bin, int out_len); int base64_encode(const char *in_bin, int in_len, char *out_base64, int out_len, int options); /* 
random */ void random_bytes(char *, size_t); /* from third_party/PMurHash.h */ void PMurHash32_Process(uint32_t *ph1, uint32_t *pcarry, const void *key, int len); uint32_t PMurHash32_Result(uint32_t h1, uint32_t carry, uint32_t total_length); uint32_t PMurHash32(uint32_t seed, const void *key, int len); ]] -- @sa base64.h local BASE64_NOPAD = 1 local BASE64_NOWRAP = 2 local BASE64_URLSAFE = 7 local digest_shortcuts = { sha = 'SHA', sha224 = 'SHA224', sha256 = 'SHA256', sha384 = 'SHA384', sha512 = 'SHA512', md5 = 'MD5', md4 = 'MD4', } local internal = require("digest") local PMurHash local PMurHash_methods = { update = function(self, str) if type(str) ~= 'string' then error("Usage: murhash:update(string)") end ffi.C.PMurHash32_Process(self.seed, self.value, str, string.len(str)) self.total_length = self.total_length + string.len(str) end, result = function(self) return ffi.C.PMurHash32_Result(self.seed[0], self.value[0], self.total_length) end, clear = function(self) self.seed[0] = self.default_seed self.total_length = 0 self.value[0] = 0 end, copy = function(self) local new_self = PMurHash.new() new_self.seed[0] = self.seed[0] new_self.value[0] = self.value[0] new_self.total_length = self.total_length return new_self end } PMurHash = { default_seed = 13, new = function(opts) opts = opts or {} local self = setmetatable({}, { __index = PMurHash_methods }) self.default_seed = (opts.seed or PMurHash.default_seed) self.seed = ffi.new("int[1]", self.default_seed) self.value = ffi.new("int[1]", 0) self.total_length = 0 return self end } setmetatable(PMurHash, { __call = function(self, str) if type(str) ~= 'string' then error("Usage: digest.murhash(string)") end return ffi.C.PMurHash32(PMurHash.default_seed, str, string.len(str)) end }) local CRC32 local CRC32_methods = { update = function(self, str) if type(str) ~= 'string' then error("Usage crc32:update(string)") end self.value = ffi.C.crc32_calc(self.value, str, string.len(str)) end, result = function(self) return 
self.value end, clear = function(self) self.value = CRC32.crc_begin end, copy = function(self) local new_self = CRC32.new() new_self.value = self.value return new_self end } CRC32 = { crc_begin = 4294967295, new = function() local self = setmetatable({}, { __index = CRC32_methods }) self.value = CRC32.crc_begin return self end } setmetatable(CRC32, { __call = function(self, str) if type(str) ~= 'string' then error("Usage digest.crc32(string)") end return ffi.C.crc32_calc(CRC32.crc_begin, str, string.len(str)) end }) local pbkdf2 = function(pass, salt, iters, digest_len) if type(pass) ~= 'string' or type(salt) ~= 'string' then error("Usage: digest.pbkdf2(pass, salt[,iters][,digest_len])") end if iters and type(iters) ~= 'number' then error("iters must be a number") end if digest_len and type(digest_len) ~= 'number' then error("digest_len must be a number") end iters = iters or 100000 digest_len = digest_len or 128 if digest_len > 128 then error("too big digest size") end return internal.pbkdf2(pass, salt, iters, digest_len) end local m = { base64_encode = function(bin, options) if type(bin) ~= 'string' or options ~= nil and type(options) ~= 'table' then error('Usage: digest.base64_encode(string[, table])') end local mask = 0 if options ~= nil then if options.urlsafe then mask = bit.bor(mask, BASE64_URLSAFE) end if options.nopad then mask = bit.bor(mask, BASE64_NOPAD) end if options.nowrap then mask = bit.bor(mask, BASE64_NOWRAP) end end local blen = #bin local slen = ffi.C.base64_bufsize(blen, mask) local str = ffi.new('char[?]', slen) local len = ffi.C.base64_encode(bin, blen, str, slen, mask) return ffi.string(str, len) end, base64_decode = function(str) if type(str) ~= 'string' then error('Usage: digest.base64_decode(string)') end local slen = #str local blen = math.ceil(slen * 3 / 4) local bin = ffi.new('char[?]', blen) local len = ffi.C.base64_decode(str, slen, bin, blen) return ffi.string(bin, len) end, crc32 = CRC32, crc32_update = function(crc, str) if 
type(str) ~= 'string' then error("Usage: digest.crc32_update(string)") end return ffi.C.crc32_calc(tonumber(crc), str, string.len(str)) end, sha1 = function(str) if type(str) ~= 'string' then error("Usage: digest.sha1(string)") end local r = ffi.C.SHA1internal(str, #str, nil) return ffi.string(r, 20) end, sha1_hex = function(str) if type(str) ~= 'string' then error("Usage: digest.sha1_hex(string)") end local r = ffi.C.SHA1internal(str, #str, nil) return string.hex(ffi.string(r, 20)) end, guava = function(state, buckets) return ffi.C.guava(state, buckets) end, urandom = function(n) if n == nil then error('Usage: digest.urandom(len)') end local buf = ffi.new('char[?]', n) ffi.C.random_bytes(buf, n) return ffi.string(buf, n) end, murmur = PMurHash, pbkdf2 = pbkdf2, pbkdf2_hex = function(pass, salt, iters, digest_len) if type(pass) ~= 'string' or type(salt) ~= 'string' then error("Usage: digest.pbkdf2_hex(pass, salt)") end return string.hex(pbkdf2(pass, salt, iters, digest_len)) end } for digest, name in pairs(digest_shortcuts) do m[digest] = function (str) return crypto.digest[digest](str) end m[digest .. '_hex'] = function (str) if type(str) ~= 'string' then error('Usage: digest.'..digest..'_hex(string)') end return string.hex(crypto.digest[digest](str)) end end m['aes256cbc'] = { encrypt = function (str, key, iv) return crypto.cipher.aes256.cbc.encrypt(str, key, iv) end, decrypt = function (str, key, iv) return crypto.cipher.aes256.cbc.decrypt(str, key, iv) end } return m tarantool_1.9.1.26.g63eb81e3c/src/lua/fio.lua0000664000000000000000000002716113306560010017037 0ustar rootroot-- fio.lua (internal file) local fio = require('fio') local ffi = require('ffi') local buffer = require('buffer') ffi.cdef[[ int umask(int mask); char *dirname(char *path); int chdir(const char *path); ]] local const_char_ptr_t = ffi.typeof('const char *') local internal = fio.internal fio.internal = nil local function sprintf(fmt, ...) if select('#', ...) 
== 0 then return fmt end return string.format(fmt, ...) end local fio_methods = {} -- read() -> str -- read(buf) -> len -- read(size) -> str -- read(buf, size) -> len fio_methods.read = function(self, buf, size) local tmpbuf if (not ffi.istype(const_char_ptr_t, buf) and buf == nil) or (ffi.istype(const_char_ptr_t, buf) and size == nil) then local st, err = self:stat() if st == nil then return nil, err end size = st.size end if not ffi.istype(const_char_ptr_t, buf) then size = buf or size tmpbuf = buffer.ibuf() buf = tmpbuf:reserve(size) end local res, err = internal.read(self.fh, buf, size) if res == nil then if tmpbuf ~= nil then tmpbuf:recycle() end return nil, err end if tmpbuf ~= nil then tmpbuf:alloc(res) res = ffi.string(tmpbuf.rpos, tmpbuf:size()) tmpbuf:recycle() end return res end -- write(str) -- write(buf, len) fio_methods.write = function(self, data, len) if not ffi.istype(const_char_ptr_t, data) then data = tostring(data) len = #data end local res, err = internal.write(self.fh, data, len) if err ~= nil then return false, err end return res >= 0 end -- pwrite(str, offset) -- pwrite(buf, len, offset) fio_methods.pwrite = function(self, data, len, offset) if not ffi.istype(const_char_ptr_t, data) then data = tostring(data) offset = len len = #data end local res, err = internal.pwrite(self.fh, data, len, offset) if err ~= nil then return false, err end return res >= 0 end -- pread(size, offset) -> str -- pread(buf, size, offset) -> len fio_methods.pread = function(self, buf, size, offset) local tmpbuf if not ffi.istype(const_char_ptr_t, buf) then offset = size size = buf tmpbuf = buffer.IBUF_SHARED tmpbuf:reset() buf = tmpbuf:reserve(size) end local res, err = internal.pread(self.fh, buf, size, offset) if res == nil then if tmpbuf ~= nil then tmpbuf:recycle() end return nil, err end if tmpbuf ~= nil then tmpbuf:alloc(res) res = ffi.string(tmpbuf.rpos, tmpbuf:size()) tmpbuf:recycle() end return res end fio_methods.truncate = function(self, length) if length 
== nil then length = 0 end return internal.ftruncate(self.fh, length) end fio_methods.seek = function(self, offset, whence) if whence == nil then whence = 'SEEK_SET' end if type(whence) == 'string' then if fio.c.seek[whence] == nil then error(sprintf("Unknown whence: %s", whence)) end whence = fio.c.seek[whence] else whence = tonumber(whence) end local res = internal.lseek(self.fh, tonumber(offset), whence) return tonumber(res) end fio_methods.close = function(self) local res, err = internal.close(self.fh) self.fh = -1 if err ~= nil then return false, err end return res end fio_methods.fsync = function(self) return internal.fsync(self.fh) end fio_methods.fdatasync = function(self) return internal.fdatasync(self.fh) end fio_methods.stat = function(self) return internal.fstat(self.fh) end local fio_mt = { __index = fio_methods } fio.open = function(path, flags, mode) local iflag = 0 local imode = 0 if type(path) ~= 'string' then error("Usage open(path[, flags[, mode]])") end if type(flags) ~= 'table' then flags = { flags } end if type(mode) ~= 'table' then mode = { mode or (bit.band(0x1FF, fio.umask())) } end for _, flag in pairs(flags) do if type(flag) == 'number' then iflag = bit.bor(iflag, flag) else if fio.c.flag[ flag ] == nil then error(sprintf("Unknown flag: %s", flag)) end iflag = bit.bor(iflag, fio.c.flag[ flag ]) end end for _, m in pairs(mode) do if type(m) == 'string' then if fio.c.mode[m] == nil then error(sprintf("Unknown mode: %s", m)) end imode = bit.bor(imode, fio.c.mode[m]) else imode = bit.bor(imode, tonumber(m)) end end local fh, err = internal.open(tostring(path), iflag, imode) if err ~= nil then return nil, err end fh = { fh = fh } setmetatable(fh, fio_mt) return fh end fio.pathjoin = function(...) local i, path = 1, nil local len = select('#', ...) while i <= len do local sp = select(i, ...) 
if sp == nil then error("Undefined path part "..i) end sp = tostring(sp) if sp ~= '' then path = sp break else i = i + 1 end end if path == nil then return '.' end i = i + 1 while i <= len do local sp = select(i, ...) if sp == nil then error("Undefined path part") end sp = tostring(sp) if sp ~= '' then path = path .. '/' .. sp end i = i + 1 end path = path:gsub('/+', '/') if path ~= '/' then path = path:gsub('/$', '') end return path end fio.basename = function(path, suffix) if type(path) ~= 'string' then error("Usage fio.basename(path[, suffix])") end path = tostring(path) path = string.gsub(path, '.*/', '') if suffix ~= nil then suffix = tostring(suffix) if #suffix > 0 then suffix = string.gsub(suffix, '(.)', '[%1]') path = string.gsub(path, suffix, '') end end return path end fio.dirname = function(path) if type(path) ~= 'string' then error("Usage fio.dirname(path)") end path = ffi.new('char[?]', #path + 1, path) return ffi.string(ffi.C.dirname(path)) end fio.umask = function(umask) if umask == nil then local old = ffi.C.umask(0) ffi.C.umask(old) return old end umask = tonumber(umask) return ffi.C.umask(tonumber(umask)) end fio.abspath = function(path) -- following established conventions of fio module: -- letting nil through and converting path to string if path == nil then error("Usage fio.abspath(path)") end path = path local joined_path = '' local path_tab = {} if string.sub(path, 1, 1) == '/' then joined_path = path else joined_path = fio.pathjoin(fio.cwd(), path) end for sp in string.gmatch(joined_path, '[^/]+') do if sp == '..' then table.remove(path_tab) elseif sp ~= '.' then table.insert(path_tab, sp) end end return '/' .. 
table.concat(path_tab, '/') end fio.chdir = function(path) if type(path)~='string' then error("Usage: fio.chdir(path)") end return ffi.C.chdir(path) == 0 end fio.listdir = function(path) if type(path) ~= 'string' then error("Usage: fio.listdir(path)") end local str, err = internal.listdir(path) if err ~= nil then return nil, string.format("can't listdir %s: %s", path, err) end local t = {} if str == "" then return t end local names = string.split(str, "\n") for i, name in ipairs(names) do table.insert(t, name) end return t end fio.mktree = function(path, mode) if type(path) ~= "string" then error("Usage: fio.mktree(path[, mode])") end path = fio.abspath(path) local path = string.gsub(path, '^/', '') local dirs = string.split(path, "/") if #dirs == 1 then return fio.mkdir(path, mode) end local st, err local current_dir = "/" for i, dir in ipairs(dirs) do current_dir = fio.pathjoin(current_dir, dir) if not fio.stat(current_dir) then st, err = fio.mkdir(current_dir, mode) if err ~= nil then return false, "Error create dir " .. current_dir .. 
err end end end return true end fio.rmtree = function(path) if type(path) ~= 'string' then error("Usage: fio.rmtree(path)") end local status, err path = fio.abspath(path) local ls, err = fio.listdir(path) if err ~= nil then return nil, err end for i, f in ipairs(ls) do local tmppath = fio.pathjoin(path, f) local st = fio.stat(tmppath) if st and st:is_dir() then st, err = fio.rmtree(tmppath) if err ~= nil then return nil, err end end end status, err = fio.rmdir(path) if err ~= nil then return false, string.format("failed to remove %s: %s", path, err) end return true end fio.copyfile = function(from, to) if type(from) ~= 'string' or type(to) ~= 'string' then error('Usage: fio.copyfile(from, to)') end local st = fio.stat(to) if st and st:is_dir() then to = fio.pathjoin(to, fio.basename(from)) end local _, err = internal.copyfile(from, to) if err ~= nil then return false, string.format("failed to copy %s to %s: %s", from, to, err) end return true end fio.copytree = function(from, to) if type(from) ~= 'string' or type(to) ~= 'string' then error('Usage: fio.copytree(from, to)') end local status, reason local st = fio.stat(from) if not st then return false, string.format("Directory %s does not exist", from) end if not st:is_dir() then return false, errno.strerror(errno.ENOTDIR) end local ls, err = fio.listdir(from) if err ~= nil then return false, err end -- create tree of destination status, reason = fio.mktree(to) if reason ~= nil then return false, reason end for i, f in ipairs(ls) do local ffrom = fio.pathjoin(from, f) local fto = fio.pathjoin(to, f) local st = fio.lstat(ffrom) if st and st:is_dir() then status, reason = fio.copytree(ffrom, fto) if reason ~= nil then return false, reason end end if st:is_reg() then status, reason = fio.copyfile(ffrom, fto) if reason ~= nil then return false, reason end end if st:is_link() then local link_to, reason = fio.readlink(ffrom) if reason ~= nil then return false, reason end status, reason = fio.symlink(link_to, fto) if reason 
~= nil then return false, "can't create symlink in place of existing file "..fto end end end return true end fio.path = {} fio.path.is_file = function(filename) local fs = fio.stat(filename) return fs ~= nil and fs:is_reg() or false end fio.path.is_link = function(filename) local fs = fio.lstat(filename) return fs ~= nil and fs:is_link() or false end fio.path.is_dir = function(filename) local fs = fio.stat(filename) return fs ~= nil and fs:is_dir() or false end fio.path.exists = function(filename) return fio.stat(filename) ~= nil end fio.path.lexists = function(filename) return fio.lstat(filename) ~= nil end return fio tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber.h0000664000000000000000000000342313306560010017012 0ustar rootroot#ifndef TARANTOOL_LUA_FIBER_H_INCLUDED #define TARANTOOL_LUA_FIBER_H_INCLUDED /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; /** * Initialize box.fiber system */ void tarantool_lua_fiber_init(struct lua_State *L); void luaL_testcancel(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_FIBER_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/lua/help.lua0000664000000000000000000000225713306565107017225 0ustar rootrootlocal doc = require('help.en_US') help = {} help[1] = {} help[1] = "To get help, see the Tarantool manual at http://tarantool.org/doc/" help[2] = "To start the interactive Tarantool tutorial, type 'tutorial()'" tutorial = {} tutorial[1] = help[2] local help_function_data = {}; local help_object_data = {} local function help_call(table, param) return help end setmetatable(help, { __call = help_call }) local screen_id = 1; local function tutorial_call(table, action) if action == 'start' then screen_id = 1; elseif action == 'next' or action == 'more' then screen_id = screen_id + 1 elseif action == 'prev' then screen_id = screen_id - 1 elseif type(action) == 'number' and action % 1 == 0 then screen_id = tonumber(action) elseif action ~= nil then error('Usage: tutorial("start" | "next" | "prev" | 1 .. '.. 
#doc.tutorial..')') end if screen_id < 1 then screen_id = 1 elseif screen_id > #doc.tutorial then screen_id = #doc.tutorial end return doc.tutorial[screen_id] end setmetatable(tutorial, { __call = tutorial_call }) return { help = help; tutorial = tutorial; } tarantool_1.9.1.26.g63eb81e3c/src/lua/buffer.lua0000664000000000000000000000737213306560010017535 0ustar rootroot-- buffer.lua (internal file) local ffi = require('ffi') local READAHEAD = 16320 ffi.cdef[[ struct slab_cache; struct slab_cache * tarantool_lua_slab_cache(); extern struct ibuf *tarantool_lua_ibuf; struct ibuf { struct slab_cache *slabc; char *buf; /** Start of input. */ char *rpos; /** End of useful input */ char *wpos; /** End of ibuf. */ char *epos; size_t start_capacity; }; void ibuf_create(struct ibuf *ibuf, struct slab_cache *slabc, size_t start_capacity); void ibuf_destroy(struct ibuf *ibuf); void ibuf_reinit(struct ibuf *ibuf); void * ibuf_reserve_slow(struct ibuf *ibuf, size_t size); ]] local builtin = ffi.C local ibuf_t = ffi.typeof('struct ibuf') local function errorf(s, ...) 
error(string.format(s, ...)) end local function checkibuf(buf, method) if not ffi.istype(ibuf_t, buf) then errorf('Attempt to call method without object, use ibuf:%s()', method) end end local function ibuf_capacity(buf) checkibuf(buf, 'capacity') return tonumber(buf.epos - buf.buf) end local function ibuf_pos(buf) checkibuf(buf, 'pos') return tonumber(buf.rpos - buf.buf) end local function ibuf_used(buf) checkibuf(buf, 'size') return tonumber(buf.wpos - buf.rpos) end local function ibuf_unused(buf) checkibuf(buf, 'unused') return tonumber(buf.epos - buf.wpos) end local function ibuf_recycle(buf) checkibuf(buf, 'recycle') builtin.ibuf_reinit(buf) end local function ibuf_reset(buf) checkibuf(buf, 'reset') buf.rpos = buf.buf buf.wpos = buf.buf end local function ibuf_reserve_slow(buf, size) local ptr = builtin.ibuf_reserve_slow(buf, size) if ptr == nil then errorf("Failed to allocate %d bytes in ibuf", size) end return ffi.cast('char *', ptr) end local function ibuf_reserve(buf, size) checkibuf(buf, 'reserve') if buf.wpos + size <= buf.epos then return buf.wpos end return ibuf_reserve_slow(buf, size) end local function ibuf_alloc(buf, size) checkibuf(buf, 'alloc') local wpos if buf.wpos + size <= buf.epos then wpos = buf.wpos else wpos = ibuf_reserve_slow(buf, size) end buf.wpos = buf.wpos + size return wpos end local function checksize(buf, size) if buf.rpos + size > buf.wpos then errorf("Attempt to read out of range bytes: needed=%d size=%d", tonumber(size), ibuf_used(buf)) end end local function ibuf_checksize(buf, size) checkibuf(buf, 'checksize') checksize(buf, size) return buf.rpos end local function ibuf_read(buf, size) checkibuf(buf, 'read') checksize(buf, size) local rpos = buf.rpos buf.rpos = rpos + size return rpos end local function ibuf_serialize(buf) local properties = { rpos = buf.rpos, wpos = buf.wpos } return { ibuf = properties } end local ibuf_methods = { recycle = ibuf_recycle; reset = ibuf_reset; reserve = ibuf_reserve; alloc = ibuf_alloc; 
checksize = ibuf_checksize; read = ibuf_read; __serialize = ibuf_serialize; size = ibuf_used; capacity = ibuf_capacity; pos = ibuf_pos; unused = ibuf_unused; } local function ibuf_tostring(ibuf) return '' end local ibuf_mt = { __gc = ibuf_recycle; __index = ibuf_methods; __tostring = ibuf_tostring; }; ffi.metatype(ibuf_t, ibuf_mt); local function ibuf_new(arg, arg2) local buf = ffi.new(ibuf_t) local slabc = builtin.tarantool_lua_slab_cache() builtin.ibuf_create(buf, slabc, READAHEAD) if arg == nil then return buf elseif type(arg) == 'number' then ibuf_reserve(buf, arg) return buf end errorf('Usage: ibuf([size])') end return { ibuf = ibuf_new; IBUF_SHARED = ffi.C.tarantool_lua_ibuf; READAHEAD = READAHEAD; } tarantool_1.9.1.26.g63eb81e3c/src/lua/utils.c0000664000000000000000000006163213306565107017100 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/utils.h" #include #include #include #include #include int luaL_nil_ref = LUA_REFNIL; int luaL_map_metatable_ref = LUA_REFNIL; int luaL_array_metatable_ref = LUA_REFNIL; static int CTID_CONST_STRUCT_ERROR_REF = 0; void * luaL_pushcdata(struct lua_State *L, uint32_t ctypeid) { /* * ctypeid is actually has CTypeID type. * CTypeId is defined somewhere inside luajit's internal * headers. */ assert(sizeof(ctypeid) == sizeof(CTypeID)); /* Code below is based on ffi_new() from luajit/src/lib_ffi.c */ /* Get information about ctype */ CTSize size; CTState *cts = ctype_cts(L); CTInfo info = lj_ctype_info(cts, ctypeid, &size); assert(size != CTSIZE_INVALID); /* Allocate a new cdata */ GCcdata *cd = lj_cdata_new(cts, ctypeid, size); /* Anchor the uninitialized cdata with the stack. */ TValue *o = L->top; setcdataV(L, o, cd); incr_top(L); /* * lj_cconv_ct_init is omitted for non-structs because it actually * does memset() * Caveats: cdata memory is returned uninitialized */ if (ctype_isstruct(info)) { /* Initialize cdata. */ CType *ct = ctype_raw(cts, ctypeid); lj_cconv_ct_init(cts, ct, size, cdataptr(cd), o, (MSize)(L->top - o)); /* Handle ctype __gc metamethod. Use the fast lookup here. */ cTValue *tv = lj_tab_getinth(cts->miscmap, -(int32_t)ctypeid); if (tv && tvistab(tv) && (tv = lj_meta_fast(L, tabV(tv), MM_gc))) { GCtab *t = cts->finalizer; if (gcref(t->metatable)) { /* Add to finalizer table, if still enabled. 
*/ copyTV(L, lj_tab_set(L, t, o), tv); lj_gc_anybarriert(L, t); cd->marked |= LJ_GC_CDATA_FIN; } } } lj_gc_check(L); return cdataptr(cd); } void * luaL_checkcdata(struct lua_State *L, int idx, uint32_t *ctypeid) { /* Calculate absolute value in the stack. */ if (idx < 0) idx = lua_gettop(L) + idx + 1; if (lua_type(L, idx) != LUA_TCDATA) { *ctypeid = 0; luaL_error(L, "expected cdata as %d argument", idx); return NULL; } GCcdata *cd = cdataV(L->base + idx - 1); *ctypeid = cd->ctypeid; return (void *)cdataptr(cd); } uint32_t luaL_ctypeid(struct lua_State *L, const char *ctypename) { int idx = lua_gettop(L); /* This function calls ffi.typeof to determine CDataType */ /* Get ffi.typeof function */ luaL_loadstring(L, "return require('ffi').typeof"); lua_call(L, 0, 1); /* FFI must exist */ assert(lua_gettop(L) == idx + 1 && lua_isfunction(L, idx + 1)); /* Push the first argument to ffi.typeof */ lua_pushstring(L, ctypename); /* Call ffi.typeof() */ lua_call(L, 1, 1); /* Returned type must be LUA_TCDATA with CTID_CTYPEID */ uint32_t ctypetypeid; CTypeID ctypeid = *(CTypeID *)luaL_checkcdata(L, idx + 1, &ctypetypeid); assert(ctypetypeid == CTID_CTYPEID); lua_settop(L, idx); return ctypeid; } int luaL_cdef(struct lua_State *L, const char *what) { int idx = lua_gettop(L); (void) idx; /* This function calls ffi.cdef */ /* Get ffi.typeof function */ luaL_loadstring(L, "return require('ffi').cdef"); lua_call(L, 0, 1); /* FFI must exist */ assert(lua_gettop(L) == idx + 1 && lua_isfunction(L, idx + 1)); /* Push the argument to ffi.cdef */ lua_pushstring(L, what); /* Call ffi.cdef() */ return lua_pcall(L, 1, 0, 0); } void luaL_setcdatagc(struct lua_State *L, int idx) { /* Calculate absolute value in the stack. 
*/ if (idx < 0) idx = lua_gettop(L) + idx + 1; /* Code below is based on ffi_gc() from luajit/src/lib_ffi.c */ /* Get cdata from the stack */ assert(lua_type(L, idx) == LUA_TCDATA); GCcdata *cd = cdataV(L->base + idx - 1); /* Get finalizer from the stack */ TValue *fin = lj_lib_checkany(L, lua_gettop(L)); #if !defined(NDEBUG) CTState *cts = ctype_cts(L); CType *ct = ctype_raw(cts, cd->ctypeid); (void) ct; assert(ctype_isptr(ct->info) || ctype_isstruct(ct->info) || ctype_isrefarray(ct->info)); #endif /* !defined(NDEBUG) */ /* Set finalizer */ lj_cdata_setfin(L, cd, gcval(fin), itype(fin)); /* Pop finalizer */ lua_pop(L, 1); } #define OPTION(type, name, defvalue) { #name, \ offsetof(struct luaL_serializer, name), type, defvalue} /** * Configuration options for serializers * @sa struct luaL_serializer */ static struct { const char *name; size_t offset; /* offset in structure */ int type; int defvalue; } OPTIONS[] = { OPTION(LUA_TBOOLEAN, encode_sparse_convert, 1), OPTION(LUA_TNUMBER, encode_sparse_ratio, 2), OPTION(LUA_TNUMBER, encode_sparse_safe, 10), OPTION(LUA_TNUMBER, encode_max_depth, 32), OPTION(LUA_TBOOLEAN, encode_invalid_numbers, 1), OPTION(LUA_TNUMBER, encode_number_precision, 14), OPTION(LUA_TBOOLEAN, encode_load_metatables, 1), OPTION(LUA_TBOOLEAN, encode_use_tostring, 0), OPTION(LUA_TBOOLEAN, encode_invalid_as_nil, 0), OPTION(LUA_TBOOLEAN, decode_invalid_numbers, 1), OPTION(LUA_TBOOLEAN, decode_save_metatables, 1), OPTION(LUA_TNUMBER, decode_max_depth, 32), { NULL, 0, 0, 0}, }; /** * @brief serializer.cfg{} Lua binding for serializers. * serializer.cfg is a table that contains current configuration values from * luaL_serializer structure. serializer.cfg has overriden __call() method * to change configuration keys in internal userdata (like box.cfg{}). * Please note that direct change in serializer.cfg.key will not affect * internal state of userdata. 
* @param L lua stack * @return 0 */ static int luaL_serializer_cfg(lua_State *L) { luaL_checktype(L, 1, LUA_TTABLE); /* serializer */ luaL_checktype(L, 2, LUA_TTABLE); /* serializer.cfg */ struct luaL_serializer *cfg = luaL_checkserializer(L); /* Iterate over all available options and checks keys in passed table */ for (int i = 0; OPTIONS[i].name != NULL; i++) { lua_getfield(L, 2, OPTIONS[i].name); if (lua_isnil(L, -1)) { lua_pop(L, 1); /* key hasn't changed */ continue; } /* * Update struct luaL_serializer using pointer to a * configuration value (all values must be `int` for that). */ int *pval = (int *) ((char *) cfg + OPTIONS[i].offset); /* Update struct luaL_serializer structure */ switch (OPTIONS[i].type) { case LUA_TBOOLEAN: *pval = lua_toboolean(L, -1); lua_pushboolean(L, *pval); break; case LUA_TNUMBER: *pval = lua_tointeger(L, -1); lua_pushinteger(L, *pval); break; default: unreachable(); } /* Save normalized value to serializer.cfg table */ lua_setfield(L, 1, OPTIONS[i].name); } return 0; } /** * @brief serializer.new() Lua binding. 
* @param L stack * @param reg methods to register * @param parent parent serializer to inherit configuration * @return new serializer */ struct luaL_serializer * luaL_newserializer(struct lua_State *L, const char *modname, const luaL_Reg *reg) { luaL_checkstack(L, 1, "too many upvalues"); /* Create new module */ lua_newtable(L); /* Create new configuration */ struct luaL_serializer *serializer = (struct luaL_serializer *) lua_newuserdata(L, sizeof(*serializer)); luaL_getmetatable(L, LUAL_SERIALIZER); lua_setmetatable(L, -2); memset(serializer, 0, sizeof(*serializer)); for (; reg->name != NULL; reg++) { /* push luaL_serializer as upvalue */ lua_pushvalue(L, -1); /* register method */ lua_pushcclosure(L, reg->func, 1); lua_setfield(L, -3, reg->name); } /* Add cfg{} */ lua_newtable(L); /* cfg */ lua_newtable(L); /* metatable */ lua_pushvalue(L, -3); /* luaL_serializer */ lua_pushcclosure(L, luaL_serializer_cfg, 1); lua_setfield(L, -2, "__call"); lua_setmetatable(L, -2); /* Save configuration values to serializer.cfg */ for (int i = 0; OPTIONS[i].name != NULL; i++) { int *pval = (int *) ((char *) serializer + OPTIONS[i].offset); *pval = OPTIONS[i].defvalue; switch (OPTIONS[i].type) { case LUA_TBOOLEAN: lua_pushboolean(L, *pval); break; case LUA_TNUMBER: lua_pushinteger(L, *pval); break; default: unreachable(); } lua_setfield(L, -2, OPTIONS[i].name); } lua_setfield(L, -3, "cfg"); lua_pop(L, 1); /* remove upvalues */ luaL_pushnull(L); lua_setfield(L, -2, "NULL"); lua_rawgeti(L, LUA_REGISTRYINDEX, luaL_array_metatable_ref); lua_setfield(L, -2, "array_mt"); lua_rawgeti(L, LUA_REGISTRYINDEX, luaL_map_metatable_ref); lua_setfield(L, -2, "map_mt"); if (modname != NULL) { /* Register module */ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); lua_pushstring(L, modname); /* add alias */ lua_pushvalue(L, -3); lua_settable(L, -3); lua_pop(L, 1); /* _LOADED */ } return serializer; } static int lua_gettable_wrapper(lua_State *L) { lua_gettable(L, -2); return 1; } static void 
lua_field_inspect_ucdata(struct lua_State *L, struct luaL_serializer *cfg, int idx, struct luaL_field *field) { if (!cfg->encode_load_metatables) return; /* * Try to call LUAL_SERIALIZE method on udata/cdata * LuaJIT specific: lua_getfield/lua_gettable raises exception on * cdata if field doesn't exist. */ int top = lua_gettop(L); lua_pushcfunction(L, lua_gettable_wrapper); lua_pushvalue(L, idx); lua_pushliteral(L, LUAL_SERIALIZE); if (lua_pcall(L, 2, 1, 0) == 0 && !lua_isnil(L, -1)) { if (!lua_isfunction(L, -1)) luaL_error(L, "invalid " LUAL_SERIALIZE " value"); /* copy object itself */ lua_pushvalue(L, idx); lua_pcall(L, 1, 1, 0); /* replace obj with the unpacked value */ lua_replace(L, idx); luaL_tofield(L, cfg, idx, field); } /* else ignore lua_gettable exceptions */ lua_settop(L, top); /* remove temporary objects */ } static void lua_field_inspect_table(struct lua_State *L, struct luaL_serializer *cfg, int idx, struct luaL_field *field) { assert(lua_type(L, idx) == LUA_TTABLE); const char *type; uint32_t size = 0; uint32_t max = 0; /* Try to get field LUAL_SERIALIZER_TYPE from metatable */ if (!cfg->encode_load_metatables || !luaL_getmetafield(L, idx, LUAL_SERIALIZE)) goto skip; if (lua_isfunction(L, -1)) { /* copy object itself */ lua_pushvalue(L, idx); lua_call(L, 1, 1); /* replace obj with the unpacked value */ lua_replace(L, idx); luaL_tofield(L, cfg, idx, field); return; } else if (!lua_isstring(L, -1)) { luaL_error(L, "invalid " LUAL_SERIALIZE " value"); } type = lua_tostring(L, -1); if (strcmp(type, "array") == 0 || strcmp(type, "seq") == 0 || strcmp(type, "sequence") == 0) { field->type = MP_ARRAY; /* Override type */ field->size = luaL_arrlen(L, idx); /* YAML: use flow mode if __serialize == 'seq' */ if (cfg->has_compact && type[3] == '\0') field->compact = true; lua_pop(L, 1); /* type */ return; } else if (strcmp(type, "map") == 0 || strcmp(type, "mapping") == 0) { field->type = MP_MAP; /* Override type */ field->size = luaL_maplen(L, idx); /* YAML: 
use flow mode if __serialize == 'map' */ if (cfg->has_compact && type[3] == '\0') field->compact = true; lua_pop(L, 1); /* type */ return; } else { luaL_error(L, "invalid " LUAL_SERIALIZE " value"); } skip: field->type = MP_ARRAY; /* Calculate size and check that table can represent an array */ lua_pushnil(L); while (lua_next(L, idx)) { size++; lua_pop(L, 1); /* pop the value */ lua_Number k; if (lua_type(L, -1) != LUA_TNUMBER || ((k = lua_tonumber(L, -1)) != size && (k < 1 || floor(k) != k))) { /* Finish size calculation */ while (lua_next(L, idx)) { size++; lua_pop(L, 1); /* pop the value */ } field->type = MP_MAP; field->size = size; return; } if (k > max) max = k; } /* Encode excessively sparse arrays as objects (if enabled) */ if (cfg->encode_sparse_ratio > 0 && max > size * (uint32_t)cfg->encode_sparse_ratio && max > (uint32_t)cfg->encode_sparse_safe) { if (!cfg->encode_sparse_convert) luaL_error(L, "excessively sparse array"); field->type = MP_MAP; field->size = size; return; } assert(field->type == MP_ARRAY); field->size = max; } static void lua_field_tostring(struct lua_State *L, struct luaL_serializer *cfg, int idx, struct luaL_field *field) { int top = lua_gettop(L); lua_getglobal(L, "tostring"); lua_pushvalue(L, idx); lua_call(L, 1, 1); lua_replace(L, idx); lua_settop(L, top); luaL_tofield(L, cfg, idx, field); } void luaL_tofield(struct lua_State *L, struct luaL_serializer *cfg, int index, struct luaL_field *field) { if (index < 0) index = lua_gettop(L) + index + 1; double num; double intpart; size_t size; #define CHECK_NUMBER(x) ({\ if (!isfinite(x) && !cfg->encode_invalid_numbers) { \ if (!cfg->encode_invalid_as_nil) \ luaL_error(L, "number must not be NaN or Inf"); \ field->type = MP_NIL; \ }}) switch (lua_type(L, index)) { case LUA_TNUMBER: num = lua_tonumber(L, index); if (isfinite(num) && modf(num, &intpart) != 0.0) { field->type = MP_DOUBLE; field->dval = num; } else if (num >= 0 && num < exp2(64)) { field->type = MP_UINT; field->ival = 
(uint64_t) num; } else if (num > -exp2(63) && num < exp2(63)) { field->type = MP_INT; field->ival = (int64_t) num; } else { field->type = MP_DOUBLE; field->dval = num; CHECK_NUMBER(num); } return; case LUA_TCDATA: { uint32_t ctypeid = 0; void *cdata = luaL_checkcdata(L, index, &ctypeid); int64_t ival; switch (ctypeid) { case CTID_BOOL: field->type = MP_BOOL; field->bval = *(bool*) cdata; return; case CTID_CCHAR: case CTID_INT8: ival = *(int8_t *) cdata; field->type = (ival >= 0) ? MP_UINT : MP_INT; field->ival = ival; return; case CTID_INT16: ival = *(int16_t *) cdata; field->type = (ival >= 0) ? MP_UINT : MP_INT; field->ival = ival; return; case CTID_INT32: ival = *(int32_t *) cdata; field->type = (ival >= 0) ? MP_UINT : MP_INT; field->ival = ival; return; case CTID_INT64: ival = *(int64_t *) cdata; field->type = (ival >= 0) ? MP_UINT : MP_INT; field->ival = ival; return; case CTID_UINT8: field->type = MP_UINT; field->ival = *(uint8_t *) cdata; return; case CTID_UINT16: field->type = MP_UINT; field->ival = *(uint16_t *) cdata; return; case CTID_UINT32: field->type = MP_UINT; field->ival = *(uint32_t *) cdata; return; case CTID_UINT64: field->type = MP_UINT; field->ival = *(uint64_t *) cdata; return; case CTID_FLOAT: field->type = MP_FLOAT; field->fval = *(float *) cdata; CHECK_NUMBER(field->fval); return; case CTID_DOUBLE: field->type = MP_DOUBLE; field->dval = *(double *) cdata; CHECK_NUMBER(field->dval); return; case CTID_P_CVOID: case CTID_P_VOID: if (*(void **) cdata == NULL) { field->type = MP_NIL; return; } /* Fall through */ default: field->type = MP_EXT; return; } return; } case LUA_TBOOLEAN: field->type = MP_BOOL; field->bval = lua_toboolean(L, index); return; case LUA_TNIL: field->type = MP_NIL; return; case LUA_TSTRING: field->sval.data = lua_tolstring(L, index, &size); field->sval.len = (uint32_t) size; field->type = MP_STR; return; case LUA_TTABLE: { field->compact = false; lua_field_inspect_table(L, cfg, index, field); return; } case 
LUA_TLIGHTUSERDATA: case LUA_TUSERDATA: field->sval.data = NULL; field->sval.len = 0; if (lua_touserdata(L, index) == NULL) { field->type = MP_NIL; return; } /* Fall through */ default: field->type = MP_EXT; return; } #undef CHECK_NUMBER } void luaL_convertfield(struct lua_State *L, struct luaL_serializer *cfg, int idx, struct luaL_field *field) { if (idx < 0) idx = lua_gettop(L) + idx + 1; assert(field->type == MP_EXT); /* must be called after tofield() */ if (cfg->encode_load_metatables) { int type = lua_type(L, idx); if (type == LUA_TCDATA) { /* * Don't call __serialize on primitive types * https://github.com/tarantool/tarantool/issues/1226 */ GCcdata *cd = cdataV(L->base + idx - 1); if (cd->ctypeid > CTID_CTYPEID) lua_field_inspect_ucdata(L, cfg, idx, field); } else if (type == LUA_TUSERDATA) { lua_field_inspect_ucdata(L, cfg, idx, field); } } if (field->type == MP_EXT && cfg->encode_use_tostring) lua_field_tostring(L, cfg, idx, field); if (field->type != MP_EXT) return; if (cfg->encode_invalid_as_nil) { field->type = MP_NIL; return; } luaL_error(L, "unsupported Lua type '%s'", lua_typename(L, lua_type(L, idx))); } /** * A helper to register a single type metatable. */ void luaL_register_type(struct lua_State *L, const char *type_name, const struct luaL_Reg *methods) { luaL_newmetatable(L, type_name); /* * Conventionally, make the metatable point to itself * in __index. If 'methods' contain a field for __index, * this is a no-op. */ lua_pushvalue(L, -1); lua_setfield(L, -2, "__index"); lua_pushstring(L, type_name); lua_setfield(L, -2, "__metatable"); luaL_register(L, NULL, methods); lua_pop(L, 1); } void luaL_register_module(struct lua_State *L, const char *modname, const struct luaL_Reg *methods) { assert(methods != NULL && modname != NULL); /* use luaL_register instead */ lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); if (strchr(modname, '.') == NULL) { /* root level, e.g. 
box */ lua_getfield(L, -1, modname); /* get package.loaded.modname */ if (!lua_istable(L, -1)) { /* module is not found */ lua_pop(L, 1); /* remove previous result */ lua_newtable(L); lua_pushvalue(L, -1); lua_setfield(L, -3, modname); /* _LOADED[modname] = new table */ } } else { /* 1+ level, e.g. box.space */ if (luaL_findtable(L, -1, modname, 0) != NULL) luaL_error(L, "Failed to register library"); } lua_remove(L, -2); /* remove _LOADED table */ luaL_register(L, NULL, methods); } /* * Maximum integer that doesn't lose precision on tostring() conversion. * Lua uses sprintf("%.14g") to format its numbers, see gh-1279. */ #define DBL_INT_MAX (1e14 - 1) #define DBL_INT_MIN (-1e14 + 1) void luaL_pushuint64(struct lua_State *L, uint64_t val) { #if defined(LJ_DUALNUM) /* see setint64V() */ if (val <= INT32_MAX) { /* push int32_t */ lua_pushinteger(L, (int32_t) val); } else #endif /* defined(LJ_DUALNUM) */ if (val <= DBL_INT_MAX) { /* push double */ lua_pushnumber(L, (double) val); } else { /* push uint64_t */ *(uint64_t *) luaL_pushcdata(L, CTID_UINT64) = val; } } void luaL_pushint64(struct lua_State *L, int64_t val) { #if defined(LJ_DUALNUM) /* see setint64V() */ if (val >= INT32_MIN && val <= INT32_MAX) { /* push int32_t */ lua_pushinteger(L, (int32_t) val); } else #endif /* defined(LJ_DUALNUM) */ if (val >= DBL_INT_MIN && val <= DBL_INT_MAX) { /* push double */ lua_pushnumber(L, (double) val); } else { /* push int64_t */ *(int64_t *) luaL_pushcdata(L, CTID_INT64) = val; } } static inline int luaL_convertint64(lua_State *L, int idx, bool unsignd, int64_t *result) { uint32_t ctypeid; void *cdata; /* * This code looks mostly like luaL_tofield(), but has less * cases and optimized for numbers. 
*/ switch (lua_type(L, idx)) { case LUA_TNUMBER: *result = lua_tonumber(L, idx); return 0; case LUA_TCDATA: cdata = luaL_checkcdata(L, idx, &ctypeid); switch (ctypeid) { case CTID_CCHAR: case CTID_INT8: *result = *(int8_t *) cdata; return 0; case CTID_INT16: *result = *(int16_t *) cdata; return 0; case CTID_INT32: *result = *(int32_t *) cdata; return 0; case CTID_INT64: *result = *(int64_t *) cdata; return 0; case CTID_UINT8: *result = *(uint8_t *) cdata; return 0; case CTID_UINT16: *result = *(uint16_t *) cdata; return 0; case CTID_UINT32: *result = *(uint32_t *) cdata; return 0; case CTID_UINT64: *result = *(uint64_t *) cdata; return 0; } *result = 0; return -1; case LUA_TSTRING: { const char *arg = luaL_checkstring(L, idx); char *arge; errno = 0; *result = (unsignd ? (long long) strtoull(arg, &arge, 10) : strtoll(arg, &arge, 10)); if (errno == 0 && arge != arg) return 0; return 1; } } *result = 0; return -1; } uint64_t luaL_checkuint64(struct lua_State *L, int idx) { int64_t result; if (luaL_convertint64(L, idx, true, &result) != 0) { lua_pushfstring(L, "expected uint64_t as %d argument", idx); lua_error(L); return 0; } return result; } int64_t luaL_checkint64(struct lua_State *L, int idx) { int64_t result; if (luaL_convertint64(L, idx, false, &result) != 0) { lua_pushfstring(L, "expected int64_t as %d argument", idx); lua_error(L); return 0; } return result; } uint64_t luaL_touint64(struct lua_State *L, int idx) { int64_t result; if (luaL_convertint64(L, idx, true, &result) == 0) return result; return 0; } int64_t luaL_toint64(struct lua_State *L, int idx) { int64_t result; if (luaL_convertint64(L, idx, false, &result) == 0) return result; return 0; } struct error * luaL_iserror(struct lua_State *L, int narg) { assert(CTID_CONST_STRUCT_ERROR_REF != 0); if (lua_type(L, narg) != LUA_TCDATA) return NULL; uint32_t ctypeid; void *data = luaL_checkcdata(L, narg, &ctypeid); if (ctypeid != (uint32_t) CTID_CONST_STRUCT_ERROR_REF) return NULL; struct error *e = *(struct 
error **) data; assert(e->refs); return e; } static struct error * luaL_checkerror(struct lua_State *L, int narg) { struct error *error = luaL_iserror(L, narg); if (error == NULL) { luaL_error(L, "Invalid argument #%d (error expected, got %s)", narg, lua_typename(L, lua_type(L, narg))); } return error; } static int luaL_error_gc(struct lua_State *L) { struct error *error = luaL_checkerror(L, 1); error_unref(error); return 0; } void luaT_pusherror(struct lua_State *L, struct error *e) { assert(CTID_CONST_STRUCT_ERROR_REF != 0); struct error **ptr = (struct error **) luaL_pushcdata(L, CTID_CONST_STRUCT_ERROR_REF); *ptr = e; /* The order is important - first reference the error, then set gc */ error_ref(e); lua_pushcfunction(L, luaL_error_gc); luaL_setcdatagc(L, -2); } int luaT_error(lua_State *L) { struct error *e = diag_last_error(&fiber()->diag); assert(e != NULL); /* * gh-1955 luaT_pusherror allocates Lua objects, thus it may trigger * GC. GC may invoke finalizers which are arbitrary Lua code, * potentially invalidating last error object, hence error_ref * below. */ error_ref(e); luaT_pusherror(L, e); error_unref(e); lua_error(L); unreachable(); return 0; } static inline int lbox_catch(lua_State *L) { struct error *e = luaL_iserror(L, -1); if (e != NULL) { /* Re-throw original error */ diag_add_error(&fiber()->diag, e); } else { /* Convert Lua error to a Tarantool exception. 
*/ diag_set(LuajitError, lua_tostring(L, -1)); } return 1; } int luaT_call(struct lua_State *L, int nargs, int nreturns) { if (lua_pcall(L, nargs, nreturns, 0)) return lbox_catch(L); return 0; } int luaT_cpcall(lua_State *L, lua_CFunction func, void *ud) { if (lua_cpcall(L, func, ud)) return lbox_catch(L); return 0; } lua_State * luaT_state(void) { return tarantool_L; } int tarantool_lua_utils_init(struct lua_State *L) { static const struct luaL_Reg serializermeta[] = { {NULL, NULL}, }; /* Get CTypeID for `struct error *' */ int rc = luaL_cdef(L, "struct error;"); assert(rc == 0); (void) rc; CTID_CONST_STRUCT_ERROR_REF = luaL_ctypeid(L, "const struct error &"); assert(CTID_CONST_STRUCT_ERROR_REF != 0); luaL_register_type(L, LUAL_SERIALIZER, serializermeta); /* Create NULL constant */ *(void **) luaL_pushcdata(L, CTID_P_VOID) = NULL; luaL_nil_ref = luaL_ref(L, LUA_REGISTRYINDEX); lua_createtable(L, 0, 1); lua_pushliteral(L, "map"); /* YAML will use flow mode */ lua_setfield(L, -2, LUAL_SERIALIZE); /* automatically reset hints on table change */ luaL_loadstring(L, "setmetatable((...), nil); return rawset(...)"); lua_setfield(L, -2, "__newindex"); luaL_map_metatable_ref = luaL_ref(L, LUA_REGISTRYINDEX); lua_createtable(L, 0, 1); lua_pushliteral(L, "seq"); /* YAML will use flow mode */ lua_setfield(L, -2, LUAL_SERIALIZE); /* automatically reset hints on table change */ luaL_loadstring(L, "setmetatable((...), nil); return rawset(...)"); lua_setfield(L, -2, "__newindex"); luaL_array_metatable_ref = luaL_ref(L, LUA_REGISTRYINDEX); return 0; } tarantool_1.9.1.26.g63eb81e3c/src/lua/msgpack.c0000664000000000000000000003532513306560010017351 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. 
Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "lua/msgpack.h" #include "lua/utils.h" #if defined(LUAJIT) #include #endif /* defined(LUAJIT) */ #include /* struct luaL_error */ #include #include #include #include void luamp_error(void *error_ctx) { struct lua_State *L = (struct lua_State *) error_ctx; luaL_error(L, diag_last_error(diag_get())->errmsg); } void mpstream_init(struct mpstream *stream, void *ctx, luamp_reserve_f reserve, luamp_alloc_f alloc, luamp_error_f error, void *error_ctx) { stream->ctx = ctx; stream->reserve = reserve; stream->alloc = alloc; stream->error = error; stream->error_ctx = error_ctx; mpstream_reset(stream); } void mpstream_reserve_slow(struct mpstream *stream, size_t size) { stream->alloc(stream->ctx, stream->pos - stream->buf); stream->buf = (char *) stream->reserve(stream->ctx, &size); if (stream->buf == NULL) { diag_set(OutOfMemory, size, "mpstream", "reserve"); stream->error(stream->error_ctx); } stream->pos = stream->buf; stream->end = stream->pos + size; } void mpstream_reset(struct mpstream *stream) { size_t size = 0; stream->buf = (char *) stream->reserve(stream->ctx, &size); if (stream->buf == NULL) { diag_set(OutOfMemory, size, "mpstream", "reset"); stream->error(stream->error_ctx); } stream->pos = stream->buf; stream->end = stream->pos + size; } static uint32_t CTID_CHAR_PTR; static uint32_t CTID_STRUCT_IBUF; struct luaL_serializer *luaL_msgpack_default = NULL; static enum mp_type luamp_encode_extension_default(struct lua_State *L, int idx, struct mpstream *stream); static void luamp_decode_extension_default(struct lua_State *L, const char **data); static luamp_encode_extension_f luamp_encode_extension = luamp_encode_extension_default; static luamp_decode_extension_f luamp_decode_extension = luamp_decode_extension_default; void luamp_encode_array(struct luaL_serializer *cfg, struct mpstream *stream, uint32_t size) { (void) cfg; assert(mp_sizeof_array(size) <= 5); char *data = mpstream_reserve(stream, 5); char *pos = mp_encode_array(data, size); 
mpstream_advance(stream, pos - data); } void luamp_encode_map(struct luaL_serializer *cfg, struct mpstream *stream, uint32_t size) { (void) cfg; assert(mp_sizeof_map(size) <= 5); char *data = mpstream_reserve(stream, 5); char *pos = mp_encode_map(data, size); mpstream_advance(stream, pos - data); } void luamp_encode_uint(struct luaL_serializer *cfg, struct mpstream *stream, uint64_t num) { (void) cfg; assert(mp_sizeof_uint(num) <= 9); char *data = mpstream_reserve(stream, 9); char *pos = mp_encode_uint(data, num); mpstream_advance(stream, pos - data); } void luamp_encode_int(struct luaL_serializer *cfg, struct mpstream *stream, int64_t num) { (void) cfg; assert(mp_sizeof_int(num) <= 9); char *data = mpstream_reserve(stream, 9); char *pos = mp_encode_int(data, num); mpstream_advance(stream, pos - data); } void luamp_encode_float(struct luaL_serializer *cfg, struct mpstream *stream, float num) { (void) cfg; assert(mp_sizeof_float(num) <= 5); char *data = mpstream_reserve(stream, 5); char *pos = mp_encode_float(data, num); mpstream_advance(stream, pos - data); } void luamp_encode_double(struct luaL_serializer *cfg, struct mpstream *stream, double num) { (void) cfg; assert(mp_sizeof_double(num) <= 9); char *data = mpstream_reserve(stream, 9); char *pos = mp_encode_double(data, num); mpstream_advance(stream, pos - data); } void luamp_encode_str(struct luaL_serializer *cfg, struct mpstream *stream, const char *str, uint32_t len) { (void) cfg; assert(mp_sizeof_str(len) <= 5 + len); char *data = mpstream_reserve(stream, 5 + len); char *pos = mp_encode_str(data, str, len); mpstream_advance(stream, pos - data); } void luamp_encode_nil(struct luaL_serializer *cfg, struct mpstream *stream) { (void) cfg; assert(mp_sizeof_nil() <= 1); char *data = mpstream_reserve(stream, 1); char *pos = mp_encode_nil(data); mpstream_advance(stream, pos - data); } void luamp_encode_bool(struct luaL_serializer *cfg, struct mpstream *stream, bool val) { (void) cfg; assert(mp_sizeof_bool(val) <= 
1); char *data = mpstream_reserve(stream, 1); char *pos = mp_encode_bool(data, val); mpstream_advance(stream, pos - data); } static enum mp_type luamp_encode_extension_default(struct lua_State *L, int idx, struct mpstream *stream) { (void) L; (void) idx; (void) stream; return MP_EXT; } void luamp_set_encode_extension(luamp_encode_extension_f handler) { if (handler == NULL) { luamp_encode_extension = luamp_encode_extension_default; } else { luamp_encode_extension = handler; } } static void luamp_decode_extension_default(struct lua_State *L, const char **data) { luaL_error(L, "msgpack.decode: unsupported extension: %u", (unsigned char) **data); } void luamp_set_decode_extension(luamp_decode_extension_f handler) { if (handler == NULL) { luamp_decode_extension = luamp_decode_extension_default; } else { luamp_decode_extension = handler; } } enum mp_type luamp_encode_r(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, struct luaL_field *field, int level) { int top = lua_gettop(L); enum mp_type type; restart: /* used by MP_EXT */ switch (field->type) { case MP_UINT: luamp_encode_uint(cfg, stream, field->ival); return MP_UINT; case MP_STR: luamp_encode_str(cfg, stream, field->sval.data, field->sval.len); return MP_STR; case MP_BIN: luamp_encode_str(cfg, stream, field->sval.data, field->sval.len); return MP_BIN; case MP_INT: luamp_encode_int(cfg, stream, field->ival); return MP_INT; case MP_FLOAT: luamp_encode_float(cfg, stream, field->fval); return MP_FLOAT; case MP_DOUBLE: luamp_encode_double(cfg, stream, field->dval); return MP_DOUBLE; case MP_BOOL: luamp_encode_bool(cfg, stream, field->bval); return MP_BOOL; case MP_NIL: luamp_encode_nil(cfg, stream); return MP_NIL; case MP_MAP: /* Map */ if (level >= cfg->encode_max_depth) { luamp_encode_nil(cfg, stream); /* Limit nested maps */ return MP_NIL; } luamp_encode_map(cfg, stream, field->size); lua_pushnil(L); /* first key */ while (lua_next(L, top) != 0) { lua_pushvalue(L, -2); /* push a copy of key 
to top */ luaL_tofield(L, cfg, lua_gettop(L), field); luamp_encode_r(L, cfg, stream, field, level + 1); lua_pop(L, 1); /* pop a copy of key */ luaL_tofield(L, cfg, lua_gettop(L), field); luamp_encode_r(L, cfg, stream, field, level + 1); lua_pop(L, 1); /* pop value */ } assert(lua_gettop(L) == top); return MP_MAP; case MP_ARRAY: /* Array */ if (level >= cfg->encode_max_depth) { luamp_encode_nil(cfg, stream); /* Limit nested arrays */ return MP_NIL; } uint32_t size = field->size; luamp_encode_array(cfg, stream, size); for (uint32_t i = 0; i < size; i++) { lua_rawgeti(L, top, i + 1); luaL_tofield(L, cfg, top + 1, field); luamp_encode_r(L, cfg, stream, field, level + 1); lua_pop(L, 1); } assert(lua_gettop(L) == top); return MP_ARRAY; case MP_EXT: /* Run trigger if type can't be encoded */ type = luamp_encode_extension(L, top, stream); if (type != MP_EXT) return type; /* Value has been packed by the trigger */ /* Try to convert value to serializable type */ luaL_convertfield(L, cfg, top, field); /* handled by luaL_convertfield */ assert(field->type != MP_EXT); assert(lua_gettop(L) == top); goto restart; } return MP_EXT; } enum mp_type luamp_encode(struct lua_State *L, struct luaL_serializer *cfg, struct mpstream *stream, int index) { int top = lua_gettop(L); if (index < 0) index = top + index + 1; bool on_top = (index == top); if (!on_top) { lua_pushvalue(L, index); /* copy a value to the stack top */ } struct luaL_field field; luaL_tofield(L, cfg, lua_gettop(L), &field); enum mp_type top_type = luamp_encode_r(L, cfg, stream, &field, 0); if (!on_top) { lua_remove(L, top + 1); /* remove a value copy */ } return top_type; } void luamp_decode(struct lua_State *L, struct luaL_serializer *cfg, const char **data) { double d; switch (mp_typeof(**data)) { case MP_UINT: luaL_pushuint64(L, mp_decode_uint(data)); break; case MP_INT: luaL_pushint64(L, mp_decode_int(data)); break; case MP_FLOAT: d = mp_decode_float(data); luaL_checkfinite(L, cfg, d); lua_pushnumber(L, d); return; 
case MP_DOUBLE: d = mp_decode_double(data); luaL_checkfinite(L, cfg, d); lua_pushnumber(L, d); return; case MP_STR: { uint32_t len = 0; const char *str = mp_decode_str(data, &len); lua_pushlstring(L, str, len); return; } case MP_BIN: { uint32_t len = 0; const char *str = mp_decode_bin(data, &len); lua_pushlstring(L, str, len); return; } case MP_BOOL: lua_pushboolean(L, mp_decode_bool(data)); return; case MP_NIL: mp_decode_nil(data); luaL_pushnull(L); return; case MP_ARRAY: { uint32_t size = mp_decode_array(data); lua_createtable(L, size, 0); for (uint32_t i = 0; i < size; i++) { luamp_decode(L, cfg, data); lua_rawseti(L, -2, i + 1); } if (cfg->decode_save_metatables) luaL_setarrayhint(L, -1); return; } case MP_MAP: { uint32_t size = mp_decode_map(data); lua_createtable(L, 0, size); for (uint32_t i = 0; i < size; i++) { luamp_decode(L, cfg, data); luamp_decode(L, cfg, data); lua_settable(L, -3); } if (cfg->decode_save_metatables) luaL_setmaphint(L, -1); return; } case MP_EXT: luamp_decode_extension(L, data); break; } } static int lua_msgpack_encode(lua_State *L) { int index = lua_gettop(L); if (index < 1) return luaL_error(L, "msgpack.encode: a Lua object expected"); struct ibuf *buf; if (index > 1) { uint32_t ctypeid; buf = luaL_checkcdata(L, 2, &ctypeid); if (ctypeid != CTID_STRUCT_IBUF) return luaL_error(L, "msgpack.encode: argument 2 " "must be of type 'struct ibuf'"); } else { buf = tarantool_lua_ibuf; ibuf_reset(buf); } size_t used = ibuf_used(buf); struct luaL_serializer *cfg = luaL_checkserializer(L); struct mpstream stream; mpstream_init(&stream, buf, ibuf_reserve_cb, ibuf_alloc_cb, luamp_error, L); luamp_encode(L, cfg, &stream, 1); mpstream_flush(&stream); if (index > 1) { lua_pushinteger(L, ibuf_used(buf) - used); } else { lua_pushlstring(L, buf->buf, ibuf_used(buf)); ibuf_reinit(buf); } return 1; } static int lua_msgpack_decode_cdata(lua_State *L, bool check) { uint32_t ctypeid; const char *data = *(const char **)luaL_checkcdata(L, 1, &ctypeid); if 
(ctypeid != CTID_CHAR_PTR) { return luaL_error(L, "msgpack.decode: " "a Lua string or 'char *' expected"); } if (check) { size_t data_len = luaL_checkinteger(L, 2); const char *p = data; if (mp_check(&p, data + data_len) != 0) return luaL_error(L, "msgpack.decode: invalid MsgPack"); } struct luaL_serializer *cfg = luaL_checkserializer(L); luamp_decode(L, cfg, &data); *(const char **)luaL_pushcdata(L, ctypeid) = data; return 2; } static int lua_msgpack_decode_string(lua_State *L, bool check) { ptrdiff_t offset = 0; size_t data_len; const char *data = lua_tolstring(L, 1, &data_len); if (lua_gettop(L) > 1) { offset = luaL_checkinteger(L, 2) - 1; if (offset < 0 || (size_t)offset >= data_len) return luaL_error(L, "msgpack.decode: " "offset is out of bounds"); } if (check) { const char *p = data + offset; if (mp_check(&p, data + data_len) != 0) return luaL_error(L, "msgpack.decode: invalid MsgPack"); } struct luaL_serializer *cfg = luaL_checkserializer(L); const char *p = data + offset; luamp_decode(L, cfg, &p); lua_pushinteger(L, p - data + 1); return 2; } static int lua_msgpack_decode(lua_State *L) { int index = lua_gettop(L); int type = index >= 1 ? lua_type(L, 1) : LUA_TNONE; switch (type) { case LUA_TCDATA: return lua_msgpack_decode_cdata(L, true); case LUA_TSTRING: return lua_msgpack_decode_string(L, true); default: return luaL_error(L, "msgpack.decode: " "a Lua string or 'char *' expected"); } } static int lua_msgpack_decode_unchecked(lua_State *L) { int index = lua_gettop(L); int type = index >= 1 ? 
lua_type(L, 1) : LUA_TNONE; switch (type) { case LUA_TCDATA: return lua_msgpack_decode_cdata(L, false); case LUA_TSTRING: return lua_msgpack_decode_string(L, false); default: return luaL_error(L, "msgpack.decode: " "a Lua string or 'char *' expected"); } } static int lua_ibuf_msgpack_decode(lua_State *L) { uint32_t ctypeid = 0; const char *rpos = *(const char **)luaL_checkcdata(L, 1, &ctypeid); if (rpos == NULL) { luaL_error(L, "msgpack.ibuf_decode: rpos is null"); } struct luaL_serializer *cfg = luaL_checkserializer(L); luamp_decode(L, cfg, &rpos); *(const char **)luaL_pushcdata(L, ctypeid) = rpos; lua_pushvalue(L, -2); return 2; } static int lua_msgpack_new(lua_State *L); static const luaL_Reg msgpacklib[] = { { "encode", lua_msgpack_encode }, { "decode", lua_msgpack_decode }, { "decode_unchecked", lua_msgpack_decode_unchecked }, { "ibuf_decode", lua_ibuf_msgpack_decode }, { "new", lua_msgpack_new }, { NULL, NULL } }; static int lua_msgpack_new(lua_State *L) { luaL_newserializer(L, NULL, msgpacklib); return 1; } LUALIB_API int luaopen_msgpack(lua_State *L) { int rc = luaL_cdef(L, "struct ibuf;"); assert(rc == 0); (void) rc; CTID_STRUCT_IBUF = luaL_ctypeid(L, "struct ibuf"); assert(CTID_STRUCT_IBUF != 0); CTID_CHAR_PTR = luaL_ctypeid(L, "char *"); assert(CTID_CHAR_PTR != 0); luaL_msgpack_default = luaL_newserializer(L, "msgpack", msgpacklib); return 1; } tarantool_1.9.1.26.g63eb81e3c/src/lua/init.lua0000664000000000000000000002003713306560010017220 0ustar rootroot-- init.lua -- internal file local ffi = require('ffi') ffi.cdef[[ struct type_info; struct method_info; struct error; enum ctype { CTYPE_VOID = 0, CTYPE_INT, CTYPE_CONST_CHAR_PTR }; struct type_info { const char *name; const struct type_info *parent; const struct method_info *methods; }; enum { DIAG_ERRMSG_MAX = 512, DIAG_FILENAME_MAX = 256 }; typedef void (*error_f)(struct error *e); struct error { error_f _destroy; error_f _raise; error_f _log; const struct type_info *_type; int _refs; /** Line number. 
*/ unsigned _line; /* Source file name. */ char _file[DIAG_FILENAME_MAX]; /* Error description. */ char _errmsg[DIAG_ERRMSG_MAX]; }; enum { METHOD_ARG_MAX = 8 }; struct method_info { const struct type_info *owner; const char *name; enum ctype rtype; enum ctype atype[METHOD_ARG_MAX]; int nargs; bool isconst; union { /* Add extra space to get proper struct size in C */ void *_spacer[2]; }; }; char * exception_get_string(struct error *e, const struct method_info *method); int exception_get_int(struct error *e, const struct method_info *method); double tarantool_uptime(void); typedef int32_t pid_t; pid_t getpid(void); ]] local fio = require("fio") local REFLECTION_CACHE = {} local function reflection_enumerate(err) local key = tostring(err._type) local result = REFLECTION_CACHE[key] if result ~= nil then return result end result = {} -- See type_foreach_method() in reflection.h local t = err._type while t ~= nil do local m = t.methods while m.name ~= nil do result[ffi.string(m.name)] = m m = m + 1 end t = t.parent end REFLECTION_CACHE[key] = result return result end local function reflection_get(err, method) if method.nargs ~= 0 then return nil -- NYI end if method.rtype == ffi.C.CTYPE_INT then return tonumber(ffi.C.exception_get_int(err, method)) elseif method.rtype == ffi.C.CTYPE_CONST_CHAR_PTR then local str = ffi.C.exception_get_string(err, method) if str == nil then return nil end return ffi.string(str) end end local function error_type(err) return ffi.string(err._type.name) end local function error_message(err) return ffi.string(err._errmsg) end local function error_trace(err) if err._file[0] == 0 then return {} end return { { file = ffi.string(err._file), line = tonumber(err._line) }; } end local error_fields = { ["type"] = error_type; ["message"] = error_message; ["trace"] = error_trace; } local function error_unpack(err) if not ffi.istype('struct error', err) then error("Usage: error:unpack()") end local result = {} for key, getter in pairs(error_fields) do 
result[key] = getter(err) end for key, getter in pairs(reflection_enumerate(err)) do local value = reflection_get(err, getter) if value ~= nil then result[key] = value end end return result end local function error_raise(err) if not ffi.istype('struct error', err) then error("Usage: error:raise()") end error(err) end local function error_match(err, ...) if not ffi.istype('struct error', err) then error("Usage: error:match()") end return string.match(error_message(err), ...) end local function error_serialize(err) -- Return an error message only in admin console to keep compatibility return error_message(err) end local error_methods = { ["unpack"] = error_unpack; ["raise"] = error_raise; ["match"] = error_match; -- Tarantool 1.6 backward compatibility ["__serialize"] = error_serialize; } local function error_index(err, key) local getter = error_fields[key] if getter ~= nil then return getter(err) end getter = reflection_enumerate(err)[key] if getter ~= nil and getter.nargs == 0 then local val = reflection_get(err, getter) if val ~= nil then return val end end return error_methods[key] end local error_mt = { __index = error_index; __tostring = error_message; }; ffi.metatype('struct error', error_mt); dostring = function(s, ...) local chunk, message = loadstring(s) if chunk == nil then error(message, 2) end return chunk(...) end local function uptime() return tonumber(ffi.C.tarantool_uptime()); end local function pid() return tonumber(ffi.C.getpid()) end local soext = (jit.os == "OSX" and "dylib" or "so") local function mksymname(name) local mark = string.find(name, "-") if mark then name = string.sub(name, mark + 1) end return "luaopen_" .. 
string.gsub(name, "%.", "_") end local function load_lib(file, name) return package.loadlib(file, mksymname(name)) end local function load_lua(file) return loadfile(file) end local function search_cwd_lib(name) local path = "./?."..soext return package.searchpath(name, path) end local function search_cwd_lua(name) local path = "./?.lua;./?/init.lua" return package.searchpath(name, path) end local function traverse_rocks(name, pathes_search) local cwd = fio.cwd() local index = string.len(cwd) + 1 local strerr = "" while index ~= nil do cwd = string.sub(cwd, 1, index - 1) for i, path in ipairs(pathes_search) do local file, err = package.searchpath(name, cwd .. path) if err == nil then return file end strerr = strerr .. err end index = string.find(cwd, "/[^/]*$") end return nil, strerr end local function search_rocks_lua(name) local pathes_search = { "/.rocks/share/tarantool/?.lua;", "/.rocks/share/tarantool/?/init.lua;", } return traverse_rocks(name, pathes_search) end local function search_rocks_lib(name) local pathes_search = { "/.rocks/lib/tarantool/?."..soext } return traverse_rocks(name, pathes_search) end local function cwd_loader_func(lib) local search_cwd = lib and search_cwd_lib or search_cwd_lua local load_func = lib and load_lib or load_lua return function(name) if not name then return "empty name of module" end local file, err = search_cwd(name) if not file then return err end local loaded, err = load_func(file, name) if err == nil then return loaded else return err end end end local function rocks_loader_func(lib) local search_rocks = lib and search_rocks_lib or search_rocks_lua local load_func = lib and load_lib or load_lua return function (name) if not name then return "empty name of module" end local file, err = search_rocks(name) if not file then return err end local loaded, err = load_func(file, name) if err == nil then return loaded else return err end end end local function search_path_func(cpath) return function(name) return 
package.searchpath(name, cpath and package.cpath or package.path) end end local function search(name) if not name then return "empty name of module" end local searchers = { search_cwd_lua, search_cwd_lib, search_rocks_lua, search_rocks_lib, search_path_func(false), search_path_func(true) } for _, searcher in ipairs(searchers) do local file = searcher(name) if file ~= nil then return file end end return nil end -- loader_preload 1 table.insert(package.loaders, 2, cwd_loader_func(false)) table.insert(package.loaders, 3, cwd_loader_func(true)) table.insert(package.loaders, 4, rocks_loader_func(false)) table.insert(package.loaders, 5, rocks_loader_func(true)) -- package.path 6 -- package.cpath 7 -- croot 8 package.search = search return { uptime = uptime; pid = pid; } tarantool_1.9.1.26.g63eb81e3c/src/lua/help_en_US.lua0000664000000000000000000004310613306565107020314 0ustar rootroot--- help (en_US) return { tutorial = { [[ Tutorial -- Screen #1 -- Hello, Moon ==================================== Welcome to the Tarantool tutorial. It will introduce you to Tarantool’s Lua application server and database server, which is what’s running what you’re seeing. This is INTERACTIVE -- you’re expected to enter requests based on the suggestions or examples in the screen’s text. The first request is: 5.1, "Olá", "Lua" ------------------ This isn’t your grandfather’s "Hello World" ... the decimal literal 5.1 and the two strings inside single quotes ("Hello Moon" in Portuguese) will just be repeated, without need for a print() statement. Take that one-line request and enter it below after the "tarantool>" prompt, then type Enter. You’ll see the response: --- - 5.1 - Olá - Lua ... Then you’ll get a chance to repeat -- perhaps entering something else such as "Longer String",-1,-3,0. When you’re ready to go to the next screen, enter . 
]]; [[ Tutorial -- Screen #2 -- Variables ================================== Improve on "5.1, "Olá", "Lua"" by using variables rather than literals, and put the strings inside braces, which means they’re elements of a TABLE. More in the Lua manual: http://www.lua.org/pil/2.html You don’t need to declare variables in advance because Lua figures out the data type from what you assign to it. Assignment is done with the "=" operator. If the data type of variable t is table, then the elements can be referenced as t[1], t[2], and so on. More in the Lua manual: http://www.lua.org/pil/2.5.html Request #2 is: n = 5.1 t = {"Olá", "Lua"} n, t[1], t[2] ------------------ Take all the three lines and enter them below after the "tarantool>" prompt, then type Enter. Or try different values in a different order. When you’re ready to go to the next screen, enter . Or, to go to the previous screen, enter . ]]; [[ Tutorial -- Screen #3 -- Loops ============================== Add action by using loops rather than static displays. There are several syntaxes for loops in Lua, but we’ll just use one: for variable-name = start-value, end-value, 1 do loop-body end which is good enough if you want to assign a start-value to a variable, do what’s in the loop body, add 1 to the variable, and repeat until it equals end-value. More in the Lua manual: http://www.lua.org/pil/4.3.4.html. Request #3 is: result_table = {} n = 5.1 for i=1,2,1 do result_table[i] = n * i end result_table ------------------------------------------ Take all four lines and enter them below after the "tarantool>" prompt, then type Enter. For adventure, change the loop to "for i=1,3,1" (don’t worry, it won’t crash). When you’re ready to go to the next screen, enter . ]]; [[ Tutorial -- Screen #4 -- Operators ================================== Among the many operators that Lua supports, you most often see: For arithmetic: * (multiply), + (add), - (subtract), / (divide). For strings: .. 
(concatenate) More in the Lua manual: http://www.lua.org/pil/3.1.html Request #4 is: n = 5.1 t = {"Olá", "Lua"} for i=1,2,1 do n = n * 2 t[1] = t[1] .. t[2] end n,t[1],t[2] ------------------------------------------------ Before you type that in and see Tarantool display the result, try to predict whether the display will be (a) 20.4 OláLuaLua Lua (b) 10.2 Olá Lua Lua Lua (c) 5.1 Olá Lua The answer will appear when you type in the request. When you’re ready to go to the next screen, enter . ]]; [[ Tutorial -- Screen #5 -- Conditions =================================== A condition involves a comparison operator such as "==", ">", ">=", "<", "<=". Conditions are used in statements of the form if ... then. More in the Lua manual: http://www.lua.org/pil/4.3.1.html Request #5 is: x = 17 if x * 2 > 34 then result = x else result = "no" end result ---------------------------------------------------- Before you type in those three lines and see Tarantool display the result, try to predict whether the display will be (a) 17 (b) 34 (c) no The answer will appear when you type in the request. When you’re ready to go to the next screen, enter . ]]; [[ Tutorial -- Screen #6 -- Delimiters =================================== This is just to prepare for later exercises which will go over many lines. There is a Tarantool instruction that means More in the Tarantool manual: http://tarantool.org/doc/book/administration.html#requests Request #6 is: console = require("console"); console.delimiter("!") ---------------------------------------------------- It’s not an exercise -- just do it. Cancelling the delimiter could be done with console.delimiter("")! but you’ll see "!" in following exercises. You'll need a custom delimiter only in the trial console at http://try.tarantool.org. Tarantool console in production is smarter. 
It can tell when a multi-line request has not ended (for example, if it sees that a function declaration does not have an end keyword, as we'll be writing on the next screen). When you’re ready to go to the next screen, enter . Yes, now has to end with an exclamation mark too! ]]; [[ Tutorial -- Screen #7 -- Simple functions ========================================= A function, or a stored procedure that returns a value, is a named set of Lua requests whose simplest form is function function_name () body end More in the Lua manual: http://www.lua.org/pil/5.html Request #7 is: n = 0 function func () for i=1,100,1 do n = n + i end return n end! func()! ------------------------------ This defines a function which sums all the numbers between 1 and 100, and returns the final result. The request "func()!" invokes the function. ]]; [[ Tutorial -- Screen #8 -- Improved functions =========================================== Improve the simple function by avoiding globals. The variable n could be passed as a parameter and the variable i could be declared as local. More in the Lua manual: http://www.lua.org/pil/4.2.html Request #8 is: function func (n) local i for i=1,100,1 do n = n + i end return n end! func(0)! ------------------------------ ]]; [[ Tutorial -- Screen #9 -- Comments ================================= There are several ways to add comments, but one will do: (minus sign) (minus sign) comment-text. More in the Lua manual: http://www.lua.org/pil/1.3.html Request #9 is: -- func is a function which returns a sum. -- n is a parameter. i is a local variable. -- "!" is a delimiter (introduced in Screen #6) -- func is a function (introduced in Screen #7) -- n is a parameter (introduced in Screen #8) -- "n = n + 1" is an operator usage (introduced in Screen #4) -- "for ... do ... end" is a loop (introduced in Screen #3) function func(n) -- n is a parameter local i -- i is a local variable for i=1,100,1 do n = n + i end return n end! -- invoke the function func(0)! 
------------------------------------------- Obviously it will work, so just type now. ]]; [[ Tutorial -- Screen #10 -- Packages ================================== Many developers have gone to the trouble of making packages of functions (sometimes called "modules") that have a general utility. More in the Luarocks list: http://luarocks.org/ Most packages have to be "required", with the syntax variable_name = require("package-name") which should look familiar because earlier you said console = require("console") At this point, if you just say the variable_name, you’ll see a list of the package’s members and functions. If then you use a "." operator as in package-variable-name.function_name() you’ll invoke a package’s function. (At a different level you’ll have to use a ":" operator, as you’ll see in later examples.) Request #10 is: fiber = require("fiber")! fiber! fiber.status()! ------------------------- First you’ll see a list of functions, one of which is "status". Then you’ll see the fiber's current status (the fiber is running now). More on fibers on the next screen, so type now. ]]; [[ Tutorial -- Screen #11 -- The fiber package =========================================== Make a function that will run like a daemon in the background until you cancel it. For this you need a fiber. Tarantool is a "cooperative multitasking" application server, which means that multiple tasks each get a slice, but they have to yield occasionally so that other tasks get a chance. That’s what a properly designed fiber will do. More in the Tarantool manual: http://tarantool.org/doc/reference/reference_lua/fiber.html Request #11 is: fiber = require("fiber")! gvar = 0! function function_x() for i=0,600,1 do gvar = gvar + 1 fiber.sleep(1) end end! fid = fiber.create(function_x)! gvar! ------------------------------- The fiber.sleep(1) function will go to sleep for one second, which is one way of yielding. So the "for i=0,600,1" loop will go on for about 600 seconds (10 minutes). 
During waking moments, gvar will go up by 1 -- and gvar is deliberately a global variable. So it’s possible to monitor it: slowly type "gvar!" a few times and notice how the value mysteriously increases. ]]; [[ Tutorial -- Screen #12 -- The socket package ============================================ Connect to the Internet and send a message to Tarantool's web-site. Request #12 is: function socket_get () local socket, sock, result socket = require("socket") sock = socket.tcp_connect("tarantool.org", 80) sock:send("GET / HTTP/1.0\r\nHost: tarantool.org\r\n\r\n") result = sock:read(17) sock:close() return result end! socket_get()! -------------------------------- Briefly these requests are opening a socket and sending a "GET" request to tarantool.org’s server. The response will be short, for example "- "HTTP/1.1 302 OK\r\n"" but it shows you’ve gotten in touch with a distant server. More in the Tarantool manual: http://tarantool.org/doc/reference/reference_lua/socket.html ]]; [[ Tutorial -- Screen #13 -- The box package ========================================= So far you’ve seen Tarantool in action as a Lua application server. Henceforth you’ll see it as a DBMS (database management system) server -- with Lua stored procedures. In serious situations you’d have to ask the database administrator to create database objects and grant read/write access to you, but here you’re the "admin" user -- you have administrative powers -- so you can start manipulating data immediately. More in the Tarantool manual: http://tarantool.org/doc/book/box/index.html Request #13 is: box.schema.space.create("tutor")! box.space.tutor:create_index("primary",{})! box.space.tutor:replace{1,"First tuple"}! box.space.tutor:replace{2,"Second tuple"}! box.space.tutor:replace{3,"Third tuple"}! box.space.tutor:replace{4,"Fourth tuple"}! box.space.tutor:replace{5,"Fifth tuple"}! box.space.tutor! ------------------------------------------- Please ignore all the requests except the last one. 
You’ll see a description of a space named tutor. To understand the description, you just have to know that: ** fields are numbered item-storage areas (vaguely like columns in an SQL DBMS) ** tuples are collections of fields, as are Lua tables (vaguely like rows in an SQL DBMS) ** spaces are where Tarantool stores sets of tuples (vaguely like databases in an SQL DBMS) ** indexes are objects that make lookups of tuples faster (vaguely like indexes in an SQL DBMS) Much of the description doesn’t matter right now; it’s enough if you see that package box gets a space which is named tutor, and it has one index on the first field. ]]; [[ Tutorial -- Screen #14 -- box.select() ====================================== The most common data-manipulation function is box.select(). One of the syntaxes is: box.space.tutor.index.primary:select({1}, {iterator = "GE"}) and it returns a set of tuples via the index of the tutor space. Now that you know that, and considering that you already know how to make functions and loops in Lua, it’s simple to figure out how to search and display the first five tuples in the database. Request #14 is: -- This function will select and display 5 tuples in space=tutor function database_display (space_name) local i local result = "" t = box.space[space_name].index.primary:select({1}, {iterator = "GE"}) for i=1,5,1 do result = result .. t[i][1] .. " " .. t[i][2] .. "\n" end return result end! database_display("tutor")! -------------------------- So select() is returning a set of tuples into a Lua table named t, and the loop is going to print each element of the table. That is, when you call database_display()! you’ll see a display of what’s in the tuples. ]]; [[ Tutorial -- Screen #15 -- box.replace() ======================================= Pick any of the tuples that were displayed on the last screen. Recall that the first field is the indexed field. That’s all you need to replace the rest of the fields with new values. 
The syntax of box.replace(), pared down, is: box.space.tutor:replace{primary-key-field, other-fields} More in the Tarantool manual: http://tarantool.org/doc/book/box/box_space.html#lua-function.space_object.replace Tarantool by default keeps database changes in memory, but box.replace() will cause a write to a log, and log information can later be consolidated with another box function (box.snapshot). Request #15 is: box.space.tutor:replace{1, "My First Piece Of Data"}! ----------------------------------------------------- If there is already a "tuple" (our equivalent of a record) whose number is equal to 1, it will be replaced with your new data. Otherwise it will be created for the first time. The display will be the formal description of the new tuple. ]]; [[ Tutorial -- Screen #16 -- Create your own space =============================================== You’ve now selected and replaced tuples from the tutor space, and you could select and replace many tuples because you know how to make variables and functions and loops that do selecting or replacing. But you’ve been confined to a space and an index that Tarantool started with. Suppose that you want to create your own. More in the Tarantool manual: http://tarantool.org/doc/getting_started.html#starting-tarantool-and-making-your-first-database Request #16 is: box.schema.space.create("test", {engine="memtx"})! -------------------------------------------------- The new space’s name will be "test" and the engine will be "memtx" -- the engine which keeps all tuples in memory, and writes changes to a log file to ensure that data can’t be lost. Although "memtx" is the default engine anyway, specifying it does no harm. ]]; [[ Tutorial -- Screen #17 -- Create your own index =============================================== Having a space isn’t enough -- you must have at least one index. Indexes make access faster. 
Indexes can be declared to be "unique", which is important because some combination of the fields must be unique, for identification purposes. More in the Tarantool manual: https://tarantool.org/doc/book/box/data_model.html#index Request #17 is: box.space.test:create_index("primary",{unique = true, parts = {1, "NUM"}})! box.space.test:create_index("secondary",{parts = {2, "STR"}})! -------------------------------------------------------------- This means the first index will be named primary, will be unique, will be on the first field of each tuple, and will be numeric. The second index will be named secondary, doesn’t have to be unique, will be on the second field of each tuple, and will be in order by string value. ]]; [[ Tutorial -- Screen #18 -- Insert multiple tuples ================================================ In a loop, put some tuples in your new space. Because of the index definitions, the first field must be a number, the second field must be a string, and the later fields can be anything. Use a function in the Lua string library to make values for the second field. More in the Lua manual: http://www.lua.org/pil/20.html Request #18 is: for i=65,70,1 do box.space.test:replace{i, string.char(i)} end! ----------------------------------------- Tip: to select the tuples later, use the function that you created earlier: database_display("test")! ]]; [[ Tutorial -- Screen #19 -- Become another user ============================================= Remember, you’re currently "admin" -- administrator. Now switch to being "guest", a much less powerful user. Request #19 is: box.session.su("guest") -- switch user to "guest"! box.space.test:replace{100,""} -- try to add a tuple! ----------------------------------------------------- The result will be an error message telling you that you don’t have the privilege to do that any more. That’s good news. It shows that Tarantool prevents unauthorized users from working with databases. 
But you can say box.session.su("admin")! to become a powerful user again, because for this tutorial the "admin" user isn’t protected by a password. ]]; [[ Tutorial -- Screen #20 -- The bigger Tutorials ============================================== You can continue to type in whatever Lua instructions, package requires, and database-manipulations you want, here on this screen. But to really get into Tarantool, you should download it so that you can be your own administrator and create your own permanent databases. The Tarantool manual has three significant tutorials: Insert one million tuples with a Lua stored procedure, Sum a JSON field for all tuples, and Indexed pattern search. See http://tarantool.org/en/doc/tutorials/lua_tutorials.html Request #20 is: ((Whatever you want. Enjoy!)) When you’re finished, don’t type , just wander off and have a nice day. ]]; }; --[[ tutorial ]]-- } tarantool_1.9.1.26.g63eb81e3c/src/lua/fiber.lua0000664000000000000000000000115213306560010017341 0ustar rootroot-- fiber.lua (internal file) local fiber = require('fiber') local ffi = require('ffi') ffi.cdef[[ double fiber_time(void); uint64_t fiber_time64(void); double fiber_clock(void); uint64_t fiber_clock64(void); ]] local C = ffi.C local function fiber_time() return tonumber(C.fiber_time()) end local function fiber_time64() return C.fiber_time64() end local function fiber_clock() return tonumber(C.fiber_clock()) end local function fiber_clock64() return C.fiber_clock64() end fiber.time = fiber_time fiber.time64 = fiber_time64 fiber.clock = fiber_clock fiber.clock64 = fiber_clock64 return fiber tarantool_1.9.1.26.g63eb81e3c/src/lua/msgpackffi.lua0000664000000000000000000004200413306560010020365 0ustar rootroot-- msgpackffi.lua (internal file) local ffi = require('ffi') local buffer = require('buffer') local builtin = ffi.C local msgpack = require('msgpack') -- .NULL, .array_mt, .map_mt, .cfg local MAXNESTING = 16 local int8_ptr_t = ffi.typeof('int8_t *') local uint8_ptr_t = 
ffi.typeof('uint8_t *') local uint16_ptr_t = ffi.typeof('uint16_t *') local uint32_ptr_t = ffi.typeof('uint32_t *') local uint64_ptr_t = ffi.typeof('uint64_t *') local const_char_ptr_t = ffi.typeof('const char *') ffi.cdef([[ char * mp_encode_float(char *data, float num); char * mp_encode_double(char *data, double num); float mp_decode_float(const char **data); double mp_decode_double(const char **data); union tmpint { uint16_t u16; uint32_t u32; uint64_t u64; }; ]]) local strict_alignment = (jit.arch == 'arm') local tmpint if strict_alignment then tmpint = ffi.new('union tmpint[1]') end local function bswap_u16(num) return bit.rshift(bit.bswap(tonumber(num)), 16) end -------------------------------------------------------------------------------- -- Encoder -------------------------------------------------------------------------------- local encode_ext_cdata = {} -- Set trigger that called when encoding cdata local function on_encode(ctype_or_udataname, callback) if type(ctype_or_udataname) ~= "cdata" or type(callback) ~= "function" then error("Usage: on_encode(ffi.typeof('mytype'), function(buf, obj)") end local ctypeid = tonumber(ffi.typeof(ctype_or_udataname)) local prev = encode_ext_cdata[ctypeid] encode_ext_cdata[ctypeid] = callback return prev end local function encode_fix(buf, code, num) local p = buf:alloc(1) p[0] = bit.bor(code, tonumber(num)) end local function encode_u8(buf, code, num) local p = buf:alloc(2) p[0] = code ffi.cast(uint8_ptr_t, p + 1)[0] = num end local encode_u16 if strict_alignment then encode_u16 = function(buf, code, num) tmpint[0].u16 = bswap_u16(num) local p = buf:alloc(3) p[0] = code ffi.copy(p + 1, tmpint, 2) end else encode_u16 = function(buf, code, num) local p = buf:alloc(3) p[0] = code ffi.cast(uint16_ptr_t, p + 1)[0] = bswap_u16(num) end end local encode_u32 if strict_alignment then encode_u32 = function(buf, code, num) tmpint[0].u32 = ffi.cast('uint32_t', bit.bswap(tonumber(num))) local p = buf:alloc(5) p[0] = code 
ffi.copy(p + 1, tmpint, 4) end else encode_u32 = function(buf, code, num) local p = buf:alloc(5) p[0] = code ffi.cast(uint32_ptr_t, p + 1)[0] = ffi.cast('uint32_t', bit.bswap(tonumber(num))) end end local encode_u64 if strict_alignment then encode_u64 = function(buf, code, num) tmpint[0].u64 = bit.bswap(ffi.cast('uint64_t', num)) local p = buf:alloc(9) p[0] = code ffi.copy(p + 1, tmpint, 8) end else encode_u64 = function(buf, code, num) local p = buf:alloc(9) p[0] = code ffi.cast(uint64_ptr_t, p + 1)[0] = bit.bswap(ffi.cast('uint64_t', num)) end end local function encode_float(buf, num) local p = buf:alloc(5) builtin.mp_encode_float(p, num) end local function encode_double(buf, num) local p = buf:alloc(9) builtin.mp_encode_double(p, num) end local function encode_int(buf, num) if num >= 0 then if num <= 0x7f then encode_fix(buf, 0, num) elseif num <= 0xff then encode_u8(buf, 0xcc, num) elseif num <= 0xffff then encode_u16(buf, 0xcd, num) elseif num <= 0xffffffff then encode_u32(buf, 0xce, num) else encode_u64(buf, 0xcf, 0ULL + num) end else if num >= -0x20 then encode_fix(buf, 0xe0, num) elseif num >= -0x80 then encode_u8(buf, 0xd0, num) elseif num >= -0x8000 then encode_u16(buf, 0xd1, num) elseif num >= -0x80000000 then encode_u32(buf, 0xd2, num) else encode_u64(buf, 0xd3, 0LL + num) end end end local function encode_str(buf, str) local len = #str buf:reserve(5 + len) if len <= 31 then encode_fix(buf, 0xa0, len) elseif len <= 0xff then encode_u8(buf, 0xd9, len) elseif len <= 0xffff then encode_u16(buf, 0xda, len) else encode_u32(buf, 0xdb, len) end local p = buf:alloc(len) ffi.copy(p, str, len) end local function encode_array(buf, size) if size <= 0xf then encode_fix(buf, 0x90, size) elseif size <= 0xffff then encode_u16(buf, 0xdc, size) else encode_u32(buf, 0xdd, size) end end local function encode_map(buf, size) if size <= 0xf then encode_fix(buf, 0x80, size) elseif size <= 0xffff then encode_u16(buf, 0xde, size) else encode_u32(buf, 0xdf, size) end end local 
function encode_bool(buf, val) encode_fix(buf, 0xc2, val and 1 or 0) end local function encode_bool_cdata(buf, val) encode_fix(buf, 0xc2, val ~= 0 and 1 or 0) end local function encode_nil(buf) local p = buf:alloc(1) p[0] = 0xc0 end local function encode_r(buf, obj, level) ::restart:: if type(obj) == "number" then -- Lua-way to check that number is an integer if obj % 1 == 0 and obj > -1e63 and obj < 1e64 then encode_int(buf, obj) else encode_double(buf, obj) end elseif type(obj) == "string" then encode_str(buf, obj) elseif type(obj) == "table" then if level >= MAXNESTING then -- Limit nested tables encode_nil(buf) return end local serialize = nil local mt = getmetatable(obj) if mt ~= nil then serialize = mt.__serialize end -- calculate the number of array and map elements in the table -- TODO: pairs() aborts JIT local array_count, map_count = 0, 0 for key in pairs(obj) do if type(key) == 'number' and key >= 1 and key == math.floor(key) and key == array_count + 1 then array_count = array_count + 1 else map_count = map_count + 1 end end if (serialize == nil and map_count == 0) or serialize == 'array' or serialize == 'seq' or serialize == 'sequence' then encode_array(buf, array_count) for i=1,array_count,1 do encode_r(buf, obj[i], level + 1) end elseif (serialize == nil and map_count > 0) or serialize == 'map' or serialize == 'mapping' then encode_map(buf, array_count + map_count) for key, val in pairs(obj) do encode_r(buf, key, level + 1) encode_r(buf, val, level + 1) end elseif type(serialize) == 'function' then obj = serialize(obj) goto restart else error("Invalid __serialize value") end elseif obj == nil then encode_nil(buf) elseif type(obj) == "boolean" then encode_bool(buf, obj) elseif type(obj) == "cdata" then if obj == nil then -- a workaround for nil encode_nil(buf, obj) return end local ctypeid = tonumber(ffi.typeof(obj)) local fun = encode_ext_cdata[ctypeid] if fun ~= nil then fun(buf, obj) else error("can not encode FFI type: '"..ffi.typeof(obj).."'") end 
else error("can not encode Lua type: '"..type(obj).."'") end end local function encode(obj) local tmpbuf = buffer.IBUF_SHARED tmpbuf:reset() encode_r(tmpbuf, obj, 0) local r = ffi.string(tmpbuf.rpos, tmpbuf:size()) tmpbuf:recycle() return r end local function encode_ibuf(obj, ibuf) encode_r(ibuf, obj, 0) end on_encode(ffi.typeof('uint8_t'), encode_int) on_encode(ffi.typeof('uint16_t'), encode_int) on_encode(ffi.typeof('uint32_t'), encode_int) on_encode(ffi.typeof('uint64_t'), encode_int) on_encode(ffi.typeof('int8_t'), encode_int) on_encode(ffi.typeof('int16_t'), encode_int) on_encode(ffi.typeof('int32_t'), encode_int) on_encode(ffi.typeof('int64_t'), encode_int) on_encode(ffi.typeof('char'), encode_int) on_encode(ffi.typeof('const char'), encode_int) on_encode(ffi.typeof('unsigned char'), encode_int) on_encode(ffi.typeof('const unsigned char'), encode_int) on_encode(ffi.typeof('bool'), encode_bool_cdata) on_encode(ffi.typeof('float'), encode_float) on_encode(ffi.typeof('double'), encode_double) -------------------------------------------------------------------------------- -- Decoder -------------------------------------------------------------------------------- local decode_r -- See similar constants in utils.cc local DBL_INT_MAX = 1e14 - 1 local DBL_INT_MIN = -1e14 + 1 local function decode_u8(data) local num = ffi.cast(uint8_ptr_t, data[0])[0] data[0] = data[0] + 1 return tonumber(num) end local decode_u16 if strict_alignment then decode_u16 = function(data) ffi.copy(tmpint, data[0], 2) data[0] = data[0] + 2 return tonumber(bswap_u16(tmpint[0].u16)) end else decode_u16 = function(data) local num = bswap_u16(ffi.cast(uint16_ptr_t, data[0])[0]) data[0] = data[0] + 2 return tonumber(num) end end local decode_u32 if strict_alignment then decode_u32 = function(data) ffi.copy(tmpint, data[0], 4) data[0] = data[0] + 4 return tonumber( ffi.cast('uint32_t', bit.bswap(tonumber(tmpint[0].u32)))) end else decode_u32 = function(data) local num = ffi.cast('uint32_t', 
bit.bswap(tonumber(ffi.cast(uint32_ptr_t, data[0])[0]))) data[0] = data[0] + 4 return tonumber(num) end end local decode_u64 if strict_alignment then decode_u64 = function(data) ffi.copy(tmpint, data[0], 8); data[0] = data[0] + 8 local num = bit.bswap(tmpint[0].u64) if num <= DBL_INT_MAX then return tonumber(num) -- return as 'number' end return num -- return as 'cdata' end else decode_u64 = function(data) local num = bit.bswap(ffi.cast(uint64_ptr_t, data[0])[0]) data[0] = data[0] + 8 if num <= DBL_INT_MAX then return tonumber(num) -- return as 'number' end return num -- return as 'cdata' end end local function decode_i8(data) local num = ffi.cast(int8_ptr_t, data[0])[0] data[0] = data[0] + 1 return tonumber(num) end local decode_i16 if strict_alignment then decode_i16 = function(data) ffi.copy(tmpint, data[0], 2) local num = bswap_u16(tmpint[0].u16) data[0] = data[0] + 2 -- note: this double cast is actually necessary return tonumber(ffi.cast('int16_t', ffi.cast('uint16_t', num))) end else decode_i16 = function(data) local num = bswap_u16(ffi.cast(uint16_ptr_t, data[0])[0]) data[0] = data[0] + 2 -- note: this double cast is actually necessary return tonumber(ffi.cast('int16_t', ffi.cast('uint16_t', num))) end end local decode_i32 if strict_alignment then decode_i32 = function(data) ffi.copy(tmpint, data[0], 4) local num = bit.bswap(tonumber(tmpint[0].u32)) data[0] = data[0] + 4 return num end else decode_i32 = function(data) local num = bit.bswap(tonumber(ffi.cast(uint32_ptr_t, data[0])[0])) data[0] = data[0] + 4 return num end end local decode_i64 if strict_alignment then decode_i64 = function(data) ffi.copy(tmpint, data[0], 8) data[0] = data[0] + 8 local num = bit.bswap(ffi.cast('int64_t', tmpint[0].u64)) if num >= -DBL_INT_MAX and num <= DBL_INT_MAX then return tonumber(num) -- return as 'number' end return num -- return as 'cdata' end else decode_i64 = function(data) local num = bit.bswap(ffi.cast('int64_t', ffi.cast(uint64_ptr_t, data[0])[0])) data[0] = 
data[0] + 8 if num >= -DBL_INT_MAX and num <= DBL_INT_MAX then return tonumber(num) -- return as 'number' end return num -- return as 'cdata' end end local function decode_float(data) data[0] = data[0] - 1 -- mp_decode_float need type code return tonumber(builtin.mp_decode_float(data)) end local function decode_double(data) data[0] = data[0] - 1 -- mp_decode_double need type code return tonumber(builtin.mp_decode_double(data)) end local function decode_str(data, size) local ret = ffi.string(data[0], size) data[0] = data[0] + size return ret end local function decode_array(data, size) assert (type(size) == "number") local arr = {} local i for i=1,size,1 do table.insert(arr, decode_r(data)) end if not msgpack.cfg.decode_save_metatables then return arr end return setmetatable(arr, msgpack.array_mt) end local function decode_map(data, size) assert (type(size) == "number") local map = {} local i for i=1,size,1 do local key = decode_r(data); local val = decode_r(data); map[key] = val end if not msgpack.cfg.decode_save_metatables then return map end return setmetatable(map, msgpack.map_mt) end local decoder_hint = { --[[{{{ MP_BIN]] [0xc4] = function(data) return decode_str(data, decode_u8(data)) end; [0xc5] = function(data) return decode_str(data, decode_u16(data)) end; [0xc6] = function(data) return decode_str(data, decode_u32(data)) end; --[[MP_FLOAT, MP_DOUBLE]] [0xca] = decode_float; [0xcb] = decode_double; --[[MP_UINT]] [0xcc] = decode_u8; [0xcd] = decode_u16; [0xce] = decode_u32; [0xcf] = decode_u64; --[[MP_INT]] [0xd0] = decode_i8; [0xd1] = decode_i16; [0xd2] = decode_i32; [0xd3] = decode_i64; --[[MP_STR]] [0xd9] = function(data) return decode_str(data, decode_u8(data)) end; [0xda] = function(data) return decode_str(data, decode_u16(data)) end; [0xdb] = function(data) return decode_str(data, decode_u32(data)) end; --[[MP_ARRAY]] [0xdc] = function(data) return decode_array(data, decode_u16(data)) end; [0xdd] = function(data) return decode_array(data, 
decode_u32(data)) end; --[[MP_MAP]] [0xde] = function(data) return decode_map(data, decode_u16(data)) end; [0xdf] = function(data) return decode_map(data, decode_u32(data)) end; } decode_r = function(data) local c = data[0][0] data[0] = data[0] + 1 if c <= 0x7f then return tonumber(c) -- fixint elseif c >= 0xa0 and c <= 0xbf then return decode_str(data, bit.band(c, 0x1f)) -- fixstr elseif c >= 0x90 and c <= 0x9f then return decode_array(data, bit.band(c, 0xf)) -- fixarray elseif c >= 0x80 and c <= 0x8f then return decode_map(data, bit.band(c, 0xf)) -- fixmap elseif c >= 0xe0 then return tonumber(ffi.cast('signed char',c)) -- negfixint elseif c == 0xc0 then return msgpack.NULL elseif c == 0xc2 then return false elseif c == 0xc3 then return true else local fun = decoder_hint[c]; assert (type(fun) == "function") return fun(data) end end --- -- A temporary const char ** buffer. -- All decode_XXX functions accept const char **data as its first argument, -- like libmsgpuck does. After decoding data[0] position is changed to the next -- element. It is significally faster on LuaJIT to use double pointer than -- return result, newpos. -- local bufp = ffi.new('const unsigned char *[1]'); local function check_offset(offset, len) if offset == nil then return 1 end local offset = ffi.cast('ptrdiff_t', offset) if offset < 1 or offset > len then error(string.format("offset = %d is out of bounds [1..%d]", tonumber(offset), len)) end return offset end -- decode_unchecked(str, offset) -> res, new_offset -- decode_unchecked(buf) -> res, new_buf local function decode_unchecked(str, offset) if type(str) == "string" then offset = check_offset(offset, #str) local buf = ffi.cast(const_char_ptr_t, str) bufp[0] = buf + offset - 1 local r = decode_r(bufp) return r, bufp[0] - buf + 1 elseif ffi.istype(const_char_ptr_t, str) then bufp[0] = str local r = decode_r(bufp) return r, bufp[0] else error("msgpackffi.decode_unchecked(str, offset) -> res, new_offset | ".. 
"msgpackffi.decode_unchecked(const char *buf) -> res, new_buf") end end -------------------------------------------------------------------------------- -- exports -------------------------------------------------------------------------------- return { NULL = msgpack.NULL; array_mt = msgpack.array_mt; map_mt = msgpack.map_mt; encode = encode; on_encode = on_encode; decode_unchecked = decode_unchecked; decode = decode_unchecked; -- just for tests internal = { encode_fix = encode_fix; encode_array = encode_array; encode_r = encode_r; } } tarantool_1.9.1.26.g63eb81e3c/src/lua/fio.c0000664000000000000000000004541513306560010016502 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "lua/fio.h" #include #include #include #include #include #include #include #include #include #include "coio_task.h" #include #include #include #include "lua/utils.h" #include "coio_file.h" static inline void lbox_fio_pushsyserror(struct lua_State *L) { diag_set(SystemError, "fio: %s", strerror(errno)); luaT_pusherror(L, diag_get()->last); } static int lbox_fio_open(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { usage: luaL_error(L, "Usage: fio.open(path, flags, mode)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; int flags = lua_tointeger(L, 2); int mode = lua_tointeger(L, 3); int fh = coio_file_open(pathname, flags, mode); if (fh < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushinteger(L, fh); return 1; } static int lbox_fio_pwrite(struct lua_State *L) { int fh = lua_tointeger(L, 1); const char *buf = lua_tostring(L, 2); uint32_t ctypeid = 0; if (buf == NULL) buf = *(const char **)luaL_checkcdata(L, 2, &ctypeid); size_t len = lua_tonumber(L, 3); size_t offset = lua_tonumber(L, 4); int res = coio_pwrite(fh, buf, len, offset); if (res < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushinteger(L, res); return 1; } static int lbox_fio_pread(struct lua_State *L) { int fh = lua_tointeger(L, 1); uint32_t ctypeid; char *buf = *(char **)luaL_checkcdata(L, 2, &ctypeid); size_t len = lua_tonumber(L, 3); size_t offset = lua_tonumber(L, 4); if (!len) { lua_pushinteger(L, 0); return 1; } int res = coio_pread(fh, buf, len, offset); if (res < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushinteger(L, res); return 1; } static inline int lbox_fio_pushbool(struct lua_State *L, bool res) { lua_pushboolean(L, res); if (!res) { lbox_fio_pushsyserror(L); return 2; } return 1; } static int lbox_fio_rename(struct lua_State *L) { const char *oldpath; const char *newpath; if (lua_gettop(L) < 2) { usage: luaL_error(L, "Usage: fio.rename(oldpath, newpath)"); } oldpath = 
lua_tostring(L, 1); newpath = lua_tostring(L, 2); if (oldpath == NULL || newpath == NULL) goto usage; int res = coio_rename(oldpath, newpath); return lbox_fio_pushbool(L, res == 0); } static int lbox_fio_unlink(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { usage: luaL_error(L, "Usage: fio.unlink(pathname)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; int res = coio_unlink(pathname); return lbox_fio_pushbool(L, res == 0); } static int lbox_fio_ftruncate(struct lua_State *L) { int fd = lua_tointeger(L, 1); off_t length = lua_tonumber(L, 2); int res = coio_ftruncate(fd, length); return lbox_fio_pushbool(L, res == 0); } static int lbox_fio_truncate(struct lua_State *L) { const char *pathname; int top = lua_gettop(L); if (top < 1) { usage: luaL_error(L, "Usage: fio.truncate(pathname[, newlen])"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; off_t length; if (top >= 2) length = lua_tonumber(L, 2); else length = 0; int res = coio_truncate(pathname, length); return lbox_fio_pushbool(L, res == 0); } static int lbox_fio_write(struct lua_State *L) { int fh = lua_tointeger(L, 1); const char *buf = lua_tostring(L, 2); uint32_t ctypeid = 0; if (buf == NULL) buf = *(const char **)luaL_checkcdata(L, 2, &ctypeid); size_t len = lua_tonumber(L, 3); int res = coio_write(fh, buf, len); if (res < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushinteger(L, res); return 1; } static int lbox_fio_chown(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 3) { usage: luaL_error(L, "Usage: fio.chown(pathname, owner, group)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; uid_t owner; if (lua_isnumber(L, 2)) { owner = lua_tointeger(L, 2); } else { const char *username = lua_tostring(L, 2); if (username == NULL) username = ""; struct passwd *entry = getpwnam(username); if (!entry) { errno = EINVAL; lua_pushnil(L); return 1; } owner = entry->pw_uid; } gid_t group; if 
(lua_isnumber(L, 3)) { group = lua_tointeger(L, 3); } else { const char *groupname = lua_tostring(L, 3); if (groupname == NULL) groupname = ""; struct group *entry = getgrnam(groupname); if (!entry) { errno = EINVAL; lua_pushnil(L); return 1; } group = entry->gr_gid; } int res = coio_chown(pathname, owner, group); return lbox_fio_pushbool(L, res == 0); } static int lbox_fio_chmod(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 2) { usage: luaL_error(L, "Usage: fio.chmod(pathname, mode)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; mode_t mode = lua_tointeger(L, 2); return lbox_fio_pushbool(L, coio_chmod(pathname, mode) == 0); } static int lbox_fio_read(struct lua_State *L) { int fh = lua_tointeger(L, 1); uint32_t ctypeid; char *buf = *(char **)luaL_checkcdata(L, 2, &ctypeid); size_t len = lua_tonumber(L, 3); if (!len) { lua_pushinteger(L, 0); return 1; } int res = coio_read(fh, buf, len); if (res < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushinteger(L, res); return 1; } static int lbox_fio_lseek(struct lua_State *L) { int fh = lua_tointeger(L, 1); off_t offset = lua_tonumber(L, 2); int whence = lua_tointeger(L, 3); off_t res = coio_lseek(fh, offset, whence); lua_pushnumber(L, res); return 1; } #if defined(__APPLE__) static int lbox_fio_pushtimespec(struct lua_State *L, const time_t *ts) { lua_pushnumber(L, *ts); return 1; } #else static int lbox_fio_pushtimespec(struct lua_State *L, const struct timespec *ts) { double nsec = ts->tv_nsec; nsec /= 1000000000; lua_pushnumber(L, ts->tv_sec + nsec); return 1; } #endif #define PUSHTABLE(name, method, value) { \ lua_pushliteral(L, name); \ method(L, value); \ lua_settable(L, -3); \ } #define DEF_STAT_METHOD(method_name, macro_name) \ static int \ lbox_fio_stat_##method_name(struct lua_State *L) \ { \ if (lua_gettop(L) < 1 || !lua_istable(L, 1)) \ luaL_error(L, "usage: stat:" #method_name "()"); \ lua_pushliteral(L, "mode"); \ lua_gettable(L, 1); \ int mode = 
lua_tointeger(L, -1); \ lua_pop(L, 1); \ lua_pushboolean(L, macro_name(mode) ? 1 : 0); \ return 1; \ } DEF_STAT_METHOD(is_reg, S_ISREG); DEF_STAT_METHOD(is_dir, S_ISDIR); DEF_STAT_METHOD(is_chr, S_ISCHR); DEF_STAT_METHOD(is_blk, S_ISBLK); DEF_STAT_METHOD(is_fifo, S_ISFIFO); #ifdef S_ISLNK DEF_STAT_METHOD(is_link, S_ISLNK); #endif #ifdef S_ISSOCK DEF_STAT_METHOD(is_sock, S_ISSOCK); #endif static int lbox_fio_pushstat(struct lua_State *L, int res, const struct stat *stat) { if (res < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_newtable(L); PUSHTABLE("dev", lua_pushinteger, stat->st_dev); PUSHTABLE("inode", lua_pushinteger, stat->st_ino); PUSHTABLE("mode", lua_pushinteger, stat->st_mode); PUSHTABLE("nlink", lua_pushinteger, stat->st_nlink); PUSHTABLE("uid", lua_pushinteger, stat->st_uid); PUSHTABLE("gid", lua_pushinteger, stat->st_gid); PUSHTABLE("rdev", lua_pushinteger, stat->st_rdev); PUSHTABLE("size", lua_pushinteger, stat->st_size); PUSHTABLE("blksize", lua_pushinteger, stat->st_blksize); PUSHTABLE("blocks", lua_pushinteger, stat->st_blocks); #if defined(__APPLE__) PUSHTABLE("ctime", lbox_fio_pushtimespec, &stat->st_ctime); PUSHTABLE("mtime", lbox_fio_pushtimespec, &stat->st_mtime); PUSHTABLE("atime", lbox_fio_pushtimespec, &stat->st_atime); #else PUSHTABLE("ctime", lbox_fio_pushtimespec, &stat->st_ctim); PUSHTABLE("mtime", lbox_fio_pushtimespec, &stat->st_mtim); PUSHTABLE("atime", lbox_fio_pushtimespec, &stat->st_atim); #endif int top = lua_gettop(L); /* metatable for tables *stat */ lua_newtable(L); lua_pushliteral(L, "__index"); lua_newtable(L); static const struct luaL_Reg stat_methods[] = { { "is_reg", lbox_fio_stat_is_reg }, { "is_dir", lbox_fio_stat_is_dir }, { "is_chr", lbox_fio_stat_is_chr }, { "is_blk", lbox_fio_stat_is_blk }, { "is_fifo", lbox_fio_stat_is_fifo }, #ifdef S_ISLNK { "is_link", lbox_fio_stat_is_link }, #endif #ifdef S_ISSOCK { "is_sock", lbox_fio_stat_is_sock }, #endif { NULL, NULL } }; luaL_register(L, NULL, 
stat_methods); lua_settable(L, -3); lua_setmetatable(L, top); lua_settop(L, top); return 1; } static int lbox_fio_lstat(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { usage: luaL_error(L, "pathname is absent"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; struct stat stat; int res = coio_lstat(pathname, &stat); return lbox_fio_pushstat(L, res, &stat); } static int lbox_fio_stat(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { usage: luaL_error(L, "Usage: fio.stat(pathname)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; struct stat stat; int res = coio_stat(pathname, &stat); return lbox_fio_pushstat(L, res, &stat); } static int lbox_fio_fstat(struct lua_State *L) { int fd = lua_tointeger(L, 1); struct stat stat; int res = coio_fstat(fd, &stat); return lbox_fio_pushstat(L, res, &stat); } static int lbox_fio_mkdir(struct lua_State *L) { const char *pathname; int top = lua_gettop(L); if (top < 1) { usage: luaL_error(L, "Usage fio.mkdir(pathname[, mode])"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; mode_t mode; if (top >= 2 && !lua_isnil(L, 2)) mode = lua_tointeger(L, 2); else mode = 0777; return lbox_fio_pushbool(L, coio_mkdir(pathname, mode) == 0); } static int lbox_fio_rmdir(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { usage: luaL_error(L, "Usage: fio.rmdir(pathname)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; return lbox_fio_pushbool(L, coio_rmdir(pathname) == 0); } static int lbox_fio_listdir(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { luaL_error(L, "Usage: fio.listdir(pathname)"); } pathname = lua_tostring(L, 1); char *buf; if (coio_readdir(pathname, &buf) < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushstring(L, buf); free(buf); return 1; } static int lbox_fio_glob(struct lua_State *L) { const char *pattern; if (lua_gettop(L) < 1) { usage: luaL_error(L, 
"Usage: fio.glob(pattern)"); } pattern = lua_tostring(L, 1); if (pattern == NULL) goto usage; glob_t globbuf; switch (glob(pattern, GLOB_NOESCAPE, NULL, &globbuf)) { case 0: break; case GLOB_NOMATCH: lua_newtable(L); return 1; default: case GLOB_NOSPACE: errno = ENOMEM; lua_pushnil(L); return 1; } lua_newtable(L); for (size_t i = 0; i < globbuf.gl_pathc; i++) { lua_pushinteger(L, i + 1); lua_pushstring(L, globbuf.gl_pathv[i]); lua_settable(L, -3); } globfree(&globbuf); return 1; } static int lbox_fio_link(struct lua_State *L) { const char *target; const char *linkpath; if (lua_gettop(L) < 2) { usage: luaL_error(L, "Usage: fio.link(target, linkpath)"); } target = lua_tostring(L, 1); linkpath = lua_tostring(L, 2); if (target == NULL || linkpath == NULL) goto usage; return lbox_fio_pushbool(L, coio_link(target, linkpath) == 0); } static int lbox_fio_symlink(struct lua_State *L) { const char *target; const char *linkpath; if (lua_gettop(L) < 2) { usage: luaL_error(L, "Usage: fio.symlink(target, linkpath)"); } target = lua_tostring(L, 1); linkpath = lua_tostring(L, 2); if (target == NULL || linkpath == NULL) goto usage; return lbox_fio_pushbool(L, coio_symlink(target, linkpath) == 0); } static int lbox_fio_readlink(struct lua_State *L) { const char *pathname; if (lua_gettop(L) < 1) { usage: luaL_error(L, "Usage: fio.readlink(pathname)"); } pathname = lua_tostring(L, 1); if (pathname == NULL) goto usage; char *path = (char *)lua_newuserdata(L, PATH_MAX); int res = coio_readlink(pathname, path, PATH_MAX); if (res < 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushlstring(L, path, res); lua_remove(L, -2); return 1; } static int lbox_fio_tempdir(struct lua_State *L) { char *buf = (char *)lua_newuserdata(L, PATH_MAX); if (!buf) { errno = ENOMEM; lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } if (coio_tempdir(buf, PATH_MAX) != 0) { lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } lua_pushstring(L, buf); lua_remove(L, -2); return 1; } static int 
lbox_fio_cwd(struct lua_State *L) { char *buf = (char *)lua_newuserdata(L, PATH_MAX); if (!buf) { errno = ENOMEM; lua_pushnil(L); lbox_fio_pushsyserror(L); return 2; } if (getcwd(buf, PATH_MAX)) { lua_pushstring(L, buf); lua_remove(L, -2); } else { lbox_fio_pushsyserror(L); lua_pushnil(L); return 2; } return 1; } static int lbox_fio_fsync(struct lua_State *L) { int fd = lua_tointeger(L, 1); return lbox_fio_pushbool(L, coio_fsync(fd) == 0); } static int lbox_fio_fdatasync(struct lua_State *L) { int fd = lua_tointeger(L, 1); return lbox_fio_pushbool(L, coio_fdatasync(fd) == 0); } static int lbox_fio_sync(struct lua_State *L) { return lbox_fio_pushbool(L, coio_sync() == 0); } static int lbox_fio_close(struct lua_State *L) { int fd = lua_tointeger(L, 1); return lbox_fio_pushbool(L, coio_file_close(fd) == 0); } static int lbox_fio_copyfile(struct lua_State *L) { const char *source = lua_tostring(L, -2); const char *dest = lua_tostring(L, -1); assert(source != NULL && dest != NULL); return lbox_fio_pushbool(L, coio_copyfile(source, dest) == 0); } void tarantool_lua_fio_init(struct lua_State *L) { static const struct luaL_Reg fio_methods[] = { { "lstat", lbox_fio_lstat }, { "stat", lbox_fio_stat }, { "mkdir", lbox_fio_mkdir }, { "rmdir", lbox_fio_rmdir }, { "glob", lbox_fio_glob }, { "link", lbox_fio_link }, { "symlink", lbox_fio_symlink }, { "readlink", lbox_fio_readlink }, { "unlink", lbox_fio_unlink }, { "rename", lbox_fio_rename }, { "chown", lbox_fio_chown }, { "chmod", lbox_fio_chmod }, { "truncate", lbox_fio_truncate }, { "tempdir", lbox_fio_tempdir }, { "cwd", lbox_fio_cwd }, { "sync", lbox_fio_sync }, { NULL, NULL } }; luaL_register_module(L, "fio", fio_methods); /* internal table */ lua_pushliteral(L, "internal"); lua_newtable(L); static const struct luaL_Reg internal_methods[] = { { "open", lbox_fio_open }, { "close", lbox_fio_close }, { "pwrite", lbox_fio_pwrite }, { "pread", lbox_fio_pread }, { "read", lbox_fio_read }, { "write", lbox_fio_write }, { "lseek", 
lbox_fio_lseek }, { "ftruncate", lbox_fio_ftruncate }, { "fsync", lbox_fio_fsync }, { "fdatasync", lbox_fio_fdatasync }, { "listdir", lbox_fio_listdir }, { "fstat", lbox_fio_fstat }, { "copyfile", lbox_fio_copyfile, }, { NULL, NULL } }; luaL_register(L, NULL, internal_methods); lua_settable(L, -3); lua_pushliteral(L, "c"); lua_newtable(L); lua_pushliteral(L, "flag"); lua_newtable(L); #ifdef O_APPEND PUSHTABLE("O_APPEND", lua_pushinteger, O_APPEND); #endif #ifdef O_ASYNC PUSHTABLE("O_ASYNC", lua_pushinteger, O_ASYNC); #endif #ifdef O_CLOEXEC PUSHTABLE("O_CLOEXEC", lua_pushinteger, O_CLOEXEC); #endif #ifdef O_CREAT PUSHTABLE("O_CREAT", lua_pushinteger, O_CREAT); #endif #ifdef O_DIRECT PUSHTABLE("O_DIRECT", lua_pushinteger, O_DIRECT); #endif #ifdef O_DIRECTORY PUSHTABLE("O_DIRECTORY", lua_pushinteger, O_DIRECTORY); #endif #ifdef O_EXCL PUSHTABLE("O_EXCL", lua_pushinteger, O_EXCL); #endif #ifdef O_LARGEFILE PUSHTABLE("O_LARGEFILE", lua_pushinteger, O_LARGEFILE); #endif #ifdef O_NOATIME PUSHTABLE("O_NOATIME", lua_pushinteger, O_NOATIME); #endif #ifdef O_NOCTTY PUSHTABLE("O_NOCTTY", lua_pushinteger, O_NOCTTY); #endif #ifdef O_NOFOLLOW PUSHTABLE("O_NOFOLLOW", lua_pushinteger, O_NOFOLLOW); #endif #ifdef O_NONBLOCK PUSHTABLE("O_NONBLOCK", lua_pushinteger, O_NONBLOCK); #endif #ifdef O_NDELAY PUSHTABLE("O_NDELAY", lua_pushinteger, O_NDELAY); #endif #ifdef O_PATH PUSHTABLE("O_PATH", lua_pushinteger, O_PATH); #endif #ifdef O_SYNC PUSHTABLE("O_SYNC", lua_pushinteger, O_SYNC); #endif #ifdef O_TMPFILE PUSHTABLE("O_TMPFILE", lua_pushinteger, O_TMPFILE); #endif #ifdef O_TRUNC PUSHTABLE("O_TRUNC", lua_pushinteger, O_TRUNC); #endif PUSHTABLE("O_RDONLY", lua_pushinteger, O_RDONLY); PUSHTABLE("O_WRONLY", lua_pushinteger, O_WRONLY); PUSHTABLE("O_RDWR", lua_pushinteger, O_RDWR); lua_settable(L, -3); lua_pushliteral(L, "mode"); lua_newtable(L); PUSHTABLE("S_IRWXU", lua_pushinteger, S_IRWXU); PUSHTABLE("S_IRUSR", lua_pushinteger, S_IRUSR); PUSHTABLE("S_IWUSR", lua_pushinteger, S_IWUSR); 
PUSHTABLE("S_IXUSR", lua_pushinteger, S_IXUSR); PUSHTABLE("S_IRWXG", lua_pushinteger, S_IRWXG); PUSHTABLE("S_IRGRP", lua_pushinteger, S_IRGRP); PUSHTABLE("S_IWGRP", lua_pushinteger, S_IWGRP); PUSHTABLE("S_IXGRP", lua_pushinteger, S_IXGRP); PUSHTABLE("S_IRWXO", lua_pushinteger, S_IRWXO); PUSHTABLE("S_IROTH", lua_pushinteger, S_IROTH); PUSHTABLE("S_IWOTH", lua_pushinteger, S_IWOTH); PUSHTABLE("S_IXOTH", lua_pushinteger, S_IXOTH); lua_settable(L, -3); lua_pushliteral(L, "seek"); lua_newtable(L); PUSHTABLE("SEEK_SET", lua_pushinteger, SEEK_SET); PUSHTABLE("SEEK_CUR", lua_pushinteger, SEEK_CUR); PUSHTABLE("SEEK_END", lua_pushinteger, SEEK_END); #ifdef SEEK_DATA PUSHTABLE("SEEK_DATA", lua_pushinteger, SEEK_DATA); #endif #ifdef SEEK_HOLE PUSHTABLE("SEEK_HOLE", lua_pushinteger, SEEK_HOLE); #endif lua_settable(L, -3); lua_settable(L, -3); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/lua/uri.lua0000664000000000000000000000431013306560010017050 0ustar rootroot-- uri.lua (internal file) local ffi = require('ffi') ffi.cdef[[ struct uri { const char *scheme; size_t scheme_len; const char *login; size_t login_len; const char *password; size_t password_len; const char *host; size_t host_len; const char *service; size_t service_len; const char *path; size_t path_len; const char *query; size_t query_len; const char *fragment; size_t fragment_len; int host_hint; }; int uri_parse(struct uri *uri, const char *str); int uri_format(char *str, size_t len, struct uri *uri, bool write_password); ]] local builtin = ffi.C; local uribuf = ffi.new('struct uri') local function parse(str) if str == nil then error("Usage: uri.parse(string)") end if builtin.uri_parse(uribuf, str) ~= 0 then return nil end local result = {} for _, k in ipairs({ 'scheme', 'login', 'password', 'host', 'service', 'path', 'query', 'fragment'}) do if uribuf[k] ~= nil then result[k] = ffi.string(uribuf[k], uribuf[k..'_len']) end end if uribuf.host_hint == 1 then result.ipv4 = result.host elseif uribuf.host_hint == 2 
then result.ipv6 = result.host elseif uribuf.host_hint == 3 then result.unix = result.service end return result end local function format(uri, write_password) uribuf.scheme = uri.scheme uribuf.scheme_len = string.len(uri.scheme or '') uribuf.login = uri.login uribuf.login_len = string.len(uri.login or '') uribuf.password = uri.password uribuf.password_len = string.len(uri.password or '') uribuf.host = uri.host uribuf.host_len = string.len(uri.host or '') uribuf.service = uri.service uribuf.service_len = string.len(uri.service or '') uribuf.path = uri.path uribuf.path_len = string.len(uri.path or '') uribuf.query = uri.query uribuf.query_len = string.len(uri.query or '') uribuf.fragment = uri.fragment uribuf.fragment_len = string.len(uri.fragment or '') local str = ffi.new('char[1024]') builtin.uri_format(str, 1024, uribuf, write_password and 1 or 0) return ffi.string(str) end return { parse = parse, format = format, }; tarantool_1.9.1.26.g63eb81e3c/src/lua/socket.c0000664000000000000000000005710213306560010017211 0ustar rootroot/* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "lua/socket.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* coio_wait() */ #include /* coio_getaddrinfo() */ #include #include "lua/utils.h" #include "lua/fiber.h" extern int coio_wait(int fd, int event, double timeout); extern char socket_lua[]; static const struct { char name[32]; int value; } domains[] = { #ifdef PF_UNIX { "PF_UNIX", PF_UNIX }, #endif #ifdef PF_LOCAL { "PF_LOCAL", PF_LOCAL }, #endif #ifdef PF_INET { "PF_INET", PF_INET }, #endif #ifdef PF_INET6 { "PF_INET6", PF_INET6 }, #endif #ifdef PF_IPX { "PF_IPX", PF_IPX }, #endif #ifdef PF_NETLINK { "PF_NETLINK", PF_NETLINK }, #endif #ifdef PF_X25 { "PF_X25", PF_X25 }, #endif #ifdef PF_AX25 { "PF_AX25", PF_AX25 }, #endif #ifdef PF_ATMPVC { "PF_ATMPVC", PF_ATMPVC }, #endif #ifdef PF_APPLETALK { "PF_APPLETALK", PF_APPLETALK }, #endif #ifdef PF_PACKET { "PF_PACKET", PF_PACKET }, #endif { "", 0 } }; static const struct { char name[32]; int value; } types[] = { #ifdef SOCK_STREAM { "SOCK_STREAM", SOCK_STREAM }, #endif #ifdef SOCK_DGRAM { "SOCK_DGRAM", SOCK_DGRAM }, #endif #ifdef SOCK_SEQPACKET { "SOCK_SEQPACKET", SOCK_SEQPACKET }, #endif #ifdef SOCK_RAW { "SOCK_RAW", SOCK_RAW }, #endif #ifdef SOCK_RDM { "SOCK_RDM", SOCK_RDM }, #endif { "", 0 } }; static const struct { char name[32]; int value; } send_flags[] = { #ifdef MSG_OOB {"MSG_OOB", MSG_OOB }, #endif #ifdef MSG_PEEK 
{"MSG_PEEK", MSG_PEEK }, #endif #ifdef MSG_DONTROUTE {"MSG_DONTROUTE", MSG_DONTROUTE }, #endif #ifdef MSG_TRYHARD {"MSG_TRYHARD", MSG_TRYHARD }, #endif #ifdef MSG_CTRUNC {"MSG_CTRUNC", MSG_CTRUNC }, #endif #ifdef MSG_PROXY {"MSG_PROXY", MSG_PROXY }, #endif #ifdef MSG_TRUNC {"MSG_TRUNC", MSG_TRUNC }, #endif #ifdef MSG_DONTWAIT {"MSG_DONTWAIT", MSG_DONTWAIT }, #endif #ifdef MSG_EOR {"MSG_EOR", MSG_EOR }, #endif #ifdef MSG_WAITALL {"MSG_WAITALL", MSG_WAITALL }, #endif #ifdef MSG_FIN {"MSG_FIN", MSG_FIN }, #endif #ifdef MSG_SYN {"MSG_SYN", MSG_SYN }, #endif #ifdef MSG_CONFIRM {"MSG_CONFIRM", MSG_CONFIRM }, #endif #ifdef MSG_RST {"MSG_RST", MSG_RST }, #endif #ifdef MSG_ERRQUEUE {"MSG_ERRQUEUE", MSG_ERRQUEUE }, #endif #ifdef MSG_NOSIGNAL {"MSG_NOSIGNAL", MSG_NOSIGNAL }, #endif #ifdef MSG_MORE {"MSG_MORE", MSG_MORE }, #endif #ifdef MSG_WAITFORONE {"MSG_WAITFORONE", MSG_WAITFORONE }, #endif #ifdef MSG_FASTOPEN {"MSG_FASTOPEN", MSG_FASTOPEN }, #endif #ifdef MSG_CMSG_CLOEXEC {"MSG_CMSG_CLOEXEC", MSG_CMSG_CLOEXEC}, #endif { "", 0 } }; /** SOL_SOCKET/IPPROTO_IP/IPPROTO_TCP/IPPROTO_UDP options */ struct lbox_sockopt_reg { /** Option name */ const char *name; /** Option key */ int value; /** 0 - unknown, 1 - int option, 2 - size_t option */ int type; /** true if option is writable */ bool rw; }; /** SOL_SOCKET options */ static const struct lbox_sockopt_reg so_opts[] = { #ifdef SO_ACCEPTCONN {"SO_ACCEPTCONN", SO_ACCEPTCONN, 1, 0, }, #endif #ifdef SO_BINDTODEVICE {"SO_BINDTODEVICE", SO_BINDTODEVICE, 2, 1, }, #endif #ifdef SO_BROADCAST {"SO_BROADCAST", SO_BROADCAST, 1, 1, }, #endif #ifdef SO_DEBUG {"SO_DEBUG", SO_DEBUG, 1, 1, }, #endif #ifdef SO_DOMAIN {"SO_DOMAIN", SO_DOMAIN, 1, 0, }, #endif #ifdef SO_ERROR {"SO_ERROR", SO_ERROR, 1, 0, }, #endif #ifdef SO_DONTROUTE {"SO_DONTROUTE", SO_DONTROUTE, 1, 1, }, #endif #ifdef SO_KEEPALIVE {"SO_KEEPALIVE", SO_KEEPALIVE, 1, 1, }, #endif #ifdef SO_LINGER {"SO_LINGER", SO_LINGER, 0, 0, }, #endif #ifdef SO_MARK {"SO_MARK", SO_MARK, 1, 1, }, 
#endif #ifdef SO_OOBINLINE {"SO_OOBINLINE", SO_OOBINLINE, 1, 1, }, #endif #ifdef SO_PASSCRED {"SO_PASSCRED", SO_PASSCRED, 1, 1, }, #endif #ifdef SO_PEERCRED {"SO_PEERCRED", SO_PEERCRED, 1, 0, }, #endif #ifdef SO_PRIORITY {"SO_PRIORITY", SO_PRIORITY, 1, 1, }, #endif #ifdef SO_RCVBUF {"SO_RCVBUF", SO_RCVBUF, 1, 1, }, #endif #ifdef SO_RCVBUFFORCE {"SO_RCVBUFFORCE", SO_RCVBUFFORCE, 1, 1, }, #endif #ifdef SO_RCVLOWAT {"SO_RCVLOWAT", SO_RCVLOWAT, 1, 1, }, #endif #ifdef SO_SNDLOWAT {"SO_SNDLOWAT", SO_SNDLOWAT, 1, 1, }, #endif #ifdef SO_RCVTIMEO {"SO_RCVTIMEO", SO_RCVTIMEO, 1, 1, }, #endif #ifdef SO_SNDTIMEO {"SO_SNDTIMEO", SO_SNDTIMEO, 1, 1, }, #endif #ifdef SO_REUSEADDR {"SO_REUSEADDR", SO_REUSEADDR, 1, 1, }, #endif #ifdef SO_SNDBUF {"SO_SNDBUF", SO_SNDBUF, 1, 1, }, #endif #ifdef SO_SNDBUFFORCE {"SO_SNDBUFFORCE", SO_SNDBUFFORCE, 1, 1, }, #endif #ifdef SO_TIMESTAMP {"SO_TIMESTAMP", SO_TIMESTAMP, 1, 1, }, #endif #ifdef SO_PROTOCOL {"SO_PROTOCOL", SO_PROTOCOL, 1, 0, }, #endif {"SO_TYPE", SO_TYPE, 1, 0, }, {"", 0, 0, 0, } }; /** IPPROTO_TCP options */ static const struct lbox_sockopt_reg so_tcp_opts[] = { #ifdef TCP_NODELAY {"TCP_NODELAY", TCP_NODELAY, 1, 1, }, #endif #ifdef TCP_MAXSEG {"TCP_MAXSEG", TCP_MAXSEG, 1, 1, }, #endif #ifdef TCP_CORK {"TCP_CORK", TCP_CORK, 1, 1, }, #endif #ifdef TCP_KEEPIDLE {"TCP_KEEPIDLE", TCP_KEEPIDLE, 1, 1, }, #endif #ifdef TCP_KEEPINTVL {"TCP_KEEPINTVL", TCP_KEEPINTVL, 1, 1, }, #endif #ifdef TCP_KEEPCNT {"TCP_KEEPCNT", TCP_KEEPCNT, 1, 1, }, #endif #ifdef TCP_SYNCNT {"TCP_SYNCNT", TCP_SYNCNT, 1, 1, }, #endif #ifdef TCP_LINGER2 {"TCP_LINGER2", TCP_LINGER2, 1, 1, }, #endif #ifdef TCP_DEFER_ACCEPT {"TCP_DEFER_ACCEPT", TCP_DEFER_ACCEPT, 1, 1, }, #endif #ifdef TCP_WINDOW_CLAMP {"TCP_WINDOW_CLAMP", TCP_WINDOW_CLAMP, 1, 1, }, #endif #ifdef TCP_INFO {"TCP_INFO", TCP_INFO, 0, 0, }, #endif #ifdef TCP_QUICKACK {"TCP_QUICKACK", TCP_QUICKACK, 1, 1, }, #endif #ifdef TCP_CONGESTION {"TCP_CONGESTION", TCP_CONGESTION, 1, 1, }, #endif #ifdef TCP_MD5SIG 
{"TCP_MD5SIG", TCP_MD5SIG, 1, 1, }, #endif #ifdef TCP_COOKIE_TRANSACTIONS {"TCP_COOKIE_TRANSACTIONS", TCP_COOKIE_TRANSACTIONS, 1, 1, }, #endif #ifdef TCP_THIN_LINEAR_TIMEOUTS {"TCP_THIN_LINEAR_TIMEOUTS", TCP_THIN_LINEAR_TIMEOUTS, 1, 1, }, #endif #ifdef TCP_THIN_DUPACK {"TCP_THIN_DUPACK", TCP_THIN_DUPACK, 1, 1, }, #endif #ifdef TCP_USER_TIMEOUT {"TCP_USER_TIMEOUT", TCP_USER_TIMEOUT, 1, 1, }, #endif #ifdef TCP_REPAIR {"TCP_REPAIR", TCP_REPAIR, 1, 1, }, #endif #ifdef TCP_REPAIR_QUEUE {"TCP_REPAIR_QUEUE", TCP_REPAIR_QUEUE, 1, 1, }, #endif #ifdef TCP_QUEUE_SEQ {"TCP_QUEUE_SEQ", TCP_QUEUE_SEQ, 1, 1, }, #endif #ifdef TCP_REPAIR_OPTIONS {"TCP_REPAIR_OPTIONS", TCP_REPAIR_OPTIONS, 1, 1, }, #endif #ifdef TCP_FASTOPEN {"TCP_FASTOPEN", TCP_FASTOPEN, 1, 1, }, #endif #ifdef TCP_TIMESTAMP {"TCP_TIMESTAMP", TCP_TIMESTAMP, 1, 1, }, #endif #ifdef TCP_NOTSENT_LOWAT {"TCP_NOTSENT_LOWAT", TCP_NOTSENT_LOWAT, 1, 1, }, #endif #ifdef TCP_CC_INFO {"TCP_CC_INFO", TCP_CC_INFO, 1, 1, }, #endif #ifdef TCP_SAVE_SYN {"TCP_SAVE_SYN", TCP_SAVE_SYN, 1, 1, }, #endif #ifdef TCP_SAVED_SYN {"TCP_SAVED_SYN", TCP_SAVED_SYN, 1, 1, }, #endif {"", 0, 0, 0, } }; static const struct { char name[32]; int value; } ai_flags[] = { #ifdef AI_PASSIVE {"AI_PASSIVE", AI_PASSIVE }, #endif #ifdef AI_CANONNAME {"AI_CANONNAME", AI_CANONNAME }, #endif #ifdef AI_NUMERICHOST {"AI_NUMERICHOST", AI_NUMERICHOST }, #endif #ifdef AI_V4MAPPED {"AI_V4MAPPED", AI_V4MAPPED }, #endif #ifdef AI_ALL {"AI_ALL", AI_ALL }, #endif #ifdef AI_ADDRCONFIG {"AI_ADDRCONFIG", AI_ADDRCONFIG }, #endif #ifdef AI_IDN {"AI_IDN", AI_IDN }, #endif #ifdef AI_CANONIDN {"AI_CANONIDN", AI_CANONIDN }, #endif #ifdef AI_IDN_ALLOW_UNASSIGNED {"AI_IDN_ALLOW_UNASSIGNED", AI_IDN_ALLOW_UNASSIGNED }, #endif #ifdef AI_IDN_USE_STD3_ASCII_RULES {"AI_IDN_USE_STD3_ASCII_RULES", AI_IDN_USE_STD3_ASCII_RULES }, #endif #ifdef AI_NUMERICSERV {"AI_NUMERICSERV", AI_NUMERICSERV }, #endif {"", 0 } }; int lbox_socket_local_resolve(const char *host, const char *port, struct sockaddr 
*addr, socklen_t *socklen) { if (strcmp(host, "unix/") == 0) { struct sockaddr_un *uaddr = (struct sockaddr_un *) addr; if (*socklen < sizeof(*uaddr)) { errno = ENOBUFS; return -1; } memset(uaddr, 0, sizeof(*uaddr)); uaddr->sun_family = AF_UNIX; snprintf(uaddr->sun_path, sizeof(uaddr->sun_path), "%s", port); *socklen = sizeof(*uaddr); return 0; } /* IPv4 */ in_addr_t iaddr = inet_addr(host); if (iaddr != (in_addr_t)(-1)) { struct sockaddr_in *inaddr = (struct sockaddr_in *) addr; if (*socklen < sizeof(*inaddr)) { errno = ENOBUFS; return -1; } memset(inaddr, 0, sizeof(*inaddr)); inaddr->sin_family = AF_INET; inaddr->sin_addr.s_addr = iaddr; inaddr->sin_port = htons(atoi(port)); *socklen = sizeof(*inaddr); return 0; } /* IPv6 */ struct in6_addr ipv6; if (inet_pton(AF_INET6, host, &ipv6) == 1) { struct sockaddr_in6 *inaddr6 = (struct sockaddr_in6 *) addr; if (*socklen < sizeof(*inaddr6)) { errno = ENOBUFS; return -1; } memset(inaddr6, 0, sizeof(*inaddr6)); inaddr6->sin6_family = AF_INET6; inaddr6->sin6_port = htons(atoi(port)); memcpy(inaddr6->sin6_addr.s6_addr, &ipv6, sizeof(ipv6)); *socklen = sizeof(*inaddr6); return 0; } errno = EINVAL; return -1; } int lbox_socket_nonblock(int fh, int mode) { int flags = fcntl(fh, F_GETFL, 0); if (flags < 0) return -1; /* GET */ if (mode == 0x80) { if (flags & O_NONBLOCK) return 1; else return 0; } if (mode) { if (flags & O_NONBLOCK) return 1; flags |= O_NONBLOCK; } else { if ((flags & O_NONBLOCK) == 0) return 0; flags &= ~O_NONBLOCK; } flags = fcntl(fh, F_SETFL, flags); if (flags < 0) return -1; return mode ? 1 : 0; } static int lbox_socket_iowait(struct lua_State *L) { if (lua_gettop(L) < 2) goto usage; int fh = luaL_optinteger(L, 1, -1); ev_tstamp timeout = luaL_optnumber(L, 3, TIMEOUT_INFINITY); /* * A special shortcut for gh-1204: if fd and events are nil then * just sleep. This hack simplifies integration of third-party Lua * modules with Tarantool event loop. 
*/ if (unlikely(fh < 0)) { if (!lua_isnil(L, 2)) goto usage; /* Just sleep, like poll(0, NULL, timeout) */ fiber_sleep(timeout); return 0; } if (likely(lua_type(L, 2) == LUA_TNUMBER)) { /* Fast path: `events' is a bitmask of (COIO_READ|COIO_WRITE) */ int events = lua_tointeger(L, 2); if (events <= 0 || events > (COIO_READ | COIO_WRITE)) goto usage; int ret = coio_wait(fh, events, timeout); lua_pushinteger(L, ret); return 1; } /* Сonvenient version: `events' is a string ('R', 'W', 'RW') */ int events = 0; const char *events_str = lua_tostring(L, 2); if (events_str == NULL) goto usage; for (const char *e = events_str; *e != '\0'; ++e) { /* Lower-case is needed to simplify integration with cqueues */ switch (*e) { case 'r': case 'R': events |= COIO_READ; break; case 'w': case 'W': events |= COIO_WRITE; break; default: goto usage; } } if (events == 0) goto usage; int ret = coio_wait(fh, events, timeout); luaL_testcancel(L); const char *result[] = { "", "R", "W", "RW" }; assert(ret >= 0 && ret <= (COIO_READ | COIO_WRITE)); lua_pushstring(L, result[ret]); return 1; usage: return luaL_error(L, "Usage: iowait(fd, 1 | 'r' | 2 | 'w' | 3 | 'rw'" " [, timeout])"); } static int lbox_socket_push_family(struct lua_State *L, int family) { switch (family) { #ifdef AF_UNIX case AF_UNIX: lua_pushliteral(L, "AF_UNIX"); break; #endif #ifdef AF_INET case AF_INET: lua_pushliteral(L, "AF_INET"); break; #endif #ifdef AF_INET6 case AF_INET6: lua_pushliteral(L, "AF_INET6"); break; #endif #ifdef AF_IPX case AF_IPX: lua_pushliteral(L, "AF_IPX"); break; #endif #ifdef AF_NETLINK case AF_NETLINK: lua_pushliteral(L, "AF_NETLINK"); break; #endif #ifdef AF_X25 case AF_X25: lua_pushliteral(L, "AF_X25"); break; #endif #ifdef AF_AX25 case AF_AX25: lua_pushliteral(L, "AF_AX25"); break; #endif #ifdef AF_ATMPVC case AF_ATMPVC: lua_pushliteral(L, "AF_ATMPVC"); break; #endif #ifdef AF_APPLETALK case AF_APPLETALK: lua_pushliteral(L, "AF_APPLETALK"); break; #endif #ifdef AF_PACKET case AF_PACKET: 
lua_pushliteral(L, "AF_PACKET"); break; #endif default: lua_pushinteger(L, family); break; } return 1; } static int lbox_socket_push_protocol(struct lua_State *L, int protonumber) { if (protonumber == 0) { lua_pushinteger(L, 0); return 1; } struct protoent *p = getprotobynumber(protonumber); if (p) { lua_pushstring(L, p->p_name); } else { lua_pushinteger(L, protonumber); } return 1; } static int lbox_socket_push_sotype(struct lua_State *L, int sotype) { /* man 7 socket says that sotype can contain some flags */ #if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) sotype &= ~(SOCK_NONBLOCK | SOCK_CLOEXEC); #endif switch (sotype) { #ifdef SOCK_STREAM case SOCK_STREAM: lua_pushliteral(L, "SOCK_STREAM"); break; #endif #ifdef SOCK_DGRAM case SOCK_DGRAM: lua_pushliteral(L, "SOCK_DGRAM"); break; #endif #ifdef SOCK_SEQPACKET case SOCK_SEQPACKET: lua_pushliteral(L, "SOCK_SEQPACKET"); break; #endif #ifdef SOCK_RAW case SOCK_RAW: lua_pushliteral(L, "SOCK_RAW"); break; #endif #ifdef SOCK_RDM case SOCK_RDM: lua_pushliteral(L, "SOCK_RDM"); break; #endif #ifdef SOCK_PACKET case SOCK_PACKET: lua_pushliteral(L, "SOCK_PACKET"); break; #endif default: lua_pushinteger(L, sotype); break; } return 1; } static int lbox_socket_push_addr(struct lua_State *L, const struct sockaddr *addr, socklen_t alen) { lua_newtable(L); lua_pushliteral(L, "family"); lbox_socket_push_family(L, addr->sa_family); lua_rawset(L, -3); switch (addr->sa_family) { case PF_INET: case PF_INET6: { char shost[NI_MAXHOST]; char sservice[NI_MAXSERV]; int rc = getnameinfo(addr, alen, shost, sizeof(shost), sservice, sizeof(sservice), NI_NUMERICHOST|NI_NUMERICSERV ); if (rc == 0) { lua_pushliteral(L, "host"); lua_pushstring(L, shost); lua_rawset(L, -3); lua_pushliteral(L, "port"); lua_pushinteger(L, atol(sservice)); lua_rawset(L, -3); } break; } case PF_UNIX: lua_pushliteral(L, "host"); lua_pushliteral(L, "unix/"); lua_rawset(L, -3); if (alen > sizeof(addr->sa_family)) { lua_pushliteral(L, "port"); lua_pushstring(L, 
((struct sockaddr_un *)addr)->sun_path); lua_rawset(L, -3); } else { lua_pushliteral(L, "port"); lua_pushliteral(L, ""); lua_rawset(L, -3); } break; default: /* unknown family */ lua_pop(L, 1); lua_pushnil(L); break; } return 1; } static int lbox_getaddrinfo_result_wrapper(struct lua_State *L) { struct addrinfo *result = (struct addrinfo*)lua_topointer(L, 1); lua_newtable(L); int i = 1; for (struct addrinfo *rp = result; rp; rp = rp->ai_next, i++) { lua_pushinteger(L, i); lbox_socket_push_addr(L, rp->ai_addr, rp->ai_addrlen); if (lua_isnil(L, -1)) { lua_pop(L, 2); i--; continue; } lua_pushliteral(L, "protocol"); lbox_socket_push_protocol(L, rp->ai_protocol); lua_rawset(L, -3); lua_pushliteral(L, "type"); lbox_socket_push_sotype(L, rp->ai_socktype); lua_rawset(L, -3); if (rp->ai_canonname) { lua_pushliteral(L, "canonname"); lua_pushstring(L, rp->ai_canonname); lua_rawset(L, -3); } lua_rawset(L, -3); } return 1; } static int lbox_socket_getaddrinfo(struct lua_State *L) { assert(lua_gettop(L) == 4); lua_pushvalue(L, 1); const char *host = lua_tostring(L, -1); lua_pushvalue(L, 2); const char *port = lua_tostring(L, -1); ev_tstamp timeout = lua_tonumber(L, 3); struct addrinfo hints, *result = NULL; memset(&hints, 0, sizeof(hints)); if (lua_istable(L, 4)) { lua_getfield(L, 4, "family"); if (lua_isnumber(L, -1)) hints.ai_family = lua_tointeger(L, -1); lua_pop(L, 1); lua_getfield(L, 4, "type"); if (lua_isnumber(L, -1)) hints.ai_socktype = lua_tointeger(L, -1); lua_pop(L, 1); lua_getfield(L, 4, "protocol"); if (lua_isnumber(L, -1)) hints.ai_protocol = lua_tointeger(L, -1); lua_pop(L, 1); lua_getfield(L, 4, "flags"); if (lua_isnumber(L, -1)) hints.ai_flags = lua_tointeger(L, -1); lua_pop(L, 1); } int dns_res = 0; dns_res = coio_getaddrinfo(host, port, &hints, &result, timeout); lua_pop(L, 2); /* host, port */ if (dns_res != 0) { lua_pushnil(L); return 1; } /* no results */ if (!result) { lua_newtable(L); return 1; } lua_pushcfunction(L, lbox_getaddrinfo_result_wrapper); 
lua_pushlightuserdata(L, result); int rc = luaT_call(L, 1, 1); freeaddrinfo(result); if (rc != 0) return luaT_error(L); return 1; } static int lbox_socket_name(struct lua_State *L, int (*getname_func) (int, struct sockaddr *, socklen_t *)) { lua_pushvalue(L, 1); int fh = lua_tointeger(L, -1); lua_pop(L, 1); struct sockaddr_storage addr; socklen_t len = sizeof(addr); if (getname_func(fh, (struct sockaddr *)&addr, &len) != 0) { lua_pushnil(L); return 1; } lbox_socket_push_addr(L, (const struct sockaddr *)&addr, len); if (lua_isnil(L, -1)) return 1; int type; len = sizeof(type); if (getsockopt(fh, SOL_SOCKET, SO_TYPE, &type, &len) == 0) { lua_pushliteral(L, "type"); lbox_socket_push_sotype(L, type); lua_rawset(L, -3); } else { type = -1; } int protocol = 0; #ifdef SO_PROTOCOL len = sizeof(protocol); if (getsockopt(fh, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) == 0) { lua_pushliteral(L, "protocol"); lbox_socket_push_protocol(L, protocol); lua_rawset(L, -3); } #else if (addr.ss_family == AF_INET || addr.ss_family == AF_INET6) { if (type == SOCK_STREAM) protocol = IPPROTO_TCP; if (type == SOCK_DGRAM) protocol = IPPROTO_UDP; } lua_pushliteral(L, "protocol"); lbox_socket_push_protocol(L, protocol); lua_rawset(L, -3); #endif return 1; } static int lbox_socket_soname(struct lua_State *L) { return lbox_socket_name(L, getsockname); } static int lbox_socket_peername(struct lua_State *L) { return lbox_socket_name(L, getpeername); } static int lbox_socket_accept_wrapper(struct lua_State *L) { int sc = lua_tointeger(L, 1); struct sockaddr_storage *fa = (struct sockaddr_storage*) lua_topointer(L, 2); socklen_t len = lua_tointeger(L, 3); lua_pushnumber(L, sc); lbox_socket_push_addr(L, (struct sockaddr *)fa, len); return 2; } static int lbox_socket_accept(struct lua_State *L) { int fh = lua_tointeger(L, 1); struct sockaddr_storage fa; socklen_t len = sizeof(fa); int sc = accept(fh, (struct sockaddr*)&fa, &len); if (sc < 0) { if (errno != EAGAIN && errno != EWOULDBLOCK && errno != 
EINTR) say_syserror("accept(%d)", fh); lua_pushnil(L); return 1; } lua_pushcfunction(L, lbox_socket_accept_wrapper); lua_pushnumber(L, sc); lua_pushlightuserdata(L, &fa); lua_pushinteger(L, len); if (luaT_call(L, 3, 2)) { close(sc); return luaT_error(L); } return 2; } static int lbox_socket_recvfrom_wrapper(struct lua_State *L) { char *buf = (char *)lua_topointer(L, 1); socklen_t len = lua_tointeger(L, 2); lua_pushlstring(L, buf, len); return 1; } static int lbox_socket_recvfrom(struct lua_State *L) { int fh = lua_tointeger(L, 1); int size = lua_tointeger(L, 2); int flags = lua_tointeger(L, 3); struct sockaddr_storage fa; socklen_t len = sizeof(fa); char *buf = (char *)malloc(size); if (!buf) { errno = ENOMEM; lua_pushnil(L); return 1; } ssize_t res = recvfrom(fh, buf, size, flags, (struct sockaddr*)&fa, &len); if (res < 0) { free(buf); lua_pushnil(L); return 1; } lua_pushcfunction(L, lbox_socket_recvfrom_wrapper); lua_pushlightuserdata(L, buf); lua_pushinteger(L, res); int rc = luaT_call(L, 2, 1); free(buf); if (rc) return luaT_error(L); lbox_socket_push_addr(L, (struct sockaddr *)&fa, len); return 2; } static void lbox_socket_pushsockopt(lua_State *L, const struct lbox_sockopt_reg *reg) { lua_newtable(L); for (int i = 0; reg[i].name[0]; i++) { lua_pushstring(L, reg[i].name); lua_newtable(L); lua_pushliteral(L, "iname"); lua_pushinteger(L, reg[i].value); lua_rawset(L, -3); lua_pushliteral(L, "type"); lua_pushinteger(L, reg[i].type); lua_rawset(L, -3); lua_pushliteral(L, "rw"); lua_pushboolean(L, reg[i].rw); lua_rawset(L, -3); lua_rawset(L, -3); } } void tarantool_lua_socket_init(struct lua_State *L) { static const struct luaL_Reg internal_methods[] = { { "iowait", lbox_socket_iowait }, { "getaddrinfo", lbox_socket_getaddrinfo }, { "name", lbox_socket_soname }, { "peer", lbox_socket_peername }, { "recvfrom", lbox_socket_recvfrom }, { "accept", lbox_socket_accept }, { NULL, NULL } }; luaL_register_module(L, "socket", internal_methods); /* domains table */ 
lua_pushliteral(L, "DOMAIN"); lua_newtable(L); for (int i = 0; domains[i].name[0]; i++) { lua_pushstring(L, domains[i].name); lua_pushinteger(L, domains[i].value); lua_rawset(L, -3); lua_pushliteral(L, "AF_"); /* Add AF_ alias */ lua_pushstring(L, domains[i].name + 3); lua_concat(L, 2); lua_pushinteger(L, domains[i].value); lua_rawset(L, -3); } lua_rawset(L, -3); /* SO_TYPE */ lua_pushliteral(L, "SO_TYPE"); lua_newtable(L); for (int i = 0; types[i].name[0]; i++) { lua_pushstring(L, types[i].name); lua_pushinteger(L, types[i].value); lua_rawset(L, -3); } lua_rawset(L, -3); /* SEND_FLAGS */ lua_pushliteral(L, "SEND_FLAGS"); lua_newtable(L); for (int i = 0; send_flags[i].name[0]; i++) { lua_pushstring(L, send_flags[i].name); lua_pushinteger(L, send_flags[i].value); lua_rawset(L, -3); } lua_rawset(L, -3); /* AI_FLAGS */ lua_pushliteral(L, "AI_FLAGS"); lua_newtable(L); for (int i = 0; ai_flags[i].name[0]; i++) { lua_pushstring(L, ai_flags[i].name); lua_pushinteger(L, ai_flags[i].value); lua_rawset(L, -3); } lua_rawset(L, -3); /* basic protocols */ lua_pushliteral(L, "protocols"); lua_newtable(L); lua_pushliteral(L, "ip"); lua_pushinteger(L, IPPROTO_IP); lua_rawset(L, -3); lua_pushliteral(L, "tcp"); lua_pushinteger(L, IPPROTO_TCP); lua_rawset(L, -3); lua_pushliteral(L, "udp"); lua_pushinteger(L, IPPROTO_UDP); lua_rawset(L, -3); lua_rawset(L, -3); /* setsockopt() SOL_SOCKET level */ lua_pushliteral(L, "SOL_SOCKET"); lua_pushinteger(L, SOL_SOCKET); lua_rawset(L, -3); /* setsockopt() options */ lua_pushliteral(L, "SO_OPT"); lua_newtable(L); /* setsockopt(SOL_SOCKET) options */ lua_pushinteger(L, SOL_SOCKET); lbox_socket_pushsockopt(L, so_opts); lua_rawset(L, -3); /* setsockopt(IPPROTO_TCP) options */ lua_pushinteger(L, IPPROTO_TCP); lbox_socket_pushsockopt(L, so_tcp_opts); lua_rawset(L, -3); lua_rawset(L, -3); lua_pop(L, 1); /* socket.internal */ } tarantool_1.9.1.26.g63eb81e3c/src/lua/pickle.c0000664000000000000000000001641013306560010017165 0ustar rootroot/* * Copyright 
2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "lua/pickle.h" #include #include #include #include #include "lua/utils.h" #include "lua/msgpack.h" /* luaL_msgpack_default */ #include #include "bit/bit.h" static inline void luaL_region_dup(struct lua_State *L, struct region *region, const void *ptr, size_t size) { void *to = region_alloc(region, size); if (to == NULL) { diag_set(OutOfMemory, size, "region", "luaL_region_dup"); luaT_error(L); } (void) memcpy(to, ptr, size); } static int lbox_pack(struct lua_State *L) { const char *format = luaL_checkstring(L, 1); /* first arg comes second */ int i = 2; int nargs = lua_gettop(L); const char *str; struct region *buf = &fiber()->gc; /* * XXX: this code leaks region memory in case of any * Lua memory. In absence of external unwind, Lua C API * makes it painfully difficult to clean up resources * properly in case of error. * Hope for the best, fiber_gc() will be called eventually. */ size_t used = region_used(buf); struct luaL_field field; double dbl; float flt; while (*format) { if (i > nargs) luaL_error(L, "pickle.pack: argument count does not match " "the format"); luaL_checkfield(L, luaL_msgpack_default, i, &field); switch (*format) { case 'B': case 'b': /* signed and unsigned 8-bit integers */ if (field.type != MP_UINT && field.type != MP_INT) luaL_error(L, "pickle.pack: expected 8-bit int"); luaL_region_dup(L, buf, &field.ival, sizeof(uint8_t)); break; case 'S': case 's': /* signed and unsigned 16-bit integers */ if (field.type != MP_UINT && field.type != MP_INT) luaL_error(L, "pickle.pack: expected 16-bit int"); luaL_region_dup(L, buf, &field.ival, sizeof(uint16_t)); break; case 'n': /* signed and unsigned 16-bit big endian integers */ if (field.type != MP_UINT && field.type != MP_INT) luaL_error(L, "pickle.pack: expected 16-bit int"); field.ival = (uint16_t) htons((uint16_t) field.ival); luaL_region_dup(L, buf, &field.ival, sizeof(uint16_t)); break; case 'I': case 'i': /* signed and unsigned 32-bit integers */ if (field.type != MP_UINT && field.ival 
!= MP_INT) luaL_error(L, "pickle.pack: expected 32-bit int"); luaL_region_dup(L, buf, &field.ival, sizeof(uint32_t)); break; case 'N': /* signed and unsigned 32-bit big endian integers */ if (field.type != MP_UINT && field.ival != MP_INT) luaL_error(L, "pickle.pack: expected 32-bit int"); field.ival = htonl(field.ival); luaL_region_dup(L, buf, &field.ival, sizeof(uint32_t)); break; case 'L': case 'l': /* signed and unsigned 64-bit integers */ if (field.type != MP_UINT && field.type != MP_INT) luaL_error(L, "pickle.pack: expected 64-bit int"); luaL_region_dup(L, buf, &field.ival, sizeof(uint64_t)); break; case 'Q': case 'q': /* signed and unsigned 64-bit integers */ if (field.type != MP_UINT && field.type != MP_INT) luaL_error(L, "pickle.pack: expected 64-bit int"); field.ival = bswap_u64(field.ival); luaL_region_dup(L, buf, &field.ival, sizeof(uint64_t)); break; case 'd': dbl = (double) lua_tonumber(L, i); luaL_region_dup(L, buf, &dbl, sizeof(double)); break; case 'f': flt = (float) lua_tonumber(L, i); luaL_region_dup(L, buf, &flt, sizeof(float)); break; case 'A': case 'a': { /* A sequence of bytes */ size_t len; str = luaL_checklstring(L, i, &len); luaL_region_dup(L, buf, str, len); break; } default: luaL_error(L, "pickle.pack: unsupported pack " "format specifier '%c'", *format); } i++; format++; } size_t total_len = region_used(buf) - used; const char *res = (char *) region_join(buf, total_len); if (res == NULL) { region_truncate(buf, used); diag_set(OutOfMemory, total_len, "region", "region_join"); luaT_error(L); } lua_pushlstring(L, res, total_len); region_truncate(buf, used); return 1; } static int lbox_unpack(struct lua_State *L) { size_t format_size = 0; const char *format = luaL_checklstring(L, 1, &format_size); const char *f = format; size_t str_size = 0; const char *str = luaL_checklstring(L, 2, &str_size); const char *end = str + str_size; const char *s = str; int save_stacksize = lua_gettop(L); #define CHECK_SIZE(cur) if (unlikely((cur) >= end)) { \ 
luaL_error(L, "pickle.unpack('%c'): got %d bytes (expected: %d+)", \ *f, (int) (end - str), (int) 1 + ((cur) - str)); \ } while (*f) { switch (*f) { case 'b': CHECK_SIZE(s); lua_pushnumber(L, load_u8(s)); s++; break; case 's': CHECK_SIZE(s + 1); lua_pushnumber(L, load_u16(s)); s += 2; break; case 'n': CHECK_SIZE(s + 1); lua_pushnumber(L, ntohs(load_u16(s))); s += 2; break; case 'i': CHECK_SIZE(s + 3); lua_pushnumber(L, load_u32(s)); s += 4; break; case 'N': CHECK_SIZE(s + 3); lua_pushnumber(L, ntohl(load_u32(s))); s += 4; break; case 'l': CHECK_SIZE(s + 7); luaL_pushuint64(L, load_u64(s)); s += 8; break; case 'q': CHECK_SIZE(s + 7); luaL_pushuint64(L, bswap_u64(load_u64(s))); s += 8; break; case 'd': CHECK_SIZE(s + 7); lua_pushnumber(L, load_double(s)); s += 8; break; case 'f': CHECK_SIZE(s + 3); lua_pushnumber(L, load_float(s)); s += 4; break; case 'a': case 'A': /* The rest of the data is a Lua string. */ lua_pushlstring(L, s, end - s); s = end; break; default: luaL_error(L, "pickle.unpack: unsupported " "format specifier '%c'", *f); } f++; } assert(s <= end); if (s != end) { luaL_error(L, "pickle.unpack('%s'): too many bytes: " "unpacked %d, total %d", format, s - str, str_size); } return lua_gettop(L) - save_stacksize; #undef CHECK_SIZE } void tarantool_lua_pickle_init(struct lua_State *L) { static const luaL_Reg picklelib[] = { {"pack", lbox_pack}, {"unpack", lbox_unpack}, { NULL, NULL} }; luaL_register_module(L, "pickle", picklelib); lua_pop(L, 1); } tarantool_1.9.1.26.g63eb81e3c/src/lua/title.lua0000664000000000000000000000263013306560010017375 0ustar rootrootlocal ffi = require('ffi') ffi.cdef[[ void title_update(); const char *title_get(); void title_set_interpretor_name(const char *); const char *title_get_interpretor_name(); void title_set_script_name(const char *); const char *title_get_script_name(); void title_set_custom(const char *); const char *title_get_custom(); void title_set_status(const char *); const char *title_get_status(); ]] local title = 
{} function title.update(kv) if type(kv) == 'string' then kv = {custom_title = kv} end if type(kv) ~= 'table' then return end if kv.interpretor_name ~= nil then ffi.C.title_set_interpretor_name(tostring(kv.interpretor_name)) end if kv.script_name ~= nil then ffi.C.title_set_script_name(tostring(kv.script_name)) end if kv.status ~= nil then ffi.C.title_set_status(tostring(kv.status)) end if kv.custom_title ~= nil then ffi.C.title_set_custom(tostring(kv.custom_title)) end if not kv.__defer_update then ffi.C.title_update() end end function title.get() local function S(s) return s~=nil and ffi.string(s) or nil end return S(ffi.C.title_get()), { interpretor_name = S(ffi.C.title_get_interpretor_name()), script_name = S(ffi.C.title_get_script_name()), status = S(ffi.C.title_get_status()), custom_title = S(ffi.C.title_get_custom()) } end return title tarantool_1.9.1.26.g63eb81e3c/src/lua/errno.lua0000664000000000000000000000073513306560010017405 0ustar rootrootlocal ffi = require('ffi') local errno_list = require('errno') ffi.cdef[[ char *strerror(int errnum); ]] local function strerror(errno) if errno == nil then errno = ffi.errno() end return ffi.string(ffi.C.strerror(tonumber(errno))) end return setmetatable({ strerror = strerror }, { __index = errno_list, __newindex = function() error("Can't create new errno constants") end, __call = function(self, ...) return ffi.errno(...) 
end }) tarantool_1.9.1.26.g63eb81e3c/src/lua/tap.lua0000664000000000000000000001757513306565107017072 0ustar rootroot--- tap.lua internal file --- --- The Test Anything Protocol vesion 13 producer --- -- yaml formatter must be able to encode any Lua variable local yaml = require('yaml').new() yaml.cfg{ encode_invalid_numbers = true; encode_load_metatables = true; encode_use_tostring = true; encode_invalid_as_nil = true; } local ffi = require('ffi') -- for iscdata local function traceback(level) local trace = {} level = level or 3 while true do local info = debug.getinfo(level, "nSl") if not info then break end local frame = { source = info.source; src = info.short_src; line = info.linedefined or 0; what = info.what; name = info.name; namewhat = info.namewhat; filename = info.source:sub(1, 1) == "@" and info.source:sub(2) or 'eval' } table.insert(trace, frame) level = level + 1 end return trace end local function diag(test, fmt, ...) io.write(string.rep(' ', 4 * test.level), "# ", string.format(fmt, ...), "\n") end local function ok(test, cond, message, extra) test.total = test.total + 1 io.write(string.rep(' ', 4 * test.level)) if cond then io.write(string.format("ok - %s\n", message)) return true end test.failed = test.failed + 1 io.write(string.format("not ok - %s\n", message)) extra = extra or {} if test.trace then local frame = debug.getinfo(3, "Sl") extra.trace = traceback() extra.filename = extra.trace[#extra.trace].filename extra.line = extra.trace[#extra.trace].line end if next(extra) == nil then return false -- don't have extra information end -- print aligned yaml output for line in yaml.encode(extra):gmatch("[^\n]+") do io.write(string.rep(' ', 2 + 4 * test.level), line, "\n") end return false end local function fail(test, message, extra) return ok(test, false, message, extra) end local function skip(test, message, extra) ok(test, true, message.." 
# skip", extra) end local nan = 0/0 local function cmpdeeply(got, expected, extra) if type(expected) == "number" or type(got) == "number" then extra.got = got extra.expected = expected if got ~= got and expected ~= expected then return true -- nan end return got == expected end if ffi.istype('bool', got) then got = (got == 1) end if ffi.istype('bool', expected) then expected = (expected == 1) end if got == nil and expected == nil then return true end if type(got) ~= type(expected) then extra.got = type(got) extra.expected = type(expected) return false end if type(got) ~= 'table' then extra.got = got extra.expected = expected return got == expected end local path = extra.path or '/' for i, v in pairs(got) do extra.path = path .. '/' .. i if not cmpdeeply(v, expected[i], extra) then return false end end for i, v in pairs(expected) do extra.path = path .. '/' .. i if not cmpdeeply(got[i], v, extra) then return false end end extra.path = path return true end local function like(test, got, pattern, message, extra) extra = extra or {} extra.got = got extra.expected = pattern return ok(test, string.match(tostring(got), pattern) ~= nil, message, extra) end local function unlike(test, got, pattern, message, extra) extra = extra or {} extra.got = got extra.expected = pattern return ok(test, string.match(tostring(got), pattern) == nil, message, extra) end local function is(test, got, expected, message, extra) extra = extra or {} extra.got = got extra.expected = expected return ok(test, got == expected, message, extra) end local function isnt(test, got, unexpected, message, extra) extra = extra or {} extra.got = got extra.unexpected = unexpected return ok(test, got ~= unexpected, message, extra) end local function is_deeply(test, got, expected, message, extra) extra = extra or {} extra.got = got extra.expected = expected return ok(test, cmpdeeply(got, expected, extra), message, extra) end local function isnil(test, v, message, extra) return is(test, not v and 'nil' or v, 
'nil', message, extra) end local function isnumber(test, v, message, extra) return is(test, type(v), 'number', message, extra) end local function isstring(test, v, message, extra) return is(test, type(v), 'string', message, extra) end local function istable(test, v, message, extra) return is(test, type(v), 'table', message, extra) end local function isboolean(test, v, message, extra) return is(test, type(v), 'boolean', message, extra) end local function isfunction(test, v, message, extra) return is(test, type(v), 'function', message, extra) end local function isudata(test, v, utype, message, extra) extra = extra or {} extra.expected = 'userdata<'..utype..'>' if type(v) == 'userdata' then extra.got = 'userdata<'..getmetatable(v)..'>' return ok(test, getmetatable(v) == utype, message, extra) else extra.got = type(v) return fail(test, message, extra) end end local function iscdata(test, v, ctype, message, extra) extra = extra or {} extra.expected = ffi.typeof(ctype) if type(v) == 'cdata' then extra.got = ffi.typeof(v) return ok(test, ffi.istype(ctype, v), message, extra) else extra.got = type(v) return fail(test, message, extra) end end local test_mt local function test(parent, name, fun, ...) local level = parent ~= nil and parent.level + 1 or 0 local test = setmetatable({ parent = parent; name = name; level = level; total = 0; failed = 0; planned = 0; trace = parent == nil and true or parent.trace; }, test_mt) if fun ~= nil then test:diag('%s', test.name) fun(test, ...) 
test:diag('%s: end', test.name) return test:check() else return test end end local function plan(test, planned) test.planned = planned io.write(string.rep(' ', 4 * test.level), string.format("1..%d\n", planned)) end local function check(test) if test.checked then error('check called twice') end test.checked = true if test.planned ~= test.total then if test.parent ~= nil then ok(test.parent, false, "bad plan", { planned = test.planned; run = test.total}) else diag(test, string.format("bad plan: planned %d run %d", test.planned, test.total)) end elseif test.failed > 0 then if test.parent ~= nil then ok(test.parent, false, "failed subtests", { failed = test.failed; planned = test.planned; }) else diag(test, "failed subtest: %d", test.failed) end else if test.parent ~= nil then ok(test.parent, true, test.name) end end return test.planned == test.total and test.failed == 0 end test_mt = { __index = { test = test; plan = plan; check = check; diag = diag; ok = ok; fail = fail; skip = skip; is = is; isnt = isnt; isnil = isnil; isnumber = isnumber; isstring = isstring; istable = istable; isboolean = isboolean; isfunction = isfunction; isudata = isudata; iscdata = iscdata; is_deeply = is_deeply; like = like; unlike = unlike; } } local function root_test(...) io.write('TAP version 13', '\n') return test(nil, ...) end return { test = root_test; } tarantool_1.9.1.26.g63eb81e3c/src/lua/trigger.lua0000664000000000000000000000455213306560010017724 0ustar rootrootlocal fun = require('fun') local log = require('log') local table_clear = require('table.clear') -- -- Checks that argument is a callable, i.e. a function or a table -- with __call metamethod. 
-- local function is_callable(arg) if type(arg) == 'function' then return true elseif type(arg) == 'table' then local mt = getmetatable(arg) if mt ~= nil and type(mt.__call) == 'function' then return true end end return false end local trigger_list_mt = { __call = function(self, new_trigger, old_trigger) -- prepare, check arguments if new_trigger ~= nil and not is_callable(new_trigger) then error(string.format("Usage: %s(callable)", self.name)) end if old_trigger ~= nil and not is_callable(old_trigger) then error(string.format("Usage: trigger(new_callable, old_callable)", self.name)) end -- do something if new_trigger == nil and old_trigger == nil then -- list all the triggers return fun.iter(ipairs(self)):totable() elseif new_trigger ~= nil and old_trigger == nil then -- append new trigger return table.insert(self, new_trigger) elseif new_trigger == nil and old_trigger ~= nil then -- delete old trigger for pos, func in ipairs(self) do if old_trigger == func then table.remove(self, pos) return old_trigger end end error(string.format("%s: trigger is not found", self.name)) else -- if both of the arguments are functions, then -- we'll replace triggers and return the old one for pos, func in ipairs(self) do if old_trigger == func then self[pos] = new_trigger return old_trigger end end error(string.format("%s: trigger is not found", self.name)) end end, __index = { run = function(self, ...) -- ipairs ignores .name for _, func in ipairs(self) do func(...) 
end end, } } local function trigger_list_new(name) return setmetatable({ name = name }, trigger_list_mt) end return { new = trigger_list_new } tarantool_1.9.1.26.g63eb81e3c/src/lua/clock.lua0000664000000000000000000000172413306560010017352 0ustar rootroot-- clock.lua -- internal file local clock = {} local ffi = require('ffi') ffi.cdef[[ double clock_realtime(void); double clock_monotonic(void); double clock_process(void); double clock_thread(void); uint64_t clock_realtime64(void); uint64_t clock_monotonic64(void); uint64_t clock_process64(void); uint64_t clock_thread64(void); ]] local C = ffi.C clock.realtime = C.clock_realtime clock.monotonic = C.clock_monotonic clock.proc = C.clock_process clock.thread = C.clock_thread clock.realtime64 = C.clock_realtime64 clock.monotonic64 = C.clock_monotonic64 clock.proc64 = C.clock_process64 clock.thread64 = C.clock_thread64 clock.time = clock.realtime clock.time64 = clock.realtime64 clock.bench = function(fun, ...) local overhead = clock.proc() overhead = clock.proc() - overhead local start_time = clock.proc() local res = {0, fun(...)} res[1] = clock.proc() - start_time - overhead, res return res end return clock tarantool_1.9.1.26.g63eb81e3c/src/lua/crypto.h0000664000000000000000000000362013306560010017242 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_CRYPTO_H #define INCLUDES_TARANTOOL_LUA_CRYPTO_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #if defined(__cplusplus) extern "C" { #endif int tnt_EVP_CIPHER_key_length(const EVP_CIPHER *cipher); int tnt_EVP_CIPHER_iv_length(const EVP_CIPHER *cipher); int tnt_openssl_init(); EVP_MD_CTX *tnt_EVP_MD_CTX_new(void); void tnt_EVP_MD_CTX_free(EVP_MD_CTX *ctx); HMAC_CTX *tnt_HMAC_CTX_new(void); void tnt_HMAC_CTX_free(HMAC_CTX *ctx); #if defined(__cplusplus) } #endif #endif /* INCLUDES_TARANTOOL_LUA_CRYPTO_H */ tarantool_1.9.1.26.g63eb81e3c/src/lua/argparse.lua0000664000000000000000000001164013306560010020061 0ustar rootrootlocal fun = require('fun') local function parse_param_prefix(param) if param == nil then return nil end local is_long = (param:find("^[-][-]") ~= nil) local is_short = not is_long and (param:find("^[-]") ~= nil) local is_dash = is_short and (param:find("^[-]$") ~= nil) return is_long, is_short, is_dash end local function result_set_add(t_out, key, val) if val == nil then table.insert(t_out, key) elseif t_out[key] == nil then t_out[key] = val elseif type(t_out[key]) == 'table' then table.insert(t_out[key], val) else t_out[key] = {t_out[key], val} end end local function convert_parameter_simple(name, convert_from, convert_to) if convert_to == 'number' then local converted = tonumber(convert_from) if converted == 
nil then error( ('Bad value for parameter %s. expected type %s, got "%s"') :format(name, convert_to, convert_from) ) end return converted elseif convert_to == 'boolean' then if type(convert_from) ~= 'boolean' then error( ('Bad input for parameter "%s". Expected boolean, got "%s"') :format(name, convert_from) ) end elseif convert_to == 'string' then if type(convert_from) ~= 'string' then error( ('Bad input for parameter "%s". Expected string, got "%s"') :format(name, convert_from) ) end else error( ('Bad convertion format "%s" provided for %s') :format(convert_to, name) ) end return convert_from end local function convert_parameter(name, convert_from, convert_to) if convert_to == nil then return convert_from end if convert_to:find('+') then convert_to = convert_to:sub(1, -2) if type(convert_from) ~= 'table' then convert_from = { convert_from } end convert_from = fun.iter(convert_from):map(function(v) return convert_parameter_simple(name, v, convert_to) end):totable() else if type(convert_from) == 'table' then convert_from = table.remove(convert_from) end convert_from = convert_parameter_simple(name, convert_from, convert_to) end return convert_from end local function parameters_parse(t_in, options) local t_out, t_in = {}, t_in or {} local skip_param = false for i, v in ipairs(t_in) do -- we've used this parameter as value if skip_param == true then skip_param = false goto nextparam end local is_long, is_short, is_dash = parse_param_prefix(v) if not is_dash and is_short then local commands = v:sub(2) if not (commands:match("^[%a]+$")) then error(("bad argument #%d: ID not valid"):format(i)) end for id in v:sub(2):gmatch("%a") do result_set_add(t_out, id, true) end elseif is_long then local command = v:sub(3) if command:find('=') then local key, val = command:match("^([%a_][%w_-]+)%=(.*)$") if key == nil or val == nil then error(("bad argument #%d: ID not valid"):format(i)) end result_set_add(t_out, key, val) else if command:match("^([%a_][%w_-]+)$") == nil then 
error(("bad argument #%d: ID not valid"):format(i)) end local val = true do -- in case next argument is value of this key (not --arg) local next_arg = t_in[i + 1] local is_long, is_short, is_dash = parse_param_prefix(next_arg) if is_dash then skip_param = true elseif is_long == false and not is_short and not is_dash then val = next_arg skip_param = true end end result_set_add(t_out, command, val) end else table.insert(t_out, v) end ::nextparam:: end if options then local lookup, unknown = {}, {} for _, v in ipairs(options) do if type(v) ~= 'table' then v = {v} end lookup[v[1]] = (v[2] or true) end for k, v in pairs(t_out) do if lookup[k] == nil and type(k) == "string" then table.insert(unknown, k) elseif type(lookup[k]) == 'string' then t_out[k] = convert_parameter(k, v, lookup[k]) end end if #unknown > 0 then error(("unknown options: %s"):format(table.concat(unknown, ", "))) end end return t_out end return { parse = parameters_parse } tarantool_1.9.1.26.g63eb81e3c/src/lua/csv.lua0000664000000000000000000001507613306560010017057 0ustar rootroot-- csv.lua (internal file) local ffi = require('ffi') local log = require('log') ffi.cdef[[ typedef void (*csv_emit_row_t)(void *ctx); typedef void (*csv_emit_field_t)(void *ctx, const char *field, const char *end); struct csv { void *emit_ctx; csv_emit_row_t emit_row; csv_emit_field_t emit_field; char delimiter; char quote_char; char prev_symbol; int error_status; int ending_spaces; void *(*realloc)(void*, size_t); int state; char *buf; char *bufp; size_t buf_len; }; void csv_create(struct csv *csv); void csv_destroy(struct csv *csv); void csv_setopt(struct csv *csv, int opt, ...); struct csv_iterator { struct csv *csv; const char *buf_begin; const char *buf_end; const char *field; size_t field_len; }; void csv_iterator_create(struct csv_iterator *it, struct csv *csv); int csv_next(struct csv_iterator *); void csv_feed(struct csv_iterator *, const char *, size_t); size_t csv_escape_field(struct csv *csv, const char *field, 
size_t field_len, char *dst, size_t buf_size); enum { CSV_IT_OK, CSV_IT_EOL, CSV_IT_NEEDMORE, CSV_IT_EOF, CSV_IT_ERROR }; ]] local iter = function(csvstate, i) local readable = csvstate[1] local csv_chunk_size = csvstate[2] local csv = csvstate[3] local it = csvstate[4] local tup = {} local st = ffi.C.csv_next(it) while st ~= ffi.C.CSV_IT_EOF do if st == ffi.C.CSV_IT_NEEDMORE then if readable then local buf = readable:read(csv_chunk_size) ffi.C.csv_feed(it, buf, string.len(buf)) -- extend buf lifetime - csv_feed saves pointers csvstate[5] = buf else ffi.C.csv_feed(it, "", 0) end elseif st == ffi.C.CSV_IT_EOL then i = i + 1 if i > 0 then return i, tup end elseif st == ffi.C.CSV_IT_OK then if i >= 0 then tup[#tup + 1] = ffi.string(it.field, it.field_len) end elseif st == ffi.C.CSV_IT_ERROR then log.warn("CSV file has errors") break elseif st == ffi.C.CSV_IT_EOF then ffi.C.csv_destroy(csv) break end st = ffi.C.csv_next(it) end end local module = {} --@brief parse csv string by string --@param readable must be string or object with method read(num) returns string --@param opts.chunk_size (default 4096). Parser will read by chunk_size symbols --@param opts.delimiter (default ','). --@param opts.quote_char (default '"'). --@param opts.skip_head_lines (default 0). Skip header. --@return iter function, iterator state module.iterate = function(readable, opts) opts = opts or {} if type(readable) ~= "string" and type(readable.read) ~= "function" then error("Usage: load(string or object with method read(num)" .. "returns string)") end if not opts.chunk_size then opts.chunk_size = 4096 end if not opts.delimiter then opts.delimiter = ',' end if not opts.quote_char then opts.quote_char = '"' end if not opts.skip_head_lines then opts.skip_head_lines = 0 end local str if type(readable) == "string" then str = readable readable = nil else str = readable:read(opts.chunk_size) end if not str then --read not works error("Usage: load(string or object with method read(num)" .. 
"returns string)") end local it = ffi.new('struct csv_iterator') local csv = ffi.new('struct csv') ffi.C.csv_create(csv) ffi.gc(csv, ffi.C.csv_destroy) csv.delimiter = string.byte(opts.delimiter) csv.quote_char = string.byte(opts.quote_char) ffi.C.csv_iterator_create(it, csv) ffi.C.csv_feed(it, str, string.len(str)) -- csv_feed remembers the pointer; -- str included in csv state to make sure it lives long enough return iter, {readable, opts.chunk_size, csv, it, str}, -opts.skip_head_lines end --@brief parse csv and make table --@return table module.load = function(readable, opts) opts = opts or {} local result = {} for i, tup in module.iterate(readable, opts) do result[i] = tup end return result end --@brief dumps tuple or table as csv --@param t is tuple or table --@param writable must be object with method write(string) like file or socket --@param opts.delimiter (default ','). --@param opts.quote_char (default '"'). --@return there is no writable it returns csv as string module.dump = function(t, opts, writable) opts = opts or {} writable = writable or nil if not opts.delimiter then opts.delimiter = ',' end if not opts.quote_char then opts.quote_char = '"' end if (type(writable) ~= "nil" and type(writable.write) ~= "function") or type(t) ~= "table" then error("Usage: dump(table[, opts, writable])") end local csv = ffi.new('struct csv') ffi.C.csv_create(csv) ffi.gc(csv, ffi.C.csv_destroy) csv.delimiter = string.byte(opts.delimiter) csv.quote_char = string.byte(opts.quote_char) local bufsz = 256 local buf = csv.realloc(ffi.cast(ffi.typeof('void *'), 0), bufsz) if type(t[1]) ~= 'table' then t = {t} end local result_table if type(writable) == 'nil' then result_table = {} end for k, line in pairs(t) do local first = true local output_tuple = {} for k2, field in pairs(line) do local strf = tostring(field) local buf_new_size = (strf:len() + 1) * 2 if buf_new_size > bufsz then bufsz = buf_new_size buf = csv.realloc(buf, bufsz) end local len = ffi.C.csv_escape_field(csv, 
strf, string.len(strf), buf, bufsz) if first then first = false else output_tuple[#output_tuple + 1] = opts.delimiter end output_tuple[#output_tuple + 1] = ffi.string(buf, len) end output_tuple[#output_tuple + 1] = '\n' if result_table then result_table[#result_table + 1] = table.concat(output_tuple) else writable:write(table.concat(output_tuple)) end output_tuple = {} end ffi.C.csv_destroy(csv) csv.realloc(buf, 0) if result_table then return table.concat(result_table) end end return module tarantool_1.9.1.26.g63eb81e3c/src/lua/socket.h0000664000000000000000000000353713306560010017221 0ustar rootroot#ifndef INCLUDES_TARANTOOL_LUA_SOCKET_H #define INCLUDES_TARANTOOL_LUA_SOCKET_H /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #if defined(__cplusplus) extern "C" { #endif struct lua_State; void tarantool_lua_socket_init(struct lua_State *L); int lbox_socket_local_resolve(const char *host, const char *port, struct sockaddr *addr, socklen_t *socklen); int lbox_socket_nonblock(int fh, int mode); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* INCLUDES_TARANTOOL_LUA_BSDSOCKET_H */ tarantool_1.9.1.26.g63eb81e3c/src/lua/pickle.h0000664000000000000000000000330613306560010017172 0ustar rootroot#ifndef TARANTOOL_LUA_PICKLE_H_INCLUDED #define TARANTOOL_LUA_PICKLE_H_INCLUDED /* * Copyright 2010-2015, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct lua_State; void tarantool_lua_pickle_init(struct lua_State *L); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_LUA_PICKLE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/cfg.h0000664000000000000000000000376413306565107015725 0ustar rootroot#ifndef INCLUDES_TARANTOOL_CFG_H #define INCLUDES_TARANTOOL_CFG_H /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ int cfg_geti(const char *param); int cfg_geti_default(const char *param, int default_val); int64_t cfg_geti64(const char *param); const char * cfg_gets(const char *param); double cfg_getd(const char *param); double cfg_getd_default(const char *param, double default_val); int cfg_getarr_size(const char *name); const char * cfg_getarr_elem(const char *name, int i); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* INCLUDES_TARANTOOL_CFG_H */ tarantool_1.9.1.26.g63eb81e3c/src/latency.c0000664000000000000000000000532213306565107016610 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "latency.h" #include #include "histogram.h" #include "trivia/util.h" enum { USEC_PER_MSEC = 1000, USEC_PER_SEC = 1000000, }; enum { LATENCY_PERCENTILE = 99, }; int latency_create(struct latency *latency) { enum { US = 1, MS = USEC_PER_MSEC, S = USEC_PER_SEC }; static int64_t buckets[] = { 100 * US, 200 * US, 300 * US, 400 * US, 500 * US, 600 * US, 700 * US, 800 * US, 900 * US, 1 * MS, 2 * MS, 3 * MS, 4 * MS, 5 * MS, 6 * MS, 7 * MS, 8 * MS, 9 * MS, 10 * MS, 20 * MS, 30 * MS, 40 * MS, 50 * MS, 60 * MS, 70 * MS, 80 * MS, 90 * MS, 100 * MS, 200 * MS, 300 * MS, 400 * MS, 500 * MS, 600 * MS, 700 * MS, 800 * MS, 900 * MS, 1 * S, 2 * S, 3 * S, 4 * S, 5 * S, 6 * S, 7 * S, 8 * S, 9 * S, 10 * S, }; latency->histogram = histogram_new(buckets, lengthof(buckets)); if (latency->histogram == NULL) return -1; histogram_collect(latency->histogram, 0); return 0; } void latency_destroy(struct latency *latency) { histogram_delete(latency->histogram); } void latency_collect(struct latency *latency, double value) { int64_t value_usec = value * USEC_PER_SEC; histogram_collect(latency->histogram, value_usec); } double latency_get(struct latency *latency) { int64_t value_usec = histogram_percentile(latency->histogram, LATENCY_PERCENTILE); return (double)value_usec / USEC_PER_SEC; } tarantool_1.9.1.26.g63eb81e3c/src/say.h0000664000000000000000000002704613306565107015761 0ustar rootroot#ifndef TARANTOOL_SAY_H_INCLUDED #define TARANTOOL_SAY_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include /* pid_t */ #include #include "small/rlist.h" #include "fiber_cond.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ extern pid_t log_pid; /** \cond public */ /** Log levels */ enum say_level { S_FATAL, /* do not use this value directly */ S_SYSERROR, S_ERROR, S_CRIT, S_WARN, S_INFO, S_VERBOSE, S_DEBUG }; /** Log formats */ enum say_format { SF_PLAIN, SF_JSON, say_format_MAX }; extern int log_level; static inline bool say_log_level_is_enabled(int level) { return level <= log_level; } /** \endcond public */ extern enum say_format log_format; enum say_logger_type { /** * Before the app server core is initialized, we do not * decorate output and simply print every message to * stdout intact. */ SAY_LOGGER_BOOT, /** * The core has initialized and we can decorate output * with pid, thread/fiber id, time, etc. */ SAY_LOGGER_STDERR, /** box.cfg option to log to file. 
*/ SAY_LOGGER_FILE, /** box.cfg option to log to another process via a pipe */ SAY_LOGGER_PIPE, /** box.cfg option to log to syslog. */ SAY_LOGGER_SYSLOG }; enum syslog_facility { SYSLOG_KERN = 0, SYSLOG_USER, SYSLOG_MAIL, SYSLOG_DAEMON, SYSLOG_AUTH, SYSLOG_INTERN, SYSLOG_LPR, SYSLOG_NEWS, SYSLOG_UUCP, SYSLOG_CLOCK, SYSLOG_AUTHPRIV, SYSLOG_FTP, SYSLOG_NTP, SYSLOG_AUDIT, SYSLOG_ALERT, SYSLOG_CRON, SYSLOG_LOCAL0, SYSLOG_LOCAL1, SYSLOG_LOCAL2, SYSLOG_LOCAL3, SYSLOG_LOCAL4, SYSLOG_LOCAL5, SYSLOG_LOCAL6, SYSLOG_LOCAL7, syslog_facility_MAX, }; struct log; typedef int (*log_format_func_t)(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap); /** * A log object. There is a singleton for the default log. */ struct log { int fd; /** The current log level. */ int level; enum say_logger_type type; /** path to file if logging to file. */ char *path; bool nonblock; log_format_func_t format_func; /** pid of the process if logging to pipe. */ pid_t pid; /* Application identifier used to group syslog messages. */ char *syslog_ident; /** * Used to wake up the main logger thread from a eio thread. */ ev_async log_async; /** * Conditional variable securing variable below * from concurrent usage. */ struct fiber_cond rotate_cond; /** Counter identifying number of threads executing log_rotate. */ int rotating_threads; enum syslog_facility syslog_facility; struct rlist in_log_list; }; /** * Create a new log object. * @param log log to initialize * @param init_str box.cfg log option * @param nonblock box.cfg non-block option * @return 0 on success, -1 on system error, the error is saved in * the diagnostics area */ int log_create(struct log *log, const char *init_str, bool nonblock); void log_destroy(struct log *log); /** Perform log write. */ int log_say(struct log *log, int level, const char *filename, int line, const char *error, const char *format, ...); /** * Set log level. Can be used dynamically. 
* * @param log log object * @param level level to set */ void log_set_level(struct log *log, enum say_level level); /** * Set log format. Can be used dynamically. * * @param log log object * @param format_func function to format log messages */ void log_set_format(struct log *log, log_format_func_t format_func); /** * Set log level for the default logger. Can be used dynamically. * @param format constant level */ void say_set_log_level(int new_level); /** * Set log format for default logger. Can be used dynamically. * * Can't be applied in case syslog or boot (will be ignored) * @param say format */ void say_set_log_format(enum say_format format); /** * Return say format by name. * * @param format_name format name. * @retval say_format_MAX on error * @retval say_format otherwise */ enum say_format say_format_by_name(const char *format); struct ev_loop; struct ev_signal; void say_logrotate(struct ev_loop *, struct ev_signal *, int /* revents */); /** Init default logger. */ void say_logger_init(const char *init_str, int log_level, int nonblock, const char *log_format, int background); /** Free default logger */ void say_logger_free(); CFORMAT(printf, 5, 0) void vsay(int level, const char *filename, int line, const char *error, const char *format, va_list ap); /** \cond public */ typedef void (*sayfunc_t)(int, const char *, int, const char *, const char *, ...); /** Internal function used to implement say() macros */ CFORMAT(printf, 5, 0) extern sayfunc_t _say; /** * Format and print a message to Tarantool log file. * * \param level (int) - log level (see enum \link say_level \endlink) * \param file (const char * ) - file name to print * \param line (int) - line number to print * \param format (const char * ) - printf()-like format string * \param ... - format arguments * \sa printf() * \sa enum say_level */ #define say_file_line(level, file, line, format, ...) 
({ \ if (say_log_level_is_enabled(level)) \ _say(level, file, line, format, ##__VA_ARGS__); }) /** * Format and print a message to Tarantool log file. * * \param level (int) - log level (see enum \link say_level \endlink) * \param format (const char * ) - printf()-like format string * \param ... - format arguments * \sa printf() * \sa enum say_level */ #define say(level, format, ...) ({ \ say_file_line(level, __FILE__, __LINE__, format, ##__VA_ARGS__); }) /** * Format and print a message to Tarantool log file. * * \param format (const char * ) - printf()-like format string * \param ... - format arguments * \sa printf() * \sa enum say_level * Example: * \code * say_info("Some useful information: %s", status); * \endcode */ #define say_error(format, ...) say(S_ERROR, NULL, format, ##__VA_ARGS__) /** \copydoc say_error() */ #define say_crit(format, ...) say(S_CRIT, NULL, format, ##__VA_ARGS__) /** \copydoc say_error() */ #define say_warn(format, ...) say(S_WARN, NULL, format, ##__VA_ARGS__) /** \copydoc say_error() */ #define say_info(format, ...) say(S_INFO, NULL, format, ##__VA_ARGS__) /** \copydoc say_error() */ #define say_verbose(format, ...) say(S_VERBOSE, NULL, format, ##__VA_ARGS__) /** \copydoc say_error() */ #define say_debug(format, ...) say(S_DEBUG, NULL, format, ##__VA_ARGS__) /** \copydoc say_error(). */ #define say_syserror(format, ...) say(S_SYSERROR, strerror(errno), format, \ ##__VA_ARGS__) /** \endcond public */ #define panic_status(status, ...) ({ say(S_FATAL, NULL, __VA_ARGS__); exit(status); }) #define panic(...) panic_status(EXIT_FAILURE, __VA_ARGS__) #define panic_syserror(...) ({ say(S_FATAL, strerror(errno), __VA_ARGS__); exit(EXIT_FAILURE); }) /** * Format and print a message to Tarantool log file. * * \param log (struct log *) - logger object * \param level (int) - log level (see enum \link say_level \endlink) * \param format (const char * ) - printf()-like format string * \param ... 
- format arguments * \sa printf() * \sa enum say_level */ #define log_say_level(log, _level, format, ...) ({ \ if (_level <= log->level) \ log_say(log, _level, __FILE__, __LINE__,\ format, ##__VA_ARGS__); }) /** * Format and print a message to specified logger. * * \param log (struct log *) - logger object * \param format (const char * ) - printf()-like format string * \param ... - format arguments * \sa printf() * \sa enum say_level * Example: * \code * log_say_info("Some useful information: %s", status); * \endcode */ #define log_say_error(log, format, ...) \ log_say_level(log, S_ERROR, NULL, format, ##__VA_ARGS__) /** \copydoc log_say_error() */ #define log_say_crit(log, format, ...) \ log_say_level(log, S_CRIT, NULL, format, ##__VA_ARGS__) /** \copydoc log_say_error() */ #define log_say_warn(log, format, ...) \ log_say_level(log, S_WARN, NULL, format, ##__VA_ARGS__) /** \copydoc log_say_error() */ #define log_say_info(log, format, ...) \ log_say_level(log, S_INFO, NULL, format, ##__VA_ARGS__) /** \copydoc log_say_error() */ #define log_say_verbose(log, format, ...) \ log_say_level(log, S_VERBOSE, NULL, format, ##__VA_ARGS__) /** \copydoc log_say_error() */ #define log_say_debug(log, format, ...) \ log_say_level(log, S_DEBUG, NULL, format, ##__VA_ARGS__) /** \copydoc log_say_error(). */ #define log_say_syserror(log, format, ...) \ log_say_level(log, S_SYSERROR, strerror(errno), format, ##__VA_ARGS__) /** * validates logger init string; * @returns 0 if validation passed or -1 * with an error message written to diag */ int say_check_init_str(const char *str); /* internals, for unit testing */ /** * Determine logger type and strip type prefix from init_str. * * @return -1 on error, 0 on success */ int say_parse_logger_type(const char **str, enum say_logger_type *type); /** Syslog logger initialization params */ struct say_syslog_opts { const char *identity; enum syslog_facility facility; /* Input copy (content unspecified). 
*/ char *copy; }; /** * Parse syslog logger init string (without the prefix) * @retval -1 error, message is in diag * @retval 0 success */ int say_parse_syslog_opts(const char *init_str, struct say_syslog_opts *opts); /** Release memory allocated by the option parser. */ void say_free_syslog_opts(struct say_syslog_opts *opts); /** * Format functions * @param log logger structure * @param buf buffer where the formatted message should be written to * @param len size of buffer * @param level log level of message * @param filename name of file where log was called * @param line name of file where log was called * @param error error in case of system errors * @param format format of message * @param ap message parameters * @return number of bytes written to buf */ int say_format_json(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap); int say_format_plain(struct log *log, char *buf, int len, int level, const char *filename, int line, const char *error, const char *format, va_list ap); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_SAY_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/reflection.h0000664000000000000000000001613113306560010017274 0ustar rootroot#ifndef TARANTOOL_REFLECTION_H_INCLUDED #define TARANTOOL_REFLECTION_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include /* strcmp */ #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ struct type_info; struct method_info; /** * Primitive C types */ enum ctype { CTYPE_VOID = 0, CTYPE_INT, CTYPE_CONST_CHAR_PTR }; struct type_info { const char *name; const struct type_info *parent; const struct method_info *methods; }; inline bool type_assignable(const struct type_info *type, const struct type_info *object) { assert(object != NULL); do { if (object == type) return true; assert(object->parent != object); object = object->parent; } while (object != NULL); return false; } /** * Determine if the specified object is assignment-compatible with * the object represented by type. 
*/ #define type_cast(T, obj) ({ \ T *r = NULL; \ if (type_assignable(&type_ ## T, (obj->type))) \ r = (T *) obj; \ (r); \ }) #if defined(__cplusplus) /* Pointer to arbitrary C++ member function */ typedef void (type_info::*method_thiscall_f)(void); #endif enum { METHOD_ARG_MAX = 8 }; struct method_info { const struct type_info *owner; const char *name; enum ctype rtype; enum ctype atype[METHOD_ARG_MAX]; int nargs; bool isconst; union { /* Add extra space to get proper struct size in C */ void *_spacer[2]; #if defined(__cplusplus) method_thiscall_f thiscall; #endif /* defined(__cplusplus) */ }; }; #define type_foreach_method(m, method) \ for(const struct type_info *_m = (m); _m != NULL; _m = _m->parent) \ for (const struct method_info *(method) = _m->methods; \ (method)->name != NULL; (method)++) inline const struct method_info * type_method_by_name(const struct type_info *type, const char *name) { type_foreach_method(type, method) { if (strcmp(method->name, name) == 0) return method; } return NULL; } extern const struct method_info METHODS_SENTINEL; #if defined(__cplusplus) } /* extern "C" */ static_assert(sizeof(((struct method_info *) 0)->thiscall) <= sizeof(((struct method_info *) 0)->_spacer), "sizeof(thiscall)"); /* * Begin of C++ syntax sugar */ /* * Initializer for struct type_info without methods */ inline struct type_info make_type(const char *name, const struct type_info *parent) { /* TODO: sorry, unimplemented: non-trivial designated initializers */ struct type_info t; t.name = name; t.parent = parent; t.methods = &METHODS_SENTINEL; return t; } /* * Initializer for struct type_info with methods */ inline struct type_info make_type(const char *name, const struct type_info *parent, const struct method_info *methods) { /* TODO: sorry, unimplemented: non-trivial designated initializers */ struct type_info t; t.name = name; t.parent = parent; t.methods = methods; return t; } template inline enum ctype ctypeof(); template<> inline enum ctype ctypeof() { return 
CTYPE_VOID; } template<> inline enum ctype ctypeof() { return CTYPE_INT; } template<> inline enum ctype ctypeof() { return CTYPE_CONST_CHAR_PTR; } /** * \cond false */ template struct method_helper; /** A helper for recursive templates */ template struct method_helper { static bool invokable(const struct method_info *method) { if (method->atype[N] != ctypeof()) return false; return method_helper::invokable(method); } static void init(struct method_info *method) { method->atype[N] = ctypeof(); return method_helper::init(method); } }; template struct method_helper { static bool invokable(const struct method_info *) { return true; } static void init(struct method_info *method) { method->nargs = N; } }; /** * \endcond false */ /** * Initializer for R (T::*)(void) C++ member methods */ template inline struct method_info make_method(const struct type_info *owner, const char *name, R (T::*method_arg)(Args...)) { struct method_info m; m.owner = owner; m.name = name; m.thiscall = (method_thiscall_f) method_arg; m.isconst = false; m.rtype = ctypeof(); memset(m.atype, 0, sizeof(m.atype)); method_helper<0, Args...>::init(&m); return m; } template inline struct method_info make_method(const struct type_info *owner, const char *name, R (T::*method_arg)(Args...) 
const) { struct method_info m = make_method(owner, name, (R (T::*)(Args...)) method_arg); m.isconst = true; return m; } /** * Check if method is invokable with provided argument types */ template inline bool method_invokable(const struct method_info *method, T *object) { static_assert(sizeof...(Args) <= METHOD_ARG_MAX, "too many arguments"); if (!type_assignable(method->owner, object->type)) return false; if (method->rtype != ctypeof()) return false; if (method->nargs != sizeof...(Args)) return false; return method_helper<0, Args...>::invokable(method); } template inline bool method_invokable(const struct method_info *method, const T *object) { if (!method->isconst) return false; return method_invokable(method, const_cast(object)); } /** * Invoke method with object and provided arguments. */ template inline R method_invoke(const struct method_info *method, T *object, Args... args) { assert((method_invokable(method, object))); typedef R (T::*MemberFunction)(Args...); return (object->*(MemberFunction) method->thiscall)(args...); } #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_REFLECTION_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/crc32.h0000664000000000000000000000360113306560010016054 0ustar rootroot#ifndef TARANTOOL_CRC32_H_INCLUDED #define TARANTOOL_CRC32_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "trivia/util.h" #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ typedef uint32_t (*crc32_func)(uint32_t crc, const char *buf, unsigned int len); /* * Pointer to an architecture-specific implementation of * CRC32 calculation method. */ extern crc32_func crc32_calc; void crc32_init(); #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_CRC32_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/curl.c0000664000000000000000000002127213306560010016104 0ustar rootroot/* * Copyright 2010-2017, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "curl.h" #include #include #include "fiber.h" /** * Process events */ static void curl_multi_process(CURLM *multi, curl_socket_t sockfd, int events) { /* * Notify curl about events */ CURLMcode code; int still_running = 0; /* * From curl://curl.haxx.se/libcurl/c/curl_multi_socket_action.html: * Before version 7.20.0: If you receive CURLM_CALL_MULTI_PERFORM, * this basically means that you should call curl_multi_socket_action * again before you wait for more actions on libcurl's sockets. * You don't have to do it immediately, but the return code means that * libcurl may have more data available to return or that there may be * more data to send off before it is "satisfied". 
*/ do { code = curl_multi_socket_action(multi, sockfd, events, &still_running); } while (code == CURLM_CALL_MULTI_PERFORM); if (code != CURLM_OK) { /* Sic: we can't handle errors properly in EV callbacks */ say_error("curl_multi_socket_action failed for sockfd=%d: %s", sockfd, curl_multi_strerror(code)); } /* * Check for resuls */ CURLMsg *msg; int msgs_left; while ((msg = curl_multi_info_read(multi, &msgs_left))) { if (msg->msg != CURLMSG_DONE) continue; CURL *easy = msg->easy_handle; CURLcode code = msg->data.result; struct curl_request *request = NULL; curl_easy_getinfo(easy, CURLINFO_PRIVATE, (void *) &request); request->code = (int) code; fiber_cond_signal(&request->cond); } } /** * libev timer callback used by curl_multi_timer_cb() * @see curl_multi_timer_cb() */ static void curl_timer_cb(struct ev_loop *loop, struct ev_timer *watcher, int revents) { (void) loop; (void) revents; struct curl_env *env = (struct curl_env *) watcher->data; say_debug("curl %p: event timer", env); curl_multi_process(env->multi, CURL_SOCKET_TIMEOUT, 0); } /** * libcurl callback for CURLMOPT_TIMERFUNCTION * @see curl://curl.haxx.se/libcurl/c/CURLMOPT_TIMERFUNCTION.html */ static int curl_multi_timer_cb(CURLM *multi, long timeout_ms, void *envp) { (void) multi; struct curl_env *env = (struct curl_env *) envp; say_debug("curl %p: wait timeout=%ldms", env, timeout_ms); ev_timer_stop(loop(), &env->timer_event); if (timeout_ms > 0) { /* * From CURLMOPT_TIMERFUNCTION manual: * Your callback function should install a non-repeating timer * with an interval of timeout_ms. Each time that timer fires, * call curl_multi_socket_action(). */ double timeout = (double) timeout_ms / 1000.0; ev_timer_init(&env->timer_event, curl_timer_cb, timeout, 0); ev_timer_start(loop(), &env->timer_event); return 0; } else if (timeout_ms == 0) { /* * From CURLMOPT_TIMERFUNCTION manual: * A timeout_ms value of 0 means you should call * curl_multi_socket_action or curl_multi_perform (once) as * soon as possible. 
*/ curl_timer_cb(loop(), &env->timer_event, 0); return 0; } else { assert(timeout_ms == -1); /* * From CURLMOPT_TIMERFUNCTION manual: * A timeout_ms value of -1 means you should delete your * timer. */ return 0; } } /** Human-readable names for libev events. Used for debug. */ static const char *evstr[] = { [EV_READ] = "IN", [EV_WRITE] = "OUT", [EV_READ | EV_WRITE] = "INOUT", }; /** * libev I/O callback used by curl_multi_sock_cb() */ static void curl_sock_cb(struct ev_loop *loop, struct ev_io *watcher, int revents) { (void) loop; struct curl_env *env = (struct curl_env *) watcher->data; int fd = watcher->fd; say_debug("curl %p: event fd=%d %s", env, fd, evstr[revents]); const int action = ((revents & EV_READ ? CURL_POLL_IN : 0) | (revents & EV_WRITE ? CURL_POLL_OUT : 0)); curl_multi_process(env->multi, fd, action); } /** * libcurl callback for CURLMOPT_SOCKETFUNCTION * @see curl://curl.haxx.se/libcurl/c/CURLMOPT_SOCKETFUNCTION.html */ static int curl_multi_sock_cb(CURL *easy, curl_socket_t fd, int what, void *envp, void *watcherp) { (void) easy; struct curl_env *env = (struct curl_env *) envp; struct ev_io *watcher = (struct ev_io *) watcherp; if (what == CURL_POLL_REMOVE) { say_debug("curl %p: remove fd=%d", env, fd); assert(watcher != NULL); ev_io_stop(loop(), watcher); ++env->stat.sockets_deleted; mempool_free(&env->sock_pool, watcher); return 0; } if (watcher == NULL) { watcher = mempool_alloc(&env->sock_pool); if (watcher == NULL) { diag_set(OutOfMemory, sizeof(*watcher), "mempool", "curl sock"); return -1; } ev_io_init(watcher, curl_sock_cb, fd, 0); watcher->data = env; ++env->stat.sockets_added; curl_multi_assign(env->multi, fd, watcher); say_debug("curl %p: add fd=%d", env, fd); } if (what == CURL_POLL_NONE) return 0; /* register, not interested in readiness (yet) */ const int events = ((what & CURL_POLL_IN ? EV_READ : 0) | (what & CURL_POLL_OUT ? 
EV_WRITE : 0)); if (watcher->events == events) return 0; /* already registered, nothing to do */ /* Re-register watcher */ say_debug("curl %p: poll fd=%d %s", env, fd, evstr[events]); ev_io_stop(loop(), watcher); ev_io_set(watcher, fd, events); ev_io_start(loop(), watcher); return 0; } int curl_env_create(struct curl_env *env, long max_conns) { memset(env, 0, sizeof(*env)); mempool_create(&env->sock_pool, &cord()->slabc, sizeof(struct ev_io)); env->multi = curl_multi_init(); if (env->multi == NULL) { diag_set(SystemError, "failed to init multi handler"); goto error_exit; } ev_init(&env->timer_event, curl_timer_cb); env->timer_event.data = (void *) env; curl_multi_setopt(env->multi, CURLMOPT_TIMERFUNCTION, curl_multi_timer_cb); curl_multi_setopt(env->multi, CURLMOPT_TIMERDATA, (void *) env); curl_multi_setopt(env->multi, CURLMOPT_SOCKETFUNCTION, curl_multi_sock_cb); curl_multi_setopt(env->multi, CURLMOPT_SOCKETDATA, (void *) env); curl_multi_setopt(env->multi, CURLMOPT_MAXCONNECTS, max_conns); return 0; error_exit: curl_env_destroy(env); return -1; } void curl_env_destroy(struct curl_env *env) { assert(env); if (env->multi != NULL) curl_multi_cleanup(env->multi); mempool_destroy(&env->sock_pool); } int curl_request_create(struct curl_request *curl_request) { curl_request->easy = curl_easy_init(); if (curl_request->easy == NULL) { diag_set(OutOfMemory, 0, "curl", "easy"); return -1; } curl_request->code = CURLE_OK; fiber_cond_create(&curl_request->cond); return 0; } void curl_request_destroy(struct curl_request *curl_request) { if (curl_request->easy != NULL) curl_easy_cleanup(curl_request->easy); fiber_cond_destroy(&curl_request->cond); } CURLMcode curl_execute(struct curl_request *curl_request, struct curl_env *env, double timeout) { CURLMcode mcode; mcode = curl_multi_add_handle(env->multi, curl_request->easy); if (mcode != CURLM_OK) goto curl_merror; /* Don't wait on a cond if request has already failed */ if (curl_request->code == CURLE_OK) { 
++env->stat.active_requests; int rc = fiber_cond_wait_timeout(&curl_request->cond, timeout); if (rc < 0 || fiber_is_cancelled()) curl_request->code = CURLE_OPERATION_TIMEDOUT; --env->stat.active_requests; } mcode = curl_multi_remove_handle(env->multi, curl_request->easy); if (mcode != CURLM_OK) goto curl_merror; return CURLM_OK; curl_merror: switch (mcode) { case CURLM_OUT_OF_MEMORY: diag_set(OutOfMemory, 0, "curl", "internal"); break; default: errno = EINVAL; diag_set(SystemError, "curl_multi_error: %s", curl_multi_strerror(mcode)); } return mcode; } tarantool_1.9.1.26.g63eb81e3c/src/module_footer.h0000664000000000000000000000017013306560010020001 0ustar rootroot#if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_MODULE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/title.c0000664000000000000000000001162513306560010016261 0ustar rootroot/* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "title.h" #include "proc_title.h" #include #include #include #include static char *title_buf; size_t title_buf_size; static char *interpretor_name; static char *script_name; static char *custom; static char *status; char **title_init(int argc, char **argv) { char **argv_copy = proc_title_init(argc, argv); if (argv_copy == NULL) return NULL; title_buf_size = proc_title_max_length(); title_buf = malloc(title_buf_size); /* ensure title_get() always yields a valid string */ if (title_buf != NULL && title_buf_size != 0) title_buf[0] = '\0'; title_set_interpretor_name(argv[0]); return argv_copy; } void title_free(int argc, char **argv) { free(title_buf); title_buf = NULL; free(script_name); script_name = NULL; free(custom); custom = NULL; free(status); status = NULL; proc_title_free(argc, argv); } const char *title_get() { return title_buf; } /** * Return a name without preceding path, e.g. /a/b/c -> c. * Like basename(), but doesn't modify the subject string. * Unlike basename, returns an empty string for directories * /a/b/c/ */ static const char * my_basename(const char *name) { const char *sep = NULL; const char *p; if (name == NULL) return NULL; for (p = name; *p != '\0'; p++) { if (*p == '/') sep = p; } if (sep) return sep[1] ? 
sep + 1 : NULL; return name; } void title_update() { if (title_buf == NULL || title_buf_size == 0) return; char *output = title_buf; char *output_end = title_buf + title_buf_size; int rc; const char *script_name_short = my_basename(script_name); const char *interpretor_name_short = my_basename(interpretor_name); const char *part1 = "tarantool", *part2 = NULL, *part3 = status; /* * prefix */ if (script_name_short == NULL) { if (interpretor_name_short != NULL) { part1 = interpretor_name_short; } } else if (interpretor_name_short == NULL) { part1 = script_name_short; } else { assert(script_name_short); assert(interpretor_name_short); part1 = script_name_short; /* * Omit interpretor name when it is the prefix of * scriptname, ex: tarantool/tarantoolctl */ if (strncmp(script_name_short, interpretor_name_short, strlen(interpretor_name_short)) == 0) { part1 = script_name_short; } else { part1 = interpretor_name_short; part2 = script_name_short; } } #define OUTPUT(...) snprintf(output, output_end - output, __VA_ARGS__) assert(part1); if (part2) { if (part3) { rc = OUTPUT("%s %s <%s>", part1, part2, part3); } else { rc = OUTPUT("%s %s", part1, part2); } } else { if (part3) { rc = OUTPUT("%s <%s>", part1, part3); } else { rc = OUTPUT("%s", part1); } } if (rc < 0 || (output += rc) >= output_end) goto done; /* * custom title */ if (custom) { rc = OUTPUT(": %s", custom); if (rc < 0 || (output += rc) >= output_end) goto done; } #undef OUTPUT done: if (output >= output_end) { output = output_end - 1; } /* * failed snprintf leaves the buffer in unspecified state hence * explicit NUL termination */ *output = '\0'; proc_title_set("%s", title_buf); } #define DEFINE_STRING_ACCESSORS(name) \ const char *title_get_ ## name() { return name; } \ void title_set_ ## name(const char *str) \ { \ if (str == NULL || str[0] == '\0') { \ free(name); name = NULL; \ return; \ } \ size_t len = strlen(str); \ char *p = realloc(name, len + 1); \ if (p) { \ name = memcpy(p, str, len + 1); \ } \ } 
DEFINE_STRING_ACCESSORS(interpretor_name) DEFINE_STRING_ACCESSORS(script_name) DEFINE_STRING_ACCESSORS(custom) DEFINE_STRING_ACCESSORS(status) tarantool_1.9.1.26.g63eb81e3c/src/scoped_guard.h0000664000000000000000000000406013306565107017613 0ustar rootroot#ifndef TARANTOOL_SCOPED_GUARD_H_INCLUDED #define TARANTOOL_SCOPED_GUARD_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ template struct ScopedGuard { Functor f; bool is_active; explicit ScopedGuard(const Functor& fun) : f(fun), is_active(true) { /* nothing */ } ScopedGuard(ScopedGuard&& guard) : f(guard.f), is_active(true) { guard.is_active = false; abort(); } ~ScopedGuard() { if (is_active) f(); } private: explicit ScopedGuard(const ScopedGuard&) = delete; ScopedGuard& operator=(const ScopedGuard&) = delete; }; template inline ScopedGuard make_scoped_guard(Functor guard) { return ScopedGuard(guard); } #endif /* TARANTOOL_SCOPED_GUARD_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/backtrace.h0000664000000000000000000000400613306560010017057 0ustar rootroot#ifndef TARANTOOL_BACKTRACE_H_INCLUDED #define TARANTOOL_BACKTRACE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "trivia/config.h" #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #ifdef ENABLE_BACKTRACE #include void print_backtrace(); typedef int (backtrace_cb)(int frameno, void *frameret, const char *func, size_t offset, void *cb_ctx); void backtrace_foreach(backtrace_cb cb, coro_context *coro_ctx, void *cb_ctx); void backtrace_proc_cache_clear(); #endif /* ENABLE_BACKTRACE */ #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_BACKTRACE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/src/pickle.h0000664000000000000000000000425713306560010016417 0ustar rootroot#ifndef TARANTOOL_PICKLE_H_INCLUDED #define TARANTOOL_PICKLE_H_INCLUDED /* * Copyright 2010-2016, Tarantool AUTHORS, please see AUTHORS file. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * pickle (pick-little-endian) -- serialize/de-serialize data from * tuple and iproto binary formats. * * load_* - no boundary checking */ static inline uint32_t load_u32(const char **data) { const uint32_t *b = (const uint32_t *) *data; *data += sizeof(uint32_t); return *b; } #define pack_u(bits) \ static inline char * \ pack_u##bits(char *buf, uint##bits##_t val) \ { \ *(uint##bits##_t *) buf = val; \ return buf + sizeof(uint##bits##_t); \ } pack_u(8) pack_u(16) pack_u(32) pack_u(64) #if defined(__cplusplus) } /* extern "C" */ #endif /* defined(__cplusplus) */ #endif /* TARANTOOL_PICKLE_H_INCLUDED */ tarantool_1.9.1.26.g63eb81e3c/Doxyfile0000664000000000000000000022325413306560010015716 0ustar rootroot# Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). 
#--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "Tarantool" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "Get your data in RAM. Get compute close to data. Enjoy the performance." # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = doc/www/theme/static/logo.png # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. 
OUTPUT_DIRECTORY = doc/doxygen # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. 
Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. 
Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. 
TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. 
The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. 
Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. 
When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. 
# If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = YES # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. 
If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. 
If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. 
GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. 
Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. 
WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = src/ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. 
See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = *_p.h # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. 
Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. 
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. 
REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. 
IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. 
Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. 
Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. 
com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. 
BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. 
QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.tarantool # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = YES # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. 
Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 1 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. 
The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. 
If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). 
The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. 
RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = YES # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the # default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment.
GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. 
Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = __attribute__(x)= API_EXPORT= LUA_API= # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. 
EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). 
PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. 
When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. 
UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. 
DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. 
DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. 
DOT_CLEANUP = YES tarantool_1.9.1.26.g63eb81e3c/third_party/0000775000000000000000000000000013306560010016531 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/clock_gettime.c0000664000000000000000000000346413306560010021515 0ustar rootroot/* * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the * following disclaimer. * * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "trivia/util.h" int clock_gettime(uint32_t clock_id, struct timespec *tp) { tp->tv_nsec = 0; tp->tv_sec = 0; uint64_t tk; struct timeval tv; switch (clock_id) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: gettimeofday (&tv, 0); tp->tv_sec = tv.tv_sec; tp->tv_nsec = tv.tv_usec * 1000; break; case CLOCK_PROCESS_CPUTIME_ID: case CLOCK_THREAD_CPUTIME_ID: tk = clock(); tp->tv_sec = tk / CLOCKS_PER_SEC; tp->tv_nsec = (tk % CLOCKS_PER_SEC) * (1000000000 / CLOCKS_PER_SEC); break; } return 0; } tarantool_1.9.1.26.g63eb81e3c/third_party/qsort_arg.h0000664000000000000000000000076313306560010020711 0ustar rootroot#ifndef QSORT_ARG_H #define QSORT_ARG_H #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ /** * General version of qsort that calls single-threaded of multi-threaded * qsort depending on open MP availability and given array size. */ void qsort_arg(void *a, size_t n, size_t es, int (*cmp)(const void *a, const void *b, void *arg), void *arg); #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ #endif tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/0000775000000000000000000000000013306565107020266 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/lyaml.h0000664000000000000000000000030013306565107021546 0ustar rootroot#ifndef LYAML_H #define LYAML_H #ifdef __cplusplus extern "C" { #endif #include LUALIB_API int luaopen_yaml(lua_State *L); #ifdef __cplusplus } #endif #endif /* #ifndef LYAML_H */ tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/b64.h0000664000000000000000000000016413306560010021017 0ustar rootroot#include int frombase64(lua_State *, const unsigned char *, unsigned int); int tobase64(lua_State *, int); tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/README0000664000000000000000000000207113306560010021132 0ustar rootrootNAME yaml - Lua YAML serialization using LibYAML SYNOPSIS require 'yaml' serialized = yaml.dump({ 1, 2, 3, 4 }) content = yaml.load(serialized) DESCRIPTION 
This module is a Lua binding for Kirill Siminov's excellent LibYAML. LibYAML is generally considered to be the best C YAML 1.1 implementation. LibYAML 0.1.3 is included as part of this release. This module defines the functions dump, load, and configure within the global yaml table. Portions of this software were inspired by Perl's YAML::LibYAML module by Ingy dt Net. SEE ALSO * LibYAML (http://pyyaml.org/wiki/LibYAML) * luayaml: libsyck YAML binding (http://luaforge.net/projects/luayaml) * YAML::LibYAML (http://search.cpan.org/~nuffin/YAML-LibYAML) AUTHOR Andrew Danforth If you are using this module successfully I would love to hear from you. COPYRIGHT Copyright (c) 2009, Andrew Danforth THANKS Thanks to the following people for suggestions and patches: Peter Mawhorter Cyril Romain Adrian Sampson tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/lyaml.cc0000664000000000000000000004747013306565107021727 0ustar rootroot/* * lyaml.c, LibYAML binding for Lua * * Copyright (c) 2009, Andrew Danforth * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * Portions of this software were inspired by Perl's YAML::LibYAML module by * Ingy döt Net * */ #include "lyaml.h" #include "trivia/util.h" #include #include #include #include extern "C" { #include #include #include #include #include #include #include #include #include "yaml.h" #include "b64.h" } /* extern "C" */ #include "lua/utils.h" #define LUAYAML_TAG_PREFIX "tag:yaml.org,2002:" #define OOM_ERRMSG "yaml: out of memory" #define RETURN_ERRMSG(s, msg) do { \ lua_pushstring(s->L, msg); \ s->error = 1; \ return; \ } while(0) struct lua_yaml_loader { lua_State *L; struct luaL_serializer *cfg; int anchortable_index; int document_count; yaml_parser_t parser; yaml_event_t event; char validevent; char error; }; struct lua_yaml_dumper { lua_State *L; struct luaL_serializer *cfg; int anchortable_index; unsigned int anchor_number; yaml_emitter_t emitter; char error; lua_State *outputL; luaL_Buffer yamlbuf; }; static void generate_error_message(struct lua_yaml_loader *loader) { char buf[256]; luaL_Buffer b; luaL_buffinit(loader->L, &b); luaL_addstring(&b, loader->parser.problem ? 
loader->parser.problem : "A problem"); snprintf(buf, sizeof(buf), " at document: %d", loader->document_count); luaL_addstring(&b, buf); if (loader->parser.problem_mark.line || loader->parser.problem_mark.column) { snprintf(buf, sizeof(buf), ", line: %d, column: %d\n", (int) loader->parser.problem_mark.line + 1, (int) loader->parser.problem_mark.column + 1); luaL_addstring(&b, buf); } else { luaL_addstring(&b, "\n"); } if (loader->parser.context) { snprintf(buf, sizeof(buf), "%s at line: %d, column: %d\n", loader->parser.context, (int) loader->parser.context_mark.line + 1, (int) loader->parser.context_mark.column + 1); luaL_addstring(&b, buf); } luaL_pushresult(&b); } static inline void delete_event(struct lua_yaml_loader *loader) { if (loader->validevent) { yaml_event_delete(&loader->event); loader->validevent = 0; } } static inline int do_parse(struct lua_yaml_loader *loader) { delete_event(loader); if (yaml_parser_parse(&loader->parser, &loader->event) != 1) { generate_error_message(loader); loader->error = 1; return 0; } loader->validevent = 1; return 1; } static int load_node(struct lua_yaml_loader *loader); static void handle_anchor(struct lua_yaml_loader *loader) { const char *anchor = (char *)loader->event.data.scalar.anchor; if (!anchor) return; lua_pushstring(loader->L, anchor); lua_pushvalue(loader->L, -2); lua_rawset(loader->L, loader->anchortable_index); } static void load_map(struct lua_yaml_loader *loader) { lua_createtable(loader->L, 0, 5); if (loader->cfg->decode_save_metatables) luaL_setmaphint(loader->L, -1); handle_anchor(loader); while (1) { int r; /* load key */ if (load_node(loader) == 0 || loader->error) return; /* load value */ r = load_node(loader); if (loader->error) return; if (r != 1) RETURN_ERRMSG(loader, "unanticipated END event"); lua_rawset(loader->L, -3); } } static void load_sequence(struct lua_yaml_loader *loader) { int index = 1; lua_createtable(loader->L, 5, 0); if (loader->cfg->decode_save_metatables) 
luaL_setarrayhint(loader->L, -1); handle_anchor(loader); while (load_node(loader) == 1 && !loader->error) lua_rawseti(loader->L, -2, index++); } static void load_scalar(struct lua_yaml_loader *loader) { const char *str = (char *)loader->event.data.scalar.value; unsigned int length = loader->event.data.scalar.length; const char *tag = (char *)loader->event.data.scalar.tag; if (tag && !strncmp(tag, LUAYAML_TAG_PREFIX, sizeof(LUAYAML_TAG_PREFIX) - 1)) { tag += sizeof(LUAYAML_TAG_PREFIX) - 1; if (!strcmp(tag, "str")) { lua_pushlstring(loader->L, str, length); return; } else if (!strcmp(tag, "int")) { lua_pushinteger(loader->L, strtol(str, NULL, 10)); return; } else if (!strcmp(tag, "float")) { double dval = fpconv_strtod(str, NULL); luaL_checkfinite(loader->L, loader->cfg, dval); lua_pushnumber(loader->L, dval); return; } else if (!strcmp(tag, "bool")) { lua_pushboolean(loader->L, !strcmp(str, "true") || !strcmp(str, "yes")); return; } else if (!strcmp(tag, "binary")) { frombase64(loader->L, (const unsigned char *)str, length); return; } } if (loader->event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE) { if (!strcmp(str, "~")) { luaL_pushnull(loader->L); return; } else if (!strcmp(str, "true") || !strcmp(str, "yes")) { lua_pushboolean(loader->L, 1); return; } else if (!strcmp(str, "false") || !strcmp(str, "no")) { lua_pushboolean(loader->L, 0); return; } else if (!strcmp(str, "null")) { luaL_pushnull(loader->L); return; } else if (!length) { lua_pushliteral(loader->L, ""); return; } /* plain scalar and Lua can convert it to a number? make it so... 
*/ char *endptr = NULL; long long ival = strtoll(str, &endptr, 10); if (endptr == str + length && ival != LLONG_MAX) { luaL_pushint64(loader->L, ival); return; } unsigned long long uval = strtoull(str, &endptr, 10); if (endptr == str + length) { luaL_pushuint64(loader->L, uval); return; } double dval = fpconv_strtod(str, &endptr); if (endptr == str + length) { luaL_checkfinite(loader->L, loader->cfg, dval); lua_pushnumber(loader->L, dval); return; } } lua_pushlstring(loader->L, str, length); handle_anchor(loader); } static void load_alias(struct lua_yaml_loader *loader) { char *anchor = (char *)loader->event.data.alias.anchor; lua_pushstring(loader->L, anchor); lua_rawget(loader->L, loader->anchortable_index); if (lua_isnil(loader->L, -1)) { char buf[256]; snprintf(buf, sizeof(buf), "invalid reference: %s", anchor); RETURN_ERRMSG(loader, buf); } } static int load_node(struct lua_yaml_loader *loader) { if (!do_parse(loader)) return -1; switch (loader->event.type) { case YAML_DOCUMENT_END_EVENT: case YAML_MAPPING_END_EVENT: case YAML_SEQUENCE_END_EVENT: return 0; case YAML_MAPPING_START_EVENT: load_map(loader); return 1; case YAML_SEQUENCE_START_EVENT: load_sequence(loader); return 1; case YAML_SCALAR_EVENT: load_scalar(loader); return 1; case YAML_ALIAS_EVENT: load_alias(loader); return 1; case YAML_NO_EVENT: lua_pushliteral(loader->L, "libyaml returned YAML_NO_EVENT"); loader->error = 1; return -1; default: lua_pushliteral(loader->L, "invalid event"); loader->error = 1; return -1; } } static void load(struct lua_yaml_loader *loader) { if (!do_parse(loader)) return; if (loader->event.type != YAML_STREAM_START_EVENT) RETURN_ERRMSG(loader, "expected STREAM_START_EVENT"); while (1) { if (!do_parse(loader)) return; if (loader->event.type == YAML_STREAM_END_EVENT) return; loader->document_count++; if (load_node(loader) != 1) RETURN_ERRMSG(loader, "unexpected END event"); if (loader->error) return; if (!do_parse(loader)) return; if (loader->event.type != 
YAML_DOCUMENT_END_EVENT) RETURN_ERRMSG(loader, "expected DOCUMENT_END_EVENT"); /* reset anchor table */ lua_newtable(loader->L); lua_replace(loader->L, loader->anchortable_index); } } static int l_load(lua_State *L) { struct lua_yaml_loader loader; luaL_argcheck(L, lua_isstring(L, 1), 1, "must provide a string argument"); loader.L = L; loader.cfg = luaL_checkserializer(L); loader.validevent = 0; loader.error = 0; loader.document_count = 0; /* create table used to track anchors */ lua_newtable(L); loader.anchortable_index = lua_gettop(L); if (!yaml_parser_initialize(&loader.parser)) luaL_error(L, OOM_ERRMSG); yaml_parser_set_input_string(&loader.parser, (const unsigned char *)lua_tostring(L, 1), lua_strlen(L, 1)); load(&loader); delete_event(&loader); yaml_parser_delete(&loader.parser); if (loader.error) lua_error(L); return loader.document_count; } static int dump_node(struct lua_yaml_dumper *dumper); static yaml_char_t *get_yaml_anchor(struct lua_yaml_dumper *dumper) { const char *s = ""; lua_pushvalue(dumper->L, -1); lua_rawget(dumper->L, dumper->anchortable_index); if (!lua_toboolean(dumper->L, -1)) { lua_pop(dumper->L, 1); return NULL; } if (lua_isboolean(dumper->L, -1)) { /* this element is referenced more than once but has not been named */ char buf[32]; snprintf(buf, sizeof(buf), "%u", dumper->anchor_number++); lua_pop(dumper->L, 1); lua_pushvalue(dumper->L, -1); lua_pushstring(dumper->L, buf); s = lua_tostring(dumper->L, -1); lua_rawset(dumper->L, dumper->anchortable_index); } else { /* this is an aliased element */ yaml_event_t ev; const char *str = lua_tostring(dumper->L, -1); if (!yaml_alias_event_initialize(&ev, (yaml_char_t *) str) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); lua_pop(dumper->L, 1); } return (yaml_char_t *)s; } static int dump_table(struct lua_yaml_dumper *dumper, struct luaL_field *field){ yaml_event_t ev; yaml_char_t *anchor = get_yaml_anchor(dumper); if (anchor && !*anchor) return 1; 
yaml_mapping_style_t yaml_style = (field->compact) ? (YAML_FLOW_MAPPING_STYLE) : YAML_BLOCK_MAPPING_STYLE; if (!yaml_mapping_start_event_initialize(&ev, anchor, NULL, 0, yaml_style) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); lua_pushnil(dumper->L); while (lua_next(dumper->L, -2)) { lua_pushvalue(dumper->L, -2); /* push copy of key on top of stack */ if (!dump_node(dumper) || dumper->error) return 0; lua_pop(dumper->L, 1); /* pop copy of key */ if (!dump_node(dumper) || dumper->error) return 0; lua_pop(dumper->L, 1); } if (!yaml_mapping_end_event_initialize(&ev) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); return 1; } static int dump_array(struct lua_yaml_dumper *dumper, struct luaL_field *field){ unsigned i; yaml_event_t ev; yaml_char_t *anchor = get_yaml_anchor(dumper); if (anchor && !*anchor) return 1; yaml_sequence_style_t yaml_style = (field->compact) ? (YAML_FLOW_SEQUENCE_STYLE) : YAML_BLOCK_SEQUENCE_STYLE; if (!yaml_sequence_start_event_initialize(&ev, anchor, NULL, 0, yaml_style) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); for (i = 0; i < field->size; i++) { lua_rawgeti(dumper->L, -1, i + 1); if (!dump_node(dumper) || dumper->error) return 0; lua_pop(dumper->L, 1); } if (!yaml_sequence_end_event_initialize(&ev) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); return 1; } static int yaml_is_flow_mode(struct lua_yaml_dumper *dumper) { /* * Tarantool-specific: always quote strings in FLOW SEQUENCE * Flow: [1, 'a', 'testing'] * Block: * - 1 * - a * - testing */ if (dumper->emitter.flow_level > 0) { return 1; } else { yaml_event_t *evp; for (evp = dumper->emitter.events.head; evp != dumper->emitter.events.tail; evp++) { if ((evp->type == YAML_SEQUENCE_START_EVENT && evp->data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE) || (evp->type == YAML_MAPPING_START_EVENT && evp->data.mapping_start.style == 
YAML_FLOW_MAPPING_STYLE)) { return 1; break; } } } return 0; } static int dump_node(struct lua_yaml_dumper *dumper) { size_t len = 0; const char *str = ""; yaml_char_t *tag = NULL; yaml_event_t ev; yaml_scalar_style_t style = YAML_PLAIN_SCALAR_STYLE; int is_binary = 0; char buf[FPCONV_G_FMT_BUFSIZE]; struct luaL_field field; int top = lua_gettop(dumper->L); luaL_checkfield(dumper->L, dumper->cfg, top, &field); switch(field.type) { case MP_UINT: snprintf(buf, sizeof(buf) - 1, "%" PRIu64, field.ival); buf[sizeof(buf) - 1] = 0; str = buf; len = strlen(buf); break; case MP_INT: snprintf(buf, sizeof(buf) - 1, "%" PRIi64, field.ival); buf[sizeof(buf) - 1] = 0; str = buf; len = strlen(buf); break; case MP_FLOAT: fpconv_g_fmt(buf, field.fval, dumper->cfg->encode_number_precision); str = buf; len = strlen(buf); break; case MP_DOUBLE: fpconv_g_fmt(buf, field.dval, dumper->cfg->encode_number_precision); str = buf; len = strlen(buf); break; case MP_ARRAY: return dump_array(dumper, &field); case MP_MAP: return dump_table(dumper, &field); case MP_STR: str = lua_tolstring(dumper->L, -1, &len); if (lua_isnumber(dumper->L, -1)) { /* string is convertible to number, quote it to preserve type */ style = YAML_SINGLE_QUOTED_SCALAR_STYLE; break; } style = YAML_ANY_SCALAR_STYLE; // analyze_string(dumper, str, len, &is_binary); if (utf8_check_printable(str, len)) { if (yaml_is_flow_mode(dumper)) { style = YAML_SINGLE_QUOTED_SCALAR_STYLE; } else if (strstr(str, "\n\n") != NULL) { /* * Tarantool-specific: use literal style for string with empty lines. * Useful for tutorial(). 
*/ style = YAML_LITERAL_SCALAR_STYLE; } break; } /* Fall through */ case MP_BIN: is_binary = 1; tobase64(dumper->L, -1); str = lua_tolstring(dumper->L, -1, &len); tag = (yaml_char_t *) LUAYAML_TAG_PREFIX "binary"; break; case MP_BOOL: if (field.bval) { str = "true"; len = 4; } else { str = "false"; len = 5; } break; case MP_NIL: style = YAML_PLAIN_SCALAR_STYLE; str = "null"; len = 4; break; case MP_EXT: assert(0); /* checked by luaL_checkfield() */ break; } if (!yaml_scalar_event_initialize(&ev, NULL, tag, (unsigned char *)str, len, !is_binary, !is_binary, style) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); if (is_binary) lua_pop(dumper->L, 1); return 1; } static void dump_document(struct lua_yaml_dumper *dumper) { yaml_event_t ev; if (!yaml_document_start_event_initialize(&ev, NULL, NULL, NULL, 0) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); if (!dump_node(dumper) || dumper->error) return; if (!yaml_document_end_event_initialize(&ev, 0) || !yaml_emitter_emit(&dumper->emitter, &ev)) luaL_error(dumper->L, OOM_ERRMSG); } static int append_output(void *arg, unsigned char *buf, size_t len) { struct lua_yaml_dumper *dumper = (struct lua_yaml_dumper *)arg; luaL_addlstring(&dumper->yamlbuf, (char *)buf, len); return 1; } static void find_references(struct lua_yaml_dumper *dumper) { int newval = -1, type = lua_type(dumper->L, -1); if (type != LUA_TTABLE) return; lua_pushvalue(dumper->L, -1); /* push copy of table */ lua_rawget(dumper->L, dumper->anchortable_index); if (lua_isnil(dumper->L, -1)) newval = 0; else if (!lua_toboolean(dumper->L, -1)) newval = 1; lua_pop(dumper->L, 1); if (newval != -1) { lua_pushvalue(dumper->L, -1); lua_pushboolean(dumper->L, newval); lua_rawset(dumper->L, dumper->anchortable_index); } if (newval) return; /* recursively process other table values */ lua_pushnil(dumper->L); while (lua_next(dumper->L, -2) != 0) { find_references(dumper); /* find references on value */ 
lua_pop(dumper->L, 1); find_references(dumper); /* find references on key */ } } static int l_dump(lua_State *L) { struct lua_yaml_dumper dumper; int i, argcount = lua_gettop(L); yaml_event_t ev; dumper.L = L; dumper.cfg = luaL_checkserializer(L); dumper.error = 0; /* create thread to use for YAML buffer */ dumper.outputL = lua_newthread(L); luaL_buffinit(dumper.outputL, &dumper.yamlbuf); if (!yaml_emitter_initialize(&dumper.emitter)) luaL_error(L, OOM_ERRMSG); yaml_emitter_set_unicode(&dumper.emitter, 1); yaml_emitter_set_indent(&dumper.emitter, 2); yaml_emitter_set_width(&dumper.emitter, 2); yaml_emitter_set_break(&dumper.emitter, YAML_LN_BREAK); yaml_emitter_set_output(&dumper.emitter, &append_output, &dumper); if (!yaml_stream_start_event_initialize(&ev, YAML_UTF8_ENCODING) || !yaml_emitter_emit(&dumper.emitter, &ev)) luaL_error(L, OOM_ERRMSG); for (i = 0; i < argcount; i++) { lua_newtable(L); dumper.anchortable_index = lua_gettop(L); dumper.anchor_number = 0; lua_pushvalue(L, i + 1); /* push copy of arg we're processing */ find_references(&dumper); dump_document(&dumper); if (dumper.error) break; lua_pop(L, 2); /* pop copied arg and anchor table */ } if (!yaml_stream_end_event_initialize(&ev) || !yaml_emitter_emit(&dumper.emitter, &ev) || !yaml_emitter_flush(&dumper.emitter)) luaL_error(L, OOM_ERRMSG); yaml_emitter_delete(&dumper.emitter); /* finalize and push YAML buffer */ luaL_pushresult(&dumper.yamlbuf); if (dumper.error) lua_error(L); /* move buffer to original thread */ lua_xmove(dumper.outputL, L, 1); return 1; } static int l_new(lua_State *L); static const luaL_Reg yamllib[] = { { "encode", l_dump }, { "decode", l_load }, { "new", l_new }, { NULL, NULL} }; static int l_new(lua_State *L) { struct luaL_serializer *s = luaL_newserializer(L, NULL, yamllib); s->has_compact = 1; return 1; } int luaopen_yaml(lua_State *L) { struct luaL_serializer *s = luaL_newserializer(L, "yaml", yamllib); s->has_compact = 1; return 1; } /* vim: et sw=3 ts=3 sts=3: */ 
tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/LICENSE.LibYAML0000664000000000000000000000204213306560010022445 0ustar rootrootCopyright (c) 2006 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/TODO0000664000000000000000000000027213306560010020743 0ustar rootroot* allow creating dump/load objects with internal configuration settings * better error checking when using LibYAML dump functions * potentially support additional Lua types (functions?) 
tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/LICENSE0000664000000000000000000000211713306560010021260 0ustar rootrootCopyright (c) 2009, Andrew Danforth Copyright (c) 2013-2015, Tarantool Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/b64.c0000664000000000000000000000562613306560010021022 0ustar rootroot#include #include #include "b64.h" int frombase64(lua_State *L, const unsigned char *str, unsigned int len) { int d = 0, dlast = 0, phase = 0; unsigned char c; static int table[256] = { -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* 00-0F */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* 10-1F */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,62,-1,-1,-1,63, /* 20-2F */ 52,53,54,55,56,57,58,59,60,61,-1,-1,-1,-1,-1,-1, /* 30-3F */ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14, /* 40-4F */ 15,16,17,18,19,20,21,22,23,24,25,-1,-1,-1,-1,-1, /* 50-5F */ -1,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40, /* 60-6F */ 41,42,43,44,45,46,47,48,49,50,51,-1,-1,-1,-1,-1, /* 70-7F */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* 80-8F */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* 90-9F */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* A0-AF */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* B0-BF */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* C0-CF */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* D0-DF */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, /* E0-EF */ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 /* F0-FF */ }; luaL_Buffer b; luaL_buffinit(L, &b); for (; len--; ++str) { d = table[(int)*str]; if (d == -1) continue; switch(phase) { case 0: ++phase; break; case 1: c = ((dlast << 2) | ((d & 0x30) >> 4)); luaL_addchar(&b, c); ++phase; break; case 2: c = (((dlast & 0xf) << 4) | ((d & 0x3c) >> 2)); luaL_addchar(&b, c); ++phase; break; case 3: c = (((dlast & 0x03 ) << 6) | d); luaL_addchar(&b, c); phase = 0; break; } dlast = d; } luaL_pushresult(&b); return 1; } static void b64_encode(luaL_Buffer *b, unsigned int c1, unsigned int c2, unsigned int c3, int n) { static const char code[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; unsigned long tuple = c3 + 256UL * (c2 + 256UL * c1); int i; char 
s[4]; for (i = 0; i < 4; i++) { s[3-i] = code[tuple % 64]; tuple /= 64; } for (i = n+1; i < 4; i++) s[i] = '='; luaL_addlstring(b, s, 4); } int tobase64(lua_State *L, int pos) { size_t l; const unsigned char *s = (const unsigned char*)luaL_checklstring(L, pos, &l); luaL_Buffer b; int n; luaL_buffinit(L, &b); for (n = l / 3; n--; s += 3) b64_encode(&b, s[0], s[1], s[2], 3); switch (l % 3) { case 1: b64_encode(&b, s[0], 0, 0, 1); break; case 2: b64_encode(&b, s[0], s[1], 0, 2); break; } luaL_pushresult(&b); return 1; } tarantool_1.9.1.26.g63eb81e3c/third_party/lua-yaml/HISTORY0000664000000000000000000000072113306560010021336 0ustar rootroot0.1: May 12 2009 * initial release 0.2: November 23 2009 * updated libyaml to version 0.1.3 * now properly dumps and loads strings containing binary data using base64 * dumped strings are quoted when they could be loaded as numbers * nulls are loaded as yaml.null, a function that returns itself and can be used to test for equality * load now also recognizes 'yes' as a boolean truth value * zero length scalars are not converted to nil tarantool_1.9.1.26.g63eb81e3c/third_party/queue.h0000664000000000000000000004434213306560010020035 0ustar rootroot/*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 * $FreeBSD: src/sys/sys/queue.h,v 1.60.2.1 2005/08/16 22:41:39 phk Exp $ */ #ifndef _SYS_QUEUE_H_ #define _SYS_QUEUE_H_ #include /* * This file defines four types of data structures: singly-linked lists, * singly-linked tail queues, lists and tail queues. * * A singly-linked list is headed by a single forward pointer. The elements * are singly linked for minimum space and pointer manipulation overhead at * the expense of O(n) removal for arbitrary elements. New elements can be * added to the list after an existing element or at the head of the list. * Elements being removed from the head of the list should use the explicit * macro for this purpose for optimum efficiency. A singly-linked list may * only be traversed in the forward direction. Singly-linked lists are ideal * for applications with large datasets and few or no removals or for * implementing a LIFO queue. * * A singly-linked tail queue is headed by a pair of pointers, one to the * head of the list and the other to the tail of the list. The elements are * singly linked for minimum space and pointer manipulation overhead at the * expense of O(n) removal for arbitrary elements. 
New elements can be added * to the list after an existing element, at the head of the list, or at the * end of the list. Elements being removed from the head of the tail queue * should use the explicit macro for this purpose for optimum efficiency. * A singly-linked tail queue may only be traversed in the forward direction. * Singly-linked tail queues are ideal for applications with large datasets * and few or no removals or for implementing a FIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * For details on the use of these macros, see the queue(3) manual page. 
* * * SLIST LIST STAILQ TAILQ * _HEAD + + + + * _HEAD_INITIALIZER + + + + * _ENTRY + + + + * _INIT + + + + * _EMPTY + + + + * _FIRST + + + + * _NEXT + + + + * _PREV - - - + * _LAST - - + + * _FOREACH + + + + * _FOREACH_SAFE + + + + * _FOREACH_REVERSE - - - + * _FOREACH_REVERSE_SAFE - - - + * _INSERT_HEAD + + + + * _INSERT_BEFORE - + - + * _INSERT_AFTER + + + + * _INSERT_TAIL - - + + * _CONCAT - - + + * _REMOVE_HEAD + - + - * _REMOVE + + + + * */ #define QUEUE_MACRO_DEBUG 0 #if QUEUE_MACRO_DEBUG /* Store the last 2 places the queue element or head was altered */ struct qm_trace { char * lastfile; int lastline; char * prevfile; int prevline; }; #define TRACEBUF struct qm_trace trace; #define TRASHIT(x) do {(x) = (void *)-1;} while (0) #define QMD_TRACE_HEAD(head) do { \ (head)->trace.prevline = (head)->trace.lastline; \ (head)->trace.prevfile = (head)->trace.lastfile; \ (head)->trace.lastline = __LINE__; \ (head)->trace.lastfile = __FILE__; \ } while (0) #define QMD_TRACE_ELEM(elem) do { \ (elem)->trace.prevline = (elem)->trace.lastline; \ (elem)->trace.prevfile = (elem)->trace.lastfile; \ (elem)->trace.lastline = __LINE__; \ (elem)->trace.lastfile = __FILE__; \ } while (0) #else #define QMD_TRACE_ELEM(elem) #define QMD_TRACE_HEAD(head) #define TRACEBUF #define TRASHIT(x) #endif /* QUEUE_MACRO_DEBUG */ /* * Singly-linked List declarations. */ #define SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define SLIST_HEAD_INITIALIZER(head) \ { NULL } #define SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. 
*/ #define SLIST_EMPTY(head) ((head)->slh_first == NULL) #define SLIST_FIRST(head) ((head)->slh_first) #define SLIST_FOREACH(var, head, field) \ for ((var) = SLIST_FIRST((head)); \ (var); \ (var) = SLIST_NEXT((var), field)) #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = SLIST_FIRST((head)); \ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ (var) = (tvar)) #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ for ((varp) = &SLIST_FIRST((head)); \ ((var) = *(varp)) != NULL; \ (varp) = &SLIST_NEXT((var), field)) #define SLIST_INIT(head) do { \ SLIST_FIRST((head)) = NULL; \ } while (0) #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ SLIST_NEXT((slistelm), field) = (elm); \ } while (0) #define SLIST_INSERT_HEAD(head, elm, field) do { \ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ SLIST_FIRST((head)) = (elm); \ } while (0) #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) #define SLIST_REMOVE(head, elm, type, field) do { \ if (SLIST_FIRST((head)) == (elm)) { \ SLIST_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = SLIST_FIRST((head)); \ while (SLIST_NEXT(curelm, field) != (elm)) \ curelm = SLIST_NEXT(curelm, field); \ SLIST_NEXT(curelm, field) = \ SLIST_NEXT(SLIST_NEXT(curelm, field), field); \ } \ } while (0) #define SLIST_REMOVE_HEAD(head, field) do { \ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ } while (0) /* * Singly-linked Tail queue declarations. */ #define STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first;/* first element */ \ struct type **stqh_last;/* addr of last next element */ \ } #define STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. 
*/ #define STAILQ_CONCAT(head1, head2) do { \ if (!STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ STAILQ_INIT((head2)); \ } \ } while (0) #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define STAILQ_FIRST(head) ((head)->stqh_first) #define STAILQ_FOREACH(var, head, field) \ for((var) = STAILQ_FIRST((head)); \ (var); \ (var) = STAILQ_NEXT((var), field)) #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = STAILQ_FIRST((head)); \ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define STAILQ_INIT(head) do { \ STAILQ_FIRST((head)) = NULL; \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_NEXT((tqelm), field) = (elm); \ } while (0) #define STAILQ_INSERT_HEAD(head, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ STAILQ_FIRST((head)) = (elm); \ } while (0) #define STAILQ_INSERT_TAIL(head, elm, field) do { \ STAILQ_NEXT((elm), field) = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) #define STAILQ_LAST(head, type, field) \ (STAILQ_EMPTY((head)) ? 
\ NULL : \ ((struct type *) \ ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) #define STAILQ_REMOVE(head, elm, type, field) do { \ if (STAILQ_FIRST((head)) == (elm)) { \ STAILQ_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = STAILQ_FIRST((head)); \ while (STAILQ_NEXT(curelm, field) != (elm)) \ curelm = STAILQ_NEXT(curelm, field); \ if ((STAILQ_NEXT(curelm, field) = \ STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\ (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ } \ } while (0) #define STAILQ_REMOVE_HEAD(head, field) do { \ if ((STAILQ_FIRST((head)) = \ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) /* Reverse a list in-place. */ #define STAILQ_REVERSE(head, type, member) do { \ struct type *elem = STAILQ_FIRST(head), *next; \ STAILQ_INIT(head); \ while (elem) { \ next = STAILQ_NEXT(elem, member); \ STAILQ_INSERT_HEAD(head, elem, member); \ elem = next; \ } \ } while (0) /* Concat all members of head1 starting from elem to the end of head2. */ #define STAILQ_SPLICE(head1, elem, member, head2) do { \ if (elem) { \ *(head2)->stqh_last = (elem); \ (head2)->stqh_last = (head1)->stqh_last; \ (head1)->stqh_last = &STAILQ_FIRST(head1); \ while (*(head1)->stqh_last != (elem)) { \ (head1)->stqh_last = &STAILQ_NEXT( \ *(head1)->stqh_last, member); \ } \ *(head1)->stqh_last = NULL; \ } \ } while (0) /* * List declarations. */ #define LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define LIST_HEAD_INITIALIZER(head) \ { NULL } #define LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. 
*/ #define LIST_EMPTY(head) ((head)->lh_first == NULL) #define LIST_FIRST(head) ((head)->lh_first) #define LIST_FOREACH(var, head, field) \ for ((var) = LIST_FIRST((head)); \ (var); \ (var) = LIST_NEXT((var), field)) #define LIST_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = LIST_FIRST((head)); \ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ (var) = (tvar)) #define LIST_INIT(head) do { \ LIST_FIRST((head)) = NULL; \ } while (0) #define LIST_INSERT_AFTER(listelm, elm, field) do { \ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ LIST_NEXT((listelm), field)->field.le_prev = \ &LIST_NEXT((elm), field); \ LIST_NEXT((listelm), field) = (elm); \ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ } while (0) #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ LIST_NEXT((elm), field) = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ } while (0) #define LIST_INSERT_HEAD(head, elm, field) do { \ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ LIST_FIRST((head)) = (elm); \ (elm)->field.le_prev = &LIST_FIRST((head)); \ } while (0) #define LIST_NEXT(elm, field) ((elm)->field.le_next) #define LIST_REMOVE(elm, field) do { \ if (LIST_NEXT((elm), field) != NULL) \ LIST_NEXT((elm), field)->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = LIST_NEXT((elm), field); \ } while (0) /* * Tail queue declarations. */ #define TAILQ_HEAD(name, type) \ struct name { \ struct type *tqh_first; /* first element */ \ struct type **tqh_last; /* addr of last next element */ \ TRACEBUF \ } #define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define TAILQ_ENTRY(type) \ struct { \ struct type *tqe_next; /* next element */ \ struct type **tqe_prev; /* address of previous next element */ \ TRACEBUF \ } /* * Tail queue functions. 
*/ #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ TAILQ_INIT((head2)); \ QMD_TRACE_HEAD(head1); \ QMD_TRACE_HEAD(head2); \ } \ } while (0) #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define TAILQ_FIRST(head) ((head)->tqh_first) #define TAILQ_FOREACH(var, head, field) \ for ((var) = TAILQ_FIRST((head)); \ (var); \ (var) = TAILQ_NEXT((var), field)) #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ for ((var) = TAILQ_FIRST((head)); \ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ (var) = (tvar)) #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = TAILQ_LAST((head), headname); \ (var); \ (var) = TAILQ_PREV((var), headname, field)) #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ for ((var) = TAILQ_LAST((head), headname); \ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ (var) = (tvar)) #define TAILQ_INIT(head) do { \ TAILQ_FIRST((head)) = NULL; \ (head)->tqh_last = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ } while (0) #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ TAILQ_NEXT((elm), field)->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else { \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ } \ TAILQ_NEXT((listelm), field) = (elm); \ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&listelm->field); \ } while (0) #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ TAILQ_NEXT((elm), field) = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ QMD_TRACE_ELEM(&(elm)->field); \ QMD_TRACE_ELEM(&listelm->field); \ } while (0) #define 
TAILQ_INSERT_HEAD(head, elm, field) do { \ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ TAILQ_FIRST((head))->field.tqe_prev = \ &TAILQ_NEXT((elm), field); \ else \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ TAILQ_FIRST((head)) = (elm); \ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_INSERT_TAIL(head, elm, field) do { \ TAILQ_NEXT((elm), field) = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &TAILQ_NEXT((elm), field); \ QMD_TRACE_HEAD(head); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) #define TAILQ_REMOVE(head, elm, field) do { \ if ((TAILQ_NEXT((elm), field)) != NULL) \ TAILQ_NEXT((elm), field)->field.tqe_prev = \ (elm)->field.tqe_prev; \ else { \ (head)->tqh_last = (elm)->field.tqe_prev; \ QMD_TRACE_HEAD(head); \ } \ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ TRASHIT((elm)->field.tqe_next); \ TRASHIT((elm)->field.tqe_prev); \ QMD_TRACE_ELEM(&(elm)->field); \ } while (0) #define TAILQ_REMOVE_HEAD(head, field) do { \ TAILQ_REMOVE(head, TAILQ_FIRST(head), field); \ } while (0) #ifdef _KERNEL /* * XXX insque() and remque() are an old way of handling certain queues. * They bogusly assumes that all queue heads look alike. 
*/ struct quehead { struct quehead *qh_link; struct quehead *qh_rlink; }; #ifdef __CC_SUPPORTS___INLINE static __inline void insque(void *a, void *b) { struct quehead *element = (struct quehead *)a, *head = (struct quehead *)b; element->qh_link = head->qh_link; element->qh_rlink = head; head->qh_link = element; element->qh_link->qh_rlink = element; } static __inline void remque(void *a) { struct quehead *element = (struct quehead *)a; element->qh_link->qh_rlink = element->qh_rlink; element->qh_rlink->qh_link = element->qh_link; element->qh_rlink = 0; } #else /* !__CC_SUPPORTS___INLINE */ void insque(void *a, void *b); void remque(void *a); #endif /* __CC_SUPPORTS___INLINE */ #endif /* _KERNEL */ #endif /* !_SYS_QUEUE_H_ */ tarantool_1.9.1.26.g63eb81e3c/third_party/sha1.c0000664000000000000000000001554513306560010017543 0ustar rootroot /* from valgrind tests */ /* ================ sha1.c ================ */ /* SHA-1 in C By Steve Reid 100% Public Domain Test Vectors (from FIPS PUB 180-1) "abc" A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 A million repetitions of "a" 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F */ /* #define LITTLE_ENDIAN * This should be #define'd already, if true. */ /* #define SHA1HANDSOFF * Copies data before messing with it. */ #define SHA1HANDSOFF #include #include #include /* for u_int*_t */ #include "sha1.h" #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) /* blk0() and blk() perform the initial expand. 
*/ /* I got the idea of expanding during the round function from SSLeay */ #define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \ |(rol(block->l[i],8)&0x00FF00FF)) #define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \ ^block->l[(i+2)&15]^block->l[i&15],1)) /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ #define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30); #define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30); #define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30); #define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30); #define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30); /* Hash a single 512-bit block. This is the core of the algorithm. */ void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]) { uint32_t a, b, c, d, e; typedef union { unsigned char c[64]; uint32_t l[16]; } CHAR64LONG16; #ifdef SHA1HANDSOFF CHAR64LONG16 block[1]; /* use array to appear as a pointer */ memcpy(block, buffer, 64); #else /* The following had better never be used because it causes the * pointer-to-const buffer to be cast into a pointer to non-const. * And the result is written through. I threw a "const" in, hoping * this will cause a diagnostic. */ CHAR64LONG16* block = (const CHAR64LONG16*)buffer; #endif /* Copy context->state[] to working vars */ a = state[0]; b = state[1]; c = state[2]; d = state[3]; e = state[4]; /* 4 rounds of 20 operations each. Loop unrolled. 
*/ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7); R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11); R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15); R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19); R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23); R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27); R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31); R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35); R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39); R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43); R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47); R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51); R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55); R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59); R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63); R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67); R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71); R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75); R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79); /* Add the working vars back into context.state[] */ state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; /* Wipe variables */ a = b = c = d = e = 0; #ifdef SHA1HANDSOFF memset(block, '\0', sizeof(block)); #endif } /* SHA1Init - Initialize new context */ void SHA1Init(SHA1_CTX* context) { /* SHA1 initialization constants */ context->state[0] = 0x67452301; context->state[1] = 0xEFCDAB89; context->state[2] = 0x98BADCFE; context->state[3] = 0x10325476; context->state[4] = 0xC3D2E1F0; context->count[0] = context->count[1] = 0; } /* 
Run your data through this. */ void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len) { uint32_t i, j; j = context->count[0]; if ((context->count[0] += len << 3) < j) context->count[1]++; context->count[1] += (len>>29); j = (j >> 3) & 63; if ((j + len) > 63) { memcpy(&context->buffer[j], data, (i = 64-j)); SHA1Transform(context->state, context->buffer); for ( ; i + 63 < len; i += 64) { SHA1Transform(context->state, &data[i]); } j = 0; } else i = 0; memcpy(&context->buffer[j], &data[i], len - i); } /* Add padding and return the message digest. */ void SHA1Final(unsigned char digest[20], SHA1_CTX* context) { unsigned i; unsigned char finalcount[8]; unsigned char c; #if 0 /* untested "improvement" by DHR */ /* Convert context->count to a sequence of bytes * in finalcount. Second element first, but * big-endian order within element. * But we do it all backwards. */ unsigned char *fcp = &finalcount[8]; for (i = 0; i < 2; i++) { uint32_t t = context->count[i]; int j; for (j = 0; j < 4; t >>= 8, j++) *--fcp = (unsigned char) t; } #else for (i = 0; i < 8; i++) { finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 
0 : 1)] >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */ } #endif c = 0200; SHA1Update(context, &c, 1); while ((context->count[0] & 504) != 448) { c = 0000; SHA1Update(context, &c, 1); } SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */ for (i = 0; i < 20; i++) { digest[i] = (unsigned char) ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255); } /* Wipe variables */ memset(context, '\0', sizeof(*context)); memset(&finalcount, '\0', sizeof(finalcount)); } /* ================ end of sha1.c ================ */ #if 0 #define BUFSIZE 4096 int main(int argc, char **argv) { SHA1_CTX ctx; unsigned char hash[20], buf[BUFSIZE]; int i; for(i=0;i ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This is part of the libb64 project, and has been placed in the * public domain. For details, see * http://sourceforge.net/projects/libb64 */ #ifdef __cplusplus extern "C" { #endif #define BASE64_CHARS_PER_LINE 72 /** Options for base64 encoder. */ enum base64_options { /** Do not write padding symbols '='. */ BASE64_NOPAD = 1, /* 0 0 1 */ /** Do not write '\n' every 72 symbols. */ BASE64_NOWRAP = 2, /* 0 1 0 */ /** * No-pad + no-wrap. * Replace '+' -> '-' and '/' -> '_'. 
*/ BASE64_URLSAFE = 7, /* 1 1 1 */ }; inline int base64_bufsize(int binsize, int options) { int datasize = binsize * 4/3; if ((options & BASE64_NOWRAP) == 0) { /* Account '\n' symbols. */ datasize += ((datasize + BASE64_CHARS_PER_LINE - 1)/ BASE64_CHARS_PER_LINE); } else if (binsize % 3 != 0) { datasize++; } if ((options & BASE64_NOPAD) == 0) datasize += 4; return datasize; } /** * Encode a binary stream into BASE64 text. * * @pre the buffer size is at least 4/3 of the stream * size + stream_size/72 (newlines) + 4 * * @param[in] in_bin the binary input stream to decode * @param[in] in_len size of the input * @param[out] out_base64 output buffer for the encoded data * @param[in] out_len buffer size, must be at least * 4/3 of the input size * * @return the size of encoded output */ int base64_encode(const char *in_bin, int in_len, char *out_base64, int out_len, int options); /** * Decode a BASE64 text into a binary * * @param[in] in_base64 the BASE64 stream to decode * @param[in] in_len size of the input * @param[out] out_bin output buffer size * @param[in] out_len buffer size * * @pre the output buffer size must be at least * 3/4 + 1 of the size of the input * * @return the size of decoded output */ int base64_decode(const char *in_base64, int in_len, char *out_bin, int out_len); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* BASE64_H */ tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/0000775000000000000000000000000013306562377020045 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/luafun/fun-scm-1.rockspec0000644000000000000000000000156013306562377023306 0ustar rootrootpackage = "fun" version = "scm-1" source = { url = "git://github.com/luafun/luafun.git", } description = { summary = "High-performance functional programming library for Lua", homepage = "https://luafun.github.io/", license = "MIT/X11", maintainer = "Roman Tsisyk ", detailed = [[ Lua Fun is a high-performance functional programming library for Lua designed with LuaJIT's trace 
compiler in mind. Lua Fun provides a set of more than 50 programming primitives typically found in languages like Standard ML, Haskell, Erlang, JavaScript, Python and even Lisp. High-order functions such as map, filter, reduce, zip, etc., make it easy to write simple and efficient functional code. ]] } dependencies = { "lua" } build = { type = "builtin", modules = { fun = "fun.lua", }, copy_directories = { "tests" }, } tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/doc/0000755000000000000000000000000013306562377020610 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/luafun/doc/reference.rst0000644000000000000000000000030513306562377023276 0ustar rootrootAPI Reference ============= .. toctree:: basic.rst generators.rst slicing.rst indexing.rst filtering.rst reducing.rst transformations.rst compositions.rst operators.rsttarantool_1.9.1.26.g63eb81e3c/third_party/luafun/doc/getting_started.rst0000644000000000000000000001347013306562377024536 0ustar rootrootGetting Started =============== Please jump to `Using the Library`_ section if you are familiar with Lua and LuaJIT. .. contents:: Prerequisites ------------- The library is designed for LuaJIT_. **LuaJIT 2.1 alpha** is high^W **Highly** recommended for performance reasons. Lua 5.1--5.3 are also supported. The library is platform-independent and expected to work on all platforms that supported by Lua(JIT). It can be also used in any Lua(JIT) based applications, e.g. Tarantool_ or OpenResty_. You might need diff_ tool to run test system and sphinx_ to regenerate the documentation from source files. .. _LuaJIT: http://luajit.org/ .. _Tarantool: http://tarantool.org/ .. _OpenResty: http://openresty.org/ .. _diff: http://en.wikipedia.org/wiki/Diff .. _sphinx: http://sphinx-doc.org/ Installing LuaJIT ----------------- You can build LuaJIT from sources or install it from a binary archive. From Sources ```````````` 1. Clone LuaJIT git repository. Please note that **v2.1** branch is needed. 
You can always select this branch using ``git checkout v2.1``. .. code-block:: bash $ git clone http://luajit.org/git/luajit-2.0.git -b v2.1 luajit-2.1 Cloning into 'luajit-2.1'... 2. Compile LuaJIT .. code-block:: bash $ cd luajit-2.1/ luajit-2.1 $ make -j8 3. Install LuaJIT .. code-block:: bash luajit-2.1 $ make install luajit-2.1 $ ln -s /usr/local/bin/luajit-2.1.0-alpha /usr/local/bin/luajit Install operation might require root permissions. However, you can install LuaJIT into your home directory. From a Binary Archive ````````````````````` If operations above look too complicated for you, you always can download a binary archive from http://luajit.org/download.html page. Your favorite package manager may also have LuaJIT packages. Running LuaJIT `````````````` Ensure that freshly installed LuaJIT works: .. code-block:: bash $ luajit LuaJIT 2.1.0-alpha -- Copyright (C) 2005-2013 Mike Pall. http://luajit.org/ JIT: ON SSE2 SSE3 SSE4.1 fold cse dce fwd dse narrow loop abc sink fuse > = 2 + 2 4 It is good idea to use LuaJIT CLI under ``rlwrap`` (on nix platforms): .. code-block:: bash alias luajit="rlwrap luajit" $ luajit LuaJIT 2.1.0-alpha -- Copyright (C) 2005-2013 Mike Pall. 
http://luajit.org/ JIT: ON SSE2 SSE3 SSE4.1 fold cse dce fwd dse narrow loop abc sink fuse > = 2 + 2 4 > = 2 + 2 tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/doc/.gitignore0000644000000000000000000000000713306562377022575 0ustar rootroot_build tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/rpm/0000755000000000000000000000000013306562377020641 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/luafun/rpm/lua-fun.spec0000644000000000000000000000421313306562377023064 0ustar rootroot%define luaver 5.3 %define luapkgdir %{_datadir}/lua/%{luaver} # LuaJIT is compatible with Lua 5.1 and uses the same directory for modules %global ljpkgdir %{_datadir}/lua/5.1 Name: lua-fun Version: 0.1.3 Release: 1%{?dist} Summary: Functional programming library for Lua Group: Development/Libraries License: MIT URL: https://github.com/luafun/luafun Source0: https://github.com/luafun/luafun/archive/%{version}/luafun-%{version}.tar.gz BuildArch: noarch BuildRequires: luajit >= 2.0 BuildRequires: lua >= 5.1 Requires: lua >= 5.1 %package -n luajit-fun Summary: Functional programming library for LuaJIT Requires: luajit >= 2.0 %description -n lua-fun Lua Fun is a high-performance functional programming library for Lua designed with LuaJIT's trace compiler in mind. Lua Fun provides a set of more than 50 programming primitives typically found in languages like Standard ML, Haskell, Erlang, JavaScript, Python and even Lisp. High-order functions such as map, filter, reduce, zip, etc., make it easy to write simple and efficient functional code. This package provides a module for Lua %{luaver}. %description -n luajit-fun Lua Fun is a high-performance functional programming library for Lua designed with LuaJIT's trace compiler in mind. Lua Fun provides a set of more than 50 programming primitives typically found in languages like Standard ML, Haskell, Erlang, JavaScript, Python and even Lisp. 
High-order functions such as map, filter, reduce, zip, etc., make it easy to write simple and efficient functional code. This package provides a module for LuaJIT. %prep %setup -q -n luafun-%{version} %build # nothing to do %install # Install for Lua mkdir -p %{buildroot}%{luapkgdir} cp -av fun.lua %{buildroot}%{luapkgdir}/fun.lua # Install for LuaJIT mkdir -p %{buildroot}%{ljpkgdir} cp -av fun.lua %{buildroot}%{ljpkgdir}/fun.lua %check cd tests luajit ./runtest *.lua lua ./runtest *.lua %files -n lua-fun %{luapkgdir}/fun.lua %doc README.md CONTRIBUTING.md %license COPYING.md %files -n luajit-fun %{ljpkgdir}/fun.lua %doc README.md CONTRIBUTING.md %license COPYING.md %changelog * Mon Jan 18 2016 Roman Tsisyk - 0.1.3-1 - Initial version. tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/fun.lua0000644000000000000000000007052313306562377021345 0ustar rootroot--- --- Lua Fun - a high-performance functional programming library for LuaJIT --- --- Copyright (c) 2013-2017 Roman Tsisyk --- --- Distributed under the MIT/X11 License. See COPYING.md for more details. --- local exports = {} local methods = {} -- compatibility with Lua 5.1/5.2 local unpack = rawget(table, "unpack") or unpack -------------------------------------------------------------------------------- -- Tools -------------------------------------------------------------------------------- local return_if_not_empty = function(state_x, ...) if state_x == nil then return nil end return ... end local call_if_not_empty = function(fun, state_x, ...) if state_x == nil then return nil end return state_x, fun(...) 
end local function deepcopy(orig) -- used by cycle() local orig_type = type(orig) local copy if orig_type == 'table' then copy = {} for orig_key, orig_value in next, orig, nil do copy[deepcopy(orig_key)] = deepcopy(orig_value) end else copy = orig end return copy end local iterator_mt = { -- usually called by for-in loop __call = function(self, param, state) return self.gen(param, state) end; __tostring = function(self) return '' end; -- add all exported methods __index = methods; } local wrap = function(gen, param, state) return setmetatable({ gen = gen, param = param, state = state }, iterator_mt), param, state end exports.wrap = wrap local unwrap = function(self) return self.gen, self.param, self.state end methods.unwrap = unwrap -------------------------------------------------------------------------------- -- Basic Functions -------------------------------------------------------------------------------- local nil_gen = function(_param, _state) return nil end local string_gen = function(param, state) local state = state + 1 if state > #param then return nil end local r = string.sub(param, state, state) return state, r end local ipairs_gen = ipairs({}) -- get the generating function from ipairs local pairs_gen = pairs({ a = 0 }) -- get the generating function from pairs local map_gen = function(tab, key) local value local key, value = pairs_gen(tab, key) return key, key, value end local rawiter = function(obj, param, state) assert(obj ~= nil, "invalid iterator") if type(obj) == "table" then local mt = getmetatable(obj); if mt ~= nil then if mt == iterator_mt then return obj.gen, obj.param, obj.state elseif mt.__ipairs ~= nil then return mt.__ipairs(obj) elseif mt.__pairs ~= nil then return mt.__pairs(obj) end end if #obj > 0 then -- array return ipairs(obj) else -- hash return map_gen, obj, nil end elseif (type(obj) == "function") then return obj, param, state elseif (type(obj) == "string") then if #obj == 0 then return nil_gen, nil, nil end return string_gen, 
obj, 0 end error(string.format('object %s of type "%s" is not iterable', obj, type(obj))) end local iter = function(obj, param, state) return wrap(rawiter(obj, param, state)) end exports.iter = iter local method0 = function(fun) return function(self) return fun(self.gen, self.param, self.state) end end local method1 = function(fun) return function(self, arg1) return fun(arg1, self.gen, self.param, self.state) end end local method2 = function(fun) return function(self, arg1, arg2) return fun(arg1, arg2, self.gen, self.param, self.state) end end local export0 = function(fun) return function(gen, param, state) return fun(rawiter(gen, param, state)) end end local export1 = function(fun) return function(arg1, gen, param, state) return fun(arg1, rawiter(gen, param, state)) end end local export2 = function(fun) return function(arg1, arg2, gen, param, state) return fun(arg1, arg2, rawiter(gen, param, state)) end end local each = function(fun, gen, param, state) repeat state = call_if_not_empty(fun, gen(param, state)) until state == nil end methods.each = method1(each) exports.each = export1(each) methods.for_each = methods.each exports.for_each = exports.each methods.foreach = methods.each exports.foreach = exports.each -------------------------------------------------------------------------------- -- Generators -------------------------------------------------------------------------------- local range_gen = function(param, state) local stop, step = param[1], param[2] local state = state + step if state > stop then return nil end return state, state end local range_rev_gen = function(param, state) local stop, step = param[1], param[2] local state = state + step if state < stop then return nil end return state, state end local range = function(start, stop, step) if step == nil then if stop == nil then if start == 0 then return nil_gen, nil, nil end stop = start start = stop > 0 and 1 or -1 end step = start <= stop and 1 or -1 end assert(type(start) == "number", "start 
must be a number") assert(type(stop) == "number", "stop must be a number") assert(type(step) == "number", "step must be a number") assert(step ~= 0, "step must not be zero") if (step > 0) then return wrap(range_gen, {stop, step}, start - step) elseif (step < 0) then return wrap(range_rev_gen, {stop, step}, start - step) end end exports.range = range local duplicate_table_gen = function(param_x, state_x) return state_x + 1, unpack(param_x) end local duplicate_fun_gen = function(param_x, state_x) return state_x + 1, param_x(state_x) end local duplicate_gen = function(param_x, state_x) return state_x + 1, param_x end local duplicate = function(...) if select('#', ...) <= 1 then return wrap(duplicate_gen, select(1, ...), 0) else return wrap(duplicate_table_gen, {...}, 0) end end exports.duplicate = duplicate exports.replicate = duplicate exports.xrepeat = duplicate local tabulate = function(fun) assert(type(fun) == "function") return wrap(duplicate_fun_gen, fun, 0) end exports.tabulate = tabulate local zeros = function() return wrap(duplicate_gen, 0, 0) end exports.zeros = zeros local ones = function() return wrap(duplicate_gen, 1, 0) end exports.ones = ones local rands_gen = function(param_x, _state_x) return 0, math.random(param_x[1], param_x[2]) end local rands_nil_gen = function(_param_x, _state_x) return 0, math.random() end local rands = function(n, m) if n == nil and m == nil then return wrap(rands_nil_gen, 0, 0) end assert(type(n) == "number", "invalid first arg to rands") if m == nil then m = n n = 0 else assert(type(m) == "number", "invalid second arg to rands") end assert(n < m, "empty interval") return wrap(rands_gen, {n, m - 1}, 0) end exports.rands = rands -------------------------------------------------------------------------------- -- Slicing -------------------------------------------------------------------------------- local nth = function(n, gen_x, param_x, state_x) assert(n > 0, "invalid first argument to nth") -- An optimization for arrays and 
strings if gen_x == ipairs_gen then return param_x[n] elseif gen_x == string_gen then if n <= #param_x then return string.sub(param_x, n, n) else return nil end end for i=1,n-1,1 do state_x = gen_x(param_x, state_x) if state_x == nil then return nil end end return return_if_not_empty(gen_x(param_x, state_x)) end methods.nth = method1(nth) exports.nth = export1(nth) local head_call = function(state, ...) if state == nil then error("head: iterator is empty") end return ... end local head = function(gen, param, state) return head_call(gen(param, state)) end methods.head = method0(head) exports.head = export0(head) exports.car = exports.head methods.car = methods.head local tail = function(gen, param, state) state = gen(param, state) if state == nil then return wrap(nil_gen, nil, nil) end return wrap(gen, param, state) end methods.tail = method0(tail) exports.tail = export0(tail) exports.cdr = exports.tail methods.cdr = methods.tail local take_n_gen_x = function(i, state_x, ...) if state_x == nil then return nil end return {i, state_x}, ... end local take_n_gen = function(param, state) local n, gen_x, param_x = param[1], param[2], param[3] local i, state_x = state[1], state[2] if i >= n then return nil end return take_n_gen_x(i + 1, gen_x(param_x, state_x)) end local take_n = function(n, gen, param, state) assert(n >= 0, "invalid first argument to take_n") return wrap(take_n_gen, {n, gen, param}, {0, state}) end methods.take_n = method1(take_n) exports.take_n = export1(take_n) local take_while_gen_x = function(fun, state_x, ...) if state_x == nil or not fun(...) then return nil end return state_x, ... 
end local take_while_gen = function(param, state_x) local fun, gen_x, param_x = param[1], param[2], param[3] return take_while_gen_x(fun, gen_x(param_x, state_x)) end local take_while = function(fun, gen, param, state) assert(type(fun) == "function", "invalid first argument to take_while") return wrap(take_while_gen, {fun, gen, param}, state) end methods.take_while = method1(take_while) exports.take_while = export1(take_while) local take = function(n_or_fun, gen, param, state) if type(n_or_fun) == "number" then return take_n(n_or_fun, gen, param, state) else return take_while(n_or_fun, gen, param, state) end end methods.take = method1(take) exports.take = export1(take) local drop_n = function(n, gen, param, state) assert(n >= 0, "invalid first argument to drop_n") local i for i=1,n,1 do state = gen(param, state) if state == nil then return wrap(nil_gen, nil, nil) end end return wrap(gen, param, state) end methods.drop_n = method1(drop_n) exports.drop_n = export1(drop_n) local drop_while_x = function(fun, state_x, ...) if state_x == nil or not fun(...) then return state_x, false end return state_x, true, ... 
end local drop_while = function(fun, gen_x, param_x, state_x) assert(type(fun) == "function", "invalid first argument to drop_while") local cont, state_x_prev repeat state_x_prev = deepcopy(state_x) state_x, cont = drop_while_x(fun, gen_x(param_x, state_x)) until not cont if state_x == nil then return wrap(nil_gen, nil, nil) end return wrap(gen_x, param_x, state_x_prev) end methods.drop_while = method1(drop_while) exports.drop_while = export1(drop_while) local drop = function(n_or_fun, gen_x, param_x, state_x) if type(n_or_fun) == "number" then return drop_n(n_or_fun, gen_x, param_x, state_x) else return drop_while(n_or_fun, gen_x, param_x, state_x) end end methods.drop = method1(drop) exports.drop = export1(drop) local split = function(n_or_fun, gen_x, param_x, state_x) return take(n_or_fun, gen_x, param_x, state_x), drop(n_or_fun, gen_x, param_x, state_x) end methods.split = method1(split) exports.split = export1(split) methods.split_at = methods.split exports.split_at = exports.split methods.span = methods.split exports.span = exports.split -------------------------------------------------------------------------------- -- Indexing -------------------------------------------------------------------------------- local index = function(x, gen, param, state) local i = 1 for _k, r in gen, param, state do if r == x then return i end i = i + 1 end return nil end methods.index = method1(index) exports.index = export1(index) methods.index_of = methods.index exports.index_of = exports.index methods.elem_index = methods.index exports.elem_index = exports.index local indexes_gen = function(param, state) local x, gen_x, param_x = param[1], param[2], param[3] local i, state_x = state[1], state[2] local r while true do state_x, r = gen_x(param_x, state_x) if state_x == nil then return nil end i = i + 1 if r == x then return {i, state_x}, i end end end local indexes = function(x, gen, param, state) return wrap(indexes_gen, {x, gen, param}, {0, state}) end methods.indexes = 
method1(indexes) exports.indexes = export1(indexes) methods.elem_indexes = methods.indexes exports.elem_indexes = exports.indexes methods.indices = methods.indexes exports.indices = exports.indexes methods.elem_indices = methods.indexes exports.elem_indices = exports.indexes -------------------------------------------------------------------------------- -- Filtering -------------------------------------------------------------------------------- local filter1_gen = function(fun, gen_x, param_x, state_x, a) while true do if state_x == nil or fun(a) then break; end state_x, a = gen_x(param_x, state_x) end return state_x, a end -- call each other local filterm_gen local filterm_gen_shrink = function(fun, gen_x, param_x, state_x) return filterm_gen(fun, gen_x, param_x, gen_x(param_x, state_x)) end filterm_gen = function(fun, gen_x, param_x, state_x, ...) if state_x == nil then return nil end if fun(...) then return state_x, ... end return filterm_gen_shrink(fun, gen_x, param_x, state_x) end local filter_detect = function(fun, gen_x, param_x, state_x, ...) if select('#', ...) < 2 then return filter1_gen(fun, gen_x, param_x, state_x, ...) else return filterm_gen(fun, gen_x, param_x, state_x, ...) 
end end local filter_gen = function(param, state_x) local fun, gen_x, param_x = param[1], param[2], param[3] return filter_detect(fun, gen_x, param_x, gen_x(param_x, state_x)) end local filter = function(fun, gen, param, state) return wrap(filter_gen, {fun, gen, param}, state) end methods.filter = method1(filter) exports.filter = export1(filter) methods.remove_if = methods.filter exports.remove_if = exports.filter local grep = function(fun_or_regexp, gen, param, state) local fun = fun_or_regexp if type(fun_or_regexp) == "string" then fun = function(x) return string.find(x, fun_or_regexp) ~= nil end end return filter(fun, gen, param, state) end methods.grep = method1(grep) exports.grep = export1(grep) local partition = function(fun, gen, param, state) local neg_fun = function(...) return not fun(...) end return filter(fun, gen, param, state), filter(neg_fun, gen, param, state) end methods.partition = method1(partition) exports.partition = export1(partition) -------------------------------------------------------------------------------- -- Reducing -------------------------------------------------------------------------------- local foldl_call = function(fun, start, state, ...) if state == nil then return nil, start end return state, fun(start, ...) 
end local foldl = function(fun, start, gen_x, param_x, state_x) while true do state_x, start = foldl_call(fun, start, gen_x(param_x, state_x)) if state_x == nil then break; end end return start end methods.foldl = method2(foldl) exports.foldl = export2(foldl) methods.reduce = methods.foldl exports.reduce = exports.foldl local length = function(gen, param, state) if gen == ipairs_gen or gen == string_gen then return #param end local len = 0 repeat state = gen(param, state) len = len + 1 until state == nil return len - 1 end methods.length = method0(length) exports.length = export0(length) local is_null = function(gen, param, state) return gen(param, deepcopy(state)) == nil end methods.is_null = method0(is_null) exports.is_null = export0(is_null) local is_prefix_of = function(iter_x, iter_y) local gen_x, param_x, state_x = iter(iter_x) local gen_y, param_y, state_y = iter(iter_y) local r_x, r_y for i=1,10,1 do state_x, r_x = gen_x(param_x, state_x) state_y, r_y = gen_y(param_y, state_y) if state_x == nil then return true end if state_y == nil or r_x ~= r_y then return false end end end methods.is_prefix_of = is_prefix_of exports.is_prefix_of = is_prefix_of local all = function(fun, gen_x, param_x, state_x) local r repeat state_x, r = call_if_not_empty(fun, gen_x(param_x, state_x)) until state_x == nil or not r return state_x == nil end methods.all = method1(all) exports.all = export1(all) methods.every = methods.all exports.every = exports.all local any = function(fun, gen_x, param_x, state_x) local r repeat state_x, r = call_if_not_empty(fun, gen_x(param_x, state_x)) until state_x == nil or r return not not r end methods.any = method1(any) exports.any = export1(any) methods.some = methods.any exports.some = exports.any local sum = function(gen, param, state) local s = 0 local r = 0 repeat s = s + r state, r = gen(param, state) until state == nil return s end methods.sum = method0(sum) exports.sum = export0(sum) local product = function(gen, param, state) local p = 1 
local r = 1 repeat p = p * r state, r = gen(param, state) until state == nil return p end methods.product = method0(product) exports.product = export0(product) local min_cmp = function(m, n) if n < m then return n else return m end end local max_cmp = function(m, n) if n > m then return n else return m end end local min = function(gen, param, state) local state, m = gen(param, state) if state == nil then error("min: iterator is empty") end local cmp if type(m) == "number" then -- An optimization: use math.min for numbers cmp = math.min else cmp = min_cmp end for _, r in gen, param, state do m = cmp(m, r) end return m end methods.min = method0(min) exports.min = export0(min) methods.minimum = methods.min exports.minimum = exports.min local min_by = function(cmp, gen_x, param_x, state_x) local state_x, m = gen_x(param_x, state_x) if state_x == nil then error("min: iterator is empty") end for _, r in gen_x, param_x, state_x do m = cmp(m, r) end return m end methods.min_by = method1(min_by) exports.min_by = export1(min_by) methods.minimum_by = methods.min_by exports.minimum_by = exports.min_by local max = function(gen_x, param_x, state_x) local state_x, m = gen_x(param_x, state_x) if state_x == nil then error("max: iterator is empty") end local cmp if type(m) == "number" then -- An optimization: use math.max for numbers cmp = math.max else cmp = max_cmp end for _, r in gen_x, param_x, state_x do m = cmp(m, r) end return m end methods.max = method0(max) exports.max = export0(max) methods.maximum = methods.max exports.maximum = exports.max local max_by = function(cmp, gen_x, param_x, state_x) local state_x, m = gen_x(param_x, state_x) if state_x == nil then error("max: iterator is empty") end for _, r in gen_x, param_x, state_x do m = cmp(m, r) end return m end methods.max_by = method1(max_by) exports.max_by = export1(max_by) methods.maximum_by = methods.maximum_by exports.maximum_by = exports.maximum_by local totable = function(gen_x, param_x, state_x) local tab, key, 
val = {} while true do state_x, val = gen_x(param_x, state_x) if state_x == nil then break end table.insert(tab, val) end return tab end methods.totable = method0(totable) exports.totable = export0(totable) local tomap = function(gen_x, param_x, state_x) local tab, key, val = {} while true do state_x, key, val = gen_x(param_x, state_x) if state_x == nil then break end tab[key] = val end return tab end methods.tomap = method0(tomap) exports.tomap = export0(tomap) -------------------------------------------------------------------------------- -- Transformations -------------------------------------------------------------------------------- local map_gen = function(param, state) local gen_x, param_x, fun = param[1], param[2], param[3] return call_if_not_empty(fun, gen_x(param_x, state)) end local map = function(fun, gen, param, state) return wrap(map_gen, {gen, param, fun}, state) end methods.map = method1(map) exports.map = export1(map) local enumerate_gen_call = function(state, i, state_x, ...) if state_x == nil then return nil end return {i + 1, state_x}, i, ... end local enumerate_gen = function(param, state) local gen_x, param_x = param[1], param[2] local i, state_x = state[1], state[2] return enumerate_gen_call(state, i, gen_x(param_x, state_x)) end local enumerate = function(gen, param, state) return wrap(enumerate_gen, {gen, param}, {1, state}) end methods.enumerate = method0(enumerate) exports.enumerate = export0(enumerate) local intersperse_call = function(i, state_x, ...) if state_x == nil then return nil end return {i + 1, state_x}, ... 
end local intersperse_gen = function(param, state) local x, gen_x, param_x = param[1], param[2], param[3] local i, state_x = state[1], state[2] if i % 2 == 1 then return {i + 1, state_x}, x else return intersperse_call(i, gen_x(param_x, state_x)) end end -- TODO: interperse must not add x to the tail local intersperse = function(x, gen, param, state) return wrap(intersperse_gen, {x, gen, param}, {0, state}) end methods.intersperse = method1(intersperse) exports.intersperse = export1(intersperse) -------------------------------------------------------------------------------- -- Compositions -------------------------------------------------------------------------------- local function zip_gen_r(param, state, state_new, ...) if #state_new == #param / 2 then return state_new, ... end local i = #state_new + 1 local gen_x, param_x = param[2 * i - 1], param[2 * i] local state_x, r = gen_x(param_x, state[i]) if state_x == nil then return nil end table.insert(state_new, state_x) return zip_gen_r(param, state, state_new, r, ...) end local zip_gen = function(param, state) return zip_gen_r(param, state, {}) end -- A special hack for zip/chain to skip last two state, if a wrapped iterator -- has been passed local numargs = function(...) local n = select('#', ...) if n >= 3 then -- Fix last argument local it = select(n - 2, ...) if type(it) == 'table' and getmetatable(it) == iterator_mt and it.param == select(n - 1, ...) and it.state == select(n, ...) then return n - 2 end end return n end local zip = function(...) local n = numargs(...) if n == 0 then return wrap(nil_gen, nil, nil) end local param = { [2 * n] = 0 } local state = { [n] = 0 } local i, gen_x, param_x, state_x for i=1,n,1 do local it = select(n - i + 1, ...) gen_x, param_x, state_x = rawiter(it) param[2 * i - 1] = gen_x param[2 * i] = param_x state[i] = state_x end return wrap(zip_gen, param, state) end methods.zip = zip exports.zip = zip local cycle_gen_call = function(param, state_x, ...) 
if state_x == nil then local gen_x, param_x, state_x0 = param[1], param[2], param[3] return gen_x(param_x, deepcopy(state_x0)) end return state_x, ... end local cycle_gen = function(param, state_x) local gen_x, param_x, state_x0 = param[1], param[2], param[3] return cycle_gen_call(param, gen_x(param_x, state_x)) end local cycle = function(gen, param, state) return wrap(cycle_gen, {gen, param, state}, deepcopy(state)) end methods.cycle = method0(cycle) exports.cycle = export0(cycle) -- call each other local chain_gen_r1 local chain_gen_r2 = function(param, state, state_x, ...) if state_x == nil then local i = state[1] i = i + 1 if param[3 * i - 1] == nil then return nil end local state_x = param[3 * i] return chain_gen_r1(param, {i, state_x}) end return {state[1], state_x}, ... end chain_gen_r1 = function(param, state) local i, state_x = state[1], state[2] local gen_x, param_x = param[3 * i - 2], param[3 * i - 1] return chain_gen_r2(param, state, gen_x(param_x, state[2])) end local chain = function(...) local n = numargs(...) if n == 0 then return wrap(nil_gen, nil, nil) end local param = { [3 * n] = 0 } local i, gen_x, param_x, state_x for i=1,n,1 do local elem = select(i, ...) 
gen_x, param_x, state_x = iter(elem) param[3 * i - 2] = gen_x param[3 * i - 1] = param_x param[3 * i] = state_x end return wrap(chain_gen_r1, param, {1, param[3]}) end methods.chain = chain exports.chain = chain -------------------------------------------------------------------------------- -- Operators -------------------------------------------------------------------------------- local operator = { ---------------------------------------------------------------------------- -- Comparison operators ---------------------------------------------------------------------------- lt = function(a, b) return a < b end, le = function(a, b) return a <= b end, eq = function(a, b) return a == b end, ne = function(a, b) return a ~= b end, ge = function(a, b) return a >= b end, gt = function(a, b) return a > b end, ---------------------------------------------------------------------------- -- Arithmetic operators ---------------------------------------------------------------------------- add = function(a, b) return a + b end, div = function(a, b) return a / b end, floordiv = function(a, b) return math.floor(a/b) end, intdiv = function(a, b) local q = a / b if a >= 0 then return math.floor(q) else return math.ceil(q) end end, mod = function(a, b) return a % b end, mul = function(a, b) return a * b end, neq = function(a) return -a end, unm = function(a) return -a end, -- an alias pow = function(a, b) return a ^ b end, sub = function(a, b) return a - b end, truediv = function(a, b) return a / b end, ---------------------------------------------------------------------------- -- String operators ---------------------------------------------------------------------------- concat = function(a, b) return a..b end, len = function(a) return #a end, length = function(a) return #a end, -- an alias ---------------------------------------------------------------------------- -- Logical operators ---------------------------------------------------------------------------- land = 
function(a, b) return a and b end, lor = function(a, b) return a or b end, lnot = function(a) return not a end, truth = function(a) return not not a end, } exports.operator = operator methods.operator = operator exports.op = operator methods.op = operator -------------------------------------------------------------------------------- -- module definitions -------------------------------------------------------------------------------- -- a special syntax sugar to export all functions to the global table setmetatable(exports, { __call = function(t, override) for k, v in pairs(t) do if rawget(_G, k) ~= nil then local msg = 'function ' .. k .. ' already exists in global scope.' if override then rawset(_G, k, v) print('WARNING: ' .. msg .. ' Overwritten.') else print('NOTICE: ' .. msg .. ' Skipped.') end else rawset(_G, k, v) end end end, }) return exports tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/README.md0000644000000000000000000000725313306562377021331 0ustar rootrootLua Functional ============== **Lua Fun** is a high-performance functional programming library for [Lua] designed with [LuaJIT's trace compiler][LuaJIT] in mind. Lua Fun provides a set of more than 50 programming primitives typically found in languages like Standard ML, Haskell, Erlang, JavaScript, Python and even Lisp. High-order functions such as ``map``, ``filter``, ``reduce``, ``zip``, etc., make it easy to **write simple and efficient functional code**. Let's see an example: > -- Functional style > require "fun" () > -- calculate sum(x for x^2 in 1..n) > n = 100 > print(reduce(operator.add, 0, map(function(x) return x^2 end, range(n)))) 328350 > -- Object-oriented style > local fun = require "fun" > -- calculate sum(x for x^2 in 1..n) > print(fun.range(n):map(function(x) return x^2 end):reduce(operator.add, 0)) 328350 **Lua Fun** takes full advantage of the innovative **tracing JIT compiler** to achieve transcendental performance on nested functional expressions. 
Functional compositions and high-order functions can be translated into **efficient machine code**. Can you believe it? Just try to run the example above with ``luajit -jdump`` and see what happens: -- skip some initilization code -- ->LOOP: 0bcaffd0 movaps xmm5, xmm7 0bcaffd3 movaps xmm7, xmm1 0bcaffd6 addsd xmm7, xmm5 0bcaffda ucomisd xmm7, xmm0 0bcaffde jnb 0x0bca0024 ->5 0bcaffe4 movaps xmm5, xmm7 0bcaffe7 mulsd xmm5, xmm5 0bcaffeb addsd xmm6, xmm5 0bcaffef jmp 0x0bcaffd0 ->LOOP ---- TRACE 1 stop -> loop The functional chain above was translated by LuaJIT to (!) **one machine loop** containing just 10 CPU assembly instructions without CALL. Unbelievable! Readable? Efficient? Can your Python/Ruby/V8 do better? Status ------ **Lua Fun** is in an early alpha stage. The library fully [documented] [Documentation] and covered with unit tests. [![Build Status](https://travis-ci.org/luafun/luafun.png)] (https://travis-ci.org/luafun/luafun) LuaJIT 2.1 alpha is recommended. The library designed in mind of fact that [LuaJIT traces tail-, up- and down-recursion][LuaJIT-Recursion] and has a lot of [byte code optimizations][LuaJIT-Optimizations]. Lua 5.1-5.3 are also supported. This is **master** (development) branch. API may be changed without any special notice. Please use **stable** branch for your production deployments. If you still want to use **master**, please don't forget to grep `git log` for *Incompatible API changes* message. Thanks! Please check out [documentation][Documentation] for more information. Misc ---- **Lua Fun** is distributed under the [MIT/X11 License] - (same as Lua and LuaJIT). The library was written to use with [Tarantool] - an efficient in-memory store and an asynchronous Lua application server. 
See Also -------- * [Documentation] * [RockSpec] * [RPM/DEB packages](https://packagecloud.io/rtsisyk/master) * lua-l@lists.lua.org * luajit@freelists.org * roman@tsisyk.com [Lua]: http://www.lua.org/ [LuaJIT]: http://luajit.org/luajit.html [LuaJIT-Recursion]: http://lambda-the-ultimate.org/node/3851#comment-57679 [LuaJIT-Optimizations]: http://wiki.luajit.org/Optimizations [MIT/X11 License]: http://opensource.org/licenses/MIT [Tarantool]: http://github.com/tarantool/tarantool [Getting Started]: https://luafun.github.io/getting_started.html [Documentation]: http://luafun.github.io/ [RockSpec]: https://raw.github.com/luafun/luafun/master/fun-scm-1.rockspec Please **"Star"** the project on GitHub to help it to survive! Thanks! ***** **Lua Fun**. Simple, Efficient and Functional. In Lua. With JIT. tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/COPYING.md0000644000000000000000000000241413306562377021476 0ustar rootrootCopying ======= **Lua Fun** source codes, logo and documentation are distributed under the **[MIT/X11 License]** - same as Lua and LuaJIT. Copyright (c) 2013-2017 Roman Tsisyk Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. [MIT/X11 License]: http://www.opensource.org/licenses/mit-license.php tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/CONTRIBUTING.md0000644000000000000000000000073613306562377022302 0ustar rootrootContributing ============ We'd love for you to contribute to the project and make **Lua Fun** even better than it is today! Filling Issues --------------- Please file bugs reports and feature requests using [GitHub Issues]. [GitHub Issues]: https://github.com/luafun/luafun/issues Making Changes -------------- If you want to contribute code, please fork the project on [GitHub], make changes in branch and send a pull request. [GitHub]: https://github.com/luafun/luafun tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/.travis.yml0000644000000000000000000000332013306562377022152 0ustar rootrootsudo: false language: C services: - docker env: global: - PRODUCT=lua-fun matrix: - OS=el DIST=7 - OS=fedora DIST=23 - OS=fedora DIST=24 - OS=ubuntu DIST=xenial - OS=ubuntu DIST=yakkety - OS=debian DIST=stretch before_deploy: - git clone https://github.com/packpack/packpack.git packpack - ./packpack/packpack deploy: provider: packagecloud username: ${PACKAGECLOUD_USER} repository: ${PACKAGECLOUD_REPO} token: ${PACKAGECLOUD_TOKEN} dist: ${OS}/${DIST} package_glob: build/*.{deb,rpm} skip_cleanup: true on: branch: master condition: -n "${OS}" && -n "${DIST}" && -n "${PACKAGECLOUD_TOKEN}" after_deploy: # Prune old packages from PackageCloud, keep only the last two - pip install -r ./packpack/tools/requirements.txt - python ./packpack/tools/packagecloud prune ${PACKAGECLOUD_USER}/${PACKAGECLOUD_REPO} deb ${OS} ${DIST} --keep 2 - python ./packpack/tools/packagecloud prune ${PACKAGECLOUD_USER}/${PACKAGECLOUD_REPO} rpm ${OS} ${DIST} --keep 
2 cache: directories: - $HOME/lua-5.3.2 addons: apt: packages: - lua5.1 - lua5.2 - luajit # Ubuntu Precise on Travis doesn't have lua5.3 package install: - | [ -e ${HOME}/lua-5.3.2/src/lua ] || (\ wget http://www.lua.org/ftp/lua-5.3.2.tar.gz -c && \ tar xzf lua-5.3.2.tar.gz -C ${HOME} && \ make -j -C ${HOME}/lua-5.3.2 linux \ ) script: - cd tests - LUAJIT=`echo /usr/bin/luajit* | cut -f 1 -d ' '` - ${LUAJIT} -v - ${LUAJIT} runtest *.lua - lua5.1 -v - lua5.1 runtest *.lua - lua5.2 -v - lua5.2 runtest *.lua - LUA53=${HOME}/lua-5.3.2/src/lua - ${LUA53} -v - ${LUA53} runtest *.lua - cd .. notifications: email: true tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/0000755000000000000000000000000013306562377021205 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/compositions.lua0000644000000000000000000000403513306562377024440 0ustar rootroot-------------------------------------------------------------------------------- -- zip -------------------------------------------------------------------------------- dump(zip({"a", "b", "c", "d"}, {"one", "two", "three"})) --[[test a one b two c three --test]] dump(zip()) --[[test --test]] dump(zip(range(0))) --[[test error: invalid iterator --test]] dump(zip(range(0), range(0))) --[[test error: invalid iterator --test]] print(nth(10, zip(range(1, 100, 3), range(1, 100, 5), range(1, 100, 7)))) --[[test 28 46 64 --test]] dump(zip(partition(function(x) return x > 7 end, range(1, 15, 1)))) --[[test 8 1 9 2 10 3 11 4 12 5 13 6 14 7 --test]] -------------------------------------------------------------------------------- -- cycle -------------------------------------------------------------------------------- dump(take(15, cycle({"a", "b", "c", "d", "e"}))) --[[test a b c d e a b c d e a b c d e --test]] dump(take(15, cycle(range(5)))) --[[test 1 2 3 4 5 1 2 3 4 5 1 2 3 4 5 --test]] dump(take(15, cycle(zip(range(5), {"a", "b", "c", "d", "e"})))) --[[test 1 a 2 b 3 c 4 d 5 e 1 a 2 b 3 c 4 d 5 e 1 a 2 b 3 c 4 d 
5 e --test]] -------------------------------------------------------------------------------- -- chain -------------------------------------------------------------------------------- dump(chain(range(2))) --[[test 1 2 --test]] dump(chain(range(2), {"a", "b", "c"}, {"one", "two", "three"})) --[[test 1 2 a b c one two three --test]] dump(take(15, cycle(chain(enumerate({"a", "b", "c"}), {"one", "two", "three"})))) --[[test 1 a 2 b 3 c one two three 1 a 2 b 3 c one two three 1 a 2 b 3 c --test]] local tab = {} local keys = {} for _it, k, v in chain({ a = 11, b = 12, c = 13}, { d = 21, e = 22 }) do tab[k] = v table.insert(keys, k) end table.sort(keys) for _, key in ipairs(keys) do print(key, tab[key]) end --[[test a 11 b 12 c 13 d 21 e 22 --test]] dump(chain(range(0), range(0), range(0))) --[[test error: invalid iterator --test]] dump(chain(range(0), range(1), range(0))) --[[test error: invalid iterator --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/slicing.lua0000644000000000000000000001147213306562377023345 0ustar rootroot-------------------------------------------------------------------------------- -- nth -------------------------------------------------------------------------------- print(nth(2, range(5))) --[[test 2 --test]] print(nth(10, range(5))) --[[test nil --test]] print(nth(2, range(0))) --[[test nil --test]] print(nth(2, {"a", "b", "c", "d", "e"})) --[[test b --test]] print(nth(2, enumerate({"a", "b", "c", "d", "e"}))) --[[test 2 b --test]] print(nth(1, "abcdef")) --[[test a --test]] print(nth(2, "abcdef")) --[[test b --test]] print(nth(6, "abcdef")) --[[test f --test]] print(nth(0, "abcdef")) --[[test error: invalid first argument to nth --test]] print(nth(7, "abcdef")) --[[test nil --test]] -------------------------------------------------------------------------------- -- head -------------------------------------------------------------------------------- print(head({"a", "b", "c", "d", "e"})) --[[test a --test]] print(head({})) 
--[[test error: head: iterator is empty --test]] print(head(range(0))) --[[test error: head: iterator is empty --test]] print(head(enumerate({"a", "b"}))) --[[test 1 a --test]] print(car == head) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- tail -------------------------------------------------------------------------------- dump(tail({"a", "b", "c", "d", "e"})) --[[test b c d e --test]] dump(tail({})) --[[test --test]] dump(tail(range(0))) --[[test --test]] dump(tail(enumerate({"a", "b"}))) --[[test 2 b --test]] print(cdr == tail) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- take_n -------------------------------------------------------------------------------- dump(take_n(0, duplicate(48))) --[[test --test]] dump(take_n(5, range(0))) --[[test --test]] dump(take_n(1, duplicate(48))) --[[test 48 --test]] dump(take_n(5, duplicate(48))) --[[test 48 48 48 48 48 --test]] dump(take_n(5, enumerate(duplicate('x')))) --[[test 1 x 2 x 3 x 4 x 5 x --test]] -------------------------------------------------------------------------------- -- take_while -------------------------------------------------------------------------------- dump(take_while(function(x) return x < 5 end, range(10))) --[[test 1 2 3 4 --test]] dump(take_while(function(x) return x < 5 end, range(0))) --[[test --test]] dump(take_while(function(x) return x > 100 end, range(10))) --[[test --test]] dump(take_while(function(i, a) return i ~=a end, enumerate({5, 2, 1, 3, 4}))) --[[test 1 5 --test]] -------------------------------------------------------------------------------- -- take -------------------------------------------------------------------------------- dump(take(function(x) return x < 5 end, range(10))) --[[test 1 2 3 4 --test]] dump(take(5, duplicate(48))) --[[test 48 48 48 48 48 --test]] 
-------------------------------------------------------------------------------- -- drop_n -------------------------------------------------------------------------------- dump(drop_n(5, range(10))) --[[test 6 7 8 9 10 --test]] dump(drop_n(0, range(5))) --[[test 1 2 3 4 5 --test]] dump(drop_n(5, range(0))) --[[test --test]] dump(drop_n(2, enumerate({'a', 'b', 'c', 'd', 'e'}))) --[[test 3 c 4 d 5 e --test]] -------------------------------------------------------------------------------- -- drop_while -------------------------------------------------------------------------------- dump(drop_while(function(x) return x < 5 end, range(10))) --[[test 5 6 7 8 9 10 --test]] dump(drop_while(function(x) return x < 5 end, range(0))) --[[test --test]] dump(drop_while(function(x) return x > 100 end, range(10))) --[[test 1 2 3 4 5 6 7 8 9 10 --test]] dump(drop_while(function(i, a) return i ~=a end, enumerate({5, 2, 1, 3, 4}))) --[[test 2 2 3 1 4 3 5 4 --test]] dump(drop_while(function(i, a) return i ~=a end, zip({1, 2, 3, 4, 5}, {5, 4, 3, 2, 1}))) --[[test 3 3 4 2 5 1 --test]] -------------------------------------------------------------------------------- -- drop -------------------------------------------------------------------------------- dump(drop(5, range(10))) --[[test 6 7 8 9 10 --test]] dump(drop(function(x) return x < 5 end, range(10))) --[[test 5 6 7 8 9 10 --test]] -------------------------------------------------------------------------------- -- span -------------------------------------------------------------------------------- dump(zip(span(function(x) return x < 5 end, range(10)))) --[[test 1 5 2 6 3 7 4 8 --test]] dump(zip(span(5, range(10)))) --[[test 1 6 2 7 3 8 4 9 5 10 --test]] dump(zip(span(function(x) return x < 5 end, range(0)))) --[[test --test]] dump(zip(span(function(x) return x < 5 end, range(5)))) --[[test 1 5 --test]] print(split == span) -- an alias --[[test true --test]] print(split_at == span) -- an alias --[[test true --test]] 
tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/transformations.lua0000644000000000000000000000270213306562377025142 0ustar rootroot-------------------------------------------------------------------------------- -- map -------------------------------------------------------------------------------- fun = function(...) return 'map', ... end dump(map(fun, range(0))) --[[test --test]] dump(map(fun, range(4))) --[[test map 1 map 2 map 3 map 4 --test]] dump(map(fun, enumerate({"a", "b", "c", "d", "e"}))) --[[test map 1 a map 2 b map 3 c map 4 d map 5 e --test]] dump(map(function(x) return 2 * x end, range(4))) --[[test 2 4 6 8 --test]] fun = nil --[[test --test]] -------------------------------------------------------------------------------- -- enumerate -------------------------------------------------------------------------------- dump(enumerate({"a", "b", "c", "d", "e"})) --[[test 1 a 2 b 3 c 4 d 5 e --test]] dump(enumerate(enumerate(enumerate({"a", "b", "c", "d", "e"})))) --[[test 1 1 1 a 2 2 2 b 3 3 3 c 4 4 4 d 5 5 5 e --test]] dump(enumerate(zip({"one", "two", "three", "four", "five"}, {"a", "b", "c", "d", "e"}))) --[[test 1 one a 2 two b 3 three c 4 four d 5 five e --test]] -------------------------------------------------------------------------------- -- intersperse -------------------------------------------------------------------------------- dump(intersperse("x", {})) dump(intersperse("x", {"a", "b", "c", "d", "e"})) --[[test a x b x c x d x e x --test]] dump(intersperse("x", {"a", "b", "c", "d", "e", "f"})) --[[test a x b x c x d x e x f x --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/generators.lua0000644000000000000000000000626613306562377024073 0ustar rootroot-------------------------------------------------------------------------------- -- range -------------------------------------------------------------------------------- dump(range(0)) print('--') for i=1,0 do print(i) end --[[test -- --test]] dump(range(0, 0)) 
print('--') for i=0,0 do print(i) end --[[test 0 -- 0 --test]] dump(range(5)) print('--') for i=1,5 do print(i) end --[[test 1 2 3 4 5 -- 1 2 3 4 5 --test]] dump(range(0, 5)) print('--') for i=0,5 do print(i) end --[[test 0 1 2 3 4 5 -- 0 1 2 3 4 5 --test]] dump(range(0, 5, 1)) print('--') for i=0,5,1 do print(i) end --[[test 0 1 2 3 4 5 -- 0 1 2 3 4 5 --test]] dump(range(0, 10, 2)) print('--') for i=0,10,2 do print(i) end --[[test 0 2 4 6 8 10 -- 0 2 4 6 8 10 --test]] dump(range(-5)) print('--') for i=-1,-5,-1 do print(i) end --[[test -1 -2 -3 -4 -5 -- -1 -2 -3 -4 -5 --test]] dump(range(0, -5, 1)) print('--') for i=0,-5,1 do print(i) end --[[test -- --test]] dump(range(0, -5, -1)) print('--') for i=0,-5,-1 do print(i) end --[[test 0 -1 -2 -3 -4 -5 -- 0 -1 -2 -3 -4 -5 --test]] dump(range(0, -10, -2)) print('--') for i=0,-10,-2 do print(i) end --[[test 0 -2 -4 -6 -8 -10 -- 0 -2 -4 -6 -8 -10 --test]] dump(range(1.2, 1.6, 0.1)) --[[test 1.2 1.3 1.4 1.5 --test]] -- Invalid step dump(range(0, 5, 0)) --[[test error: step must not be zero --test]] -------------------------------------------------------------------------------- -- duplicate -------------------------------------------------------------------------------- dump(take(5, duplicate(48))) --[[test 48 48 48 48 48 --test]] dump(take(5, duplicate(1,2,3,4,5))) --[[test 1 2 3 4 5 1 2 3 4 5 1 2 3 4 5 1 2 3 4 5 1 2 3 4 5 --test]] print(xrepeat == duplicate) -- an alias --[[test true --test]] print(replicate == duplicate) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- tabulate -------------------------------------------------------------------------------- dump(take(5, tabulate(function(x) return 2 * x end))) --[[test 0 2 4 6 8 --test]] -------------------------------------------------------------------------------- -- zeros -------------------------------------------------------------------------------- dump(take(5, zeros())) --[[test 0 0 0 0 0 
--test]] -------------------------------------------------------------------------------- -- ones -------------------------------------------------------------------------------- dump(take(5, ones())) --[[test 1 1 1 1 1 --test]] -------------------------------------------------------------------------------- -- rands -------------------------------------------------------------------------------- print(all(function(x) return x >= 0 and x < 1 end, take(5, rands()))) --[[test true --test]] dump(take(5, rands(0))) --[[test error: empty interval --test]] print(all(function(x) return math.floor(x) == x end, take(5, rands(10)))) --[[test true --test]] print(all(function(x) return math.floor(x) == x end, take(5, rands(1024)))) --[[test true --test]] dump(take(5, rands(0, 1))) --[[test 0 0 0 0 0 --test]] dump(take(5, rands(5, 6))) --[[test 5 5 5 5 5 --test]] print(all(function(x) return x >= 10 and x < 20 end, take(20, rands(10, 20)))) --[[test true --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/runtest0000755000000000000000000000605613306562377022646 0ustar rootroot#!/usr/bin/env lua package.path = "../?.lua;"..package.path require "fun" () function dump(gen, init, state) each(print, gen, init, state) end local unpack = rawget(table, "unpack") or unpack local loadstring = rawget(_G, "loadstring") or load function file_print(file, ...) local n, i = select("#",...) for i=1,n do local x = select(i, ...) 
if type(x) == "number" and math.floor(x) == math.ceil(x) then -- A special hack for Lua 5.3: remove .0 for integer x = string.match(select(i,...), '^-?%d+') end file:write(tostring(x)) if i~=n then file:write(' ') end end file:write('\n') end local globals = {} setmetatable(_G, { __newindex = function(t,k,v) local info = debug.getinfo(2, "S") if info.short_src:sub(1,7) ~= '[string' then local file = info.short_src local func = debug.getinfo(2, "n").name or "" local line = info.linedefined globals[file..':'..line..':'..k] = {file, line, func, k} end rawset(t, k, v) end }) local function process(test_name) io.write("Testing ", test_name, "\n") local new_name = test_name..".new" local test_file = io.open(test_name, 'r') local content = test_file:read("*a"); test_file:close() local new_file = io.open(new_name, 'w') local prev_print = print print = function(...) file_print(new_file, ...) end io.flush() local expr for expr in content:gmatch("(.-)%s*--%[%[test.-test%]%]") do new_file:write(expr) new_file:write("\n--[[test\n") local res, err = loadstring(expr) if res then res, err = pcall(res, expr) end if not res then new_file:write('error: ', err:match(".-:%d+:%s*(.*)"), "\n") end new_file:write("--test]]") end new_file:write("\n") new_file:close() print = prev_print local r = os.execute(string.format('diff -U4 "%s" "%s" 2>&1', test_name, new_name)) if r then os.remove(new_name) return true else return false end end if #arg <= 0 then io.write("Usage: runtest *.lua", "\n") os.exit(1) end local failed, i = {} for i=1,#arg,1 do local test_name = arg[i] if not process(test_name) then table.insert(failed, test_name) end end if #failed > 0 then io.write("\n") io.write("Failed tests:", "\n") for _k,test_name in ipairs(failed) do io.write(" ", test_name, "\n") end io.write("\n", "Please review *.new files and update tests", "\n") end if next(globals) then io.write("\n") io.write("Some global variables have been declared by mistake:", "\n") for k, pollution in pairs(globals) do 
local file, line, func, var = unpack(pollution) io.write(file..":"..line.." function "..func.."() = var '"..var.."'", "\n") end io.write("\n", "Please declare them with the local statement", "\n") elseif #failed == 0 then io.write("All tests have passed!", "\n") os.exit(0) end tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/filtering.lua0000644000000000000000000000433513306562377023700 0ustar rootroot-------------------------------------------------------------------------------- -- filter -------------------------------------------------------------------------------- dump(filter(function(x) return x % 3 == 0 end, range(10))) --[[test 3 6 9 --test]] dump(filter(function(x) return x % 3 == 0 end, range(0))) --[[test --test]] dump(take(5, filter(function(i, x) return i % 3 == 0 end, enumerate(duplicate('x'))))) --[[test 3 x 6 x 9 x 12 x 15 x --test]] function filter_fun(a, b, c) if a % 16 == 0 then return true else return false end end function test3(a, b, c) return a, c, b end n = 50 dump(filter(filter_fun, map(test3, zip(range(0, n, 1), range(0, n, 2), range(0, n, 3))))) --[[test 0 0 0 16 48 32 --test]] print(remove_if == filter) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- grep -------------------------------------------------------------------------------- lines_to_grep = { [[Lorem ipsum dolor sit amet, consectetur adipisicing elit, ]], [[sed do eiusmod tempor incididunt ut labore et dolore magna ]], [[aliqua. Ut enim ad minim veniam, quis nostrud exercitation ]], [[ullamco laboris nisi ut aliquip ex ea commodo consequat.]], [[Duis aute irure dolor in reprehenderit in voluptate velit ]], [[esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint ]], [[occaecat cupidatat non proident, sunt in culpa qui officia ]], [[deserunt mollit anim id est laborum.]] } dump(grep("lab", lines_to_grep)) --[[test sed do eiusmod tempor incididunt ut labore et dolore magna ullamco laboris nisi ut aliquip ex ea commodo consequat. deserunt mollit anim id est laborum. --test]] lines_to_grep = { [[Emily]], [[Chloe]], [[Megan]], [[Jessica]], [[Emma]], [[Sarah]], [[Elizabeth]], [[Sophie]], [[Olivia]], [[Lauren]] } dump(grep("^Em", lines_to_grep)) --[[test Emily Emma --test]] -------------------------------------------------------------------------------- -- partition -------------------------------------------------------------------------------- dump(zip(partition(function(i, x) return i % 3 == 0 end, range(10)))) --[[test 3 1 6 2 9 4 --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/operators.lua0000644000000000000000000001101313306562377023722 0ustar rootroot-- -- All these functions are fully covered by Lua tests. -- This test just checks that all functions were defined correctly. 
-- print(op == operator) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- Comparison operators -------------------------------------------------------------------------------- local comparators = { 'le', 'lt', 'eq', 'ne', 'ge', 'gt' } for _k, op in iter(comparators) do print('op', op) print('==') print('num:') print(operator[op](0, 1)) print(operator[op](1, 0)) print(operator[op](0, 0)) print('str:') print(operator[op]("abc", "cde")) print(operator[op]("cde", "abc")) print(operator[op]("abc", "abc")) print('') end --[[test op le == num: true false true str: true false true op lt == num: true false false str: true false false op eq == num: false false true str: false false true op ne == num: true true false str: true true false op ge == num: false true true str: false true true op gt == num: false true false str: false true false --test]] -------------------------------------------------------------------------------- -- Arithmetic operators -------------------------------------------------------------------------------- print(operator.add(-1.0, 1.0)) print(operator.add(0, 0)) print(operator.add(12, 2)) --[[test 0 0 14 --test]] print(operator.div(10, 2)) print(operator.div(10, 3)) print(operator.div(-10, 3)) --[[test 5 3.3333333333333 -3.3333333333333 --test]] print(operator.floordiv(10, 3)) print(operator.floordiv(11, 3)) print(operator.floordiv(12, 3)) print(operator.floordiv(-10, 3)) print(operator.floordiv(-11, 3)) print(operator.floordiv(-12, 3)) --[[test 3 3 4 -4 -4 -4 --test]] print(operator.intdiv(10, 3)) print(operator.intdiv(11, 3)) print(operator.intdiv(12, 3)) print(operator.intdiv(-10, 3)) print(operator.intdiv(-11, 3)) print(operator.intdiv(-12, 3)) --[[test 3 3 4 -3 -3 -4 --test]] print(operator.truediv(10, 3)) print(operator.truediv(11, 3)) print(operator.truediv(12, 3)) print(operator.truediv(-10, 3)) print(operator.truediv(-11, 3)) print(operator.truediv(-12, 3)) --[[test 
3.3333333333333 3.6666666666667 4 -3.3333333333333 -3.6666666666667 -4 --test]] print(operator.mod(10, 2)) print(operator.mod(10, 3)) print(operator.mod(-10, 3)) --[[test 0 1 2 --test]] print(operator.mul(10, 0.1)) print(operator.mul(0, 0)) print(operator.mul(-1, -1)) --[[test 1 0 1 --test]] print(operator.neq(1)) print(operator.neq(0) == 0) print(operator.neq(-0) == 0) print(operator.neq(-1)) --[[test -1 true true 1 --test]] print(operator.unm(1)) print(operator.unm(0) == 0) print(operator.unm(-0) == 0) print(operator.unm(-1)) --[[test -1 true true 1 --test]] print(operator.pow(2, 3)) print(operator.pow(0, 10)) print(operator.pow(2, 0)) --[[test 8 0 1 --test]] print(operator.sub(2, 3)) print(operator.sub(0, 10)) print(operator.sub(2, 2)) --[[test -1 -10 0 --test]] -------------------------------------------------------------------------------- -- String operators -------------------------------------------------------------------------------- print(operator.concat("aa", "bb")) print(operator.concat("aa", "")) print(operator.concat("", "bb")) --[[test aabb aa bb --test]] print(operator.len("")) print(operator.len("ab")) print(operator.len("abcd")) --[[test 0 2 4 --test]] print(operator.length("")) print(operator.length("ab")) print(operator.length("abcd")) --[[test 0 2 4 --test]] ---------------------------------------------------------------------------- -- Logical operators ---------------------------------------------------------------------------- print(operator.land(true, true)) print(operator.land(true, false)) print(operator.land(false, true)) print(operator.land(false, false)) print(operator.land(1, 0)) print(operator.land(0, 1)) print(operator.land(1, 1)) print(operator.land(0, 0)) --[[test true false false false 0 1 1 0 --test]] print(operator.lor(true, true)) print(operator.lor(true, false)) print(operator.lor(false, true)) print(operator.lor(false, false)) print(operator.lor(1, 0)) print(operator.lor(0, 1)) print(operator.lor(1, 1)) 
print(operator.lor(0, 0)) --[[test true true true false 1 0 1 0 --test]] print(operator.lnot(true)) print(operator.lnot(false)) print(operator.lor(1)) print(operator.lor(0)) --[[test false true 1 0 --test]] print(operator.truth(true)) print(operator.truth(false)) print(operator.truth(1)) print(operator.truth(0)) print(operator.truth(nil)) print(operator.truth("")) print(operator.truth({})) --[[test true false true true false true true --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/basic.lua0000644000000000000000000001121413306562377022770 0ustar rootroot-------------------------------------------------------------------------------- -- iter -------------------------------------------------------------------------------- -- -- Arrays -- for _it, a in iter({1, 2, 3}) do print(a) end --[[test 1 2 3 --test]] for _it, a in iter(iter(iter({1, 2, 3}))) do print(a) end --[[test 1 2 3 --test]] for _it, a in wrap(wrap(iter({1, 2, 3}))) do print(a) end --[[test 1 2 3 --test]] for _it, a in wrap(wrap(ipairs({1, 2, 3}))) do print(a) end --[[test 1 2 3 --test]] for _it, a in iter({}) do print(a) end --[[test --test]] for _it, a in iter(iter(iter({}))) do print(a) end --[[test --test]] for _it, a in wrap(wrap(iter({}))) do print(a) end --[[test --test]] for _it, a in wrap(wrap(ipairs({}))) do print(a) end --[[test --test]] -- Check that ``iter`` for arrays is equivalent to ``ipairs`` local t = {1, 2, 3} gen1, param1, state1 = iter(t):unwrap() gen2, param2, state2 = ipairs(t) print(gen1 == gen2, param1 == param2, state1 == state2) --[[test true true true --test]] -- Test that ``wrap`` do nothing for wrapped iterators gen1, param1, state1 = iter({1, 2, 3}) gen2, param2, state2 = wrap(gen1, param1, state1):unwrap() print(gen1 == gen2, param1 == param2, state1 == state2) --[[test true true true --test]] -- -- Maps -- local t = {} for _it, k, v in iter({ a = 1, b = 2, c = 3}) do t[#t + 1] = k end table.sort(t) for _it, v in iter(t) do print(v) end --[[test a b c 
--test]] local t = {} for _it, k, v in iter(iter(iter({ a = 1, b = 2, c = 3}))) do t[#t + 1] = k end table.sort(t) for _it, v in iter(t) do print(v) end --[[test a b c --test]] for _it, k, v in iter({}) do print(k, v) end --[[test --test]] for _it, k, v in iter(iter(iter({}))) do print(k, v) end --[[test --test]] -- -- String -- for _it, a in iter("abcde") do print(a) end --[[test a b c d e --test]] for _it, a in iter(iter(iter("abcde"))) do print(a) end --[[test a b c d e --test]] for _it, a in iter("") do print(a) end --[[test --test]] for _it, a in iter(iter(iter(""))) do print(a) end --[[test --test]] -- -- Custom generators -- local function mypairs_gen(max, state) if (state >= max) then return nil end return state + 1, state + 1 end local function mypairs(max) return mypairs_gen, max, 0 end for _it, a in iter(mypairs(10)) do print(a) end --[[test 1 2 3 4 5 6 7 8 9 10 --test]] -- -- Invalid values -- for _it, a in iter(1) do print(a) end --[[test error: object 1 of type "number" is not iterable --test]] for _it, a in iter(1, 2, 3, 4, 5, 6, 7) do print(a) end --[[test error: object 1 of type "number" is not iterable --test]] -------------------------------------------------------------------------------- -- each -------------------------------------------------------------------------------- each(print, {1, 2, 3}) --[[test 1 2 3 --test]] each(print, iter({1, 2, 3})) --[[test 1 2 3 --test]] each(print, {}) --[[test --test]] each(print, iter({})) --[[test --test]] local keys, vals = {}, {} each(function(k, v) keys[#keys + 1] = k vals[#vals + 1] = v end, { a = 1, b = 2, c = 3}) table.sort(keys) table.sort(vals) each(print, keys) each(print, vals) --[[test a b c 1 2 3 --test]] each(print, "abc") --[[test a b c --test]] each(print, iter("abc")) --[[test a b c --test]] print(for_each == each) -- an alias --[[test true --test]] print(foreach == each) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- 
totable -------------------------------------------------------------------------------- local tab = totable(range(5)) print(type(tab), #tab) each(print, tab) --[[test table 5 1 2 3 4 5 --test]] local tab = totable(range(0)) print(type(tab), #tab) --[[test table 0 --test]] local tab = totable("abcdef") print(type(tab), #tab) each(print, tab) --[[test table 6 a b c d e f --test]] local unpack = rawget(table, "unpack") or unpack local tab = totable({ 'a', {'b', 'c'}, {'d', 'e', 'f'}}) print(type(tab), #tab) each(print, tab[1]) each(print, map(unpack, drop(1, tab))) --[[test table 3 a b c d e f --test]] -------------------------------------------------------------------------------- -- tomap -------------------------------------------------------------------------------- local tab = tomap(zip(range(1, 7), 'abcdef')) print(type(tab), #tab) each(print, iter(tab)) --[[test table 6 a b c d e f --test]] local tab = tomap({a = 1, b = 2, c = 3}) print(type(tab), #tab) local t = {} for _it, k, v in iter(tab) do t[v] = k end table.sort(t) for k, v in ipairs(t) do print(k, v) end --[[test table 0 1 a 2 b 3 c --test]] local tab = tomap(enumerate("abcdef")) print(type(tab), #tab) each(print, tab) --[[test table 6 a b c d e f --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/reducing.lua0000644000000000000000000001201613306562377023510 0ustar rootroot-------------------------------------------------------------------------------- -- foldl -------------------------------------------------------------------------------- print(foldl(function(acc, x) return acc + x end, 0, range(5))) --[[test 15 --test]] print(foldl(operator.add, 0, range(5))) --[[test 15 --test]] print(foldl(function(acc, x, y) return acc + x * y; end, 0, zip(range(1, 5), {4, 3, 2, 1}))) --[[test 20 --test]] print(reduce == foldl) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- length 
-------------------------------------------------------------------------------- print(length({"a", "b", "c", "d", "e"})) --[[test 5 --test]] print(length({})) --[[test 0 --test]] print(length(range(0))) --[[test 0 --test]] -------------------------------------------------------------------------------- -- is_null -------------------------------------------------------------------------------- print(is_null({"a", "b", "c", "d", "e"})) --[[test false --test]] print(is_null({})) --[[test true --test]] print(is_null(range(0))) --[[test true --test]] local gen, init, state = range(5) print(is_null(gen, init, state)) dump(gen, init, state) --[[test false 1 2 3 4 5 --test]] -------------------------------------------------------------------------------- -- is_prefix_of -------------------------------------------------------------------------------- print(is_prefix_of({"a"}, {"a", "b", "c"})) --[[test true --test]] print(is_prefix_of({}, {"a", "b", "c"})) --[[test true --test]] print(is_prefix_of({}, {})) --[[test true --test]] print(is_prefix_of({"a"}, {})) --[[test false --test]] print(is_prefix_of(range(5), range(6))) --[[test true --test]] print(is_prefix_of(range(6), range(5))) --[[test false --test]] -------------------------------------------------------------------------------- -- all -------------------------------------------------------------------------------- print(all(function(x) return x end, {true, true, true, true})) --[[test true --test]] print(all(function(x) return x end, {true, true, true, false})) --[[test false --test]] print(all(function(x) return x end, {})) --[[test true --test]] print(every == all) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- any -------------------------------------------------------------------------------- print(any(function(x) return x end, {false, false, false, false})) --[[test false --test]] print(any(function(x) return x end, {false, false, false, 
true})) --[[test true --test]] print(any(function(x) return x end, {})) --[[test false --test]] print(some == any) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- sum -------------------------------------------------------------------------------- print(sum(range(1, 5))) --[[test 15 --test]] print(sum(range(1, 5, 0.5))) --[[test 27 --test]] print(sum(range(0))) --[[test 0 --test]] -------------------------------------------------------------------------------- -- product -------------------------------------------------------------------------------- print(product(range(1, 5))) --[[test 120 --test]] print(product(range(1, 5, 0.5))) --[[test 7087.5 --test]] print(product(range(0))) --[[test 1 --test]] -------------------------------------------------------------------------------- -- min -------------------------------------------------------------------------------- print(min(range(1, 10, 1))) --[[test 1 --test]] print(min({"f", "d", "c", "d", "e"})) --[[test c --test]] print(min({})) --[[test error: min: iterator is empty --test]] print(minimum == min) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- min_by -------------------------------------------------------------------------------- function min_cmp(a, b) if -a < -b then return a else return b end end --[[test --test]] print(min_by(min_cmp, range(1, 10, 1))) --[[test 10 --test]] print(min_by(min_cmp, {})) --[[test error: min: iterator is empty --test]] print(minimum_by == min_by) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- max -------------------------------------------------------------------------------- print(max(range(1, 10, 1))) --[[test 10 --test]] print(max({"f", "d", "c", "d", "e"})) --[[test f --test]] print(max({})) --[[test error: max: iterator is empty --test]] print(maximum == max) -- 
an alias --[[test true --test]] -------------------------------------------------------------------------------- -- max_by -------------------------------------------------------------------------------- function max_cmp(a, b) if -a > -b then return a else return b end end --[[test --test]] print(max_by(max_cmp, range(1, 10, 1))) --[[test 1 --test]] print(max_by(max_cmp, {})) --[[test error: max: iterator is empty --test]] print(maximum_by == maximum_by) -- an alias --[[test true --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/.gitignore0000644000000000000000000000000613306562377023171 0ustar rootroot*.new tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/tests/indexing.lua0000644000000000000000000000244713306562377023524 0ustar rootroot-------------------------------------------------------------------------------- -- index -------------------------------------------------------------------------------- print(index(2, range(5))) --[[test 2 --test]] print(index(10, range(5))) --[[test nil --test]] print(index(2, range(0))) --[[test nil --test]] print(index("b", {"a", "b", "c", "d", "e"})) --[[test 2 --test]] print(index(1, enumerate({"a", "b", "c", "d", "e"}))) --[[test 1 --test]] print(index("b", "abcdef")) --[[test 2 --test]] print(index_of == index) -- an alias --[[test true --test]] print(elem_index == index) -- an alias --[[test true --test]] -------------------------------------------------------------------------------- -- indexes -------------------------------------------------------------------------------- dump(indexes("a", {"a", "b", "c", "d", "e", "a", "b", "c", "d", "a", "a"})) --[[test 1 6 10 11 --test]] dump(indexes("f", {"a", "b", "c", "d", "e", "a", "b", "c", "d", "a", "a"})) --[[test --test]] dump(indexes("f", {})) --[[test --test]] dump(indexes(1, enumerate({"a", "b", "c", "d", "e"}))) --[[test 1 --test]] print(indices == indexes) -- an alias --[[test true --test]] print(elem_indexes == indexes) -- an alias --[[test true --test]] 
print(elem_indices == indexes) -- an alias --[[test true --test]] tarantool_1.9.1.26.g63eb81e3c/third_party/luafun/.gitignore0000644000000000000000000000003413306562377022030 0ustar rootroot*~ temp/ fun.lua.c 5.?-fun/ tarantool_1.9.1.26.g63eb81e3c/third_party/compat/0000775000000000000000000000000013306560010020014 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/compat/unwind.h0000664000000000000000000002161013306560010021471 0ustar rootroot/* Exception handling and frame unwind runtime interface routines. Copyright (C) 2001, 2003, 2004, 2006 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* As a special exception, if you include this header file into source files compiled by GCC, this header file does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ /* This is derived from the C++ ABI for IA-64. Where we diverge for cross-architecture compatibility are noted with "@@@". */ #ifndef _UNWIND_H #define _UNWIND_H #ifndef HIDE_EXPORTS #pragma GCC visibility push(default) #endif #ifdef __cplusplus extern "C" { #endif /* Level 1: Base ABI */ /* @@@ The IA-64 ABI uses uint64 throughout. 
Most places this is inefficient for 32-bit and smaller machines. */ typedef unsigned _Unwind_Word __attribute__((__mode__(__word__))); typedef signed _Unwind_Sword __attribute__((__mode__(__word__))); #if defined(__ia64__) && defined(__hpux__) typedef unsigned _Unwind_Ptr __attribute__((__mode__(__word__))); #else typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); #endif typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); /* @@@ The IA-64 ABI uses a 64-bit word to identify the producer and consumer of an exception. We'll go along with this for now even on 32-bit machines. We'll need to provide some other option for 16-bit machines and for machines with > 8 bits per byte. */ typedef unsigned _Unwind_Exception_Class __attribute__((__mode__(__DI__))); /* The unwind interface uses reason codes in several contexts to identify the reasons for failures or other actions. */ typedef enum { _URC_NO_REASON = 0, _URC_FOREIGN_EXCEPTION_CAUGHT = 1, _URC_FATAL_PHASE2_ERROR = 2, _URC_FATAL_PHASE1_ERROR = 3, _URC_NORMAL_STOP = 4, _URC_END_OF_STACK = 5, _URC_HANDLER_FOUND = 6, _URC_INSTALL_CONTEXT = 7, _URC_CONTINUE_UNWIND = 8 } _Unwind_Reason_Code; /* The unwind interface uses a pointer to an exception header object as its representation of an exception being thrown. In general, the full representation of an exception object is language- and implementation-specific, but it will be prefixed by a header understood by the unwind interface. */ struct _Unwind_Exception; typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code, struct _Unwind_Exception *); struct _Unwind_Exception { _Unwind_Exception_Class exception_class; _Unwind_Exception_Cleanup_Fn exception_cleanup; _Unwind_Word private_1; _Unwind_Word private_2; /* @@@ The IA-64 ABI says that this structure must be double-word aligned. Taking that literally does not make much sense generically. Instead we provide the maximum alignment required by any type for the machine. 
*/ } __attribute__((__aligned__)); /* The ACTIONS argument to the personality routine is a bitwise OR of one or more of the following constants. */ typedef int _Unwind_Action; #define _UA_SEARCH_PHASE 1 #define _UA_CLEANUP_PHASE 2 #define _UA_HANDLER_FRAME 4 #define _UA_FORCE_UNWIND 8 #define _UA_END_OF_STACK 16 /* This is an opaque type used to refer to a system-specific data structure used by the system unwinder. This context is created and destroyed by the system, and passed to the personality routine during unwinding. */ struct _Unwind_Context; /* Raise an exception, passing along the given exception object. */ extern _Unwind_Reason_Code _Unwind_RaiseException (struct _Unwind_Exception *); /* Raise an exception for forced unwinding. */ typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *, struct _Unwind_Context *, void *); extern _Unwind_Reason_Code _Unwind_ForcedUnwind (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); /* Helper to invoke the exception_cleanup routine. */ extern void _Unwind_DeleteException (struct _Unwind_Exception *); /* Resume propagation of an existing exception. This is used after e.g. executing cleanup code, and not to implement rethrowing. */ extern void _Unwind_Resume (struct _Unwind_Exception *); /* @@@ Resume propagation of an FORCE_UNWIND exception, or to rethrow a normal exception that was handled. */ extern _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (struct _Unwind_Exception *); /* @@@ Use unwind data to perform a stack backtrace. The trace callback is called for every stack frame in the call chain, but no cleanup actions are performed. */ typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) (struct _Unwind_Context *, void *); extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *); /* These functions are used for communicating information about the unwind context (i.e. 
the unwind descriptors and the user register state) between the unwind library and the personality routine and landing pad. Only selected registers maybe manipulated. */ extern _Unwind_Word _Unwind_GetGR (struct _Unwind_Context *, int); extern void _Unwind_SetGR (struct _Unwind_Context *, int, _Unwind_Word); extern _Unwind_Ptr _Unwind_GetIP (struct _Unwind_Context *); extern _Unwind_Ptr _Unwind_GetIPInfo (struct _Unwind_Context *, int *); extern void _Unwind_SetIP (struct _Unwind_Context *, _Unwind_Ptr); /* @@@ Retrieve the CFA of the given context. */ extern _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); extern void *_Unwind_GetLanguageSpecificData (struct _Unwind_Context *); extern _Unwind_Ptr _Unwind_GetRegionStart (struct _Unwind_Context *); /* The personality routine is the function in the C++ (or other language) runtime library which serves as an interface between the system unwind library and language-specific exception handling semantics. It is specific to the code fragment described by an unwind info block, and it is always referenced via the pointer in the unwind info block, and hence it has no ABI-specified name. Note that this implies that two different C++ implementations can use different names, and have different contents in the language specific data area. Moreover, that the language specific data area contains no version info because name of the function invoked provides more effective versioning by detecting at link time the lack of code to handle the different data format. */ typedef _Unwind_Reason_Code (*_Unwind_Personality_Fn) (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *, struct _Unwind_Context *); /* @@@ The following alternate entry points are for setjmp/longjmp based unwinding. 
*/ struct SjLj_Function_Context; extern void _Unwind_SjLj_Register (struct SjLj_Function_Context *); extern void _Unwind_SjLj_Unregister (struct SjLj_Function_Context *); extern _Unwind_Reason_Code _Unwind_SjLj_RaiseException (struct _Unwind_Exception *); extern _Unwind_Reason_Code _Unwind_SjLj_ForcedUnwind (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *); extern void _Unwind_SjLj_Resume (struct _Unwind_Exception *); extern _Unwind_Reason_Code _Unwind_SjLj_Resume_or_Rethrow (struct _Unwind_Exception *); /* @@@ The following provide access to the base addresses for text and data-relative addressing in the LDSA. In order to stay link compatible with the standard ABI for IA-64, we inline these. */ #ifdef __ia64__ #include static inline _Unwind_Ptr _Unwind_GetDataRelBase (struct _Unwind_Context *_C) { /* The GP is stored in R1. */ return _Unwind_GetGR (_C, 1); } static inline _Unwind_Ptr _Unwind_GetTextRelBase (struct _Unwind_Context *_C __attribute__ ((__unused__))) { abort (); return 0; } /* @@@ Retrieve the Backing Store Pointer of the given context. */ extern _Unwind_Word _Unwind_GetBSP (struct _Unwind_Context *); #else extern _Unwind_Ptr _Unwind_GetDataRelBase (struct _Unwind_Context *); extern _Unwind_Ptr _Unwind_GetTextRelBase (struct _Unwind_Context *); #endif /* @@@ Given an address, return the entry point of the function that contains it. */ extern void * _Unwind_FindEnclosingFunction (void *pc); #ifdef __cplusplus } #endif #ifndef HIDE_EXPORTS #pragma GCC visibility pop #endif #endif /* unwind.h */ tarantool_1.9.1.26.g63eb81e3c/third_party/compat/sys/0000775000000000000000000000000013306560010020632 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/compat/sys/bsd_time.h0000664000000000000000000001035013306560010022570 0ustar rootroot/* $OpenBSD: time.h,v 1.11 2000/10/10 13:36:48 itojun Exp $ */ /* $NetBSD: time.h,v 1.18 1996/04/23 10:29:33 mycroft Exp $ */ /* * Copyright (c) 1982, 1986, 1993 * The Regents of the University of California. 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)time.h 8.2 (Berkeley) 7/10/94 */ #ifndef _SYS_TTL_COMPAT_TIME_H_ #define _SYS_TTL_COMPAT_TIME_H_ #include #ifndef TIMEVAL_TO_TIMESPEC #define TIMEVAL_TO_TIMESPEC(tv, ts) { \ (ts)->tv_sec = (tv)->tv_sec; \ (ts)->tv_nsec = (tv)->tv_usec * 1000; \ } #endif #ifndef TIMESPEC_TO_TIMEVAL #define TIMESPEC_TO_TIMEVAL(tv, ts) { \ (tv)->tv_sec = (ts)->tv_sec; \ (tv)->tv_usec = (ts)->tv_nsec / 1000; \ } #endif /** * Enable BSD timer macros for non-BSD code. 
*/ #if !defined(__BSD) && !defined(__USE_BSD) /* Operations on timevals. */ #define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 #define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) #define timercmp(tvp, uvp, cmp) \ (((tvp)->tv_sec == (uvp)->tv_sec) ? \ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) #define timeradd(tvp, uvp, vvp) \ do { \ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \ if ((vvp)->tv_usec >= 1000000) { \ (vvp)->tv_sec++; \ (vvp)->tv_usec -= 1000000; \ } \ } while (0) #define timersub(tvp, uvp, vvp) \ do { \ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ if ((vvp)->tv_usec < 0) { \ (vvp)->tv_sec--; \ (vvp)->tv_usec += 1000000; \ } \ } while (0) #endif /* !defined(__BSD) && !defined(__USE_BSD) */ /* Operations on timespecs. Include if missing (one API-call check should suffice). */ #if !defined(timespecclear) #define timespecclear(tsp) (tsp)->tv_sec = (tsp)->tv_nsec = 0 #define timespecisset(tsp) ((tsp)->tv_sec || (tsp)->tv_nsec) #define timespeccmp(tsp, usp, cmp) \ (((tsp)->tv_sec == (usp)->tv_sec) ? 
\ ((tsp)->tv_nsec cmp (usp)->tv_nsec) : \ ((tsp)->tv_sec cmp (usp)->tv_sec)) #define timespecadd(tsp, usp, vsp) \ do { \ (vsp)->tv_sec = (tsp)->tv_sec + (usp)->tv_sec; \ (vsp)->tv_nsec = (tsp)->tv_nsec + (usp)->tv_nsec; \ if ((vsp)->tv_nsec >= 1000000000L) { \ (vsp)->tv_sec++; \ (vsp)->tv_nsec -= 1000000000L; \ } \ } while (0) #define timespecsub(tsp, usp, vsp) \ do { \ (vsp)->tv_sec = (tsp)->tv_sec - (usp)->tv_sec; \ (vsp)->tv_nsec = (tsp)->tv_nsec - (usp)->tv_nsec; \ if ((vsp)->tv_nsec < 0) { \ (vsp)->tv_sec--; \ (vsp)->tv_nsec += 1000000000L; \ } \ } while (0) #endif /* !defined(timespecclear) */ /* --- stuff got cut here - kostja, niels --- */ #endif /* !_SYS_TTL_COMPAT_TIME_H_ */ tarantool_1.9.1.26.g63eb81e3c/third_party/sptree.h0000664000000000000000000020535513306560010020216 0ustar rootroot/* * Copyright (C) 2012 Mail.RU * Copyright (C) 2010 Teodor Sigaev * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _SPTREE_H_ #define _SPTREE_H_ #include #include #include #include #if defined(__cplusplus) extern "C" { #endif /* defined(__cplusplus) */ #ifndef SPTREE_NODE_SELF /* * user could suggest pointer's storage himself */ typedef uint32_t spnode_t; #define SPNIL (0xffffffff) #define SPTREE_MIN_SIZE 64 typedef struct sptree_node_pointers { uint32_t left; /* sizeof(spnode_t) >= sizeof(sptree_node_pointers.left) !!! */ uint32_t right; } sptree_node_pointers; #define GET_SPNODE_LEFT(snp) ( (snp)->left ) #define SET_SPNODE_LEFT(snp, v) (snp)->left = (v) #define GET_SPNODE_RIGHT(snp) ( (snp)->right ) #define SET_SPNODE_RIGHT(snp, v) (snp)->right = (v) #endif /* SPTREE_NODE_SELF */ #ifndef alpha #define alpha ((double)0.75) #endif #define COUNTALPHA(size) floor(log((double)(size))/log((double)1.0/alpha)) #define _GET_SPNODE_LEFT(n) GET_SPNODE_LEFT( t->lrpointers + (n) ) #define _SET_SPNODE_LEFT(n, v) SET_SPNODE_LEFT( t->lrpointers + (n), (v) ) #define _GET_SPNODE_RIGHT(n) GET_SPNODE_RIGHT( t->lrpointers + (n) ) #define _SET_SPNODE_RIGHT(n, v) SET_SPNODE_RIGHT( t->lrpointers + (n), (v) ) #define ITHELEM(t, i) ( (char *) (t)->members + (t)->elemsize * (i) ) #define ELEMIDX(t, e) ( ((e) - (t)->members) / (t)->elemsize ) /* * makes definition of tree with methods, name should * be unique across all definitions. 
* * Methods: * void sptree_NAME_init(sptree_NAME *tree, size_t elemsize, void *array, * spnode_t array_len, spnode_t array_size, * int (*compare)(const void *key, const void *elem, void *arg), * int (*elemcompare)(const void *e1, const void *e2, void *arg), * void *arg) * * void sptree_NAME_replace(sptree_NAME *tree, void *value, void **p_oldvalue) * void sptree_NAME_delete(sptree_NAME *tree, void *value) * void* sptree_NAME_find(sptree_NAME *tree, void *key) * * spnode_t sptree_NAME_walk(sptree_NAME *t, void* array, spnode_t limit, spnode_t offset) * void sptree_NAME_walk_cb(sptree_NAME *t, int (*cb)(void* cb_arg, void* elem), void *cb_arg) * * sptree_NAME_iterator* sptree_NAME_iterator_init(sptree_NAME *t) * void sptree_NAME_iterator_init_set(sptree_NAME *t, sptree_NAME_iterator **iterator, void *start) * sptree_NAME_iterator* sptree_NAME_iterator_reverse_init(sptree_NAME *t) * void sptree_NAME_iterator_reverse_init_set(sptree_NAME *t, sptree_NAME_iterator **iterator, void *start) * void sptree_NAME_iterator_free(sptree_NAME_iterator *i) * * void* sptree_NAME_iterator_next(sptree_NAME_iterator *i) * void* sptree_NAME_iterator_reverse_next(sptree_NAME_iterator *i) */ #define SPTREE_DEF(name, realloc, qsort_arg) \ typedef int (*sptree_##name##_compare)(const void *, const void *, void *); \ \ typedef struct sptree_##name { \ void *members; \ sptree_node_pointers *lrpointers; \ \ spnode_t nmember; \ spnode_t ntotal; \ \ sptree_##name##_compare compare; \ sptree_##name##_compare elemcompare; \ void* arg; \ size_t elemsize; \ \ spnode_t root; \ spnode_t garbage_head; \ spnode_t size; \ spnode_t max_size; \ spnode_t max_depth; \ } sptree_##name; \ \ static spnode_t \ sptree_##name##_mktree(sptree_##name *t, spnode_t depth, spnode_t start, spnode_t end) { \ spnode_t half = ( (end + start) >> 1 ), tmp; \ \ if (depth > t->max_depth) t->max_depth = depth; \ \ if ( half == start || \ ( tmp = sptree_##name##_mktree(t, depth+1, start, half)) == half ) \ _SET_SPNODE_LEFT(half, 
SPNIL); \ else \ _SET_SPNODE_LEFT(half, tmp); \ if ( half+1 >= end || \ ( tmp = sptree_##name##_mktree(t, depth+1, half+1, end)) == half ) \ _SET_SPNODE_RIGHT(half, SPNIL); \ else \ _SET_SPNODE_RIGHT(half, tmp); \ \ return half; \ } \ \ static inline int \ sptree_##name##_init(sptree_##name *t, size_t elemsize, void *m, \ spnode_t nm, spnode_t nt, \ sptree_##name##_compare compare, \ sptree_##name##_compare elemcompare, \ void *arg) { \ memset(t, 0, sizeof(*t)); \ t->members = m; \ t->max_size = t->size = t->nmember = nm; \ t->ntotal = (nt==0) ? nm : nt; \ t->compare = compare != NULL ? compare : elemcompare; \ t->elemcompare = elemcompare != NULL ? elemcompare : compare; \ t->arg = arg; \ t->elemsize = elemsize; \ t->garbage_head = t->root = SPNIL; \ \ if (t->ntotal == 0 || t->members == NULL) { /* from scratch */ \ if (t->ntotal == 0) { \ t->members = NULL; \ t->ntotal = SPTREE_MIN_SIZE; \ } \ \ if (t->members == NULL) \ t->members = realloc(NULL, elemsize * t->ntotal); \ } \ t->lrpointers = (sptree_node_pointers *) realloc(NULL, \ sizeof(sptree_node_pointers) * t->ntotal); \ \ if (t->nmember == 1) { \ t->root = 0; \ _SET_SPNODE_RIGHT(0, SPNIL); \ _SET_SPNODE_LEFT(0, SPNIL); \ } else if (t->nmember > 1) { \ qsort_arg(t->members, t->nmember, elemsize, t->elemcompare, t->arg); \ /* create tree */ \ t->root = sptree_##name##_mktree(t, 1, 0, t->nmember); \ } \ if (t->members && t->lrpointers) \ return 0; \ else if (t->members) \ return t->ntotal * sizeof(sptree_node_pointers); \ else if (t->lrpointers) \ return t->ntotal * elemsize; \ else \ return t->ntotal * (sizeof(sptree_node_pointers) + elemsize); \ } \ \ static inline void \ sptree_##name##_destroy(sptree_##name *t) { \ if (t == NULL) return; \ t->members = realloc(t->members, 0); \ t->lrpointers = (sptree_node_pointers *)realloc(t->lrpointers, 0); \ } \ \ /** Nodes in the garbage list have a loop on their right link. 
*/ \ static inline bool \ sptree_##name##_node_is_deleted(const sptree_##name *t, spnode_t node) { \ \ return _GET_SPNODE_RIGHT(node) == node; \ } \ \ static inline void* \ sptree_##name##_find(const sptree_##name *t, void *k) { \ spnode_t node = t->root; \ while(node != SPNIL) { \ int r = t->compare(k, ITHELEM(t, node), t->arg); \ if (r > 0) { \ node = _GET_SPNODE_RIGHT(node); \ } else if (r < 0) { \ node = _GET_SPNODE_LEFT(node); \ } else { \ return ITHELEM(t, node); \ } \ } \ return NULL; \ } \ \ static inline void* \ sptree_##name##_first(const sptree_##name *t) { \ spnode_t node = t->root; \ spnode_t first = SPNIL; \ while (node != SPNIL) { \ first = node; \ node = _GET_SPNODE_LEFT(node); \ } \ if (first != SPNIL) \ return ITHELEM(t, first); \ return NULL; \ } \ \ static inline void* \ sptree_##name##_last(const sptree_##name *t) { \ spnode_t node = t->root; \ spnode_t last = SPNIL; \ while (node != SPNIL) { \ last = node; \ node = _GET_SPNODE_RIGHT(node); \ } \ if (last != SPNIL) \ return ITHELEM(t, last); \ return NULL; \ } \ \ static inline void* \ sptree_##name##_random(const sptree_##name *t, spnode_t rnd) { \ for (spnode_t i = 0; i < t->size; i++, rnd++) { \ rnd %= t->nmember; \ if (!sptree_##name##_node_is_deleted(t, rnd)) \ return ITHELEM(t, rnd); \ \ } \ \ return NULL; \ } \ static inline spnode_t \ sptree_##name##_size_of_subtree(sptree_##name *t, spnode_t node) { \ if (node == SPNIL) \ return 0; \ return 1 + \ sptree_##name##_size_of_subtree(t, _GET_SPNODE_LEFT(node)) + \ sptree_##name##_size_of_subtree(t, _GET_SPNODE_RIGHT(node)); \ } \ \ static inline int \ sptree_##name##_reserve_places(sptree_##name *t, spnode_t nreserve) { \ spnode_t num_free = t->ntotal - t->size; \ if (num_free >= nreserve) \ return 0; \ spnode_t new_ntotal = MAX(t->ntotal * 2, t->ntotal + nreserve - num_free); \ void *new_members = realloc(t->members, new_ntotal * t->elemsize); \ if (!new_members) \ return new_ntotal * t->elemsize; \ t->members = new_members; \ 
sptree_node_pointers *new_lrpointers = (sptree_node_pointers *) \ realloc(t->lrpointers, new_ntotal * sizeof(sptree_node_pointers)); \ if (!new_lrpointers) \ return new_ntotal * sizeof(sptree_node_pointers); \ t->lrpointers = new_lrpointers; \ t->ntotal = new_ntotal; \ return 0; \ } \ \ static inline spnode_t \ sptree_##name##_get_place(sptree_##name *t) { \ spnode_t node; \ if (t->garbage_head != SPNIL) { \ node = t->garbage_head; \ t->garbage_head = _GET_SPNODE_LEFT(t->garbage_head); \ } else { \ if (t->nmember >= t->ntotal) { \ spnode_t new_ntotal = t->ntotal * 2; \ t->members = realloc(t->members, new_ntotal * t->elemsize); \ t->lrpointers = (sptree_node_pointers *) realloc(t->lrpointers, \ new_ntotal * sizeof(sptree_node_pointers)); \ t->ntotal = new_ntotal; \ } \ \ node = t->nmember; \ t->nmember++; \ } \ _SET_SPNODE_LEFT(node, SPNIL); \ _SET_SPNODE_RIGHT(node, SPNIL); \ return node; \ } \ \ static inline spnode_t \ sptree_##name##_flatten_tree(sptree_##name *t, spnode_t root, spnode_t head) { \ spnode_t node; \ if (root == SPNIL) \ return head; \ node = sptree_##name##_flatten_tree(t, _GET_SPNODE_RIGHT(root), head); \ _SET_SPNODE_RIGHT(root, node); \ return sptree_##name##_flatten_tree(t, _GET_SPNODE_LEFT(root), root); \ } \ \ static inline spnode_t \ sptree_##name##_build_tree(sptree_##name *t, spnode_t node, spnode_t size) { \ spnode_t tmp; \ if (size == 0) { \ _SET_SPNODE_LEFT(node, SPNIL); \ return node; \ } \ spnode_t root = sptree_##name##_build_tree(t, \ node, ceil(((double)size-1.0)/2.0)); \ spnode_t list = sptree_##name##_build_tree(t, \ _GET_SPNODE_RIGHT(root), floor(((double)size-1.0)/2.0)); \ tmp = _GET_SPNODE_LEFT(list); \ _SET_SPNODE_RIGHT(root, tmp); \ _SET_SPNODE_LEFT(list, root); \ \ return list; \ } \ \ static inline spnode_t \ sptree_##name##_balance(sptree_##name *t, spnode_t node, spnode_t size) { \ spnode_t fake = sptree_##name##_get_place(t); \ spnode_t z; \ \ z = sptree_##name##_flatten_tree(t, node, fake); \ 
sptree_##name##_build_tree(t, z, size); \ \ z = _GET_SPNODE_LEFT(fake); \ _SET_SPNODE_LEFT(fake, t->garbage_head); \ /* \ * Loop back on the right link indicates that the node \ * is in the garbage list. \ */ \ _SET_SPNODE_RIGHT(fake, fake); \ t->garbage_head = fake; \ return z; \ } \ \ static inline int \ sptree_##name##_replace(sptree_##name *t, void *v, void **p_old) { \ spnode_t node, depth = 0; \ spnode_t path[ t->max_depth + 2]; \ \ if (t->root == SPNIL) { \ _SET_SPNODE_LEFT(0, SPNIL); \ _SET_SPNODE_RIGHT(0, SPNIL); \ memcpy(t->members, v, t->elemsize); \ t->root = 0; \ t->garbage_head = SPNIL; \ t->nmember = 1; \ t->size=1; \ if (p_old) \ *p_old = NULL; \ return 0; \ } else { \ spnode_t parent = t->root; \ \ for(;;) { \ int r = t->elemcompare(v, ITHELEM(t, parent), t->arg); \ if (r==0) { \ if (p_old) \ memcpy(*p_old, ITHELEM(t, parent), t->elemsize); \ memcpy(ITHELEM(t, parent), v, t->elemsize); \ return 0; \ } \ path[depth] = parent; \ depth++; \ if (r>0) { \ if (_GET_SPNODE_RIGHT(parent) == SPNIL) { \ /* extra element can be needed for current balance implementation*/ \ int reserve_result = sptree_##name##_reserve_places(t, 2); \ if (reserve_result) \ return reserve_result; \ node = sptree_##name##_get_place(t); \ memcpy(ITHELEM(t, node), v, t->elemsize); \ _SET_SPNODE_RIGHT(parent, node); \ break; \ } else { \ parent = _GET_SPNODE_RIGHT(parent); \ } \ } else { \ if (_GET_SPNODE_LEFT(parent) == SPNIL) { \ /* extra element can be needed for current balance implementation*/ \ int reserve_result = sptree_##name##_reserve_places(t, 2); \ if (reserve_result) \ return reserve_result; \ node = sptree_##name##_get_place(t); \ memcpy(ITHELEM(t, node), v, t->elemsize); \ _SET_SPNODE_LEFT(parent, node); \ break; \ } else { \ parent = _GET_SPNODE_LEFT(parent); \ } \ } \ } \ } \ if (p_old) \ *p_old = NULL; \ \ t->size++; \ if ( t->size > t->max_size ) \ t->max_size = t->size; \ if ( depth > t->max_depth ) \ t->max_depth = depth; \ \ if ( (double)depth > 
COUNTALPHA(t->size)) { \ spnode_t parent; \ spnode_t i, size = 1 ; \ \ path[depth] = node; \ \ for (i = 1; ; i++) { \ if (i < depth) { \ parent = path[ depth - i ]; \ size += 1 + sptree_##name##_size_of_subtree( t, \ _GET_SPNODE_RIGHT(parent) == path[depth - i + 1] ? \ _GET_SPNODE_LEFT(parent) : _GET_SPNODE_RIGHT(parent)); \ if ((double)i > COUNTALPHA(size)) { \ spnode_t n = sptree_##name##_balance(t, parent, size); \ spnode_t pp = path[ depth - i - 1 ]; \ if (_GET_SPNODE_LEFT(pp) == parent) \ _SET_SPNODE_LEFT(pp, n); \ else \ _SET_SPNODE_RIGHT(pp, n); \ break; \ } \ } else { \ t->root = sptree_##name##_balance(t, t->root, t->size); \ t->max_size = t->size; \ break; \ } \ } \ } \ return 0; \ } \ \ static inline void \ sptree_##name##_delete(sptree_##name *t, void *k) { \ spnode_t node = t->root; \ spnode_t parent = SPNIL; \ int lr = 0; \ while(node != SPNIL) { \ int r = t->elemcompare(k, ITHELEM(t, node), t->arg); \ if (r > 0) { \ parent = node; \ node = _GET_SPNODE_RIGHT(node); \ lr = +1; \ } else if (r < 0) { \ parent = node; \ node = _GET_SPNODE_LEFT(node); \ lr = -1; \ } else {/* found */ \ if (_GET_SPNODE_LEFT(node) == SPNIL && _GET_SPNODE_RIGHT(node) == SPNIL) { \ if ( parent == SPNIL ) \ t->root = SPNIL; \ else if (lr <0) \ _SET_SPNODE_LEFT(parent, SPNIL); \ else \ _SET_SPNODE_RIGHT(parent, SPNIL); \ } else if (_GET_SPNODE_LEFT(node) == SPNIL) { \ spnode_t child = _GET_SPNODE_RIGHT(node); \ if (parent == SPNIL) t->root = child; \ else if (lr <0) _SET_SPNODE_LEFT(parent, child); \ else _SET_SPNODE_RIGHT(parent, child); \ } else if (_GET_SPNODE_RIGHT(node) == SPNIL) { \ spnode_t child = _GET_SPNODE_LEFT(node); \ if (parent == SPNIL) t->root = child; \ else if (lr <0) _SET_SPNODE_LEFT(parent, child); \ else _SET_SPNODE_RIGHT(parent, child); \ } else { \ spnode_t todel = _GET_SPNODE_LEFT(node); \ \ parent = SPNIL; \ for(;;) { \ if ( _GET_SPNODE_RIGHT(todel) != SPNIL ) { \ parent = todel; \ todel = _GET_SPNODE_RIGHT(todel); \ } else \ break; \ } \ 
memcpy(ITHELEM(t, node), ITHELEM(t, todel), t->elemsize); \ if (parent != SPNIL) \ _SET_SPNODE_RIGHT(parent, _GET_SPNODE_LEFT(todel)); \ else \ _SET_SPNODE_LEFT(node, _GET_SPNODE_LEFT(todel)); \ node = todel; /* node to delete */ \ } \ \ _SET_SPNODE_LEFT(node, t->garbage_head); \ /* \ * Loop back on the right link indicates that the node \ * is in the garbage list. \ */ \ _SET_SPNODE_RIGHT(node, node); \ t->garbage_head = node; \ \ break; \ } \ } \ \ if (node == SPNIL) /* not found */ \ return; \ \ t->size --; \ if ( t->size > 0 && (double)t->size < alpha * t->max_size ) { \ t->root = sptree_##name##_balance(t, t->root, t->size); \ t->max_size = t->size; \ } \ } \ \ static inline spnode_t \ sptree_##name##_walk(sptree_##name *t, void* array, spnode_t limit, spnode_t offset) { \ int level = 0; \ spnode_t count= 0, \ node, \ stack[ t->max_depth + 1 ]; \ \ if (t->root == SPNIL) return 0; \ stack[0] = t->root; \ \ while( (node = _GET_SPNODE_LEFT( stack[level] )) != SPNIL ) { \ level++; \ stack[level] = node; \ } \ \ while( count < offset + limit && level >= 0 ) { \ \ if (count >= offset) \ memcpy((char *) array + (count-offset) * t->elemsize, \ ITHELEM(t, stack[level]), t->elemsize); \ count++; \ \ node = _GET_SPNODE_RIGHT( stack[level] ); \ level--; \ while( node != SPNIL ) { \ level++; \ stack[level] = node; \ node = _GET_SPNODE_LEFT( stack[level] ); \ } \ } \ \ return (count > offset) ? 
count - offset : 0; \ } \ \ static inline void \ sptree_##name##_walk_cb(sptree_##name *t, int (*cb)(void*, void*), void *cb_arg ) { \ int level = 0; \ spnode_t node, \ stack[ t->max_depth + 1 ]; \ \ if (t->root == SPNIL) return; \ stack[0] = t->root; \ \ while( (node = _GET_SPNODE_LEFT( stack[level] )) != SPNIL ) { \ level++; \ stack[level] = node; \ } \ \ while( level >= 0 ) { \ if ( cb(cb_arg, ITHELEM(t, stack[level])) == 0 ) \ return; \ \ node = _GET_SPNODE_RIGHT( stack[level] ); \ level--; \ while( node != SPNIL ) { \ level++; \ stack[level] = node; \ node = _GET_SPNODE_LEFT( stack[level] ); \ } \ } \ } \ \ typedef struct sptree_##name##_iterator { \ const sptree_##name *t; \ int level; \ spnode_t max_depth; \ spnode_t stack[0]; \ } sptree_##name##_iterator; \ \ static inline sptree_##name##_iterator * \ sptree_##name##_iterator_alloc(sptree_##name *t) { \ sptree_##name##_iterator *i = (sptree_##name##_iterator *) \ realloc(NULL, sizeof(*i) + sizeof(spnode_t) * (t->max_depth + 1)); \ if (i) { \ i->t = t; \ i->level = 0; \ i->stack[0] = t->root; \ } \ return i; \ } \ \ static inline sptree_##name##_iterator * \ sptree_##name##_iterator_init(sptree_##name *t) { \ sptree_##name##_iterator *i; \ spnode_t node; \ \ if (t->root == SPNIL) return NULL; \ i = sptree_##name##_iterator_alloc(t); \ if (!i) \ return i; \ \ while( (node = _GET_SPNODE_LEFT( i->stack[i->level] )) != SPNIL ) { \ i->level++; \ i->stack[i->level] = node; \ } \ \ return i; \ } \ \ static inline int \ sptree_##name##_iterator_init_set(const sptree_##name *t, sptree_##name##_iterator **i, \ void *k) { \ spnode_t node; \ int lastLevelEq = -1, cmp; \ \ if ((*i) == NULL || t->max_depth > (*i)->max_depth) { \ sptree_##name##_iterator *new_i; \ new_i = (sptree_##name##_iterator *) realloc(*i, sizeof(**i) + \ sizeof(spnode_t) * (t->max_depth + 31)); \ if (!new_i) \ return sizeof(**i) + sizeof(spnode_t) * (t->max_depth + 31); \ *i = new_i; \ } \ \ (*i)->t = t; \ (*i)->level = -1; \ if (t->root == SPNIL) { 
\ (*i)->max_depth = 0; /* valgrind points out it's used in the check above ^.*/ \ return 0; \ } \ \ (*i)->max_depth = t->max_depth; \ (*i)->stack[0] = t->root; \ \ node = t->root; \ while(node != SPNIL) { \ cmp = t->compare(k, ITHELEM(t, node), t->arg); \ \ (*i)->level++; \ (*i)->stack[(*i)->level] = node; \ \ if (cmp > 0) { \ (*i)->level--; /* exclude current node from path, ie "mark as visited" */ \ node = _GET_SPNODE_RIGHT(node); \ } else if (cmp < 0) { \ node = _GET_SPNODE_LEFT(node); \ } else { \ lastLevelEq = (*i)->level; \ node = _GET_SPNODE_LEFT(node); /* one way iterator: from left to right */ \ } \ } \ \ if (lastLevelEq >= 0) \ (*i)->level = lastLevelEq; \ return 0; \ } \ \ static inline sptree_##name##_iterator * \ sptree_##name##_iterator_reverse_init(sptree_##name *t) { \ sptree_##name##_iterator *i; \ spnode_t node; \ \ if (t->root == SPNIL) return NULL; \ i = sptree_##name##_iterator_alloc(t); \ if (!i) \ return i; \ \ while( (node = _GET_SPNODE_RIGHT( i->stack[i->level] )) != SPNIL ) { \ i->level++; \ i->stack[i->level] = node; \ } \ \ return i; \ } \ \ static inline int \ sptree_##name##_iterator_reverse_init_set(const sptree_##name *t, \ sptree_##name##_iterator **i, void *k) { \ spnode_t node; \ int lastLevelEq = -1, cmp; \ \ if ((*i) == NULL || t->max_depth > (*i)->max_depth) { \ sptree_##name##_iterator *new_i; \ new_i = (sptree_##name##_iterator *) realloc(*i, sizeof(**i) + \ sizeof(spnode_t) * (t->max_depth + 31)); \ if (!new_i) \ return sizeof(**i) + sizeof(spnode_t) * (t->max_depth + 31); \ *i = new_i; \ } \ \ (*i)->t = t; \ (*i)->level = -1; \ if (t->root == SPNIL) { \ (*i)->max_depth = 0; \ return 0; \ } \ \ (*i)->max_depth = t->max_depth; \ (*i)->stack[0] = t->root; \ \ node = t->root; \ while(node != SPNIL) { \ cmp = t->compare(k, ITHELEM(t, node), t->arg); \ \ (*i)->level++; \ (*i)->stack[(*i)->level] = node; \ \ if (cmp < 0) { \ (*i)->level--; \ node = _GET_SPNODE_LEFT(node); \ } else if (cmp > 0) { \ node = _GET_SPNODE_RIGHT(node); \ 
} else { \ lastLevelEq = (*i)->level; \ node = _GET_SPNODE_RIGHT(node); \ } \ } \ \ if (lastLevelEq >= 0) \ (*i)->level = lastLevelEq; \ return 0; \ } \ \ static inline void \ sptree_##name##_iterator_free(sptree_##name##_iterator *i) { \ if (i == NULL) return; \ i = (sptree_##name##_iterator *)realloc(i, 0); \ } \ \ /** \ * Get the last node on the iterator stack, check \ * if the node is not deleted. \ */ \ static inline spnode_t \ sptree_##name##_iterator_next_node(sptree_##name##_iterator *i) { \ \ while (i->level >= 0) { \ spnode_t return_node = i->stack[i->level--]; \ if (! sptree_##name##_node_is_deleted(i->t, return_node)) \ return return_node; \ } \ return SPNIL; \ } \ \ static inline void* \ sptree_##name##_iterator_next(sptree_##name##_iterator *i) { \ \ if (i == NULL) return NULL; \ \ const sptree_##name *t = i->t; \ spnode_t returnNode = sptree_##name##_iterator_next_node(i); \ \ if (returnNode == SPNIL) return NULL; \ \ spnode_t node = _GET_SPNODE_RIGHT(returnNode); \ while (node != SPNIL) { \ i->level++; \ i->stack[i->level] = node; \ node = _GET_SPNODE_LEFT(i->stack[i->level]); \ } \ \ return ITHELEM(t, returnNode); \ } \ \ static inline void* \ sptree_##name##_iterator_reverse_next(sptree_##name##_iterator *i) { \ \ if (i == NULL) return NULL; \ \ const sptree_##name *t = i->t; \ spnode_t returnNode = sptree_##name##_iterator_next_node(i); \ \ if (returnNode == SPNIL) return NULL; \ \ spnode_t node = _GET_SPNODE_LEFT(returnNode); \ while (node != SPNIL) { \ i->level++; \ i->stack[i->level] = node; \ node = _GET_SPNODE_RIGHT(i->stack[i->level]); \ } \ return ITHELEM(t, returnNode); \ } /* * vim: ts=4 sts=4 et */ #endif #if defined(__cplusplus) } #endif /* defined(__cplusplus) */ tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/0000775000000000000000000000000013306562377020204 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/.hgignore0000644000000000000000000000050013306562377022000 0ustar rootrootsyntax: glob aclocal.m4 
autom4te.cache configure libtool Makefile Makefile.in stamp-h1 config config.h config.h.in config.log config.status example-deconstructor example-deconstructor-alt example-reformatter example-reformatter-alt run-dumper run-emitter run-loader run-parser run-scanner .deps .libs *.o *.lo *.la *.pc tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/announcement.msg0000644000000000000000000000236213306562377023407 0ustar rootrootFrom: Kirill Simonov To: yaml-core@lists.sourceforge.net Subject: LibYAML-0.1.4: A minor bugfix release This is a minor bugfix release of LibYAML, a YAML parser and emitter written in C: * Fixed a bug that prevented an empty mapping being used as a simple key (thank to spitzak(at)rhythm(dot)com). * Fixed pointer overflow when calculating the position of a potential simple key (thank to ppelletier(at)oblong(dot)com). * Fixed yaml.dll not exporting any symbols (thank to pxn11432(at)nifty(dot)com). * Added pkg-config support (thank to rainwoodman(at)gmail(dot)com). LibYAML homepage: http://pyyaml.org/wiki/LibYAML TAR.GZ package: http://pyyaml.org/download/libyaml/yaml-0.1.4.tar.gz SVN repository: http://svn.pyyaml.org/libyaml/branches/stable Bug tracker: http://pyyaml.org/newticket?component=libyaml The library is functionally complete, but the documentation is scarce and the API may change. For more information, you may check the project homepage, the doxygen-generated documentation in the `doc` directory of the source distribution, and examples in the `tests` directory. LibYAML is written by Kirill Simonov and released under the MIT license; see the file LICENSE for more details. 
tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/win32/0000755000000000000000000000000013306562377021144 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/win32/config.h0000644000000000000000000000017313306562377022563 0ustar rootroot#define YAML_VERSION_MAJOR 0 #define YAML_VERSION_MINOR 1 #define YAML_VERSION_PATCH 7 #define YAML_VERSION_STRING "0.1.7" tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/win32/Makefile.am0000644000000000000000000000003013306562377023171 0ustar rootroot EXTRA_DIST = config.h tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/doc/0000755000000000000000000000000013306562377020747 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/doc/doxygen.cfg0000644000000000000000000002030613306562377023106 0ustar rootroot# Doxyfile 1.4.4 #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- PROJECT_NAME = $(PACKAGE) PROJECT_NUMBER = $(VERSION) OUTPUT_DIRECTORY = $(top_builddir)/doc/ CREATE_SUBDIRS = NO OUTPUT_LANGUAGE = English USE_WINDOWS_ENCODING = NO BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO FULL_PATH_NAMES = YES STRIP_FROM_PATH = STRIP_FROM_INC_PATH = SHORT_NAMES = NO JAVADOC_AUTOBRIEF = YES MULTILINE_CPP_IS_BRIEF = NO DETAILS_AT_TOP = NO INHERIT_DOCS = YES DISTRIBUTE_GROUP_DOC = NO SEPARATE_MEMBER_PAGES = NO TAB_SIZE = 8 ALIASES = OPTIMIZE_OUTPUT_FOR_C = YES OPTIMIZE_OUTPUT_JAVA = NO SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- EXTRACT_ALL = NO EXTRACT_PRIVATE = NO EXTRACT_STATIC = NO EXTRACT_LOCAL_CLASSES = NO EXTRACT_LOCAL_METHODS = NO HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO HIDE_FRIEND_COMPOUNDS = NO HIDE_IN_BODY_DOCS = NO 
INTERNAL_DOCS = NO CASE_SENSE_NAMES = YES HIDE_SCOPE_NAMES = NO SHOW_INCLUDE_FILES = YES INLINE_INFO = YES SORT_MEMBER_DOCS = NO SORT_BRIEF_DOCS = NO SORT_BY_SCOPE_NAME = NO GENERATE_TODOLIST = YES GENERATE_TESTLIST = YES GENERATE_BUGLIST = YES GENERATE_DEPRECATEDLIST= YES ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 SHOW_USED_FILES = YES SHOW_DIRECTORIES = YES FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- QUIET = NO WARNINGS = YES WARN_IF_UNDOCUMENTED = YES WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = NO WARN_FORMAT = "$file:$line: $text" WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- INPUT = $(top_srcdir)/include/ FILE_PATTERNS = *.h RECURSIVE = YES EXCLUDE = EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = EXAMPLE_PATH = EXAMPLE_PATTERNS = EXAMPLE_RECURSIVE = NO IMAGE_PATH = INPUT_FILTER = FILTER_PATTERNS = FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- SOURCE_BROWSER = NO INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES REFERENCED_BY_RELATION = NO REFERENCES_RELATION = NO USE_HTAGS = NO VERBATIM_HEADERS = NO #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- ALPHABETICAL_INDEX = NO COLS_IN_ALPHA_INDEX = 5 IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output 
#--------------------------------------------------------------------------- GENERATE_HTML = YES HTML_OUTPUT = html HTML_FILE_EXTENSION = .html HTML_HEADER = HTML_FOOTER = HTML_STYLESHEET = HTML_ALIGN_MEMBERS = YES GENERATE_HTMLHELP = NO CHM_FILE = HHC_LOCATION = GENERATE_CHI = NO BINARY_TOC = NO TOC_EXPAND = NO DISABLE_INDEX = NO ENUM_VALUES_PER_LINE = 1 GENERATE_TREEVIEW = NO TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- GENERATE_LATEX = NO LATEX_OUTPUT = latex LATEX_CMD_NAME = latex MAKEINDEX_CMD_NAME = makeindex COMPACT_LATEX = NO PAPER_TYPE = a4wide EXTRA_PACKAGES = LATEX_HEADER = PDF_HYPERLINKS = NO USE_PDFLATEX = NO LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- GENERATE_RTF = NO RTF_OUTPUT = rtf COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- GENERATE_MAN = NO MAN_OUTPUT = man MAN_EXTENSION = .3 MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- GENERATE_XML = NO XML_OUTPUT = xml XML_SCHEMA = XML_DTD = XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- GENERATE_AUTOGEN_DEF = NO 
#--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- GENERATE_PERLMOD = NO PERLMOD_LATEX = NO PERLMOD_PRETTY = YES PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- ENABLE_PREPROCESSING = YES MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = YES SEARCH_INCLUDES = YES INCLUDE_PATH = INCLUDE_FILE_PATTERNS = PREDEFINED = "YAML_DECLARE(type)=type" EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- TAGFILES = GENERATE_TAGFILE = ALLEXTERNALS = NO EXTERNAL_GROUPS = YES PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- CLASS_DIAGRAMS = NO HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO CLASS_GRAPH = YES COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES UML_LOOK = NO TEMPLATE_RELATIONS = NO INCLUDE_GRAPH = YES INCLUDED_BY_GRAPH = YES CALL_GRAPH = NO GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES DOT_IMAGE_FORMAT = png DOT_PATH = DOTFILE_DIRS = MAX_DOT_GRAPH_WIDTH = 1024 MAX_DOT_GRAPH_HEIGHT = 1024 MAX_DOT_GRAPH_DEPTH = 0 DOT_TRANSPARENT = NO DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- SEARCHENGINE = NO 
tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/0000755000000000000000000000000013306562377020771 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/parser.c0000644000000000000000000013021313306562377022431 0ustar rootroot /* * The parser implements the following grammar: * * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END * implicit_document ::= block_node DOCUMENT-END* * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* * block_node_or_indentless_sequence ::= * ALIAS * | properties (block_content | indentless_block_sequence)? * | block_content * | indentless_block_sequence * block_node ::= ALIAS * | properties block_content? * | block_content * flow_node ::= ALIAS * | properties flow_content? * | flow_content * properties ::= TAG ANCHOR? | ANCHOR TAG? * block_content ::= block_collection | flow_collection | SCALAR * flow_content ::= flow_collection | SCALAR * block_collection ::= block_sequence | block_mapping * flow_collection ::= flow_sequence | flow_mapping * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ * block_mapping ::= BLOCK-MAPPING_START * ((KEY block_node_or_indentless_sequence?)? * (VALUE block_node_or_indentless_sequence?)?)* * BLOCK-END * flow_sequence ::= FLOW-SEQUENCE-START * (flow_sequence_entry FLOW-ENTRY)* * flow_sequence_entry? * FLOW-SEQUENCE-END * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? * flow_mapping ::= FLOW-MAPPING-START * (flow_mapping_entry FLOW-ENTRY)* * flow_mapping_entry? * FLOW-MAPPING-END * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? */ #include "yaml_private.h" /* * Peek the next token in the token queue. */ #define PEEK_TOKEN(parser) \ ((parser->token_available || yaml_parser_fetch_more_tokens(parser)) ? \ parser->tokens.head : NULL) /* * Remove the next token from the queue (must be called after PEEK_TOKEN). 
*/ #define SKIP_TOKEN(parser) \ (parser->token_available = 0, \ parser->tokens_parsed ++, \ parser->stream_end_produced = \ (parser->tokens.head->type == YAML_STREAM_END_TOKEN), \ parser->tokens.head ++) /* * Public API declarations. */ YAML_DECLARE(int) yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event); /* * Error handling. */ static int yaml_parser_set_parser_error(yaml_parser_t *parser, const char *problem, yaml_mark_t problem_mark); static int yaml_parser_set_parser_error_context(yaml_parser_t *parser, const char *context, yaml_mark_t context_mark, const char *problem, yaml_mark_t problem_mark); /* * State functions. */ static int yaml_parser_state_machine(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_stream_start(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_document_start(yaml_parser_t *parser, yaml_event_t *event, int implicit); static int yaml_parser_parse_document_content(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_document_end(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_node(yaml_parser_t *parser, yaml_event_t *event, int block, int indentless_sequence); static int yaml_parser_parse_block_sequence_entry(yaml_parser_t *parser, yaml_event_t *event, int first); static int yaml_parser_parse_indentless_sequence_entry(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_block_mapping_key(yaml_parser_t *parser, yaml_event_t *event, int first); static int yaml_parser_parse_block_mapping_value(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_flow_sequence_entry(yaml_parser_t *parser, yaml_event_t *event, int first); static int yaml_parser_parse_flow_sequence_entry_mapping_key(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_flow_sequence_entry_mapping_value(yaml_parser_t *parser, yaml_event_t *event); static int 
yaml_parser_parse_flow_sequence_entry_mapping_end(yaml_parser_t *parser, yaml_event_t *event); static int yaml_parser_parse_flow_mapping_key(yaml_parser_t *parser, yaml_event_t *event, int first); static int yaml_parser_parse_flow_mapping_value(yaml_parser_t *parser, yaml_event_t *event, int empty); /* * Utility functions. */ static int yaml_parser_process_empty_scalar(yaml_parser_t *parser, yaml_event_t *event, yaml_mark_t mark); static int yaml_parser_process_directives(yaml_parser_t *parser, yaml_version_directive_t **version_directive_ref, yaml_tag_directive_t **tag_directives_start_ref, yaml_tag_directive_t **tag_directives_end_ref); static int yaml_parser_append_tag_directive(yaml_parser_t *parser, yaml_tag_directive_t value, int allow_duplicates, yaml_mark_t mark); /* * Get the next event. */ YAML_DECLARE(int) yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) { assert(parser); /* Non-NULL parser object is expected. */ assert(event); /* Non-NULL event object is expected. */ /* Erase the event object. */ memset(event, 0, sizeof(yaml_event_t)); /* No events after the end of the stream or error. */ if (parser->stream_end_produced || parser->error || parser->state == YAML_PARSE_END_STATE) { return 1; } /* Generate the next event. */ return yaml_parser_state_machine(parser, event); } /* * Set parser error. */ static int yaml_parser_set_parser_error(yaml_parser_t *parser, const char *problem, yaml_mark_t problem_mark) { parser->error = YAML_PARSER_ERROR; parser->problem = problem; parser->problem_mark = problem_mark; return 0; } static int yaml_parser_set_parser_error_context(yaml_parser_t *parser, const char *context, yaml_mark_t context_mark, const char *problem, yaml_mark_t problem_mark) { parser->error = YAML_PARSER_ERROR; parser->context = context; parser->context_mark = context_mark; parser->problem = problem; parser->problem_mark = problem_mark; return 0; } /* * State dispatcher. 
*/ static int yaml_parser_state_machine(yaml_parser_t *parser, yaml_event_t *event) { switch (parser->state) { case YAML_PARSE_STREAM_START_STATE: return yaml_parser_parse_stream_start(parser, event); case YAML_PARSE_IMPLICIT_DOCUMENT_START_STATE: return yaml_parser_parse_document_start(parser, event, 1); case YAML_PARSE_DOCUMENT_START_STATE: return yaml_parser_parse_document_start(parser, event, 0); case YAML_PARSE_DOCUMENT_CONTENT_STATE: return yaml_parser_parse_document_content(parser, event); case YAML_PARSE_DOCUMENT_END_STATE: return yaml_parser_parse_document_end(parser, event); case YAML_PARSE_BLOCK_NODE_STATE: return yaml_parser_parse_node(parser, event, 1, 0); case YAML_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: return yaml_parser_parse_node(parser, event, 1, 1); case YAML_PARSE_FLOW_NODE_STATE: return yaml_parser_parse_node(parser, event, 0, 0); case YAML_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: return yaml_parser_parse_block_sequence_entry(parser, event, 1); case YAML_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_block_sequence_entry(parser, event, 0); case YAML_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_indentless_sequence_entry(parser, event); case YAML_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: return yaml_parser_parse_block_mapping_key(parser, event, 1); case YAML_PARSE_BLOCK_MAPPING_KEY_STATE: return yaml_parser_parse_block_mapping_key(parser, event, 0); case YAML_PARSE_BLOCK_MAPPING_VALUE_STATE: return yaml_parser_parse_block_mapping_value(parser, event); case YAML_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: return yaml_parser_parse_flow_sequence_entry(parser, event, 1); case YAML_PARSE_FLOW_SEQUENCE_ENTRY_STATE: return yaml_parser_parse_flow_sequence_entry(parser, event, 0); case YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event); case YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, 
event); case YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event); case YAML_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: return yaml_parser_parse_flow_mapping_key(parser, event, 1); case YAML_PARSE_FLOW_MAPPING_KEY_STATE: return yaml_parser_parse_flow_mapping_key(parser, event, 0); case YAML_PARSE_FLOW_MAPPING_VALUE_STATE: return yaml_parser_parse_flow_mapping_value(parser, event, 0); case YAML_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: return yaml_parser_parse_flow_mapping_value(parser, event, 1); default: assert(1); /* Invalid state. */ } return 0; } /* * Parse the production: * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END * ************ */ static int yaml_parser_parse_stream_start(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_STREAM_START_TOKEN) { return yaml_parser_set_parser_error(parser, "did not find expected ", token->start_mark); } parser->state = YAML_PARSE_IMPLICIT_DOCUMENT_START_STATE; STREAM_START_EVENT_INIT(*event, token->data.stream_start.encoding, token->start_mark, token->start_mark); SKIP_TOKEN(parser); return 1; } /* * Parse the productions: * implicit_document ::= block_node DOCUMENT-END* * * * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* * ************************* */ static int yaml_parser_parse_document_start(yaml_parser_t *parser, yaml_event_t *event, int implicit) { yaml_token_t *token; yaml_version_directive_t *version_directive = NULL; struct { yaml_tag_directive_t *start; yaml_tag_directive_t *end; } tag_directives = { NULL, NULL }; token = PEEK_TOKEN(parser); if (!token) return 0; /* Parse extra document end indicators. */ if (!implicit) { while (token->type == YAML_DOCUMENT_END_TOKEN) { SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; } } /* Parse an implicit document. 
*/ if (implicit && token->type != YAML_VERSION_DIRECTIVE_TOKEN && token->type != YAML_TAG_DIRECTIVE_TOKEN && token->type != YAML_DOCUMENT_START_TOKEN && token->type != YAML_STREAM_END_TOKEN) { if (!yaml_parser_process_directives(parser, NULL, NULL, NULL)) return 0; if (!PUSH(parser, parser->states, YAML_PARSE_DOCUMENT_END_STATE)) return 0; parser->state = YAML_PARSE_BLOCK_NODE_STATE; DOCUMENT_START_EVENT_INIT(*event, NULL, NULL, NULL, 1, token->start_mark, token->start_mark); return 1; } /* Parse an explicit document. */ else if (token->type != YAML_STREAM_END_TOKEN) { yaml_mark_t start_mark, end_mark; start_mark = token->start_mark; if (!yaml_parser_process_directives(parser, &version_directive, &tag_directives.start, &tag_directives.end)) return 0; token = PEEK_TOKEN(parser); if (!token) goto error; if (token->type != YAML_DOCUMENT_START_TOKEN) { yaml_parser_set_parser_error(parser, "did not find expected ", token->start_mark); goto error; } if (!PUSH(parser, parser->states, YAML_PARSE_DOCUMENT_END_STATE)) goto error; parser->state = YAML_PARSE_DOCUMENT_CONTENT_STATE; end_mark = token->end_mark; DOCUMENT_START_EVENT_INIT(*event, version_directive, tag_directives.start, tag_directives.end, 0, start_mark, end_mark); SKIP_TOKEN(parser); version_directive = NULL; tag_directives.start = tag_directives.end = NULL; return 1; } /* Parse the stream end. */ else { parser->state = YAML_PARSE_END_STATE; STREAM_END_EVENT_INIT(*event, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } error: yaml_free(version_directive); while (tag_directives.start != tag_directives.end) { yaml_free(tag_directives.end[-1].handle); yaml_free(tag_directives.end[-1].prefix); tag_directives.end --; } yaml_free(tag_directives.start); return 0; } /* * Parse the productions: * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* * *********** */ static int yaml_parser_parse_document_content(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_VERSION_DIRECTIVE_TOKEN || token->type == YAML_TAG_DIRECTIVE_TOKEN || token->type == YAML_DOCUMENT_START_TOKEN || token->type == YAML_DOCUMENT_END_TOKEN || token->type == YAML_STREAM_END_TOKEN) { parser->state = POP(parser, parser->states); return yaml_parser_process_empty_scalar(parser, event, token->start_mark); } else { return yaml_parser_parse_node(parser, event, 1, 0); } } /* * Parse the productions: * implicit_document ::= block_node DOCUMENT-END* * ************* * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* * ************* */ static int yaml_parser_parse_document_end(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; yaml_mark_t start_mark, end_mark; int implicit = 1; token = PEEK_TOKEN(parser); if (!token) return 0; start_mark = end_mark = token->start_mark; if (token->type == YAML_DOCUMENT_END_TOKEN) { end_mark = token->end_mark; SKIP_TOKEN(parser); implicit = 0; } while (!STACK_EMPTY(parser, parser->tag_directives)) { yaml_tag_directive_t tag_directive = POP(parser, parser->tag_directives); yaml_free(tag_directive.handle); yaml_free(tag_directive.prefix); } parser->state = YAML_PARSE_DOCUMENT_START_STATE; DOCUMENT_END_EVENT_INIT(*event, implicit, start_mark, end_mark); return 1; } /* * Parse the productions: * block_node_or_indentless_sequence ::= * ALIAS * ***** * | properties (block_content | indentless_block_sequence)? * ********** * * | block_content | indentless_block_sequence * * * block_node ::= ALIAS * ***** * | properties block_content? * ********** * * | block_content * * * flow_node ::= ALIAS * ***** * | properties flow_content? * ********** * * | flow_content * * * properties ::= TAG ANCHOR? | ANCHOR TAG? 
* ************************* * block_content ::= block_collection | flow_collection | SCALAR * ****** * flow_content ::= flow_collection | SCALAR * ****** */ static int yaml_parser_parse_node(yaml_parser_t *parser, yaml_event_t *event, int block, int indentless_sequence) { yaml_token_t *token; yaml_char_t *anchor = NULL; yaml_char_t *tag_handle = NULL; yaml_char_t *tag_suffix = NULL; yaml_char_t *tag = NULL; yaml_mark_t start_mark, end_mark, tag_mark; int implicit; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_ALIAS_TOKEN) { parser->state = POP(parser, parser->states); ALIAS_EVENT_INIT(*event, token->data.alias.value, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } else { start_mark = end_mark = token->start_mark; if (token->type == YAML_ANCHOR_TOKEN) { anchor = token->data.anchor.value; start_mark = token->start_mark; end_mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) goto error; if (token->type == YAML_TAG_TOKEN) { tag_handle = token->data.tag.handle; tag_suffix = token->data.tag.suffix; tag_mark = token->start_mark; end_mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) goto error; } } else if (token->type == YAML_TAG_TOKEN) { tag_handle = token->data.tag.handle; tag_suffix = token->data.tag.suffix; start_mark = tag_mark = token->start_mark; end_mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) goto error; if (token->type == YAML_ANCHOR_TOKEN) { anchor = token->data.anchor.value; end_mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) goto error; } } if (tag_handle) { if (!*tag_handle) { tag = tag_suffix; yaml_free(tag_handle); tag_handle = tag_suffix = NULL; } else { yaml_tag_directive_t *tag_directive; for (tag_directive = parser->tag_directives.start; tag_directive != parser->tag_directives.top; tag_directive ++) { if (strcmp((char *)tag_directive->handle, (char *)tag_handle) == 
0) { size_t prefix_len = strlen((char *)tag_directive->prefix); size_t suffix_len = strlen((char *)tag_suffix); tag = yaml_malloc(prefix_len+suffix_len+1); if (!tag) { parser->error = YAML_MEMORY_ERROR; goto error; } memcpy(tag, tag_directive->prefix, prefix_len); memcpy(tag+prefix_len, tag_suffix, suffix_len); tag[prefix_len+suffix_len] = '\0'; yaml_free(tag_handle); yaml_free(tag_suffix); tag_handle = tag_suffix = NULL; break; } } if (!tag) { yaml_parser_set_parser_error_context(parser, "while parsing a node", start_mark, "found undefined tag handle", tag_mark); goto error; } } } implicit = (!tag || !*tag); if (indentless_sequence && token->type == YAML_BLOCK_ENTRY_TOKEN) { end_mark = token->end_mark; parser->state = YAML_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE; SEQUENCE_START_EVENT_INIT(*event, anchor, tag, implicit, YAML_BLOCK_SEQUENCE_STYLE, start_mark, end_mark); return 1; } else { if (token->type == YAML_SCALAR_TOKEN) { int plain_implicit = 0; int quoted_implicit = 0; end_mark = token->end_mark; if ((token->data.scalar.style == YAML_PLAIN_SCALAR_STYLE && !tag) || (tag && strcmp((char *)tag, "!") == 0)) { plain_implicit = 1; } else if (!tag) { quoted_implicit = 1; } parser->state = POP(parser, parser->states); SCALAR_EVENT_INIT(*event, anchor, tag, token->data.scalar.value, token->data.scalar.length, plain_implicit, quoted_implicit, token->data.scalar.style, start_mark, end_mark); SKIP_TOKEN(parser); return 1; } else if (token->type == YAML_FLOW_SEQUENCE_START_TOKEN) { end_mark = token->end_mark; parser->state = YAML_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE; SEQUENCE_START_EVENT_INIT(*event, anchor, tag, implicit, YAML_FLOW_SEQUENCE_STYLE, start_mark, end_mark); return 1; } else if (token->type == YAML_FLOW_MAPPING_START_TOKEN) { end_mark = token->end_mark; parser->state = YAML_PARSE_FLOW_MAPPING_FIRST_KEY_STATE; MAPPING_START_EVENT_INIT(*event, anchor, tag, implicit, YAML_FLOW_MAPPING_STYLE, start_mark, end_mark); return 1; } else if (block && token->type == 
YAML_BLOCK_SEQUENCE_START_TOKEN) { end_mark = token->end_mark; parser->state = YAML_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE; SEQUENCE_START_EVENT_INIT(*event, anchor, tag, implicit, YAML_BLOCK_SEQUENCE_STYLE, start_mark, end_mark); return 1; } else if (block && token->type == YAML_BLOCK_MAPPING_START_TOKEN) { end_mark = token->end_mark; parser->state = YAML_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE; MAPPING_START_EVENT_INIT(*event, anchor, tag, implicit, YAML_BLOCK_MAPPING_STYLE, start_mark, end_mark); return 1; } else if (anchor || tag) { yaml_char_t *value = yaml_malloc(1); if (!value) { parser->error = YAML_MEMORY_ERROR; goto error; } value[0] = '\0'; parser->state = POP(parser, parser->states); SCALAR_EVENT_INIT(*event, anchor, tag, value, 0, implicit, 0, YAML_PLAIN_SCALAR_STYLE, start_mark, end_mark); return 1; } else { yaml_parser_set_parser_error_context(parser, (block ? "while parsing a block node" : "while parsing a flow node"), start_mark, "did not find expected node content", token->start_mark); goto error; } } } error: yaml_free(anchor); yaml_free(tag_handle); yaml_free(tag_suffix); yaml_free(tag); return 0; } /* * Parse the productions: * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END * ******************** *********** * ********* */ static int yaml_parser_parse_block_sequence_entry(yaml_parser_t *parser, yaml_event_t *event, int first) { yaml_token_t *token; if (first) { token = PEEK_TOKEN(parser); if (!PUSH(parser, parser->marks, token->start_mark)) return 0; SKIP_TOKEN(parser); } token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_BLOCK_ENTRY_TOKEN) { yaml_mark_t mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_BLOCK_ENTRY_TOKEN && token->type != YAML_BLOCK_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)) return 0; return yaml_parser_parse_node(parser, event, 1, 0); } else { parser->state = 
YAML_PARSE_BLOCK_SEQUENCE_ENTRY_STATE; return yaml_parser_process_empty_scalar(parser, event, mark); } } else if (token->type == YAML_BLOCK_END_TOKEN) { yaml_mark_t dummy_mark; /* Used to eliminate a compiler warning. */ parser->state = POP(parser, parser->states); dummy_mark = POP(parser, parser->marks); SEQUENCE_END_EVENT_INIT(*event, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } else { return yaml_parser_set_parser_error_context(parser, "while parsing a block collection", POP(parser, parser->marks), "did not find expected '-' indicator", token->start_mark); } } /* * Parse the productions: * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ * *********** * */ static int yaml_parser_parse_indentless_sequence_entry(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_BLOCK_ENTRY_TOKEN) { yaml_mark_t mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_BLOCK_ENTRY_TOKEN && token->type != YAML_KEY_TOKEN && token->type != YAML_VALUE_TOKEN && token->type != YAML_BLOCK_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)) return 0; return yaml_parser_parse_node(parser, event, 1, 0); } else { parser->state = YAML_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE; return yaml_parser_process_empty_scalar(parser, event, mark); } } else { parser->state = POP(parser, parser->states); SEQUENCE_END_EVENT_INIT(*event, token->start_mark, token->start_mark); return 1; } } /* * Parse the productions: * block_mapping ::= BLOCK-MAPPING_START * ******************* * ((KEY block_node_or_indentless_sequence?)? 
* *** * * (VALUE block_node_or_indentless_sequence?)?)* * * BLOCK-END * ********* */ static int yaml_parser_parse_block_mapping_key(yaml_parser_t *parser, yaml_event_t *event, int first) { yaml_token_t *token; if (first) { token = PEEK_TOKEN(parser); if (!PUSH(parser, parser->marks, token->start_mark)) return 0; SKIP_TOKEN(parser); } token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_KEY_TOKEN) { yaml_mark_t mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_KEY_TOKEN && token->type != YAML_VALUE_TOKEN && token->type != YAML_BLOCK_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_BLOCK_MAPPING_VALUE_STATE)) return 0; return yaml_parser_parse_node(parser, event, 1, 1); } else { parser->state = YAML_PARSE_BLOCK_MAPPING_VALUE_STATE; return yaml_parser_process_empty_scalar(parser, event, mark); } } else if (token->type == YAML_BLOCK_END_TOKEN) { yaml_mark_t dummy_mark; /* Used to eliminate a compiler warning. */ parser->state = POP(parser, parser->states); dummy_mark = POP(parser, parser->marks); MAPPING_END_EVENT_INIT(*event, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } else { return yaml_parser_set_parser_error_context(parser, "while parsing a block mapping", POP(parser, parser->marks), "did not find expected key", token->start_mark); } } /* * Parse the productions: * block_mapping ::= BLOCK-MAPPING_START * * ((KEY block_node_or_indentless_sequence?)? 
* * (VALUE block_node_or_indentless_sequence?)?)* * ***** * * BLOCK-END * */ static int yaml_parser_parse_block_mapping_value(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_VALUE_TOKEN) { yaml_mark_t mark = token->end_mark; SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_KEY_TOKEN && token->type != YAML_VALUE_TOKEN && token->type != YAML_BLOCK_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_BLOCK_MAPPING_KEY_STATE)) return 0; return yaml_parser_parse_node(parser, event, 1, 1); } else { parser->state = YAML_PARSE_BLOCK_MAPPING_KEY_STATE; return yaml_parser_process_empty_scalar(parser, event, mark); } } else { parser->state = YAML_PARSE_BLOCK_MAPPING_KEY_STATE; return yaml_parser_process_empty_scalar(parser, event, token->start_mark); } } /* * Parse the productions: * flow_sequence ::= FLOW-SEQUENCE-START * ******************* * (flow_sequence_entry FLOW-ENTRY)* * * ********** * flow_sequence_entry? * * * FLOW-SEQUENCE-END * ***************** * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? * * */ static int yaml_parser_parse_flow_sequence_entry(yaml_parser_t *parser, yaml_event_t *event, int first) { yaml_token_t *token; yaml_mark_t dummy_mark; /* Used to eliminate a compiler warning. 
*/ if (first) { token = PEEK_TOKEN(parser); if (!PUSH(parser, parser->marks, token->start_mark)) return 0; SKIP_TOKEN(parser); } token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_FLOW_SEQUENCE_END_TOKEN) { if (!first) { if (token->type == YAML_FLOW_ENTRY_TOKEN) { SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; } else { return yaml_parser_set_parser_error_context(parser, "while parsing a flow sequence", POP(parser, parser->marks), "did not find expected ',' or ']'", token->start_mark); } } if (token->type == YAML_KEY_TOKEN) { parser->state = YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE; MAPPING_START_EVENT_INIT(*event, NULL, NULL, 1, YAML_FLOW_MAPPING_STYLE, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } else if (token->type != YAML_FLOW_SEQUENCE_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_FLOW_SEQUENCE_ENTRY_STATE)) return 0; return yaml_parser_parse_node(parser, event, 0, 0); } } parser->state = POP(parser, parser->states); dummy_mark = POP(parser, parser->marks); SEQUENCE_END_EVENT_INIT(*event, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } /* * Parse the productions: * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
* *** * */ static int yaml_parser_parse_flow_sequence_entry_mapping_key(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_VALUE_TOKEN && token->type != YAML_FLOW_ENTRY_TOKEN && token->type != YAML_FLOW_SEQUENCE_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)) return 0; return yaml_parser_parse_node(parser, event, 0, 0); } else { yaml_mark_t mark = token->end_mark; SKIP_TOKEN(parser); parser->state = YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE; return yaml_parser_process_empty_scalar(parser, event, mark); } } /* * Parse the productions: * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? * ***** * */ static int yaml_parser_parse_flow_sequence_entry_mapping_value(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type == YAML_VALUE_TOKEN) { SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_FLOW_ENTRY_TOKEN && token->type != YAML_FLOW_SEQUENCE_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)) return 0; return yaml_parser_parse_node(parser, event, 0, 0); } } parser->state = YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE; return yaml_parser_process_empty_scalar(parser, event, token->start_mark); } /* * Parse the productions: * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
* * */ static int yaml_parser_parse_flow_sequence_entry_mapping_end(yaml_parser_t *parser, yaml_event_t *event) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; parser->state = YAML_PARSE_FLOW_SEQUENCE_ENTRY_STATE; MAPPING_END_EVENT_INIT(*event, token->start_mark, token->start_mark); return 1; } /* * Parse the productions: * flow_mapping ::= FLOW-MAPPING-START * ****************** * (flow_mapping_entry FLOW-ENTRY)* * * ********** * flow_mapping_entry? * ****************** * FLOW-MAPPING-END * **************** * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? * * *** * */ static int yaml_parser_parse_flow_mapping_key(yaml_parser_t *parser, yaml_event_t *event, int first) { yaml_token_t *token; yaml_mark_t dummy_mark; /* Used to eliminate a compiler warning. */ if (first) { token = PEEK_TOKEN(parser); if (!PUSH(parser, parser->marks, token->start_mark)) return 0; SKIP_TOKEN(parser); } token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_FLOW_MAPPING_END_TOKEN) { if (!first) { if (token->type == YAML_FLOW_ENTRY_TOKEN) { SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; } else { return yaml_parser_set_parser_error_context(parser, "while parsing a flow mapping", POP(parser, parser->marks), "did not find expected ',' or '}'", token->start_mark); } } if (token->type == YAML_KEY_TOKEN) { SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_VALUE_TOKEN && token->type != YAML_FLOW_ENTRY_TOKEN && token->type != YAML_FLOW_MAPPING_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_FLOW_MAPPING_VALUE_STATE)) return 0; return yaml_parser_parse_node(parser, event, 0, 0); } else { parser->state = YAML_PARSE_FLOW_MAPPING_VALUE_STATE; return yaml_parser_process_empty_scalar(parser, event, token->start_mark); } } else if (token->type != YAML_FLOW_MAPPING_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)) return 
0; return yaml_parser_parse_node(parser, event, 0, 0); } } parser->state = POP(parser, parser->states); dummy_mark = POP(parser, parser->marks); MAPPING_END_EVENT_INIT(*event, token->start_mark, token->end_mark); SKIP_TOKEN(parser); return 1; } /* * Parse the productions: * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? * * ***** * */ static int yaml_parser_parse_flow_mapping_value(yaml_parser_t *parser, yaml_event_t *event, int empty) { yaml_token_t *token; token = PEEK_TOKEN(parser); if (!token) return 0; if (empty) { parser->state = YAML_PARSE_FLOW_MAPPING_KEY_STATE; return yaml_parser_process_empty_scalar(parser, event, token->start_mark); } if (token->type == YAML_VALUE_TOKEN) { SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) return 0; if (token->type != YAML_FLOW_ENTRY_TOKEN && token->type != YAML_FLOW_MAPPING_END_TOKEN) { if (!PUSH(parser, parser->states, YAML_PARSE_FLOW_MAPPING_KEY_STATE)) return 0; return yaml_parser_parse_node(parser, event, 0, 0); } } parser->state = YAML_PARSE_FLOW_MAPPING_KEY_STATE; return yaml_parser_process_empty_scalar(parser, event, token->start_mark); } /* * Generate an empty scalar event. */ static int yaml_parser_process_empty_scalar(yaml_parser_t *parser, yaml_event_t *event, yaml_mark_t mark) { yaml_char_t *value; value = yaml_malloc(1); if (!value) { parser->error = YAML_MEMORY_ERROR; return 0; } value[0] = '\0'; SCALAR_EVENT_INIT(*event, NULL, NULL, value, 0, 1, 0, YAML_PLAIN_SCALAR_STYLE, mark, mark); return 1; } /* * Parse directives. 
*/ static int yaml_parser_process_directives(yaml_parser_t *parser, yaml_version_directive_t **version_directive_ref, yaml_tag_directive_t **tag_directives_start_ref, yaml_tag_directive_t **tag_directives_end_ref) { yaml_tag_directive_t default_tag_directives[] = { {(yaml_char_t *)"!", (yaml_char_t *)"!"}, {(yaml_char_t *)"!!", (yaml_char_t *)"tag:yaml.org,2002:"}, {NULL, NULL} }; yaml_tag_directive_t *default_tag_directive; yaml_version_directive_t *version_directive = NULL; struct { yaml_tag_directive_t *start; yaml_tag_directive_t *end; yaml_tag_directive_t *top; } tag_directives = { NULL, NULL, NULL }; yaml_token_t *token; if (!STACK_INIT(parser, tag_directives, INITIAL_STACK_SIZE)) goto error; token = PEEK_TOKEN(parser); if (!token) goto error; while (token->type == YAML_VERSION_DIRECTIVE_TOKEN || token->type == YAML_TAG_DIRECTIVE_TOKEN) { if (token->type == YAML_VERSION_DIRECTIVE_TOKEN) { if (version_directive) { yaml_parser_set_parser_error(parser, "found duplicate %YAML directive", token->start_mark); goto error; } if (token->data.version_directive.major != 1 || token->data.version_directive.minor != 1) { yaml_parser_set_parser_error(parser, "found incompatible YAML document", token->start_mark); goto error; } version_directive = yaml_malloc(sizeof(yaml_version_directive_t)); if (!version_directive) { parser->error = YAML_MEMORY_ERROR; goto error; } version_directive->major = token->data.version_directive.major; version_directive->minor = token->data.version_directive.minor; } else if (token->type == YAML_TAG_DIRECTIVE_TOKEN) { yaml_tag_directive_t value; value.handle = token->data.tag_directive.handle; value.prefix = token->data.tag_directive.prefix; if (!yaml_parser_append_tag_directive(parser, value, 0, token->start_mark)) goto error; if (!PUSH(parser, tag_directives, value)) goto error; } SKIP_TOKEN(parser); token = PEEK_TOKEN(parser); if (!token) goto error; } for (default_tag_directive = default_tag_directives; default_tag_directive->handle; 
default_tag_directive++) { if (!yaml_parser_append_tag_directive(parser, *default_tag_directive, 1, token->start_mark)) goto error; } if (version_directive_ref) { *version_directive_ref = version_directive; } if (tag_directives_start_ref) { if (STACK_EMPTY(parser, tag_directives)) { *tag_directives_start_ref = *tag_directives_end_ref = NULL; STACK_DEL(parser, tag_directives); } else { *tag_directives_start_ref = tag_directives.start; *tag_directives_end_ref = tag_directives.top; } } else { STACK_DEL(parser, tag_directives); } return 1; error: yaml_free(version_directive); while (!STACK_EMPTY(parser, tag_directives)) { yaml_tag_directive_t tag_directive = POP(parser, tag_directives); yaml_free(tag_directive.handle); yaml_free(tag_directive.prefix); } STACK_DEL(parser, tag_directives); return 0; } /* * Append a tag directive to the directives stack. */ static int yaml_parser_append_tag_directive(yaml_parser_t *parser, yaml_tag_directive_t value, int allow_duplicates, yaml_mark_t mark) { yaml_tag_directive_t *tag_directive; yaml_tag_directive_t copy = { NULL, NULL }; for (tag_directive = parser->tag_directives.start; tag_directive != parser->tag_directives.top; tag_directive ++) { if (strcmp((char *)value.handle, (char *)tag_directive->handle) == 0) { if (allow_duplicates) return 1; return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark); } } copy.handle = yaml_strdup(value.handle); copy.prefix = yaml_strdup(value.prefix); if (!copy.handle || !copy.prefix) { parser->error = YAML_MEMORY_ERROR; goto error; } if (!PUSH(parser, parser->tag_directives, copy)) goto error; return 1; error: yaml_free(copy.handle); yaml_free(copy.prefix); return 0; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/emitter.c0000644000000000000000000020030113306562377022602 0ustar rootroot #include "yaml_private.h" /* * Flush the buffer if needed. 
*/ #define FLUSH(emitter) \ ((emitter->buffer.pointer+5 < emitter->buffer.end) \ || yaml_emitter_flush(emitter)) /* * Put a character to the output buffer. */ #define PUT(emitter,value) \ (FLUSH(emitter) \ && (*(emitter->buffer.pointer++) = (yaml_char_t)(value), \ emitter->column ++, \ 1)) /* * Put a line break to the output buffer. */ #define PUT_BREAK(emitter) \ (FLUSH(emitter) \ && ((emitter->line_break == YAML_CR_BREAK ? \ (*(emitter->buffer.pointer++) = (yaml_char_t) '\r') : \ emitter->line_break == YAML_LN_BREAK ? \ (*(emitter->buffer.pointer++) = (yaml_char_t) '\n') : \ emitter->line_break == YAML_CRLN_BREAK ? \ (*(emitter->buffer.pointer++) = (yaml_char_t) '\r', \ *(emitter->buffer.pointer++) = (yaml_char_t) '\n') : 0), \ emitter->column = 0, \ emitter->line ++, \ 1)) /* * Copy a character from a string into buffer. */ #define WRITE(emitter,string) \ (FLUSH(emitter) \ && (COPY(emitter->buffer,string), \ emitter->column ++, \ 1)) /* * Copy a line break character from a string into buffer. */ #define WRITE_BREAK(emitter,string) \ (FLUSH(emitter) \ && (CHECK(string,'\n') ? \ (PUT_BREAK(emitter), \ string.pointer ++, \ 1) : \ (COPY(emitter->buffer,string), \ emitter->column = 0, \ emitter->line ++, \ 1))) /* * API functions. */ YAML_DECLARE(int) yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event); /* * Utility functions. */ static int yaml_emitter_set_emitter_error(yaml_emitter_t *emitter, const char *problem); static int yaml_emitter_need_more_events(yaml_emitter_t *emitter); static int yaml_emitter_append_tag_directive(yaml_emitter_t *emitter, yaml_tag_directive_t value, int allow_duplicates); static int yaml_emitter_increase_indent(yaml_emitter_t *emitter, int flow, int indentless); /* * State functions. 
*/ static int yaml_emitter_state_machine(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_stream_start(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_document_start(yaml_emitter_t *emitter, yaml_event_t *event, int first); static int yaml_emitter_emit_document_content(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_document_end(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_flow_sequence_item(yaml_emitter_t *emitter, yaml_event_t *event, int first); static int yaml_emitter_emit_flow_mapping_key(yaml_emitter_t *emitter, yaml_event_t *event, int first); static int yaml_emitter_emit_flow_mapping_value(yaml_emitter_t *emitter, yaml_event_t *event, int simple); static int yaml_emitter_emit_block_sequence_item(yaml_emitter_t *emitter, yaml_event_t *event, int first); static int yaml_emitter_emit_block_mapping_key(yaml_emitter_t *emitter, yaml_event_t *event, int first); static int yaml_emitter_emit_block_mapping_value(yaml_emitter_t *emitter, yaml_event_t *event, int simple); static int yaml_emitter_emit_node(yaml_emitter_t *emitter, yaml_event_t *event, int root, int sequence, int mapping, int simple_key); static int yaml_emitter_emit_alias(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_scalar(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_sequence_start(yaml_emitter_t *emitter, yaml_event_t *event); static int yaml_emitter_emit_mapping_start(yaml_emitter_t *emitter, yaml_event_t *event); /* * Checkers. */ static int yaml_emitter_check_empty_document(yaml_emitter_t *emitter); static int yaml_emitter_check_empty_sequence(yaml_emitter_t *emitter); static int yaml_emitter_check_empty_mapping(yaml_emitter_t *emitter); static int yaml_emitter_check_simple_key(yaml_emitter_t *emitter); static int yaml_emitter_select_scalar_style(yaml_emitter_t *emitter, yaml_event_t *event); /* * Processors. 
*/ static int yaml_emitter_process_anchor(yaml_emitter_t *emitter); static int yaml_emitter_process_tag(yaml_emitter_t *emitter); static int yaml_emitter_process_scalar(yaml_emitter_t *emitter); /* * Analyzers. */ static int yaml_emitter_analyze_version_directive(yaml_emitter_t *emitter, yaml_version_directive_t version_directive); static int yaml_emitter_analyze_tag_directive(yaml_emitter_t *emitter, yaml_tag_directive_t tag_directive); static int yaml_emitter_analyze_anchor(yaml_emitter_t *emitter, yaml_char_t *anchor, int alias); static int yaml_emitter_analyze_tag(yaml_emitter_t *emitter, yaml_char_t *tag); static int yaml_emitter_analyze_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length); static int yaml_emitter_analyze_event(yaml_emitter_t *emitter, yaml_event_t *event); /* * Writers. */ static int yaml_emitter_write_bom(yaml_emitter_t *emitter); static int yaml_emitter_write_indent(yaml_emitter_t *emitter); static int yaml_emitter_write_indicator(yaml_emitter_t *emitter, char *indicator, int need_whitespace, int is_whitespace, int is_indention); static int yaml_emitter_write_anchor(yaml_emitter_t *emitter, yaml_char_t *value, size_t length); static int yaml_emitter_write_tag_handle(yaml_emitter_t *emitter, yaml_char_t *value, size_t length); static int yaml_emitter_write_tag_content(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int need_whitespace); static int yaml_emitter_write_plain_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int allow_breaks); static int yaml_emitter_write_single_quoted_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int allow_breaks); static int yaml_emitter_write_double_quoted_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int allow_breaks); static int yaml_emitter_write_block_scalar_hints(yaml_emitter_t *emitter, yaml_string_t string); static int yaml_emitter_write_literal_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length); 
static int
yaml_emitter_write_folded_scalar(yaml_emitter_t *emitter,
        yaml_char_t *value, size_t length);

/*
 * Set an emitter error and return 0.
 */

static int
yaml_emitter_set_emitter_error(yaml_emitter_t *emitter, const char *problem)
{
    emitter->error = YAML_EMITTER_ERROR;
    emitter->problem = problem;

    return 0;
}

/*
 * Emit an event.
 *
 * The emitter takes ownership of the event: it is either queued (and freed
 * after being emitted) or deleted immediately on queue failure.
 */

YAML_DECLARE(int)
yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event)
{
    if (!ENQUEUE(emitter, emitter->events, *event)) {
        /* The queue owns nothing; release the caller's event ourselves. */
        yaml_event_delete(event);
        return 0;
    }

    /* Drain the queue as long as enough events are buffered to decide
     * on styles (see yaml_emitter_need_more_events). */
    while (!yaml_emitter_need_more_events(emitter)) {
        if (!yaml_emitter_analyze_event(emitter, emitter->events.head))
            return 0;
        if (!yaml_emitter_state_machine(emitter, emitter->events.head))
            return 0;
        yaml_event_delete(&DEQUEUE(emitter, emitter->events));
    }

    return 1;
}

/*
 * Check if we need to accumulate more events before emitting.
 *
 * We accumulate extra
 *  - 1 event for DOCUMENT-START
 *  - 2 events for SEQUENCE-START
 *  - 3 events for MAPPING-START
 */

static int
yaml_emitter_need_more_events(yaml_emitter_t *emitter)
{
    int level = 0;
    int accumulate = 0;
    yaml_event_t *event;

    if (QUEUE_EMPTY(emitter, emitter->events))
        return 1;

    /* Only collection-opening events need look-ahead (for the
     * empty-collection and simple-key checks). */
    switch (emitter->events.head->type) {
        case YAML_DOCUMENT_START_EVENT:
            accumulate = 1;
            break;
        case YAML_SEQUENCE_START_EVENT:
            accumulate = 2;
            break;
        case YAML_MAPPING_START_EVENT:
            accumulate = 3;
            break;
        default:
            return 0;
    }

    if (emitter->events.tail - emitter->events.head > accumulate)
        return 0;

    for (event = emitter->events.head;
            event != emitter->events.tail; event ++) {
        switch (event->type) {
            case YAML_STREAM_START_EVENT:
            case YAML_DOCUMENT_START_EVENT:
            case YAML_SEQUENCE_START_EVENT:
            case YAML_MAPPING_START_EVENT:
                level += 1;
                break;
            case YAML_STREAM_END_EVENT:
            case YAML_DOCUMENT_END_EVENT:
            case YAML_SEQUENCE_END_EVENT:
            case YAML_MAPPING_END_EVENT:
                level -= 1;
                break;
            default:
                break;
        }
        /* The collection that started at the queue head has been closed
         * within the buffered events; no more look-ahead is needed. */
        if (!level)
            return 0;
    }

    return 1;
}

/*
 * Append a directive to the directives stack.
 *
 * Takes a deep copy of the directive; frees the copy on any failure.
 */

static int
yaml_emitter_append_tag_directive(yaml_emitter_t *emitter,
        yaml_tag_directive_t value, int allow_duplicates)
{
    yaml_tag_directive_t *tag_directive;
    yaml_tag_directive_t copy = { NULL, NULL };

    for (tag_directive = emitter->tag_directives.start;
            tag_directive != emitter->tag_directives.top; tag_directive ++) {
        if (strcmp((char *)value.handle, (char *)tag_directive->handle) == 0) {
            if (allow_duplicates)
                return 1;
            return yaml_emitter_set_emitter_error(emitter,
                    "duplicate %TAG directive");
        }
    }

    copy.handle = yaml_strdup(value.handle);
    copy.prefix = yaml_strdup(value.prefix);
    if (!copy.handle || !copy.prefix) {
        emitter->error = YAML_MEMORY_ERROR;
        goto error;
    }

    if (!PUSH(emitter, emitter->tag_directives, copy))
        goto error;

    return 1;

error:
    /* yaml_free(NULL) is a no-op, so partial copies are safe to free. */
    yaml_free(copy.handle);
    yaml_free(copy.prefix);
    return 0;
}

/*
 * Increase the indentation level.
 */

static int
yaml_emitter_increase_indent(yaml_emitter_t *emitter, int flow, int indentless)
{
    if (!PUSH(emitter, emitter->indents, emitter->indent))
        return 0;

    if (emitter->indent < 0) {
        /* Top level: flow collections start indented, block ones do not. */
        emitter->indent = flow ? emitter->best_indent : 0;
    }
    else if (!indentless) {
        emitter->indent += emitter->best_indent;
    }

    return 1;
}

/*
 * State dispatcher.
*/ static int yaml_emitter_state_machine(yaml_emitter_t *emitter, yaml_event_t *event) { switch (emitter->state) { case YAML_EMIT_STREAM_START_STATE: return yaml_emitter_emit_stream_start(emitter, event); case YAML_EMIT_FIRST_DOCUMENT_START_STATE: return yaml_emitter_emit_document_start(emitter, event, 1); case YAML_EMIT_DOCUMENT_START_STATE: return yaml_emitter_emit_document_start(emitter, event, 0); case YAML_EMIT_DOCUMENT_CONTENT_STATE: return yaml_emitter_emit_document_content(emitter, event); case YAML_EMIT_DOCUMENT_END_STATE: return yaml_emitter_emit_document_end(emitter, event); case YAML_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: return yaml_emitter_emit_flow_sequence_item(emitter, event, 1); case YAML_EMIT_FLOW_SEQUENCE_ITEM_STATE: return yaml_emitter_emit_flow_sequence_item(emitter, event, 0); case YAML_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: return yaml_emitter_emit_flow_mapping_key(emitter, event, 1); case YAML_EMIT_FLOW_MAPPING_KEY_STATE: return yaml_emitter_emit_flow_mapping_key(emitter, event, 0); case YAML_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: return yaml_emitter_emit_flow_mapping_value(emitter, event, 1); case YAML_EMIT_FLOW_MAPPING_VALUE_STATE: return yaml_emitter_emit_flow_mapping_value(emitter, event, 0); case YAML_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: return yaml_emitter_emit_block_sequence_item(emitter, event, 1); case YAML_EMIT_BLOCK_SEQUENCE_ITEM_STATE: return yaml_emitter_emit_block_sequence_item(emitter, event, 0); case YAML_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: return yaml_emitter_emit_block_mapping_key(emitter, event, 1); case YAML_EMIT_BLOCK_MAPPING_KEY_STATE: return yaml_emitter_emit_block_mapping_key(emitter, event, 0); case YAML_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: return yaml_emitter_emit_block_mapping_value(emitter, event, 1); case YAML_EMIT_BLOCK_MAPPING_VALUE_STATE: return yaml_emitter_emit_block_mapping_value(emitter, event, 0); case YAML_EMIT_END_STATE: return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END"); 
default: assert(1); /* Invalid state. */ } return 0; } /* * Expect STREAM-START. */ static int yaml_emitter_emit_stream_start(yaml_emitter_t *emitter, yaml_event_t *event) { if (event->type == YAML_STREAM_START_EVENT) { if (!emitter->encoding) { emitter->encoding = event->data.stream_start.encoding; } if (!emitter->encoding) { emitter->encoding = YAML_UTF8_ENCODING; } if (emitter->best_indent < 2 || emitter->best_indent > 9) { emitter->best_indent = 2; } if (emitter->best_width >= 0 && emitter->best_width <= emitter->best_indent*2) { emitter->best_width = 80; } if (emitter->best_width < 0) { emitter->best_width = INT_MAX; } if (!emitter->line_break) { emitter->line_break = YAML_LN_BREAK; } emitter->indent = -1; emitter->line = 0; emitter->column = 0; emitter->whitespace = 1; emitter->indention = 1; if (emitter->encoding != YAML_UTF8_ENCODING) { if (!yaml_emitter_write_bom(emitter)) return 0; } emitter->state = YAML_EMIT_FIRST_DOCUMENT_START_STATE; return 1; } return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START"); } /* * Expect DOCUMENT-START or STREAM-END. 
 */

static int
yaml_emitter_emit_document_start(yaml_emitter_t *emitter,
        yaml_event_t *event, int first)
{
    if (event->type == YAML_DOCUMENT_START_EVENT)
    {
        /* Handles that are always in scope: the local tag space and the
         * standard YAML tag space. */
        yaml_tag_directive_t default_tag_directives[] = {
            {(yaml_char_t *)"!", (yaml_char_t *)"!"},
            {(yaml_char_t *)"!!", (yaml_char_t *)"tag:yaml.org,2002:"},
            {NULL, NULL}
        };
        yaml_tag_directive_t *tag_directive;
        int implicit;

        if (event->data.document_start.version_directive) {
            if (!yaml_emitter_analyze_version_directive(emitter,
                        *event->data.document_start.version_directive))
                return 0;
        }

        /* Validate and register the explicit %TAG directives (duplicates
         * are an error here). */
        for (tag_directive = event->data.document_start.tag_directives.start;
                tag_directive != event->data.document_start.tag_directives.end;
                tag_directive ++) {
            if (!yaml_emitter_analyze_tag_directive(emitter, *tag_directive))
                return 0;
            if (!yaml_emitter_append_tag_directive(emitter, *tag_directive, 0))
                return 0;
        }

        /* Register the defaults; duplicates are silently allowed. */
        for (tag_directive = default_tag_directives;
                tag_directive->handle; tag_directive ++) {
            if (!yaml_emitter_append_tag_directive(emitter, *tag_directive, 1))
                return 0;
        }

        implicit = event->data.document_start.implicit;
        if (!first || emitter->canonical) {
            implicit = 0;
        }

        /* A directive after an open-ended document requires an explicit
         * "..." end marker first. */
        if ((event->data.document_start.version_directive ||
                    (event->data.document_start.tag_directives.start
                     != event->data.document_start.tag_directives.end)) &&
                emitter->open_ended)
        {
            if (!yaml_emitter_write_indicator(emitter, "...", 1, 0, 0))
                return 0;
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }

        if (event->data.document_start.version_directive) {
            implicit = 0;
            if (!yaml_emitter_write_indicator(emitter, "%YAML", 1, 0, 0))
                return 0;
            if (!yaml_emitter_write_indicator(emitter, "1.1", 1, 0, 0))
                return 0;
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }

        if (event->data.document_start.tag_directives.start
                != event->data.document_start.tag_directives.end) {
            implicit = 0;
            for (tag_directive = event->data.document_start.tag_directives.start;
                    tag_directive != event->data.document_start.tag_directives.end;
                    tag_directive ++) {
                if (!yaml_emitter_write_indicator(emitter,
                            "%TAG", 1, 0, 0))
                    return 0;
                if (!yaml_emitter_write_tag_handle(emitter,
                            tag_directive->handle,
                            strlen((char *)tag_directive->handle)))
                    return 0;
                if (!yaml_emitter_write_tag_content(emitter,
                            tag_directive->prefix,
                            strlen((char *)tag_directive->prefix), 1))
                    return 0;
                if (!yaml_emitter_write_indent(emitter))
                    return 0;
            }
        }

        if (yaml_emitter_check_empty_document(emitter)) {
            implicit = 0;
        }

        if (!implicit) {
            if (!yaml_emitter_write_indent(emitter))
                return 0;
            if (!yaml_emitter_write_indicator(emitter, "---", 1, 0, 0))
                return 0;
            if (emitter->canonical) {
                if (!yaml_emitter_write_indent(emitter))
                    return 0;
            }
        }

        emitter->state = YAML_EMIT_DOCUMENT_CONTENT_STATE;

        return 1;
    }

    else if (event->type == YAML_STREAM_END_EVENT)
    {
        if (emitter->open_ended)
        {
            if (!yaml_emitter_write_indicator(emitter, "...", 1, 0, 0))
                return 0;
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }

        if (!yaml_emitter_flush(emitter))
            return 0;

        emitter->state = YAML_EMIT_END_STATE;

        return 1;
    }

    return yaml_emitter_set_emitter_error(emitter,
            "expected DOCUMENT-START or STREAM-END");
}

/*
 * Expect the root node.
 */

static int
yaml_emitter_emit_document_content(yaml_emitter_t *emitter,
        yaml_event_t *event)
{
    if (!PUSH(emitter, emitter->states, YAML_EMIT_DOCUMENT_END_STATE))
        return 0;

    return yaml_emitter_emit_node(emitter, event, 1, 0, 0, 0);
}

/*
 * Expect DOCUMENT-END.
 */

static int
yaml_emitter_emit_document_end(yaml_emitter_t *emitter, yaml_event_t *event)
{
    if (event->type == YAML_DOCUMENT_END_EVENT)
    {
        if (!yaml_emitter_write_indent(emitter))
            return 0;
        if (!event->data.document_end.implicit) {
            if (!yaml_emitter_write_indicator(emitter, "...", 1, 0, 0))
                return 0;
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }
        if (!yaml_emitter_flush(emitter))
            return 0;

        emitter->state = YAML_EMIT_DOCUMENT_START_STATE;

        /* Directives are per-document; release the copies taken by
         * yaml_emitter_append_tag_directive. */
        while (!STACK_EMPTY(emitter, emitter->tag_directives)) {
            yaml_tag_directive_t tag_directive = POP(emitter,
                    emitter->tag_directives);
            yaml_free(tag_directive.handle);
            yaml_free(tag_directive.prefix);
        }

        return 1;
    }

    return yaml_emitter_set_emitter_error(emitter,
            "expected DOCUMENT-END");
}

/*
 * Expect a flow item node.
 */

static int
yaml_emitter_emit_flow_sequence_item(yaml_emitter_t *emitter,
        yaml_event_t *event, int first)
{
    if (first)
    {
        if (!yaml_emitter_write_indicator(emitter, "[", 1, 1, 0))
            return 0;
        if (!yaml_emitter_increase_indent(emitter, 1, 0))
            return 0;
        emitter->flow_level ++;
    }

    if (event->type == YAML_SEQUENCE_END_EVENT)
    {
        emitter->flow_level --;
        emitter->indent = POP(emitter, emitter->indents);
        /* Canonical output closes with a trailing "," and a line break. */
        if (emitter->canonical && !first) {
            if (!yaml_emitter_write_indicator(emitter, ",", 0, 0, 0))
                return 0;
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }
        if (!yaml_emitter_write_indicator(emitter, "]", 0, 0, 0))
            return 0;
        emitter->state = POP(emitter, emitter->states);

        return 1;
    }

    if (!first) {
        if (!yaml_emitter_write_indicator(emitter, ",", 0, 0, 0))
            return 0;
    }

    if (emitter->canonical || emitter->column > emitter->best_width) {
        if (!yaml_emitter_write_indent(emitter))
            return 0;
    }
    if (!PUSH(emitter, emitter->states, YAML_EMIT_FLOW_SEQUENCE_ITEM_STATE))
        return 0;

    return yaml_emitter_emit_node(emitter, event, 0, 1, 0, 0);
}

/*
 * Expect a flow key node.
 */

static int
yaml_emitter_emit_flow_mapping_key(yaml_emitter_t *emitter,
        yaml_event_t *event, int first)
{
    if (first)
    {
        if (!yaml_emitter_write_indicator(emitter, "{", 1, 1, 0))
            return 0;
        if (!yaml_emitter_increase_indent(emitter, 1, 0))
            return 0;
        emitter->flow_level ++;
    }

    if (event->type == YAML_MAPPING_END_EVENT)
    {
        emitter->flow_level --;
        emitter->indent = POP(emitter, emitter->indents);
        if (emitter->canonical && !first) {
            if (!yaml_emitter_write_indicator(emitter, ",", 0, 0, 0))
                return 0;
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }
        if (!yaml_emitter_write_indicator(emitter, "}", 0, 0, 0))
            return 0;
        emitter->state = POP(emitter, emitter->states);

        return 1;
    }

    if (!first) {
        if (!yaml_emitter_write_indicator(emitter, ",", 0, 0, 0))
            return 0;
    }
    if (emitter->canonical || emitter->column > emitter->best_width) {
        if (!yaml_emitter_write_indent(emitter))
            return 0;
    }

    /* Short keys are written inline ("key: value"); long or complex keys
     * use the explicit "? key" form. Canonical mode always uses "?". */
    if (!emitter->canonical && yaml_emitter_check_simple_key(emitter))
    {
        if (!PUSH(emitter, emitter->states,
                    YAML_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE))
            return 0;

        return yaml_emitter_emit_node(emitter, event, 0, 0, 1, 1);
    }
    else
    {
        if (!yaml_emitter_write_indicator(emitter, "?", 1, 0, 0))
            return 0;
        if (!PUSH(emitter, emitter->states,
                    YAML_EMIT_FLOW_MAPPING_VALUE_STATE))
            return 0;

        return yaml_emitter_emit_node(emitter, event, 0, 0, 1, 0);
    }
}

/*
 * Expect a flow value node.
 */

static int
yaml_emitter_emit_flow_mapping_value(yaml_emitter_t *emitter,
        yaml_event_t *event, int simple)
{
    if (simple) {
        if (!yaml_emitter_write_indicator(emitter, ":", 0, 0, 0))
            return 0;
    }
    else {
        if (emitter->canonical || emitter->column > emitter->best_width) {
            if (!yaml_emitter_write_indent(emitter))
                return 0;
        }
        if (!yaml_emitter_write_indicator(emitter, ":", 1, 0, 0))
            return 0;
    }
    if (!PUSH(emitter, emitter->states, YAML_EMIT_FLOW_MAPPING_KEY_STATE))
        return 0;

    return yaml_emitter_emit_node(emitter, event, 0, 0, 1, 0);
}

/*
 * Expect a block item node.
*/ static int yaml_emitter_emit_block_sequence_item(yaml_emitter_t *emitter, yaml_event_t *event, int first) { if (first) { if (!yaml_emitter_increase_indent(emitter, 0, (emitter->mapping_context && !emitter->indention))) return 0; } if (event->type == YAML_SEQUENCE_END_EVENT) { emitter->indent = POP(emitter, emitter->indents); emitter->state = POP(emitter, emitter->states); return 1; } if (!yaml_emitter_write_indent(emitter)) return 0; if (!yaml_emitter_write_indicator(emitter, "-", 1, 0, 1)) return 0; if (!PUSH(emitter, emitter->states, YAML_EMIT_BLOCK_SEQUENCE_ITEM_STATE)) return 0; return yaml_emitter_emit_node(emitter, event, 0, 1, 0, 0); } /* * Expect a block key node. */ static int yaml_emitter_emit_block_mapping_key(yaml_emitter_t *emitter, yaml_event_t *event, int first) { if (first) { if (!yaml_emitter_increase_indent(emitter, 0, 0)) return 0; } if (event->type == YAML_MAPPING_END_EVENT) { emitter->indent = POP(emitter, emitter->indents); emitter->state = POP(emitter, emitter->states); return 1; } if (!yaml_emitter_write_indent(emitter)) return 0; if (yaml_emitter_check_simple_key(emitter)) { if (!PUSH(emitter, emitter->states, YAML_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)) return 0; return yaml_emitter_emit_node(emitter, event, 0, 0, 1, 1); } else { if (!yaml_emitter_write_indicator(emitter, "?", 1, 0, 1)) return 0; if (!PUSH(emitter, emitter->states, YAML_EMIT_BLOCK_MAPPING_VALUE_STATE)) return 0; return yaml_emitter_emit_node(emitter, event, 0, 0, 1, 0); } } /* * Expect a block value node. 
*/ static int yaml_emitter_emit_block_mapping_value(yaml_emitter_t *emitter, yaml_event_t *event, int simple) { if (simple) { if (!yaml_emitter_write_indicator(emitter, ":", 0, 0, 0)) return 0; } else { if (!yaml_emitter_write_indent(emitter)) return 0; if (!yaml_emitter_write_indicator(emitter, ":", 1, 0, 1)) return 0; } if (!PUSH(emitter, emitter->states, YAML_EMIT_BLOCK_MAPPING_KEY_STATE)) return 0; return yaml_emitter_emit_node(emitter, event, 0, 0, 1, 0); } /* * Expect a node. */ static int yaml_emitter_emit_node(yaml_emitter_t *emitter, yaml_event_t *event, int root, int sequence, int mapping, int simple_key) { emitter->root_context = root; emitter->sequence_context = sequence; emitter->mapping_context = mapping; emitter->simple_key_context = simple_key; switch (event->type) { case YAML_ALIAS_EVENT: return yaml_emitter_emit_alias(emitter, event); case YAML_SCALAR_EVENT: return yaml_emitter_emit_scalar(emitter, event); case YAML_SEQUENCE_START_EVENT: return yaml_emitter_emit_sequence_start(emitter, event); case YAML_MAPPING_START_EVENT: return yaml_emitter_emit_mapping_start(emitter, event); default: return yaml_emitter_set_emitter_error(emitter, "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS"); } return 0; } /* * Expect ALIAS. */ static int yaml_emitter_emit_alias(yaml_emitter_t *emitter, yaml_event_t *event) { if (!yaml_emitter_process_anchor(emitter)) return 0; emitter->state = POP(emitter, emitter->states); return 1; } /* * Expect SCALAR. */ static int yaml_emitter_emit_scalar(yaml_emitter_t *emitter, yaml_event_t *event) { if (!yaml_emitter_select_scalar_style(emitter, event)) return 0; if (!yaml_emitter_process_anchor(emitter)) return 0; if (!yaml_emitter_process_tag(emitter)) return 0; if (!yaml_emitter_increase_indent(emitter, 1, 0)) return 0; if (!yaml_emitter_process_scalar(emitter)) return 0; emitter->indent = POP(emitter, emitter->indents); emitter->state = POP(emitter, emitter->states); return 1; } /* * Expect SEQUENCE-START. 
*/ static int yaml_emitter_emit_sequence_start(yaml_emitter_t *emitter, yaml_event_t *event) { if (!yaml_emitter_process_anchor(emitter)) return 0; if (!yaml_emitter_process_tag(emitter)) return 0; if (emitter->flow_level || emitter->canonical || event->data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE || yaml_emitter_check_empty_sequence(emitter)) { emitter->state = YAML_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE; } else { emitter->state = YAML_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE; } return 1; } /* * Expect MAPPING-START. */ static int yaml_emitter_emit_mapping_start(yaml_emitter_t *emitter, yaml_event_t *event) { if (!yaml_emitter_process_anchor(emitter)) return 0; if (!yaml_emitter_process_tag(emitter)) return 0; if (emitter->flow_level || emitter->canonical || event->data.mapping_start.style == YAML_FLOW_MAPPING_STYLE || yaml_emitter_check_empty_mapping(emitter)) { emitter->state = YAML_EMIT_FLOW_MAPPING_FIRST_KEY_STATE; } else { emitter->state = YAML_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE; } return 1; } /* * Check if the document content is an empty scalar. */ static int yaml_emitter_check_empty_document(yaml_emitter_t *emitter) { return 0; } /* * Check if the next events represent an empty sequence. */ static int yaml_emitter_check_empty_sequence(yaml_emitter_t *emitter) { if (emitter->events.tail - emitter->events.head < 2) return 0; return (emitter->events.head[0].type == YAML_SEQUENCE_START_EVENT && emitter->events.head[1].type == YAML_SEQUENCE_END_EVENT); } /* * Check if the next events represent an empty mapping. */ static int yaml_emitter_check_empty_mapping(yaml_emitter_t *emitter) { if (emitter->events.tail - emitter->events.head < 2) return 0; return (emitter->events.head[0].type == YAML_MAPPING_START_EVENT && emitter->events.head[1].type == YAML_MAPPING_END_EVENT); } /* * Check if the next node can be expressed as a simple key. 
 */

static int
yaml_emitter_check_simple_key(yaml_emitter_t *emitter)
{
    yaml_event_t *event = emitter->events.head;
    size_t length = 0;

    /* Estimate the rendered key length from the data prepared by
     * yaml_emitter_analyze_event. */
    switch (event->type)
    {
        case YAML_ALIAS_EVENT:
            length += emitter->anchor_data.anchor_length;
            break;

        case YAML_SCALAR_EVENT:
            /* Multiline scalars can never be simple keys. */
            if (emitter->scalar_data.multiline)
                return 0;
            length += emitter->anchor_data.anchor_length
                + emitter->tag_data.handle_length
                + emitter->tag_data.suffix_length
                + emitter->scalar_data.length;
            break;

        case YAML_SEQUENCE_START_EVENT:
            /* Only an empty collection may serve as a simple key. */
            if (!yaml_emitter_check_empty_sequence(emitter))
                return 0;
            length += emitter->anchor_data.anchor_length
                + emitter->tag_data.handle_length
                + emitter->tag_data.suffix_length;
            break;

        case YAML_MAPPING_START_EVENT:
            if (!yaml_emitter_check_empty_mapping(emitter))
                return 0;
            length += emitter->anchor_data.anchor_length
                + emitter->tag_data.handle_length
                + emitter->tag_data.suffix_length;
            break;

        default:
            return 0;
    }

    /* Simple keys are limited to 128 characters. */
    if (length > 128)
        return 0;

    return 1;
}

/*
 * Determine an acceptable scalar style.
 */

static int
yaml_emitter_select_scalar_style(yaml_emitter_t *emitter, yaml_event_t *event)
{
    yaml_scalar_style_t style = event->data.scalar.style;
    int no_tag = (!emitter->tag_data.handle && !emitter->tag_data.suffix);

    /* A tag-less scalar must be resolvable implicitly in at least one
     * style, otherwise re-parsing would change its type. */
    if (no_tag && !event->data.scalar.plain_implicit
            && !event->data.scalar.quoted_implicit) {
        return yaml_emitter_set_emitter_error(emitter,
                "neither tag nor implicit flags are specified");
    }

    if (style == YAML_ANY_SCALAR_STYLE)
        style = YAML_PLAIN_SCALAR_STYLE;

    if (emitter->canonical)
        style = YAML_DOUBLE_QUOTED_SCALAR_STYLE;

    if (emitter->simple_key_context && emitter->scalar_data.multiline)
        style = YAML_DOUBLE_QUOTED_SCALAR_STYLE;

    /* Downgrade the requested style whenever the analyzed content does
     * not permit it, in the order plain -> single -> double quoted. */
    if (style == YAML_PLAIN_SCALAR_STYLE)
    {
        if ((emitter->flow_level && !emitter->scalar_data.flow_plain_allowed)
                || (!emitter->flow_level
                    && !emitter->scalar_data.block_plain_allowed))
            style = YAML_SINGLE_QUOTED_SCALAR_STYLE;
        /* An empty plain scalar is ambiguous in these contexts. */
        if (!emitter->scalar_data.length
                && (emitter->flow_level || emitter->simple_key_context))
            style = YAML_SINGLE_QUOTED_SCALAR_STYLE;
        if (no_tag && !event->data.scalar.plain_implicit)
            style = YAML_SINGLE_QUOTED_SCALAR_STYLE;
    }

    if (style == YAML_SINGLE_QUOTED_SCALAR_STYLE)
    {
        if (!emitter->scalar_data.single_quoted_allowed)
            style = YAML_DOUBLE_QUOTED_SCALAR_STYLE;
    }

    if (style == YAML_LITERAL_SCALAR_STYLE || style == YAML_FOLDED_SCALAR_STYLE)
    {
        if (!emitter->scalar_data.block_allowed
                || emitter->flow_level || emitter->simple_key_context)
            style = YAML_DOUBLE_QUOTED_SCALAR_STYLE;
    }

    /* A quoted scalar with no tag keeps its type only via "!". */
    if (no_tag && !event->data.scalar.quoted_implicit
            && style != YAML_PLAIN_SCALAR_STYLE)
    {
        emitter->tag_data.handle = (yaml_char_t *)"!";
        emitter->tag_data.handle_length = 1;
    }

    emitter->scalar_data.style = style;

    return 1;
}

/*
 * Write an anchor.
 */

static int
yaml_emitter_process_anchor(yaml_emitter_t *emitter)
{
    if (!emitter->anchor_data.anchor)
        return 1;

    /* "*" introduces an alias, "&" an anchor definition. */
    if (!yaml_emitter_write_indicator(emitter,
                (emitter->anchor_data.alias ? "*" : "&"), 1, 0, 0))
        return 0;

    return yaml_emitter_write_anchor(emitter,
            emitter->anchor_data.anchor, emitter->anchor_data.anchor_length);
}

/*
 * Write a tag.
 */

static int
yaml_emitter_process_tag(yaml_emitter_t *emitter)
{
    if (!emitter->tag_data.handle && !emitter->tag_data.suffix)
        return 1;

    if (emitter->tag_data.handle)
    {
        /* Shorthand form: "handle!suffix". */
        if (!yaml_emitter_write_tag_handle(emitter, emitter->tag_data.handle,
                    emitter->tag_data.handle_length))
            return 0;
        if (emitter->tag_data.suffix) {
            if (!yaml_emitter_write_tag_content(emitter,
                        emitter->tag_data.suffix,
                        emitter->tag_data.suffix_length, 0))
                return 0;
        }
    }
    else
    {
        /* Verbatim form: "!<...>". */
        if (!yaml_emitter_write_indicator(emitter, "!<", 1, 0, 0))
            return 0;
        if (!yaml_emitter_write_tag_content(emitter,
                    emitter->tag_data.suffix,
                    emitter->tag_data.suffix_length, 0))
            return 0;
        if (!yaml_emitter_write_indicator(emitter, ">", 0, 0, 0))
            return 0;
    }

    return 1;
}

/*
 * Write a scalar.
*/ static int yaml_emitter_process_scalar(yaml_emitter_t *emitter) { switch (emitter->scalar_data.style) { case YAML_PLAIN_SCALAR_STYLE: return yaml_emitter_write_plain_scalar(emitter, emitter->scalar_data.value, emitter->scalar_data.length, !emitter->simple_key_context); case YAML_SINGLE_QUOTED_SCALAR_STYLE: return yaml_emitter_write_single_quoted_scalar(emitter, emitter->scalar_data.value, emitter->scalar_data.length, !emitter->simple_key_context); case YAML_DOUBLE_QUOTED_SCALAR_STYLE: return yaml_emitter_write_double_quoted_scalar(emitter, emitter->scalar_data.value, emitter->scalar_data.length, !emitter->simple_key_context); case YAML_LITERAL_SCALAR_STYLE: return yaml_emitter_write_literal_scalar(emitter, emitter->scalar_data.value, emitter->scalar_data.length); case YAML_FOLDED_SCALAR_STYLE: return yaml_emitter_write_folded_scalar(emitter, emitter->scalar_data.value, emitter->scalar_data.length); default: assert(1); /* Impossible. */ } return 0; } /* * Check if a %YAML directive is valid. */ static int yaml_emitter_analyze_version_directive(yaml_emitter_t *emitter, yaml_version_directive_t version_directive) { if (version_directive.major != 1 || version_directive.minor != 1) { return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive"); } return 1; } /* * Check if a %TAG directive is valid. 
 */

static int
yaml_emitter_analyze_tag_directive(yaml_emitter_t *emitter,
        yaml_tag_directive_t tag_directive)
{
    yaml_string_t handle;
    yaml_string_t prefix;
    size_t handle_length;
    size_t prefix_length;

    handle_length = strlen((char *)tag_directive.handle);
    prefix_length = strlen((char *)tag_directive.prefix);
    STRING_ASSIGN(handle, tag_directive.handle, handle_length);
    STRING_ASSIGN(prefix, tag_directive.prefix, prefix_length);

    if (handle.start == handle.end) {
        return yaml_emitter_set_emitter_error(emitter,
                "tag handle must not be empty");
    }

    if (handle.start[0] != '!') {
        return yaml_emitter_set_emitter_error(emitter,
                "tag handle must start with '!'");
    }

    if (handle.end[-1] != '!') {
        return yaml_emitter_set_emitter_error(emitter,
                "tag handle must end with '!'");
    }

    /* Skip the leading '!' and validate the interior (everything up to,
     * but excluding, the trailing '!'). */
    handle.pointer ++;

    while (handle.pointer < handle.end-1) {
        if (!IS_ALPHA(handle)) {
            return yaml_emitter_set_emitter_error(emitter,
                    "tag handle must contain alphanumerical characters only");
        }
        MOVE(handle);
    }

    if (prefix.start == prefix.end) {
        return yaml_emitter_set_emitter_error(emitter,
                "tag prefix must not be empty");
    }

    return 1;
}

/*
 * Check if an anchor is valid.
 *
 * On success the anchor is recorded in emitter->anchor_data for the
 * writer stage.
 */

static int
yaml_emitter_analyze_anchor(yaml_emitter_t *emitter,
        yaml_char_t *anchor, int alias)
{
    size_t anchor_length;
    yaml_string_t string;

    anchor_length = strlen((char *)anchor);
    STRING_ASSIGN(string, anchor, anchor_length);

    if (string.start == string.end) {
        return yaml_emitter_set_emitter_error(emitter, alias ?
                "alias value must not be empty" :
                "anchor value must not be empty");
    }

    while (string.pointer != string.end) {
        if (!IS_ALPHA(string)) {
            return yaml_emitter_set_emitter_error(emitter, alias ?
                    "alias value must contain alphanumerical characters only" :
                    "anchor value must contain alphanumerical characters only");
        }
        MOVE(string);
    }

    emitter->anchor_data.anchor = string.start;
    emitter->anchor_data.anchor_length = string.end - string.start;
    emitter->anchor_data.alias = alias;

    return 1;
}

/*
 * Check if a tag is valid.
*/ static int yaml_emitter_analyze_tag(yaml_emitter_t *emitter, yaml_char_t *tag) { size_t tag_length; yaml_string_t string; yaml_tag_directive_t *tag_directive; tag_length = strlen((char *)tag); STRING_ASSIGN(string, tag, tag_length); if (string.start == string.end) { return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty"); } for (tag_directive = emitter->tag_directives.start; tag_directive != emitter->tag_directives.top; tag_directive ++) { size_t prefix_length = strlen((char *)tag_directive->prefix); if (prefix_length < (size_t)(string.end - string.start) && strncmp((char *)tag_directive->prefix, (char *)string.start, prefix_length) == 0) { emitter->tag_data.handle = tag_directive->handle; emitter->tag_data.handle_length = strlen((char *)tag_directive->handle); emitter->tag_data.suffix = string.start + prefix_length; emitter->tag_data.suffix_length = (string.end - string.start) - prefix_length; return 1; } } emitter->tag_data.suffix = string.start; emitter->tag_data.suffix_length = string.end - string.start; return 1; } /* * Check if a scalar is valid. 
 */

static int
yaml_emitter_analyze_scalar(yaml_emitter_t *emitter,
        yaml_char_t *value, size_t length)
{
    yaml_string_t string;

    /* Features of the scalar content that constrain the output style. */
    int block_indicators = 0;
    int flow_indicators = 0;
    int line_breaks = 0;
    int special_characters = 0;

    int leading_space = 0;
    int leading_break = 0;
    int trailing_space = 0;
    int trailing_break = 0;
    int break_space = 0;
    int space_break = 0;

    /* NOTE: "preceeded" is a historical misspelling kept as-is. */
    int preceeded_by_whitespace = 0;
    int followed_by_whitespace = 0;
    int previous_space = 0;
    int previous_break = 0;

    STRING_ASSIGN(string, value, length);

    emitter->scalar_data.value = value;
    emitter->scalar_data.length = length;

    /* An empty scalar: only block-plain or single-quoted can express it. */
    if (string.start == string.end)
    {
        emitter->scalar_data.multiline = 0;
        emitter->scalar_data.flow_plain_allowed = 0;
        emitter->scalar_data.block_plain_allowed = 1;
        emitter->scalar_data.single_quoted_allowed = 1;
        emitter->scalar_data.block_allowed = 0;

        return 1;
    }

    /* A leading "---" or "..." would look like a document marker. */
    if ((CHECK_AT(string, '-', 0)
                && CHECK_AT(string, '-', 1)
                && CHECK_AT(string, '-', 2)) ||
            (CHECK_AT(string, '.', 0)
             && CHECK_AT(string, '.', 1)
             && CHECK_AT(string, '.', 2))) {
        block_indicators = 1;
        flow_indicators = 1;
    }

    preceeded_by_whitespace = 1;
    followed_by_whitespace = IS_BLANKZ_AT(string, WIDTH(string));

    while (string.pointer != string.end)
    {
        /* Indicator characters are only special at the start of the
         * scalar, or in certain whitespace contexts. */
        if (string.start == string.pointer)
        {
            if (CHECK(string, '#') || CHECK(string, ',')
                    || CHECK(string, '[') || CHECK(string, ']')
                    || CHECK(string, '{') || CHECK(string, '}')
                    || CHECK(string, '&') || CHECK(string, '*')
                    || CHECK(string, '!') || CHECK(string, '|')
                    || CHECK(string, '>') || CHECK(string, '\'')
                    || CHECK(string, '"') || CHECK(string, '%')
                    || CHECK(string, '@') || CHECK(string, '`')) {
                flow_indicators = 1;
                block_indicators = 1;
            }

            if (CHECK(string, '?') || CHECK(string, ':')) {
                flow_indicators = 1;
                if (followed_by_whitespace) {
                    block_indicators = 1;
                }
            }

            if (CHECK(string, '-') && followed_by_whitespace) {
                flow_indicators = 1;
                block_indicators = 1;
            }
        }
        else
        {
            if (CHECK(string, ',') || CHECK(string, '?')
                    || CHECK(string, '[') || CHECK(string, ']')
                    || CHECK(string, '{') || CHECK(string, '}')) {
                flow_indicators = 1;
            }

            if (CHECK(string, ':')) {
                flow_indicators = 1;
                if (followed_by_whitespace) {
                    block_indicators = 1;
                }
            }

            if (CHECK(string, '#') && preceeded_by_whitespace) {
                flow_indicators = 1;
                block_indicators = 1;
            }
        }

        if (!IS_PRINTABLE(string)
                || (!IS_ASCII(string) && !emitter->unicode)) {
            special_characters = 1;
        }

        if (IS_BREAK(string)) {
            line_breaks = 1;
        }

        /* Track whitespace at the edges and space/break adjacency, all of
         * which restrict the allowed styles below. */
        if (IS_SPACE(string))
        {
            if (string.start == string.pointer) {
                leading_space = 1;
            }
            if (string.pointer+WIDTH(string) == string.end) {
                trailing_space = 1;
            }
            if (previous_break) {
                break_space = 1;
            }
            previous_space = 1;
            previous_break = 0;
        }
        else if (IS_BREAK(string))
        {
            if (string.start == string.pointer) {
                leading_break = 1;
            }
            if (string.pointer+WIDTH(string) == string.end) {
                trailing_break = 1;
            }
            if (previous_space) {
                space_break = 1;
            }
            previous_space = 0;
            previous_break = 1;
        }
        else
        {
            previous_space = 0;
            previous_break = 0;
        }

        preceeded_by_whitespace = IS_BLANKZ(string);
        MOVE(string);
        if (string.pointer != string.end) {
            followed_by_whitespace = IS_BLANKZ_AT(string, WIDTH(string));
        }
    }

    emitter->scalar_data.multiline = line_breaks;

    emitter->scalar_data.flow_plain_allowed = 1;
    emitter->scalar_data.block_plain_allowed = 1;
    emitter->scalar_data.single_quoted_allowed = 1;
    emitter->scalar_data.block_allowed = 1;

    /* Plain scalars cannot preserve edge whitespace. */
    if (leading_space || leading_break || trailing_space || trailing_break) {
        emitter->scalar_data.flow_plain_allowed = 0;
        emitter->scalar_data.block_plain_allowed = 0;
    }

    /* Block scalars strip trailing spaces. */
    if (trailing_space) {
        emitter->scalar_data.block_allowed = 0;
    }

    /* Line folding would eat the space after a break. */
    if (break_space) {
        emitter->scalar_data.flow_plain_allowed = 0;
        emitter->scalar_data.block_plain_allowed = 0;
        emitter->scalar_data.single_quoted_allowed = 0;
    }

    /* Only double-quoting can escape these. */
    if (space_break || special_characters) {
        emitter->scalar_data.flow_plain_allowed = 0;
        emitter->scalar_data.block_plain_allowed = 0;
        emitter->scalar_data.single_quoted_allowed = 0;
        emitter->scalar_data.block_allowed = 0;
    }

    if (line_breaks) {
        emitter->scalar_data.flow_plain_allowed = 0;
        emitter->scalar_data.block_plain_allowed = 0;
    }

    if (flow_indicators) {
        emitter->scalar_data.flow_plain_allowed = 0;
    }

    if (block_indicators) {
        emitter->scalar_data.block_plain_allowed = 0;
    }

    return 1;
}

/*
 * Check if the event data is valid.
 *
 * Resets and then fills emitter->{anchor,tag,scalar}_data for the
 * emitting stage.
 */

static int
yaml_emitter_analyze_event(yaml_emitter_t *emitter,
        yaml_event_t *event)
{
    emitter->anchor_data.anchor = NULL;
    emitter->anchor_data.anchor_length = 0;
    emitter->tag_data.handle = NULL;
    emitter->tag_data.handle_length = 0;
    emitter->tag_data.suffix = NULL;
    emitter->tag_data.suffix_length = 0;
    emitter->scalar_data.value = NULL;
    emitter->scalar_data.length = 0;

    switch (event->type)
    {
        case YAML_ALIAS_EVENT:
            if (!yaml_emitter_analyze_anchor(emitter,
                        event->data.alias.anchor, 1))
                return 0;
            return 1;

        case YAML_SCALAR_EVENT:
            if (event->data.scalar.anchor) {
                if (!yaml_emitter_analyze_anchor(emitter,
                            event->data.scalar.anchor, 0))
                    return 0;
            }
            /* The tag is only needed when it cannot be left implicit. */
            if (event->data.scalar.tag && (emitter->canonical ||
                        (!event->data.scalar.plain_implicit
                         && !event->data.scalar.quoted_implicit))) {
                if (!yaml_emitter_analyze_tag(emitter,
                            event->data.scalar.tag))
                    return 0;
            }
            if (!yaml_emitter_analyze_scalar(emitter,
                        event->data.scalar.value, event->data.scalar.length))
                return 0;
            return 1;

        case YAML_SEQUENCE_START_EVENT:
            if (event->data.sequence_start.anchor) {
                if (!yaml_emitter_analyze_anchor(emitter,
                            event->data.sequence_start.anchor, 0))
                    return 0;
            }
            if (event->data.sequence_start.tag && (emitter->canonical ||
                        !event->data.sequence_start.implicit)) {
                if (!yaml_emitter_analyze_tag(emitter,
                            event->data.sequence_start.tag))
                    return 0;
            }
            return 1;

        case YAML_MAPPING_START_EVENT:
            if (event->data.mapping_start.anchor) {
                if (!yaml_emitter_analyze_anchor(emitter,
                            event->data.mapping_start.anchor, 0))
                    return 0;
            }
            if (event->data.mapping_start.tag && (emitter->canonical ||
                        !event->data.mapping_start.implicit)) {
                if (!yaml_emitter_analyze_tag(emitter,
                            event->data.mapping_start.tag))
                    return 0;
            }
            return 1;

        default:
            return 1;
    }
}

/*
 * Write the BOM character.
*/ static int yaml_emitter_write_bom(yaml_emitter_t *emitter) { if (!FLUSH(emitter)) return 0; *(emitter->buffer.pointer++) = (yaml_char_t) '\xEF'; *(emitter->buffer.pointer++) = (yaml_char_t) '\xBB'; *(emitter->buffer.pointer++) = (yaml_char_t) '\xBF'; return 1; } static int yaml_emitter_write_indent(yaml_emitter_t *emitter) { int indent = (emitter->indent >= 0) ? emitter->indent : 0; if (!emitter->indention || emitter->column > indent || (emitter->column == indent && !emitter->whitespace)) { if (!PUT_BREAK(emitter)) return 0; } while (emitter->column < indent) { if (!PUT(emitter, ' ')) return 0; } emitter->whitespace = 1; emitter->indention = 1; return 1; } static int yaml_emitter_write_indicator(yaml_emitter_t *emitter, char *indicator, int need_whitespace, int is_whitespace, int is_indention) { size_t indicator_length; yaml_string_t string; indicator_length = strlen(indicator); STRING_ASSIGN(string, (yaml_char_t *)indicator, indicator_length); if (need_whitespace && !emitter->whitespace) { if (!PUT(emitter, ' ')) return 0; } while (string.pointer != string.end) { if (!WRITE(emitter, string)) return 0; } emitter->whitespace = is_whitespace; emitter->indention = (emitter->indention && is_indention); emitter->open_ended = 0; return 1; } static int yaml_emitter_write_anchor(yaml_emitter_t *emitter, yaml_char_t *value, size_t length) { yaml_string_t string; STRING_ASSIGN(string, value, length); while (string.pointer != string.end) { if (!WRITE(emitter, string)) return 0; } emitter->whitespace = 0; emitter->indention = 0; return 1; } static int yaml_emitter_write_tag_handle(yaml_emitter_t *emitter, yaml_char_t *value, size_t length) { yaml_string_t string; STRING_ASSIGN(string, value, length); if (!emitter->whitespace) { if (!PUT(emitter, ' ')) return 0; } while (string.pointer != string.end) { if (!WRITE(emitter, string)) return 0; } emitter->whitespace = 0; emitter->indention = 0; return 1; } static int yaml_emitter_write_tag_content(yaml_emitter_t *emitter, 
yaml_char_t *value, size_t length, int need_whitespace) { yaml_string_t string; STRING_ASSIGN(string, value, length); if (need_whitespace && !emitter->whitespace) { if (!PUT(emitter, ' ')) return 0; } while (string.pointer != string.end) { if (IS_ALPHA(string) || CHECK(string, ';') || CHECK(string, '/') || CHECK(string, '?') || CHECK(string, ':') || CHECK(string, '@') || CHECK(string, '&') || CHECK(string, '=') || CHECK(string, '+') || CHECK(string, '$') || CHECK(string, ',') || CHECK(string, '_') || CHECK(string, '.') || CHECK(string, '~') || CHECK(string, '*') || CHECK(string, '\'') || CHECK(string, '(') || CHECK(string, ')') || CHECK(string, '[') || CHECK(string, ']')) { if (!WRITE(emitter, string)) return 0; } else { int width = WIDTH(string); unsigned int value; while (width --) { value = *(string.pointer++); if (!PUT(emitter, '%')) return 0; if (!PUT(emitter, (value >> 4) + ((value >> 4) < 10 ? '0' : 'A' - 10))) return 0; if (!PUT(emitter, (value & 0x0F) + ((value & 0x0F) < 10 ? '0' : 'A' - 10))) return 0; } } } emitter->whitespace = 0; emitter->indention = 0; return 1; } static int yaml_emitter_write_plain_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int allow_breaks) { yaml_string_t string; int spaces = 0; int breaks = 0; STRING_ASSIGN(string, value, length); if (!emitter->whitespace) { if (!PUT(emitter, ' ')) return 0; } while (string.pointer != string.end) { if (IS_SPACE(string)) { if (allow_breaks && !spaces && emitter->column > emitter->best_width && !IS_SPACE_AT(string, 1)) { if (!yaml_emitter_write_indent(emitter)) return 0; MOVE(string); } else { if (!WRITE(emitter, string)) return 0; } spaces = 1; } else if (IS_BREAK(string)) { if (!breaks && CHECK(string, '\n')) { if (!PUT_BREAK(emitter)) return 0; } if (!WRITE_BREAK(emitter, string)) return 0; emitter->indention = 1; breaks = 1; } else { if (breaks) { if (!yaml_emitter_write_indent(emitter)) return 0; } if (!WRITE(emitter, string)) return 0; emitter->indention = 0; spaces = 
0; breaks = 0; } } emitter->whitespace = 0; emitter->indention = 0; if (emitter->root_context) { emitter->open_ended = 1; } return 1; } static int yaml_emitter_write_single_quoted_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int allow_breaks) { yaml_string_t string; int spaces = 0; int breaks = 0; STRING_ASSIGN(string, value, length); if (!yaml_emitter_write_indicator(emitter, "'", 1, 0, 0)) return 0; while (string.pointer != string.end) { if (IS_SPACE(string)) { if (allow_breaks && !spaces && emitter->column > emitter->best_width && string.pointer != string.start && string.pointer != string.end - 1 && !IS_SPACE_AT(string, 1)) { if (!yaml_emitter_write_indent(emitter)) return 0; MOVE(string); } else { if (!WRITE(emitter, string)) return 0; } spaces = 1; } else if (IS_BREAK(string)) { if (!breaks && CHECK(string, '\n')) { if (!PUT_BREAK(emitter)) return 0; } if (!WRITE_BREAK(emitter, string)) return 0; emitter->indention = 1; breaks = 1; } else { if (breaks) { if (!yaml_emitter_write_indent(emitter)) return 0; } if (CHECK(string, '\'')) { if (!PUT(emitter, '\'')) return 0; } if (!WRITE(emitter, string)) return 0; emitter->indention = 0; spaces = 0; breaks = 0; } } if (!yaml_emitter_write_indicator(emitter, "'", 0, 0, 0)) return 0; emitter->whitespace = 0; emitter->indention = 0; return 1; } static int yaml_emitter_write_double_quoted_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length, int allow_breaks) { yaml_string_t string; int spaces = 0; STRING_ASSIGN(string, value, length); if (!yaml_emitter_write_indicator(emitter, "\"", 1, 0, 0)) return 0; while (string.pointer != string.end) { if (!IS_PRINTABLE(string) || (!emitter->unicode && !IS_ASCII(string)) || IS_BOM(string) || IS_BREAK(string) || CHECK(string, '"') || CHECK(string, '\\')) { unsigned char octet; unsigned int width; unsigned int value; int k; octet = string.pointer[0]; width = (octet & 0x80) == 0x00 ? 1 : (octet & 0xE0) == 0xC0 ? 2 : (octet & 0xF0) == 0xE0 ? 
3 : (octet & 0xF8) == 0xF0 ? 4 : 0; value = (octet & 0x80) == 0x00 ? octet & 0x7F : (octet & 0xE0) == 0xC0 ? octet & 0x1F : (octet & 0xF0) == 0xE0 ? octet & 0x0F : (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; for (k = 1; k < (int)width; k ++) { octet = string.pointer[k]; value = (value << 6) + (octet & 0x3F); } string.pointer += width; if (!PUT(emitter, '\\')) return 0; switch (value) { case 0x00: if (!PUT(emitter, '0')) return 0; break; case 0x07: if (!PUT(emitter, 'a')) return 0; break; case 0x08: if (!PUT(emitter, 'b')) return 0; break; case 0x09: if (!PUT(emitter, 't')) return 0; break; case 0x0A: if (!PUT(emitter, 'n')) return 0; break; case 0x0B: if (!PUT(emitter, 'v')) return 0; break; case 0x0C: if (!PUT(emitter, 'f')) return 0; break; case 0x0D: if (!PUT(emitter, 'r')) return 0; break; case 0x1B: if (!PUT(emitter, 'e')) return 0; break; case 0x22: if (!PUT(emitter, '\"')) return 0; break; case 0x5C: if (!PUT(emitter, '\\')) return 0; break; case 0x85: if (!PUT(emitter, 'N')) return 0; break; case 0xA0: if (!PUT(emitter, '_')) return 0; break; case 0x2028: if (!PUT(emitter, 'L')) return 0; break; case 0x2029: if (!PUT(emitter, 'P')) return 0; break; default: if (value <= 0xFF) { if (!PUT(emitter, 'x')) return 0; width = 2; } else if (value <= 0xFFFF) { if (!PUT(emitter, 'u')) return 0; width = 4; } else { if (!PUT(emitter, 'U')) return 0; width = 8; } for (k = (width-1)*4; k >= 0; k -= 4) { int digit = (value >> k) & 0x0F; if (!PUT(emitter, digit + (digit < 10 ? 
'0' : 'A'-10))) return 0; } } spaces = 0; } else if (IS_SPACE(string)) { if (allow_breaks && !spaces && emitter->column > emitter->best_width && string.pointer != string.start && string.pointer != string.end - 1) { if (!yaml_emitter_write_indent(emitter)) return 0; if (IS_SPACE_AT(string, 1)) { if (!PUT(emitter, '\\')) return 0; } MOVE(string); } else { if (!WRITE(emitter, string)) return 0; } spaces = 1; } else { if (!WRITE(emitter, string)) return 0; spaces = 0; } } if (!yaml_emitter_write_indicator(emitter, "\"", 0, 0, 0)) return 0; emitter->whitespace = 0; emitter->indention = 0; return 1; } static int yaml_emitter_write_block_scalar_hints(yaml_emitter_t *emitter, yaml_string_t string) { char indent_hint[2]; char *chomp_hint = NULL; if (IS_SPACE(string) || IS_BREAK(string)) { indent_hint[0] = '0' + (char)emitter->best_indent; indent_hint[1] = '\0'; if (!yaml_emitter_write_indicator(emitter, indent_hint, 0, 0, 0)) return 0; } emitter->open_ended = 0; string.pointer = string.end; if (string.start == string.pointer) { chomp_hint = "-"; } else { do { string.pointer --; } while ((*string.pointer & 0xC0) == 0x80); if (!IS_BREAK(string)) { chomp_hint = "-"; } else if (string.start == string.pointer) { chomp_hint = "+"; emitter->open_ended = 1; } else { do { string.pointer --; } while ((*string.pointer & 0xC0) == 0x80); if (IS_BREAK(string)) { chomp_hint = "+"; emitter->open_ended = 1; } } } if (chomp_hint) { if (!yaml_emitter_write_indicator(emitter, chomp_hint, 0, 0, 0)) return 0; } return 1; } static int yaml_emitter_write_literal_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length) { yaml_string_t string; int breaks = 1; STRING_ASSIGN(string, value, length); if (!yaml_emitter_write_indicator(emitter, "|", 1, 0, 0)) return 0; if (!yaml_emitter_write_block_scalar_hints(emitter, string)) return 0; if (!PUT_BREAK(emitter)) return 0; emitter->indention = 1; emitter->whitespace = 1; while (string.pointer != string.end) { if (IS_BREAK(string)) { if 
(!WRITE_BREAK(emitter, string)) return 0; emitter->indention = 1; breaks = 1; } else { if (breaks) { if (!yaml_emitter_write_indent(emitter)) return 0; } if (!WRITE(emitter, string)) return 0; emitter->indention = 0; breaks = 0; } } return 1; } static int yaml_emitter_write_folded_scalar(yaml_emitter_t *emitter, yaml_char_t *value, size_t length) { yaml_string_t string; int breaks = 1; int leading_spaces = 1; STRING_ASSIGN(string, value, length); if (!yaml_emitter_write_indicator(emitter, ">", 1, 0, 0)) return 0; if (!yaml_emitter_write_block_scalar_hints(emitter, string)) return 0; if (!PUT_BREAK(emitter)) return 0; emitter->indention = 1; emitter->whitespace = 1; while (string.pointer != string.end) { if (IS_BREAK(string)) { if (!breaks && !leading_spaces && CHECK(string, '\n')) { int k = 0; while (IS_BREAK_AT(string, k)) { k += WIDTH_AT(string, k); } if (!IS_BLANKZ_AT(string, k)) { if (!PUT_BREAK(emitter)) return 0; } } if (!WRITE_BREAK(emitter, string)) return 0; emitter->indention = 1; breaks = 1; } else { if (breaks) { if (!yaml_emitter_write_indent(emitter)) return 0; leading_spaces = IS_BLANK(string); } if (!breaks && IS_SPACE(string) && !IS_SPACE_AT(string, 1) && emitter->column > emitter->best_width) { if (!yaml_emitter_write_indent(emitter)) return 0; MOVE(string); } else { if (!WRITE(emitter, string)) return 0; } emitter->indention = 0; breaks = 0; } } return 1; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/writer.c0000644000000000000000000000771513306562377022463 0ustar rootroot #include "yaml_private.h" /* * Declarations. */ static int yaml_emitter_set_writer_error(yaml_emitter_t *emitter, const char *problem); YAML_DECLARE(int) yaml_emitter_flush(yaml_emitter_t *emitter); /* * Set the writer error and return 0. */ static int yaml_emitter_set_writer_error(yaml_emitter_t *emitter, const char *problem) { emitter->error = YAML_WRITER_ERROR; emitter->problem = problem; return 0; } /* * Flush the output buffer. 
*/ YAML_DECLARE(int) yaml_emitter_flush(yaml_emitter_t *emitter) { int low, high; assert(emitter); /* Non-NULL emitter object is expected. */ assert(emitter->write_handler); /* Write handler must be set. */ assert(emitter->encoding); /* Output encoding must be set. */ emitter->buffer.last = emitter->buffer.pointer; emitter->buffer.pointer = emitter->buffer.start; /* Check if the buffer is empty. */ if (emitter->buffer.start == emitter->buffer.last) { return 1; } /* If the output encoding is UTF-8, we don't need to recode the buffer. */ if (emitter->encoding == YAML_UTF8_ENCODING) { if (emitter->write_handler(emitter->write_handler_data, emitter->buffer.start, emitter->buffer.last - emitter->buffer.start)) { emitter->buffer.last = emitter->buffer.start; emitter->buffer.pointer = emitter->buffer.start; return 1; } else { return yaml_emitter_set_writer_error(emitter, "write error"); } } /* Recode the buffer into the raw buffer. */ low = (emitter->encoding == YAML_UTF16LE_ENCODING ? 0 : 1); high = (emitter->encoding == YAML_UTF16LE_ENCODING ? 1 : 0); while (emitter->buffer.pointer != emitter->buffer.last) { unsigned char octet; unsigned int width; unsigned int value; size_t k; /* * See the "reader.c" code for more details on UTF-8 encoding. Note * that we assume that the buffer contains a valid UTF-8 sequence. */ /* Read the next UTF-8 character. */ octet = emitter->buffer.pointer[0]; width = (octet & 0x80) == 0x00 ? 1 : (octet & 0xE0) == 0xC0 ? 2 : (octet & 0xF0) == 0xE0 ? 3 : (octet & 0xF8) == 0xF0 ? 4 : 0; value = (octet & 0x80) == 0x00 ? octet & 0x7F : (octet & 0xE0) == 0xC0 ? octet & 0x1F : (octet & 0xF0) == 0xE0 ? octet & 0x0F : (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; for (k = 1; k < width; k ++) { octet = emitter->buffer.pointer[k]; value = (value << 6) + (octet & 0x3F); } emitter->buffer.pointer += width; /* Write the character. 
*/ if (value < 0x10000) { emitter->raw_buffer.last[high] = value >> 8; emitter->raw_buffer.last[low] = value & 0xFF; emitter->raw_buffer.last += 2; } else { /* Write the character using a surrogate pair (check "reader.c"). */ value -= 0x10000; emitter->raw_buffer.last[high] = 0xD8 + (value >> 18); emitter->raw_buffer.last[low] = (value >> 10) & 0xFF; emitter->raw_buffer.last[high+2] = 0xDC + ((value >> 8) & 0xFF); emitter->raw_buffer.last[low+2] = value & 0xFF; emitter->raw_buffer.last += 4; } } /* Write the raw buffer. */ if (emitter->write_handler(emitter->write_handler_data, emitter->raw_buffer.start, emitter->raw_buffer.last - emitter->raw_buffer.start)) { emitter->buffer.last = emitter->buffer.start; emitter->buffer.pointer = emitter->buffer.start; emitter->raw_buffer.last = emitter->raw_buffer.start; emitter->raw_buffer.pointer = emitter->raw_buffer.start; return 1; } else { return yaml_emitter_set_writer_error(emitter, "write error"); } } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/reader.c0000644000000000000000000004046213306562377022405 0ustar rootroot #include "yaml_private.h" /* * Declarations. */ static int yaml_parser_set_reader_error(yaml_parser_t *parser, const char *problem, size_t offset, int value); static int yaml_parser_update_raw_buffer(yaml_parser_t *parser); static int yaml_parser_determine_encoding(yaml_parser_t *parser); YAML_DECLARE(int) yaml_parser_update_buffer(yaml_parser_t *parser, size_t length); /* * Set the reader error and return 0. */ static int yaml_parser_set_reader_error(yaml_parser_t *parser, const char *problem, size_t offset, int value) { parser->error = YAML_READER_ERROR; parser->problem = problem; parser->problem_offset = offset; parser->problem_value = value; return 0; } /* * Byte order marks. */ #define BOM_UTF8 "\xef\xbb\xbf" #define BOM_UTF16LE "\xff\xfe" #define BOM_UTF16BE "\xfe\xff" /* * Determine the input stream encoding by checking the BOM symbol. If no BOM is * found, the UTF-8 encoding is assumed. 
Return 1 on success, 0 on failure. */ static int yaml_parser_determine_encoding(yaml_parser_t *parser) { /* Ensure that we had enough bytes in the raw buffer. */ while (!parser->eof && parser->raw_buffer.last - parser->raw_buffer.pointer < 3) { if (!yaml_parser_update_raw_buffer(parser)) { return 0; } } /* Determine the encoding. */ if (parser->raw_buffer.last - parser->raw_buffer.pointer >= 2 && !memcmp(parser->raw_buffer.pointer, BOM_UTF16LE, 2)) { parser->encoding = YAML_UTF16LE_ENCODING; parser->raw_buffer.pointer += 2; parser->offset += 2; } else if (parser->raw_buffer.last - parser->raw_buffer.pointer >= 2 && !memcmp(parser->raw_buffer.pointer, BOM_UTF16BE, 2)) { parser->encoding = YAML_UTF16BE_ENCODING; parser->raw_buffer.pointer += 2; parser->offset += 2; } else if (parser->raw_buffer.last - parser->raw_buffer.pointer >= 3 && !memcmp(parser->raw_buffer.pointer, BOM_UTF8, 3)) { parser->encoding = YAML_UTF8_ENCODING; parser->raw_buffer.pointer += 3; parser->offset += 3; } else { parser->encoding = YAML_UTF8_ENCODING; } return 1; } /* * Update the raw buffer. */ static int yaml_parser_update_raw_buffer(yaml_parser_t *parser) { size_t size_read = 0; /* Return if the raw buffer is full. */ if (parser->raw_buffer.start == parser->raw_buffer.pointer && parser->raw_buffer.last == parser->raw_buffer.end) return 1; /* Return on EOF. */ if (parser->eof) return 1; /* Move the remaining bytes in the raw buffer to the beginning. */ if (parser->raw_buffer.start < parser->raw_buffer.pointer && parser->raw_buffer.pointer < parser->raw_buffer.last) { memmove(parser->raw_buffer.start, parser->raw_buffer.pointer, parser->raw_buffer.last - parser->raw_buffer.pointer); } parser->raw_buffer.last -= parser->raw_buffer.pointer - parser->raw_buffer.start; parser->raw_buffer.pointer = parser->raw_buffer.start; /* Call the read handler to fill the buffer. 
*/ if (!parser->read_handler(parser->read_handler_data, parser->raw_buffer.last, parser->raw_buffer.end - parser->raw_buffer.last, &size_read)) { return yaml_parser_set_reader_error(parser, "input error", parser->offset, -1); } parser->raw_buffer.last += size_read; if (!size_read) { parser->eof = 1; } return 1; } /* * Ensure that the buffer contains at least `length` characters. * Return 1 on success, 0 on failure. * * The length is supposed to be significantly less that the buffer size. */ YAML_DECLARE(int) yaml_parser_update_buffer(yaml_parser_t *parser, size_t length) { int first = 1; assert(parser->read_handler); /* Read handler must be set. */ /* If the EOF flag is set and the raw buffer is empty, do nothing. */ if (parser->eof && parser->raw_buffer.pointer == parser->raw_buffer.last) return 1; /* Return if the buffer contains enough characters. */ if (parser->unread >= length) return 1; /* Determine the input encoding if it is not known yet. */ if (!parser->encoding) { if (!yaml_parser_determine_encoding(parser)) return 0; } /* Move the unread characters to the beginning of the buffer. */ if (parser->buffer.start < parser->buffer.pointer && parser->buffer.pointer < parser->buffer.last) { size_t size = parser->buffer.last - parser->buffer.pointer; memmove(parser->buffer.start, parser->buffer.pointer, size); parser->buffer.pointer = parser->buffer.start; parser->buffer.last = parser->buffer.start + size; } else if (parser->buffer.pointer == parser->buffer.last) { parser->buffer.pointer = parser->buffer.start; parser->buffer.last = parser->buffer.start; } /* Fill the buffer until it has enough characters. */ while (parser->unread < length) { /* Fill the raw buffer if necessary. */ if (!first || parser->raw_buffer.pointer == parser->raw_buffer.last) { if (!yaml_parser_update_raw_buffer(parser)) return 0; } first = 0; /* Decode the raw buffer. 
*/ while (parser->raw_buffer.pointer != parser->raw_buffer.last) { unsigned int value = 0, value2 = 0; int incomplete = 0; unsigned char octet; unsigned int width = 0; int low, high; size_t k; size_t raw_unread = parser->raw_buffer.last - parser->raw_buffer.pointer; /* Decode the next character. */ switch (parser->encoding) { case YAML_UTF8_ENCODING: /* * Decode a UTF-8 character. Check RFC 3629 * (http://www.ietf.org/rfc/rfc3629.txt) for more details. * * The following table (taken from the RFC) is used for * decoding. * * Char. number range | UTF-8 octet sequence * (hexadecimal) | (binary) * --------------------+------------------------------------ * 0000 0000-0000 007F | 0xxxxxxx * 0000 0080-0000 07FF | 110xxxxx 10xxxxxx * 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx * 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx * * Additionally, the characters in the range 0xD800-0xDFFF * are prohibited as they are reserved for use with UTF-16 * surrogate pairs. */ /* Determine the length of the UTF-8 sequence. */ octet = parser->raw_buffer.pointer[0]; width = (octet & 0x80) == 0x00 ? 1 : (octet & 0xE0) == 0xC0 ? 2 : (octet & 0xF0) == 0xE0 ? 3 : (octet & 0xF8) == 0xF0 ? 4 : 0; /* Check if the leading octet is valid. */ if (!width) return yaml_parser_set_reader_error(parser, "invalid leading UTF-8 octet", parser->offset, octet); /* Check if the raw buffer contains an incomplete character. */ if (width > raw_unread) { if (parser->eof) { return yaml_parser_set_reader_error(parser, "incomplete UTF-8 octet sequence", parser->offset, -1); } incomplete = 1; break; } /* Decode the leading octet. */ value = (octet & 0x80) == 0x00 ? octet & 0x7F : (octet & 0xE0) == 0xC0 ? octet & 0x1F : (octet & 0xF0) == 0xE0 ? octet & 0x0F : (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; /* Check and decode the trailing octets. */ for (k = 1; k < width; k ++) { octet = parser->raw_buffer.pointer[k]; /* Check if the octet is valid. 
*/ if ((octet & 0xC0) != 0x80) return yaml_parser_set_reader_error(parser, "invalid trailing UTF-8 octet", parser->offset+k, octet); /* Decode the octet. */ value = (value << 6) + (octet & 0x3F); } /* Check the length of the sequence against the value. */ if (!((width == 1) || (width == 2 && value >= 0x80) || (width == 3 && value >= 0x800) || (width == 4 && value >= 0x10000))) return yaml_parser_set_reader_error(parser, "invalid length of a UTF-8 sequence", parser->offset, -1); /* Check the range of the value. */ if ((value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF) return yaml_parser_set_reader_error(parser, "invalid Unicode character", parser->offset, value); break; case YAML_UTF16LE_ENCODING: case YAML_UTF16BE_ENCODING: low = (parser->encoding == YAML_UTF16LE_ENCODING ? 0 : 1); high = (parser->encoding == YAML_UTF16LE_ENCODING ? 1 : 0); /* * The UTF-16 encoding is not as simple as one might * naively think. Check RFC 2781 * (http://www.ietf.org/rfc/rfc2781.txt). * * Normally, two subsequent bytes describe a Unicode * character. However a special technique (called a * surrogate pair) is used for specifying character * values larger than 0xFFFF. * * A surrogate pair consists of two pseudo-characters: * high surrogate area (0xD800-0xDBFF) * low surrogate area (0xDC00-0xDFFF) * * The following formulas are used for decoding * and encoding characters using surrogate pairs: * * U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) * U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) * W1 = 110110yyyyyyyyyy * W2 = 110111xxxxxxxxxx * * where U is the character value, W1 is the high surrogate * area, W2 is the low surrogate area. */ /* Check for incomplete UTF-16 character. */ if (raw_unread < 2) { if (parser->eof) { return yaml_parser_set_reader_error(parser, "incomplete UTF-16 character", parser->offset, -1); } incomplete = 1; break; } /* Get the character. 
*/ value = parser->raw_buffer.pointer[low] + (parser->raw_buffer.pointer[high] << 8); /* Check for unexpected low surrogate area. */ if ((value & 0xFC00) == 0xDC00) return yaml_parser_set_reader_error(parser, "unexpected low surrogate area", parser->offset, value); /* Check for a high surrogate area. */ if ((value & 0xFC00) == 0xD800) { width = 4; /* Check for incomplete surrogate pair. */ if (raw_unread < 4) { if (parser->eof) { return yaml_parser_set_reader_error(parser, "incomplete UTF-16 surrogate pair", parser->offset, -1); } incomplete = 1; break; } /* Get the next character. */ value2 = parser->raw_buffer.pointer[low+2] + (parser->raw_buffer.pointer[high+2] << 8); /* Check for a low surrogate area. */ if ((value2 & 0xFC00) != 0xDC00) return yaml_parser_set_reader_error(parser, "expected low surrogate area", parser->offset+2, value2); /* Generate the value of the surrogate pair. */ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF); } else { width = 2; } break; default: assert(1); /* Impossible. */ } /* Check if the raw buffer contains enough bytes to form a character. */ if (incomplete) break; /* * Check if the character is in the allowed range: * #x9 | #xA | #xD | [#x20-#x7E] (8 bit) * | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) * | [#x10000-#x10FFFF] (32 bit) */ if (! (value == 0x09 || value == 0x0A || value == 0x0D || (value >= 0x20 && value <= 0x7E) || (value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) || (value >= 0xE000 && value <= 0xFFFD) || (value >= 0x10000 && value <= 0x10FFFF))) return yaml_parser_set_reader_error(parser, "control characters are not allowed", parser->offset, value); /* Move the raw pointers. */ parser->raw_buffer.pointer += width; parser->offset += width; /* Finally put the character into the buffer. 
*/ /* 0000 0000-0000 007F -> 0xxxxxxx */ if (value <= 0x7F) { *(parser->buffer.last++) = value; } /* 0000 0080-0000 07FF -> 110xxxxx 10xxxxxx */ else if (value <= 0x7FF) { *(parser->buffer.last++) = 0xC0 + (value >> 6); *(parser->buffer.last++) = 0x80 + (value & 0x3F); } /* 0000 0800-0000 FFFF -> 1110xxxx 10xxxxxx 10xxxxxx */ else if (value <= 0xFFFF) { *(parser->buffer.last++) = 0xE0 + (value >> 12); *(parser->buffer.last++) = 0x80 + ((value >> 6) & 0x3F); *(parser->buffer.last++) = 0x80 + (value & 0x3F); } /* 0001 0000-0010 FFFF -> 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ else { *(parser->buffer.last++) = 0xF0 + (value >> 18); *(parser->buffer.last++) = 0x80 + ((value >> 12) & 0x3F); *(parser->buffer.last++) = 0x80 + ((value >> 6) & 0x3F); *(parser->buffer.last++) = 0x80 + (value & 0x3F); } parser->unread ++; } /* On EOF, put NUL into the buffer and return. */ if (parser->eof) { *(parser->buffer.last++) = '\0'; parser->unread ++; return 1; } } if (parser->offset >= PTRDIFF_MAX) return yaml_parser_set_reader_error(parser, "input is too long", PTRDIFF_MAX, -1); return 1; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/loader.c0000644000000000000000000002716713306562377022420 0ustar rootroot #include "yaml_private.h" /* * API functions. */ YAML_DECLARE(int) yaml_parser_load(yaml_parser_t *parser, yaml_document_t *document); /* * Error handling. */ static int yaml_parser_set_composer_error(yaml_parser_t *parser, const char *problem, yaml_mark_t problem_mark); static int yaml_parser_set_composer_error_context(yaml_parser_t *parser, const char *context, yaml_mark_t context_mark, const char *problem, yaml_mark_t problem_mark); /* * Alias handling. */ static int yaml_parser_register_anchor(yaml_parser_t *parser, int index, yaml_char_t *anchor); /* * Clean up functions. */ static void yaml_parser_delete_aliases(yaml_parser_t *parser); /* * Composer functions. 
*/ static int yaml_parser_load_document(yaml_parser_t *parser, yaml_event_t *first_event); static int yaml_parser_load_node(yaml_parser_t *parser, yaml_event_t *first_event); static int yaml_parser_load_alias(yaml_parser_t *parser, yaml_event_t *first_event); static int yaml_parser_load_scalar(yaml_parser_t *parser, yaml_event_t *first_event); static int yaml_parser_load_sequence(yaml_parser_t *parser, yaml_event_t *first_event); static int yaml_parser_load_mapping(yaml_parser_t *parser, yaml_event_t *first_event); /* * Load the next document of the stream. */ YAML_DECLARE(int) yaml_parser_load(yaml_parser_t *parser, yaml_document_t *document) { yaml_event_t event; assert(parser); /* Non-NULL parser object is expected. */ assert(document); /* Non-NULL document object is expected. */ memset(document, 0, sizeof(yaml_document_t)); if (!STACK_INIT(parser, document->nodes, INITIAL_STACK_SIZE)) goto error; if (!parser->stream_start_produced) { if (!yaml_parser_parse(parser, &event)) goto error; assert(event.type == YAML_STREAM_START_EVENT); /* STREAM-START is expected. */ } if (parser->stream_end_produced) { return 1; } if (!yaml_parser_parse(parser, &event)) goto error; if (event.type == YAML_STREAM_END_EVENT) { return 1; } if (!STACK_INIT(parser, parser->aliases, INITIAL_STACK_SIZE)) goto error; parser->document = document; if (!yaml_parser_load_document(parser, &event)) goto error; yaml_parser_delete_aliases(parser); parser->document = NULL; return 1; error: yaml_parser_delete_aliases(parser); yaml_document_delete(document); parser->document = NULL; return 0; } /* * Set composer error. */ static int yaml_parser_set_composer_error(yaml_parser_t *parser, const char *problem, yaml_mark_t problem_mark) { parser->error = YAML_COMPOSER_ERROR; parser->problem = problem; parser->problem_mark = problem_mark; return 0; } /* * Set composer error with context. 
*/ static int yaml_parser_set_composer_error_context(yaml_parser_t *parser, const char *context, yaml_mark_t context_mark, const char *problem, yaml_mark_t problem_mark) { parser->error = YAML_COMPOSER_ERROR; parser->context = context; parser->context_mark = context_mark; parser->problem = problem; parser->problem_mark = problem_mark; return 0; } /* * Delete the stack of aliases. */ static void yaml_parser_delete_aliases(yaml_parser_t *parser) { while (!STACK_EMPTY(parser, parser->aliases)) { yaml_free(POP(parser, parser->aliases).anchor); } STACK_DEL(parser, parser->aliases); } /* * Compose a document object. */ static int yaml_parser_load_document(yaml_parser_t *parser, yaml_event_t *first_event) { yaml_event_t event; assert(first_event->type == YAML_DOCUMENT_START_EVENT); /* DOCUMENT-START is expected. */ parser->document->version_directive = first_event->data.document_start.version_directive; parser->document->tag_directives.start = first_event->data.document_start.tag_directives.start; parser->document->tag_directives.end = first_event->data.document_start.tag_directives.end; parser->document->start_implicit = first_event->data.document_start.implicit; parser->document->start_mark = first_event->start_mark; if (!yaml_parser_parse(parser, &event)) return 0; if (!yaml_parser_load_node(parser, &event)) return 0; if (!yaml_parser_parse(parser, &event)) return 0; assert(event.type == YAML_DOCUMENT_END_EVENT); /* DOCUMENT-END is expected. */ parser->document->end_implicit = event.data.document_end.implicit; parser->document->end_mark = event.end_mark; return 1; } /* * Compose a node. 
*/ static int yaml_parser_load_node(yaml_parser_t *parser, yaml_event_t *first_event) { switch (first_event->type) { case YAML_ALIAS_EVENT: return yaml_parser_load_alias(parser, first_event); case YAML_SCALAR_EVENT: return yaml_parser_load_scalar(parser, first_event); case YAML_SEQUENCE_START_EVENT: return yaml_parser_load_sequence(parser, first_event); case YAML_MAPPING_START_EVENT: return yaml_parser_load_mapping(parser, first_event); default: assert(0); /* Could not happen. */ return 0; } return 0; } /* * Add an anchor. */ static int yaml_parser_register_anchor(yaml_parser_t *parser, int index, yaml_char_t *anchor) { yaml_alias_data_t data; yaml_alias_data_t *alias_data; if (!anchor) return 1; data.anchor = anchor; data.index = index; data.mark = parser->document->nodes.start[index-1].start_mark; for (alias_data = parser->aliases.start; alias_data != parser->aliases.top; alias_data ++) { if (strcmp((char *)alias_data->anchor, (char *)anchor) == 0) { yaml_free(anchor); return yaml_parser_set_composer_error_context(parser, "found duplicate anchor; first occurence", alias_data->mark, "second occurence", data.mark); } } if (!PUSH(parser, parser->aliases, data)) { yaml_free(anchor); return 0; } return 1; } /* * Compose a node corresponding to an alias. */ static int yaml_parser_load_alias(yaml_parser_t *parser, yaml_event_t *first_event) { yaml_char_t *anchor = first_event->data.alias.anchor; yaml_alias_data_t *alias_data; for (alias_data = parser->aliases.start; alias_data != parser->aliases.top; alias_data ++) { if (strcmp((char *)alias_data->anchor, (char *)anchor) == 0) { yaml_free(anchor); return alias_data->index; } } yaml_free(anchor); return yaml_parser_set_composer_error(parser, "found undefined alias", first_event->start_mark); } /* * Compose a scalar node. 
*/ static int yaml_parser_load_scalar(yaml_parser_t *parser, yaml_event_t *first_event) { yaml_node_t node; int index; yaml_char_t *tag = first_event->data.scalar.tag; if (!STACK_LIMIT(parser, parser->document->nodes, INT_MAX-1)) goto error; if (!tag || strcmp((char *)tag, "!") == 0) { yaml_free(tag); tag = yaml_strdup((yaml_char_t *)YAML_DEFAULT_SCALAR_TAG); if (!tag) goto error; } SCALAR_NODE_INIT(node, tag, first_event->data.scalar.value, first_event->data.scalar.length, first_event->data.scalar.style, first_event->start_mark, first_event->end_mark); if (!PUSH(parser, parser->document->nodes, node)) goto error; index = parser->document->nodes.top - parser->document->nodes.start; if (!yaml_parser_register_anchor(parser, index, first_event->data.scalar.anchor)) return 0; return index; error: yaml_free(tag); yaml_free(first_event->data.scalar.anchor); yaml_free(first_event->data.scalar.value); return 0; } /* * Compose a sequence node. */ static int yaml_parser_load_sequence(yaml_parser_t *parser, yaml_event_t *first_event) { yaml_event_t event; yaml_node_t node; struct { yaml_node_item_t *start; yaml_node_item_t *end; yaml_node_item_t *top; } items = { NULL, NULL, NULL }; int index, item_index; yaml_char_t *tag = first_event->data.sequence_start.tag; if (!STACK_LIMIT(parser, parser->document->nodes, INT_MAX-1)) goto error; if (!tag || strcmp((char *)tag, "!") == 0) { yaml_free(tag); tag = yaml_strdup((yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG); if (!tag) goto error; } if (!STACK_INIT(parser, items, INITIAL_STACK_SIZE)) goto error; SEQUENCE_NODE_INIT(node, tag, items.start, items.end, first_event->data.sequence_start.style, first_event->start_mark, first_event->end_mark); if (!PUSH(parser, parser->document->nodes, node)) goto error; index = parser->document->nodes.top - parser->document->nodes.start; if (!yaml_parser_register_anchor(parser, index, first_event->data.sequence_start.anchor)) return 0; if (!yaml_parser_parse(parser, &event)) return 0; while (event.type != 
YAML_SEQUENCE_END_EVENT) { if (!STACK_LIMIT(parser, parser->document->nodes.start[index-1].data.sequence.items, INT_MAX-1)) return 0; item_index = yaml_parser_load_node(parser, &event); if (!item_index) return 0; if (!PUSH(parser, parser->document->nodes.start[index-1].data.sequence.items, item_index)) return 0; if (!yaml_parser_parse(parser, &event)) return 0; } parser->document->nodes.start[index-1].end_mark = event.end_mark; return index; error: yaml_free(tag); yaml_free(first_event->data.sequence_start.anchor); return 0; } /* * Compose a mapping node. */ static int yaml_parser_load_mapping(yaml_parser_t *parser, yaml_event_t *first_event) { yaml_event_t event; yaml_node_t node; struct { yaml_node_pair_t *start; yaml_node_pair_t *end; yaml_node_pair_t *top; } pairs = { NULL, NULL, NULL }; int index; yaml_node_pair_t pair; yaml_char_t *tag = first_event->data.mapping_start.tag; if (!STACK_LIMIT(parser, parser->document->nodes, INT_MAX-1)) goto error; if (!tag || strcmp((char *)tag, "!") == 0) { yaml_free(tag); tag = yaml_strdup((yaml_char_t *)YAML_DEFAULT_MAPPING_TAG); if (!tag) goto error; } if (!STACK_INIT(parser, pairs, INITIAL_STACK_SIZE)) goto error; MAPPING_NODE_INIT(node, tag, pairs.start, pairs.end, first_event->data.mapping_start.style, first_event->start_mark, first_event->end_mark); if (!PUSH(parser, parser->document->nodes, node)) goto error; index = parser->document->nodes.top - parser->document->nodes.start; if (!yaml_parser_register_anchor(parser, index, first_event->data.mapping_start.anchor)) return 0; if (!yaml_parser_parse(parser, &event)) return 0; while (event.type != YAML_MAPPING_END_EVENT) { if (!STACK_LIMIT(parser, parser->document->nodes.start[index-1].data.mapping.pairs, INT_MAX-1)) return 0; pair.key = yaml_parser_load_node(parser, &event); if (!pair.key) return 0; if (!yaml_parser_parse(parser, &event)) return 0; pair.value = yaml_parser_load_node(parser, &event); if (!pair.value) return 0; if (!PUSH(parser, 
parser->document->nodes.start[index-1].data.mapping.pairs, pair)) return 0; if (!yaml_parser_parse(parser, &event)) return 0; } parser->document->nodes.start[index-1].end_mark = event.end_mark; return index; error: yaml_free(tag); yaml_free(first_event->data.mapping_start.anchor); return 0; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/api.c0000644000000000000000000010714113306562377021712 0ustar rootroot #include "yaml_private.h" /* * Get the library version. */ YAML_DECLARE(const char *) yaml_get_version_string(void) { return YAML_VERSION_STRING; } /* * Get the library version numbers. */ YAML_DECLARE(void) yaml_get_version(int *major, int *minor, int *patch) { *major = YAML_VERSION_MAJOR; *minor = YAML_VERSION_MINOR; *patch = YAML_VERSION_PATCH; } /* * Allocate a dynamic memory block. */ YAML_DECLARE(void *) yaml_malloc(size_t size) { return malloc(size ? size : 1); } /* * Reallocate a dynamic memory block. */ YAML_DECLARE(void *) yaml_realloc(void *ptr, size_t size) { return ptr ? realloc(ptr, size ? size : 1) : malloc(size ? size : 1); } /* * Free a dynamic memory block. */ YAML_DECLARE(void) yaml_free(void *ptr) { if (ptr) free(ptr); } /* * Duplicate a string. */ YAML_DECLARE(yaml_char_t *) yaml_strdup(const yaml_char_t *str) { if (!str) return NULL; return (yaml_char_t *)strdup((char *)str); } /* * Extend a string. */ YAML_DECLARE(int) yaml_string_extend(yaml_char_t **start, yaml_char_t **pointer, yaml_char_t **end) { yaml_char_t *new_start = yaml_realloc(*start, (*end - *start)*2); if (!new_start) return 0; memset(new_start + (*end - *start), 0, *end - *start); *pointer = new_start + (*pointer - *start); *end = new_start + (*end - *start)*2; *start = new_start; return 1; } /* * Append a string B to a string A. 
*/ YAML_DECLARE(int) yaml_string_join( yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end, yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end) { if (*b_start == *b_pointer) return 1; while (*a_end - *a_pointer <= *b_pointer - *b_start) { if (!yaml_string_extend(a_start, a_pointer, a_end)) return 0; } memcpy(*a_pointer, *b_start, *b_pointer - *b_start); *a_pointer += *b_pointer - *b_start; return 1; } /* * Extend a stack. */ YAML_DECLARE(int) yaml_stack_extend(void **start, void **top, void **end) { void *new_start = yaml_realloc(*start, ((char *)*end - (char *)*start)*2); if (!new_start) return 0; *top = (char *)new_start + ((char *)*top - (char *)*start); *end = (char *)new_start + ((char *)*end - (char *)*start)*2; *start = new_start; return 1; } /* * Extend or move a queue. */ YAML_DECLARE(int) yaml_queue_extend(void **start, void **head, void **tail, void **end) { /* Check if we need to resize the queue. */ if (*start == *head && *tail == *end) { void *new_start = yaml_realloc(*start, ((char *)*end - (char *)*start)*2); if (!new_start) return 0; *head = (char *)new_start + ((char *)*head - (char *)*start); *tail = (char *)new_start + ((char *)*tail - (char *)*start); *end = (char *)new_start + ((char *)*end - (char *)*start)*2; *start = new_start; } /* Check if we need to move the queue at the beginning of the buffer. */ if (*tail == *end) { if (*head != *tail) { memmove(*start, *head, (char *)*tail - (char *)*head); } *tail = (char *)*tail - (char *)*head + (char *)*start; *head = *start; } return 1; } /* * Create a new parser object. */ YAML_DECLARE(int) yaml_parser_initialize(yaml_parser_t *parser) { assert(parser); /* Non-NULL parser object expected. 
*/ memset(parser, 0, sizeof(yaml_parser_t)); if (!BUFFER_INIT(parser, parser->raw_buffer, INPUT_RAW_BUFFER_SIZE)) goto error; if (!BUFFER_INIT(parser, parser->buffer, INPUT_BUFFER_SIZE)) goto error; if (!QUEUE_INIT(parser, parser->tokens, INITIAL_QUEUE_SIZE)) goto error; if (!STACK_INIT(parser, parser->indents, INITIAL_STACK_SIZE)) goto error; if (!STACK_INIT(parser, parser->simple_keys, INITIAL_STACK_SIZE)) goto error; if (!STACK_INIT(parser, parser->states, INITIAL_STACK_SIZE)) goto error; if (!STACK_INIT(parser, parser->marks, INITIAL_STACK_SIZE)) goto error; if (!STACK_INIT(parser, parser->tag_directives, INITIAL_STACK_SIZE)) goto error; return 1; error: BUFFER_DEL(parser, parser->raw_buffer); BUFFER_DEL(parser, parser->buffer); QUEUE_DEL(parser, parser->tokens); STACK_DEL(parser, parser->indents); STACK_DEL(parser, parser->simple_keys); STACK_DEL(parser, parser->states); STACK_DEL(parser, parser->marks); STACK_DEL(parser, parser->tag_directives); return 0; } /* * Destroy a parser object. */ YAML_DECLARE(void) yaml_parser_delete(yaml_parser_t *parser) { assert(parser); /* Non-NULL parser object expected. */ BUFFER_DEL(parser, parser->raw_buffer); BUFFER_DEL(parser, parser->buffer); while (!QUEUE_EMPTY(parser, parser->tokens)) { yaml_token_delete(&DEQUEUE(parser, parser->tokens)); } QUEUE_DEL(parser, parser->tokens); STACK_DEL(parser, parser->indents); STACK_DEL(parser, parser->simple_keys); STACK_DEL(parser, parser->states); STACK_DEL(parser, parser->marks); while (!STACK_EMPTY(parser, parser->tag_directives)) { yaml_tag_directive_t tag_directive = POP(parser, parser->tag_directives); yaml_free(tag_directive.handle); yaml_free(tag_directive.prefix); } STACK_DEL(parser, parser->tag_directives); memset(parser, 0, sizeof(yaml_parser_t)); } /* * String read handler. 
*/ static int yaml_string_read_handler(void *data, unsigned char *buffer, size_t size, size_t *size_read) { yaml_parser_t *parser = data; if (parser->input.string.current == parser->input.string.end) { *size_read = 0; return 1; } if (size > (size_t)(parser->input.string.end - parser->input.string.current)) { size = parser->input.string.end - parser->input.string.current; } memcpy(buffer, parser->input.string.current, size); parser->input.string.current += size; *size_read = size; return 1; } /* * File read handler. */ static int yaml_file_read_handler(void *data, unsigned char *buffer, size_t size, size_t *size_read) { yaml_parser_t *parser = data; *size_read = fread(buffer, 1, size, parser->input.file); return !ferror(parser->input.file); } /* * Set a string input. */ YAML_DECLARE(void) yaml_parser_set_input_string(yaml_parser_t *parser, const unsigned char *input, size_t size) { assert(parser); /* Non-NULL parser object expected. */ assert(!parser->read_handler); /* You can set the source only once. */ assert(input); /* Non-NULL input string expected. */ parser->read_handler = yaml_string_read_handler; parser->read_handler_data = parser; parser->input.string.start = input; parser->input.string.current = input; parser->input.string.end = input+size; } /* * Set a file input. */ YAML_DECLARE(void) yaml_parser_set_input_file(yaml_parser_t *parser, FILE *file) { assert(parser); /* Non-NULL parser object expected. */ assert(!parser->read_handler); /* You can set the source only once. */ assert(file); /* Non-NULL file object expected. */ parser->read_handler = yaml_file_read_handler; parser->read_handler_data = parser; parser->input.file = file; } /* * Set a generic input. */ YAML_DECLARE(void) yaml_parser_set_input(yaml_parser_t *parser, yaml_read_handler_t *handler, void *data) { assert(parser); /* Non-NULL parser object expected. */ assert(!parser->read_handler); /* You can set the source only once. */ assert(handler); /* Non-NULL read handler expected. 
*/ parser->read_handler = handler; parser->read_handler_data = data; } /* * Set the source encoding. */ YAML_DECLARE(void) yaml_parser_set_encoding(yaml_parser_t *parser, yaml_encoding_t encoding) { assert(parser); /* Non-NULL parser object expected. */ assert(!parser->encoding); /* Encoding is already set or detected. */ parser->encoding = encoding; } /* * Create a new emitter object. */ YAML_DECLARE(int) yaml_emitter_initialize(yaml_emitter_t *emitter) { assert(emitter); /* Non-NULL emitter object expected. */ memset(emitter, 0, sizeof(yaml_emitter_t)); if (!BUFFER_INIT(emitter, emitter->buffer, OUTPUT_BUFFER_SIZE)) goto error; if (!BUFFER_INIT(emitter, emitter->raw_buffer, OUTPUT_RAW_BUFFER_SIZE)) goto error; if (!STACK_INIT(emitter, emitter->states, INITIAL_STACK_SIZE)) goto error; if (!QUEUE_INIT(emitter, emitter->events, INITIAL_QUEUE_SIZE)) goto error; if (!STACK_INIT(emitter, emitter->indents, INITIAL_STACK_SIZE)) goto error; if (!STACK_INIT(emitter, emitter->tag_directives, INITIAL_STACK_SIZE)) goto error; return 1; error: BUFFER_DEL(emitter, emitter->buffer); BUFFER_DEL(emitter, emitter->raw_buffer); STACK_DEL(emitter, emitter->states); QUEUE_DEL(emitter, emitter->events); STACK_DEL(emitter, emitter->indents); STACK_DEL(emitter, emitter->tag_directives); return 0; } /* * Destroy an emitter object. */ YAML_DECLARE(void) yaml_emitter_delete(yaml_emitter_t *emitter) { assert(emitter); /* Non-NULL emitter object expected. 
*/ BUFFER_DEL(emitter, emitter->buffer); BUFFER_DEL(emitter, emitter->raw_buffer); STACK_DEL(emitter, emitter->states); while (!QUEUE_EMPTY(emitter, emitter->events)) { yaml_event_delete(&DEQUEUE(emitter, emitter->events)); } QUEUE_DEL(emitter, emitter->events); STACK_DEL(emitter, emitter->indents); while (!STACK_EMPTY(empty, emitter->tag_directives)) { yaml_tag_directive_t tag_directive = POP(emitter, emitter->tag_directives); yaml_free(tag_directive.handle); yaml_free(tag_directive.prefix); } STACK_DEL(emitter, emitter->tag_directives); yaml_free(emitter->anchors); memset(emitter, 0, sizeof(yaml_emitter_t)); } /* * String write handler. */ static int yaml_string_write_handler(void *data, unsigned char *buffer, size_t size) { yaml_emitter_t *emitter = data; if (emitter->output.string.size - *emitter->output.string.size_written < size) { memcpy(emitter->output.string.buffer + *emitter->output.string.size_written, buffer, emitter->output.string.size - *emitter->output.string.size_written); *emitter->output.string.size_written = emitter->output.string.size; return 0; } memcpy(emitter->output.string.buffer + *emitter->output.string.size_written, buffer, size); *emitter->output.string.size_written += size; return 1; } /* * File write handler. */ static int yaml_file_write_handler(void *data, unsigned char *buffer, size_t size) { yaml_emitter_t *emitter = data; return (fwrite(buffer, 1, size, emitter->output.file) == size); } /* * Set a string output. */ YAML_DECLARE(void) yaml_emitter_set_output_string(yaml_emitter_t *emitter, unsigned char *output, size_t size, size_t *size_written) { assert(emitter); /* Non-NULL emitter object expected. */ assert(!emitter->write_handler); /* You can set the output only once. */ assert(output); /* Non-NULL output string expected. 
*/ emitter->write_handler = yaml_string_write_handler; emitter->write_handler_data = emitter; emitter->output.string.buffer = output; emitter->output.string.size = size; emitter->output.string.size_written = size_written; *size_written = 0; } /* * Set a file output. */ YAML_DECLARE(void) yaml_emitter_set_output_file(yaml_emitter_t *emitter, FILE *file) { assert(emitter); /* Non-NULL emitter object expected. */ assert(!emitter->write_handler); /* You can set the output only once. */ assert(file); /* Non-NULL file object expected. */ emitter->write_handler = yaml_file_write_handler; emitter->write_handler_data = emitter; emitter->output.file = file; } /* * Set a generic output handler. */ YAML_DECLARE(void) yaml_emitter_set_output(yaml_emitter_t *emitter, yaml_write_handler_t *handler, void *data) { assert(emitter); /* Non-NULL emitter object expected. */ assert(!emitter->write_handler); /* You can set the output only once. */ assert(handler); /* Non-NULL handler object expected. */ emitter->write_handler = handler; emitter->write_handler_data = data; } /* * Set the output encoding. */ YAML_DECLARE(void) yaml_emitter_set_encoding(yaml_emitter_t *emitter, yaml_encoding_t encoding) { assert(emitter); /* Non-NULL emitter object expected. */ assert(!emitter->encoding); /* You can set encoding only once. */ emitter->encoding = encoding; } /* * Set the canonical output style. */ YAML_DECLARE(void) yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical) { assert(emitter); /* Non-NULL emitter object expected. */ emitter->canonical = (canonical != 0); } /* * Set the indentation increment. */ YAML_DECLARE(void) yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent) { assert(emitter); /* Non-NULL emitter object expected. */ emitter->best_indent = (1 < indent && indent < 10) ? indent : 2; } /* * Set the preferred line width. */ YAML_DECLARE(void) yaml_emitter_set_width(yaml_emitter_t *emitter, int width) { assert(emitter); /* Non-NULL emitter object expected. 
*/ emitter->best_width = (width >= 0) ? width : -1; } /* * Set if unescaped non-ASCII characters are allowed. */ YAML_DECLARE(void) yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode) { assert(emitter); /* Non-NULL emitter object expected. */ emitter->unicode = (unicode != 0); } /* * Set the preferred line break character. */ YAML_DECLARE(void) yaml_emitter_set_break(yaml_emitter_t *emitter, yaml_break_t line_break) { assert(emitter); /* Non-NULL emitter object expected. */ emitter->line_break = line_break; } /* * Destroy a token object. */ YAML_DECLARE(void) yaml_token_delete(yaml_token_t *token) { assert(token); /* Non-NULL token object expected. */ switch (token->type) { case YAML_TAG_DIRECTIVE_TOKEN: yaml_free(token->data.tag_directive.handle); yaml_free(token->data.tag_directive.prefix); break; case YAML_ALIAS_TOKEN: yaml_free(token->data.alias.value); break; case YAML_ANCHOR_TOKEN: yaml_free(token->data.anchor.value); break; case YAML_TAG_TOKEN: yaml_free(token->data.tag.handle); yaml_free(token->data.tag.suffix); break; case YAML_SCALAR_TOKEN: yaml_free(token->data.scalar.value); break; default: break; } memset(token, 0, sizeof(yaml_token_t)); } /* * Check if a string is a valid UTF-8 sequence. * * Check 'reader.c' for more details on UTF-8 encoding. */ static int yaml_check_utf8(yaml_char_t *start, size_t length) { yaml_char_t *end = start+length; yaml_char_t *pointer = start; while (pointer < end) { unsigned char octet; unsigned int width; unsigned int value; size_t k; octet = pointer[0]; width = (octet & 0x80) == 0x00 ? 1 : (octet & 0xE0) == 0xC0 ? 2 : (octet & 0xF0) == 0xE0 ? 3 : (octet & 0xF8) == 0xF0 ? 4 : 0; value = (octet & 0x80) == 0x00 ? octet & 0x7F : (octet & 0xE0) == 0xC0 ? octet & 0x1F : (octet & 0xF0) == 0xE0 ? octet & 0x0F : (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; if (!width) return 0; if (pointer+width > end) return 0; for (k = 1; k < width; k ++) { octet = pointer[k]; if ((octet & 0xC0) != 0x80) return 0; value = (value << 6) + (octet & 0x3F); } if (!((width == 1) || (width == 2 && value >= 0x80) || (width == 3 && value >= 0x800) || (width == 4 && value >= 0x10000))) return 0; pointer += width; } return 1; } /* * Create STREAM-START. */ YAML_DECLARE(int) yaml_stream_start_event_initialize(yaml_event_t *event, yaml_encoding_t encoding) { yaml_mark_t mark = { 0, 0, 0 }; assert(event); /* Non-NULL event object is expected. */ STREAM_START_EVENT_INIT(*event, encoding, mark, mark); return 1; } /* * Create STREAM-END. */ YAML_DECLARE(int) yaml_stream_end_event_initialize(yaml_event_t *event) { yaml_mark_t mark = { 0, 0, 0 }; assert(event); /* Non-NULL event object is expected. */ STREAM_END_EVENT_INIT(*event, mark, mark); return 1; } /* * Create DOCUMENT-START. */ YAML_DECLARE(int) yaml_document_start_event_initialize(yaml_event_t *event, yaml_version_directive_t *version_directive, yaml_tag_directive_t *tag_directives_start, yaml_tag_directive_t *tag_directives_end, int implicit) { struct { yaml_error_type_t error; } context; yaml_mark_t mark = { 0, 0, 0 }; yaml_version_directive_t *version_directive_copy = NULL; struct { yaml_tag_directive_t *start; yaml_tag_directive_t *end; yaml_tag_directive_t *top; } tag_directives_copy = { NULL, NULL, NULL }; yaml_tag_directive_t value = { NULL, NULL }; assert(event); /* Non-NULL event object is expected. */ assert((tag_directives_start && tag_directives_end) || (tag_directives_start == tag_directives_end)); /* Valid tag directives are expected. 
*/ if (version_directive) { version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)); if (!version_directive_copy) goto error; version_directive_copy->major = version_directive->major; version_directive_copy->minor = version_directive->minor; } if (tag_directives_start != tag_directives_end) { yaml_tag_directive_t *tag_directive; if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) goto error; for (tag_directive = tag_directives_start; tag_directive != tag_directives_end; tag_directive ++) { assert(tag_directive->handle); assert(tag_directive->prefix); if (!yaml_check_utf8(tag_directive->handle, strlen((char *)tag_directive->handle))) goto error; if (!yaml_check_utf8(tag_directive->prefix, strlen((char *)tag_directive->prefix))) goto error; value.handle = yaml_strdup(tag_directive->handle); value.prefix = yaml_strdup(tag_directive->prefix); if (!value.handle || !value.prefix) goto error; if (!PUSH(&context, tag_directives_copy, value)) goto error; value.handle = NULL; value.prefix = NULL; } } DOCUMENT_START_EVENT_INIT(*event, version_directive_copy, tag_directives_copy.start, tag_directives_copy.top, implicit, mark, mark); return 1; error: yaml_free(version_directive_copy); while (!STACK_EMPTY(context, tag_directives_copy)) { yaml_tag_directive_t value = POP(context, tag_directives_copy); yaml_free(value.handle); yaml_free(value.prefix); } STACK_DEL(context, tag_directives_copy); yaml_free(value.handle); yaml_free(value.prefix); return 0; } /* * Create DOCUMENT-END. */ YAML_DECLARE(int) yaml_document_end_event_initialize(yaml_event_t *event, int implicit) { yaml_mark_t mark = { 0, 0, 0 }; assert(event); /* Non-NULL emitter object is expected. */ DOCUMENT_END_EVENT_INIT(*event, implicit, mark, mark); return 1; } /* * Create ALIAS. */ YAML_DECLARE(int) yaml_alias_event_initialize(yaml_event_t *event, yaml_char_t *anchor) { yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *anchor_copy = NULL; assert(event); /* Non-NULL event object is expected. 
*/ assert(anchor); /* Non-NULL anchor is expected. */ if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0; anchor_copy = yaml_strdup(anchor); if (!anchor_copy) return 0; ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark); return 1; } /* * Create SCALAR. */ YAML_DECLARE(int) yaml_scalar_event_initialize(yaml_event_t *event, yaml_char_t *anchor, yaml_char_t *tag, yaml_char_t *value, int length, int plain_implicit, int quoted_implicit, yaml_scalar_style_t style) { yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *anchor_copy = NULL; yaml_char_t *tag_copy = NULL; yaml_char_t *value_copy = NULL; assert(event); /* Non-NULL event object is expected. */ assert(value); /* Non-NULL anchor is expected. */ if (anchor) { if (!yaml_check_utf8(anchor, strlen((char *)anchor))) goto error; anchor_copy = yaml_strdup(anchor); if (!anchor_copy) goto error; } if (tag) { if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; tag_copy = yaml_strdup(tag); if (!tag_copy) goto error; } if (length < 0) { length = strlen((char *)value); } if (!yaml_check_utf8(value, length)) goto error; value_copy = yaml_malloc(length+1); if (!value_copy) goto error; memcpy(value_copy, value, length); value_copy[length] = '\0'; SCALAR_EVENT_INIT(*event, anchor_copy, tag_copy, value_copy, length, plain_implicit, quoted_implicit, style, mark, mark); return 1; error: yaml_free(anchor_copy); yaml_free(tag_copy); yaml_free(value_copy); return 0; } /* * Create SEQUENCE-START. */ YAML_DECLARE(int) yaml_sequence_start_event_initialize(yaml_event_t *event, yaml_char_t *anchor, yaml_char_t *tag, int implicit, yaml_sequence_style_t style) { yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *anchor_copy = NULL; yaml_char_t *tag_copy = NULL; assert(event); /* Non-NULL event object is expected. 
*/ if (anchor) { if (!yaml_check_utf8(anchor, strlen((char *)anchor))) goto error; anchor_copy = yaml_strdup(anchor); if (!anchor_copy) goto error; } if (tag) { if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; tag_copy = yaml_strdup(tag); if (!tag_copy) goto error; } SEQUENCE_START_EVENT_INIT(*event, anchor_copy, tag_copy, implicit, style, mark, mark); return 1; error: yaml_free(anchor_copy); yaml_free(tag_copy); return 0; } /* * Create SEQUENCE-END. */ YAML_DECLARE(int) yaml_sequence_end_event_initialize(yaml_event_t *event) { yaml_mark_t mark = { 0, 0, 0 }; assert(event); /* Non-NULL event object is expected. */ SEQUENCE_END_EVENT_INIT(*event, mark, mark); return 1; } /* * Create MAPPING-START. */ YAML_DECLARE(int) yaml_mapping_start_event_initialize(yaml_event_t *event, yaml_char_t *anchor, yaml_char_t *tag, int implicit, yaml_mapping_style_t style) { yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *anchor_copy = NULL; yaml_char_t *tag_copy = NULL; assert(event); /* Non-NULL event object is expected. */ if (anchor) { if (!yaml_check_utf8(anchor, strlen((char *)anchor))) goto error; anchor_copy = yaml_strdup(anchor); if (!anchor_copy) goto error; } if (tag) { if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; tag_copy = yaml_strdup(tag); if (!tag_copy) goto error; } MAPPING_START_EVENT_INIT(*event, anchor_copy, tag_copy, implicit, style, mark, mark); return 1; error: yaml_free(anchor_copy); yaml_free(tag_copy); return 0; } /* * Create MAPPING-END. */ YAML_DECLARE(int) yaml_mapping_end_event_initialize(yaml_event_t *event) { yaml_mark_t mark = { 0, 0, 0 }; assert(event); /* Non-NULL event object is expected. */ MAPPING_END_EVENT_INIT(*event, mark, mark); return 1; } /* * Destroy an event object. */ YAML_DECLARE(void) yaml_event_delete(yaml_event_t *event) { yaml_tag_directive_t *tag_directive; assert(event); /* Non-NULL event object expected. 
*/ switch (event->type) { case YAML_DOCUMENT_START_EVENT: yaml_free(event->data.document_start.version_directive); for (tag_directive = event->data.document_start.tag_directives.start; tag_directive != event->data.document_start.tag_directives.end; tag_directive++) { yaml_free(tag_directive->handle); yaml_free(tag_directive->prefix); } yaml_free(event->data.document_start.tag_directives.start); break; case YAML_ALIAS_EVENT: yaml_free(event->data.alias.anchor); break; case YAML_SCALAR_EVENT: yaml_free(event->data.scalar.anchor); yaml_free(event->data.scalar.tag); yaml_free(event->data.scalar.value); break; case YAML_SEQUENCE_START_EVENT: yaml_free(event->data.sequence_start.anchor); yaml_free(event->data.sequence_start.tag); break; case YAML_MAPPING_START_EVENT: yaml_free(event->data.mapping_start.anchor); yaml_free(event->data.mapping_start.tag); break; default: break; } memset(event, 0, sizeof(yaml_event_t)); } /* * Create a document object. */ YAML_DECLARE(int) yaml_document_initialize(yaml_document_t *document, yaml_version_directive_t *version_directive, yaml_tag_directive_t *tag_directives_start, yaml_tag_directive_t *tag_directives_end, int start_implicit, int end_implicit) { struct { yaml_error_type_t error; } context; struct { yaml_node_t *start; yaml_node_t *end; yaml_node_t *top; } nodes = { NULL, NULL, NULL }; yaml_version_directive_t *version_directive_copy = NULL; struct { yaml_tag_directive_t *start; yaml_tag_directive_t *end; yaml_tag_directive_t *top; } tag_directives_copy = { NULL, NULL, NULL }; yaml_tag_directive_t value = { NULL, NULL }; yaml_mark_t mark = { 0, 0, 0 }; assert(document); /* Non-NULL document object is expected. */ assert((tag_directives_start && tag_directives_end) || (tag_directives_start == tag_directives_end)); /* Valid tag directives are expected. 
*/ if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error; if (version_directive) { version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)); if (!version_directive_copy) goto error; version_directive_copy->major = version_directive->major; version_directive_copy->minor = version_directive->minor; } if (tag_directives_start != tag_directives_end) { yaml_tag_directive_t *tag_directive; if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) goto error; for (tag_directive = tag_directives_start; tag_directive != tag_directives_end; tag_directive ++) { assert(tag_directive->handle); assert(tag_directive->prefix); if (!yaml_check_utf8(tag_directive->handle, strlen((char *)tag_directive->handle))) goto error; if (!yaml_check_utf8(tag_directive->prefix, strlen((char *)tag_directive->prefix))) goto error; value.handle = yaml_strdup(tag_directive->handle); value.prefix = yaml_strdup(tag_directive->prefix); if (!value.handle || !value.prefix) goto error; if (!PUSH(&context, tag_directives_copy, value)) goto error; value.handle = NULL; value.prefix = NULL; } } DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, tag_directives_copy.start, tag_directives_copy.top, start_implicit, end_implicit, mark, mark); return 1; error: STACK_DEL(&context, nodes); yaml_free(version_directive_copy); while (!STACK_EMPTY(&context, tag_directives_copy)) { yaml_tag_directive_t value = POP(&context, tag_directives_copy); yaml_free(value.handle); yaml_free(value.prefix); } STACK_DEL(&context, tag_directives_copy); yaml_free(value.handle); yaml_free(value.prefix); return 0; } /* * Destroy a document object. */ YAML_DECLARE(void) yaml_document_delete(yaml_document_t *document) { struct { yaml_error_type_t error; } context; yaml_tag_directive_t *tag_directive; context.error = YAML_NO_ERROR; /* Eliminate a compliler warning. */ assert(document); /* Non-NULL document object is expected. 
*/ while (!STACK_EMPTY(&context, document->nodes)) { yaml_node_t node = POP(&context, document->nodes); yaml_free(node.tag); switch (node.type) { case YAML_SCALAR_NODE: yaml_free(node.data.scalar.value); break; case YAML_SEQUENCE_NODE: STACK_DEL(&context, node.data.sequence.items); break; case YAML_MAPPING_NODE: STACK_DEL(&context, node.data.mapping.pairs); break; default: assert(0); /* Should not happen. */ } } STACK_DEL(&context, document->nodes); yaml_free(document->version_directive); for (tag_directive = document->tag_directives.start; tag_directive != document->tag_directives.end; tag_directive++) { yaml_free(tag_directive->handle); yaml_free(tag_directive->prefix); } yaml_free(document->tag_directives.start); memset(document, 0, sizeof(yaml_document_t)); } /** * Get a document node. */ YAML_DECLARE(yaml_node_t *) yaml_document_get_node(yaml_document_t *document, int index) { assert(document); /* Non-NULL document object is expected. */ if (index > 0 && document->nodes.start + index <= document->nodes.top) { return document->nodes.start + index - 1; } return NULL; } /** * Get the root object. */ YAML_DECLARE(yaml_node_t *) yaml_document_get_root_node(yaml_document_t *document) { assert(document); /* Non-NULL document object is expected. */ if (document->nodes.top != document->nodes.start) { return document->nodes.start; } return NULL; } /* * Add a scalar node to a document. */ YAML_DECLARE(int) yaml_document_add_scalar(yaml_document_t *document, yaml_char_t *tag, yaml_char_t *value, int length, yaml_scalar_style_t style) { struct { yaml_error_type_t error; } context; yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *tag_copy = NULL; yaml_char_t *value_copy = NULL; yaml_node_t node; assert(document); /* Non-NULL document object is expected. */ assert(value); /* Non-NULL value is expected. 
*/ if (!tag) { tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG; } if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; tag_copy = yaml_strdup(tag); if (!tag_copy) goto error; if (length < 0) { length = strlen((char *)value); } if (!yaml_check_utf8(value, length)) goto error; value_copy = yaml_malloc(length+1); if (!value_copy) goto error; memcpy(value_copy, value, length); value_copy[length] = '\0'; SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark); if (!PUSH(&context, document->nodes, node)) goto error; return document->nodes.top - document->nodes.start; error: yaml_free(tag_copy); yaml_free(value_copy); return 0; } /* * Add a sequence node to a document. */ YAML_DECLARE(int) yaml_document_add_sequence(yaml_document_t *document, yaml_char_t *tag, yaml_sequence_style_t style) { struct { yaml_error_type_t error; } context; yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *tag_copy = NULL; struct { yaml_node_item_t *start; yaml_node_item_t *end; yaml_node_item_t *top; } items = { NULL, NULL, NULL }; yaml_node_t node; assert(document); /* Non-NULL document object is expected. */ if (!tag) { tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG; } if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; tag_copy = yaml_strdup(tag); if (!tag_copy) goto error; if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error; SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, style, mark, mark); if (!PUSH(&context, document->nodes, node)) goto error; return document->nodes.top - document->nodes.start; error: STACK_DEL(&context, items); yaml_free(tag_copy); return 0; } /* * Add a mapping node to a document. 
*/ YAML_DECLARE(int) yaml_document_add_mapping(yaml_document_t *document, yaml_char_t *tag, yaml_mapping_style_t style) { struct { yaml_error_type_t error; } context; yaml_mark_t mark = { 0, 0, 0 }; yaml_char_t *tag_copy = NULL; struct { yaml_node_pair_t *start; yaml_node_pair_t *end; yaml_node_pair_t *top; } pairs = { NULL, NULL, NULL }; yaml_node_t node; assert(document); /* Non-NULL document object is expected. */ if (!tag) { tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG; } if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; tag_copy = yaml_strdup(tag); if (!tag_copy) goto error; if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error; MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, style, mark, mark); if (!PUSH(&context, document->nodes, node)) goto error; return document->nodes.top - document->nodes.start; error: STACK_DEL(&context, pairs); yaml_free(tag_copy); return 0; } /* * Append an item to a sequence node. */ YAML_DECLARE(int) yaml_document_append_sequence_item(yaml_document_t *document, int sequence, int item) { struct { yaml_error_type_t error; } context; assert(document); /* Non-NULL document is required. */ assert(sequence > 0 && document->nodes.start + sequence <= document->nodes.top); /* Valid sequence id is required. */ assert(document->nodes.start[sequence-1].type == YAML_SEQUENCE_NODE); /* A sequence node is required. */ assert(item > 0 && document->nodes.start + item <= document->nodes.top); /* Valid item id is required. */ if (!PUSH(&context, document->nodes.start[sequence-1].data.sequence.items, item)) return 0; return 1; } /* * Append a pair of a key and a value to a mapping node. */ YAML_DECLARE(int) yaml_document_append_mapping_pair(yaml_document_t *document, int mapping, int key, int value) { struct { yaml_error_type_t error; } context; yaml_node_pair_t pair; assert(document); /* Non-NULL document is required. 
*/ assert(mapping > 0 && document->nodes.start + mapping <= document->nodes.top); /* Valid mapping id is required. */ assert(document->nodes.start[mapping-1].type == YAML_MAPPING_NODE); /* A mapping node is required. */ assert(key > 0 && document->nodes.start + key <= document->nodes.top); /* Valid key id is required. */ assert(value > 0 && document->nodes.start + value <= document->nodes.top); /* Valid value id is required. */ pair.key = key; pair.value = value; if (!PUSH(&context, document->nodes.start[mapping-1].data.mapping.pairs, pair)) return 0; return 1; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/yaml_private.h0000644000000000000000000007153713306562377023653 0ustar rootroot #if HAVE_CONFIG_H #include #endif #include #include #include #include #ifndef _MSC_VER #include #else #ifdef _WIN64 #define PTRDIFF_MAX _I64_MAX #else #define PTRDIFF_MAX INT_MAX #endif #endif /* * Memory management. */ YAML_DECLARE(void *) yaml_malloc(size_t size); YAML_DECLARE(void *) yaml_realloc(void *ptr, size_t size); YAML_DECLARE(void) yaml_free(void *ptr); YAML_DECLARE(yaml_char_t *) yaml_strdup(const yaml_char_t *); /* * Reader: Ensure that the buffer contains at least `length` characters. */ YAML_DECLARE(int) yaml_parser_update_buffer(yaml_parser_t *parser, size_t length); /* * Scanner: Ensure that the token stack contains at least one token ready. */ YAML_DECLARE(int) yaml_parser_fetch_more_tokens(yaml_parser_t *parser); /* * The size of the input raw buffer. */ #define INPUT_RAW_BUFFER_SIZE 16384 /* * The size of the input buffer. * * It should be possible to decode the whole raw buffer. */ #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3) /* * The size of the output buffer. */ #define OUTPUT_BUFFER_SIZE 16384 /* * The size of the output raw buffer. * * It should be possible to encode the whole output buffer. */ #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2) /* * The size of other stacks and queues. 
*/ #define INITIAL_STACK_SIZE 16 #define INITIAL_QUEUE_SIZE 16 #define INITIAL_STRING_SIZE 16 /* * Buffer management. */ #define BUFFER_INIT(context,buffer,size) \ (((buffer).start = yaml_malloc(size)) ? \ ((buffer).last = (buffer).pointer = (buffer).start, \ (buffer).end = (buffer).start+(size), \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define BUFFER_DEL(context,buffer) \ (yaml_free((buffer).start), \ (buffer).start = (buffer).pointer = (buffer).end = 0) /* * String management. */ typedef struct { yaml_char_t *start; yaml_char_t *end; yaml_char_t *pointer; } yaml_string_t; YAML_DECLARE(int) yaml_string_extend(yaml_char_t **start, yaml_char_t **pointer, yaml_char_t **end); YAML_DECLARE(int) yaml_string_join( yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end, yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end); #define NULL_STRING { NULL, NULL, NULL } #define STRING(string,length) { (string), (string)+(length), (string) } #define STRING_ASSIGN(value,string,length) \ ((value).start = (string), \ (value).end = (string)+(length), \ (value).pointer = (string)) #define STRING_INIT(context,string,size) \ (((string).start = yaml_malloc(size)) ? \ ((string).pointer = (string).start, \ (string).end = (string).start+(size), \ memset((string).start, 0, (size)), \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define STRING_DEL(context,string) \ (yaml_free((string).start), \ (string).start = (string).pointer = (string).end = 0) #define STRING_EXTEND(context,string) \ ((((string).pointer+5 < (string).end) \ || yaml_string_extend(&(string).start, \ &(string).pointer, &(string).end)) ? 
\ 1 : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define CLEAR(context,string) \ ((string).pointer = (string).start, \ memset((string).start, 0, (string).end-(string).start)) #define JOIN(context,string_a,string_b) \ ((yaml_string_join(&(string_a).start, &(string_a).pointer, \ &(string_a).end, &(string_b).start, \ &(string_b).pointer, &(string_b).end)) ? \ ((string_b).pointer = (string_b).start, \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) /* * String check operations. */ /* * Check the octet at the specified position. */ #define CHECK_AT(string,octet,offset) \ ((string).pointer[offset] == (yaml_char_t)(octet)) /* * Check the current octet in the buffer. */ #define CHECK(string,octet) CHECK_AT((string),(octet),0) /* * Check if the character at the specified position is an alphabetical * character, a digit, '_', or '-'. */ #define IS_ALPHA_AT(string,offset) \ (((string).pointer[offset] >= (yaml_char_t) '0' && \ (string).pointer[offset] <= (yaml_char_t) '9') || \ ((string).pointer[offset] >= (yaml_char_t) 'A' && \ (string).pointer[offset] <= (yaml_char_t) 'Z') || \ ((string).pointer[offset] >= (yaml_char_t) 'a' && \ (string).pointer[offset] <= (yaml_char_t) 'z') || \ (string).pointer[offset] == '_' || \ (string).pointer[offset] == '-') #define IS_ALPHA(string) IS_ALPHA_AT((string),0) /* * Check if the character at the specified position is a digit. */ #define IS_DIGIT_AT(string,offset) \ (((string).pointer[offset] >= (yaml_char_t) '0' && \ (string).pointer[offset] <= (yaml_char_t) '9')) #define IS_DIGIT(string) IS_DIGIT_AT((string),0) /* * Get the value of a digit. */ #define AS_DIGIT_AT(string,offset) \ ((string).pointer[offset] - (yaml_char_t) '0') #define AS_DIGIT(string) AS_DIGIT_AT((string),0) /* * Check if the character at the specified position is a hex-digit. 
*/ #define IS_HEX_AT(string,offset) \ (((string).pointer[offset] >= (yaml_char_t) '0' && \ (string).pointer[offset] <= (yaml_char_t) '9') || \ ((string).pointer[offset] >= (yaml_char_t) 'A' && \ (string).pointer[offset] <= (yaml_char_t) 'F') || \ ((string).pointer[offset] >= (yaml_char_t) 'a' && \ (string).pointer[offset] <= (yaml_char_t) 'f')) #define IS_HEX(string) IS_HEX_AT((string),0) /* * Get the value of a hex-digit. */ #define AS_HEX_AT(string,offset) \ (((string).pointer[offset] >= (yaml_char_t) 'A' && \ (string).pointer[offset] <= (yaml_char_t) 'F') ? \ ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ ((string).pointer[offset] >= (yaml_char_t) 'a' && \ (string).pointer[offset] <= (yaml_char_t) 'f') ? \ ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ ((string).pointer[offset] - (yaml_char_t) '0')) #define AS_HEX(string) AS_HEX_AT((string),0) /* * Check if the character is ASCII. */ #define IS_ASCII_AT(string,offset) \ ((string).pointer[offset] <= (yaml_char_t) '\x7F') #define IS_ASCII(string) IS_ASCII_AT((string),0) /* * Check if the character can be printed unescaped. */ #define IS_PRINTABLE_AT(string,offset) \ (((string).pointer[offset] == 0x0A) /* . == #x0A */ \ || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \ && (string).pointer[offset] <= 0x7E) \ || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \ && (string).pointer[offset+1] >= 0xA0) \ || ((string).pointer[offset] > 0xC2 \ && (string).pointer[offset] < 0xED) \ || ((string).pointer[offset] == 0xED \ && (string).pointer[offset+1] < 0xA0) \ || ((string).pointer[offset] == 0xEE) \ || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \ && !((string).pointer[offset+1] == 0xBB /* && . 
!= #xFEFF */ \ && (string).pointer[offset+2] == 0xBF) \ && !((string).pointer[offset+1] == 0xBF \ && ((string).pointer[offset+2] == 0xBE \ || (string).pointer[offset+2] == 0xBF)))) #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0) /* * Check if the character at the specified position is NUL. */ #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset)) #define IS_Z(string) IS_Z_AT((string),0) /* * Check if the character at the specified position is BOM. */ #define IS_BOM_AT(string,offset) \ (CHECK_AT((string),'\xEF',(offset)) \ && CHECK_AT((string),'\xBB',(offset)+1) \ && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */ #define IS_BOM(string) IS_BOM_AT(string,0) /* * Check if the character at the specified position is space. */ #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset)) #define IS_SPACE(string) IS_SPACE_AT((string),0) /* * Check if the character at the specified position is tab. */ #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset)) #define IS_TAB(string) IS_TAB_AT((string),0) /* * Check if the character at the specified position is blank (space or tab). */ #define IS_BLANK_AT(string,offset) \ (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset))) #define IS_BLANK(string) IS_BLANK_AT((string),0) /* * Check if the character at the specified position is a line break. 
*/ #define IS_BREAK_AT(string,offset) \ (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \ || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \ || (CHECK_AT((string),'\xC2',(offset)) \ && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \ || (CHECK_AT((string),'\xE2',(offset)) \ && CHECK_AT((string),'\x80',(offset)+1) \ && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \ || (CHECK_AT((string),'\xE2',(offset)) \ && CHECK_AT((string),'\x80',(offset)+1) \ && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */ #define IS_BREAK(string) IS_BREAK_AT((string),0) #define IS_CRLF_AT(string,offset) \ (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1)) #define IS_CRLF(string) IS_CRLF_AT((string),0) /* * Check if the character is a line break or NUL. */ #define IS_BREAKZ_AT(string,offset) \ (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset))) #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0) /* * Check if the character is a line break, space, or NUL. */ #define IS_SPACEZ_AT(string,offset) \ (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0) /* * Check if the character is a line break, space, tab, or NUL. */ #define IS_BLANKZ_AT(string,offset) \ (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0) /* * Determine the width of the character. */ #define WIDTH_AT(string,offset) \ (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \ ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \ ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \ ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0) #define WIDTH(string) WIDTH_AT((string),0) /* * Move the string pointer to the next character. */ #define MOVE(string) ((string).pointer += WIDTH((string))) /* * Copy a character and move the pointers of both strings. */ #define COPY(string_a,string_b) \ ((*(string_b).pointer & 0x80) == 0x00 ? 
\ (*((string_a).pointer++) = *((string_b).pointer++)) : \ (*(string_b).pointer & 0xE0) == 0xC0 ? \ (*((string_a).pointer++) = *((string_b).pointer++), \ *((string_a).pointer++) = *((string_b).pointer++)) : \ (*(string_b).pointer & 0xF0) == 0xE0 ? \ (*((string_a).pointer++) = *((string_b).pointer++), \ *((string_a).pointer++) = *((string_b).pointer++), \ *((string_a).pointer++) = *((string_b).pointer++)) : \ (*(string_b).pointer & 0xF8) == 0xF0 ? \ (*((string_a).pointer++) = *((string_b).pointer++), \ *((string_a).pointer++) = *((string_b).pointer++), \ *((string_a).pointer++) = *((string_b).pointer++), \ *((string_a).pointer++) = *((string_b).pointer++)) : 0) /* * Stack and queue management. */ YAML_DECLARE(int) yaml_stack_extend(void **start, void **top, void **end); YAML_DECLARE(int) yaml_queue_extend(void **start, void **head, void **tail, void **end); #define STACK_INIT(context,stack,size) \ (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \ ((stack).top = (stack).start, \ (stack).end = (stack).start+(size), \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define STACK_DEL(context,stack) \ (yaml_free((stack).start), \ (stack).start = (stack).top = (stack).end = 0) #define STACK_EMPTY(context,stack) \ ((stack).start == (stack).top) #define STACK_LIMIT(context,stack,size) \ ((stack).top - (stack).start < (size) ? \ 1 : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define PUSH(context,stack,value) \ (((stack).top != (stack).end \ || yaml_stack_extend((void **)&(stack).start, \ (void **)&(stack).top, (void **)&(stack).end)) ? \ (*((stack).top++) = value, \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define POP(context,stack) \ (*(--(stack).top)) #define QUEUE_INIT(context,queue,size) \ (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? 
\ ((queue).head = (queue).tail = (queue).start, \ (queue).end = (queue).start+(size), \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define QUEUE_DEL(context,queue) \ (yaml_free((queue).start), \ (queue).start = (queue).head = (queue).tail = (queue).end = 0) #define QUEUE_EMPTY(context,queue) \ ((queue).head == (queue).tail) #define ENQUEUE(context,queue,value) \ (((queue).tail != (queue).end \ || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ (void **)&(queue).tail, (void **)&(queue).end)) ? \ (*((queue).tail++) = value, \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) #define DEQUEUE(context,queue) \ (*((queue).head++)) #define QUEUE_INSERT(context,queue,index,value) \ (((queue).tail != (queue).end \ || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ (void **)&(queue).tail, (void **)&(queue).end)) ? \ (memmove((queue).head+(index)+1,(queue).head+(index), \ ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \ *((queue).head+(index)) = value, \ (queue).tail++, \ 1) : \ ((context)->error = YAML_MEMORY_ERROR, \ 0)) /* * Token initializers. 
*/ #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \ (memset(&(token), 0, sizeof(yaml_token_t)), \ (token).type = (token_type), \ (token).start_mark = (token_start_mark), \ (token).end_mark = (token_end_mark)) #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_STREAM_START_TOKEN,(start_mark),(end_mark)), \ (token).data.stream_start.encoding = (token_encoding)) #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_STREAM_END_TOKEN,(start_mark),(end_mark))) #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_ALIAS_TOKEN,(start_mark),(end_mark)), \ (token).data.alias.value = (token_value)) #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_ANCHOR_TOKEN,(start_mark),(end_mark)), \ (token).data.anchor.value = (token_value)) #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_TAG_TOKEN,(start_mark),(end_mark)), \ (token).data.tag.handle = (token_handle), \ (token).data.tag.suffix = (token_suffix)) #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_SCALAR_TOKEN,(start_mark),(end_mark)), \ (token).data.scalar.value = (token_value), \ (token).data.scalar.length = (token_length), \ (token).data.scalar.style = (token_style)) #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ (token).data.version_directive.major = (token_major), \ (token).data.version_directive.minor = (token_minor)) #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \ (TOKEN_INIT((token),YAML_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ (token).data.tag_directive.handle = (token_handle), \ (token).data.tag_directive.prefix = (token_prefix)) /* * Event initializers. 
*/ #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \ (memset(&(event), 0, sizeof(yaml_event_t)), \ (event).type = (event_type), \ (event).start_mark = (event_start_mark), \ (event).end_mark = (event_end_mark)) #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \ (EVENT_INIT((event),YAML_STREAM_START_EVENT,(start_mark),(end_mark)), \ (event).data.stream_start.encoding = (event_encoding)) #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \ (EVENT_INIT((event),YAML_STREAM_END_EVENT,(start_mark),(end_mark))) #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \ event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \ (EVENT_INIT((event),YAML_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \ (event).data.document_start.version_directive = (event_version_directive), \ (event).data.document_start.tag_directives.start = (event_tag_directives_start), \ (event).data.document_start.tag_directives.end = (event_tag_directives_end), \ (event).data.document_start.implicit = (event_implicit)) #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \ (EVENT_INIT((event),YAML_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \ (event).data.document_end.implicit = (event_implicit)) #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \ (EVENT_INIT((event),YAML_ALIAS_EVENT,(start_mark),(end_mark)), \ (event).data.alias.anchor = (event_anchor)) #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \ event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \ (EVENT_INIT((event),YAML_SCALAR_EVENT,(start_mark),(end_mark)), \ (event).data.scalar.anchor = (event_anchor), \ (event).data.scalar.tag = (event_tag), \ (event).data.scalar.value = (event_value), \ (event).data.scalar.length = (event_length), \ (event).data.scalar.plain_implicit = (event_plain_implicit), \ (event).data.scalar.quoted_implicit = 
(event_quoted_implicit), \ (event).data.scalar.style = (event_style)) #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \ event_implicit,event_style,start_mark,end_mark) \ (EVENT_INIT((event),YAML_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \ (event).data.sequence_start.anchor = (event_anchor), \ (event).data.sequence_start.tag = (event_tag), \ (event).data.sequence_start.implicit = (event_implicit), \ (event).data.sequence_start.style = (event_style)) #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \ (EVENT_INIT((event),YAML_SEQUENCE_END_EVENT,(start_mark),(end_mark))) #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \ event_implicit,event_style,start_mark,end_mark) \ (EVENT_INIT((event),YAML_MAPPING_START_EVENT,(start_mark),(end_mark)), \ (event).data.mapping_start.anchor = (event_anchor), \ (event).data.mapping_start.tag = (event_tag), \ (event).data.mapping_start.implicit = (event_implicit), \ (event).data.mapping_start.style = (event_style)) #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \ (EVENT_INIT((event),YAML_MAPPING_END_EVENT,(start_mark),(end_mark))) /* * Document initializer. 
*/ #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \ document_version_directive,document_tag_directives_start, \ document_tag_directives_end,document_start_implicit, \ document_end_implicit,document_start_mark,document_end_mark) \ (memset(&(document), 0, sizeof(yaml_document_t)), \ (document).nodes.start = (document_nodes_start), \ (document).nodes.end = (document_nodes_end), \ (document).nodes.top = (document_nodes_start), \ (document).version_directive = (document_version_directive), \ (document).tag_directives.start = (document_tag_directives_start), \ (document).tag_directives.end = (document_tag_directives_end), \ (document).start_implicit = (document_start_implicit), \ (document).end_implicit = (document_end_implicit), \ (document).start_mark = (document_start_mark), \ (document).end_mark = (document_end_mark)) /* * Node initializers. */ #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \ (memset(&(node), 0, sizeof(yaml_node_t)), \ (node).type = (node_type), \ (node).tag = (node_tag), \ (node).start_mark = (node_start_mark), \ (node).end_mark = (node_end_mark)) #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \ node_style,start_mark,end_mark) \ (NODE_INIT((node),YAML_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \ (node).data.scalar.value = (node_value), \ (node).data.scalar.length = (node_length), \ (node).data.scalar.style = (node_style)) #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \ node_style,start_mark,end_mark) \ (NODE_INIT((node),YAML_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \ (node).data.sequence.items.start = (node_items_start), \ (node).data.sequence.items.end = (node_items_end), \ (node).data.sequence.items.top = (node_items_start), \ (node).data.sequence.style = (node_style)) #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \ node_style,start_mark,end_mark) \ 
(NODE_INIT((node),YAML_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \ (node).data.mapping.pairs.start = (node_pairs_start), \ (node).data.mapping.pairs.end = (node_pairs_end), \ (node).data.mapping.pairs.top = (node_pairs_start), \ (node).data.mapping.style = (node_style)) tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/scanner.c0000644000000000000000000027777713306562377022621 0ustar rootroot /* * Introduction * ************ * * The following notes assume that you are familiar with the YAML specification * (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in * some cases we are less restrictive that it requires. * * The process of transforming a YAML stream into a sequence of events is * divided on two steps: Scanning and Parsing. * * The Scanner transforms the input stream into a sequence of tokens, while the * parser transform the sequence of tokens produced by the Scanner into a * sequence of parsing events. * * The Scanner is rather clever and complicated. The Parser, on the contrary, * is a straightforward implementation of a recursive-descendant parser (or, * LL(1) parser, as it is usually called). * * Actually there are two issues of Scanning that might be called "clever", the * rest is quite straightforward. The issues are "block collection start" and * "simple keys". Both issues are explained below in details. * * Here the Scanning step is explained and implemented. We start with the list * of all the tokens produced by the Scanner together with short descriptions. * * Now, tokens: * * STREAM-START(encoding) # The stream start. * STREAM-END # The stream end. * VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. * TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. * DOCUMENT-START # '---' * DOCUMENT-END # '...' * BLOCK-SEQUENCE-START # Indentation increase denoting a block * BLOCK-MAPPING-START # sequence or a block mapping. * BLOCK-END # Indentation decrease. 
*      FLOW-SEQUENCE-START             # '['
 *      FLOW-SEQUENCE-END               # ']'
 *      FLOW-MAPPING-START              # '{'
 *      FLOW-MAPPING-END                # '}'
 *      BLOCK-ENTRY                     # '-'
 *      FLOW-ENTRY                      # ','
 *      KEY                             # '?' or nothing (simple keys).
 *      VALUE                           # ':'
 *      ALIAS(anchor)                   # '*anchor'
 *      ANCHOR(anchor)                  # '&anchor'
 *      TAG(handle,suffix)              # '!handle!suffix'
 *      SCALAR(value,style)             # A scalar.
 *
 * The following two tokens are "virtual" tokens denoting the beginning and the
 * end of the stream:
 *
 *      STREAM-START(encoding)
 *      STREAM-END
 *
 * We pass the information about the input stream encoding with the
 * STREAM-START token.
 *
 * The next two tokens are responsible for tags:
 *
 *      VERSION-DIRECTIVE(major,minor)
 *      TAG-DIRECTIVE(handle,prefix)
 *
 * Example:
 *
 *      %YAML 1.1
 *      %TAG ! !foo
 *      %TAG !yaml! tag:yaml.org,2002:
 *      ---
 *
 * The corresponding sequence of tokens:
 *
 *      STREAM-START(utf-8)
 *      VERSION-DIRECTIVE(1,1)
 *      TAG-DIRECTIVE("!","!foo")
 *      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
 *      DOCUMENT-START
 *      STREAM-END
 *
 * Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
 * line.
 *
 * The document start and end indicators are represented by:
 *
 *      DOCUMENT-START
 *      DOCUMENT-END
 *
 * Note that if a YAML stream contains an implicit document (without '---'
 * and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
 * produced.
 *
 * In the following examples, we present whole documents together with the
 * produced tokens.
 *
 * 1. An implicit document:
 *
 *      'a scalar'
 *
 *    Tokens:
 *
 *      STREAM-START(utf-8)
 *      SCALAR("a scalar",single-quoted)
 *      STREAM-END
 *
 * 2. An explicit document:
 *
 *      ---
 *      'a scalar'
 *      ...
 *
 *    Tokens:
 *
 *      STREAM-START(utf-8)
 *      DOCUMENT-START
 *      SCALAR("a scalar",single-quoted)
 *      DOCUMENT-END
 *      STREAM-END
 *
 * 3.
Several documents in a stream: * * 'a scalar' * --- * 'another scalar' * --- * 'yet another scalar' * * Tokens: * * STREAM-START(utf-8) * SCALAR("a scalar",single-quoted) * DOCUMENT-START * SCALAR("another scalar",single-quoted) * DOCUMENT-START * SCALAR("yet another scalar",single-quoted) * STREAM-END * * We have already introduced the SCALAR token above. The following tokens are * used to describe aliases, anchors, tag, and scalars: * * ALIAS(anchor) * ANCHOR(anchor) * TAG(handle,suffix) * SCALAR(value,style) * * The following series of examples illustrate the usage of these tokens: * * 1. A recursive sequence: * * &A [ *A ] * * Tokens: * * STREAM-START(utf-8) * ANCHOR("A") * FLOW-SEQUENCE-START * ALIAS("A") * FLOW-SEQUENCE-END * STREAM-END * * 2. A tagged scalar: * * !!float "3.14" # A good approximation. * * Tokens: * * STREAM-START(utf-8) * TAG("!!","float") * SCALAR("3.14",double-quoted) * STREAM-END * * 3. Various scalar styles: * * --- # Implicit empty plain scalars do not produce tokens. * --- a plain scalar * --- 'a single-quoted scalar' * --- "a double-quoted scalar" * --- |- * a literal scalar * --- >- * a folded * scalar * * Tokens: * * STREAM-START(utf-8) * DOCUMENT-START * DOCUMENT-START * SCALAR("a plain scalar",plain) * DOCUMENT-START * SCALAR("a single-quoted scalar",single-quoted) * DOCUMENT-START * SCALAR("a double-quoted scalar",double-quoted) * DOCUMENT-START * SCALAR("a literal scalar",literal) * DOCUMENT-START * SCALAR("a folded scalar",folded) * STREAM-END * * Now it's time to review collection-related tokens. We will start with * flow collections: * * FLOW-SEQUENCE-START * FLOW-SEQUENCE-END * FLOW-MAPPING-START * FLOW-MAPPING-END * FLOW-ENTRY * KEY * VALUE * * The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and * FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' * correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the * indicators '?' 
and ':', which are used for denoting mapping keys and values,
 * are represented by the KEY and VALUE tokens.
 *
 * The following examples show flow collections:
 *
 * 1. A flow sequence:
 *
 *      [item 1, item 2, item 3]
 *
 *    Tokens:
 *
 *      STREAM-START(utf-8)
 *      FLOW-SEQUENCE-START
 *      SCALAR("item 1",plain)
 *      FLOW-ENTRY
 *      SCALAR("item 2",plain)
 *      FLOW-ENTRY
 *      SCALAR("item 3",plain)
 *      FLOW-SEQUENCE-END
 *      STREAM-END
 *
 * 2. A flow mapping:
 *
 *      {
 *          a simple key: a value,  # Note that the KEY token is produced.
 *          ? a complex key: another value,
 *      }
 *
 *    Tokens:
 *
 *      STREAM-START(utf-8)
 *      FLOW-MAPPING-START
 *      KEY
 *      SCALAR("a simple key",plain)
 *      VALUE
 *      SCALAR("a value",plain)
 *      FLOW-ENTRY
 *      KEY
 *      SCALAR("a complex key",plain)
 *      VALUE
 *      SCALAR("another value",plain)
 *      FLOW-ENTRY
 *      FLOW-MAPPING-END
 *      STREAM-END
 *
 * A simple key is a key which is not denoted by the '?' indicator.  Note that
 * the Scanner still produces the KEY token whenever it encounters a simple key.
 *
 * For scanning block collections, the following tokens are used (note that we
 * repeat KEY and VALUE here):
 *
 *      BLOCK-SEQUENCE-START
 *      BLOCK-MAPPING-START
 *      BLOCK-END
 *      BLOCK-ENTRY
 *      KEY
 *      VALUE
 *
 * The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
 * increase that precedes a block collection (cf. the INDENT token in Python).
 * The token BLOCK-END denotes indentation decrease that ends a block
 * collection (cf. the DEDENT token in Python).  However YAML has some syntax
 * peculiarities that make detection of these tokens more complex.
 *
 * The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
 * '-', '?', and ':' correspondingly.
 *
 * The following examples show how the tokens BLOCK-SEQUENCE-START,
 * BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
 *
 * 1.
Block sequences: * * - item 1 * - item 2 * - * - item 3.1 * - item 3.2 * - * key 1: value 1 * key 2: value 2 * * Tokens: * * STREAM-START(utf-8) * BLOCK-SEQUENCE-START * BLOCK-ENTRY * SCALAR("item 1",plain) * BLOCK-ENTRY * SCALAR("item 2",plain) * BLOCK-ENTRY * BLOCK-SEQUENCE-START * BLOCK-ENTRY * SCALAR("item 3.1",plain) * BLOCK-ENTRY * SCALAR("item 3.2",plain) * BLOCK-END * BLOCK-ENTRY * BLOCK-MAPPING-START * KEY * SCALAR("key 1",plain) * VALUE * SCALAR("value 1",plain) * KEY * SCALAR("key 2",plain) * VALUE * SCALAR("value 2",plain) * BLOCK-END * BLOCK-END * STREAM-END * * 2. Block mappings: * * a simple key: a value # The KEY token is produced here. * ? a complex key * : another value * a mapping: * key 1: value 1 * key 2: value 2 * a sequence: * - item 1 * - item 2 * * Tokens: * * STREAM-START(utf-8) * BLOCK-MAPPING-START * KEY * SCALAR("a simple key",plain) * VALUE * SCALAR("a value",plain) * KEY * SCALAR("a complex key",plain) * VALUE * SCALAR("another value",plain) * KEY * SCALAR("a mapping",plain) * BLOCK-MAPPING-START * KEY * SCALAR("key 1",plain) * VALUE * SCALAR("value 1",plain) * KEY * SCALAR("key 2",plain) * VALUE * SCALAR("value 2",plain) * BLOCK-END * KEY * SCALAR("a sequence",plain) * VALUE * BLOCK-SEQUENCE-START * BLOCK-ENTRY * SCALAR("item 1",plain) * BLOCK-ENTRY * SCALAR("item 2",plain) * BLOCK-END * BLOCK-END * STREAM-END * * YAML does not always require to start a new block collection from a new * line. If the current line contains only '-', '?', and ':' indicators, a new * block collection may start at the current line. The following examples * illustrate this case: * * 1. Collections in a sequence: * * - - item 1 * - item 2 * - key 1: value 1 * key 2: value 2 * - ? 
complex key * : complex value * * Tokens: * * STREAM-START(utf-8) * BLOCK-SEQUENCE-START * BLOCK-ENTRY * BLOCK-SEQUENCE-START * BLOCK-ENTRY * SCALAR("item 1",plain) * BLOCK-ENTRY * SCALAR("item 2",plain) * BLOCK-END * BLOCK-ENTRY * BLOCK-MAPPING-START * KEY * SCALAR("key 1",plain) * VALUE * SCALAR("value 1",plain) * KEY * SCALAR("key 2",plain) * VALUE * SCALAR("value 2",plain) * BLOCK-END * BLOCK-ENTRY * BLOCK-MAPPING-START * KEY * SCALAR("complex key") * VALUE * SCALAR("complex value") * BLOCK-END * BLOCK-END * STREAM-END * * 2. Collections in a mapping: * * ? a sequence * : - item 1 * - item 2 * ? a mapping * : key 1: value 1 * key 2: value 2 * * Tokens: * * STREAM-START(utf-8) * BLOCK-MAPPING-START * KEY * SCALAR("a sequence",plain) * VALUE * BLOCK-SEQUENCE-START * BLOCK-ENTRY * SCALAR("item 1",plain) * BLOCK-ENTRY * SCALAR("item 2",plain) * BLOCK-END * KEY * SCALAR("a mapping",plain) * VALUE * BLOCK-MAPPING-START * KEY * SCALAR("key 1",plain) * VALUE * SCALAR("value 1",plain) * KEY * SCALAR("key 2",plain) * VALUE * SCALAR("value 2",plain) * BLOCK-END * BLOCK-END * STREAM-END * * YAML also permits non-indented sequences if they are included into a block * mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: * * key: * - item 1 # BLOCK-SEQUENCE-START is NOT produced here. * - item 2 * * Tokens: * * STREAM-START(utf-8) * BLOCK-MAPPING-START * KEY * SCALAR("key",plain) * VALUE * BLOCK-ENTRY * SCALAR("item 1",plain) * BLOCK-ENTRY * SCALAR("item 2",plain) * BLOCK-END */ #include "yaml_private.h" /* * Ensure that the buffer contains the required number of characters. * Return 1 on success, 0 on failure (reader error or memory error). */ #define CACHE(parser,length) \ (parser->unread >= (length) \ ? 1 \ : yaml_parser_update_buffer(parser, (length))) /* * Advance the buffer pointer. 
*/ #define SKIP(parser) \ (parser->mark.index ++, \ parser->mark.column ++, \ parser->unread --, \ parser->buffer.pointer += WIDTH(parser->buffer)) #define SKIP_LINE(parser) \ (IS_CRLF(parser->buffer) ? \ (parser->mark.index += 2, \ parser->mark.column = 0, \ parser->mark.line ++, \ parser->unread -= 2, \ parser->buffer.pointer += 2) : \ IS_BREAK(parser->buffer) ? \ (parser->mark.index ++, \ parser->mark.column = 0, \ parser->mark.line ++, \ parser->unread --, \ parser->buffer.pointer += WIDTH(parser->buffer)) : 0) /* * Copy a character to a string buffer and advance pointers. */ #define READ(parser,string) \ (STRING_EXTEND(parser,string) ? \ (COPY(string,parser->buffer), \ parser->mark.index ++, \ parser->mark.column ++, \ parser->unread --, \ 1) : 0) /* * Copy a line break character to a string buffer and advance pointers. */ #define READ_LINE(parser,string) \ (STRING_EXTEND(parser,string) ? \ (((CHECK_AT(parser->buffer,'\r',0) \ && CHECK_AT(parser->buffer,'\n',1)) ? /* CR LF -> LF */ \ (*((string).pointer++) = (yaml_char_t) '\n', \ parser->buffer.pointer += 2, \ parser->mark.index += 2, \ parser->mark.column = 0, \ parser->mark.line ++, \ parser->unread -= 2) : \ (CHECK_AT(parser->buffer,'\r',0) \ || CHECK_AT(parser->buffer,'\n',0)) ? /* CR|LF -> LF */ \ (*((string).pointer++) = (yaml_char_t) '\n', \ parser->buffer.pointer ++, \ parser->mark.index ++, \ parser->mark.column = 0, \ parser->mark.line ++, \ parser->unread --) : \ (CHECK_AT(parser->buffer,'\xC2',0) \ && CHECK_AT(parser->buffer,'\x85',1)) ? /* NEL -> LF */ \ (*((string).pointer++) = (yaml_char_t) '\n', \ parser->buffer.pointer += 2, \ parser->mark.index ++, \ parser->mark.column = 0, \ parser->mark.line ++, \ parser->unread --) : \ (CHECK_AT(parser->buffer,'\xE2',0) && \ CHECK_AT(parser->buffer,'\x80',1) && \ (CHECK_AT(parser->buffer,'\xA8',2) || \ CHECK_AT(parser->buffer,'\xA9',2))) ? 
/* LS|PS -> LS|PS */ \ (*((string).pointer++) = *(parser->buffer.pointer++), \ *((string).pointer++) = *(parser->buffer.pointer++), \ *((string).pointer++) = *(parser->buffer.pointer++), \ parser->mark.index ++, \ parser->mark.column = 0, \ parser->mark.line ++, \ parser->unread --) : 0), \ 1) : 0) /* * Public API declarations. */ YAML_DECLARE(int) yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token); /* * Error handling. */ static int yaml_parser_set_scanner_error(yaml_parser_t *parser, const char *context, yaml_mark_t context_mark, const char *problem); /* * High-level token API. */ YAML_DECLARE(int) yaml_parser_fetch_more_tokens(yaml_parser_t *parser); static int yaml_parser_fetch_next_token(yaml_parser_t *parser); /* * Potential simple keys. */ static int yaml_parser_stale_simple_keys(yaml_parser_t *parser); static int yaml_parser_save_simple_key(yaml_parser_t *parser); static int yaml_parser_remove_simple_key(yaml_parser_t *parser); static int yaml_parser_increase_flow_level(yaml_parser_t *parser); static int yaml_parser_decrease_flow_level(yaml_parser_t *parser); /* * Indentation treatment. */ static int yaml_parser_roll_indent(yaml_parser_t *parser, ptrdiff_t column, ptrdiff_t number, yaml_token_type_t type, yaml_mark_t mark); static int yaml_parser_unroll_indent(yaml_parser_t *parser, ptrdiff_t column); /* * Token fetchers. 
*/ static int yaml_parser_fetch_stream_start(yaml_parser_t *parser); static int yaml_parser_fetch_stream_end(yaml_parser_t *parser); static int yaml_parser_fetch_directive(yaml_parser_t *parser); static int yaml_parser_fetch_document_indicator(yaml_parser_t *parser, yaml_token_type_t type); static int yaml_parser_fetch_flow_collection_start(yaml_parser_t *parser, yaml_token_type_t type); static int yaml_parser_fetch_flow_collection_end(yaml_parser_t *parser, yaml_token_type_t type); static int yaml_parser_fetch_flow_entry(yaml_parser_t *parser); static int yaml_parser_fetch_block_entry(yaml_parser_t *parser); static int yaml_parser_fetch_key(yaml_parser_t *parser); static int yaml_parser_fetch_value(yaml_parser_t *parser); static int yaml_parser_fetch_anchor(yaml_parser_t *parser, yaml_token_type_t type); static int yaml_parser_fetch_tag(yaml_parser_t *parser); static int yaml_parser_fetch_block_scalar(yaml_parser_t *parser, int literal); static int yaml_parser_fetch_flow_scalar(yaml_parser_t *parser, int single); static int yaml_parser_fetch_plain_scalar(yaml_parser_t *parser); /* * Token scanners. 
*/ static int yaml_parser_scan_to_next_token(yaml_parser_t *parser); static int yaml_parser_scan_directive(yaml_parser_t *parser, yaml_token_t *token); static int yaml_parser_scan_directive_name(yaml_parser_t *parser, yaml_mark_t start_mark, yaml_char_t **name); static int yaml_parser_scan_version_directive_value(yaml_parser_t *parser, yaml_mark_t start_mark, int *major, int *minor); static int yaml_parser_scan_version_directive_number(yaml_parser_t *parser, yaml_mark_t start_mark, int *number); static int yaml_parser_scan_tag_directive_value(yaml_parser_t *parser, yaml_mark_t mark, yaml_char_t **handle, yaml_char_t **prefix); static int yaml_parser_scan_anchor(yaml_parser_t *parser, yaml_token_t *token, yaml_token_type_t type); static int yaml_parser_scan_tag(yaml_parser_t *parser, yaml_token_t *token); static int yaml_parser_scan_tag_handle(yaml_parser_t *parser, int directive, yaml_mark_t start_mark, yaml_char_t **handle); static int yaml_parser_scan_tag_uri(yaml_parser_t *parser, int directive, yaml_char_t *head, yaml_mark_t start_mark, yaml_char_t **uri); static int yaml_parser_scan_uri_escapes(yaml_parser_t *parser, int directive, yaml_mark_t start_mark, yaml_string_t *string); static int yaml_parser_scan_block_scalar(yaml_parser_t *parser, yaml_token_t *token, int literal); static int yaml_parser_scan_block_scalar_breaks(yaml_parser_t *parser, int *indent, yaml_string_t *breaks, yaml_mark_t start_mark, yaml_mark_t *end_mark); static int yaml_parser_scan_flow_scalar(yaml_parser_t *parser, yaml_token_t *token, int single); static int yaml_parser_scan_plain_scalar(yaml_parser_t *parser, yaml_token_t *token); /* * Get the next token. */ YAML_DECLARE(int) yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) { assert(parser); /* Non-NULL parser object is expected. */ assert(token); /* Non-NULL token object is expected. */ /* Erase the token object. */ memset(token, 0, sizeof(yaml_token_t)); /* No tokens after STREAM-END or error. 
*/ if (parser->stream_end_produced || parser->error) { return 1; } /* Ensure that the tokens queue contains enough tokens. */ if (!parser->token_available) { if (!yaml_parser_fetch_more_tokens(parser)) return 0; } /* Fetch the next token from the queue. */ *token = DEQUEUE(parser, parser->tokens); parser->token_available = 0; parser->tokens_parsed ++; if (token->type == YAML_STREAM_END_TOKEN) { parser->stream_end_produced = 1; } return 1; } /* * Set the scanner error and return 0. */ static int yaml_parser_set_scanner_error(yaml_parser_t *parser, const char *context, yaml_mark_t context_mark, const char *problem) { parser->error = YAML_SCANNER_ERROR; parser->context = context; parser->context_mark = context_mark; parser->problem = problem; parser->problem_mark = parser->mark; return 0; } /* * Ensure that the tokens queue contains at least one token which can be * returned to the Parser. */ YAML_DECLARE(int) yaml_parser_fetch_more_tokens(yaml_parser_t *parser) { int need_more_tokens; /* While we need more tokens to fetch, do it. */ while (1) { /* * Check if we really need to fetch more tokens. */ need_more_tokens = 0; if (parser->tokens.head == parser->tokens.tail) { /* Queue is empty. */ need_more_tokens = 1; } else { yaml_simple_key_t *simple_key; /* Check if any potential simple key may occupy the head position. */ if (!yaml_parser_stale_simple_keys(parser)) return 0; for (simple_key = parser->simple_keys.start; simple_key != parser->simple_keys.top; simple_key++) { if (simple_key->possible && simple_key->token_number == parser->tokens_parsed) { need_more_tokens = 1; break; } } } /* We are finished. */ if (!need_more_tokens) break; /* Fetch the next token. */ if (!yaml_parser_fetch_next_token(parser)) return 0; } parser->token_available = 1; return 1; } /* * The dispatcher for token fetchers. */ static int yaml_parser_fetch_next_token(yaml_parser_t *parser) { /* Ensure that the buffer is initialized. 
*/ if (!CACHE(parser, 1)) return 0; /* Check if we just started scanning. Fetch STREAM-START then. */ if (!parser->stream_start_produced) return yaml_parser_fetch_stream_start(parser); /* Eat whitespaces and comments until we reach the next token. */ if (!yaml_parser_scan_to_next_token(parser)) return 0; /* Remove obsolete potential simple keys. */ if (!yaml_parser_stale_simple_keys(parser)) return 0; /* Check the indentation level against the current column. */ if (!yaml_parser_unroll_indent(parser, parser->mark.column)) return 0; /* * Ensure that the buffer contains at least 4 characters. 4 is the length * of the longest indicators ('--- ' and '... '). */ if (!CACHE(parser, 4)) return 0; /* Is it the end of the stream? */ if (IS_Z(parser->buffer)) return yaml_parser_fetch_stream_end(parser); /* Is it a directive? */ if (parser->mark.column == 0 && CHECK(parser->buffer, '%')) return yaml_parser_fetch_directive(parser); /* Is it the document start indicator? */ if (parser->mark.column == 0 && CHECK_AT(parser->buffer, '-', 0) && CHECK_AT(parser->buffer, '-', 1) && CHECK_AT(parser->buffer, '-', 2) && IS_BLANKZ_AT(parser->buffer, 3)) return yaml_parser_fetch_document_indicator(parser, YAML_DOCUMENT_START_TOKEN); /* Is it the document end indicator? */ if (parser->mark.column == 0 && CHECK_AT(parser->buffer, '.', 0) && CHECK_AT(parser->buffer, '.', 1) && CHECK_AT(parser->buffer, '.', 2) && IS_BLANKZ_AT(parser->buffer, 3)) return yaml_parser_fetch_document_indicator(parser, YAML_DOCUMENT_END_TOKEN); /* Is it the flow sequence start indicator? */ if (CHECK(parser->buffer, '[')) return yaml_parser_fetch_flow_collection_start(parser, YAML_FLOW_SEQUENCE_START_TOKEN); /* Is it the flow mapping start indicator? */ if (CHECK(parser->buffer, '{')) return yaml_parser_fetch_flow_collection_start(parser, YAML_FLOW_MAPPING_START_TOKEN); /* Is it the flow sequence end indicator? 
*/ if (CHECK(parser->buffer, ']')) return yaml_parser_fetch_flow_collection_end(parser, YAML_FLOW_SEQUENCE_END_TOKEN); /* Is it the flow mapping end indicator? */ if (CHECK(parser->buffer, '}')) return yaml_parser_fetch_flow_collection_end(parser, YAML_FLOW_MAPPING_END_TOKEN); /* Is it the flow entry indicator? */ if (CHECK(parser->buffer, ',')) return yaml_parser_fetch_flow_entry(parser); /* Is it the block entry indicator? */ if (CHECK(parser->buffer, '-') && IS_BLANKZ_AT(parser->buffer, 1)) return yaml_parser_fetch_block_entry(parser); /* Is it the key indicator? */ if (CHECK(parser->buffer, '?') && (parser->flow_level || IS_BLANKZ_AT(parser->buffer, 1))) return yaml_parser_fetch_key(parser); /* Is it the value indicator? */ if (CHECK(parser->buffer, ':') && (parser->flow_level || IS_BLANKZ_AT(parser->buffer, 1))) return yaml_parser_fetch_value(parser); /* Is it an alias? */ if (CHECK(parser->buffer, '*')) return yaml_parser_fetch_anchor(parser, YAML_ALIAS_TOKEN); /* Is it an anchor? */ if (CHECK(parser->buffer, '&')) return yaml_parser_fetch_anchor(parser, YAML_ANCHOR_TOKEN); /* Is it a tag? */ if (CHECK(parser->buffer, '!')) return yaml_parser_fetch_tag(parser); /* Is it a literal scalar? */ if (CHECK(parser->buffer, '|') && !parser->flow_level) return yaml_parser_fetch_block_scalar(parser, 1); /* Is it a folded scalar? */ if (CHECK(parser->buffer, '>') && !parser->flow_level) return yaml_parser_fetch_block_scalar(parser, 0); /* Is it a single-quoted scalar? */ if (CHECK(parser->buffer, '\'')) return yaml_parser_fetch_flow_scalar(parser, 1); /* Is it a double-quoted scalar? */ if (CHECK(parser->buffer, '"')) return yaml_parser_fetch_flow_scalar(parser, 0); /* * Is it a plain scalar? * * A plain scalar may start with any non-blank characters except * * '-', '?', ':', ',', '[', ']', '{', '}', * '#', '&', '*', '!', '|', '>', '\'', '\"', * '%', '@', '`'. 
* * In the block context (and, for the '-' indicator, in the flow context * too), it may also start with the characters * * '-', '?', ':' * * if it is followed by a non-space character. * * The last rule is more restrictive than the specification requires. */ if (!(IS_BLANKZ(parser->buffer) || CHECK(parser->buffer, '-') || CHECK(parser->buffer, '?') || CHECK(parser->buffer, ':') || CHECK(parser->buffer, ',') || CHECK(parser->buffer, '[') || CHECK(parser->buffer, ']') || CHECK(parser->buffer, '{') || CHECK(parser->buffer, '}') || CHECK(parser->buffer, '#') || CHECK(parser->buffer, '&') || CHECK(parser->buffer, '*') || CHECK(parser->buffer, '!') || CHECK(parser->buffer, '|') || CHECK(parser->buffer, '>') || CHECK(parser->buffer, '\'') || CHECK(parser->buffer, '"') || CHECK(parser->buffer, '%') || CHECK(parser->buffer, '@') || CHECK(parser->buffer, '`')) || (CHECK(parser->buffer, '-') && !IS_BLANK_AT(parser->buffer, 1)) || (!parser->flow_level && (CHECK(parser->buffer, '?') || CHECK(parser->buffer, ':')) && !IS_BLANKZ_AT(parser->buffer, 1))) return yaml_parser_fetch_plain_scalar(parser); /* * If we don't determine the token type so far, it is an error. */ return yaml_parser_set_scanner_error(parser, "while scanning for the next token", parser->mark, "found character that cannot start any token"); } /* * Check the list of potential simple keys and remove the positions that * cannot contain simple keys anymore. */ static int yaml_parser_stale_simple_keys(yaml_parser_t *parser) { yaml_simple_key_t *simple_key; /* Check for a potential simple key for each flow level. */ for (simple_key = parser->simple_keys.start; simple_key != parser->simple_keys.top; simple_key ++) { /* * The specification requires that a simple key * * - is limited to a single line, * - is shorter than 1024 characters. 
*/ if (simple_key->possible && (simple_key->mark.line < parser->mark.line || simple_key->mark.index+1024 < parser->mark.index)) { /* Check if the potential simple key to be removed is required. */ if (simple_key->required) { return yaml_parser_set_scanner_error(parser, "while scanning a simple key", simple_key->mark, "could not find expected ':'"); } simple_key->possible = 0; } } return 1; } /* * Check if a simple key may start at the current position and add it if * needed. */ static int yaml_parser_save_simple_key(yaml_parser_t *parser) { /* * A simple key is required at the current position if the scanner is in * the block context and the current column coincides with the indentation * level. */ int required = (!parser->flow_level && parser->indent == (ptrdiff_t)parser->mark.column); /* * If the current position may start a simple key, save it. */ if (parser->simple_key_allowed) { yaml_simple_key_t simple_key; simple_key.possible = 1; simple_key.required = required; simple_key.token_number = parser->tokens_parsed + (parser->tokens.tail - parser->tokens.head); simple_key.mark = parser->mark; if (!yaml_parser_remove_simple_key(parser)) return 0; *(parser->simple_keys.top-1) = simple_key; } return 1; } /* * Remove a potential simple key at the current flow level. */ static int yaml_parser_remove_simple_key(yaml_parser_t *parser) { yaml_simple_key_t *simple_key = parser->simple_keys.top-1; if (simple_key->possible) { /* If the key is required, it is an error. */ if (simple_key->required) { return yaml_parser_set_scanner_error(parser, "while scanning a simple key", simple_key->mark, "could not find expected ':'"); } } /* Remove the key from the stack. */ simple_key->possible = 0; return 1; } /* * Increase the flow level and resize the simple key list if needed. */ static int yaml_parser_increase_flow_level(yaml_parser_t *parser) { yaml_simple_key_t empty_simple_key = { 0, 0, 0, { 0, 0, 0 } }; /* Reset the simple key on the next level. 
*/ if (!PUSH(parser, parser->simple_keys, empty_simple_key)) return 0; /* Increase the flow level. */ if (parser->flow_level == INT_MAX) { parser->error = YAML_MEMORY_ERROR; return 0; } parser->flow_level++; return 1; } /* * Decrease the flow level. */ static int yaml_parser_decrease_flow_level(yaml_parser_t *parser) { yaml_simple_key_t dummy_key; /* Used to eliminate a compiler warning. */ if (parser->flow_level) { parser->flow_level --; dummy_key = POP(parser, parser->simple_keys); } return 1; } /* * Push the current indentation level to the stack and set the new level * the current column is greater than the indentation level. In this case, * append or insert the specified token into the token queue. * */ static int yaml_parser_roll_indent(yaml_parser_t *parser, ptrdiff_t column, ptrdiff_t number, yaml_token_type_t type, yaml_mark_t mark) { yaml_token_t token; /* In the flow context, do nothing. */ if (parser->flow_level) return 1; if (parser->indent < column) { /* * Push the current indentation level to the stack and set the new * indentation level. */ if (!PUSH(parser, parser->indents, parser->indent)) return 0; if (column > INT_MAX) { parser->error = YAML_MEMORY_ERROR; return 0; } parser->indent = column; /* Create a token and insert it into the queue. */ TOKEN_INIT(token, type, mark, mark); if (number == -1) { if (!ENQUEUE(parser, parser->tokens, token)) return 0; } else { if (!QUEUE_INSERT(parser, parser->tokens, number - parser->tokens_parsed, token)) return 0; } } return 1; } /* * Pop indentation levels from the indents stack until the current level * becomes less or equal to the column. For each intendation level, append * the BLOCK-END token. */ static int yaml_parser_unroll_indent(yaml_parser_t *parser, ptrdiff_t column) { yaml_token_t token; /* In the flow context, do nothing. */ if (parser->flow_level) return 1; /* Loop through the intendation levels in the stack. */ while (parser->indent > column) { /* Create a token and append it to the queue. 
*/ TOKEN_INIT(token, YAML_BLOCK_END_TOKEN, parser->mark, parser->mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; /* Pop the indentation level. */ parser->indent = POP(parser, parser->indents); } return 1; } /* * Initialize the scanner and produce the STREAM-START token. */ static int yaml_parser_fetch_stream_start(yaml_parser_t *parser) { yaml_simple_key_t simple_key = { 0, 0, 0, { 0, 0, 0 } }; yaml_token_t token; /* Set the initial indentation. */ parser->indent = -1; /* Initialize the simple key stack. */ if (!PUSH(parser, parser->simple_keys, simple_key)) return 0; /* A simple key is allowed at the beginning of the stream. */ parser->simple_key_allowed = 1; /* We have started. */ parser->stream_start_produced = 1; /* Create the STREAM-START token and append it to the queue. */ STREAM_START_TOKEN_INIT(token, parser->encoding, parser->mark, parser->mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the STREAM-END token and shut down the scanner. */ static int yaml_parser_fetch_stream_end(yaml_parser_t *parser) { yaml_token_t token; /* Force new line. */ if (parser->mark.column != 0) { parser->mark.column = 0; parser->mark.line ++; } /* Reset the indentation level. */ if (!yaml_parser_unroll_indent(parser, -1)) return 0; /* Reset simple keys. */ if (!yaml_parser_remove_simple_key(parser)) return 0; parser->simple_key_allowed = 0; /* Create the STREAM-END token and append it to the queue. */ STREAM_END_TOKEN_INIT(token, parser->mark, parser->mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. */ static int yaml_parser_fetch_directive(yaml_parser_t *parser) { yaml_token_t token; /* Reset the indentation level. */ if (!yaml_parser_unroll_indent(parser, -1)) return 0; /* Reset simple keys. */ if (!yaml_parser_remove_simple_key(parser)) return 0; parser->simple_key_allowed = 0; /* Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. 
*/ if (!yaml_parser_scan_directive(parser, &token)) return 0; /* Append the token to the queue. */ if (!ENQUEUE(parser, parser->tokens, token)) { yaml_token_delete(&token); return 0; } return 1; } /* * Produce the DOCUMENT-START or DOCUMENT-END token. */ static int yaml_parser_fetch_document_indicator(yaml_parser_t *parser, yaml_token_type_t type) { yaml_mark_t start_mark, end_mark; yaml_token_t token; /* Reset the indentation level. */ if (!yaml_parser_unroll_indent(parser, -1)) return 0; /* Reset simple keys. */ if (!yaml_parser_remove_simple_key(parser)) return 0; parser->simple_key_allowed = 0; /* Consume the token. */ start_mark = parser->mark; SKIP(parser); SKIP(parser); SKIP(parser); end_mark = parser->mark; /* Create the DOCUMENT-START or DOCUMENT-END token. */ TOKEN_INIT(token, type, start_mark, end_mark); /* Append the token to the queue. */ if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. */ static int yaml_parser_fetch_flow_collection_start(yaml_parser_t *parser, yaml_token_type_t type) { yaml_mark_t start_mark, end_mark; yaml_token_t token; /* The indicators '[' and '{' may start a simple key. */ if (!yaml_parser_save_simple_key(parser)) return 0; /* Increase the flow level. */ if (!yaml_parser_increase_flow_level(parser)) return 0; /* A simple key may follow the indicators '[' and '{'. */ parser->simple_key_allowed = 1; /* Consume the token. */ start_mark = parser->mark; SKIP(parser); end_mark = parser->mark; /* Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. */ TOKEN_INIT(token, type, start_mark, end_mark); /* Append the token to the queue. */ if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. 
*/ static int yaml_parser_fetch_flow_collection_end(yaml_parser_t *parser, yaml_token_type_t type) { yaml_mark_t start_mark, end_mark; yaml_token_t token; /* Reset any potential simple key on the current flow level. */ if (!yaml_parser_remove_simple_key(parser)) return 0; /* Decrease the flow level. */ if (!yaml_parser_decrease_flow_level(parser)) return 0; /* No simple keys after the indicators ']' and '}'. */ parser->simple_key_allowed = 0; /* Consume the token. */ start_mark = parser->mark; SKIP(parser); end_mark = parser->mark; /* Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. */ TOKEN_INIT(token, type, start_mark, end_mark); /* Append the token to the queue. */ if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the FLOW-ENTRY token. */ static int yaml_parser_fetch_flow_entry(yaml_parser_t *parser) { yaml_mark_t start_mark, end_mark; yaml_token_t token; /* Reset any potential simple keys on the current flow level. */ if (!yaml_parser_remove_simple_key(parser)) return 0; /* Simple keys are allowed after ','. */ parser->simple_key_allowed = 1; /* Consume the token. */ start_mark = parser->mark; SKIP(parser); end_mark = parser->mark; /* Create the FLOW-ENTRY token and append it to the queue. */ TOKEN_INIT(token, YAML_FLOW_ENTRY_TOKEN, start_mark, end_mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the BLOCK-ENTRY token. */ static int yaml_parser_fetch_block_entry(yaml_parser_t *parser) { yaml_mark_t start_mark, end_mark; yaml_token_t token; /* Check if the scanner is in the block context. */ if (!parser->flow_level) { /* Check if we are allowed to start a new entry. */ if (!parser->simple_key_allowed) { return yaml_parser_set_scanner_error(parser, NULL, parser->mark, "block sequence entries are not allowed in this context"); } /* Add the BLOCK-SEQUENCE-START token if needed. 
*/ if (!yaml_parser_roll_indent(parser, parser->mark.column, -1, YAML_BLOCK_SEQUENCE_START_TOKEN, parser->mark)) return 0; } else { /* * It is an error for the '-' indicator to occur in the flow context, * but we let the Parser detect and report about it because the Parser * is able to point to the context. */ } /* Reset any potential simple keys on the current flow level. */ if (!yaml_parser_remove_simple_key(parser)) return 0; /* Simple keys are allowed after '-'. */ parser->simple_key_allowed = 1; /* Consume the token. */ start_mark = parser->mark; SKIP(parser); end_mark = parser->mark; /* Create the BLOCK-ENTRY token and append it to the queue. */ TOKEN_INIT(token, YAML_BLOCK_ENTRY_TOKEN, start_mark, end_mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the KEY token. */ static int yaml_parser_fetch_key(yaml_parser_t *parser) { yaml_mark_t start_mark, end_mark; yaml_token_t token; /* In the block context, additional checks are required. */ if (!parser->flow_level) { /* Check if we are allowed to start a new key (not nessesary simple). */ if (!parser->simple_key_allowed) { return yaml_parser_set_scanner_error(parser, NULL, parser->mark, "mapping keys are not allowed in this context"); } /* Add the BLOCK-MAPPING-START token if needed. */ if (!yaml_parser_roll_indent(parser, parser->mark.column, -1, YAML_BLOCK_MAPPING_START_TOKEN, parser->mark)) return 0; } /* Reset any potential simple keys on the current flow level. */ if (!yaml_parser_remove_simple_key(parser)) return 0; /* Simple keys are allowed after '?' in the block context. */ parser->simple_key_allowed = (!parser->flow_level); /* Consume the token. */ start_mark = parser->mark; SKIP(parser); end_mark = parser->mark; /* Create the KEY token and append it to the queue. */ TOKEN_INIT(token, YAML_KEY_TOKEN, start_mark, end_mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the VALUE token. 
*/ static int yaml_parser_fetch_value(yaml_parser_t *parser) { yaml_mark_t start_mark, end_mark; yaml_token_t token; yaml_simple_key_t *simple_key = parser->simple_keys.top-1; /* Have we found a simple key? */ if (simple_key->possible) { /* Create the KEY token and insert it into the queue. */ TOKEN_INIT(token, YAML_KEY_TOKEN, simple_key->mark, simple_key->mark); if (!QUEUE_INSERT(parser, parser->tokens, simple_key->token_number - parser->tokens_parsed, token)) return 0; /* In the block context, we may need to add the BLOCK-MAPPING-START token. */ if (!yaml_parser_roll_indent(parser, simple_key->mark.column, simple_key->token_number, YAML_BLOCK_MAPPING_START_TOKEN, simple_key->mark)) return 0; /* Remove the simple key. */ simple_key->possible = 0; /* A simple key cannot follow another simple key. */ parser->simple_key_allowed = 0; } else { /* The ':' indicator follows a complex key. */ /* In the block context, extra checks are required. */ if (!parser->flow_level) { /* Check if we are allowed to start a complex value. */ if (!parser->simple_key_allowed) { return yaml_parser_set_scanner_error(parser, NULL, parser->mark, "mapping values are not allowed in this context"); } /* Add the BLOCK-MAPPING-START token if needed. */ if (!yaml_parser_roll_indent(parser, parser->mark.column, -1, YAML_BLOCK_MAPPING_START_TOKEN, parser->mark)) return 0; } /* Simple keys after ':' are allowed in the block context. */ parser->simple_key_allowed = (!parser->flow_level); } /* Consume the token. */ start_mark = parser->mark; SKIP(parser); end_mark = parser->mark; /* Create the VALUE token and append it to the queue. */ TOKEN_INIT(token, YAML_VALUE_TOKEN, start_mark, end_mark); if (!ENQUEUE(parser, parser->tokens, token)) return 0; return 1; } /* * Produce the ALIAS or ANCHOR token. */ static int yaml_parser_fetch_anchor(yaml_parser_t *parser, yaml_token_type_t type) { yaml_token_t token; /* An anchor or an alias could be a simple key. 
*/ if (!yaml_parser_save_simple_key(parser)) return 0; /* A simple key cannot follow an anchor or an alias. */ parser->simple_key_allowed = 0; /* Create the ALIAS or ANCHOR token and append it to the queue. */ if (!yaml_parser_scan_anchor(parser, &token, type)) return 0; if (!ENQUEUE(parser, parser->tokens, token)) { yaml_token_delete(&token); return 0; } return 1; } /* * Produce the TAG token. */ static int yaml_parser_fetch_tag(yaml_parser_t *parser) { yaml_token_t token; /* A tag could be a simple key. */ if (!yaml_parser_save_simple_key(parser)) return 0; /* A simple key cannot follow a tag. */ parser->simple_key_allowed = 0; /* Create the TAG token and append it to the queue. */ if (!yaml_parser_scan_tag(parser, &token)) return 0; if (!ENQUEUE(parser, parser->tokens, token)) { yaml_token_delete(&token); return 0; } return 1; } /* * Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. */ static int yaml_parser_fetch_block_scalar(yaml_parser_t *parser, int literal) { yaml_token_t token; /* Remove any potential simple keys. */ if (!yaml_parser_remove_simple_key(parser)) return 0; /* A simple key may follow a block scalar. */ parser->simple_key_allowed = 1; /* Create the SCALAR token and append it to the queue. */ if (!yaml_parser_scan_block_scalar(parser, &token, literal)) return 0; if (!ENQUEUE(parser, parser->tokens, token)) { yaml_token_delete(&token); return 0; } return 1; } /* * Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. */ static int yaml_parser_fetch_flow_scalar(yaml_parser_t *parser, int single) { yaml_token_t token; /* A plain scalar could be a simple key. */ if (!yaml_parser_save_simple_key(parser)) return 0; /* A simple key cannot follow a flow scalar. */ parser->simple_key_allowed = 0; /* Create the SCALAR token and append it to the queue. 
*/ if (!yaml_parser_scan_flow_scalar(parser, &token, single)) return 0; if (!ENQUEUE(parser, parser->tokens, token)) { yaml_token_delete(&token); return 0; } return 1; } /* * Produce the SCALAR(...,plain) token. */ static int yaml_parser_fetch_plain_scalar(yaml_parser_t *parser) { yaml_token_t token; /* A plain scalar could be a simple key. */ if (!yaml_parser_save_simple_key(parser)) return 0; /* A simple key cannot follow a flow scalar. */ parser->simple_key_allowed = 0; /* Create the SCALAR token and append it to the queue. */ if (!yaml_parser_scan_plain_scalar(parser, &token)) return 0; if (!ENQUEUE(parser, parser->tokens, token)) { yaml_token_delete(&token); return 0; } return 1; } /* * Eat whitespaces and comments until the next token is found. */ static int yaml_parser_scan_to_next_token(yaml_parser_t *parser) { /* Until the next token is not found. */ while (1) { /* Allow the BOM mark to start a line. */ if (!CACHE(parser, 1)) return 0; if (parser->mark.column == 0 && IS_BOM(parser->buffer)) SKIP(parser); /* * Eat whitespaces. * * Tabs are allowed: * * - in the flow context; * - in the block context, but not at the beginning of the line or * after '-', '?', or ':' (complex value). */ if (!CACHE(parser, 1)) return 0; while (CHECK(parser->buffer,' ') || ((parser->flow_level || !parser->simple_key_allowed) && CHECK(parser->buffer, '\t'))) { SKIP(parser); if (!CACHE(parser, 1)) return 0; } /* Eat a comment until a line break. */ if (CHECK(parser->buffer, '#')) { while (!IS_BREAKZ(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) return 0; } } /* If it is a line break, eat it. */ if (IS_BREAK(parser->buffer)) { if (!CACHE(parser, 2)) return 0; SKIP_LINE(parser); /* In the block context, a new line may start a simple key. */ if (!parser->flow_level) { parser->simple_key_allowed = 1; } } else { /* We have found a token. */ break; } } return 1; } /* * Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. 
* * Scope: * %YAML 1.1 # a comment \n * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * %TAG !yaml! tag:yaml.org,2002: \n * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */ int yaml_parser_scan_directive(yaml_parser_t *parser, yaml_token_t *token) { yaml_mark_t start_mark, end_mark; yaml_char_t *name = NULL; int major, minor; yaml_char_t *handle = NULL, *prefix = NULL; /* Eat '%'. */ start_mark = parser->mark; SKIP(parser); /* Scan the directive name. */ if (!yaml_parser_scan_directive_name(parser, start_mark, &name)) goto error; /* Is it a YAML directive? */ if (strcmp((char *)name, "YAML") == 0) { /* Scan the VERSION directive value. */ if (!yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor)) goto error; end_mark = parser->mark; /* Create a VERSION-DIRECTIVE token. */ VERSION_DIRECTIVE_TOKEN_INIT(*token, major, minor, start_mark, end_mark); } /* Is it a TAG directive? */ else if (strcmp((char *)name, "TAG") == 0) { /* Scan the TAG directive value. */ if (!yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix)) goto error; end_mark = parser->mark; /* Create a TAG-DIRECTIVE token. */ TAG_DIRECTIVE_TOKEN_INIT(*token, handle, prefix, start_mark, end_mark); } /* Unknown directive. */ else { yaml_parser_set_scanner_error(parser, "while scanning a directive", start_mark, "found uknown directive name"); goto error; } /* Eat the rest of the line including any comments. */ if (!CACHE(parser, 1)) goto error; while (IS_BLANK(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) goto error; } if (CHECK(parser->buffer, '#')) { while (!IS_BREAKZ(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) goto error; } } /* Check if we are at the end of the line. */ if (!IS_BREAKZ(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a directive", start_mark, "did not find expected comment or line break"); goto error; } /* Eat a line break. 
*/ if (IS_BREAK(parser->buffer)) { if (!CACHE(parser, 2)) goto error; SKIP_LINE(parser); } yaml_free(name); return 1; error: yaml_free(prefix); yaml_free(handle); yaml_free(name); return 0; } /* * Scan the directive name. * * Scope: * %YAML 1.1 # a comment \n * ^^^^ * %TAG !yaml! tag:yaml.org,2002: \n * ^^^ */ static int yaml_parser_scan_directive_name(yaml_parser_t *parser, yaml_mark_t start_mark, yaml_char_t **name) { yaml_string_t string = NULL_STRING; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; /* Consume the directive name. */ if (!CACHE(parser, 1)) goto error; while (IS_ALPHA(parser->buffer)) { if (!READ(parser, string)) goto error; if (!CACHE(parser, 1)) goto error; } /* Check if the name is empty. */ if (string.start == string.pointer) { yaml_parser_set_scanner_error(parser, "while scanning a directive", start_mark, "could not find expected directive name"); goto error; } /* Check for an blank character after the name. */ if (!IS_BLANKZ(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a directive", start_mark, "found unexpected non-alphabetical character"); goto error; } *name = string.start; return 1; error: STRING_DEL(parser, string); return 0; } /* * Scan the value of VERSION-DIRECTIVE. * * Scope: * %YAML 1.1 # a comment \n * ^^^^^^ */ static int yaml_parser_scan_version_directive_value(yaml_parser_t *parser, yaml_mark_t start_mark, int *major, int *minor) { /* Eat whitespaces. */ if (!CACHE(parser, 1)) return 0; while (IS_BLANK(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) return 0; } /* Consume the major version number. */ if (!yaml_parser_scan_version_directive_number(parser, start_mark, major)) return 0; /* Eat '.'. */ if (!CHECK(parser->buffer, '.')) { return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", start_mark, "did not find expected digit or '.' character"); } SKIP(parser); /* Consume the minor version number. 
*/ if (!yaml_parser_scan_version_directive_number(parser, start_mark, minor)) return 0; return 1; } #define MAX_NUMBER_LENGTH 9 /* * Scan the version number of VERSION-DIRECTIVE. * * Scope: * %YAML 1.1 # a comment \n * ^ * %YAML 1.1 # a comment \n * ^ */ static int yaml_parser_scan_version_directive_number(yaml_parser_t *parser, yaml_mark_t start_mark, int *number) { int value = 0; size_t length = 0; /* Repeat while the next character is digit. */ if (!CACHE(parser, 1)) return 0; while (IS_DIGIT(parser->buffer)) { /* Check if the number is too long. */ if (++length > MAX_NUMBER_LENGTH) { return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", start_mark, "found extremely long version number"); } value = value*10 + AS_DIGIT(parser->buffer); SKIP(parser); if (!CACHE(parser, 1)) return 0; } /* Check if the number was present. */ if (!length) { return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", start_mark, "did not find expected version number"); } *number = value; return 1; } /* * Scan the value of a TAG-DIRECTIVE token. * * Scope: * %TAG !yaml! tag:yaml.org,2002: \n * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */ static int yaml_parser_scan_tag_directive_value(yaml_parser_t *parser, yaml_mark_t start_mark, yaml_char_t **handle, yaml_char_t **prefix) { yaml_char_t *handle_value = NULL; yaml_char_t *prefix_value = NULL; /* Eat whitespaces. */ if (!CACHE(parser, 1)) goto error; while (IS_BLANK(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) goto error; } /* Scan a handle. */ if (!yaml_parser_scan_tag_handle(parser, 1, start_mark, &handle_value)) goto error; /* Expect a whitespace. */ if (!CACHE(parser, 1)) goto error; if (!IS_BLANK(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", start_mark, "did not find expected whitespace"); goto error; } /* Eat whitespaces. */ while (IS_BLANK(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) goto error; } /* Scan a prefix. 
*/ if (!yaml_parser_scan_tag_uri(parser, 1, NULL, start_mark, &prefix_value)) goto error; /* Expect a whitespace or line break. */ if (!CACHE(parser, 1)) goto error; if (!IS_BLANKZ(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", start_mark, "did not find expected whitespace or line break"); goto error; } *handle = handle_value; *prefix = prefix_value; return 1; error: yaml_free(handle_value); yaml_free(prefix_value); return 0; } static int yaml_parser_scan_anchor(yaml_parser_t *parser, yaml_token_t *token, yaml_token_type_t type) { int length = 0; yaml_mark_t start_mark, end_mark; yaml_string_t string = NULL_STRING; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; /* Eat the indicator character. */ start_mark = parser->mark; SKIP(parser); /* Consume the value. */ if (!CACHE(parser, 1)) goto error; while (IS_ALPHA(parser->buffer)) { if (!READ(parser, string)) goto error; if (!CACHE(parser, 1)) goto error; length ++; } end_mark = parser->mark; /* * Check if length of the anchor is greater than 0 and it is followed by * a whitespace character or one of the indicators: * * '?', ':', ',', ']', '}', '%', '@', '`'. */ if (!length || !(IS_BLANKZ(parser->buffer) || CHECK(parser->buffer, '?') || CHECK(parser->buffer, ':') || CHECK(parser->buffer, ',') || CHECK(parser->buffer, ']') || CHECK(parser->buffer, '}') || CHECK(parser->buffer, '%') || CHECK(parser->buffer, '@') || CHECK(parser->buffer, '`'))) { yaml_parser_set_scanner_error(parser, type == YAML_ANCHOR_TOKEN ? "while scanning an anchor" : "while scanning an alias", start_mark, "did not find expected alphabetic or numeric character"); goto error; } /* Create a token. */ if (type == YAML_ANCHOR_TOKEN) { ANCHOR_TOKEN_INIT(*token, string.start, start_mark, end_mark); } else { ALIAS_TOKEN_INIT(*token, string.start, start_mark, end_mark); } return 1; error: STRING_DEL(parser, string); return 0; } /* * Scan a TAG token. 
*/ static int yaml_parser_scan_tag(yaml_parser_t *parser, yaml_token_t *token) { yaml_char_t *handle = NULL; yaml_char_t *suffix = NULL; yaml_mark_t start_mark, end_mark; start_mark = parser->mark; /* Check if the tag is in the canonical form. */ if (!CACHE(parser, 2)) goto error; if (CHECK_AT(parser->buffer, '<', 1)) { /* Set the handle to '' */ handle = yaml_malloc(1); if (!handle) goto error; handle[0] = '\0'; /* Eat '!<' */ SKIP(parser); SKIP(parser); /* Consume the tag value. */ if (!yaml_parser_scan_tag_uri(parser, 0, NULL, start_mark, &suffix)) goto error; /* Check for '>' and eat it. */ if (!CHECK(parser->buffer, '>')) { yaml_parser_set_scanner_error(parser, "while scanning a tag", start_mark, "did not find the expected '>'"); goto error; } SKIP(parser); } else { /* The tag has either the '!suffix' or the '!handle!suffix' form. */ /* First, try to scan a handle. */ if (!yaml_parser_scan_tag_handle(parser, 0, start_mark, &handle)) goto error; /* Check if it is, indeed, handle. */ if (handle[0] == '!' && handle[1] != '\0' && handle[strlen((char *)handle)-1] == '!') { /* Scan the suffix now. */ if (!yaml_parser_scan_tag_uri(parser, 0, NULL, start_mark, &suffix)) goto error; } else { /* It wasn't a handle after all. Scan the rest of the tag. */ if (!yaml_parser_scan_tag_uri(parser, 0, handle, start_mark, &suffix)) goto error; /* Set the handle to '!'. */ yaml_free(handle); handle = yaml_malloc(2); if (!handle) goto error; handle[0] = '!'; handle[1] = '\0'; /* * A special case: the '!' tag. Set the handle to '' and the * suffix to '!'. */ if (suffix[0] == '\0') { yaml_char_t *tmp = handle; handle = suffix; suffix = tmp; } } } /* Check the character which ends the tag. */ if (!CACHE(parser, 1)) goto error; if (!IS_BLANKZ(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a tag", start_mark, "did not find expected whitespace or line break"); goto error; } end_mark = parser->mark; /* Create a token. 
*/ TAG_TOKEN_INIT(*token, handle, suffix, start_mark, end_mark); return 1; error: yaml_free(handle); yaml_free(suffix); return 0; } /* * Scan a tag handle. */ static int yaml_parser_scan_tag_handle(yaml_parser_t *parser, int directive, yaml_mark_t start_mark, yaml_char_t **handle) { yaml_string_t string = NULL_STRING; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; /* Check the initial '!' character. */ if (!CACHE(parser, 1)) goto error; if (!CHECK(parser->buffer, '!')) { yaml_parser_set_scanner_error(parser, directive ? "while scanning a tag directive" : "while scanning a tag", start_mark, "did not find expected '!'"); goto error; } /* Copy the '!' character. */ if (!READ(parser, string)) goto error; /* Copy all subsequent alphabetical and numerical characters. */ if (!CACHE(parser, 1)) goto error; while (IS_ALPHA(parser->buffer)) { if (!READ(parser, string)) goto error; if (!CACHE(parser, 1)) goto error; } /* Check if the trailing character is '!' and copy it. */ if (CHECK(parser->buffer, '!')) { if (!READ(parser, string)) goto error; } else { /* * It's either the '!' tag or not really a tag handle. If it's a %TAG * directive, it's an error. If it's a tag token, it must be a part of * URI. */ if (directive && !(string.start[0] == '!' && string.start[1] == '\0')) { yaml_parser_set_scanner_error(parser, "while parsing a tag directive", start_mark, "did not find expected '!'"); goto error; } } *handle = string.start; return 1; error: STRING_DEL(parser, string); return 0; } /* * Scan a tag. */ static int yaml_parser_scan_tag_uri(yaml_parser_t *parser, int directive, yaml_char_t *head, yaml_mark_t start_mark, yaml_char_t **uri) { size_t length = head ? strlen((char *)head) : 0; yaml_string_t string = NULL_STRING; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; /* Resize the string to include the head. 
*/ while ((size_t)(string.end - string.start) <= length) { if (!yaml_string_extend(&string.start, &string.pointer, &string.end)) { parser->error = YAML_MEMORY_ERROR; goto error; } } /* * Copy the head if needed. * * Note that we don't copy the leading '!' character. */ if (length > 1) { memcpy(string.start, head+1, length-1); string.pointer += length-1; } /* Scan the tag. */ if (!CACHE(parser, 1)) goto error; /* * The set of characters that may appear in URI is as follows: * * '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', * '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', * '%'. */ while (IS_ALPHA(parser->buffer) || CHECK(parser->buffer, ';') || CHECK(parser->buffer, '/') || CHECK(parser->buffer, '?') || CHECK(parser->buffer, ':') || CHECK(parser->buffer, '@') || CHECK(parser->buffer, '&') || CHECK(parser->buffer, '=') || CHECK(parser->buffer, '+') || CHECK(parser->buffer, '$') || CHECK(parser->buffer, ',') || CHECK(parser->buffer, '.') || CHECK(parser->buffer, '!') || CHECK(parser->buffer, '~') || CHECK(parser->buffer, '*') || CHECK(parser->buffer, '\'') || CHECK(parser->buffer, '(') || CHECK(parser->buffer, ')') || CHECK(parser->buffer, '[') || CHECK(parser->buffer, ']') || CHECK(parser->buffer, '%')) { /* Check if it is a URI-escape sequence. */ if (CHECK(parser->buffer, '%')) { if (!STRING_EXTEND(parser, string)) goto error; if (!yaml_parser_scan_uri_escapes(parser, directive, start_mark, &string)) goto error; } else { if (!READ(parser, string)) goto error; } length ++; if (!CACHE(parser, 1)) goto error; } /* Check if the tag is non-empty. */ if (!length) { if (!STRING_EXTEND(parser, string)) goto error; yaml_parser_set_scanner_error(parser, directive ? "while parsing a %TAG directive" : "while parsing a tag", start_mark, "did not find expected tag URI"); goto error; } *uri = string.start; return 1; error: STRING_DEL(parser, string); return 0; } /* * Decode an URI-escape sequence corresponding to a single UTF-8 character. 
*/ static int yaml_parser_scan_uri_escapes(yaml_parser_t *parser, int directive, yaml_mark_t start_mark, yaml_string_t *string) { int width = 0; /* Decode the required number of characters. */ do { unsigned char octet = 0; /* Check for a URI-escaped octet. */ if (!CACHE(parser, 3)) return 0; if (!(CHECK(parser->buffer, '%') && IS_HEX_AT(parser->buffer, 1) && IS_HEX_AT(parser->buffer, 2))) { return yaml_parser_set_scanner_error(parser, directive ? "while parsing a %TAG directive" : "while parsing a tag", start_mark, "did not find URI escaped octet"); } /* Get the octet. */ octet = (AS_HEX_AT(parser->buffer, 1) << 4) + AS_HEX_AT(parser->buffer, 2); /* If it is the leading octet, determine the length of the UTF-8 sequence. */ if (!width) { width = (octet & 0x80) == 0x00 ? 1 : (octet & 0xE0) == 0xC0 ? 2 : (octet & 0xF0) == 0xE0 ? 3 : (octet & 0xF8) == 0xF0 ? 4 : 0; if (!width) { return yaml_parser_set_scanner_error(parser, directive ? "while parsing a %TAG directive" : "while parsing a tag", start_mark, "found an incorrect leading UTF-8 octet"); } } else { /* Check if the trailing octet is correct. */ if ((octet & 0xC0) != 0x80) { return yaml_parser_set_scanner_error(parser, directive ? "while parsing a %TAG directive" : "while parsing a tag", start_mark, "found an incorrect trailing UTF-8 octet"); } } /* Copy the octet and move the pointers. */ *(string->pointer++) = octet; SKIP(parser); SKIP(parser); SKIP(parser); } while (--width); return 1; } /* * Scan a block scalar. 
*/ static int yaml_parser_scan_block_scalar(yaml_parser_t *parser, yaml_token_t *token, int literal) { yaml_mark_t start_mark; yaml_mark_t end_mark; yaml_string_t string = NULL_STRING; yaml_string_t leading_break = NULL_STRING; yaml_string_t trailing_breaks = NULL_STRING; int chomping = 0; int increment = 0; int indent = 0; int leading_blank = 0; int trailing_blank = 0; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, leading_break, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, trailing_breaks, INITIAL_STRING_SIZE)) goto error; /* Eat the indicator '|' or '>'. */ start_mark = parser->mark; SKIP(parser); /* Scan the additional block scalar indicators. */ if (!CACHE(parser, 1)) goto error; /* Check for a chomping indicator. */ if (CHECK(parser->buffer, '+') || CHECK(parser->buffer, '-')) { /* Set the chomping method and eat the indicator. */ chomping = CHECK(parser->buffer, '+') ? +1 : -1; SKIP(parser); /* Check for an indentation indicator. */ if (!CACHE(parser, 1)) goto error; if (IS_DIGIT(parser->buffer)) { /* Check that the intendation is greater than 0. */ if (CHECK(parser->buffer, '0')) { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found an intendation indicator equal to 0"); goto error; } /* Get the intendation level and eat the indicator. */ increment = AS_DIGIT(parser->buffer); SKIP(parser); } } /* Do the same as above, but in the opposite order. */ else if (IS_DIGIT(parser->buffer)) { if (CHECK(parser->buffer, '0')) { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found an intendation indicator equal to 0"); goto error; } increment = AS_DIGIT(parser->buffer); SKIP(parser); if (!CACHE(parser, 1)) goto error; if (CHECK(parser->buffer, '+') || CHECK(parser->buffer, '-')) { chomping = CHECK(parser->buffer, '+') ? +1 : -1; SKIP(parser); } } /* Eat whitespaces and comments to the end of the line. 
*/ if (!CACHE(parser, 1)) goto error; while (IS_BLANK(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) goto error; } if (CHECK(parser->buffer, '#')) { while (!IS_BREAKZ(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) goto error; } } /* Check if we are at the end of the line. */ if (!IS_BREAKZ(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "did not find expected comment or line break"); goto error; } /* Eat a line break. */ if (IS_BREAK(parser->buffer)) { if (!CACHE(parser, 2)) goto error; SKIP_LINE(parser); } end_mark = parser->mark; /* Set the intendation level if it was specified. */ if (increment) { indent = parser->indent >= 0 ? parser->indent+increment : increment; } /* Scan the leading line breaks and determine the indentation level if needed. */ if (!yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark)) goto error; /* Scan the block scalar content. */ if (!CACHE(parser, 1)) goto error; while ((int)parser->mark.column == indent && !IS_Z(parser->buffer)) { /* * We are at the beginning of a non-empty line. */ /* Is it a trailing whitespace? */ trailing_blank = IS_BLANK(parser->buffer); /* Check if we need to fold the leading line break. */ if (!literal && (*leading_break.start == '\n') && !leading_blank && !trailing_blank) { /* Do we need to join the lines by space? */ if (*trailing_breaks.start == '\0') { if (!STRING_EXTEND(parser, string)) goto error; *(string.pointer ++) = ' '; } CLEAR(parser, leading_break); } else { if (!JOIN(parser, string, leading_break)) goto error; CLEAR(parser, leading_break); } /* Append the remaining line breaks. */ if (!JOIN(parser, string, trailing_breaks)) goto error; CLEAR(parser, trailing_breaks); /* Is it a leading whitespace? */ leading_blank = IS_BLANK(parser->buffer); /* Consume the current line. 
*/ while (!IS_BREAKZ(parser->buffer)) { if (!READ(parser, string)) goto error; if (!CACHE(parser, 1)) goto error; } /* Consume the line break. */ if (!CACHE(parser, 2)) goto error; if (!READ_LINE(parser, leading_break)) goto error; /* Eat the following intendation spaces and line breaks. */ if (!yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark)) goto error; } /* Chomp the tail. */ if (chomping != -1) { if (!JOIN(parser, string, leading_break)) goto error; } if (chomping == 1) { if (!JOIN(parser, string, trailing_breaks)) goto error; } /* Create a token. */ SCALAR_TOKEN_INIT(*token, string.start, string.pointer-string.start, literal ? YAML_LITERAL_SCALAR_STYLE : YAML_FOLDED_SCALAR_STYLE, start_mark, end_mark); STRING_DEL(parser, leading_break); STRING_DEL(parser, trailing_breaks); return 1; error: STRING_DEL(parser, string); STRING_DEL(parser, leading_break); STRING_DEL(parser, trailing_breaks); return 0; } /* * Scan intendation spaces and line breaks for a block scalar. Determine the * intendation level if needed. */ static int yaml_parser_scan_block_scalar_breaks(yaml_parser_t *parser, int *indent, yaml_string_t *breaks, yaml_mark_t start_mark, yaml_mark_t *end_mark) { int max_indent = 0; *end_mark = parser->mark; /* Eat the intendation spaces and line breaks. */ while (1) { /* Eat the intendation spaces. */ if (!CACHE(parser, 1)) return 0; while ((!*indent || (int)parser->mark.column < *indent) && IS_SPACE(parser->buffer)) { SKIP(parser); if (!CACHE(parser, 1)) return 0; } if ((int)parser->mark.column > max_indent) max_indent = (int)parser->mark.column; /* Check for a tab character messing the intendation. */ if ((!*indent || (int)parser->mark.column < *indent) && IS_TAB(parser->buffer)) { return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", start_mark, "found a tab character where an intendation space is expected"); } /* Have we found a non-empty line? 
*/ if (!IS_BREAK(parser->buffer)) break; /* Consume the line break. */ if (!CACHE(parser, 2)) return 0; if (!READ_LINE(parser, *breaks)) return 0; *end_mark = parser->mark; } /* Determine the indentation level if needed. */ if (!*indent) { *indent = max_indent; if (*indent < parser->indent + 1) *indent = parser->indent + 1; if (*indent < 1) *indent = 1; } return 1; } /* * Scan a quoted scalar. */ static int yaml_parser_scan_flow_scalar(yaml_parser_t *parser, yaml_token_t *token, int single) { yaml_mark_t start_mark; yaml_mark_t end_mark; yaml_string_t string = NULL_STRING; yaml_string_t leading_break = NULL_STRING; yaml_string_t trailing_breaks = NULL_STRING; yaml_string_t whitespaces = NULL_STRING; int leading_blanks; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, leading_break, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, trailing_breaks, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, whitespaces, INITIAL_STRING_SIZE)) goto error; /* Eat the left quote. */ start_mark = parser->mark; SKIP(parser); /* Consume the content of the quoted scalar. */ while (1) { /* Check that there are no document indicators at the beginning of the line. */ if (!CACHE(parser, 4)) goto error; if (parser->mark.column == 0 && ((CHECK_AT(parser->buffer, '-', 0) && CHECK_AT(parser->buffer, '-', 1) && CHECK_AT(parser->buffer, '-', 2)) || (CHECK_AT(parser->buffer, '.', 0) && CHECK_AT(parser->buffer, '.', 1) && CHECK_AT(parser->buffer, '.', 2))) && IS_BLANKZ_AT(parser->buffer, 3)) { yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", start_mark, "found unexpected document indicator"); goto error; } /* Check for EOF. */ if (IS_Z(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", start_mark, "found unexpected end of stream"); goto error; } /* Consume non-blank characters. 
*/ if (!CACHE(parser, 2)) goto error; leading_blanks = 0; while (!IS_BLANKZ(parser->buffer)) { /* Check for an escaped single quote. */ if (single && CHECK_AT(parser->buffer, '\'', 0) && CHECK_AT(parser->buffer, '\'', 1)) { if (!STRING_EXTEND(parser, string)) goto error; *(string.pointer++) = '\''; SKIP(parser); SKIP(parser); } /* Check for the right quote. */ else if (CHECK(parser->buffer, single ? '\'' : '"')) { break; } /* Check for an escaped line break. */ else if (!single && CHECK(parser->buffer, '\\') && IS_BREAK_AT(parser->buffer, 1)) { if (!CACHE(parser, 3)) goto error; SKIP(parser); SKIP_LINE(parser); leading_blanks = 1; break; } /* Check for an escape sequence. */ else if (!single && CHECK(parser->buffer, '\\')) { size_t code_length = 0; if (!STRING_EXTEND(parser, string)) goto error; /* Check the escape character. */ switch (parser->buffer.pointer[1]) { case '0': *(string.pointer++) = '\0'; break; case 'a': *(string.pointer++) = '\x07'; break; case 'b': *(string.pointer++) = '\x08'; break; case 't': case '\t': *(string.pointer++) = '\x09'; break; case 'n': *(string.pointer++) = '\x0A'; break; case 'v': *(string.pointer++) = '\x0B'; break; case 'f': *(string.pointer++) = '\x0C'; break; case 'r': *(string.pointer++) = '\x0D'; break; case 'e': *(string.pointer++) = '\x1B'; break; case ' ': *(string.pointer++) = '\x20'; break; case '"': *(string.pointer++) = '"'; break; case '\'': *(string.pointer++) = '\''; break; case '\\': *(string.pointer++) = '\\'; break; case 'N': /* NEL (#x85) */ *(string.pointer++) = '\xC2'; *(string.pointer++) = '\x85'; break; case '_': /* #xA0 */ *(string.pointer++) = '\xC2'; *(string.pointer++) = '\xA0'; break; case 'L': /* LS (#x2028) */ *(string.pointer++) = '\xE2'; *(string.pointer++) = '\x80'; *(string.pointer++) = '\xA8'; break; case 'P': /* PS (#x2029) */ *(string.pointer++) = '\xE2'; *(string.pointer++) = '\x80'; *(string.pointer++) = '\xA9'; break; case 'x': code_length = 2; break; case 'u': code_length = 4; break; case 
'U': code_length = 8; break; default: yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", start_mark, "found unknown escape character"); goto error; } SKIP(parser); SKIP(parser); /* Consume an arbitrary escape code. */ if (code_length) { unsigned int value = 0; size_t k; /* Scan the character value. */ if (!CACHE(parser, code_length)) goto error; for (k = 0; k < code_length; k ++) { if (!IS_HEX_AT(parser->buffer, k)) { yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", start_mark, "did not find expected hexdecimal number"); goto error; } value = (value << 4) + AS_HEX_AT(parser->buffer, k); } /* Check the value and write the character. */ if ((value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF) { yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", start_mark, "found invalid Unicode character escape code"); goto error; } if (value <= 0x7F) { *(string.pointer++) = value; } else if (value <= 0x7FF) { *(string.pointer++) = 0xC0 + (value >> 6); *(string.pointer++) = 0x80 + (value & 0x3F); } else if (value <= 0xFFFF) { *(string.pointer++) = 0xE0 + (value >> 12); *(string.pointer++) = 0x80 + ((value >> 6) & 0x3F); *(string.pointer++) = 0x80 + (value & 0x3F); } else { *(string.pointer++) = 0xF0 + (value >> 18); *(string.pointer++) = 0x80 + ((value >> 12) & 0x3F); *(string.pointer++) = 0x80 + ((value >> 6) & 0x3F); *(string.pointer++) = 0x80 + (value & 0x3F); } /* Advance the pointer. */ for (k = 0; k < code_length; k ++) { SKIP(parser); } } } else { /* It is a non-escaped non-blank character. */ if (!READ(parser, string)) goto error; } if (!CACHE(parser, 2)) goto error; } /* Check if we are at the end of the scalar. */ if (CHECK(parser->buffer, single ? '\'' : '"')) break; /* Consume blank characters. */ if (!CACHE(parser, 1)) goto error; while (IS_BLANK(parser->buffer) || IS_BREAK(parser->buffer)) { if (IS_BLANK(parser->buffer)) { /* Consume a space or a tab character. 
*/ if (!leading_blanks) { if (!READ(parser, whitespaces)) goto error; } else { SKIP(parser); } } else { if (!CACHE(parser, 2)) goto error; /* Check if it is a first line break. */ if (!leading_blanks) { CLEAR(parser, whitespaces); if (!READ_LINE(parser, leading_break)) goto error; leading_blanks = 1; } else { if (!READ_LINE(parser, trailing_breaks)) goto error; } } if (!CACHE(parser, 1)) goto error; } /* Join the whitespaces or fold line breaks. */ if (leading_blanks) { /* Do we need to fold line breaks? */ if (leading_break.start[0] == '\n') { if (trailing_breaks.start[0] == '\0') { if (!STRING_EXTEND(parser, string)) goto error; *(string.pointer++) = ' '; } else { if (!JOIN(parser, string, trailing_breaks)) goto error; CLEAR(parser, trailing_breaks); } CLEAR(parser, leading_break); } else { if (!JOIN(parser, string, leading_break)) goto error; if (!JOIN(parser, string, trailing_breaks)) goto error; CLEAR(parser, leading_break); CLEAR(parser, trailing_breaks); } } else { if (!JOIN(parser, string, whitespaces)) goto error; CLEAR(parser, whitespaces); } } /* Eat the right quote. */ SKIP(parser); end_mark = parser->mark; /* Create a token. */ SCALAR_TOKEN_INIT(*token, string.start, string.pointer-string.start, single ? YAML_SINGLE_QUOTED_SCALAR_STYLE : YAML_DOUBLE_QUOTED_SCALAR_STYLE, start_mark, end_mark); STRING_DEL(parser, leading_break); STRING_DEL(parser, trailing_breaks); STRING_DEL(parser, whitespaces); return 1; error: STRING_DEL(parser, string); STRING_DEL(parser, leading_break); STRING_DEL(parser, trailing_breaks); STRING_DEL(parser, whitespaces); return 0; } /* * Scan a plain scalar. 
*/ static int yaml_parser_scan_plain_scalar(yaml_parser_t *parser, yaml_token_t *token) { yaml_mark_t start_mark; yaml_mark_t end_mark; yaml_string_t string = NULL_STRING; yaml_string_t leading_break = NULL_STRING; yaml_string_t trailing_breaks = NULL_STRING; yaml_string_t whitespaces = NULL_STRING; int leading_blanks = 0; int indent = parser->indent+1; if (!STRING_INIT(parser, string, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, leading_break, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, trailing_breaks, INITIAL_STRING_SIZE)) goto error; if (!STRING_INIT(parser, whitespaces, INITIAL_STRING_SIZE)) goto error; start_mark = end_mark = parser->mark; /* Consume the content of the plain scalar. */ while (1) { /* Check for a document indicator. */ if (!CACHE(parser, 4)) goto error; if (parser->mark.column == 0 && ((CHECK_AT(parser->buffer, '-', 0) && CHECK_AT(parser->buffer, '-', 1) && CHECK_AT(parser->buffer, '-', 2)) || (CHECK_AT(parser->buffer, '.', 0) && CHECK_AT(parser->buffer, '.', 1) && CHECK_AT(parser->buffer, '.', 2))) && IS_BLANKZ_AT(parser->buffer, 3)) break; /* Check for a comment. */ if (CHECK(parser->buffer, '#')) break; /* Consume non-blank characters. */ while (!IS_BLANKZ(parser->buffer)) { /* Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". */ if (parser->flow_level && CHECK(parser->buffer, ':') && !IS_BLANKZ_AT(parser->buffer, 1)) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", start_mark, "found unexpected ':'"); goto error; } /* Check for indicators that may end a plain scalar. */ if ((CHECK(parser->buffer, ':') && IS_BLANKZ_AT(parser->buffer, 1)) || (parser->flow_level && (CHECK(parser->buffer, ',') || CHECK(parser->buffer, ':') || CHECK(parser->buffer, '?') || CHECK(parser->buffer, '[') || CHECK(parser->buffer, ']') || CHECK(parser->buffer, '{') || CHECK(parser->buffer, '}')))) break; /* Check if we need to join whitespaces and breaks. 
*/ if (leading_blanks || whitespaces.start != whitespaces.pointer) { if (leading_blanks) { /* Do we need to fold line breaks? */ if (leading_break.start[0] == '\n') { if (trailing_breaks.start[0] == '\0') { if (!STRING_EXTEND(parser, string)) goto error; *(string.pointer++) = ' '; } else { if (!JOIN(parser, string, trailing_breaks)) goto error; CLEAR(parser, trailing_breaks); } CLEAR(parser, leading_break); } else { if (!JOIN(parser, string, leading_break)) goto error; if (!JOIN(parser, string, trailing_breaks)) goto error; CLEAR(parser, leading_break); CLEAR(parser, trailing_breaks); } leading_blanks = 0; } else { if (!JOIN(parser, string, whitespaces)) goto error; CLEAR(parser, whitespaces); } } /* Copy the character. */ if (!READ(parser, string)) goto error; end_mark = parser->mark; if (!CACHE(parser, 2)) goto error; } /* Is it the end? */ if (!(IS_BLANK(parser->buffer) || IS_BREAK(parser->buffer))) break; /* Consume blank characters. */ if (!CACHE(parser, 1)) goto error; while (IS_BLANK(parser->buffer) || IS_BREAK(parser->buffer)) { if (IS_BLANK(parser->buffer)) { /* Check for tab character that abuse intendation. */ if (leading_blanks && (int)parser->mark.column < indent && IS_TAB(parser->buffer)) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", start_mark, "found a tab character that violate intendation"); goto error; } /* Consume a space or a tab character. */ if (!leading_blanks) { if (!READ(parser, whitespaces)) goto error; } else { SKIP(parser); } } else { if (!CACHE(parser, 2)) goto error; /* Check if it is a first line break. */ if (!leading_blanks) { CLEAR(parser, whitespaces); if (!READ_LINE(parser, leading_break)) goto error; leading_blanks = 1; } else { if (!READ_LINE(parser, trailing_breaks)) goto error; } } if (!CACHE(parser, 1)) goto error; } /* Check intendation level. */ if (!parser->flow_level && (int)parser->mark.column < indent) break; } /* Create a token. 
*/ SCALAR_TOKEN_INIT(*token, string.start, string.pointer-string.start, YAML_PLAIN_SCALAR_STYLE, start_mark, end_mark); /* Note that we change the 'simple_key_allowed' flag. */ if (leading_blanks) { parser->simple_key_allowed = 1; } STRING_DEL(parser, leading_break); STRING_DEL(parser, trailing_breaks); STRING_DEL(parser, whitespaces); return 1; error: STRING_DEL(parser, string); STRING_DEL(parser, leading_break); STRING_DEL(parser, trailing_breaks); STRING_DEL(parser, whitespaces); return 0; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/dumper.c0000644000000000000000000002347413306562377022443 0ustar rootroot #include "yaml_private.h" /* * API functions. */ YAML_DECLARE(int) yaml_emitter_open(yaml_emitter_t *emitter); YAML_DECLARE(int) yaml_emitter_close(yaml_emitter_t *emitter); YAML_DECLARE(int) yaml_emitter_dump(yaml_emitter_t *emitter, yaml_document_t *document); /* * Clean up functions. */ static void yaml_emitter_delete_document_and_anchors(yaml_emitter_t *emitter); /* * Anchor functions. */ static void yaml_emitter_anchor_node(yaml_emitter_t *emitter, int index); static yaml_char_t * yaml_emitter_generate_anchor(yaml_emitter_t *emitter, int anchor_id); /* * Serialize functions. */ static int yaml_emitter_dump_node(yaml_emitter_t *emitter, int index); static int yaml_emitter_dump_alias(yaml_emitter_t *emitter, yaml_char_t *anchor); static int yaml_emitter_dump_scalar(yaml_emitter_t *emitter, yaml_node_t *node, yaml_char_t *anchor); static int yaml_emitter_dump_sequence(yaml_emitter_t *emitter, yaml_node_t *node, yaml_char_t *anchor); static int yaml_emitter_dump_mapping(yaml_emitter_t *emitter, yaml_node_t *node, yaml_char_t *anchor); /* * Issue a STREAM-START event. */ YAML_DECLARE(int) yaml_emitter_open(yaml_emitter_t *emitter) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; assert(emitter); /* Non-NULL emitter object is required. */ assert(!emitter->opened); /* Emitter should not be opened yet. 
*/ STREAM_START_EVENT_INIT(event, YAML_ANY_ENCODING, mark, mark); if (!yaml_emitter_emit(emitter, &event)) { return 0; } emitter->opened = 1; return 1; } /* * Issue a STREAM-END event. */ YAML_DECLARE(int) yaml_emitter_close(yaml_emitter_t *emitter) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; assert(emitter); /* Non-NULL emitter object is required. */ assert(emitter->opened); /* Emitter should be opened. */ if (emitter->closed) return 1; STREAM_END_EVENT_INIT(event, mark, mark); if (!yaml_emitter_emit(emitter, &event)) { return 0; } emitter->closed = 1; return 1; } /* * Dump a YAML document. */ YAML_DECLARE(int) yaml_emitter_dump(yaml_emitter_t *emitter, yaml_document_t *document) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; assert(emitter); /* Non-NULL emitter object is required. */ assert(document); /* Non-NULL emitter object is expected. */ emitter->document = document; if (!emitter->opened) { if (!yaml_emitter_open(emitter)) goto error; } if (STACK_EMPTY(emitter, document->nodes)) { if (!yaml_emitter_close(emitter)) goto error; yaml_emitter_delete_document_and_anchors(emitter); return 1; } assert(emitter->opened); /* Emitter should be opened. 
*/ emitter->anchors = yaml_malloc(sizeof(*(emitter->anchors)) * (document->nodes.top - document->nodes.start)); if (!emitter->anchors) goto error; memset(emitter->anchors, 0, sizeof(*(emitter->anchors)) * (document->nodes.top - document->nodes.start)); DOCUMENT_START_EVENT_INIT(event, document->version_directive, document->tag_directives.start, document->tag_directives.end, document->start_implicit, mark, mark); if (!yaml_emitter_emit(emitter, &event)) goto error; yaml_emitter_anchor_node(emitter, 1); if (!yaml_emitter_dump_node(emitter, 1)) goto error; DOCUMENT_END_EVENT_INIT(event, document->end_implicit, mark, mark); if (!yaml_emitter_emit(emitter, &event)) goto error; yaml_emitter_delete_document_and_anchors(emitter); return 1; error: yaml_emitter_delete_document_and_anchors(emitter); return 0; } /* * Clean up the emitter object after a document is dumped. */ static void yaml_emitter_delete_document_and_anchors(yaml_emitter_t *emitter) { int index; if (!emitter->anchors) { yaml_document_delete(emitter->document); emitter->document = NULL; return; } for (index = 0; emitter->document->nodes.start + index < emitter->document->nodes.top; index ++) { yaml_node_t node = emitter->document->nodes.start[index]; if (!emitter->anchors[index].serialized) { yaml_free(node.tag); if (node.type == YAML_SCALAR_NODE) { yaml_free(node.data.scalar.value); } } if (node.type == YAML_SEQUENCE_NODE) { STACK_DEL(emitter, node.data.sequence.items); } if (node.type == YAML_MAPPING_NODE) { STACK_DEL(emitter, node.data.mapping.pairs); } } STACK_DEL(emitter, emitter->document->nodes); yaml_free(emitter->anchors); emitter->anchors = NULL; emitter->last_anchor_id = 0; emitter->document = NULL; } /* * Check the references of a node and assign the anchor id if needed. 
*/ static void yaml_emitter_anchor_node(yaml_emitter_t *emitter, int index) { yaml_node_t *node = emitter->document->nodes.start + index - 1; yaml_node_item_t *item; yaml_node_pair_t *pair; emitter->anchors[index-1].references ++; if (emitter->anchors[index-1].references == 1) { switch (node->type) { case YAML_SEQUENCE_NODE: for (item = node->data.sequence.items.start; item < node->data.sequence.items.top; item ++) { yaml_emitter_anchor_node(emitter, *item); } break; case YAML_MAPPING_NODE: for (pair = node->data.mapping.pairs.start; pair < node->data.mapping.pairs.top; pair ++) { yaml_emitter_anchor_node(emitter, pair->key); yaml_emitter_anchor_node(emitter, pair->value); } break; default: break; } } else if (emitter->anchors[index-1].references == 2) { emitter->anchors[index-1].anchor = (++ emitter->last_anchor_id); } } /* * Generate a textual representation for an anchor. */ #define ANCHOR_TEMPLATE "id%03d" #define ANCHOR_TEMPLATE_LENGTH 16 static yaml_char_t * yaml_emitter_generate_anchor(yaml_emitter_t *emitter, int anchor_id) { yaml_char_t *anchor = yaml_malloc(ANCHOR_TEMPLATE_LENGTH); if (!anchor) return NULL; sprintf((char *)anchor, ANCHOR_TEMPLATE, anchor_id); return anchor; } /* * Serialize a node. */ static int yaml_emitter_dump_node(yaml_emitter_t *emitter, int index) { yaml_node_t *node = emitter->document->nodes.start + index - 1; int anchor_id = emitter->anchors[index-1].anchor; yaml_char_t *anchor = NULL; if (anchor_id) { anchor = yaml_emitter_generate_anchor(emitter, anchor_id); if (!anchor) return 0; } if (emitter->anchors[index-1].serialized) { return yaml_emitter_dump_alias(emitter, anchor); } emitter->anchors[index-1].serialized = 1; switch (node->type) { case YAML_SCALAR_NODE: return yaml_emitter_dump_scalar(emitter, node, anchor); case YAML_SEQUENCE_NODE: return yaml_emitter_dump_sequence(emitter, node, anchor); case YAML_MAPPING_NODE: return yaml_emitter_dump_mapping(emitter, node, anchor); default: assert(0); /* Could not happen. 
*/ break; } return 0; /* Could not happen. */ } /* * Serialize an alias. */ static int yaml_emitter_dump_alias(yaml_emitter_t *emitter, yaml_char_t *anchor) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; ALIAS_EVENT_INIT(event, anchor, mark, mark); return yaml_emitter_emit(emitter, &event); } /* * Serialize a scalar. */ static int yaml_emitter_dump_scalar(yaml_emitter_t *emitter, yaml_node_t *node, yaml_char_t *anchor) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; int plain_implicit = (strcmp((char *)node->tag, YAML_DEFAULT_SCALAR_TAG) == 0); int quoted_implicit = (strcmp((char *)node->tag, YAML_DEFAULT_SCALAR_TAG) == 0); SCALAR_EVENT_INIT(event, anchor, node->tag, node->data.scalar.value, node->data.scalar.length, plain_implicit, quoted_implicit, node->data.scalar.style, mark, mark); return yaml_emitter_emit(emitter, &event); } /* * Serialize a sequence. */ static int yaml_emitter_dump_sequence(yaml_emitter_t *emitter, yaml_node_t *node, yaml_char_t *anchor) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; int implicit = (strcmp((char *)node->tag, YAML_DEFAULT_SEQUENCE_TAG) == 0); yaml_node_item_t *item; SEQUENCE_START_EVENT_INIT(event, anchor, node->tag, implicit, node->data.sequence.style, mark, mark); if (!yaml_emitter_emit(emitter, &event)) return 0; for (item = node->data.sequence.items.start; item < node->data.sequence.items.top; item ++) { if (!yaml_emitter_dump_node(emitter, *item)) return 0; } SEQUENCE_END_EVENT_INIT(event, mark, mark); if (!yaml_emitter_emit(emitter, &event)) return 0; return 1; } /* * Serialize a mapping. 
*/ static int yaml_emitter_dump_mapping(yaml_emitter_t *emitter, yaml_node_t *node, yaml_char_t *anchor) { yaml_event_t event; yaml_mark_t mark = { 0, 0, 0 }; int implicit = (strcmp((char *)node->tag, YAML_DEFAULT_MAPPING_TAG) == 0); yaml_node_pair_t *pair; MAPPING_START_EVENT_INIT(event, anchor, node->tag, implicit, node->data.mapping.style, mark, mark); if (!yaml_emitter_emit(emitter, &event)) return 0; for (pair = node->data.mapping.pairs.start; pair < node->data.mapping.pairs.top; pair ++) { if (!yaml_emitter_dump_node(emitter, pair->key)) return 0; if (!yaml_emitter_dump_node(emitter, pair->value)) return 0; } MAPPING_END_EVENT_INIT(event, mark, mark); if (!yaml_emitter_emit(emitter, &event)) return 0; return 1; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/src/Makefile.am0000644000000000000000000000044313306562377023026 0ustar rootrootAM_CPPFLAGS = -I$(top_srcdir)/include lib_LTLIBRARIES = libyaml.la libyaml_la_SOURCES = yaml_private.h api.c reader.c scanner.c parser.c loader.c writer.c emitter.c dumper.c libyaml_la_LDFLAGS = -release $(YAML_LT_RELEASE) -version-info $(YAML_LT_CURRENT):$(YAML_LT_REVISION):$(YAML_LT_AGE) tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/README0000644000000000000000000000150413306562377021062 0ustar rootrootLibYAML - A C library for parsing and emitting YAML. To build and install the library, run: $ ./configure $ make # make install If you checked the source code from the Mercurial repository, run $ ./bootstrap $ ./configure $ make # make install For more information, check the LibYAML homepage: 'http://pyyaml.org/wiki/LibYAML'. Post your questions and opinions to the YAML-Core mailing list: 'http://lists.sourceforge.net/lists/listinfo/yaml-core'. Submit bug reports and feature requests to the LibYAML bug tracker: 'https://bitbucket.org/xi/libyaml/issues/new'. LibYAML is written by Kirill Simonov . It is released under the MIT license. See the file LICENSE for more details. 
This project is developed for Python Software Foundation as a part of Google Summer of Code under the mentorship of Clark Evans. tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/bootstrap0000755000000000000000000000004013306562377022137 0ustar rootroot#!/bin/sh exec autoreconf -fvi tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/configure.ac0000644000000000000000000000437413306562377022500 0ustar rootroot# Run `./bootstrap` to generate the "configure" script. # Define the package version numbers and the bug reporting link. m4_define([YAML_MAJOR], 0) m4_define([YAML_MINOR], 1) m4_define([YAML_PATCH], 7) m4_define([YAML_BUGS], [https://bitbucket.org/xi/libyaml/issues/new]) # Define the libtool version numbers; check the Autobook, Section 11.4. # Bump the libtool version numbers using the following algorithm: # if (the current interface has not been changed): # YAML_REVISION += 1 # else: # YAML_REVISION = 0 # YAML_CURRENT += 1 # if (this release is backward compatible with the previous release): # YAML_AGE += 1 # else: # YAML_AGE = 0 m4_define([YAML_RELEASE], 0) m4_define([YAML_CURRENT], 2) m4_define([YAML_REVISION], 5) m4_define([YAML_AGE], 0) # Initialize autoconf & automake. AC_PREREQ(2.59) AC_INIT([yaml], [YAML_MAJOR.YAML_MINOR.YAML_PATCH], [YAML_BUGS]) AC_CONFIG_AUX_DIR([config]) AC_CONFIG_HEADERS([config.h]) AM_INIT_AUTOMAKE([1.9 foreign]) # Define macro variables for the package version numbers. AC_DEFINE(YAML_VERSION_MAJOR, YAML_MAJOR, [Define the major version number.]) AC_DEFINE(YAML_VERSION_MINOR, YAML_MINOR, [Define the minor version number.]) AC_DEFINE(YAML_VERSION_PATCH, YAML_PATCH, [Define the patch version number.]) AC_DEFINE(YAML_VERSION_STRING, "YAML_MAJOR.YAML_MINOR.YAML_PATCH", [Define the version string.]) # Define substitutions for the libtool version numbers. 
YAML_LT_RELEASE=YAML_RELEASE YAML_LT_CURRENT=YAML_CURRENT YAML_LT_REVISION=YAML_REVISION YAML_LT_AGE=YAML_AGE AC_SUBST(YAML_LT_RELEASE) AC_SUBST(YAML_LT_CURRENT) AC_SUBST(YAML_LT_REVISION) AC_SUBST(YAML_LT_AGE) # Note: in order to update checks, run `autoscan` and look through "configure.scan". # Checks for programs. AC_PROG_CC AC_PROG_CPP AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_PROG_LIBTOOL AC_CHECK_PROG(DOXYGEN, [doxygen], [true], [false]) AM_CONDITIONAL(DOXYGEN, [test "$DOXYGEN" = true]) # Checks for header files. AC_HEADER_STDC AC_CHECK_HEADERS([stdlib.h]) # Checks for typedefs, structures, and compiler characteristics. AC_C_CONST AC_TYPE_SIZE_T # Define Makefiles. AC_CONFIG_FILES([yaml-0.1.pc include/Makefile src/Makefile Makefile tests/Makefile win32/Makefile]) # Generate the "configure" script. AC_OUTPUT tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/LICENSE0000644000000000000000000000204213306562377021205 0ustar rootrootCopyright (c) 2006 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/CMakeLists.txt0000644000000000000000000000070413306562377022743 0ustar rootroot# Minimal CMake project for building a static library under Windows. cmake_minimum_required (VERSION 2.8) project (yaml C) set (YAML_VERSION_MAJOR 0) set (YAML_VERSION_MINOR 1) set (YAML_VERSION_PATCH 7) set (YAML_VERSION_STRING "${YAML_VERSION_MAJOR}.${YAML_VERSION_MINOR}.${YAML_VERSION_PATCH}") file (GLOB SRC src/*.c) include_directories (include win32) add_definitions (-DHAVE_CONFIG_H -DYAML_DECLARE_STATIC) add_library (yaml STATIC ${SRC}) tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/include/0000755000000000000000000000000013306562377021625 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/include/yaml.h0000644000000000000000000015172113306562377022747 0ustar rootroot/** * @file yaml.h * @brief Public interface for libyaml. * * Include the header file with the code: * @code * #include * @endcode */ #ifndef YAML_H #define YAML_H #ifdef __cplusplus extern "C" { #endif #include #include #include /** * @defgroup export Export Definitions * @{ */ /** The public API declaration. */ #ifdef _WIN32 # if defined(YAML_DECLARE_STATIC) # define YAML_DECLARE(type) type # elif defined(YAML_DECLARE_EXPORT) # define YAML_DECLARE(type) __declspec(dllexport) type # else # define YAML_DECLARE(type) __declspec(dllimport) type # endif #else # define YAML_DECLARE(type) type #endif /** @} */ /** * @defgroup version Version Information * @{ */ /** * Get the library version as a string. * * @returns The function returns the pointer to a static string of the form * @c "X.Y.Z", where @c X is the major version number, @c Y is a minor version * number, and @c Z is the patch version number. 
*/ YAML_DECLARE(const char *) yaml_get_version_string(void); /** * Get the library version numbers. * * @param[out] major Major version number. * @param[out] minor Minor version number. * @param[out] patch Patch version number. */ YAML_DECLARE(void) yaml_get_version(int *major, int *minor, int *patch); /** @} */ /** * @defgroup basic Basic Types * @{ */ /** The character type (UTF-8 octet). */ typedef unsigned char yaml_char_t; /** The version directive data. */ typedef struct yaml_version_directive_s { /** The major version number. */ int major; /** The minor version number. */ int minor; } yaml_version_directive_t; /** The tag directive data. */ typedef struct yaml_tag_directive_s { /** The tag handle. */ yaml_char_t *handle; /** The tag prefix. */ yaml_char_t *prefix; } yaml_tag_directive_t; /** The stream encoding. */ typedef enum yaml_encoding_e { /** Let the parser choose the encoding. */ YAML_ANY_ENCODING, /** The default UTF-8 encoding. */ YAML_UTF8_ENCODING, /** The UTF-16-LE encoding with BOM. */ YAML_UTF16LE_ENCODING, /** The UTF-16-BE encoding with BOM. */ YAML_UTF16BE_ENCODING } yaml_encoding_t; /** Line break types. */ typedef enum yaml_break_e { /** Let the parser choose the break type. */ YAML_ANY_BREAK, /** Use CR for line breaks (Mac style). */ YAML_CR_BREAK, /** Use LN for line breaks (Unix style). */ YAML_LN_BREAK, /** Use CR LN for line breaks (DOS style). */ YAML_CRLN_BREAK } yaml_break_t; /** Many bad things could happen with the parser and emitter. */ typedef enum yaml_error_type_e { /** No error is produced. */ YAML_NO_ERROR, /** Cannot allocate or reallocate a block of memory. */ YAML_MEMORY_ERROR, /** Cannot read or decode the input stream. */ YAML_READER_ERROR, /** Cannot scan the input stream. */ YAML_SCANNER_ERROR, /** Cannot parse the input stream. */ YAML_PARSER_ERROR, /** Cannot compose a YAML document. */ YAML_COMPOSER_ERROR, /** Cannot write to the output stream. */ YAML_WRITER_ERROR, /** Cannot emit a YAML stream. 
*/ YAML_EMITTER_ERROR } yaml_error_type_t; /** The pointer position. */ typedef struct yaml_mark_s { /** The position index. */ size_t index; /** The position line. */ size_t line; /** The position column. */ size_t column; } yaml_mark_t; /** @} */ /** * @defgroup styles Node Styles * @{ */ /** Scalar styles. */ typedef enum yaml_scalar_style_e { /** Let the emitter choose the style. */ YAML_ANY_SCALAR_STYLE, /** The plain scalar style. */ YAML_PLAIN_SCALAR_STYLE, /** The single-quoted scalar style. */ YAML_SINGLE_QUOTED_SCALAR_STYLE, /** The double-quoted scalar style. */ YAML_DOUBLE_QUOTED_SCALAR_STYLE, /** The literal scalar style. */ YAML_LITERAL_SCALAR_STYLE, /** The folded scalar style. */ YAML_FOLDED_SCALAR_STYLE } yaml_scalar_style_t; /** Sequence styles. */ typedef enum yaml_sequence_style_e { /** Let the emitter choose the style. */ YAML_ANY_SEQUENCE_STYLE, /** The block sequence style. */ YAML_BLOCK_SEQUENCE_STYLE, /** The flow sequence style. */ YAML_FLOW_SEQUENCE_STYLE } yaml_sequence_style_t; /** Mapping styles. */ typedef enum yaml_mapping_style_e { /** Let the emitter choose the style. */ YAML_ANY_MAPPING_STYLE, /** The block mapping style. */ YAML_BLOCK_MAPPING_STYLE, /** The flow mapping style. */ YAML_FLOW_MAPPING_STYLE /* YAML_FLOW_SET_MAPPING_STYLE */ } yaml_mapping_style_t; /** @} */ /** * @defgroup tokens Tokens * @{ */ /** Token types. */ typedef enum yaml_token_type_e { /** An empty token. */ YAML_NO_TOKEN, /** A STREAM-START token. */ YAML_STREAM_START_TOKEN, /** A STREAM-END token. */ YAML_STREAM_END_TOKEN, /** A VERSION-DIRECTIVE token. */ YAML_VERSION_DIRECTIVE_TOKEN, /** A TAG-DIRECTIVE token. */ YAML_TAG_DIRECTIVE_TOKEN, /** A DOCUMENT-START token. */ YAML_DOCUMENT_START_TOKEN, /** A DOCUMENT-END token. */ YAML_DOCUMENT_END_TOKEN, /** A BLOCK-SEQUENCE-START token. */ YAML_BLOCK_SEQUENCE_START_TOKEN, /** A BLOCK-SEQUENCE-END token. */ YAML_BLOCK_MAPPING_START_TOKEN, /** A BLOCK-END token. 
*/ YAML_BLOCK_END_TOKEN, /** A FLOW-SEQUENCE-START token. */ YAML_FLOW_SEQUENCE_START_TOKEN, /** A FLOW-SEQUENCE-END token. */ YAML_FLOW_SEQUENCE_END_TOKEN, /** A FLOW-MAPPING-START token. */ YAML_FLOW_MAPPING_START_TOKEN, /** A FLOW-MAPPING-END token. */ YAML_FLOW_MAPPING_END_TOKEN, /** A BLOCK-ENTRY token. */ YAML_BLOCK_ENTRY_TOKEN, /** A FLOW-ENTRY token. */ YAML_FLOW_ENTRY_TOKEN, /** A KEY token. */ YAML_KEY_TOKEN, /** A VALUE token. */ YAML_VALUE_TOKEN, /** An ALIAS token. */ YAML_ALIAS_TOKEN, /** An ANCHOR token. */ YAML_ANCHOR_TOKEN, /** A TAG token. */ YAML_TAG_TOKEN, /** A SCALAR token. */ YAML_SCALAR_TOKEN } yaml_token_type_t; /** The token structure. */ typedef struct yaml_token_s { /** The token type. */ yaml_token_type_t type; /** The token data. */ union { /** The stream start (for @c YAML_STREAM_START_TOKEN). */ struct { /** The stream encoding. */ yaml_encoding_t encoding; } stream_start; /** The alias (for @c YAML_ALIAS_TOKEN). */ struct { /** The alias value. */ yaml_char_t *value; } alias; /** The anchor (for @c YAML_ANCHOR_TOKEN). */ struct { /** The anchor value. */ yaml_char_t *value; } anchor; /** The tag (for @c YAML_TAG_TOKEN). */ struct { /** The tag handle. */ yaml_char_t *handle; /** The tag suffix. */ yaml_char_t *suffix; } tag; /** The scalar value (for @c YAML_SCALAR_TOKEN). */ struct { /** The scalar value. */ yaml_char_t *value; /** The length of the scalar value. */ size_t length; /** The scalar style. */ yaml_scalar_style_t style; } scalar; /** The version directive (for @c YAML_VERSION_DIRECTIVE_TOKEN). */ struct { /** The major version number. */ int major; /** The minor version number. */ int minor; } version_directive; /** The tag directive (for @c YAML_TAG_DIRECTIVE_TOKEN). */ struct { /** The tag handle. */ yaml_char_t *handle; /** The tag prefix. */ yaml_char_t *prefix; } tag_directive; } data; /** The beginning of the token. */ yaml_mark_t start_mark; /** The end of the token. 
*/ yaml_mark_t end_mark; } yaml_token_t; /** * Free any memory allocated for a token object. * * @param[in,out] token A token object. */ YAML_DECLARE(void) yaml_token_delete(yaml_token_t *token); /** @} */ /** * @defgroup events Events * @{ */ /** Event types. */ typedef enum yaml_event_type_e { /** An empty event. */ YAML_NO_EVENT, /** A STREAM-START event. */ YAML_STREAM_START_EVENT, /** A STREAM-END event. */ YAML_STREAM_END_EVENT, /** A DOCUMENT-START event. */ YAML_DOCUMENT_START_EVENT, /** A DOCUMENT-END event. */ YAML_DOCUMENT_END_EVENT, /** An ALIAS event. */ YAML_ALIAS_EVENT, /** A SCALAR event. */ YAML_SCALAR_EVENT, /** A SEQUENCE-START event. */ YAML_SEQUENCE_START_EVENT, /** A SEQUENCE-END event. */ YAML_SEQUENCE_END_EVENT, /** A MAPPING-START event. */ YAML_MAPPING_START_EVENT, /** A MAPPING-END event. */ YAML_MAPPING_END_EVENT } yaml_event_type_t; /** The event structure. */ typedef struct yaml_event_s { /** The event type. */ yaml_event_type_t type; /** The event data. */ union { /** The stream parameters (for @c YAML_STREAM_START_EVENT). */ struct { /** The document encoding. */ yaml_encoding_t encoding; } stream_start; /** The document parameters (for @c YAML_DOCUMENT_START_EVENT). */ struct { /** The version directive. */ yaml_version_directive_t *version_directive; /** The list of tag directives. */ struct { /** The beginning of the tag directives list. */ yaml_tag_directive_t *start; /** The end of the tag directives list. */ yaml_tag_directive_t *end; } tag_directives; /** Is the document indicator implicit? */ int implicit; } document_start; /** The document end parameters (for @c YAML_DOCUMENT_END_EVENT). */ struct { /** Is the document end indicator implicit? */ int implicit; } document_end; /** The alias parameters (for @c YAML_ALIAS_EVENT). */ struct { /** The anchor. */ yaml_char_t *anchor; } alias; /** The scalar parameters (for @c YAML_SCALAR_EVENT). */ struct { /** The anchor. */ yaml_char_t *anchor; /** The tag. 
*/ yaml_char_t *tag; /** The scalar value. */ yaml_char_t *value; /** The length of the scalar value. */ size_t length; /** Is the tag optional for the plain style? */ int plain_implicit; /** Is the tag optional for any non-plain style? */ int quoted_implicit; /** The scalar style. */ yaml_scalar_style_t style; } scalar; /** The sequence parameters (for @c YAML_SEQUENCE_START_EVENT). */ struct { /** The anchor. */ yaml_char_t *anchor; /** The tag. */ yaml_char_t *tag; /** Is the tag optional? */ int implicit; /** The sequence style. */ yaml_sequence_style_t style; } sequence_start; /** The mapping parameters (for @c YAML_MAPPING_START_EVENT). */ struct { /** The anchor. */ yaml_char_t *anchor; /** The tag. */ yaml_char_t *tag; /** Is the tag optional? */ int implicit; /** The mapping style. */ yaml_mapping_style_t style; } mapping_start; } data; /** The beginning of the event. */ yaml_mark_t start_mark; /** The end of the event. */ yaml_mark_t end_mark; } yaml_event_t; /** * Create the STREAM-START event. * * @param[out] event An empty event object. * @param[in] encoding The stream encoding. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_stream_start_event_initialize(yaml_event_t *event, yaml_encoding_t encoding); /** * Create the STREAM-END event. * * @param[out] event An empty event object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_stream_end_event_initialize(yaml_event_t *event); /** * Create the DOCUMENT-START event. * * The @a implicit argument is considered as a stylistic parameter and may be * ignored by the emitter. * * @param[out] event An empty event object. * @param[in] version_directive The %YAML directive value or * @c NULL. * @param[in] tag_directives_start The beginning of the %TAG * directives list. * @param[in] tag_directives_end The end of the %TAG directives * list. * @param[in] implicit If the document start indicator is * implicit. 
* * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_document_start_event_initialize(yaml_event_t *event, yaml_version_directive_t *version_directive, yaml_tag_directive_t *tag_directives_start, yaml_tag_directive_t *tag_directives_end, int implicit); /** * Create the DOCUMENT-END event. * * The @a implicit argument is considered as a stylistic parameter and may be * ignored by the emitter. * * @param[out] event An empty event object. * @param[in] implicit If the document end indicator is implicit. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_document_end_event_initialize(yaml_event_t *event, int implicit); /** * Create an ALIAS event. * * @param[out] event An empty event object. * @param[in] anchor The anchor value. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_alias_event_initialize(yaml_event_t *event, yaml_char_t *anchor); /** * Create a SCALAR event. * * The @a style argument may be ignored by the emitter. * * Either the @a tag attribute or one of the @a plain_implicit and * @a quoted_implicit flags must be set. * * @param[out] event An empty event object. * @param[in] anchor The scalar anchor or @c NULL. * @param[in] tag The scalar tag or @c NULL. * @param[in] value The scalar value. * @param[in] length The length of the scalar value. * @param[in] plain_implicit If the tag may be omitted for the plain * style. * @param[in] quoted_implicit If the tag may be omitted for any * non-plain style. * @param[in] style The scalar style. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_scalar_event_initialize(yaml_event_t *event, yaml_char_t *anchor, yaml_char_t *tag, yaml_char_t *value, int length, int plain_implicit, int quoted_implicit, yaml_scalar_style_t style); /** * Create a SEQUENCE-START event. * * The @a style argument may be ignored by the emitter. 
* * Either the @a tag attribute or the @a implicit flag must be set. * * @param[out] event An empty event object. * @param[in] anchor The sequence anchor or @c NULL. * @param[in] tag The sequence tag or @c NULL. * @param[in] implicit If the tag may be omitted. * @param[in] style The sequence style. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_sequence_start_event_initialize(yaml_event_t *event, yaml_char_t *anchor, yaml_char_t *tag, int implicit, yaml_sequence_style_t style); /** * Create a SEQUENCE-END event. * * @param[out] event An empty event object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_sequence_end_event_initialize(yaml_event_t *event); /** * Create a MAPPING-START event. * * The @a style argument may be ignored by the emitter. * * Either the @a tag attribute or the @a implicit flag must be set. * * @param[out] event An empty event object. * @param[in] anchor The mapping anchor or @c NULL. * @param[in] tag The mapping tag or @c NULL. * @param[in] implicit If the tag may be omitted. * @param[in] style The mapping style. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_mapping_start_event_initialize(yaml_event_t *event, yaml_char_t *anchor, yaml_char_t *tag, int implicit, yaml_mapping_style_t style); /** * Create a MAPPING-END event. * * @param[out] event An empty event object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_mapping_end_event_initialize(yaml_event_t *event); /** * Free any memory allocated for an event object. * * @param[in,out] event An event object. */ YAML_DECLARE(void) yaml_event_delete(yaml_event_t *event); /** @} */ /** * @defgroup nodes Nodes * @{ */ /** The tag @c !!null with the only possible value: @c null. */ #define YAML_NULL_TAG "tag:yaml.org,2002:null" /** The tag @c !!bool with the values: @c true and @c falce. 
*/ #define YAML_BOOL_TAG "tag:yaml.org,2002:bool" /** The tag @c !!str for string values. */ #define YAML_STR_TAG "tag:yaml.org,2002:str" /** The tag @c !!int for integer values. */ #define YAML_INT_TAG "tag:yaml.org,2002:int" /** The tag @c !!float for float values. */ #define YAML_FLOAT_TAG "tag:yaml.org,2002:float" /** The tag @c !!timestamp for date and time values. */ #define YAML_TIMESTAMP_TAG "tag:yaml.org,2002:timestamp" /** The tag @c !!seq is used to denote sequences. */ #define YAML_SEQ_TAG "tag:yaml.org,2002:seq" /** The tag @c !!map is used to denote mapping. */ #define YAML_MAP_TAG "tag:yaml.org,2002:map" /** The default scalar tag is @c !!str. */ #define YAML_DEFAULT_SCALAR_TAG YAML_STR_TAG /** The default sequence tag is @c !!seq. */ #define YAML_DEFAULT_SEQUENCE_TAG YAML_SEQ_TAG /** The default mapping tag is @c !!map. */ #define YAML_DEFAULT_MAPPING_TAG YAML_MAP_TAG /** Node types. */ typedef enum yaml_node_type_e { /** An empty node. */ YAML_NO_NODE, /** A scalar node. */ YAML_SCALAR_NODE, /** A sequence node. */ YAML_SEQUENCE_NODE, /** A mapping node. */ YAML_MAPPING_NODE } yaml_node_type_t; /** The forward definition of a document node structure. */ typedef struct yaml_node_s yaml_node_t; /** An element of a sequence node. */ typedef int yaml_node_item_t; /** An element of a mapping node. */ typedef struct yaml_node_pair_s { /** The key of the element. */ int key; /** The value of the element. */ int value; } yaml_node_pair_t; /** The node structure. */ struct yaml_node_s { /** The node type. */ yaml_node_type_t type; /** The node tag. */ yaml_char_t *tag; /** The node data. */ union { /** The scalar parameters (for @c YAML_SCALAR_NODE). */ struct { /** The scalar value. */ yaml_char_t *value; /** The length of the scalar value. */ size_t length; /** The scalar style. */ yaml_scalar_style_t style; } scalar; /** The sequence parameters (for @c YAML_SEQUENCE_NODE). */ struct { /** The stack of sequence items. 
*/ struct { /** The beginning of the stack. */ yaml_node_item_t *start; /** The end of the stack. */ yaml_node_item_t *end; /** The top of the stack. */ yaml_node_item_t *top; } items; /** The sequence style. */ yaml_sequence_style_t style; } sequence; /** The mapping parameters (for @c YAML_MAPPING_NODE). */ struct { /** The stack of mapping pairs (key, value). */ struct { /** The beginning of the stack. */ yaml_node_pair_t *start; /** The end of the stack. */ yaml_node_pair_t *end; /** The top of the stack. */ yaml_node_pair_t *top; } pairs; /** The mapping style. */ yaml_mapping_style_t style; } mapping; } data; /** The beginning of the node. */ yaml_mark_t start_mark; /** The end of the node. */ yaml_mark_t end_mark; }; /** The document structure. */ typedef struct yaml_document_s { /** The document nodes. */ struct { /** The beginning of the stack. */ yaml_node_t *start; /** The end of the stack. */ yaml_node_t *end; /** The top of the stack. */ yaml_node_t *top; } nodes; /** The version directive. */ yaml_version_directive_t *version_directive; /** The list of tag directives. */ struct { /** The beginning of the tag directives list. */ yaml_tag_directive_t *start; /** The end of the tag directives list. */ yaml_tag_directive_t *end; } tag_directives; /** Is the document start indicator implicit? */ int start_implicit; /** Is the document end indicator implicit? */ int end_implicit; /** The beginning of the document. */ yaml_mark_t start_mark; /** The end of the document. */ yaml_mark_t end_mark; } yaml_document_t; /** * Create a YAML document. * * @param[out] document An empty document object. * @param[in] version_directive The %YAML directive value or * @c NULL. * @param[in] tag_directives_start The beginning of the %TAG * directives list. * @param[in] tag_directives_end The end of the %TAG directives * list. * @param[in] start_implicit If the document start indicator is * implicit. * @param[in] end_implicit If the document end indicator is * implicit. 
* * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_document_initialize(yaml_document_t *document, yaml_version_directive_t *version_directive, yaml_tag_directive_t *tag_directives_start, yaml_tag_directive_t *tag_directives_end, int start_implicit, int end_implicit); /** * Delete a YAML document and all its nodes. * * @param[in,out] document A document object. */ YAML_DECLARE(void) yaml_document_delete(yaml_document_t *document); /** * Get a node of a YAML document. * * The pointer returned by this function is valid until any of the functions * modifying the documents are called. * * @param[in] document A document object. * @param[in] index The node id. * * @returns the node objct or @c NULL if @c node_id is out of range. */ YAML_DECLARE(yaml_node_t *) yaml_document_get_node(yaml_document_t *document, int index); /** * Get the root of a YAML document node. * * The root object is the first object added to the document. * * The pointer returned by this function is valid until any of the functions * modifying the documents are called. * * An empty document produced by the parser signifies the end of a YAML * stream. * * @param[in] document A document object. * * @returns the node object or @c NULL if the document is empty. */ YAML_DECLARE(yaml_node_t *) yaml_document_get_root_node(yaml_document_t *document); /** * Create a SCALAR node and attach it to the document. * * The @a style argument may be ignored by the emitter. * * @param[in,out] document A document object. * @param[in] tag The scalar tag. * @param[in] value The scalar value. * @param[in] length The length of the scalar value. * @param[in] style The scalar style. * * @returns the node id or @c 0 on error. */ YAML_DECLARE(int) yaml_document_add_scalar(yaml_document_t *document, yaml_char_t *tag, yaml_char_t *value, int length, yaml_scalar_style_t style); /** * Create a SEQUENCE node and attach it to the document. * * The @a style argument may be ignored by the emitter. 
* * @param[in,out] document A document object. * @param[in] tag The sequence tag. * @param[in] style The sequence style. * * @returns the node id or @c 0 on error. */ YAML_DECLARE(int) yaml_document_add_sequence(yaml_document_t *document, yaml_char_t *tag, yaml_sequence_style_t style); /** * Create a MAPPING node and attach it to the document. * * The @a style argument may be ignored by the emitter. * * @param[in,out] document A document object. * @param[in] tag The sequence tag. * @param[in] style The sequence style. * * @returns the node id or @c 0 on error. */ YAML_DECLARE(int) yaml_document_add_mapping(yaml_document_t *document, yaml_char_t *tag, yaml_mapping_style_t style); /** * Add an item to a SEQUENCE node. * * @param[in,out] document A document object. * @param[in] sequence The sequence node id. * @param[in] item The item node id. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_document_append_sequence_item(yaml_document_t *document, int sequence, int item); /** * Add a pair of a key and a value to a MAPPING node. * * @param[in,out] document A document object. * @param[in] mapping The mapping node id. * @param[in] key The key node id. * @param[in] value The value node id. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_document_append_mapping_pair(yaml_document_t *document, int mapping, int key, int value); /** @} */ /** * @defgroup parser Parser Definitions * @{ */ /** * The prototype of a read handler. * * The read handler is called when the parser needs to read more bytes from the * source. The handler should write not more than @a size bytes to the @a * buffer. The number of written bytes should be set to the @a length variable. * * @param[in,out] data A pointer to an application data specified by * yaml_parser_set_input(). * @param[out] buffer The buffer to write the data from the source. * @param[in] size The size of the buffer. 
* @param[out] size_read The actual number of bytes read from the source. * * @returns On success, the handler should return @c 1. If the handler failed, * the returned value should be @c 0. On EOF, the handler should set the * @a size_read to @c 0 and return @c 1. */ typedef int yaml_read_handler_t(void *data, unsigned char *buffer, size_t size, size_t *size_read); /** * This structure holds information about a potential simple key. */ typedef struct yaml_simple_key_s { /** Is a simple key possible? */ int possible; /** Is a simple key required? */ int required; /** The number of the token. */ size_t token_number; /** The position mark. */ yaml_mark_t mark; } yaml_simple_key_t; /** * The states of the parser. */ typedef enum yaml_parser_state_e { /** Expect STREAM-START. */ YAML_PARSE_STREAM_START_STATE, /** Expect the beginning of an implicit document. */ YAML_PARSE_IMPLICIT_DOCUMENT_START_STATE, /** Expect DOCUMENT-START. */ YAML_PARSE_DOCUMENT_START_STATE, /** Expect the content of a document. */ YAML_PARSE_DOCUMENT_CONTENT_STATE, /** Expect DOCUMENT-END. */ YAML_PARSE_DOCUMENT_END_STATE, /** Expect a block node. */ YAML_PARSE_BLOCK_NODE_STATE, /** Expect a block node or indentless sequence. */ YAML_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE, /** Expect a flow node. */ YAML_PARSE_FLOW_NODE_STATE, /** Expect the first entry of a block sequence. */ YAML_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE, /** Expect an entry of a block sequence. */ YAML_PARSE_BLOCK_SEQUENCE_ENTRY_STATE, /** Expect an entry of an indentless sequence. */ YAML_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE, /** Expect the first key of a block mapping. */ YAML_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE, /** Expect a block mapping key. */ YAML_PARSE_BLOCK_MAPPING_KEY_STATE, /** Expect a block mapping value. */ YAML_PARSE_BLOCK_MAPPING_VALUE_STATE, /** Expect the first entry of a flow sequence. */ YAML_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE, /** Expect an entry of a flow sequence. 
*/ YAML_PARSE_FLOW_SEQUENCE_ENTRY_STATE, /** Expect a key of an ordered mapping. */ YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE, /** Expect a value of an ordered mapping. */ YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE, /** Expect the and of an ordered mapping entry. */ YAML_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE, /** Expect the first key of a flow mapping. */ YAML_PARSE_FLOW_MAPPING_FIRST_KEY_STATE, /** Expect a key of a flow mapping. */ YAML_PARSE_FLOW_MAPPING_KEY_STATE, /** Expect a value of a flow mapping. */ YAML_PARSE_FLOW_MAPPING_VALUE_STATE, /** Expect an empty value of a flow mapping. */ YAML_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE, /** Expect nothing. */ YAML_PARSE_END_STATE } yaml_parser_state_t; /** * This structure holds aliases data. */ typedef struct yaml_alias_data_s { /** The anchor. */ yaml_char_t *anchor; /** The node id. */ int index; /** The anchor mark. */ yaml_mark_t mark; } yaml_alias_data_t; /** * The parser structure. * * All members are internal. Manage the structure using the @c yaml_parser_ * family of functions. */ typedef struct yaml_parser_s { /** * @name Error handling * @{ */ /** Error type. */ yaml_error_type_t error; /** Error description. */ const char *problem; /** The byte about which the problem occured. */ size_t problem_offset; /** The problematic value (@c -1 is none). */ int problem_value; /** The problem position. */ yaml_mark_t problem_mark; /** The error context. */ const char *context; /** The context position. */ yaml_mark_t context_mark; /** * @} */ /** * @name Reader stuff * @{ */ /** Read handler. */ yaml_read_handler_t *read_handler; /** A pointer for passing to the read handler. */ void *read_handler_data; /** Standard (string or file) input data. */ union { /** String input data. */ struct { /** The string start pointer. */ const unsigned char *start; /** The string end pointer. */ const unsigned char *end; /** The string current position. 
*/ const unsigned char *current; } string; /** File input data. */ FILE *file; } input; /** EOF flag */ int eof; /** The working buffer. */ struct { /** The beginning of the buffer. */ yaml_char_t *start; /** The end of the buffer. */ yaml_char_t *end; /** The current position of the buffer. */ yaml_char_t *pointer; /** The last filled position of the buffer. */ yaml_char_t *last; } buffer; /* The number of unread characters in the buffer. */ size_t unread; /** The raw buffer. */ struct { /** The beginning of the buffer. */ unsigned char *start; /** The end of the buffer. */ unsigned char *end; /** The current position of the buffer. */ unsigned char *pointer; /** The last filled position of the buffer. */ unsigned char *last; } raw_buffer; /** The input encoding. */ yaml_encoding_t encoding; /** The offset of the current position (in bytes). */ size_t offset; /** The mark of the current position. */ yaml_mark_t mark; /** * @} */ /** * @name Scanner stuff * @{ */ /** Have we started to scan the input stream? */ int stream_start_produced; /** Have we reached the end of the input stream? */ int stream_end_produced; /** The number of unclosed '[' and '{' indicators. */ int flow_level; /** The tokens queue. */ struct { /** The beginning of the tokens queue. */ yaml_token_t *start; /** The end of the tokens queue. */ yaml_token_t *end; /** The head of the tokens queue. */ yaml_token_t *head; /** The tail of the tokens queue. */ yaml_token_t *tail; } tokens; /** The number of tokens fetched from the queue. */ size_t tokens_parsed; /* Does the tokens queue contain a token ready for dequeueing. */ int token_available; /** The indentation levels stack. */ struct { /** The beginning of the stack. */ int *start; /** The end of the stack. */ int *end; /** The top of the stack. */ int *top; } indents; /** The current indentation level. */ int indent; /** May a simple key occur at the current position? */ int simple_key_allowed; /** The stack of simple keys. 
*/ struct { /** The beginning of the stack. */ yaml_simple_key_t *start; /** The end of the stack. */ yaml_simple_key_t *end; /** The top of the stack. */ yaml_simple_key_t *top; } simple_keys; /** * @} */ /** * @name Parser stuff * @{ */ /** The parser states stack. */ struct { /** The beginning of the stack. */ yaml_parser_state_t *start; /** The end of the stack. */ yaml_parser_state_t *end; /** The top of the stack. */ yaml_parser_state_t *top; } states; /** The current parser state. */ yaml_parser_state_t state; /** The stack of marks. */ struct { /** The beginning of the stack. */ yaml_mark_t *start; /** The end of the stack. */ yaml_mark_t *end; /** The top of the stack. */ yaml_mark_t *top; } marks; /** The list of TAG directives. */ struct { /** The beginning of the list. */ yaml_tag_directive_t *start; /** The end of the list. */ yaml_tag_directive_t *end; /** The top of the list. */ yaml_tag_directive_t *top; } tag_directives; /** * @} */ /** * @name Dumper stuff * @{ */ /** The alias data. */ struct { /** The beginning of the list. */ yaml_alias_data_t *start; /** The end of the list. */ yaml_alias_data_t *end; /** The top of the list. */ yaml_alias_data_t *top; } aliases; /** The currently parsed document. */ yaml_document_t *document; /** * @} */ } yaml_parser_t; /** * Initialize a parser. * * This function creates a new parser object. An application is responsible * for destroying the object using the yaml_parser_delete() function. * * @param[out] parser An empty parser object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_parser_initialize(yaml_parser_t *parser); /** * Destroy a parser. * * @param[in,out] parser A parser object. */ YAML_DECLARE(void) yaml_parser_delete(yaml_parser_t *parser); /** * Set a string input. * * Note that the @a input pointer must be valid while the @a parser object * exists. The application is responsible for destroing @a input after * destroying the @a parser. 
* * @param[in,out] parser A parser object. * @param[in] input A source data. * @param[in] size The length of the source data in bytes. */ YAML_DECLARE(void) yaml_parser_set_input_string(yaml_parser_t *parser, const unsigned char *input, size_t size); /** * Set a file input. * * @a file should be a file object open for reading. The application is * responsible for closing the @a file. * * @param[in,out] parser A parser object. * @param[in] file An open file. */ YAML_DECLARE(void) yaml_parser_set_input_file(yaml_parser_t *parser, FILE *file); /** * Set a generic input handler. * * @param[in,out] parser A parser object. * @param[in] handler A read handler. * @param[in] data Any application data for passing to the read * handler. */ YAML_DECLARE(void) yaml_parser_set_input(yaml_parser_t *parser, yaml_read_handler_t *handler, void *data); /** * Set the source encoding. * * @param[in,out] parser A parser object. * @param[in] encoding The source encoding. */ YAML_DECLARE(void) yaml_parser_set_encoding(yaml_parser_t *parser, yaml_encoding_t encoding); /** * Scan the input stream and produce the next token. * * Call the function subsequently to produce a sequence of tokens corresponding * to the input stream. The initial token has the type * @c YAML_STREAM_START_TOKEN while the ending token has the type * @c YAML_STREAM_END_TOKEN. * * An application is responsible for freeing any buffers associated with the * produced token object using the @c yaml_token_delete function. * * An application must not alternate the calls of yaml_parser_scan() with the * calls of yaml_parser_parse() or yaml_parser_load(). Doing this will break * the parser. * * @param[in,out] parser A parser object. * @param[out] token An empty token object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token); /** * Parse the input stream and produce the next parsing event. 
* * Call the function subsequently to produce a sequence of events corresponding * to the input stream. The initial event has the type * @c YAML_STREAM_START_EVENT while the ending event has the type * @c YAML_STREAM_END_EVENT. * * An application is responsible for freeing any buffers associated with the * produced event object using the yaml_event_delete() function. * * An application must not alternate the calls of yaml_parser_parse() with the * calls of yaml_parser_scan() or yaml_parser_load(). Doing this will break the * parser. * * @param[in,out] parser A parser object. * @param[out] event An empty event object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event); /** * Parse the input stream and produce the next YAML document. * * Call this function subsequently to produce a sequence of documents * constituting the input stream. * * If the produced document has no root node, it means that the document * end has been reached. * * An application is responsible for freeing any data associated with the * produced document object using the yaml_document_delete() function. * * An application must not alternate the calls of yaml_parser_load() with the * calls of yaml_parser_scan() or yaml_parser_parse(). Doing this will break * the parser. * * @param[in,out] parser A parser object. * @param[out] document An empty document object. * * @return @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_parser_load(yaml_parser_t *parser, yaml_document_t *document); /** @} */ /** * @defgroup emitter Emitter Definitions * @{ */ /** * The prototype of a write handler. * * The write handler is called when the emitter needs to flush the accumulated * characters to the output. The handler should write @a size bytes of the * @a buffer to the output. * * @param[in,out] data A pointer to an application data specified by * yaml_emitter_set_output(). 
* @param[in] buffer The buffer with bytes to be written. * @param[in] size The size of the buffer. * * @returns On success, the handler should return @c 1. If the handler failed, * the returned value should be @c 0. */ typedef int yaml_write_handler_t(void *data, unsigned char *buffer, size_t size); /** The emitter states. */ typedef enum yaml_emitter_state_e { /** Expect STREAM-START. */ YAML_EMIT_STREAM_START_STATE, /** Expect the first DOCUMENT-START or STREAM-END. */ YAML_EMIT_FIRST_DOCUMENT_START_STATE, /** Expect DOCUMENT-START or STREAM-END. */ YAML_EMIT_DOCUMENT_START_STATE, /** Expect the content of a document. */ YAML_EMIT_DOCUMENT_CONTENT_STATE, /** Expect DOCUMENT-END. */ YAML_EMIT_DOCUMENT_END_STATE, /** Expect the first item of a flow sequence. */ YAML_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE, /** Expect an item of a flow sequence. */ YAML_EMIT_FLOW_SEQUENCE_ITEM_STATE, /** Expect the first key of a flow mapping. */ YAML_EMIT_FLOW_MAPPING_FIRST_KEY_STATE, /** Expect a key of a flow mapping. */ YAML_EMIT_FLOW_MAPPING_KEY_STATE, /** Expect a value for a simple key of a flow mapping. */ YAML_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE, /** Expect a value of a flow mapping. */ YAML_EMIT_FLOW_MAPPING_VALUE_STATE, /** Expect the first item of a block sequence. */ YAML_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE, /** Expect an item of a block sequence. */ YAML_EMIT_BLOCK_SEQUENCE_ITEM_STATE, /** Expect the first key of a block mapping. */ YAML_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE, /** Expect the key of a block mapping. */ YAML_EMIT_BLOCK_MAPPING_KEY_STATE, /** Expect a value for a simple key of a block mapping. */ YAML_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE, /** Expect a value of a block mapping. */ YAML_EMIT_BLOCK_MAPPING_VALUE_STATE, /** Expect nothing. */ YAML_EMIT_END_STATE } yaml_emitter_state_t; /** * The emitter structure. * * All members are internal. Manage the structure using the @c yaml_emitter_ * family of functions. 
*/ typedef struct yaml_emitter_s { /** * @name Error handling * @{ */ /** Error type. */ yaml_error_type_t error; /** Error description. */ const char *problem; /** * @} */ /** * @name Writer stuff * @{ */ /** Write handler. */ yaml_write_handler_t *write_handler; /** A pointer for passing to the white handler. */ void *write_handler_data; /** Standard (string or file) output data. */ union { /** String output data. */ struct { /** The buffer pointer. */ unsigned char *buffer; /** The buffer size. */ size_t size; /** The number of written bytes. */ size_t *size_written; } string; /** File output data. */ FILE *file; } output; /** The working buffer. */ struct { /** The beginning of the buffer. */ yaml_char_t *start; /** The end of the buffer. */ yaml_char_t *end; /** The current position of the buffer. */ yaml_char_t *pointer; /** The last filled position of the buffer. */ yaml_char_t *last; } buffer; /** The raw buffer. */ struct { /** The beginning of the buffer. */ unsigned char *start; /** The end of the buffer. */ unsigned char *end; /** The current position of the buffer. */ unsigned char *pointer; /** The last filled position of the buffer. */ unsigned char *last; } raw_buffer; /** The stream encoding. */ yaml_encoding_t encoding; /** * @} */ /** * @name Emitter stuff * @{ */ /** If the output is in the canonical style? */ int canonical; /** The number of indentation spaces. */ int best_indent; /** The preferred width of the output lines. */ int best_width; /** Allow unescaped non-ASCII characters? */ int unicode; /** The preferred line break. */ yaml_break_t line_break; /** The stack of states. */ struct { /** The beginning of the stack. */ yaml_emitter_state_t *start; /** The end of the stack. */ yaml_emitter_state_t *end; /** The top of the stack. */ yaml_emitter_state_t *top; } states; /** The current emitter state. */ yaml_emitter_state_t state; /** The event queue. */ struct { /** The beginning of the event queue. 
*/ yaml_event_t *start; /** The end of the event queue. */ yaml_event_t *end; /** The head of the event queue. */ yaml_event_t *head; /** The tail of the event queue. */ yaml_event_t *tail; } events; /** The stack of indentation levels. */ struct { /** The beginning of the stack. */ int *start; /** The end of the stack. */ int *end; /** The top of the stack. */ int *top; } indents; /** The list of tag directives. */ struct { /** The beginning of the list. */ yaml_tag_directive_t *start; /** The end of the list. */ yaml_tag_directive_t *end; /** The top of the list. */ yaml_tag_directive_t *top; } tag_directives; /** The current indentation level. */ int indent; /** The current flow level. */ int flow_level; /** Is it the document root context? */ int root_context; /** Is it a sequence context? */ int sequence_context; /** Is it a mapping context? */ int mapping_context; /** Is it a simple mapping key context? */ int simple_key_context; /** The current line. */ int line; /** The current column. */ int column; /** If the last character was a whitespace? */ int whitespace; /** If the last character was an indentation character (' ', '-', '?', ':')? */ int indention; /** If an explicit document end is required? */ int open_ended; /** Anchor analysis. */ struct { /** The anchor value. */ yaml_char_t *anchor; /** The anchor length. */ size_t anchor_length; /** Is it an alias? */ int alias; } anchor_data; /** Tag analysis. */ struct { /** The tag handle. */ yaml_char_t *handle; /** The tag handle length. */ size_t handle_length; /** The tag suffix. */ yaml_char_t *suffix; /** The tag suffix length. */ size_t suffix_length; } tag_data; /** Scalar analysis. */ struct { /** The scalar value. */ yaml_char_t *value; /** The scalar length. */ size_t length; /** Does the scalar contain line breaks? */ int multiline; /** Can the scalar be expessed in the flow plain style? */ int flow_plain_allowed; /** Can the scalar be expressed in the block plain style? 
*/ int block_plain_allowed; /** Can the scalar be expressed in the single quoted style? */ int single_quoted_allowed; /** Can the scalar be expressed in the literal or folded styles? */ int block_allowed; /** The output style. */ yaml_scalar_style_t style; } scalar_data; /** * @} */ /** * @name Dumper stuff * @{ */ /** If the stream was already opened? */ int opened; /** If the stream was already closed? */ int closed; /** The information associated with the document nodes. */ struct { /** The number of references. */ int references; /** The anchor id. */ int anchor; /** If the node has been emitted? */ int serialized; } *anchors; /** The last assigned anchor id. */ int last_anchor_id; /** The currently emitted document. */ yaml_document_t *document; /** * @} */ } yaml_emitter_t; /** * Initialize an emitter. * * This function creates a new emitter object. An application is responsible * for destroying the object using the yaml_emitter_delete() function. * * @param[out] emitter An empty parser object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_emitter_initialize(yaml_emitter_t *emitter); /** * Destroy an emitter. * * @param[in,out] emitter An emitter object. */ YAML_DECLARE(void) yaml_emitter_delete(yaml_emitter_t *emitter); /** * Set a string output. * * The emitter will write the output characters to the @a output buffer of the * size @a size. The emitter will set @a size_written to the number of written * bytes. If the buffer is smaller than required, the emitter produces the * YAML_WRITE_ERROR error. * * @param[in,out] emitter An emitter object. * @param[in] output An output buffer. * @param[in] size The buffer size. * @param[in] size_written The pointer to save the number of written * bytes. */ YAML_DECLARE(void) yaml_emitter_set_output_string(yaml_emitter_t *emitter, unsigned char *output, size_t size, size_t *size_written); /** * Set a file output. * * @a file should be a file object open for writing. 
The application is * responsible for closing the @a file. * * @param[in,out] emitter An emitter object. * @param[in] file An open file. */ YAML_DECLARE(void) yaml_emitter_set_output_file(yaml_emitter_t *emitter, FILE *file); /** * Set a generic output handler. * * @param[in,out] emitter An emitter object. * @param[in] handler A write handler. * @param[in] data Any application data for passing to the write * handler. */ YAML_DECLARE(void) yaml_emitter_set_output(yaml_emitter_t *emitter, yaml_write_handler_t *handler, void *data); /** * Set the output encoding. * * @param[in,out] emitter An emitter object. * @param[in] encoding The output encoding. */ YAML_DECLARE(void) yaml_emitter_set_encoding(yaml_emitter_t *emitter, yaml_encoding_t encoding); /** * Set if the output should be in the "canonical" format as in the YAML * specification. * * @param[in,out] emitter An emitter object. * @param[in] canonical If the output is canonical. */ YAML_DECLARE(void) yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical); /** * Set the intendation increment. * * @param[in,out] emitter An emitter object. * @param[in] indent The indentation increment (1 < . < 10). */ YAML_DECLARE(void) yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent); /** * Set the preferred line width. @c -1 means unlimited. * * @param[in,out] emitter An emitter object. * @param[in] width The preferred line width. */ YAML_DECLARE(void) yaml_emitter_set_width(yaml_emitter_t *emitter, int width); /** * Set if unescaped non-ASCII characters are allowed. * * @param[in,out] emitter An emitter object. * @param[in] unicode If unescaped Unicode characters are allowed. */ YAML_DECLARE(void) yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode); /** * Set the preferred line break. * * @param[in,out] emitter An emitter object. * @param[in] line_break The preferred line break. */ YAML_DECLARE(void) yaml_emitter_set_break(yaml_emitter_t *emitter, yaml_break_t line_break); /** * Emit an event. 
* * The event object may be generated using the yaml_parser_parse() function. * The emitter takes the responsibility for the event object and destroys its * content after it is emitted. The event object is destroyed even if the * function fails. * * @param[in,out] emitter An emitter object. * @param[in,out] event An event object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event); /** * Start a YAML stream. * * This function should be used before yaml_emitter_dump() is called. * * @param[in,out] emitter An emitter object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_emitter_open(yaml_emitter_t *emitter); /** * Finish a YAML stream. * * This function should be used after yaml_emitter_dump() is called. * * @param[in,out] emitter An emitter object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_emitter_close(yaml_emitter_t *emitter); /** * Emit a YAML document. * * The documen object may be generated using the yaml_parser_load() function * or the yaml_document_initialize() function. The emitter takes the * responsibility for the document object and destoys its content after * it is emitted. The document object is destroyedeven if the function fails. * * @param[in,out] emitter An emitter object. * @param[in,out] document A document object. * * @returns @c 1 if the function succeeded, @c 0 on error. */ YAML_DECLARE(int) yaml_emitter_dump(yaml_emitter_t *emitter, yaml_document_t *document); /** * Flush the accumulated characters to the output. * * @param[in,out] emitter An emitter object. * * @returns @c 1 if the function succeeded, @c 0 on error. 
*/ YAML_DECLARE(int) yaml_emitter_flush(yaml_emitter_t *emitter); /** @} */ #ifdef __cplusplus } #endif #endif /* #ifndef YAML_H */ tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/include/Makefile.am0000644000000000000000000000061413306562377023662 0ustar rootrootINCLUDES = yaml.h DOXYGEN_CFG = $(top_srcdir)/doc/doxygen.cfg nobase_include_HEADERS = $(INCLUDES) if DOXYGEN html: $(INCLUDES) $(DOXYGEN_CFG) PACKAGE=$(PACKAGE) VERSION=$(VERSION) top_srcdir=$(top_srcdir) top_builddir=$(top_builddir) doxygen $(DOXYGEN_CFG) endif maintainer-clean-local: -rm -rf $(top_builddir)/doc/html dist-hook: html cp -a $(top_builddir)/doc/html $(top_distdir)/doc tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/Makefile.am0000644000000000000000000000072513306562377022242 0ustar rootroot## Run `./bootstrap` to generate the "Makefile.in" files in this directory and ## the "$SUBDIRS" subdirectories. SUBDIRS = include src . tests win32 EXTRA_DIST = README LICENSE CMakeLists.txt doc/doxygen.cfg pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = yaml-0.1.pc maintainer-clean-local: -rm -f aclocal.m4 config.h.in configure config/* -find ${builddir} -name Makefile.in -exec rm -f '{}' ';' .PHONY: bootstrap bootstrap: maintainer-clean ./bootstrap tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/tests/0000755000000000000000000000000013306562377021344 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/tests/example-deconstructor.c0000644000000000000000000012560213306562377026045 0ustar rootroot #include #include #include int main(int argc, char *argv[]) { int help = 0; int canonical = 0; int unicode = 0; int k; int done = 0; yaml_parser_t parser; yaml_emitter_t emitter; yaml_event_t input_event; yaml_event_t output_event; /* Clear the objects. */ memset(&parser, 0, sizeof(parser)); memset(&emitter, 0, sizeof(emitter)); memset(&input_event, 0, sizeof(input_event)); memset(&output_event, 0, sizeof(output_event)); /* Analyze command line options. 
*/ for (k = 1; k < argc; k ++) { if (strcmp(argv[k], "-h") == 0 || strcmp(argv[k], "--help") == 0) { help = 1; } else if (strcmp(argv[k], "-c") == 0 || strcmp(argv[k], "--canonical") == 0) { canonical = 1; } else if (strcmp(argv[k], "-u") == 0 || strcmp(argv[k], "--unicode") == 0) { unicode = 1; } else { fprintf(stderr, "Unrecognized option: %s\n" "Try `%s --help` for more information.\n", argv[k], argv[0]); return 1; } } /* Display the help string. */ if (help) { printf("%s major); if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:int", number, -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'minor'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "minor", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write a number. */ sprintf(number, "%d", version->minor); if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:int", number, -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write '}'. */ if (!yaml_mapping_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Display the document tag directives. */ if (input_event.data.document_start.tag_directives.start != input_event.data.document_start.tag_directives.end) { yaml_tag_directive_t *tag; /* Write 'tags'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "tags", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Start a block sequence. 
*/ if (!yaml_sequence_start_event_initialize(&output_event, NULL, "tag:yaml.org,2002:seq", 1, YAML_BLOCK_SEQUENCE_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; for (tag = input_event.data.document_start.tag_directives.start; tag != input_event.data.document_start.tag_directives.end; tag ++) { /* Write '{'. */ if (!yaml_mapping_start_event_initialize(&output_event, NULL, "tag:yaml.org,2002:map", 1, YAML_FLOW_MAPPING_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'handle'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "handle", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the tag directive handle. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", tag->handle, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'prefix'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "prefix", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the tag directive prefix. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", tag->prefix, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write '}'. */ if (!yaml_mapping_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* End a block sequence. */ if (!yaml_sequence_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Write 'implicit'. 
*/ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "implicit", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write if the document is implicit. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:bool", (input_event.data.document_start.implicit ? "true" : "false"), -1, 1, 0, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; break; case YAML_DOCUMENT_END_EVENT: /* Write 'type'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'DOCUMENT-END'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "DOCUMENT-END", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'implicit'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "implicit", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write if the document is implicit. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:bool", (input_event.data.document_end.implicit ? "true" : "false"), -1, 1, 0, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; break; case YAML_ALIAS_EVENT: /* Write 'type'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'ALIAS'. 
*/ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "ALIAS", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'anchor'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "anchor", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the alias anchor. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.alias.anchor, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; break; case YAML_SCALAR_EVENT: /* Write 'type'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'SCALAR'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "SCALAR", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display the scalar anchor. */ if (input_event.data.scalar.anchor) { /* Write 'anchor'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "anchor", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the scalar anchor. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.scalar.anchor, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Display the scalar tag. */ if (input_event.data.scalar.tag) { /* Write 'tag'. 
*/ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "tag", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the scalar tag. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.scalar.tag, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Display the scalar value. */ /* Write 'value'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "value", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the scalar value. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.scalar.value, input_event.data.scalar.length, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display if the scalar tag is implicit. */ /* Write 'implicit'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "implicit", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write '{'. */ if (!yaml_mapping_start_event_initialize(&output_event, NULL, "tag:yaml.org,2002:map", 1, YAML_FLOW_MAPPING_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'plain'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "plain", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write if the scalar is implicit in the plain style. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:bool", (input_event.data.scalar.plain_implicit ? 
"true" : "false"), -1, 1, 0, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'quoted'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "non-plain", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write if the scalar is implicit in a non-plain style. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:bool", (input_event.data.scalar.quoted_implicit ? "true" : "false"), -1, 1, 0, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write '}'. */ if (!yaml_mapping_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display the style information. */ if (input_event.data.scalar.style) { yaml_scalar_style_t style = input_event.data.scalar.style; /* Write 'style'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "style", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the scalar style. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", (style == YAML_PLAIN_SCALAR_STYLE ? "plain" : style == YAML_SINGLE_QUOTED_SCALAR_STYLE ? "single-quoted" : style == YAML_DOUBLE_QUOTED_SCALAR_STYLE ? "double-quoted" : style == YAML_LITERAL_SCALAR_STYLE ? "literal" : style == YAML_FOLDED_SCALAR_STYLE ? "folded" : "unknown"), -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } break; case YAML_SEQUENCE_START_EVENT: /* Write 'type'. 
*/ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'SEQUENCE-START'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "SEQUENCE-START", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display the sequence anchor. */ if (input_event.data.sequence_start.anchor) { /* Write 'anchor'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "anchor", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the sequence anchor. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.sequence_start.anchor, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Display the sequence tag. */ if (input_event.data.sequence_start.tag) { /* Write 'tag'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "tag", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the sequence tag. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.sequence_start.tag, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Write 'implicit'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "implicit", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write if the sequence tag is implicit. 
*/ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:bool", (input_event.data.sequence_start.implicit ? "true" : "false"), -1, 1, 0, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display the style information. */ if (input_event.data.sequence_start.style) { yaml_sequence_style_t style = input_event.data.sequence_start.style; /* Write 'style'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "style", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the scalar style. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", (style == YAML_BLOCK_SEQUENCE_STYLE ? "block" : style == YAML_FLOW_SEQUENCE_STYLE ? "flow" : "unknown"), -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } break; case YAML_SEQUENCE_END_EVENT: /* Write 'type'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'SEQUENCE-END'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "SEQUENCE-END", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; break; case YAML_MAPPING_START_EVENT: /* Write 'type'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'MAPPING-START'. 
*/ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "MAPPING-START", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display the mapping anchor. */ if (input_event.data.mapping_start.anchor) { /* Write 'anchor'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "anchor", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the mapping anchor. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.mapping_start.anchor, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Display the mapping tag. */ if (input_event.data.mapping_start.tag) { /* Write 'tag'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "tag", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the mapping tag. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", input_event.data.mapping_start.tag, -1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Write 'implicit'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "implicit", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write if the mapping tag is implicit. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:bool", (input_event.data.mapping_start.implicit ? "true" : "false"), -1, 1, 0, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Display the style information. 
*/ if (input_event.data.mapping_start.style) { yaml_mapping_style_t style = input_event.data.mapping_start.style; /* Write 'style'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "style", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write the scalar style. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", (style == YAML_BLOCK_MAPPING_STYLE ? "block" : style == YAML_FLOW_MAPPING_STYLE ? "flow" : "unknown"), -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } break; case YAML_MAPPING_END_EVENT: /* Write 'type'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "type", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Write 'MAPPING-END'. */ if (!yaml_scalar_event_initialize(&output_event, NULL, "tag:yaml.org,2002:str", "MAPPING-END", -1, 1, 1, YAML_PLAIN_SCALAR_STYLE)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; break; default: /* It couldn't really happen. */ break; } /* Delete the event object. */ yaml_event_delete(&input_event); /* Create and emit a MAPPING-END event. */ if (!yaml_mapping_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; } /* Create and emit the SEQUENCE-END event. */ if (!yaml_sequence_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Create and emit the DOCUMENT-END event. */ if (!yaml_document_end_event_initialize(&output_event, 0)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; /* Create and emit the STREAM-END event. 
*/ if (!yaml_stream_end_event_initialize(&output_event)) goto event_error; if (!yaml_emitter_emit(&emitter, &output_event)) goto emitter_error; yaml_parser_delete(&parser); yaml_emitter_delete(&emitter); return 0; parser_error: /* Display a parser error message. */ switch (parser.error) { case YAML_MEMORY_ERROR: fprintf(stderr, "Memory error: Not enough memory for parsing\n"); break; case YAML_READER_ERROR: if (parser.problem_value != -1) { fprintf(stderr, "Reader error: %s: #%X at %d\n", parser.problem, parser.problem_value, parser.problem_offset); } else { fprintf(stderr, "Reader error: %s at %d\n", parser.problem, parser.problem_offset); } break; case YAML_SCANNER_ERROR: if (parser.context) { fprintf(stderr, "Scanner error: %s at line %d, column %d\n" "%s at line %d, column %d\n", parser.context, parser.context_mark.line+1, parser.context_mark.column+1, parser.problem, parser.problem_mark.line+1, parser.problem_mark.column+1); } else { fprintf(stderr, "Scanner error: %s at line %d, column %d\n", parser.problem, parser.problem_mark.line+1, parser.problem_mark.column+1); } break; case YAML_PARSER_ERROR: if (parser.context) { fprintf(stderr, "Parser error: %s at line %d, column %d\n" "%s at line %d, column %d\n", parser.context, parser.context_mark.line+1, parser.context_mark.column+1, parser.problem, parser.problem_mark.line+1, parser.problem_mark.column+1); } else { fprintf(stderr, "Parser error: %s at line %d, column %d\n", parser.problem, parser.problem_mark.line+1, parser.problem_mark.column+1); } break; default: /* Couldn't happen. */ fprintf(stderr, "Internal error\n"); break; } yaml_event_delete(&input_event); yaml_parser_delete(&parser); yaml_emitter_delete(&emitter); return 1; emitter_error: /* Display an emitter error message. 
*/ switch (emitter.error) { case YAML_MEMORY_ERROR: fprintf(stderr, "Memory error: Not enough memory for emitting\n"); break; case YAML_WRITER_ERROR: fprintf(stderr, "Writer error: %s\n", emitter.problem); break; case YAML_EMITTER_ERROR: fprintf(stderr, "Emitter error: %s\n", emitter.problem); break; default: /* Couldn't happen. */ fprintf(stderr, "Internal error\n"); break; } yaml_event_delete(&input_event); yaml_parser_delete(&parser); yaml_emitter_delete(&emitter); return 1; event_error: fprintf(stderr, "Memory error: Not enough memory for creating an event\n"); yaml_event_delete(&input_event); yaml_parser_delete(&parser); yaml_emitter_delete(&emitter); return 1; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/tests/run-parser.c0000644000000000000000000000222613306562377023610 0ustar rootroot#include #include #include #ifdef NDEBUG #undef NDEBUG #endif #include int main(int argc, char *argv[]) { int number; if (argc < 2) { printf("Usage: %s file1.yaml ...\n", argv[0]); return 0; } for (number = 1; number < argc; number ++) { FILE *file; yaml_parser_t parser; yaml_event_t event; int done = 0; int count = 0; int error = 0; printf("[%d] Parsing '%s': ", number, argv[number]); fflush(stdout); file = fopen(argv[number], "rb"); assert(file); assert(yaml_parser_initialize(&parser)); yaml_parser_set_input_file(&parser, file); while (!done) { if (!yaml_parser_parse(&parser, &event)) { error = 1; break; } done = (event.type == YAML_STREAM_END_EVENT); yaml_event_delete(&event); count ++; } yaml_parser_delete(&parser); assert(!fclose(file)); printf("%s (%d events)\n", (error ? 
"FAILURE" : "SUCCESS"), count); } return 0; } tarantool_1.9.1.26.g63eb81e3c/third_party/libyaml/tests/example-deconstructor-alt.c0000644000000000000000000010057613306562377026626 0ustar rootroot #include #include #include int main(int argc, char *argv[]) { int help = 0; int canonical = 0; int unicode = 0; int k; int done = 0; yaml_parser_t parser; yaml_emitter_t emitter; yaml_event_t input_event; yaml_document_t output_document; int root; /* Clear the objects. */ memset(&parser, 0, sizeof(parser)); memset(&emitter, 0, sizeof(emitter)); memset(&input_event, 0, sizeof(input_event)); memset(&output_document, 0, sizeof(output_document)); /* Analyze command line options. */ for (k = 1; k < argc; k ++) { if (strcmp(argv[k], "-h") == 0 || strcmp(argv[k], "--help") == 0) { help = 1; } else if (strcmp(argv[k], "-c") == 0 || strcmp(argv[k], "--canonical") == 0) { canonical = 1; } else if (strcmp(argv[k], "-u") == 0 || strcmp(argv[k], "--unicode") == 0) { unicode = 1; } else { fprintf(stderr, "Unrecognized option: %s\n" "Try `%s --help` for more information.\n", argv[k], argv[0]); return 1; } } /* Display the help string. */ if (help) { printf("%s . */ if (input_event.data.stream_start.encoding) { yaml_encoding_t encoding = input_event.data.stream_start.encoding; key = yaml_document_add_scalar(&output_document, NULL, "encoding", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, (encoding == YAML_UTF8_ENCODING ? "utf-8" : encoding == YAML_UTF16LE_ENCODING ? "utf-16-le" : encoding == YAML_UTF16BE_ENCODING ? "utf-16-be" : "unknown"), -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; } break; case YAML_STREAM_END_EVENT: /* Add 'type': 'STREAM-END'. 
*/ key = yaml_document_add_scalar(&output_document, NULL, "type", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, "STREAM-END", -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; break; case YAML_DOCUMENT_START_EVENT: /* Add 'type': 'DOCUMENT-START'. */ key = yaml_document_add_scalar(&output_document, NULL, "type", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, "DOCUMENT-START", -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; /* Display the output_document version numbers. */ if (input_event.data.document_start.version_directive) { yaml_version_directive_t *version = input_event.data.document_start.version_directive; char number[64]; /* Add 'version': {}. */ key = yaml_document_add_scalar(&output_document, NULL, "version", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; map = yaml_document_add_mapping(&output_document, NULL, YAML_FLOW_MAPPING_STYLE); if (!map) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, map)) goto document_error; /* Add 'major': . */ key = yaml_document_add_scalar(&output_document, NULL, "major", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; sprintf(number, "%d", version->major); value = yaml_document_add_scalar(&output_document, YAML_INT_TAG, number, -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, map, key, value)) goto document_error; /* Add 'minor': . 
*/ key = yaml_document_add_scalar(&output_document, NULL, "minor", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; sprintf(number, "%d", version->minor); value = yaml_document_add_scalar(&output_document, YAML_INT_TAG, number, -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, map, key, value)) goto document_error; } /* Display the output_document tag directives. */ if (input_event.data.document_start.tag_directives.start != input_event.data.document_start.tag_directives.end) { yaml_tag_directive_t *tag; /* Add 'tags': []. */ key = yaml_document_add_scalar(&output_document, NULL, "tags", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; seq = yaml_document_add_sequence(&output_document, NULL, YAML_BLOCK_SEQUENCE_STYLE); if (!seq) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, seq)) goto document_error; for (tag = input_event.data.document_start.tag_directives.start; tag != input_event.data.document_start.tag_directives.end; tag ++) { /* Add {}. */ map = yaml_document_add_mapping(&output_document, NULL, YAML_FLOW_MAPPING_STYLE); if (!map) goto document_error; if (!yaml_document_append_sequence_item(&output_document, seq, map)) goto document_error; /* Add 'handle': . */ key = yaml_document_add_scalar(&output_document, NULL, "handle", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, tag->handle, -1, YAML_DOUBLE_QUOTED_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, map, key, value)) goto document_error; /* Add 'prefix': . 
*/ key = yaml_document_add_scalar(&output_document, NULL, "prefix", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, tag->prefix, -1, YAML_DOUBLE_QUOTED_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, map, key, value)) goto document_error; } } /* Add 'implicit': . */ key = yaml_document_add_scalar(&output_document, NULL, "implicit", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, YAML_BOOL_TAG, (input_event.data.document_start.implicit ? "true" : "false"), -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; break; case YAML_DOCUMENT_END_EVENT: /* Add 'type': 'DOCUMENT-END'. */ key = yaml_document_add_scalar(&output_document, NULL, "type", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, "DOCUMENT-END", -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; /* Add 'implicit': . */ key = yaml_document_add_scalar(&output_document, NULL, "implicit", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, YAML_BOOL_TAG, (input_event.data.document_end.implicit ? "true" : "false"), -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; break; case YAML_ALIAS_EVENT: /* Add 'type': 'ALIAS'. 
*/ key = yaml_document_add_scalar(&output_document, NULL, "type", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, "ALIAS", -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; /* Add 'anchor': . */ key = yaml_document_add_scalar(&output_document, NULL, "anchor", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, input_event.data.alias.anchor, -1, YAML_DOUBLE_QUOTED_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; break; case YAML_SCALAR_EVENT: /* Add 'type': 'SCALAR'. */ key = yaml_document_add_scalar(&output_document, NULL, "type", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, "SCALAR", -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; /* Add 'anchor': . */ if (input_event.data.scalar.anchor) { key = yaml_document_add_scalar(&output_document, NULL, "anchor", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, input_event.data.scalar.anchor, -1, YAML_DOUBLE_QUOTED_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; } /* Add 'tag': . 
*/ if (input_event.data.scalar.tag) { key = yaml_document_add_scalar(&output_document, NULL, "tag", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, input_event.data.scalar.tag, -1, YAML_DOUBLE_QUOTED_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; } /* Add 'value': . */ key = yaml_document_add_scalar(&output_document, NULL, "value", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, NULL, input_event.data.scalar.value, input_event.data.scalar.length, YAML_DOUBLE_QUOTED_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, value)) goto document_error; /* Display if the scalar tag is implicit. */ /* Add 'implicit': {} */ key = yaml_document_add_scalar(&output_document, NULL, "version", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; map = yaml_document_add_mapping(&output_document, NULL, YAML_FLOW_MAPPING_STYLE); if (!map) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, properties, key, map)) goto document_error; /* Add 'plain': . */ key = yaml_document_add_scalar(&output_document, NULL, "plain", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, YAML_BOOL_TAG, (input_event.data.scalar.plain_implicit ? "true" : "false"), -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, map, key, value)) goto document_error; /* Add 'quoted': . */ key = yaml_document_add_scalar(&output_document, NULL, "quoted", -1, YAML_PLAIN_SCALAR_STYLE); if (!key) goto document_error; value = yaml_document_add_scalar(&output_document, YAML_BOOL_TAG, (input_event.data.scalar.quoted_implicit ? 
"true" : "false"), -1, YAML_PLAIN_SCALAR_STYLE); if (!value) goto document_error; if (!yaml_document_append_mapping_pair(&output_document, map, key, value)) goto document_error; /* Display the style information. */ if (input_event.data.scalar.style) { yaml_scalar_style_t style = input_event.data.scalar.style; /* Add 'style':

This page describes the detailed semantics underlying the FFI library and its interaction with both Lua and C code.

Given that the FFI library is designed to interface with C code and that declarations can be written in plain C syntax, it closely follows the C language semantics, wherever possible. Some minor concessions are needed for smoother interoperation with Lua language semantics.

Please don't be overwhelmed by the contents of this page — this is a reference and you may need to consult it, if in doubt. It doesn't hurt to skim this page, but most of the semantics "just work" as you'd expect them to work. It should be straightforward to write applications using the LuaJIT FFI for developers with a C or C++ background.

C Language Support

The FFI library has a built-in C parser with a minimal memory footprint. It's used by the ffi.* library functions to declare C types or external symbols.

Its only purpose is to parse C declarations, as found e.g. in C header files. Although it does evaluate constant expressions, it's not a C compiler. The body of inline C function definitions is simply ignored.

Also, this is not a validating C parser. It expects and accepts correctly formed C declarations, but it may choose to ignore bad declarations or show rather generic error messages. If in doubt, please check the input against your favorite C compiler.

The C parser complies with the C99 language standard plus the following extensions:

  • The '\e' escape in character and string literals.
  • The C99/C++ boolean type, declared with the keywords bool or _Bool.
  • Complex numbers, declared with the keywords complex or _Complex.
  • Two complex number types: complex (aka complex double) and complex float.
  • Vector types, declared with the GCC mode or vector_size attribute.
  • Unnamed ('transparent') struct/union fields inside a struct/union.
  • Incomplete enum declarations, handled like incomplete struct declarations.
  • Unnamed enum fields inside a struct/union. This is similar to a scoped C++ enum, except that declared constants are visible in the global namespace, too.
  • Scoped static const declarations inside a struct/union (from C++).
  • Zero-length arrays ([0]), empty struct/union, variable-length arrays (VLA, [?]) and variable-length structs (VLS, with a trailing VLA).
  • C++ reference types (int &x).
  • Alternate GCC keywords with '__', e.g. __const__.
  • GCC __attribute__ with the following attributes: aligned, packed, mode, vector_size, cdecl, fastcall, stdcall, thiscall.
  • The GCC __extension__ keyword and the GCC __alignof__ operator.
  • GCC __asm__("symname") symbol name redirection for function declarations.
  • MSVC keywords for fixed-length types: __int8, __int16, __int32 and __int64.
  • MSVC __cdecl, __fastcall, __stdcall, __thiscall, __ptr32, __ptr64, __declspec(align(n)) and #pragma pack.
  • All other GCC/MSVC-specific attributes are ignored.

The following C types are pre-defined by the C parser (like a typedef, except re-declarations will be ignored):

  • Vararg handling: va_list, __builtin_va_list, __gnuc_va_list.
  • From <stddef.h>: ptrdiff_t, size_t, wchar_t.
  • From <stdint.h>: int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, intptr_t, uintptr_t.
  • From <unistd.h> (POSIX): ssize_t.

You're encouraged to use these types in preference to compiler-specific extensions or target-dependent standard types. E.g. char differs in signedness and long differs in size, depending on the target architecture and platform ABI.

The following C features are not supported:

  • A declaration must always have a type specifier; it doesn't default to an int type.
  • Old-style empty function declarations (K&R) are not allowed. All C functions must have a proper prototype declaration. A function declared without parameters (int foo();) is treated as a function taking zero arguments, like in C++.
  • The long double C type is parsed correctly, but there's no support for the related conversions, accesses or arithmetic operations.
  • Wide character strings and character literals are not supported.
  • See below for features that are currently not implemented.

C Type Conversion Rules

Conversions from C types to Lua objects

These conversion rules apply for read accesses to C types: indexing pointers, arrays or struct/union types; reading external variables or constant values; retrieving return values from C calls:

Input | Conversion | Output
int8_t, int16_t | sign-ext to int32_t → double | number
uint8_t, uint16_t | zero-ext to int32_t → double | number
int32_t, uint32_t | → double | number
int64_t, uint64_t | boxed value | 64 bit int cdata
double, float | → double | number
bool | 0 → false, otherwise true | boolean
enum | boxed value | enum cdata
Complex number | boxed value | complex cdata
Vector | boxed value | vector cdata
Pointer | boxed value | pointer cdata
Array | boxed reference | reference cdata
struct/union | boxed reference | reference cdata

Bitfields are treated like their underlying type.

Reference types are dereferenced before a conversion can take place — the conversion is applied to the C type pointed to by the reference.

Conversions from Lua objects to C types

These conversion rules apply for write accesses to C types: indexing pointers, arrays or struct/union types; initializing cdata objects; casts to C types; writing to external variables; passing arguments to C calls:

Input | Conversion | Output
number | → | double
boolean | false → 0, true → 1 | bool
nil | NULL → | (void *)
lightuserdata | lightuserdata address → | (void *)
userdata | userdata payload → | (void *)
io.* file | get FILE * handle → | (void *)
string | match against enum constant | enum
string | copy string data + zero-byte | int8_t[], uint8_t[]
string | string data → | const char[]
function | create callback → | C function type
table | table initializer | Array
table | table initializer | struct/union
cdata | cdata payload → | C type

If the result type of this conversion doesn't match the C type of the destination, the conversion rules between C types are applied.

Reference types are immutable after initialization ("no re-seating of references"). For initialization purposes or when passing values to reference parameters, they are treated like pointers. Note that unlike in C++, there's no way to implement automatic reference generation of variables under the Lua language semantics. If you want to call a function with a reference parameter, you need to explicitly pass a one-element array.

Conversions between C types

These conversion rules are more or less the same as the standard C conversion rules. Some rules only apply to casts, or require pointer or type compatibility:

Input            | Conversion                    | Output
-----------------|-------------------------------|------------------------
Signed integer   | narrow or sign-extend         | Integer
Unsigned integer | narrow or zero-extend         | Integer
Integer          | round                         | double, float
double, float    | trunc int32_t → narrow        | (u)int8_t, (u)int16_t
double, float    | trunc                         | (u)int32_t, (u)int64_t
double, float    | round                         | float, double
Number           | n == 0 → 0, otherwise 1       | bool
bool             | false → 0, true → 1           | Number
Complex number   | convert real part             | Number
Number           | convert real part, imag = 0   | Complex number
Complex number   | convert real and imag part    | Complex number
Number           | convert scalar and replicate  | Vector
Vector           | copy (same size)              | Vector
struct/union     | take base address (compat)    | Pointer
Array            | take base address (compat)    | Pointer
Function         | take function address         | Function pointer
Number           | convert via uintptr_t (cast)  | Pointer
Pointer          | convert address (compat/cast) | Pointer
Pointer          | convert address (cast)        | Integer
Array            | convert base address (cast)   | Integer
Array            | copy (compat)                 | Array
struct/union     | copy (identical type)         | struct/union

Bitfields or enum types are treated like their underlying type.

Conversions not listed above will raise an error. E.g. it's not possible to convert a pointer to a complex number or vice versa.

Conversions for vararg C function arguments

The following default conversion rules apply when passing Lua objects to the variable argument part of vararg C functions:

Input              | Conversion               | Output
-------------------|--------------------------|---------------------
number             | →                        | double
boolean            | false → 0, true → 1      | bool
nil                | NULL                     | (void *)
userdata           | userdata payload →       | (void *)
lightuserdata      | lightuserdata address →  | (void *)
string             | string data →            | const char *
float cdata        | →                        | double
Array cdata        | take base address        | Element pointer
struct/union cdata | take base address        | struct/union pointer
Function cdata     | take function address    | Function pointer
Any other cdata    | no conversion            | C type

To pass a Lua object, other than a cdata object, as a specific type, you need to override the conversion rules: create a temporary cdata object with a constructor or a cast and initialize it with the value to pass:

Assuming x is a Lua number, here's how to pass it as an integer to a vararg function:

ffi.cdef[[
int printf(const char *fmt, ...);
]]
ffi.C.printf("integer value: %d\n", ffi.new("int", x))

If you don't do this, the default Lua number → double conversion rule applies. A vararg C function expecting an integer will see a garbled or uninitialized value.

Initializers

Creating a cdata object with ffi.new() or the equivalent constructor syntax always initializes its contents, too. Different rules apply, depending on the number of optional initializers and the C types involved:

  • If no initializers are given, the object is filled with zero bytes.
  • Scalar types (numbers and pointers) accept a single initializer. The Lua object is converted to the scalar C type.
  • Valarrays (complex numbers and vectors) are treated like scalars when a single initializer is given. Otherwise they are treated like regular arrays.
  • Aggregate types (arrays and structs) accept either a single cdata initializer of the same type (copy constructor), a single table initializer, or a flat list of initializers.
  • The elements of an array are initialized, starting at index zero. If a single initializer is given for an array, it's repeated for all remaining elements. This doesn't happen if two or more initializers are given: all remaining uninitialized elements are filled with zero bytes.
  • Byte arrays may also be initialized with a Lua string. This copies the whole string plus a terminating zero-byte. The copy stops early only if the array has a known, fixed size.
  • The fields of a struct are initialized in the order of their declaration. Uninitialized fields are filled with zero bytes.
  • Only the first field of a union can be initialized with a flat initializer.
  • Elements or fields which are aggregates themselves are initialized with a single initializer, but this may be a table initializer or a compatible aggregate.
  • Excess initializers cause an error.

Table Initializers

The following rules apply if a Lua table is used to initialize an Array or a struct/union:

  • If the table index [0] is non-nil, then the table is assumed to be zero-based. Otherwise it's assumed to be one-based.
  • Array elements, starting at index zero, are initialized one-by-one with the consecutive table elements, starting at either index [0] or [1]. This process stops at the first nil table element.
  • If exactly one array element was initialized, it's repeated for all the remaining elements. Otherwise all remaining uninitialized elements are filled with zero bytes.
  • The above logic only applies to arrays with a known fixed size. A VLA is only initialized with the element(s) given in the table. Depending on the use case, you may need to explicitly add a NULL or 0 terminator to a VLA.
  • A struct/union can be initialized in the order of the declaration of its fields. Each field is initialized with consecutive table elements, starting at either index [0] or [1]. This process stops at the first nil table element.
  • Otherwise, if neither index [0] nor [1] is present, a struct/union is initialized by looking up each field name (as a string key) in the table. Each non-nil value is used to initialize the corresponding field.
  • Uninitialized fields of a struct are filled with zero bytes, except for the trailing VLA of a VLS.
  • Initialization of a union stops after one field has been initialized. If no field has been initialized, the union is filled with zero bytes.
  • Elements or fields which are aggregates themselves are initialized with a single initializer, but this may be a nested table initializer (or a compatible aggregate).
  • Excess initializers for an array cause an error. Excess initializers for a struct/union are ignored. Unrelated table entries are ignored, too.

Example:

local ffi = require("ffi")

ffi.cdef[[
struct foo { int a, b; };
union bar { int i; double d; };
struct nested { int x; struct foo y; };
]]

ffi.new("int[3]", {})            --> 0, 0, 0
ffi.new("int[3]", {1})           --> 1, 1, 1
ffi.new("int[3]", {1,2})         --> 1, 2, 0
ffi.new("int[3]", {1,2,3})       --> 1, 2, 3
ffi.new("int[3]", {[0]=1})       --> 1, 1, 1
ffi.new("int[3]", {[0]=1,2})     --> 1, 2, 0
ffi.new("int[3]", {[0]=1,2,3})   --> 1, 2, 3
ffi.new("int[3]", {[0]=1,2,3,4}) --> error: too many initializers

ffi.new("struct foo", {})            --> a = 0, b = 0
ffi.new("struct foo", {1})           --> a = 1, b = 0
ffi.new("struct foo", {1,2})         --> a = 1, b = 2
ffi.new("struct foo", {[0]=1,2})     --> a = 1, b = 2
ffi.new("struct foo", {b=2})         --> a = 0, b = 2
ffi.new("struct foo", {a=1,b=2,c=3}) --> a = 1, b = 2  'c' is ignored

ffi.new("union bar", {})        --> i = 0, d = 0.0
ffi.new("union bar", {1})       --> i = 1, d = ?
ffi.new("union bar", {[0]=1,2}) --> i = 1, d = ?    '2' is ignored
ffi.new("union bar", {d=2})     --> i = ?, d = 2.0

ffi.new("struct nested", {1,{2,3}})     --> x = 1, y.a = 2, y.b = 3
ffi.new("struct nested", {x=1,y={2,3}}) --> x = 1, y.a = 2, y.b = 3

Operations on cdata Objects

All of the standard Lua operators can be applied to cdata objects or a mix of a cdata object and another Lua object. The following list shows the pre-defined operations.

Reference types are dereferenced before performing each of the operations below — the operation is applied to the C type pointed to by the reference.

The pre-defined operations are always tried first before deferring to a metamethod or index table (if any) for the corresponding ctype (except for __new). An error is raised if the metamethod lookup or index table lookup fails.

Indexing a cdata object

  • Indexing a pointer/array: a cdata pointer/array can be indexed by a cdata number or a Lua number. The element address is computed as the base address plus the number value multiplied by the element size in bytes. A read access loads the element value and converts it to a Lua object. A write access converts a Lua object to the element type and stores the converted value to the element. An error is raised if the element size is undefined or a write access to a constant element is attempted.
  • Dereferencing a struct/union field: a cdata struct/union or a pointer to a struct/union can be dereferenced by a string key, giving the field name. The field address is computed as the base address plus the relative offset of the field. A read access loads the field value and converts it to a Lua object. A write access converts a Lua object to the field type and stores the converted value to the field. An error is raised if a write access to a constant struct/union or a constant field is attempted. Scoped enum constants or static constants are treated like a constant field.
  • Indexing a complex number: a complex number can be indexed either by a cdata number or a Lua number with the values 0 or 1, or by the strings "re" or "im". A read access loads the real part ([0], .re) or the imaginary part ([1], .im) part of a complex number and converts it to a Lua number. The sub-parts of a complex number are immutable — assigning to an index of a complex number raises an error. Accessing out-of-bound indexes returns unspecified results, but is guaranteed not to trigger memory access violations.
  • Indexing a vector: a vector is treated like an array for indexing purposes, except the vector elements are immutable — assigning to an index of a vector raises an error.

A ctype object can be indexed with a string key, too. The only pre-defined operation is reading scoped constants of struct/union types. All other accesses defer to the corresponding metamethods or index tables (if any).

Note: since there's (deliberately) no address-of operator, a cdata object holding a value type is effectively immutable after initialization. The JIT compiler benefits from this fact when applying certain optimizations.

As a consequence, the elements of complex numbers and vectors are immutable. But the elements of an aggregate holding these types may be modified of course. I.e. you cannot assign to foo.c.im, but you can assign a (newly created) complex number to foo.c.

The JIT compiler implements strict aliasing rules: accesses to different types do not alias, except for differences in signedness (this applies even to char pointers, unlike C99). Type punning through unions is explicitly detected and allowed.

Calling a cdata object

  • Constructor: a ctype object can be called and used as a constructor. This is equivalent to ffi.new(ct, ...), unless a __new metamethod is defined. The __new metamethod is called with the ctype object plus any other arguments passed to the constructor. Note that you have to use ffi.new inside of it, since calling ct(...) would cause infinite recursion.
  • C function call: a cdata function or cdata function pointer can be called. The passed arguments are converted to the C types of the parameters given by the function declaration. Arguments passed to the variable argument part of vararg C function use special conversion rules. This C function is called and the return value (if any) is converted to a Lua object.
    On Windows/x86 systems, __stdcall functions are automatically detected and a function declared as __cdecl (the default) is silently fixed up after the first call.

Arithmetic on cdata objects

  • Pointer arithmetic: a cdata pointer/array and a cdata number or a Lua number can be added or subtracted. The number must be on the right hand side for a subtraction. The result is a pointer of the same type with an address plus or minus the number value multiplied by the element size in bytes. An error is raised if the element size is undefined.
  • Pointer difference: two compatible cdata pointers/arrays can be subtracted. The result is the difference between their addresses, divided by the element size in bytes. An error is raised if the element size is undefined or zero.
  • 64 bit integer arithmetic: the standard arithmetic operators (+ - * / % ^ and unary minus) can be applied to two cdata numbers, or a cdata number and a Lua number. If one of them is an uint64_t, the other side is converted to an uint64_t and an unsigned arithmetic operation is performed. Otherwise both sides are converted to an int64_t and a signed arithmetic operation is performed. The result is a boxed 64 bit cdata object.
    If one of the operands is an enum and the other operand is a string, the string is converted to the value of a matching enum constant before the above conversion.
    These rules ensure that 64 bit integers are "sticky". Any expression involving at least one 64 bit integer operand results in another one. The undefined cases for the division, modulo and power operators return 2LL ^ 63 or 2ULL ^ 63.
    You'll have to explicitly convert a 64 bit integer to a Lua number (e.g. for regular floating-point calculations) with tonumber(). But note this may incur a precision loss.
  • 64 bit bitwise operations: the rules for 64 bit arithmetic operators apply analogously.
    Unlike the other bit.* operations, bit.tobit() converts a cdata number via int64_t to int32_t and returns a Lua number.
    For bit.band(), bit.bor() and bit.bxor(), the conversion to int64_t or uint64_t applies to all arguments, if any argument is a cdata number.
    For all other operations, only the first argument is used to determine the output type. This implies that a cdata number as a shift count for shifts and rotates is accepted, but that alone does not cause a cdata number output.

Comparisons of cdata objects

  • Pointer comparison: two compatible cdata pointers/arrays can be compared. The result is the same as an unsigned comparison of their addresses. nil is treated like a NULL pointer, which is compatible with any other pointer type.
  • 64 bit integer comparison: two cdata numbers, or a cdata number and a Lua number can be compared with each other. If one of them is an uint64_t, the other side is converted to an uint64_t and an unsigned comparison is performed. Otherwise both sides are converted to an int64_t and a signed comparison is performed.
    If one of the operands is an enum and the other operand is a string, the string is converted to the value of a matching enum constant before the above conversion.
  • Comparisons for equality/inequality never raise an error. Even incompatible pointers can be compared for equality by address. Any other incompatible comparison (also with non-cdata objects) treats the two sides as unequal.

cdata objects as table keys

Lua tables may be indexed by cdata objects, but this doesn't provide any useful semantics — cdata objects are unsuitable as table keys!

A cdata object is treated like any other garbage-collected object and is hashed and compared by its address for table indexing. Since there's no interning for cdata value types, the same value may be boxed in different cdata objects with different addresses. Thus t[1LL+1LL] and t[2LL] usually do not point to the same hash slot and they certainly do not point to the same hash slot as t[2].

It would seriously drive up implementation complexity and slow down the common case, if one were to add extra handling for by-value hashing and comparisons to Lua tables. Given the ubiquity of their use inside the VM, this is not acceptable.

There are three viable alternatives, if you really need to use cdata objects as keys:

  • If you can get by with the precision of Lua numbers (52 bits), then use tonumber() on a cdata number or combine multiple fields of a cdata aggregate to a Lua number. Then use the resulting Lua number as a key when indexing tables.
    One obvious benefit: t[tonumber(2LL)] does point to the same slot as t[2].
  • Otherwise use either tostring() on 64 bit integers or complex numbers or combine multiple fields of a cdata aggregate to a Lua string (e.g. with ffi.string()). Then use the resulting Lua string as a key when indexing tables.
  • Create your own specialized hash table implementation using the C types provided by the FFI library, just like you would in C code. Ultimately this may give much better performance than the other alternatives or what a generic by-value hash table could possibly provide.

Parameterized Types

To facilitate some abstractions, the two functions ffi.typeof and ffi.cdef support parameterized types in C declarations. Note: none of the other API functions taking a cdecl allow this.

Any place you can write a typedef name, an identifier or a number in a declaration, you can write $ (the dollar sign) instead. These placeholders are replaced in order of appearance with the arguments following the cdecl string:

-- Declare a struct with a parameterized field type and name:
ffi.cdef([[
typedef struct { $ $; } foo_t;
]], type1, name1)

-- Anonymous struct with dynamic names:
local bar_t = ffi.typeof("struct { int $, $; }", name1, name2)
-- Derived pointer type:
local bar_ptr_t = ffi.typeof("$ *", bar_t)

-- Parameterized dimensions work even where a VLA won't work:
local matrix_t = ffi.typeof("uint8_t[$][$]", width, height)

Caveat: this is not simple text substitution! A passed ctype or cdata object is treated like the underlying type, a passed string is considered an identifier and a number is considered a number. You must not mix this up: e.g. passing "int" as a string doesn't work in place of a type, you'd need to use ffi.typeof("int") instead.

The main use for parameterized types are libraries implementing abstract data types (» example), similar to what can be achieved with C++ template metaprogramming. Another use case are derived types of anonymous structs, which avoids pollution of the global struct namespace.

Please note that parameterized types are a nice tool and indispensable for certain use cases. But you'll want to use them sparingly in regular code, e.g. when all types are actually fixed.

Garbage Collection of cdata Objects

All explicitly (ffi.new(), ffi.cast() etc.) or implicitly (accessors) created cdata objects are garbage collected. You need to ensure to retain valid references to cdata objects somewhere on a Lua stack, an upvalue or in a Lua table while they are still in use. Once the last reference to a cdata object is gone, the garbage collector will automatically free the memory used by it (at the end of the next GC cycle).

Please note that pointers themselves are cdata objects, however they are not followed by the garbage collector. So e.g. if you assign a cdata array to a pointer, you must keep the cdata object holding the array alive as long as the pointer is still in use:

ffi.cdef[[
typedef struct { int *a; } foo_t;
]]

local s = ffi.new("foo_t", ffi.new("int[10]")) -- WRONG!

local a = ffi.new("int[10]") -- OK
local s = ffi.new("foo_t", a)
-- Now do something with 's', but keep 'a' alive until you're done.

Similar rules apply for Lua strings which are implicitly converted to "const char *": the string object itself must be referenced somewhere or it'll be garbage collected eventually. The pointer will then point to stale data, which may have already been overwritten. Note that string literals are automatically kept alive as long as the function containing it (actually its prototype) is not garbage collected.

Objects which are passed as an argument to an external C function are kept alive until the call returns. So it's generally safe to create temporary cdata objects in argument lists. This is a common idiom for passing specific C types to vararg functions.

Memory areas returned by C functions (e.g. from malloc()) must be manually managed, of course (or use ffi.gc()). Pointers to cdata objects are indistinguishable from pointers returned by C functions (which is one of the reasons why the GC cannot follow them).

Callbacks

The LuaJIT FFI automatically generates special callback functions whenever a Lua function is converted to a C function pointer. This associates the generated callback function pointer with the C type of the function pointer and the Lua function object (closure).

This can happen implicitly due to the usual conversions, e.g. when passing a Lua function to a function pointer argument. Or you can use ffi.cast() to explicitly cast a Lua function to a C function pointer.

Currently only certain C function types can be used as callback functions. Neither C vararg functions nor functions with pass-by-value aggregate argument or result types are supported. There are no restrictions for the kind of Lua functions that can be called from the callback — no checks for the proper number of arguments are made. The return value of the Lua function will be converted to the result type and an error will be thrown for invalid conversions.

It's allowed to throw errors across a callback invocation, but it's not advisable in general. Do this only if you know the C function, that called the callback, copes with the forced stack unwinding and doesn't leak resources.

One thing that's not allowed, is to let an FFI call into a C function get JIT-compiled, which in turn calls a callback, calling into Lua again. Usually this attempt is caught by the interpreter first and the C function is blacklisted for compilation.

However, this heuristic may fail under specific circumstances: e.g. a message polling function might not run Lua callbacks right away and the call gets JIT-compiled. If it later happens to call back into Lua (e.g. a rarely invoked error callback), you'll get a VM PANIC with the message "bad callback". Then you'll need to manually turn off JIT-compilation with jit.off() for the surrounding Lua function that invokes such a message polling function (or similar).

Callback resource handling

Callbacks take up resources — you can only have a limited number of them at the same time (500 - 1000, depending on the architecture). The associated Lua functions are anchored to prevent garbage collection, too.

Callbacks due to implicit conversions are permanent! There is no way to guess their lifetime, since the C side might store the function pointer for later use (typical for GUI toolkits). The associated resources cannot be reclaimed until termination:

ffi.cdef[[
typedef int (__stdcall *WNDENUMPROC)(void *hwnd, intptr_t l);
int EnumWindows(WNDENUMPROC func, intptr_t l);
]]

-- Implicit conversion to a callback via function pointer argument.
local count = 0
ffi.C.EnumWindows(function(hwnd, l)
  count = count + 1
  return true
end, 0)
-- The callback is permanent and its resources cannot be reclaimed!
-- Ok, so this may not be a problem, if you do this only once.

Note: this example shows that you must properly declare __stdcall callbacks on Windows/x86 systems. The calling convention cannot be automatically detected, unlike for __stdcall calls to Windows functions.

For some use cases it's necessary to free up the resources or to dynamically redirect callbacks. Use an explicit cast to a C function pointer and keep the resulting cdata object. Then use the cb:free() or cb:set() methods on the cdata object:

-- Explicitly convert to a callback via cast.
local count = 0
local cb = ffi.cast("WNDENUMPROC", function(hwnd, l)
  count = count + 1
  return true
end)

-- Pass it to a C function.
ffi.C.EnumWindows(cb, 0)
-- EnumWindows doesn't need the callback after it returns, so free it.

cb:free()
-- The callback function pointer is no longer valid and its resources
-- will be reclaimed. The created Lua closure will be garbage collected.

Callback performance

Callbacks are slow! First, the C to Lua transition itself has an unavoidable cost, similar to a lua_call() or lua_pcall(). Argument and result marshalling add to that cost. And finally, neither the C compiler nor LuaJIT can inline or optimize across the language barrier and hoist repeated computations out of a callback function.

Do not use callbacks for performance-sensitive work: e.g. consider a numerical integration routine which takes a user-defined function to integrate over. It's a bad idea to call a user-defined Lua function from C code millions of times. The callback overhead will be absolutely detrimental for performance.

It's considerably faster to write the numerical integration routine itself in Lua — the JIT compiler will be able to inline the user-defined function and optimize it together with its calling context, with very competitive performance.

As a general guideline: use callbacks only when you must, because of existing C APIs. E.g. callback performance is irrelevant for a GUI application, which waits for user input most of the time, anyway.

For new designs avoid push-style APIs: a C function repeatedly calling a callback for each result. Instead use pull-style APIs: call a C function repeatedly to get a new result. Calls from Lua to C via the FFI are much faster than the other way round. Most well-designed libraries already use pull-style APIs (read/write, get/put).

C Library Namespaces

A C library namespace is a special kind of object which allows access to the symbols contained in shared libraries or the default symbol namespace. The default ffi.C namespace is automatically created when the FFI library is loaded. C library namespaces for specific shared libraries may be created with the ffi.load() API function.

Indexing a C library namespace object with a symbol name (a Lua string) automatically binds it to the library. First the symbol type is resolved — it must have been declared with ffi.cdef. Then the symbol address is resolved by searching for the symbol name in the associated shared libraries or the default symbol namespace. Finally, the resulting binding between the symbol name, the symbol type and its address is cached. Missing symbol declarations or nonexistent symbol names cause an error.

This is what happens on a read access for the different kinds of symbols:

  • External functions: a cdata object with the type of the function and its address is returned.
  • External variables: the symbol address is dereferenced and the loaded value is converted to a Lua object and returned.
  • Constant values (static const or enum constants): the constant is converted to a Lua object and returned.

This is what happens on a write access:

  • External variables: the value to be written is converted to the C type of the variable and then stored at the symbol address.
  • Writing to constant variables or to any other symbol type causes an error, like any other attempted write to a constant location.

C library namespaces themselves are garbage collected objects. If the last reference to the namespace object is gone, the garbage collector will eventually release the shared library reference and remove all memory associated with the namespace. Since this may trigger the removal of the shared library from the memory of the running process, it's generally not safe to use function cdata objects obtained from a library if the namespace object may be unreferenced.

Performance notice: the JIT compiler specializes to the identity of namespace objects and to the strings used to index it. This effectively turns function cdata objects into constants. It's not useful and actually counter-productive to explicitly cache these function objects, e.g. local strlen = ffi.C.strlen. OTOH it is useful to cache the namespace itself, e.g. local C = ffi.C.

No Hand-holding!

The FFI library has been designed as a low-level library. The goal is to interface with C code and C data types with a minimum of overhead. This means you can do anything you can do from C: access all memory, overwrite anything in memory, call machine code at any memory address and so on.

The FFI library provides no memory safety, unlike regular Lua code. It will happily allow you to dereference a NULL pointer, to access arrays out of bounds or to misdeclare C functions. If you make a mistake, your application might crash, just like equivalent C code would.

This behavior is inevitable, since the goal is to provide full interoperability with C code. Adding extra safety measures, like bounds checks, would be futile. There's no way to detect misdeclarations of C functions, since shared libraries only provide symbol names, but no type information. Likewise there's no way to infer the valid range of indexes for a returned pointer.

Again: the FFI library is a low-level library. This implies it needs to be used with care, but its flexibility and performance often outweigh this concern. If you're a C or C++ developer, it'll be easy to apply your existing knowledge. OTOH writing code for the FFI library is not for the faint of heart and probably shouldn't be the first exercise for someone with little experience in Lua, C or C++.

As a corollary of the above, the FFI library is not safe for use by untrusted Lua code. If you're sandboxing untrusted Lua code, you definitely don't want to give this code access to the FFI library or to any cdata object (except 64 bit integers or complex numbers). Any properly engineered Lua sandbox needs to provide safety wrappers for many of the standard Lua library functions — similar wrappers need to be written for high-level operations on FFI data types, too.

Current Status

The initial release of the FFI library has some limitations and is missing some features. Most of these will be fixed in future releases.

C language support is currently incomplete:

  • C declarations are not passed through a C pre-processor, yet.
  • The C parser is able to evaluate most constant expressions commonly found in C header files. However it doesn't handle the full range of C expression semantics and may fail for some obscure constructs.
  • static const declarations only work for integer types up to 32 bits. Neither declaring string constants nor floating-point constants is supported.
  • Packed struct bitfields that cross container boundaries are not implemented.
  • Native vector types may be defined with the GCC mode or vector_size attribute. But no operations other than loading, storing and initializing them are supported, yet.
  • The volatile type qualifier is currently ignored by compiled code.
  • ffi.cdef silently ignores most re-declarations. Note: avoid re-declarations which do not conform to C99. The implementation will eventually be changed to perform strict checks.

The JIT compiler already handles a large subset of all FFI operations. It automatically falls back to the interpreter for unimplemented operations (you can check for this with the -jv command line option). The following operations are currently not compiled and may exhibit suboptimal performance, especially when used in inner loops:

  • Vector operations.
  • Table initializers.
  • Initialization of nested struct/union types.
  • Non-default initialization of VLA/VLS or large C types (> 128 bytes or > 16 array elements).
  • Bitfield initializations.
  • Pointer differences for element sizes that are not a power of two.
  • Calls to C functions with aggregates passed or returned by value.
  • Calls to ctype metamethods which are not plain functions.
  • ctype __newindex tables and non-string lookups in ctype __index tables.
  • tostring() for cdata types.
  • Calls to ffi.cdef(), ffi.load() and ffi.metatype().

Other missing features:

  • Arithmetic for complex numbers.
  • Passing structs by value to vararg C functions.
  • C++ exception interoperability does not extend to C functions called via the FFI, if the call is compiled.

tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/ext_c_api.html0000644000000000000000000001363213306562377023434 0ustar rootroot Lua/C API Extensions

LuaJIT adds some extensions to the standard Lua/C API. The LuaJIT include directory must be in the compiler search path (-Ipath) to be able to include the required header for C code:

#include "luajit.h"

Or for C++ code:

#include "lua.hpp"

luaJIT_setmode(L, idx, mode) — Control VM

This is a C API extension to allow control of the VM from C code. The full prototype of luaJIT_setmode is:

LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);

The returned status is either success (1) or failure (0). The second argument is either 0 or a stack index (similar to the other Lua/C API functions).

The third argument specifies the mode, which is 'or'ed with a flag. The flag can be LUAJIT_MODE_OFF to turn a feature off, LUAJIT_MODE_ON to turn a feature on, or LUAJIT_MODE_FLUSH to flush cached code.

The following modes are defined:

luaJIT_setmode(L, 0, LUAJIT_MODE_ENGINE|flag)

Turn the whole JIT compiler on or off or flush the whole cache of compiled code.

luaJIT_setmode(L, idx, LUAJIT_MODE_FUNC|flag)
luaJIT_setmode(L, idx, LUAJIT_MODE_ALLFUNC|flag)
luaJIT_setmode(L, idx, LUAJIT_MODE_ALLSUBFUNC|flag)

This sets the mode for the function at the stack index idx or the parent of the calling function (idx = 0). It either enables JIT compilation for a function, disables it and flushes any already compiled code or only flushes already compiled code. This applies recursively to all sub-functions of the function with LUAJIT_MODE_ALLFUNC or only to the sub-functions with LUAJIT_MODE_ALLSUBFUNC.

luaJIT_setmode(L, trace,
  LUAJIT_MODE_TRACE|LUAJIT_MODE_FLUSH)

Flushes the specified root trace and all of its side traces from the cache. The code for the trace will be retained as long as there are any other traces which link to it.

luaJIT_setmode(L, idx, LUAJIT_MODE_WRAPCFUNC|flag)

This mode defines a wrapper function for calls to C functions. If called with LUAJIT_MODE_ON, the stack index at idx must be a lightuserdata object holding a pointer to the wrapper function. From now on all C functions are called through the wrapper function. If called with LUAJIT_MODE_OFF this mode is turned off and all C functions are directly called.

The wrapper function can be used for debugging purposes or to catch and convert foreign exceptions. But please read the section on C++ exception interoperability first. Recommended usage can be seen in this C++ code excerpt:

#include <exception>
#include "lua.hpp"

// Catch C++ exceptions and convert them to Lua error messages.
// Customize as needed for your own exception classes.
static int wrap_exceptions(lua_State *L, lua_CFunction f)
{
  try {
    return f(L);  // Call wrapped function and return result.
  } catch (const char *s) {  // Catch and convert exceptions.
    lua_pushstring(L, s);
  } catch (std::exception& e) {
    lua_pushstring(L, e.what());
  } catch (...) {
    lua_pushliteral(L, "caught (...)");
  }
  return lua_error(L);  // Rethrow as a Lua error.
}

static int myinit(lua_State *L)
{
  ...
  // Define wrapper function and enable it.
  lua_pushlightuserdata(L, (void *)wrap_exceptions);
  luaJIT_setmode(L, -1, LUAJIT_MODE_WRAPCFUNC|LUAJIT_MODE_ON);
  lua_pop(L, 1);
  ...
}

Note that you can only define a single global wrapper function, so be careful when using this mechanism from multiple C++ modules. Also note that this mechanism is not without overhead.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/luajit.html0000644000000000000000000001762213306562377022774 0ustar rootroot LuaJIT

LuaJIT is a Just-In-Time Compiler (JIT) for the » Lua programming language. Lua is a powerful, dynamic and light-weight programming language. It may be embedded or used as a general-purpose, stand-alone language.

LuaJIT is Copyright © 2005-2017 Mike Pall, released under the » MIT open source license.

Compatibility

WindowsLinuxBSDOSXPOSIX
EmbeddedAndroidiOS
PS3PS4PS VitaXbox 360Xbox One
GCCClang
LLVM
MSVC
x86
x64
ARM
ARM64
PPCMIPS32
MIPS64
Lua 5.1
API+ABI
+ JIT+ BitOp+ FFIDrop-in
DLL/.so

Overview

3x – 100x speedup · 115 KB VM · 90 KB JIT · 63 KLOC C · 24 KLOC ASM · 11 KLOC Lua

LuaJIT has been successfully used as a scripting middleware in games, appliances, network and graphics apps, numerical simulations, trading platforms and many other specialty applications. It scales from embedded devices, smartphones, desktops up to server farms. It combines high flexibility with » high performance and an unmatched low memory footprint.

LuaJIT has been in continuous development since 2005. It's widely considered to be one of the fastest dynamic language implementations. It has outperformed other dynamic languages on many cross-language benchmarks since its first release — often by a substantial margin.

For LuaJIT 2.0, the whole VM has been rewritten from the ground up and relentlessly optimized for performance. It combines a high-speed interpreter, written in assembler, with a state-of-the-art JIT compiler.

An innovative trace compiler is integrated with advanced, SSA-based optimizations and highly tuned code generation backends. A substantial reduction of the overhead associated with dynamic languages allows it to break into the performance range traditionally reserved for offline, static language compilers.

More ...

Please select a sub-topic in the navigation bar to learn more about LuaJIT.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/ext_profiler.html0000644000000000000000000003151713306562377024205 0ustar rootroot Profiler

LuaJIT has an integrated statistical profiler with very low overhead. It allows sampling the currently executing stack and other parameters in regular intervals.

The integrated profiler can be accessed from three levels:

High-Level Profiler

The bundled high-level profiler offers basic profiling functionality. It generates simple textual summaries or source code annotations. It can be accessed with the -jp command line option or from Lua code by loading the underlying jit.p module.

To cut to the chase — run this to get a CPU usage profile by function name:

luajit -jp myapp.lua

It's not a stated goal of the bundled profiler to add every possible option or to cater for special profiling needs. The low-level profiler APIs are documented below. They may be used by third-party authors to implement advanced functionality, e.g. IDE integration or graphical profilers.

Note: Sampling works for both interpreted and JIT-compiled code. The results for JIT-compiled code may sometimes be surprising. LuaJIT heavily optimizes and inlines Lua code — there's no simple one-to-one correspondence between source code lines and the sampled machine code.

-jp=[options[,output]]

The -jp command line option starts the high-level profiler. When the application run by the command line terminates, the profiler stops and writes the results to stdout or to the specified output file.

The options argument specifies how the profiling is to be performed:

  • f — Stack dump: function name, otherwise module:line. This is the default mode.
  • F — Stack dump: ditto, but dump module:name.
  • l — Stack dump: module:line.
  • <number> — Stack dump depth (callee ← caller). Default: 1.
  • -<number> — Inverse stack dump depth (caller → callee).
  • s — Split stack dump after first stack level. Implies depth ≥ 2 or depth ≤ -2.
  • p — Show full path for module names.
  • v — Show VM states.
  • z — Show zones.
  • r — Show raw sample counts. Default: show percentages.
  • a — Annotate excerpts from source code files.
  • A — Annotate complete source code files.
  • G — Produce raw output suitable for graphical tools.
  • m<number> — Minimum sample percentage to be shown. Default: 3%.
  • i<number> — Sampling interval in milliseconds. Default: 10ms.
    Note: The actual sampling precision is OS-dependent.

The default output for -jp is a list of the most CPU consuming spots in the application. Increasing the stack dump depth with (say) -jp=2 may help to point out the main callers or callees of hotspots. But sample aggregation is still flat per unique stack dump.

To get a two-level view (split view) of callers/callees, use -jp=s or -jp=-s. The percentages shown for the second level are relative to the first level.

To see how much time is spent in each line relative to a function, use -jp=fl.

To see how much time is spent in different VM states or zones, use -jp=v or -jp=z.

Combinations of v/z with f/F/l produce two-level views, e.g. -jp=vf or -jp=fv. This shows the time spent in a VM state or zone vs. hotspots. This can be used to answer questions like "Which time consuming functions are only interpreted?" or "What's the garbage collector overhead for a specific function?".

Multiple options can be combined — but not all combinations make sense, see above. E.g. -jp=3si4m1 samples three stack levels deep in 4ms intervals and shows a split view of the CPU consuming functions and their callers with a 1% threshold.

Source code annotations produced by -jp=a or -jp=A are always flat and at the line level. Obviously, the source code files need to be readable by the profiler script.

The high-level profiler can also be started and stopped from Lua code with:

require("jit.p").start(options, output)
...
require("jit.p").stop()

jit.zone — Zones

Zones can be used to provide information about different parts of an application to the high-level profiler. E.g. a game could make use of an "AI" zone, a "PHYS" zone, etc. Zones are hierarchical, organized as a stack.

The jit.zone module needs to be loaded explicitly:

local zone = require("jit.zone")
  • zone("name") pushes a named zone to the zone stack.
  • zone() pops the current zone from the zone stack and returns its name.
  • zone:get() returns the current zone name or nil.
  • zone:flush() flushes the zone stack.

To show the time spent in each zone use -jp=z. To show the time spent relative to hotspots use e.g. -jp=zf or -jp=fz.

Low-level Lua API

The jit.profile module gives access to the low-level API of the profiler from Lua code. This module needs to be loaded explicitly:

local profile = require("jit.profile")

This module can be used to implement your own higher-level profiler. A typical profiling run starts the profiler, captures stack dumps in the profiler callback, adds them to a hash table to aggregate the number of samples, stops the profiler and then analyzes all of the captured stack dumps. Other parameters can be sampled in the profiler callback, too. But it's important not to spend too much time in the callback, since this may skew the statistics.

profile.start(mode, cb) — Start profiler

This function starts the profiler. The mode argument is a string holding options:

  • f — Profile with precision down to the function level.
  • l — Profile with precision down to the line level.
  • i<number> — Sampling interval in milliseconds (default 10ms).
    Note: The actual sampling precision is OS-dependent.

The cb argument is a callback function which is called with three arguments: (thread, samples, vmstate). The callback is called on a separate coroutine, the thread argument is the state that holds the stack to sample for profiling. Note: do not modify the stack of that state or call functions on it.

samples gives the number of accumulated samples since the last callback (usually 1).

vmstate holds the VM state at the time the profiling timer triggered. This may or may not correspond to the state of the VM when the profiling callback is called. The state is either 'N' native (compiled) code, 'I' interpreted code, 'C' C code, 'G' the garbage collector, or 'J' the JIT compiler.

profile.stop() — Stop profiler

This function stops the profiler.

dump = profile.dumpstack([thread,] fmt, depth) — Dump stack

This function allows taking stack dumps in an efficient manner. It returns a string with a stack dump for the thread (coroutine), formatted according to the fmt argument:

  • p — Preserve the full path for module names. Otherwise only the file name is used.
  • f — Dump the function name if it can be derived. Otherwise use module:line.
  • F — Ditto, but dump module:name.
  • l — Dump module:line.
  • Z — Zap the following characters for the last dumped frame.
  • All other characters are added verbatim to the output string.

The depth argument gives the number of frames to dump, starting at the topmost frame of the thread. A negative number dumps the frames in inverse order.

The first example prints a list of the current module names and line numbers of up to 10 frames in separate lines. The second example prints semicolon-separated function names for all frames (up to 100) in inverse order:

print(profile.dumpstack(thread, "l\n", 10))
print(profile.dumpstack(thread, "lZ;", -100))

Low-level C API

The profiler can be controlled directly from C code, e.g. for use by IDEs. The declarations are in "luajit.h" (see Lua/C API extensions).

luaJIT_profile_start(L, mode, cb, data) — Start profiler

This function starts the profiler. See above for a description of the mode argument.

The cb argument is a callback function with the following declaration:

typedef void (*luaJIT_profile_callback)(void *data, lua_State *L,
                                        int samples, int vmstate);

data is available for use by the callback. L is the state that holds the stack to sample for profiling. Note: do not modify this stack or call functions on this stack — use a separate coroutine for this purpose. See above for a description of samples and vmstate.

luaJIT_profile_stop(L) — Stop profiler

This function stops the profiler.

p = luaJIT_profile_dumpstack(L, fmt, depth, len) — Dump stack

This function allows taking stack dumps in an efficient manner. See above for a description of fmt and depth.

This function returns a const char * pointing to a private string buffer of the profiler. The int *len argument returns the length of the output string. The buffer is overwritten on the next call and deallocated when the profiler stops. You either need to consume the content immediately or copy it for later use.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/running.html0000644000000000000000000003263013306562377023160 0ustar rootroot Running LuaJIT

LuaJIT has only a single stand-alone executable, called luajit on POSIX systems or luajit.exe on Windows. It can be used to run simple Lua statements or whole Lua applications from the command line. It has an interactive mode, too.

Command Line Options

The luajit stand-alone executable is just a slightly modified version of the regular lua stand-alone executable. It supports the same basic options, too. luajit -h prints a short list of the available options. Please have a look at the » Lua manual for details.

LuaJIT has some additional options:

-b[options] input output

This option saves or lists bytecode. The following additional options are accepted:

  • -l — Only list bytecode.
  • -s — Strip debug info (this is the default).
  • -g — Keep debug info.
  • -n name — Set module name (default: auto-detect from input name)
  • -t type — Set output file type (default: auto-detect from output name).
  • -a arch — Override architecture for object files (default: native).
  • -o os — Override OS for object files (default: native).
  • -e chunk — Use chunk string as input.
  • - (a single minus sign) — Use stdin as input and/or stdout as output.

The output file type is auto-detected from the extension of the output file name:

  • c — C source file, exported bytecode data.
  • h — C header file, static bytecode data.
  • obj or o — Object file, exported bytecode data (OS- and architecture-specific).
  • raw or any other extension — Raw bytecode file (portable).

Notes:

  • See also string.dump() for information on bytecode portability and compatibility.
  • A file in raw bytecode format is auto-detected and can be loaded like any Lua source file. E.g. directly from the command line or with loadfile(), dofile() etc.
  • To statically embed the bytecode of a module in your application, generate an object file and just link it with your application.
  • On most ELF-based systems (e.g. Linux) you need to explicitly export the global symbols when linking your application, e.g. with: -Wl,-E
  • require() tries to load embedded bytecode data from exported symbols (in *.exe or lua51.dll on Windows) and from shared libraries in package.cpath.

Typical usage examples:

luajit -b test.lua test.out                 # Save bytecode to test.out
luajit -bg test.lua test.out                # Keep debug info
luajit -be "print('hello world')" test.out  # Save cmdline script

luajit -bl test.lua                         # List to stdout
luajit -bl test.lua test.txt                # List to test.txt
luajit -ble "print('hello world')"          # List cmdline script

luajit -b test.lua test.obj                 # Generate object file
# Link test.obj with your application and load it with require("test")

-j cmd[=arg[,arg...]]

This option performs a LuaJIT control command or activates one of the loadable extension modules. The command is first looked up in the jit.* library. If no matching function is found, a module named jit.<cmd> is loaded and the start() function of the module is called with the specified arguments (if any). The space between -j and cmd is optional.

Here are the available LuaJIT control commands:

  • -jon — Turns the JIT compiler on (default).
  • -joff — Turns the JIT compiler off (only use the interpreter).
  • -jflush — Flushes the whole cache of compiled code.
  • -jv — Shows verbose information about the progress of the JIT compiler.
  • -jdump — Dumps the code and structures used in various compiler stages.
  • -jp — Start the integrated profiler.

The -jv and -jdump commands are extension modules written in Lua. They are mainly used for debugging the JIT compiler itself. For a description of their options and output format, please read the comment block at the start of their source. They can be found in the lib directory of the source distribution or installed under the jit directory. By default this is /usr/local/share/luajit-2.0.5/jit on POSIX systems.

-O[level]
-O[+]flag   -O-flag
-Oparam=value

This option allows fine-tuned control of the optimizations used by the JIT compiler. This is mainly intended for debugging LuaJIT itself. Please note that the JIT compiler is extremely fast (we are talking about the microsecond to millisecond range). Disabling optimizations doesn't have any visible impact on its overhead, but usually generates code that runs slower.

The first form sets an optimization level — this enables a specific mix of optimization flags. -O0 turns off all optimizations and higher numbers enable more optimizations. Omitting the level (i.e. just -O) sets the default optimization level, which is -O3 in the current version.

The second form adds or removes individual optimization flags. The third form sets a parameter for the VM or the JIT compiler to a specific value.

You can either use this option multiple times (like -Ocse -O-dce -Ohotloop=10) or separate several settings with a comma (like -O+cse,-dce,hotloop=10). The settings are applied from left to right and later settings override earlier ones. You can freely mix the three forms, but note that setting an optimization level overrides all earlier flags.

Here are the available flags and at what optimization levels they are enabled:

Flag -O1 -O2 -O3  
foldConstant Folding, Simplifications and Reassociation
cseCommon-Subexpression Elimination
dceDead-Code Elimination
narrow Narrowing of numbers to integers
loop Loop Optimizations (code hoisting)
fwd  Load Forwarding (L2L) and Store Forwarding (S2L)
dse  Dead-Store Elimination
abc  Array Bounds Check Elimination
sink  Allocation/Store Sinking
fuse  Fusion of operands into instructions

Here are the parameters and their default settings:

Parameter Default  
maxtrace1000Max. number of traces in the cache
maxrecord4000Max. number of recorded IR instructions
maxirconst500Max. number of IR constants of a trace
maxside100Max. number of side traces of a root trace
maxsnap500Max. number of snapshots for a trace
hotloop56Number of iterations to detect a hot loop or hot call
hotexit10Number of taken exits to start a side trace
tryside4Number of attempts to compile a side trace
instunroll4Max. unroll factor for instable loops
loopunroll15Max. unroll factor for loop ops in side traces
callunroll3Max. unroll factor for pseudo-recursive calls
recunroll2Min. unroll factor for true recursion
sizemcode32Size of each machine code area in KBytes (Windows: 64K)
maxmcode512Max. total size of all machine code areas in KBytes

tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/bluequad-print.css0000644000000000000000000000472213306562377024261 0ustar rootroot/* Copyright (C) 2004-2017 Mike Pall. * * You are welcome to use the general ideas of this design for your own sites. * But please do not steal the stylesheet, the layout or the color scheme. */ body { font-family: serif; font-size: 11pt; margin: 0 3em; padding: 0; border: none; } a:link, a:visited, a:hover, a:active { text-decoration: none; background: transparent; color: #0000ff; } h1, h2, h3 { font-family: sans-serif; font-weight: bold; text-align: left; margin: 0.5em 0; padding: 0; } h1 { font-size: 200%; } h2 { font-size: 150%; } h3 { font-size: 125%; } p { margin: 0 0 0.5em 0; padding: 0; } ul, ol { margin: 0.5em 0; padding: 0 0 0 2em; } ul { list-style: outside square; } ol { list-style: outside decimal; } li { margin: 0; padding: 0; } dl { margin: 1em 0; padding: 1em; border: 1px solid black; } dt { font-weight: bold; margin: 0; padding: 0; } dt sup { float: right; margin-left: 1em; } dd { margin: 0.5em 0 0 2em; padding: 0; } table { table-layout: fixed; width: 100%; margin: 1em 0; padding: 0; border: 1px solid black; border-spacing: 0; border-collapse: collapse; } tr { margin: 0; padding: 0; border: none; } td { text-align: left; margin: 0; padding: 0.2em 0.5em; border-top: 1px solid black; border-bottom: 1px solid black; } tr.separate td { border-top: double; } tt, pre, code, kbd, samp { font-family: monospace; font-size: 75%; } kbd { font-weight: bolder; } blockquote, pre { margin: 1em 2em; padding: 0; } img { border: none; vertical-align: baseline; margin: 0; padding: 0; } img.left { float: left; margin: 0.5em 1em 0.5em 0; } img.right { float: right; margin: 0.5em 0 0.5em 1em; } .flush { clear: both; visibility: hidden; } .hide, .noprint, #nav { display: none !important; } .pagebreak { page-break-before: always; } #site { text-align: right; font-family: sans-serif; font-weight: bold; margin: 0 1em; border-bottom: 1pt 
solid black; } #site a { font-size: 1.2em; } #site a:link, #site a:visited { text-decoration: none; font-weight: bold; background: transparent; color: #ffffff; } #logo { color: #ff8000; } #head { clear: both; margin: 0 1em; } #main { line-height: 1.3; text-align: justify; margin: 1em; } #foot { clear: both; font-size: 80%; text-align: center; margin: 0 1.25em; padding: 0.5em 0 0 0; border-top: 1pt solid black; page-break-before: avoid; page-break-after: avoid; } tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/img/0000755000000000000000000000000013306562377021362 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/img/contact.png0000644000000000000000000000247413306562377023532 0ustar rootrootPNG  IHDR (PLTEiktfiqKMSIKQGIOEGM237./3*+/{~y|uxsv`bj^`hY\cWZaEFL>@E<>C)*-'(+%&)#$'!"% ps|nqzjmvWY`SU\NQW:;@57;Ϲͷ"Ǡ~|gjrehpcfnadlNPVLNTHJPFHN348126/04-.2+,0Į|z}x{vytwack_ai]_gZ]dX[bDEK?AF=?D(),Խ$%(  svqt}or{mpyknwVX_PRY68<46:θ # vIDATHǽ_QO=O QӰ̭H$JfIGsH aM.=?l{Vk]\^9>|yc1͸CG0"> D l;r_f,:?dbtԨ&ث;T{_6]xNost_2=s;\&r8L>f^z@>jTՙ~o^;\G _K?vF{Em:;: Md]uK=j}ٹU 7Y[W\߃Y@Qxp@R5Т PԐpr^[e~Ljќ1cbx4Fu4·.(ܮSfA5@I~A9gi,Ts͡?xŕ֚cM5uvw, ADv 7OWVejb8UU PF\ā}RLhJ?mpo%@$ ! |]Mӏ9AYҖg"Ɗ)sxUmIKr]+ 8tSA@z[r Gݙ \Q/mOHjDR 6,"pVhW/OvU r/{k9)/1{:gw~3s1gO=`=BgGǹ\7h`M]m]d/N}4IENDB`tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/install.html0000644000000000000000000006124213306562377023147 0ustar rootroot Installation

LuaJIT is only distributed as a source package. This page explains how to build and install LuaJIT with different operating systems and C compilers.

For the impatient (on POSIX systems):

make && sudo make install

LuaJIT currently builds out-of-the box on most systems. Here's the compatibility matrix for the supported combinations of operating systems, CPUs and compilers:

CPU / OS Linux or
Android
*BSD, Other OSX 10.4+ or
iOS 3.0+
Windows
XP/Vista/7
x86 (32 bit) GCC 4.2+ GCC 4.2+ XCode 5.0+
Clang
MSVC, MSVC/EE
WinSDK
MinGW, Cygwin
x64 (64 bit) GCC 4.2+ GCC 4.2+
ORBIS (PS4)
XCode 5.0+
Clang
MSVC + SDK v7.0
WinSDK v7.0
Durango (Xbox One)
ARMv5+
ARM9E+
GCC 4.2+ GCC 4.2+
PSP2 (PS VITA)
XCode 5.0+
Clang
 
ARM64 GCC 4.8+   XCode 6.0+
Clang 3.5+
 
PPC GCC 4.3+ GCC 4.3+
GCC 4.1 (PS3)
  XEDK (Xbox 360)
MIPS32
MIPS64
GCC 4.3+ GCC 4.3+    

Configuring LuaJIT

The standard configuration should work fine for most installations. Usually there is no need to tweak the settings. The following files hold all user-configurable settings:

  • src/luaconf.h sets some configuration variables.
  • Makefile has settings for installing LuaJIT (POSIX only).
  • src/Makefile has settings for compiling LuaJIT under POSIX, MinGW or Cygwin.
  • src/msvcbuild.bat has settings for compiling LuaJIT with MSVC or WinSDK.

Please read the instructions given in these files, before changing any settings.

LuaJIT on x64 currently uses 32 bit GC objects by default. LJ_GC64 mode may be explicitly enabled: add XCFLAGS=-DLUAJIT_ENABLE_GC64 to the make command or run msvcbuild gc64 for MSVC/WinSDK. Please check the note about the bytecode format differences, too.

POSIX Systems (Linux, OSX, *BSD etc.)

Prerequisites

Depending on your distribution, you may need to install a package for GCC, the development headers and/or a complete SDK. E.g. on a current Debian/Ubuntu, install libc6-dev with the package manager.

Download the current source package of LuaJIT (pick the .tar.gz), if you haven't already done so. Move it to a directory of your choice, open a terminal window and change to this directory. Now unpack the archive and change to the newly created directory:

tar zxf LuaJIT-2.0.5.tar.gz
cd LuaJIT-2.0.5

Building LuaJIT

The supplied Makefiles try to auto-detect the settings needed for your operating system and your compiler. They need to be run with GNU Make, which is probably the default on your system, anyway. Simply run:

make

This always builds a native binary, depending on the host OS you're running this command on. Check the section on cross-compilation for more options.

By default, modules are only searched under the prefix /usr/local. You can add an extra prefix to the search paths by appending the PREFIX option, e.g.:

make PREFIX=/home/myself/lj2

Note for OSX: if the MACOSX_DEPLOYMENT_TARGET environment variable is not set, then it's forced to 10.4.

Installing LuaJIT

The top-level Makefile installs LuaJIT by default under /usr/local, i.e. the executable ends up in /usr/local/bin and so on. You need root privileges to write to this path. So, assuming sudo is installed on your system, run the following command and enter your sudo password:

sudo make install

Otherwise specify the directory prefix as an absolute path, e.g.:

make install PREFIX=/home/myself/lj2

Obviously the prefixes given during build and installation need to be the same.

Windows Systems

Prerequisites

Either install one of the open source SDKs (» MinGW or » Cygwin), which come with a modified GCC plus the required development headers.

Or install Microsoft's Visual C++ (MSVC). The freely downloadable » Express Edition works just fine, but only contains an x86 compiler.

The freely downloadable » Windows SDK only comes with command line tools, but this is all you need to build LuaJIT. It contains x86 and x64 compilers.

Next, download the source package and unpack it using an archive manager (e.g. the Windows Explorer) to a directory of your choice.

Building with MSVC

Open a "Visual Studio .NET Command Prompt", cd to the directory where you've unpacked the sources and run these commands:

cd src
msvcbuild

Then follow the installation instructions below.

Building with the Windows SDK

Open a "Windows SDK Command Shell" and select the x86 compiler:

setenv /release /x86

Or select the x64 compiler:

setenv /release /x64

Then cd to the directory where you've unpacked the sources and run these commands:

cd src
msvcbuild

Then follow the installation instructions below.

Building with MinGW or Cygwin

Open a command prompt window and make sure the MinGW or Cygwin programs are in your path. Then cd to the directory where you've unpacked the sources and run this command for MinGW:

mingw32-make

Or this command for Cygwin:

make

Then follow the installation instructions below.

Installing LuaJIT

Copy luajit.exe and lua51.dll (built in the src directory) to a newly created directory (any location is ok). Add lua and lua\jit directories below it and copy all Lua files from the src\jit directory of the distribution to the latter directory.

There are no hardcoded absolute path names — all modules are loaded relative to the directory where luajit.exe is installed (see src/luaconf.h).

Cross-compiling LuaJIT

First, let's clear up some terminology:

  • Host: This is your development system, usually based on a x64 or x86 CPU.
  • Target: This is the target system you want LuaJIT to run on, e.g. Android/ARM.
  • Toolchain: This comprises a C compiler, linker, assembler and a matching C library.
  • Host (or system) toolchain: This is the toolchain used to build native binaries for your host system.
  • Cross-compile toolchain: This is the toolchain used to build binaries for the target system. They can only be run on the target system.

The GNU Makefile-based build system allows cross-compiling on any host for any supported target:

  • Yes, you need a toolchain for both your host and your target!
  • Both host and target architectures must have the same pointer size.
  • E.g. if you want to cross-compile to a 32 bit target on a 64 bit host, you need to install the multilib development package (e.g. libc6-dev-i386 on Debian/Ubuntu) and build a 32 bit host part (HOST_CC="gcc -m32").
  • 64 bit targets always require compilation on a 64 bit host.

You need to specify TARGET_SYS whenever the host OS and the target OS differ, or you'll get assembler or linker errors:

  • E.g. if you're compiling on a Windows or OSX host for embedded Linux or Android, you need to add TARGET_SYS=Linux to the examples below.
  • For a minimal target OS, you may need to disable the built-in allocator in src/Makefile and use TARGET_SYS=Other.
  • Don't forget to specify the same TARGET_SYS for the install step, too.

Here are some examples where host and target have the same CPU:

# Cross-compile to a 32 bit binary on a multilib x64 OS
make CC="gcc -m32"

# Cross-compile on Debian/Ubuntu for Windows (mingw32 package)
make HOST_CC="gcc -m32" CROSS=i586-mingw32msvc- TARGET_SYS=Windows

The CROSS prefix allows specifying a standard GNU cross-compile toolchain (Binutils, GCC and a matching libc). The prefix may vary depending on the --target the toolchain was built for (note the CROSS prefix has a trailing "-"). The examples below use the canonical toolchain triplets for Linux.

Since there's often no easy way to detect CPU features at runtime, it's important to compile with the proper CPU or architecture settings:

  • The best way to get consistent results is to specify the correct settings when building the toolchain yourself.
  • For a pre-built, generic toolchain add -mcpu=... or -march=... and other necessary flags to TARGET_CFLAGS.
  • For ARM it's important to have the correct -mfloat-abi=... setting, too. Otherwise LuaJIT may not run at the full performance of your target CPU.
  • For MIPS it's important to select a supported ABI (o32 on MIPS32, n64 on MIPS64) and consistently compile your project either with hard-float or soft-float compiler settings.

Here are some examples for targets with a different CPU than the host:

# ARM soft-float
make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi- \
     TARGET_CFLAGS="-mfloat-abi=soft"

# ARM soft-float ABI with VFP (example for Cortex-A9)
make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabi- \
     TARGET_CFLAGS="-mcpu=cortex-a9 -mfloat-abi=softfp"

# ARM hard-float ABI with VFP (armhf, most modern toolchains)
make HOST_CC="gcc -m32" CROSS=arm-linux-gnueabihf-

# ARM64
make CROSS=aarch64-linux-

# PPC
make HOST_CC="gcc -m32" CROSS=powerpc-linux-gnu-

# MIPS32 big-endian
make HOST_CC="gcc -m32" CROSS=mips-linux-
# MIPS32 little-endian
make HOST_CC="gcc -m32" CROSS=mipsel-linux-

# MIPS64 big-endian
make CROSS=mips-linux- TARGET_CFLAGS="-mips64r2 -mabi=64"
# MIPS64 little-endian
make CROSS=mipsel-linux- TARGET_CFLAGS="-mips64r2 -mabi=64"

You can cross-compile for Android using the Android NDK. The environment variables need to match the install locations and the desired target platform. E.g. Android 4.0 corresponds to ABI level 14. For details check the folder docs in the NDK directory.

Only a few common variations for the different CPUs, ABIs and platforms are listed. Please use your own judgement for which combination you want to build/deploy or which lowest common denominator you want to pick:

# Android/ARM, armeabi (ARMv5TE soft-float), Android 2.2+ (Froyo)
NDK=/opt/android/ndk
NDKABI=8
NDKVER=$NDK/toolchains/arm-linux-androideabi-4.9
NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"

# Android/ARM, armeabi-v7a (ARMv7 VFP), Android 4.0+ (ICS)
NDK=/opt/android/ndk
NDKABI=14
NDKVER=$NDK/toolchains/arm-linux-androideabi-4.9
NDKP=$NDKVER/prebuilt/linux-x86/bin/arm-linux-androideabi-
NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-arm"
NDKARCH="-march=armv7-a -mfloat-abi=softfp -Wl,--fix-cortex-a8"
make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF $NDKARCH"

# Android/MIPS, mipsel (MIPS32R1 hard-float), Android 4.0+ (ICS)
NDK=/opt/android/ndk
NDKABI=14
NDKVER=$NDK/toolchains/mipsel-linux-android-4.9
NDKP=$NDKVER/prebuilt/linux-x86/bin/mipsel-linux-android-
NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-mips"
make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"

# Android/x86, x86 (i686 SSE3), Android 4.0+ (ICS)
NDK=/opt/android/ndk
NDKABI=14
NDKVER=$NDK/toolchains/x86-4.9
NDKP=$NDKVER/prebuilt/linux-x86/bin/i686-linux-android-
NDKF="--sysroot $NDK/platforms/android-$NDKABI/arch-x86"
make HOST_CC="gcc -m32" CROSS=$NDKP TARGET_FLAGS="$NDKF"

You can cross-compile for iOS 3.0+ (iPhone/iPad) using the » iOS SDK:

Note: the JIT compiler is disabled for iOS, because regular iOS Apps are not allowed to generate code at runtime. You'll only get the performance of the LuaJIT interpreter on iOS. This is still faster than plain Lua, but much slower than the JIT compiler. Please complain to Apple, not me. Or use Android. :-p

# iOS/ARM (32 bit)
ISDKP=$(xcrun --sdk iphoneos --show-sdk-path)
ICC=$(xcrun --sdk iphoneos --find clang)
ISDKF="-arch armv7 -isysroot $ISDKP"
make DEFAULT_CC=clang HOST_CC="clang -m32 -arch i386" \
     CROSS="$(dirname $ICC)/" TARGET_FLAGS="$ISDKF" TARGET_SYS=iOS

# iOS/ARM64
ISDKP=$(xcrun --sdk iphoneos --show-sdk-path)
ICC=$(xcrun --sdk iphoneos --find clang)
ISDKF="-arch arm64 -isysroot $ISDKP"
make DEFAULT_CC=clang CROSS="$(dirname $ICC)/" \
     TARGET_FLAGS="$ISDKF" TARGET_SYS=iOS

Cross-compiling for consoles

Building LuaJIT for consoles requires both a supported host compiler (x86 or x64) and a cross-compiler (to PPC or ARM) from the official console SDK.

Due to restrictions on consoles, the JIT compiler is disabled and only the fast interpreter is built. This is still faster than plain Lua, but much slower than the JIT compiler. The FFI is disabled, too, since it's not very useful in such an environment.

The following commands build a static library libluajit.a, which can be linked against your game, just like the Lua library.

To cross-compile for PS3 from a Linux host (requires 32 bit GCC, i.e. multilib Linux/x64) or a Windows host (requires 32 bit MinGW), run this command:

make HOST_CC="gcc -m32" CROSS=ppu-lv2-

To cross-compile for PS4 from a Windows host, open a "Visual Studio .NET Command Prompt" (64 bit host compiler), cd to the directory where you've unpacked the sources and run the following commands:

cd src
ps4build

To cross-compile for PS Vita from a Windows host, open a "Visual Studio .NET Command Prompt" (32 bit host compiler), cd to the directory where you've unpacked the sources and run the following commands:

cd src
psvitabuild

To cross-compile for Xbox 360 from a Windows host, open a "Visual Studio .NET Command Prompt" (32 bit host compiler), cd to the directory where you've unpacked the sources and run the following commands:

cd src
xedkbuild

To cross-compile for Xbox One from a Windows host, open a "Visual Studio .NET Command Prompt" (64 bit host compiler), cd to the directory where you've unpacked the sources and run the following commands:

cd src
xb1build

Embedding LuaJIT

LuaJIT is API-compatible with Lua 5.1. If you've already embedded Lua into your application, you probably don't need to do anything to switch to LuaJIT, except link with a different library:

  • It's strongly suggested to build LuaJIT separately using the supplied build system. Please do not attempt to integrate the individual source files into your build tree. You'll most likely get the internal build dependencies wrong or mess up the compiler flags. Treat LuaJIT like any other external library and link your application with either the dynamic or static library, depending on your needs.
  • If you want to load C modules compiled for plain Lua with require(), you need to make sure the public symbols (e.g. lua_pushnumber) are exported, too:
    • On POSIX systems you can either link to the shared library or link the static library into your application. In the latter case you'll need to export all public symbols from your main executable (e.g. -Wl,-E on Linux) and add the external dependencies (e.g. -lm -ldl on Linux).
    • Since Windows symbols are bound to a specific DLL name, you need to link to the lua51.dll created by the LuaJIT build (do not rename the DLL). You may link LuaJIT statically on Windows only if you don't intend to load Lua/C modules at runtime.
  • If you're building a 64 bit application on OSX which links directly or indirectly against LuaJIT which is not built for LJ_GC64 mode, you need to link your main executable with these flags:
    -pagezero_size 10000 -image_base 100000000
    

Additional hints for initializing LuaJIT using the C API functions:

  • Here's a » simple example for embedding Lua or LuaJIT into your application.
  • Make sure you use luaL_newstate. Avoid using lua_newstate, since this uses the (slower) default memory allocator from your system (no support for this on x64).
  • Make sure you use luaL_openlibs and not the old Lua 5.0 style of calling luaopen_base etc. directly.
  • To change or extend the list of standard libraries to load, copy src/lib_init.c to your project and modify it accordingly. Make sure the jit library is loaded or the JIT compiler will not be activated.
  • The bit.* module for bitwise operations is already built-in. There's no need to statically link » Lua BitOp to your application.

Hints for Distribution Maintainers

The LuaJIT build system has extra provisions for the needs of most POSIX-based distributions. If you're a package maintainer for a distribution, please make use of these features and avoid patching, subverting, autotoolizing or messing up the build system in unspeakable ways.

There should be absolutely no need to patch luaconf.h or any of the Makefiles. And please do not hand-pick files for your packages — simply use whatever make install creates. There's a reason for all of the files and directories it creates.

The build system uses GNU make and auto-detects most settings based on the host you're building it on. This should work fine for native builds, even when sandboxed. You may need to pass some of the following flags to both the make and the make install command lines for a regular distribution build:

  • PREFIX overrides the installation path and should usually be set to /usr. Setting this also changes the module paths and the paths needed to locate the shared library.
  • DESTDIR is an absolute path which allows you to install to a shadow tree instead of the root tree of the build system.
  • MULTILIB sets the architecture-specific library path component for multilib systems. The default is lib.
  • Have a look at the top-level Makefile and src/Makefile for additional variables to tweak. The following variables may be overridden, but it's not recommended, except for special needs like cross-builds: BUILDMODE, CC, HOST_CC, STATIC_CC, DYNAMIC_CC, CFLAGS, HOST_CFLAGS, TARGET_CFLAGS, LDFLAGS, HOST_LDFLAGS, TARGET_LDFLAGS, TARGET_SHLDFLAGS, TARGET_FLAGS, LIBS, HOST_LIBS, TARGET_LIBS, CROSS, HOST_SYS, TARGET_SYS

The build system has a special target for an amalgamated build, i.e. make amalg. This compiles the LuaJIT core as one huge C file and allows GCC to generate faster and shorter code. Alas, this requires lots of memory during the build. This may be a problem for some users, that's why it's not enabled by default. But it shouldn't be a problem for most build farms. It's recommended that binary distributions use this target for their LuaJIT builds.

The tl;dr version of the above:

make amalg PREFIX=/usr && \
make install PREFIX=/usr DESTDIR=/tmp/buildroot

Finally, if you encounter any difficulties, please contact me first, instead of releasing a broken package onto unsuspecting users. Because they'll usually complain to me (the upstream) and not you (the package maintainer), anyway.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/status.html0000644000000000000000000000753313306562377023027 0ustar rootroot Status

LuaJIT 2.0 is the current stable branch. This branch is in feature-freeze — new features will only be added to LuaJIT 2.1.

Current Status

LuaJIT ought to run all Lua 5.1-compatible source code just fine. It's considered a serious bug if the VM crashes or produces unexpected results — please report this.

Known incompatibilities and issues in LuaJIT 2.0:

  • There are some differences in implementation-defined behavior. These either have a good reason, are arbitrary design choices or are due to quirks in the VM. The latter cases may get fixed if a demonstrable need is shown.
  • The Lua debug API is missing a couple of features (return hooks for non-Lua functions) and shows slightly different behavior in LuaJIT (no per-coroutine hooks, no tail call counting).
  • Currently some out-of-memory errors from on-trace code are not handled correctly. The error may fall through an on-trace pcall or it may be passed on to the function set with lua_atpanic on x64. This issue will be fixed with the new garbage collector.
  • LuaJIT on 64 bit systems provides a limited range of 47 bits for the legacy lightuserdata data type. This is only relevant on x64 systems which use the negative part of the virtual address space in user mode, e.g. Solaris/x64, and on ARM64 systems configured with a 48 bit or 52 bit VA. Avoid using lightuserdata to hold pointers that may point outside of that range, e.g. variables on the stack. In general, avoid this data type for new code and replace it with (much more performant) FFI bindings. FFI cdata pointers can address the full 64 bit range.

tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/contact.html0000644000000000000000000000565513306562377023142 0ustar rootroot Contact

If you want to report bugs, propose fixes or suggest enhancements, please use the GitHub issue tracker.

Please send general questions to the » LuaJIT mailing list.

You can also send any questions you have directly to me:

Copyright

All documentation is Copyright © 2005-2017 Mike Pall.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/ext_ffi_tutorial.html0000644000000000000000000005403513306562377025052 0ustar rootroot FFI Tutorial

This page is intended to give you an overview of the features of the FFI library by presenting a few use cases and guidelines.

This page makes no attempt to explain all of the FFI library, though. You'll want to have a look at the ffi.* API function reference and the FFI semantics to learn more.

Loading the FFI Library

The FFI library is built into LuaJIT by default, but it's not loaded and initialized by default. The suggested way to use the FFI library is to add the following to the start of every Lua file that needs one of its functions:

local ffi = require("ffi")

Please note this doesn't define an ffi variable in the table of globals — you really need to use the local variable. The require function ensures the library is only loaded once.

Note: If you want to experiment with the FFI from the interactive prompt of the command line executable, omit the local, as it doesn't preserve local variables across lines.

Accessing Standard System Functions

The following code explains how to access standard system functions. We slowly print two lines of dots by sleeping for 10 milliseconds after each dot:

 
①





②
③
④



⑤





⑥local ffi = require("ffi")
ffi.cdef[[
void Sleep(int ms);
int poll(struct pollfd *fds, unsigned long nfds, int timeout);
]]

local sleep
if ffi.os == "Windows" then
  function sleep(s)
    ffi.C.Sleep(s*1000)
  end
else
  function sleep(s)
    ffi.C.poll(nil, 0, s*1000)
  end
end

for i=1,160 do
  io.write("."); io.flush()
  sleep(0.01)
end
io.write("\n")

Here's the step-by-step explanation:

This defines the C library functions we're going to use. The part inside the double-brackets (in green) is just standard C syntax. You can usually get this info from the C header files or the documentation provided by each C library or C compiler.

The difficulty we're facing here, is that there are different standards to choose from. Windows has a simple Sleep() function. On other systems there are a variety of functions available to achieve sub-second sleeps, but with no clear consensus. Thankfully poll() can be used for this task, too, and it's present on most non-Windows systems. The check for ffi.os makes sure we use the Windows-specific function only on Windows systems.

Here we're wrapping the call to the C function in a Lua function. This isn't strictly necessary, but it's helpful to deal with system-specific issues only in one part of the code. The way we're wrapping it ensures the check for the OS is only done during initialization and not for every call.

A more subtle point is that we defined our sleep() function (for the sake of this example) as taking the number of seconds, but accepting fractional seconds. Multiplying this by 1000 gets us milliseconds, but that still leaves it a Lua number, which is a floating-point value. Alas, the Sleep() function only accepts an integer value. Luckily for us, the FFI library automatically performs the conversion when calling the function (truncating the FP value towards zero, like in C).

Some readers will notice that Sleep() is part of KERNEL32.DLL and is also a stdcall function. So how can this possibly work? The FFI library provides the ffi.C default C library namespace, which allows calling functions from the default set of libraries, like a C compiler would. Also, the FFI library automatically detects stdcall functions, so you don't need to declare them as such.

The poll() function takes a couple more arguments we're not going to use. You can simply use nil to pass a NULL pointer and 0 for the nfds parameter. Please note that the number 0 does not convert to a pointer value, unlike in C++. You really have to pass pointers to pointer arguments and numbers to number arguments.

The page on FFI semantics has all of the gory details about conversions between Lua objects and C types. For the most part you don't have to deal with this, as it's performed automatically and it's carefully designed to bridge the semantic differences between Lua and C.

Now that we have defined our own sleep() function, we can just call it from plain Lua code. That wasn't so bad, huh? Turning these boring animated dots into a fascinating best-selling game is left as an exercise for the reader. :-)

Accessing the zlib Compression Library

The following code shows how to access the zlib compression library from Lua code. We'll define two convenience wrapper functions that take a string and compress or uncompress it to another string:

 
①






②


③

④


⑤


⑥







⑦local ffi = require("ffi")
ffi.cdef[[
unsigned long compressBound(unsigned long sourceLen);
int compress2(uint8_t *dest, unsigned long *destLen,
	      const uint8_t *source, unsigned long sourceLen, int level);
int uncompress(uint8_t *dest, unsigned long *destLen,
	       const uint8_t *source, unsigned long sourceLen);
]]
local zlib = ffi.load(ffi.os == "Windows" and "zlib1" or "z")

local function compress(txt)
  local n = zlib.compressBound(#txt)
  local buf = ffi.new("uint8_t[?]", n)
  local buflen = ffi.new("unsigned long[1]", n)
  local res = zlib.compress2(buf, buflen, txt, #txt, 9)
  assert(res == 0)
  return ffi.string(buf, buflen[0])
end

local function uncompress(comp, n)
  local buf = ffi.new("uint8_t[?]", n)
  local buflen = ffi.new("unsigned long[1]", n)
  local res = zlib.uncompress(buf, buflen, comp, #comp)
  assert(res == 0)
  return ffi.string(buf, buflen[0])
end

-- Simple test code.
local txt = string.rep("abcd", 1000)
print("Uncompressed size: ", #txt)
local c = compress(txt)
print("Compressed size: ", #c)
local txt2 = uncompress(c, #txt)
assert(txt2 == txt)

Here's the step-by-step explanation:

This defines some of the C functions provided by zlib. For the sake of this example, some type indirections have been reduced and it uses the pre-defined fixed-size integer types, while still adhering to the zlib API/ABI.

This loads the zlib shared library. On POSIX systems it's named libz.so and usually comes pre-installed. Since ffi.load() automatically adds any missing standard prefixes/suffixes, we can simply load the "z" library. On Windows it's named zlib1.dll and you'll have to download it first from the » zlib site. The check for ffi.os makes sure we pass the right name to ffi.load().

First, the maximum size of the compression buffer is obtained by calling the zlib.compressBound function with the length of the uncompressed string. The next line allocates a byte buffer of this size. The [?] in the type specification indicates a variable-length array (VLA). The actual number of elements of this array is given as the 2nd argument to ffi.new().

This may look strange at first, but have a look at the declaration of the compress2 function from zlib: the destination length is defined as a pointer! This is because you pass in the maximum buffer size and get back the actual length that was used.

In C you'd pass in the address of a local variable (&buflen). But since there's no address-of operator in Lua, we'll just pass in a one-element array. Conveniently it can be initialized with the maximum buffer size in one step. Calling the actual zlib.compress2 function is then straightforward.

We want to return the compressed data as a Lua string, so we'll use ffi.string(). It needs a pointer to the start of the data and the actual length. The length has been returned in the buflen array, so we'll just get it from there.

Note that since the function returns now, the buf and buflen variables will eventually be garbage collected. This is fine, because ffi.string() has copied the contents to a newly created (interned) Lua string. If you plan to call this function lots of times, consider reusing the buffers and/or handing back the results in buffers instead of strings. This will reduce the overhead for garbage collection and string interning.

The uncompress function does the exact opposite of the compress function. The compressed data doesn't include the size of the original string, so this needs to be passed in. Otherwise no surprises here.

The code, that makes use of the functions we just defined, is just plain Lua code. It doesn't need to know anything about the LuaJIT FFI — the convenience wrapper functions completely hide it.

One major advantage of the LuaJIT FFI is that you are now able to write those wrappers in Lua. And at a fraction of the time it would cost you to create an extra C module using the Lua/C API. Many of the simpler C functions can probably be used directly from your Lua code, without any wrappers.

Side note: the zlib API uses the long type for passing lengths and sizes around. But all those zlib functions actually only deal with 32 bit values. This is an unfortunate choice for a public API, but may be explained by zlib's history — we'll just have to deal with it.

First, you should know that a long is a 64 bit type e.g. on POSIX/x64 systems, but a 32 bit type on Windows/x64 and on 32 bit systems. Thus a long result can be either a plain Lua number or a boxed 64 bit integer cdata object, depending on the target system.

Ok, so the ffi.* functions generally accept cdata objects wherever you'd want to use a number. That's why we get away with passing n to ffi.string() above. But other Lua library functions or modules don't know how to deal with this. So for maximum portability one needs to use tonumber() on returned long results before passing them on. Otherwise the application might work on some systems, but would fail in a POSIX/x64 environment.

Defining Metamethods for a C Type

The following code explains how to define metamethods for a C type. We define a simple point type and add some operations to it:

 
①



②

③

④



⑤

⑥local ffi = require("ffi")
ffi.cdef[[
typedef struct { double x, y; } point_t;
]]

local point
local mt = {
  __add = function(a, b) return point(a.x+b.x, a.y+b.y) end,
  __len = function(a) return math.sqrt(a.x*a.x + a.y*a.y) end,
  __index = {
    area = function(a) return a.x*a.x + a.y*a.y end,
  },
}
point = ffi.metatype("point_t", mt)

local a = point(3, 4)
print(a.x, a.y)  --> 3  4
print(#a)        --> 5
print(a:area())  --> 25
local b = a + point(0.5, 8)
print(#b)        --> 12.5

Here's the step-by-step explanation:

This defines the C type for a two-dimensional point object.

We have to declare the variable holding the point constructor first, because it's used inside of a metamethod.

Let's define an __add metamethod which adds the coordinates of two points and creates a new point object. For simplicity, this function assumes that both arguments are points. But it could be any mix of objects, if at least one operand is of the required type (e.g. adding a point plus a number or vice versa). Our __len metamethod returns the distance of a point to the origin.

If we run out of operators, we can define named methods, too. Here the __index table defines an area function. For custom indexing needs, one might want to define __index and __newindex functions instead.

This associates the metamethods with our C type. This only needs to be done once. For convenience, a constructor is returned by ffi.metatype(). We're not required to use it, though. The original C type can still be used e.g. to create an array of points. The metamethods automatically apply to any and all uses of this type.

Please note that the association with a metatable is permanent and the metatable must not be modified afterwards! Ditto for the __index table.

Here are some simple usage examples for the point type and their expected results. The pre-defined operations (such as a.x) can be freely mixed with the newly defined metamethods. Note that area is a method and must be called with the Lua syntax for methods: a:area(), not a.area().

The C type metamethod mechanism is most useful when used in conjunction with C libraries that are written in an object-oriented style. Creators return a pointer to a new instance and methods take an instance pointer as the first argument. Sometimes you can just point __index to the library namespace and __gc to the destructor and you're done. But often enough you'll want to add convenience wrappers, e.g. to return actual Lua strings or when returning multiple values.

Some C libraries only declare instance pointers as an opaque void * type. In this case you can use a fake type for all declarations, e.g. a pointer to a named (incomplete) struct will do: typedef struct foo_type *foo_handle. The C side doesn't know what you declare with the LuaJIT FFI, but as long as the underlying types are compatible, everything still works.

Translating C Idioms

Here's a list of common C idioms and their translation to the LuaJIT FFI:

Idiom C code Lua code
Pointer dereference
int *p;
x = *p;
*p = y;
x = p[0]
p[0] = y
Pointer indexing
int i, *p;
x = p[i];
p[i+1] = y;
x = p[i]
p[i+1] = y
Array indexing
int i, a[];
x = a[i];
a[i+1] = y;
x = a[i]
a[i+1] = y
struct/union dereference
struct foo s;
x = s.field;
s.field = y;
x = s.field
s.field = y
struct/union pointer deref.
struct foo *sp;
x = sp->field;
sp->field = y;
x = s.field
s.field = y
Pointer arithmetic
int i, *p;
x = p + i;
y = p - i;
x = p + i
y = p - i
Pointer difference
int *p1, *p2;
x = p1 - p2;x = p1 - p2
Array element pointer
int i, a[];
x = &a[i];x = a+i
Cast pointer to address
int *p;
x = (intptr_t)p;x = tonumber(
 ffi.cast("intptr_t",
          p))
Functions with outargs
void foo(int *inoutlen);
int len = x;
foo(&len);
y = len;
local len =
  ffi.new("int[1]", x)
foo(len)
y = len[0]
Vararg conversions
int printf(char *fmt, ...);
printf("%g", 1.0);
printf("%d", 1);
 
printf("%g", 1);
printf("%d",
  ffi.new("int", 1))

To Cache or Not to Cache

It's a common Lua idiom to cache library functions in local variables or upvalues, e.g.:

local byte, char = string.byte, string.char
local function foo(x)
  return char(byte(x)+1)
end

This replaces several hash-table lookups with a (faster) direct use of a local or an upvalue. This is less important with LuaJIT, since the JIT compiler optimizes hash-table lookups a lot and is even able to hoist most of them out of the inner loops. It can't eliminate all of them, though, and it saves some typing for often-used functions. So there's still a place for this, even with LuaJIT.

The situation is a bit different with C function calls via the FFI library. The JIT compiler has special logic to eliminate all of the lookup overhead for functions resolved from a C library namespace! Thus it's not helpful and actually counter-productive to cache individual C functions like this:

local funca, funcb = ffi.C.funca, ffi.C.funcb -- Not helpful!
local function foo(x, n)
  for i=1,n do funcb(funca(x, i), 1) end
end

This turns them into indirect calls and generates bigger and slower machine code. Instead you'll want to cache the namespace itself and rely on the JIT compiler to eliminate the lookups:

local C = ffi.C          -- Instead use this!
local function foo(x, n)
  for i=1,n do C.funcb(C.funca(x, i), 1) end
end

This generates both shorter and faster code. So don't cache C functions, but do cache namespaces! Most often the namespace is already in a local variable at an outer scope, e.g. from local lib = ffi.load(...). Note that copying it to a local variable in the function scope is unnecessary.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/extensions.html0000644000000000000000000004250513306562377023701 0ustar rootroot Extensions

LuaJIT is fully upwards-compatible with Lua 5.1. It supports all » standard Lua library functions and the full set of » Lua/C API functions.

LuaJIT is also fully ABI-compatible to Lua 5.1 at the linker/dynamic loader level. This means you can compile a C module against the standard Lua headers and load the same shared library from either Lua or LuaJIT.

LuaJIT extends the standard Lua VM with new functionality and adds several extension modules. Please note this page is only about functional enhancements and not about performance enhancements, such as the optimized VM, the faster interpreter or the JIT compiler.

Extensions Modules

LuaJIT comes with several built-in extension modules:

bit.* — Bitwise operations

LuaJIT supports all bitwise operations as defined by » Lua BitOp:

bit.tobit  bit.tohex  bit.bnot    bit.band bit.bor  bit.bxor
bit.lshift bit.rshift bit.arshift bit.rol  bit.ror  bit.bswap

This module is a LuaJIT built-in — you don't need to download or install Lua BitOp. The Lua BitOp site has full documentation for all » Lua BitOp API functions. The FFI adds support for 64 bit bitwise operations, using the same API functions.

Please make sure to require the module before using any of its functions:

local bit = require("bit")

An already installed Lua BitOp module is ignored by LuaJIT. This way you can use bit operations from both Lua and LuaJIT on a shared installation.

ffi.* — FFI library

The FFI library allows calling external C functions and the use of C data structures from pure Lua code.

jit.* — JIT compiler control

The functions in this module control the behavior of the JIT compiler engine.

C API extensions

LuaJIT adds some extra functions to the Lua/C API.

Profiler

LuaJIT has an integrated profiler.

Enhanced Standard Library Functions

xpcall(f, err [,args...]) passes arguments

Unlike the standard implementation in Lua 5.1, xpcall() passes any arguments after the error function to the function which is called in a protected context.

loadfile() etc. handle UTF-8 source code

Non-ASCII characters are handled transparently by the Lua source code parser. This allows the use of UTF-8 characters in identifiers and strings. A UTF-8 BOM is skipped at the start of the source code.

tostring() etc. canonicalize NaN and ±Inf

All number-to-string conversions consistently convert non-finite numbers to the same strings on all platforms. NaN results in "nan", positive infinity results in "inf" and negative infinity results in "-inf".

tonumber() etc. use builtin string to number conversion

All string-to-number conversions consistently convert integer and floating-point inputs in decimal, hexadecimal and binary on all platforms. strtod() is not used anymore, which avoids numerous problems with poor C library implementations. The builtin conversion function provides full precision according to the IEEE-754 standard, it works independently of the current locale and it supports hex floating-point numbers (e.g. 0x1.5p-3).

string.dump(f [,strip]) generates portable bytecode

An extra argument has been added to string.dump(). If set to true, 'stripped' bytecode without debug information is generated. This speeds up later bytecode loading and reduces memory usage. See also the -b command line option.

The generated bytecode is portable and can be loaded on any architecture that LuaJIT supports, independent of word size or endianness. However the bytecode compatibility versions must match. Bytecode stays compatible for dot releases (x.y.0 → x.y.1), but may change with major or minor releases (2.0 → 2.1) or between any beta release. Foreign bytecode (e.g. from Lua 5.1) is incompatible and cannot be loaded.

Note: LJ_GC64 mode requires a different frame layout, which implies a different, incompatible bytecode format for ports that use this mode (e.g. ARM64 or MIPS64) or when explicitly enabled for x64. This may be rectified in the future.

table.new(narray, nhash) allocates a pre-sized table

An extra library function table.new() can be made available via require("table.new"). This creates a pre-sized table, just like the C API equivalent lua_createtable(). This is useful for big tables if the final table size is known and automatic table resizing is too expensive.

table.clear(tab) clears a table

An extra library function table.clear() can be made available via require("table.clear"). This clears all keys and values from a table, but preserves the allocated array/hash sizes. This is useful when a table, which is linked from multiple places, needs to be cleared and/or when recycling a table for use by the same context. This avoids managing backlinks, saves an allocation and the overhead of incremental array/hash part growth.

Please note this function is meant for very specific situations. In most cases it's better to replace the (usually single) link with a new table and let the GC do its work.

Enhanced PRNG for math.random()

LuaJIT uses a Tausworthe PRNG with period 2^223 to implement math.random() and math.randomseed(). The quality of the PRNG results is much superior compared to the standard Lua implementation which uses the platform-specific ANSI rand().

The PRNG generates the same sequences from the same seeds on all platforms and makes use of all bits in the seed argument. math.random() without arguments generates 52 pseudo-random bits for every call. The result is uniformly distributed between 0.0 and 1.0. It's correctly scaled up and rounded for math.random(n [,m]) to preserve uniformity.

io.* functions handle 64 bit file offsets

The file I/O functions in the standard io.* library handle 64 bit file offsets. In particular this means it's possible to open files larger than 2 Gigabytes and to reposition or obtain the current file position for offsets beyond 2 GB (fp:seek() method).

debug.* functions identify metamethods

debug.getinfo() and lua_getinfo() also return information about invoked metamethods. The namewhat field is set to "metamethod" and the name field has the name of the corresponding metamethod (e.g. "__index").

Fully Resumable VM

The LuaJIT VM is fully resumable. This means you can yield from a coroutine even across contexts, where this would not be possible with the standard Lua 5.1 VM: e.g. you can yield across pcall() and xpcall(), across iterators and across metamethods.

Extensions from Lua 5.2

LuaJIT supports some language and library extensions from Lua 5.2. Features that are unlikely to break existing code are unconditionally enabled:

  • goto and ::labels::.
  • Hex escapes '\x3F' and '\z' escape in strings.
  • load(string|reader [, chunkname [,mode [,env]]]).
  • loadstring() is an alias for load().
  • loadfile(filename [,mode [,env]]).
  • math.log(x [,base]).
  • string.rep(s, n [,sep]).
  • string.format(): %q reversible. %s checks __tostring. %a and %A added.
  • String matching pattern %g added.
  • io.read("*L").
  • io.lines() and file:lines() process io.read() options.
  • os.exit(status|true|false [,close]).
  • package.searchpath(name, path [, sep [, rep]]).
  • package.loadlib(name, "*").
  • debug.getinfo() returns nparams and isvararg for option "u".
  • debug.getlocal() accepts function instead of level.
  • debug.getlocal() and debug.setlocal() accept negative indexes for varargs.
  • debug.getupvalue() and debug.setupvalue() handle C functions.
  • debug.upvalueid() and debug.upvaluejoin().
  • Lua/C API extensions: lua_version() lua_upvalueid() lua_upvaluejoin() lua_loadx() lua_copy() lua_tonumberx() lua_tointegerx() luaL_fileresult() luaL_execresult() luaL_loadfilex() luaL_loadbufferx() luaL_traceback() luaL_setfuncs() luaL_pushmodule() luaL_newlibtable() luaL_newlib() luaL_testudata() luaL_setmetatable()
  • Command line option -E.
  • Command line checks __tostring for errors.

Other features are only enabled, if LuaJIT is built with -DLUAJIT_ENABLE_LUA52COMPAT:

  • goto is a keyword and not a valid variable name anymore.
  • break can be placed anywhere. Empty statements (;;) are allowed.
  • __lt, __le are invoked for mixed types.
  • __len for tables. rawlen() library function.
  • pairs() and ipairs() check for __pairs and __ipairs.
  • coroutine.running() returns two results.
  • table.pack() and table.unpack() (same as unpack()).
  • io.write() and file:write() return file handle instead of true.
  • os.execute() and pipe:close() return detailed exit status.
  • debug.setmetatable() returns object.
  • debug.getuservalue() and debug.setuservalue().
  • Remove math.mod(), string.gfind().
  • package.searchers.
  • module() returns the module table.

Note: this provides only partial compatibility with Lua 5.2 at the language and Lua library level. LuaJIT is API+ABI-compatible with Lua 5.1, which prevents implementing features that would otherwise break the Lua/C API and ABI (e.g. _ENV).

Extensions from Lua 5.3

LuaJIT supports some extensions from Lua 5.3:

  • Unicode escape '\u{XX...}' embeds the UTF-8 encoding in string literals.
  • The argument table arg can be read (and modified) by LUA_INIT and -e chunks.
  • io.read() and file:read() accept formats with or without a leading *.
  • table.move(a1, f, e, t [,a2]).
  • coroutine.isyieldable().
  • Lua/C API extensions: lua_isyieldable()

C++ Exception Interoperability

LuaJIT has built-in support for interoperating with C++ exceptions. The available range of features depends on the target platform and the toolchain used to compile LuaJIT:

Platform Compiler Interoperability
POSIX/x64, DWARF2 unwinding GCC 4.3+, Clang Full
ARM -DLUAJIT_UNWIND_EXTERNAL GCC, Clang Full
Other platforms, DWARF2 unwinding GCC, Clang Limited
Windows/x64 MSVC or WinSDK Full
Windows/x86 Any Full
Other platforms Other compilers No

Full interoperability means:

  • C++ exceptions can be caught on the Lua side with pcall(), lua_pcall() etc.
  • C++ exceptions will be converted to the generic Lua error "C++ exception", unless you use the C call wrapper feature.
  • It's safe to throw C++ exceptions across non-protected Lua frames on the C stack. The contents of the C++ exception object pass through unmodified.
  • Lua errors can be caught on the C++ side with catch(...). The corresponding Lua error message can be retrieved from the Lua stack.
  • Throwing Lua errors across C++ frames is safe. C++ destructors will be called.

Limited interoperability means:

  • C++ exceptions can be caught on the Lua side with pcall(), lua_pcall() etc.
  • C++ exceptions will be converted to the generic Lua error "C++ exception", unless you use the C call wrapper feature.
  • C++ exceptions will be caught by non-protected Lua frames and are rethrown as a generic Lua error. The C++ exception object will be destroyed.
  • Lua errors cannot be caught on the C++ side.
  • Throwing Lua errors across C++ frames will not call C++ destructors.

No interoperability means:

  • It's not safe to throw C++ exceptions across Lua frames.
  • C++ exceptions cannot be caught on the Lua side.
  • Lua errors cannot be caught on the C++ side.
  • Throwing Lua errors across C++ frames will not call C++ destructors.

tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/faq.html0000644000000000000000000001700513306562377022246 0ustar rootroot Frequently Asked Questions (FAQ)
Q: Where can I learn more about LuaJIT and Lua?
Q: Where can I learn more about the compiler technology used by LuaJIT?
I'm planning to write more documentation about the internals of LuaJIT. In the meantime, please use the following Google Scholar searches to find relevant papers:
Search for: » Trace Compiler
Search for: » JIT Compiler
Search for: » Dynamic Language Optimizations
Search for: » SSA Form
Search for: » Linear Scan Register Allocation
Here is a list of the » innovative features in LuaJIT.
And, you know, reading the source is of course the only way to enlightenment. :-)
Q: Why do I get this error: "attempt to index global 'arg' (a nil value)"?
Q: My vararg functions fail after switching to LuaJIT!
LuaJIT is compatible to the Lua 5.1 language standard. It doesn't support the implicit arg parameter for old-style vararg functions from Lua 5.0.
Please convert your code to the » Lua 5.1 vararg syntax.
Q: Why do I get this error: "bad FPU precision"?
Q: I get weird behavior after initializing Direct3D.
Q: Some FPU operations crash after I load a Delphi DLL.
DirectX/Direct3D (up to version 9) sets the x87 FPU to single-precision mode by default. This violates the Windows ABI and interferes with the operation of many programs — LuaJIT is affected, too. Please make sure you always use the D3DCREATE_FPU_PRESERVE flag when initializing Direct3D.
Direct3D version 10 or higher does not show this behavior anymore. Consider testing your application with older versions, too.
Similarly, the Borland/Delphi runtime modifies the FPU control word and enables FP exceptions. Of course this violates the Windows ABI, too. Please check the Delphi docs for the Set8087CW method.
Q: Sometimes Ctrl-C fails to stop my Lua program. Why?
The interrupt signal handler sets a Lua debug hook. But this is currently ignored by compiled code (this will eventually be fixed). If your program is running in a tight loop and never falls back to the interpreter, the debug hook never runs and can't throw the "interrupted!" error.
In the meantime you have to press Ctrl-C twice to stop your program. That's similar to when it's stuck running inside a C function under the Lua interpreter.
Q: Why doesn't my favorite power-patch for Lua apply against LuaJIT?
Because it's a completely redesigned VM and has very little code in common with Lua anymore. Also, if the patch introduces changes to the Lua semantics, these would need to be reflected everywhere in the VM, from the interpreter up to all stages of the compiler.
Please use only standard Lua language constructs. For many common needs you can use source transformations or use wrapper or proxy functions. The compiler will happily optimize away such indirections.
Q: Lua runs everywhere. Why doesn't LuaJIT support my CPU?
Because it's a compiler — it needs to generate native machine code. This means the code generator must be ported to each architecture. And the fast interpreter is written in assembler and must be ported, too. This is quite an undertaking.
The install documentation shows the supported architectures. Other architectures will follow based on sufficient user demand and/or sponsoring.
Q: When will feature X be added? When will the next version be released?
When it's ready.
C'mon, it's open source — I'm doing it on my own time and you're getting it for free. You can either contribute a patch or sponsor the development of certain features, if they are important to you.

tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/ext_jit.html0000644000000000000000000001341713306562377023150 0ustar rootroot jit.* Library

The functions in this built-in module control the behavior of the JIT compiler engine. Note that JIT-compilation is fully automatic — you probably won't need to use any of the following functions unless you have special needs.

jit.on()
jit.off()

Turns the whole JIT compiler on (default) or off.

These functions are typically used with the command line options -j on or -j off.

jit.flush()

Flushes the whole cache of compiled code.

jit.on(func|true [,true|false])
jit.off(func|true [,true|false])
jit.flush(func|true [,true|false])

jit.on enables JIT compilation for a Lua function (this is the default).

jit.off disables JIT compilation for a Lua function and flushes any already compiled code from the code cache.

jit.flush flushes the code, but doesn't affect the enable/disable status.

The current function, i.e. the Lua function calling this library function, can also be specified by passing true as the first argument.

If the second argument is true, JIT compilation is also enabled, disabled or flushed recursively for all sub-functions of a function. With false only the sub-functions are affected.

The jit.on and jit.off functions only set a flag which is checked when the function is about to be compiled. They do not trigger immediate compilation.

Typical usage is jit.off(true, true) in the main chunk of a module to turn off JIT compilation for the whole module for debugging purposes.

jit.flush(tr)

Flushes the root trace, specified by its number, and all of its side traces from the cache. The code for the trace will be retained as long as there are any other traces which link to it.

status, ... = jit.status()

Returns the current status of the JIT compiler. The first result is either true or false if the JIT compiler is turned on or off. The remaining results are strings for CPU-specific features and enabled optimizations.

jit.version

Contains the LuaJIT version string.

jit.version_num

Contains the version number of the LuaJIT core. Version xx.yy.zz is represented by the decimal number xxyyzz.

jit.os

Contains the target OS name: "Windows", "Linux", "OSX", "BSD", "POSIX" or "Other".

jit.arch

Contains the target architecture name: "x86", "x64", "arm", "arm64", "ppc", "mips" or "mips64".

jit.opt.* — JIT compiler optimization control

This sub-module provides the backend for the -O command line option.

You can also use it programmatically, e.g.:

jit.opt.start(2) -- same as -O2
jit.opt.start("-dce")
jit.opt.start("hotloop=10", "hotexit=2")

Unlike in LuaJIT 1.x, the module is built-in and optimization is turned on by default! It's no longer necessary to run require("jit.opt").start(), which was one of the ways to enable optimization.

jit.util.* — JIT compiler introspection

This sub-module holds functions to introspect the bytecode, generated traces, the IR and the generated machine code. The functionality provided by this module is still in flux and therefore undocumented.

The debug modules -jbc, -jv and -jdump make extensive use of these functions. Please check out their source code, if you want to know more.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/ext_ffi.html0000644000000000000000000002414013306562377023121 0ustar rootroot FFI Library

The FFI library allows calling external C functions and using C data structures from pure Lua code.

The FFI library largely obviates the need to write tedious manual Lua/C bindings in C. No need to learn a separate binding language — it parses plain C declarations! These can be cut-n-pasted from C header files or reference manuals. It's up to the task of binding large libraries without the need for dealing with fragile binding generators.

The FFI library is tightly integrated into LuaJIT (it's not available as a separate module). The code generated by the JIT-compiler for accesses to C data structures from Lua code is on par with the code a C compiler would generate. Calls to C functions can be inlined in JIT-compiled code, unlike calls to functions bound via the classic Lua/C API.

This page gives a short introduction to the usage of the FFI library. Please use the FFI sub-topics in the navigation bar to learn more.

Motivating Example: Calling External C Functions

It's really easy to call an external C library function:

①
②


③local ffi = require("ffi")
ffi.cdef[[
int printf(const char *fmt, ...);
]]
ffi.C.printf("Hello %s!", "world")

So, let's pick that apart:

Load the FFI library.

Add a C declaration for the function. The part inside the double-brackets (in green) is just standard C syntax.

Call the named C function — Yes, it's that simple!

Actually, what goes on behind the scenes is far from simple: step ② makes use of the standard C library namespace ffi.C. Indexing this namespace with a symbol name ("printf") automatically binds it to the standard C library. The result is a special kind of object which, when called, runs the printf function. The arguments passed to this function are automatically converted from Lua objects to the corresponding C types.

Ok, so maybe the use of printf() wasn't such a spectacular example. You could have done that with io.write() and string.format(), too. But you get the idea ...

So here's something to pop up a message box on Windows:

local ffi = require("ffi")
ffi.cdef[[
int MessageBoxA(void *w, const char *txt, const char *cap, int type);
]]
ffi.C.MessageBoxA(nil, "Hello world!", "Test", 0)

Bing! Again, that was far too easy, no?

Compare this with the effort required to bind that function using the classic Lua/C API: create an extra C file, add a C function that retrieves and checks the argument types passed from Lua and calls the actual C function, add a list of module functions and their names, add a luaopen_* function and register all module functions, compile and link it into a shared library (DLL), move it to the proper path, add Lua code that loads the module aaaand ... finally call the binding function. Phew!

Motivating Example: Using C Data Structures

The FFI library allows you to create and access C data structures. Of course the main use for this is for interfacing with C functions. But they can be used stand-alone, too.

Lua is built upon high-level data types. They are flexible, extensible and dynamic. That's why we all love Lua so much. Alas, this can be inefficient for certain tasks, where you'd really want a low-level data type. E.g. a large array of a fixed structure needs to be implemented with a big table holding lots of tiny tables. This imposes both a substantial memory overhead as well as a performance overhead.

Here's a sketch of a library that operates on color images plus a simple benchmark. First, the plain Lua version:

local floor = math.floor

local function image_ramp_green(n)
  local img = {}
  local f = 255/(n-1)
  for i=1,n do
    img[i] = { red = 0, green = floor((i-1)*f), blue = 0, alpha = 255 }
  end
  return img
end

local function image_to_grey(img, n)
  for i=1,n do
    local y = floor(0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue)
    img[i].red = y; img[i].green = y; img[i].blue = y
  end
end

local N = 400*400
local img = image_ramp_green(N)
for i=1,1000 do
  image_to_grey(img, N)
end

This creates a table with 160.000 pixels, each of which is a table holding four number values in the range of 0-255. First an image with a green ramp is created (1D for simplicity), then the image is converted to greyscale 1000 times. Yes, that's silly, but I was in need of a simple example ...

And here's the FFI version. The modified parts have been marked in bold:

①





②

③
④






③
⑤local ffi = require("ffi")
ffi.cdef[[
typedef struct { uint8_t red, green, blue, alpha; } rgba_pixel;
]]

local function image_ramp_green(n)
  local img = ffi.new("rgba_pixel[?]", n)
  local f = 255/(n-1)
  for i=0,n-1 do
    img[i].green = i*f
    img[i].alpha = 255
  end
  return img
end

local function image_to_grey(img, n)
  for i=0,n-1 do
    local y = 0.3*img[i].red + 0.59*img[i].green + 0.11*img[i].blue
    img[i].red = y; img[i].green = y; img[i].blue = y
  end
end

local N = 400*400
local img = image_ramp_green(N)
for i=1,1000 do
  image_to_grey(img, N)
end

Ok, so that wasn't too difficult:

First, load the FFI library and declare the low-level data type. Here we choose a struct which holds four byte fields, one for each component of a 4x8 bit RGBA pixel.

Creating the data structure with ffi.new() is straightforward — the '?' is a placeholder for the number of elements of a variable-length array.

C arrays are zero-based, so the indexes have to run from 0 to n-1. One might want to allocate one more element instead to simplify converting legacy code.

Since ffi.new() zero-fills the array by default, we only need to set the green and the alpha fields.

The calls to math.floor() can be omitted here, because floating-point numbers are already truncated towards zero when converting them to an integer. This happens implicitly when the number is stored in the fields of each pixel.

Now let's have a look at the impact of the changes: first, memory consumption for the image is down from 22 Megabytes to 640 Kilobytes (400*400*4 bytes). That's a factor of 35x less! So, yes, tables do have a noticeable overhead. BTW: The original program would consume 40 Megabytes in plain Lua (on x64).

Next, performance: the pure Lua version runs in 9.57 seconds (52.9 seconds with the Lua interpreter) and the FFI version runs in 0.48 seconds on my machine (YMMV). That's a factor of 20x faster (110x faster than the Lua interpreter).

The avid reader may notice that converting the pure Lua version over to use array indexes for the colors ([1] instead of .red, [2] instead of .green etc.) ought to be more compact and faster. This is certainly true (by a factor of ~1.7x). Switching to a struct-of-arrays would help, too.

However the resulting code would be less idiomatic and rather error-prone. And it still doesn't get even close to the performance of the FFI version of the code. Also, high-level data structures cannot be easily passed to other C functions, especially I/O functions, without undue conversion penalties.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/ext_ffi_api.html0000644000000000000000000005173713306562377023766 0ustar rootroot ffi.* API Functions

This page describes the API functions provided by the FFI library in detail. It's recommended to read through the introduction and the FFI tutorial first.

Glossary

  • cdecl — An abstract C type declaration (a Lua string).
  • ctype — A C type object. This is a special kind of cdata returned by ffi.typeof(). It serves as a cdata constructor when called.
  • cdata — A C data object. It holds a value of the corresponding ctype.
  • ct — A C type specification which can be used for most of the API functions. Either a cdecl, a ctype or a cdata serving as a template type.
  • cb — A callback object. This is a C data object holding a special function pointer. Calling this function from C code runs an associated Lua function.
  • VLA — A variable-length array is declared with a ? instead of the number of elements, e.g. "int[?]". The number of elements (nelem) must be given when it's created.
  • VLS — A variable-length struct is a struct C type where the last element is a VLA. The same rules for declaration and creation apply.

Declaring and Accessing External Symbols

External symbols must be declared first and can then be accessed by indexing a C library namespace, which automatically binds the symbol to a specific library.

ffi.cdef(def)

Adds multiple C declarations for types or external symbols (named variables or functions). def must be a Lua string. It's recommended to use the syntactic sugar for string arguments as follows:

ffi.cdef[[
typedef struct foo { int a, b; } foo_t;  // Declare a struct and typedef.
int dofoo(foo_t *f, int n);  /* Declare an external C function. */
]]

The contents of the string (the part in green above) must be a sequence of C declarations, separated by semicolons. The trailing semicolon for a single declaration may be omitted.

Please note that external symbols are only declared, but they are not bound to any specific address, yet. Binding is achieved with C library namespaces (see below).

C declarations are not passed through a C pre-processor, yet. No pre-processor tokens are allowed, except for #pragma pack. Replace #define in existing C header files with enum, static const or typedef and/or pass the files through an external C pre-processor (once). Be careful not to include unneeded or redundant declarations from unrelated header files.

ffi.C

This is the default C library namespace — note the uppercase 'C'. It binds to the default set of symbols or libraries on the target system. These are more or less the same as a C compiler would offer by default, without specifying extra link libraries.

On POSIX systems, this binds to symbols in the default or global namespace. This includes all exported symbols from the executable and any libraries loaded into the global namespace. This includes at least libc, libm, libdl (on Linux), libgcc (if compiled with GCC), as well as any exported symbols from the Lua/C API provided by LuaJIT itself.

On Windows systems, this binds to symbols exported from the *.exe, the lua51.dll (i.e. the Lua/C API provided by LuaJIT itself), the C runtime library LuaJIT was linked with (msvcrt*.dll), kernel32.dll, user32.dll and gdi32.dll.

clib = ffi.load(name [,global])

This loads the dynamic library given by name and returns a new C library namespace which binds to its symbols. On POSIX systems, if global is true, the library symbols are loaded into the global namespace, too.

If name is a path, the library is loaded from this path. Otherwise name is canonicalized in a system-dependent way and searched in the default search path for dynamic libraries:

On POSIX systems, if the name contains no dot, the extension .so is appended. Also, the lib prefix is prepended if necessary. So ffi.load("z") looks for "libz.so" in the default shared library search path.

On Windows systems, if the name contains no dot, the extension .dll is appended. So ffi.load("ws2_32") looks for "ws2_32.dll" in the default DLL search path.

Creating cdata Objects

The following API functions create cdata objects (type() returns "cdata"). All created cdata objects are garbage collected.

cdata = ffi.new(ct [,nelem] [,init...])
cdata = ctype([nelem,] [init...])

Creates a cdata object for the given ct. VLA/VLS types require the nelem argument. The second syntax uses a ctype as a constructor and is otherwise fully equivalent.

The cdata object is initialized according to the rules for initializers, using the optional init arguments. Excess initializers cause an error.

Performance notice: if you want to create many objects of one kind, parse the cdecl only once and get its ctype with ffi.typeof(). Then use the ctype as a constructor repeatedly.

Please note that an anonymous struct declaration implicitly creates a new and distinguished ctype every time you use it for ffi.new(). This is probably not what you want, especially if you create more than one cdata object. Different anonymous structs are not considered assignment-compatible by the C standard, even though they may have the same fields! Also, they are considered different types by the JIT-compiler, which may cause an excessive number of traces. It's strongly suggested to either declare a named struct or typedef with ffi.cdef() or to create a single ctype object for an anonymous struct with ffi.typeof().

ctype = ffi.typeof(ct)

Creates a ctype object for the given ct.

This function is especially useful to parse a cdecl only once and then use the resulting ctype object as a constructor.

cdata = ffi.cast(ct, init)

Creates a scalar cdata object for the given ct. The cdata object is initialized with init using the "cast" variant of the C type conversion rules.

This function is mainly useful to override the pointer compatibility checks or to convert pointers to addresses or vice versa.

ctype = ffi.metatype(ct, metatable)

Creates a ctype object for the given ct and associates it with a metatable. Only struct/union types, complex numbers and vectors are allowed. Other types may be wrapped in a struct, if needed.

The association with a metatable is permanent and cannot be changed afterwards. Neither the contents of the metatable nor the contents of an __index table (if any) may be modified afterwards. The associated metatable automatically applies to all uses of this type, no matter how the objects are created or where they originate from. Note that pre-defined operations on types have precedence (e.g. declared field names cannot be overridden).

All standard Lua metamethods are implemented. These are called directly, without shortcuts and on any mix of types. For binary operations, the left operand is checked first for a valid ctype metamethod. The __gc metamethod only applies to struct/union types and performs an implicit ffi.gc() call during creation of an instance.

cdata = ffi.gc(cdata, finalizer)

Associates a finalizer with a pointer or aggregate cdata object. The cdata object is returned unchanged.

This function allows safe integration of unmanaged resources into the automatic memory management of the LuaJIT garbage collector. Typical usage:

local p = ffi.gc(ffi.C.malloc(n), ffi.C.free)
...
p = nil -- Last reference to p is gone.
-- GC will eventually run finalizer: ffi.C.free(p)

A cdata finalizer works like the __gc metamethod for userdata objects: when the last reference to a cdata object is gone, the associated finalizer is called with the cdata object as an argument. The finalizer can be a Lua function or a cdata function or cdata function pointer. An existing finalizer can be removed by setting a nil finalizer, e.g. right before explicitly deleting a resource:

ffi.C.free(ffi.gc(p, nil)) -- Manually free the memory.

C Type Information

The following API functions return information about C types. They are most useful for inspecting cdata objects.

size = ffi.sizeof(ct [,nelem])

Returns the size of ct in bytes. Returns nil if the size is not known (e.g. for "void" or function types). Requires nelem for VLA/VLS types, except for cdata objects.

align = ffi.alignof(ct)

Returns the minimum required alignment for ct in bytes.

ofs [,bpos,bsize] = ffi.offsetof(ct, field)

Returns the offset (in bytes) of field relative to the start of ct, which must be a struct. Additionally returns the position and the field size (in bits) for bit fields.

status = ffi.istype(ct, obj)

Returns true if obj has the C type given by ct. Returns false otherwise.

C type qualifiers (const etc.) are ignored. Pointers are checked with the standard pointer compatibility rules, but without any special treatment for void *. If ct specifies a struct/union, then a pointer to this type is accepted, too. Otherwise the types must match exactly.

Note: this function accepts all kinds of Lua objects for the obj argument, but always returns false for non-cdata objects.

Utility Functions

err = ffi.errno([newerr])

Returns the error number set by the last C function call which indicated an error condition. If the optional newerr argument is present, the error number is set to the new value and the previous value is returned.

This function offers a portable and OS-independent way to get and set the error number. Note that only some C functions set the error number. And it's only significant if the function actually indicated an error condition (e.g. with a return value of -1 or NULL). Otherwise, it may or may not contain any previously set value.

You're advised to call this function only when needed and as close as possible after the return of the related C function. The errno value is preserved across hooks, memory allocations, invocations of the JIT compiler and other internal VM activity. The same applies to the value returned by GetLastError() on Windows, but you need to declare and call it yourself.

str = ffi.string(ptr [,len])

Creates an interned Lua string from the data pointed to by ptr.

If the optional argument len is missing, ptr is converted to a "char *" and the data is assumed to be zero-terminated. The length of the string is computed with strlen().

Otherwise ptr is converted to a "void *" and len gives the length of the data. The data may contain embedded zeros and need not be byte-oriented (though this may cause endianness issues).

This function is mainly useful to convert (temporary) "const char *" pointers returned by C functions to Lua strings and store them or pass them to other functions expecting a Lua string. The Lua string is an (interned) copy of the data and bears no relation to the original data area anymore. Lua strings are 8 bit clean and may be used to hold arbitrary, non-character data.

Performance notice: it's faster to pass the length of the string, if it's known. E.g. when the length is returned by a C call like sprintf().

ffi.copy(dst, src, len)
ffi.copy(dst, str)

Copies the data pointed to by src to dst. dst is converted to a "void *" and src is converted to a "const void *".

In the first syntax, len gives the number of bytes to copy. Caveat: if src is a Lua string, then len must not exceed #src+1.

In the second syntax, the source of the copy must be a Lua string. All bytes of the string plus a zero-terminator are copied to dst (i.e. #src+1 bytes).

Performance notice: ffi.copy() may be used as a faster (inlinable) replacement for the C library functions memcpy(), strcpy() and strncpy().

ffi.fill(dst, len [,c])

Fills the data pointed to by dst with len constant bytes, given by c. If c is omitted, the data is zero-filled.

Performance notice: ffi.fill() may be used as a faster (inlinable) replacement for the C library function memset(dst, c, len). Please note the different order of arguments!

Target-specific Information

status = ffi.abi(param)

Returns true if param (a Lua string) applies for the target ABI (Application Binary Interface). Returns false otherwise. The following parameters are currently defined:

Parameter Description
32bit — 32 bit architecture
64bit — 64 bit architecture
le — Little-endian architecture
be — Big-endian architecture
fpu — Target has a hardware FPU
softfp — softfp calling conventions
hardfp — hardfp calling conventions
eabi — EABI variant of the standard ABI
win — Windows variant of the standard ABI
gc64 — 64 bit GC references

ffi.os

Contains the target OS name. Same contents as jit.os.

ffi.arch

Contains the target architecture name. Same contents as jit.arch.

Methods for Callbacks

The C types for callbacks have some extra methods:

cb:free()

Free the resources associated with a callback. The associated Lua function is unanchored and may be garbage collected. The callback function pointer is no longer valid and must not be called anymore (it may be reused by a subsequently created callback).

cb:set(func)

Associate a new Lua function with a callback. The C type of the callback and the callback function pointer are unchanged.

This method is useful to dynamically switch the receiver of callbacks without creating a new callback each time and registering it again (e.g. with a GUI library).

Extended Standard Library Functions

The following standard library functions have been extended to work with cdata objects:

n = tonumber(cdata)

Converts a number cdata object to a double and returns it as a Lua number. This is particularly useful for boxed 64 bit integer values. Caveat: this conversion may incur a precision loss.

s = tostring(cdata)

Returns a string representation of the value of 64 bit integers ("nnnLL" or "nnnULL") or complex numbers ("re±imi"). Otherwise returns a string representation of the C type of a ctype object ("ctype<type>") or a cdata object ("cdata<type>: address"), unless you override it with a __tostring metamethod (see ffi.metatype()).

iter, obj, start = pairs(cdata)
iter, obj, start = ipairs(cdata)

Calls the __pairs or __ipairs metamethod of the corresponding ctype.

Extensions to the Lua Parser

The parser for Lua source code treats numeric literals with the suffixes LL or ULL as signed or unsigned 64 bit integers. Case doesn't matter, but uppercase is recommended for readability. It handles decimal (42LL), hexadecimal (0x2aLL) and binary (0b101010LL) literals.

The imaginary part of complex numbers can be specified by suffixing number literals with i or I, e.g. 12.5i. Caveat: you'll need to use 1i to get an imaginary part with the value one, since i itself still refers to a variable named i.


tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/bluequad.css0000644000000000000000000001270713306562377023131 0ustar rootroot/* Copyright (C) 2004-2017 Mike Pall. * * You are welcome to use the general ideas of this design for your own sites. * But please do not steal the stylesheet, the layout or the color scheme. */ /* colorscheme: * * site | head #4162bf/white | #6078bf/#e6ecff * ------+------ ----------------+------------------- * nav | main #bfcfff | #e6ecff/black * * nav: hiback loback #c5d5ff #b9c9f9 * hiborder loborder #e6ecff #97a7d7 * link hover #2142bf #ff0000 * * link: link visited hover #2142bf #8122bf #ff0000 * * main: boxback boxborder #f0f4ff #bfcfff */ body { font-family: Verdana, Arial, Helvetica, sans-serif; font-size: 10pt; margin: 0; padding: 0; border: none; background: #e0e0e0; color: #000000; } a:link { text-decoration: none; background: transparent; color: #2142bf; } a:visited { text-decoration: none; background: transparent; color: #8122bf; } a:hover, a:active { text-decoration: underline; background: transparent; color: #ff0000; } h1, h2, h3 { font-weight: bold; text-align: left; margin: 0.5em 0; padding: 0; background: transparent; } h1 { font-size: 200%; line-height: 3em; /* really 6em relative to body, match #site span */ margin: 0; } h2 { font-size: 150%; color: #606060; } h3 { font-size: 125%; color: #404040; } p { max-width: 600px; margin: 0 0 0.5em 0; padding: 0; } b { color: #404040; } ul, ol { max-width: 600px; margin: 0.5em 0; padding: 0 0 0 2em; } ul { list-style: outside square; } ol { list-style: outside decimal; } li { margin: 0; padding: 0; } dl { max-width: 600px; margin: 1em 0; padding: 1em; border: 1px solid #bfcfff; background: #f0f4ff; } dt { font-weight: bold; margin: 0; padding: 0; } dt sup { float: right; margin-left: 1em; color: #808080; } dt a:visited { text-decoration: none; color: #2142bf; } dt a:hover, dt a:active { text-decoration: none; color: #ff0000; } dd { margin: 0.5em 0 0 2em; padding: 0; } 
div.tablewrap { /* for IE *sigh* */ max-width: 600px; } table { table-layout: fixed; border-spacing: 0; border-collapse: collapse; max-width: 600px; width: 100%; margin: 1em 0; padding: 0; border: 1px solid #bfcfff; } tr { margin: 0; padding: 0; border: none; } tr.odd { background: #f0f4ff; } tr.separate td { border-top: 1px solid #bfcfff; } td { text-align: left; margin: 0; padding: 0.2em 0.5em; border: none; } tt, code, kbd, samp { font-family: Courier New, Courier, monospace; line-height: 1.2; font-size: 110%; } kbd { font-weight: bolder; } blockquote, pre { max-width: 600px; margin: 1em 2em; padding: 0; } pre { line-height: 1.1; } pre.code { line-height: 1.4; margin: 0.5em 0 1em 0.5em; padding: 0.5em 1em; border: 1px solid #bfcfff; background: #f0f4ff; } pre.mark { padding-left: 2em; } span.codemark { position:absolute; left: 16em; color: #4040c0; } span.mark { color: #4040c0; font-family: Courier New, Courier, monospace; line-height: 1.1; } img { border: none; vertical-align: baseline; margin: 0; padding: 0; } img.left { float: left; margin: 0.5em 1em 0.5em 0; } img.right { float: right; margin: 0.5em 0 0.5em 1em; } .indent { padding-left: 1em; } .flush { clear: both; visibility: hidden; } .hide, .noscreen { display: none !important; } .ext { color: #ff8000; } .new { font-size: 6pt; vertical-align: middle; background: #ff8000; color: #ffffff; } #site { clear: both; float: left; width: 13em; text-align: center; font-weight: bold; margin: 0; padding: 0; background: transparent; color: #ffffff; } #site a { font-size: 200%; } #site a:link, #site a:visited { text-decoration: none; font-weight: bold; background: transparent; color: #ffffff; } #site span { line-height: 3em; /* really 6em relative to body, match h1 */ } #logo { color: #ffb380; } #head { margin: 0; padding: 0 0 0 2em; border-left: solid 13em #4162bf; border-right: solid 3em #6078bf; background: #6078bf; color: #e6ecff; } #nav { clear: both; float: left; overflow: hidden; text-align: left; line-height: 
1.5; width: 13em; padding-top: 1em; background: transparent; } #nav ul { list-style: none outside; margin: 0; padding: 0; } #nav li { margin: 0; padding: 0; } #nav a { display: block; text-decoration: none; font-weight: bold; margin: 0; padding: 2px 1em; border-top: 1px solid transparent; border-bottom: 1px solid transparent; background: transparent; color: #2142bf; } #nav a:hover, #nav a:active { text-decoration: none; border-top: 1px solid #97a7d7; border-bottom: 1px solid #e6ecff; background: #b9c9f9; color: #ff0000; } #nav a.current, #nav a.current:hover, #nav a.current:active { border-top: 1px solid #e6ecff; border-bottom: 1px solid #97a7d7; background: #c5d5ff; color: #2142bf; } #nav ul ul a { padding: 0 1em 0 1.7em; } #nav ul ul ul a { padding: 0 0.5em 0 2.4em; } #main { line-height: 1.5; text-align: left; margin: 0; padding: 1em 2em; border-left: solid 13em #bfcfff; border-right: solid 3em #e6ecff; background: #e6ecff; } #foot { clear: both; font-size: 80%; text-align: center; margin: 0; padding: 0.5em; background: #6078bf; color: #ffffff; } #foot a:link, #foot a:visited { text-decoration: underline; background: transparent; color: #ffffff; } #foot a:hover, #foot a:active { text-decoration: underline; background: transparent; color: #bfcfff; } tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/doc/changes.html0000644000000000000000000012453213306562377023113 0ustar rootroot LuaJIT Change History

This is a list of changes between the released versions of LuaJIT.
The current stable version is LuaJIT 2.0.5.

Please check the » Online Change History to see whether newer versions are available.

LuaJIT 2.1.0-beta3 — 2017-05-01

  • Rewrite memory block allocator.
  • Add various extension from Lua 5.2/5.3.
  • Remove old Lua 5.0 compatibility defines.
  • Set arg table before evaluating LUA_INIT and -e chunks.
  • Fix FOLD rules for math.abs() and FP negation.
  • Fix soft-float math.abs() and negation.
  • Fix formatting of some small denormals at low precision.
  • LJ_GC64: Add JIT compiler support.
  • x64/LJ_GC64: Add JIT compiler backend.
  • x86/x64: Generate BMI2 shifts and rotates, if available.
  • Windows/x86: Add full exception interoperability.
  • ARM64: Add big-endian support.
  • ARM64: Add JIT compiler backend.
  • MIPS: Fix TSETR barrier.
  • MIPS: Support MIPS16 interlinking.
  • MIPS soft-float: Fix code generation for HREF.
  • MIPS64: Add MIPS64 hard-float JIT compiler backend.
  • MIPS64: Add MIPS64 hard-float/soft-float support to interpreter.
  • FFI: Compile bitfield loads/stores.
  • Various fixes common with the 2.0 branch.

LuaJIT 2.1.0-beta2 — 2016-03-03

  • Enable trace stitching.
  • Use internal implementation for converting FP numbers to strings.
  • Parse Unicode escape '\u{XX...}' in string literals.
  • Add MIPS soft-float support.
  • Switch MIPS port to dual-number mode.
  • x86/x64: Add support for AES-NI, AVX and AVX2 to DynASM.
  • FFI: Add ssize_t declaration.
  • FFI: Parse #line NN and #NN.
  • Various minor fixes.

LuaJIT 2.1.0-beta1 — 2015-08-25

This is a brief summary of the major changes in LuaJIT 2.1 compared to 2.0. Please take a look at the commit history for more details.

  • Changes to the VM core:
    • Add low-overhead profiler (-jp).
    • Add LJ_GC64 mode: 64 bit GC object references (really: 47 bit). Interpreter-only for now.
    • Add LJ_FR2 mode: Two-slot frame info. Required by LJ_GC64 mode.
    • Add table.new() and table.clear().
    • Parse binary number literals (0bxxx).
  • Improvements to the JIT compiler:
    • Add trace stitching (disabled for now).
    • Compile various builtins: string.char(), string.reverse(), string.lower(), string.upper(), string.rep(), string.format(), table.concat(), bit.tohex(), getfenv(0), debug.getmetatable().
    • Compile string.find() for fixed string searches (no patterns).
    • Compile BC_TSETM, e.g. {1,2,3,f()}.
    • Compile string concatenations (BC_CAT).
    • Compile __concat metamethod.
    • Various minor optimizations.
  • Internal Changes:
    • Add support for embedding LuaJIT bytecode for builtins.
    • Replace various builtins with embedded bytecode.
    • Refactor string buffers and string formatting.
    • Remove obsolete non-truncating number to integer conversions.
  • Ports:
    • Add Xbox One port (LJ_GC64 mode).
    • ARM64: Add port of the interpreter (LJ_GC64 mode).
    • x64: Add separate port of the interpreter to LJ_GC64 mode.
    • x86/x64: Drop internal x87 math functions. Use libm functions.
    • x86: Remove x87 support from interpreter. SSE2 is mandatory now.
    • PPC/e500: Drop support for this architecture.
  • FFI library:
    • FFI: Add 64 bit bitwise operations.
    • FFI: Compile VLA/VLS and large cdata allocations with default initialization.
    • FFI: Compile conversions from functions to function pointers.
    • FFI: Compile lightuserdata to void * conversion.
    • FFI: Compile ffi.gc(cdata, nil), too.
    • FFI: Add ffi.typeinfo().

LuaJIT 2.0.5 — 2017-05-01

  • Add workaround for MSVC 2015 stdio changes.
  • Limit mcode alloc probing, depending on the available pool size.
  • Fix overly restrictive range calculation in mcode allocation.
  • Fix out-of-scope goto handling in parser.
  • Remove internal __mode = "K" and replace with safe check.
  • Add "proto" field to jit.util.funcinfo().
  • Fix GC step size calculation.
  • Initialize uv->immutable for upvalues of loaded chunks.
  • Fix for cdata vs. non-cdata arithmetics/comparisons.
  • Drop leftover regs in 'for' iterator assignment, too.
  • Fix PHI remarking in SINK pass.
  • Don't try to record outermost pcall() return to lower frame.
  • Add guard for obscure aliasing between open upvalues and SSA slots.
  • Remove assumption that lj_math_random_step() doesn't clobber FPRs.
  • Fix handling of non-numeric strings in arithmetic coercions.
  • Fix recording of select(n, ...) with off-trace varargs.
  • Fix install for cross-builds.
  • Don't allocate unused 2nd result register in JIT compiler backend.
  • Drop marks from replayed instructions when sinking.
  • Fix unsinking check.
  • Properly handle OOM in trace_save().
  • Limit number of arguments given to io.lines() and fp:lines().
  • Fix narrowing of TOBIT.
  • OSX: Fix build with recent XCode.
  • x86/x64: Don't spill an explicit REF_BASE in the IR.
  • x86/x64: Fix instruction length decoder.
  • x86/x64: Search for exit jumps with instruction length decoder.
  • ARM: Fix BLX encoding for Thumb interworking calls.
  • MIPS: Don't use RID_GP as a scratch register.
  • MIPS: Fix emitted code for U32 to float conversion.
  • MIPS: Backport workaround for compact unwind tables.
  • MIPS: Fix cross-endian jit.bcsave.
  • MIPS: Fix BC_ISNEXT fallback path.
  • MIPS: Fix use of ffgccheck delay slots in interpreter.
  • FFI: Fix FOLD rules for int64_t comparisons.
  • FFI: Fix SPLIT pass for CONV i64.u64.
  • FFI: Fix ipairs() recording.
  • FFI: Don't propagate qualifiers into subtypes of complex.

LuaJIT 2.0.4 — 2015-05-14

  • Fix stack check in narrowing optimization.
  • Fix Lua/C API typecheck error for special indexes.
  • Fix string to number conversion.
  • Fix lexer error for chunks without tokens.
  • Don't compile IR_RETF after CALLT to ff with-side effects.
  • Fix BC_UCLO/BC_JMP join optimization in Lua parser.
  • Fix corner case in string to number conversion.
  • Gracefully handle lua_error() for a suspended coroutine.
  • Avoid error messages when building with Clang.
  • Fix snapshot #0 handling for traces with a stack check on entry.
  • Fix fused constant loads under high register pressure.
  • Invalidate backpropagation cache after DCE.
  • Fix ABC elimination.
  • Fix debug info for main chunk of stripped bytecode.
  • Fix FOLD rule for string.sub(s, ...) == k.
  • Fix FOLD rule for STRREF of SNEW.
  • Fix frame traversal while searching for error function.
  • Prevent GC estimate miscalculation due to buffer growth.
  • Prevent adding side traces for stack checks.
  • Fix top slot calculation for snapshots with continuations.
  • Fix check for reuse of SCEV results in FORL.
  • Add PS Vita port.
  • Fix compatibility issues with Illumos.
  • Fix DragonFly build (unsupported).
  • OpenBSD/x86: Better executable memory allocation for W^X mode.
  • x86: Fix argument checks for ipairs() iterator.
  • x86: lj_math_random_step() clobbers XMM regs on OSX Clang.
  • x86: Fix code generation for unused result of math.random().
  • x64: Allow building with LUAJIT_USE_SYSMALLOC and LUAJIT_USE_VALGRIND.
  • x86/x64: Fix argument check for bit shifts.
  • x86/x64: Fix code generation for fused test/arith ops.
  • ARM: Fix write barrier check in BC_USETS.
  • PPC: Fix red zone overflow in machine code generation.
  • PPC: Don't use mcrxr on PPE.
  • Various archs: Fix excess stack growth in interpreter.
  • FFI: Fix FOLD rule for TOBIT + CONV num.u32.
  • FFI: Prevent DSE across ffi.string().
  • FFI: No meta fallback when indexing pointer to incomplete struct.
  • FFI: Fix initialization of unions of subtypes.
  • FFI: Fix cdata vs. non-cdata arithmetic and comparisons.
  • FFI: Fix __index/__newindex metamethod resolution for ctypes.
  • FFI: Fix compilation of reference field access.
  • FFI: Fix frame traversal for backtraces with FFI callbacks.
  • FFI: Fix recording of indexing a struct pointer ctype object itself.
  • FFI: Allow non-scalar cdata to be compared for equality by address.
  • FFI: Fix pseudo type conversions for type punning.

LuaJIT 2.0.3 — 2014-03-12

  • Add PS4 port.
  • Add support for multilib distro builds.
  • Fix OSX build.
  • Fix MinGW build.
  • Fix Xbox 360 build.
  • Improve ULOAD forwarding for open upvalues.
  • Fix GC steps threshold handling when called by JIT-compiled code.
  • Fix argument checks for math.deg() and math.rad().
  • Fix jit.flush(func|true).
  • Respect jit.off(func) when returning to a function, too.
  • Fix compilation of string.byte(s, nil, n).
  • Fix line number for relocated bytecode after closure fixup
  • Fix frame traversal for backtraces.
  • Fix ABC elimination.
  • Fix handling of redundant PHIs.
  • Fix snapshot restore for exit to function header.
  • Fix type punning alias analysis for constified pointers
  • Fix call unroll checks in the presence of metamethod frames.
  • Fix initial maxslot for down-recursive traces.
  • Prevent BASE register coalescing if parent uses IR_RETF.
  • Don't purge modified function from stack slots in BC_RET.
  • Fix recording of BC_VARG.
  • Don't access dangling reference to reallocated IR.
  • Fix frame depth display for bytecode dump in -jdump.
  • ARM: Fix register allocation when rematerializing FPRs.
  • x64: Fix store to upvalue for lightuserdata values.
  • FFI: Add missing GC steps for callback argument conversions.
  • FFI: Properly unload loaded DLLs.
  • FFI: Fix argument checks for ffi.string().
  • FFI/x64: Fix passing of vector arguments to calls.
  • FFI: Rehash finalizer table after GC cycle, if needed.
  • FFI: Fix cts->L for cdata unsinking in snapshot restore.

LuaJIT 2.0.2 — 2013-06-03

  • Fix memory access check for fast string interning.
  • Fix MSVC intrinsics for older versions.
  • Add missing GC steps for io.* functions.
  • Fix spurious red zone overflows in machine code generation.
  • Fix jump-range constrained mcode allocation.
  • Inhibit DSE for implicit loads via calls.
  • Fix builtin string to number conversion for overflow digits.
  • Fix optional argument handling while recording builtins.
  • Fix optional argument handling in table.concat().
  • Add partial support for building with MingW64 GCC 4.8-SEH.
  • Add missing PHI barrier to string.sub(str, a, b) == kstr FOLD rule.
  • Fix compatibility issues with Illumos.
  • ARM: Fix cache flush/sync for exit stubs of JIT-compiled code.
  • MIPS: Fix cache flush/sync for JIT-compiled code jump area.
  • PPC: Add plt suffix for external calls from assembler code.
  • FFI: Fix snapshot substitution in SPLIT pass.
  • FFI/x86: Fix register allocation for 64 bit comparisons.
  • FFI: Fix tailcall in lowest frame to C function with bool result.
  • FFI: Ignore long type specifier in ffi.istype().
  • FFI: Fix calling conventions for 32 bit OSX and iOS simulator (struct returns).
  • FFI: Fix calling conventions for ARM hard-float EABI (nested structs).
  • FFI: Improve error messages for arithmetic and comparison operators.
  • FFI: Insert no-op type conversion for pointer to integer cast.
  • FFI: Fix unroll limit for ffi.fill().
  • FFI: Must sink XBAR together with XSTOREs.
  • FFI: Preserve intermediate string for const char * conversion.

LuaJIT 2.0.1 — 2013-02-19

  • Don't clear frame for out-of-memory error.
  • Leave hook when resume catches error thrown from hook.
  • Add missing GC steps for template table creation.
  • Fix discharge order of comparisons in Lua parser.
  • Improve buffer handling for io.read().
  • OSX: Add support for Mach-O object files to -b option.
  • Fix PS3 port.
  • Fix/enable Xbox 360 port.
  • x86/x64: Always mark ref for shift count as non-weak.
  • x64: Don't fuse implicitly 32-to-64 extended operands.
  • ARM: Fix armhf call argument handling.
  • ARM: Fix code generation for integer math.min/math.max.
  • PPC/e500: Fix lj_vm_floor() for Inf/NaN.
  • FFI: Change priority of table initializer variants for structs.
  • FFI: Fix code generation for bool call result check on x86/x64.
  • FFI: Load FFI library on-demand for bytecode with cdata literals.
  • FFI: Fix handling of qualified transparent structs/unions.

LuaJIT 2.0.0 — 2012-11-08

  • Correctness and completeness:
    • Fix Android/x86 build.
    • Fix recording of equality comparisons with __eq metamethods.
    • Fix detection of immutable upvalues.
    • Replace error with PANIC for callbacks from JIT-compiled code.
    • Fix builtin string to number conversion for INT_MIN.
    • Don't create unneeded array part for template tables.
    • Fix CONV.num.int sinking.
    • Don't propagate implicitly widened number to index metamethods.
    • ARM: Fix ordered comparisons of number vs. non-number.
    • FFI: Fix code generation for replay of sunk float fields.
    • FFI: Fix signedness of bool.
    • FFI: Fix recording of bool call result check on x86/x64.
    • FFI: Fix stack-adjustment for __thiscall callbacks.

LuaJIT 2.0.0-beta11 — 2012-10-16

  • New features:
    • Use ARM VFP instructions, if available (build-time detection).
    • Add support for ARM hard-float EABI (armhf).
    • Add PS3 port.
    • Add many features from Lua 5.2, e.g. goto/labels. Refer to this list.
    • FFI: Add parameterized C types.
    • FFI: Add support for copy constructors.
    • FFI: Equality comparisons never raise an error (treat as unequal instead).
    • FFI: Box all accessed or returned enums.
    • FFI: Check for __new metamethod when calling a constructor.
    • FFI: Handle __pairs/__ipairs metamethods for cdata objects.
    • FFI: Convert io.* file handle to FILE * pointer (but as a void *).
    • FFI: Detect and support type punning through unions.
    • FFI: Improve various error messages.
  • Build-system reorganization:
    • Reorganize directory layout:
      lib/* → src/jit/*
      src/buildvm_*.dasc → src/vm_*.dasc
      src/buildvm_*.h → removed
      src/buildvm* → src/host/*
    • Add minified Lua interpreter plus Lua BitOp (minilua) to run DynASM.
    • Change DynASM bit operations to use Lua BitOp
    • Translate only vm_*.dasc for detected target architecture.
    • Improve target detection for msvcbuild.bat.
    • Fix build issues on Cygwin and MinGW with optional MSys.
    • Handle cross-compiles with FPU/no-FPU or hard-fp/soft-fp ABI mismatch.
    • Remove some library functions for no-JIT/no-FFI builds.
    • Add uninstall target to top-level Makefile.
  • Correctness and completeness:
    • Preserve snapshot #0 PC for all traces.
    • Fix argument checks for coroutine.create().
    • Command line prints version and JIT status to stdout, not stderr.
    • Fix userdata __gc separations at Lua state close.
    • Fix TDUP to HLOAD forwarding for LJ_DUALNUM builds.
    • Fix buffer check in bytecode writer.
    • Make os.date() thread-safe.
    • Add missing declarations for MSVC intrinsics.
    • Fix dispatch table modifications for return hooks.
    • Workaround for MSVC conversion bug (double → uint32_t → int32_t).
    • Fix FOLD rule (i-j)-i => 0-j.
    • Never use DWARF unwinder on Windows.
    • Fix shrinking of direct mapped blocks in builtin allocator.
    • Limit recursion depth in string.match() et al.
    • Fix late despecialization of ITERN after loop has been entered.
    • Fix 'f' and 'L' options for debug.getinfo() and lua_getinfo().
    • Fix package.searchpath().
    • OSX: Change dylib names to be consistent with other platforms.
    • Android: Workaround for broken sprintf("%g", -0.0).
    • x86: Remove support for ancient CPUs without CMOV (before Pentium Pro).
    • x86: Fix register allocation for calls returning register pair.
    • x86/x64: Fix fusion of unsigned byte comparisons with swapped operands.
    • ARM: Fix tonumber() argument check.
    • ARM: Fix modulo operator and math.floor()/math.ceil() for inf/nan.
    • ARM: Invoke SPLIT pass for leftover IR_TOBIT.
    • ARM: Fix BASE register coalescing.
    • PPC: Fix interpreter state setup in callbacks.
    • PPC: Fix string.sub() range check.
    • MIPS: Support generation of MIPS/MIPSEL bytecode object files.
    • MIPS: Fix calls to floor()/ceil()/trunc().
    • ARM/PPC: Detect more target architecture variants.
    • ARM/PPC/e500/MIPS: Fix tailcalls from fast functions, esp. tostring().
    • ARM/PPC/MIPS: Fix rematerialization of FP constants.
    • FFI: Don't call FreeLibrary() on our own EXE/DLL.
    • FFI: Resolve metamethods for constructors, too.
    • FFI: Properly disable callbacks on iOS (would require executable memory).
    • FFI: Fix cdecl string parsing during recording.
    • FFI: Show address pointed to for tostring(ref), too.
    • FFI: Fix alignment of C call argument/return structure.
    • FFI: Initialize all fields of standard types.
    • FFI: Fix callback handling when new C types are declared in callback.
    • FFI: Fix recording of constructors for pointers.
    • FFI: Always resolve metamethods for pointers to structs.
    • FFI: Correctly propagate alignment when interning nested types.
  • Structural and performance enhancements:
    • Add allocation sinking and store sinking optimization.
    • Constify immutable upvalues.
    • Add builtin string to integer or FP number conversion. Improves cross-platform consistency and correctness.
    • Create string hash slots in template tables for non-const values, too. Avoids later table resizes.
    • Eliminate HREFK guard for template table references.
    • Add various new FOLD rules.
    • Don't use stack unwinding for lua_yield() (slow on x64).
    • ARM, PPC, MIPS: Improve XLOAD operand fusion and register hinting.
    • PPC, MIPS: Compile math.sqrt() to sqrt instruction, if available.
    • FFI: Fold KPTR + constant offset in SPLIT pass.
    • FFI: Optimize/inline ffi.copy() and ffi.fill().
    • FFI: Compile and optimize array/struct copies.
    • FFI: Compile ffi.typeof(cdata|ctype), ffi.sizeof(), ffi.alignof(), ffi.offsetof() and ffi.gc().

LuaJIT 2.0.0-beta10 — 2012-05-09

  • New features:
    • The MIPS of LuaJIT is complete. It requires a CPU conforming to the MIPS32 R1 architecture with hardware FPU. O32 hard-fp ABI, little-endian or big-endian.
    • Auto-detect target arch via cross-compiler. No need for TARGET=arch anymore.
    • Make DynASM compatible with Lua 5.2.
    • From Lua 5.2: Try __tostring metamethod on non-string error messages.
  • Correctness and completeness:
    • Fix parsing of hex literals with exponents.
    • Fix bytecode dump for certain number constants.
    • Fix argument type in error message for relative arguments.
    • Fix argument error handling on Lua stacks without a frame.
    • Add missing mcode limit check in assembler backend.
    • Fix compilation on OpenBSD.
    • Avoid recursive GC steps after GC-triggered trace exit.
    • Replace <unwind.h> definitions with our own.
    • Fix OSX build issues. Bump minimum required OSX version to 10.4.
    • Fix discharge order of comparisons in Lua parser.
    • Ensure running __gc of userdata created in __gc at state close.
    • Limit number of userdata __gc separations at state close.
    • Fix bytecode JMP slot range when optimizing and/or with constant LHS.
    • Fix DSE of USTORE.
    • Make lua_concat() work from C hook with partial frame.
    • Add required PHIs for implicit conversions, e.g. via XREF forwarding.
    • Add more comparison variants to Valgrind suppressions file.
    • Disable loading bytecode with an extra header (BOM or #!).
    • Fix PHI stack slot syncing.
    • ARM: Reorder type/value tests to silence Valgrind.
    • ARM: Fix register allocation for ldrd-optimized HREFK.
    • ARM: Fix conditional branch fixup for OBAR.
    • ARM: Invoke SPLIT pass for double args in FFI call.
    • ARM: Handle all CALL* ops with double results in SPLIT pass.
    • ARM: Fix rejoin of POW in SPLIT pass.
    • ARM: Fix compilation of math.sinh, math.cosh, math.tanh.
    • ARM, PPC: Avoid pointless arg clearing in BC_IFUNCF.
    • PPC: Fix resume after yield from hook.
    • PPC: Fix argument checking for rawget().
    • PPC: Fix fusion of floating-point XLOAD/XSTORE.
    • PPC: Fix HREFK code generation for huge tables.
    • PPC: Use builtin D-Cache/I-Cache sync code.
  • FFI library:
    • Ignore empty statements in ffi.cdef().
    • Ignore number parsing errors while skipping definitions.
    • Don't touch frame in callbacks with tailcalls to fast functions.
    • Fix library unloading on POSIX systems.
    • Finalize cdata before userdata when closing the state.
    • Change ffi.load() library name resolution for Cygwin.
    • Fix resolving of function name redirects on Windows/x86.
    • Fix symbol resolving error messages on Windows.
    • Fix blacklisting of C functions calling callbacks.
    • Fix result type of pointer difference.
    • Use correct PC in FFI metamethod error message.
    • Allow 'typedef _Bool int BOOL;' for the Windows API.
    • Don't record test for bool result of call, if ignored.

LuaJIT 2.0.0-beta9 — 2011-12-14

  • New features:
    • PPC port of LuaJIT is complete. Default is the dual-number port (usually faster). Single-number port selectable via src/Makefile at build time.
    • Add FFI callback support.
    • Extend -b to generate .c, .h or .obj/.o files with embedded bytecode.
    • Allow loading embedded bytecode with require().
    • From Lua 5.2: Change to '\z' escape. Reject undefined escape sequences.
  • Correctness and completeness:
    • Fix OSX 10.7 build. Fix install_name and versioning on OSX.
    • Fix iOS build.
    • Install dis_arm.lua, too.
    • Mark installed shared library as executable.
    • Add debug option to msvcbuild.bat and improve error handling.
    • Fix data-flow analysis for iterators.
    • Fix forced unwinding triggered by external unwinder.
    • Record missing for loop slot loads (return to lower frame).
    • Always use ANSI variants of Windows system functions.
    • Fix GC barrier for multi-result table constructor (TSETM).
    • Fix/add various FOLD rules.
    • Add potential PHI for number conversions due to type instability.
    • Do not eliminate PHIs only referenced from other PHIs.
    • Correctly anchor implicit number to string conversions in Lua/C API.
    • Fix various stack limit checks.
    • x64: Use thread-safe exceptions for external unwinding (GCC platforms).
    • x64: Fix result type of cdata index conversions.
    • x64: Fix math.random() and bit.bswap() code generation.
    • x64: Fix lightuserdata comparisons.
    • x64: Always extend stack-passed arguments to pointer size.
    • ARM: Many fixes to code generation backend.
    • PPC/e500: Fix dispatch for binop metamethods.
    • PPC/e500: Save/restore condition registers when entering/leaving the VM.
    • PPC/e500: Fix write barrier in stores of strings to upvalues.
  • FFI library:
    • Fix C comment parsing.
    • Fix snapshot optimization for cdata comparisons.
    • Fix recording of const/enum lookups in namespaces.
    • Fix call argument and return handling for I8/U8/I16/U16 types.
    • Fix unfused loads of float fields.
    • Fix ffi.string() recording.
    • Save GetLastError() around ffi.load() and symbol resolving, too.
    • Improve ld script detection in ffi.load().
    • Record loads/stores to external variables in namespaces.
    • Compile calls to stdcall, fastcall and vararg functions.
    • Treat function ctypes like pointers in comparisons.
    • Resolve __call metamethod for pointers, too.
    • Record C function calls with bool return values.
    • Record ffi.errno().
    • x86: Fix number to uint32_t conversion rounding.
    • x86: Fix 64 bit arithmetic in assembler backend.
    • x64: Fix struct-by-value calling conventions.
    • ARM: Ensure invocation of SPLIT pass for float conversions.
  • Structural and performance enhancements:
    • Display trace types with -jv and -jdump.
    • Record isolated calls. But prefer recording loops over calls.
    • Specialize to prototype for non-monomorphic functions. Solves the trace-explosion problem for closure-heavy programming styles.
    • Always generate a portable vmdef.lua. Easier for distros.

LuaJIT 2.0.0-beta8 — 2011-06-23

  • New features:
    • Soft-float ARM port of LuaJIT is complete.
    • Add support for bytecode loading/saving and -b command line option.
    • From Lua 5.2: __len metamethod for tables (disabled by default).
  • Correctness and completeness:
    • ARM: Misc. fixes for interpreter.
    • x86/x64: Fix bit.* argument checking in interpreter.
    • Catch early out-of-memory in memory allocator initialization.
    • Fix data-flow analysis for paths leading to an upvalue close.
    • Fix check for missing arguments in string.format().
    • Fix Solaris/x86 build (note: not a supported target).
    • Fix recording of loops with instable directions in side traces.
    • x86/x64: Fix fusion of comparisons with u8/u16 XLOAD.
    • x86/x64: Fix register allocation for variable shifts.
  • FFI library:
    • Add ffi.errno(). Save errno/GetLastError() around allocations etc.
    • Fix __gc for VLA/VLS cdata objects.
    • Fix recording of casts from 32 bit cdata pointers to integers.
    • tonumber(cdata) returns nil for non-numbers.
    • Show address pointed to for tostring(pointer).
    • Print NULL pointers as "cdata<... *>: NULL".
    • Support __tostring metamethod for pointers to structs, too.
  • Structural and performance enhancements:
    • More tuning for loop unrolling heuristics.
    • Flatten and compress in-memory debug info (saves ~70%).

LuaJIT 2.0.0-beta7 — 2011-05-05

  • New features:
    • ARM port of the LuaJIT interpreter is complete.
    • FFI library: Add ffi.gc(), ffi.metatype(), ffi.istype().
    • FFI library: Resolve ld script redirection in ffi.load().
    • From Lua 5.2: package.searchpath(), fp:read("*L"), load(string).
    • From Lua 5.2, disabled by default: empty statement, table.unpack(), modified coroutine.running().
  • Correctness and completeness:
    • FFI library: numerous fixes.
    • Fix type mismatches in store-to-load forwarding.
    • Fix error handling within metamethods.
    • Fix table.maxn().
    • Improve accuracy of x^-k on x64.
    • Fix code generation for Intel Atom in x64 mode.
    • Fix narrowing of POW.
    • Fix recording of retried fast functions.
    • Fix code generation for bit.bnot() and multiplies.
    • Fix error location within cpcall frames.
    • Add workaround for old libgcc unwind bug.
    • Fix lua_yield() and getmetatable(lightuserdata) on x64.
    • Misc. fixes for PPC/e500 interpreter.
    • Fix stack slot updates for down-recursion.
  • Structural and performance enhancements:
    • Add dual-number mode (int/double) for the VM. Enabled for ARM.
    • Improve narrowing of arithmetic operators and for loops.
    • Tune loop unrolling heuristics and increase trace recorder limits.
    • Eliminate dead slots in snapshots using bytecode data-flow analysis.
    • Avoid phantom stores to proxy tables.
    • Optimize lookups in empty proxy tables.
    • Improve bytecode optimization of and/or operators.

LuaJIT 2.0.0-beta6 — 2011-02-11

  • New features:
    • PowerPC/e500v2 port of the LuaJIT interpreter is complete.
    • Various minor features from Lua 5.2: Hex escapes in literals, '\*' escape, reversible string.format("%q",s), "%g" pattern, table.sort checks callbacks, os.exit(status|true|false[,close]).
    • Lua 5.2 __pairs and __ipairs metamethods (disabled by default).
    • Initial release of the FFI library.
  • Correctness and completeness:
    • Fix string.format() for non-finite numbers.
    • Fix memory leak when compiled to use the built-in allocator.
    • x86/x64: Fix unnecessary resize in TSETM bytecode.
    • Fix various GC issues with traces and jit.flush().
    • x64: Fix fusion of indexes for array references.
    • x86/x64: Fix stack overflow handling for coroutine results.
    • Enable low-2GB memory allocation on FreeBSD/x64.
    • Fix collectgarbage("count") result if more than 2GB is in use.
    • Fix parsing of hex floats.
    • x86/x64: Fix loop branch inversion with trailing HREF+NE/EQ.
    • Add jit.os string.
    • coroutine.create() permits running C functions, too.
    • Fix OSX build to work with newer ld64 versions.
    • Fix bytecode optimization of and/or operators.
  • Structural and performance enhancements:
    • Emit specialized bytecode for pairs()/next().
    • Improve bytecode coalescing of nil constants.
    • Compile calls to vararg functions.
    • Compile select().
    • Improve alias analysis, esp. for loads from allocations.
    • Tuning of various compiler heuristics.
    • Refactor and extend IR conversion instructions.
    • x86/x64: Various backend enhancements related to the FFI.
    • Add SPLIT pass to split 64 bit IR instructions for 32 bit CPUs.

LuaJIT 2.0.0-beta5 — 2010-08-24

  • Correctness and completeness:
    • Fix trace exit dispatch to function headers.
    • Fix Windows and OSX builds with LUAJIT_DISABLE_JIT.
    • Reorganize and fix placement of generated machine code on x64.
    • Fix TNEW in x64 interpreter.
    • Do not eliminate PHIs for values only referenced from side exits.
    • OS-independent canonicalization of strings for non-finite numbers.
    • Fix string.char() range check on x64.
    • Fix tostring() resolving within print().
    • Fix error handling for next().
    • Fix passing of constant arguments to external calls on x64.
    • Fix interpreter argument check for two-argument SSE math functions.
    • Fix C frame chain corruption caused by lua_cpcall().
    • Fix return from pcall() within active hook.
  • Structural and performance enhancements:
    • Replace on-trace GC frame syncing with interpreter exit.
    • Improve hash lookup specialization by not removing dead keys during GC.
    • Turn traces into true GC objects.
    • Avoid starting a GC cycle immediately after library init.
    • Add weak guards to improve dead-code elimination.
    • Speed up string interning.

LuaJIT 2.0.0-beta4 — 2010-03-28

  • Correctness and completeness:
    • Fix precondition for on-trace creation of table keys.
    • Fix {f()} on x64 when table is resized.
    • Fix folding of ordered comparisons with same references.
    • Fix snapshot restores for multi-result bytecodes.
    • Fix potential hang when recording bytecode with nested closures.
    • Fix recording of getmetatable(), tonumber() and bad argument types.
    • Fix SLOAD fusion across returns to lower frames.
  • Structural and performance enhancements:
    • Add array bounds check elimination. -Oabc is enabled by default.
    • More tuning for x64, e.g. smaller table objects.

LuaJIT 2.0.0-beta3 — 2010-03-07

  • LuaJIT x64 port:
    • Port integrated memory allocator to Linux/x64, Windows/x64 and OSX/x64.
    • Port interpreter and JIT compiler to x64.
    • Port DynASM to x64.
    • Many 32/64 bit cleanups in the VM.
    • Allow building the interpreter with either x87 or SSE2 arithmetics.
    • Add external unwinding and C++ exception interop (default on x64).
  • Correctness and completeness:
    • Fix constructor bytecode generation for certain conditional values.
    • Fix some cases of ordered string comparisons.
    • Fix lua_tocfunction().
    • Fix cutoff register in JMP bytecode for some conditional expressions.
    • Fix PHI marking algorithm for references from variant slots.
    • Fix package.cpath for non-default PREFIX.
    • Fix DWARF2 frame unwind information for interpreter on OSX.
    • Drive the GC forward on string allocations in the parser.
    • Implement call/return hooks (zero-cost if disabled).
    • Implement yield from C hooks.
    • Disable JIT compiler on older non-SSE2 CPUs instead of aborting.
  • Structural and performance enhancements:
    • Compile recursive code (tail-, up- and down-recursion).
    • Improve heuristics for bytecode penalties and blacklisting.
    • Split CALL/FUNC recording and clean up fast function call semantics.
    • Major redesign of internal function call handling.
    • Improve FOR loop const specialization and integerness checks.
    • Switch to pre-initialized stacks. Avoid frame-clearing.
    • Colocation of prototypes and related data: bytecode, constants, debug info.
    • Cleanup parser and streamline bytecode generation.
    • Add support for weak IR references to register allocator.
    • Switch to compressed, extensible snapshots.
    • Compile returns to frames below the start frame.
    • Improve alias analysis of upvalues using a disambiguation hash value.
    • Compile floor/ceil/trunc to SSE2 helper calls or SSE4.1 instructions.
    • Add generic C call handling to IR and backend.
    • Improve KNUM fuse vs. load heuristics.
    • Compile various io.*() functions.
    • Compile math.sinh(), math.cosh(), math.tanh() and math.random().

LuaJIT 2.0.0-beta2 — 2009-11-09

  • Reorganize build system. Build static+shared library on POSIX.
  • Allow C++ exception conversion on all platforms using a wrapper function.
  • Automatically catch C++ exceptions and rethrow Lua error (DWARF2 only).
  • Check for the correct x87 FPU precision at strategic points.
  • Always use wrappers for libm functions.
  • Resurrect metamethod name strings before copying them.
  • Mark current trace, even if compiler is idle.
  • Ensure FILE metatable is created only once.
  • Fix type comparisons when different integer types are involved.
  • Fix getmetatable() recording.
  • Fix TDUP with dead keys in template table.
  • jit.flush(tr) returns status. Prevent manual flush of a trace that's still linked.
  • Improve register allocation heuristics for invariant references.
  • Compile the push/pop variants of table.insert() and table.remove().
  • Compatibility with MSVC link /debug.
  • Fix lua_iscfunction().
  • Fix math.random() when compiled with -fpic (OSX).
  • Fix table.maxn().
  • Bump MACOSX_DEPLOYMENT_TARGET to 10.4
  • luaL_check*() and luaL_opt*() now support negative arguments, too.
    This matches the behavior of Lua 5.1, but not the specification.

LuaJIT 2.0.0-beta1 — 2009-10-31

  • This is the first public release of LuaJIT 2.0.
  • The whole VM has been rewritten from the ground up, so there's no point in listing differences over earlier versions.

tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/0000755000000000000000000000000013306562377020630 5ustar rootroottarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_asm_mips.h0000644000000000000000000023463713306562377023315 0ustar rootroot/* ** MIPS IR assembler (SSA IR -> machine code). ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h */ /* -- Register allocator extensions --------------------------------------- */ /* Allocate a register with a hint. */ static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow) { Reg r = IR(ref)->r; if (ra_noreg(r)) { if (!ra_hashint(r) && !iscrossref(as, ref)) ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */ r = ra_allocref(as, ref, allow); } ra_noweak(as, r); return r; } /* Allocate a register or RID_ZERO. */ static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow) { Reg r = IR(ref)->r; if (ra_noreg(r)) { if (!(allow & RSET_FPR) && irref_isk(ref) && get_kval(IR(ref)) == 0) return RID_ZERO; r = ra_allocref(as, ref, allow); } else { ra_noweak(as, r); } return r; } /* Allocate two source registers for three-operand instructions. */ static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow) { IRIns *irl = IR(ir->op1), *irr = IR(ir->op2); Reg left = irl->r, right = irr->r; if (ra_hasreg(left)) { ra_noweak(as, left); if (ra_noreg(right)) right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left)); else ra_noweak(as, right); } else if (ra_hasreg(right)) { ra_noweak(as, right); left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right)); } else if (ra_hashint(right)) { right = ra_alloc1z(as, ir->op2, allow); left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right)); } else { left = ra_alloc1z(as, ir->op1, allow); right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left)); } return left | (right << 8); } /* -- Guard handling ------------------------------------------------------ */ /* Need some spare long-range jump slots, for out-of-range branches. 
*/ #define MIPS_SPAREJUMP 4 /* Setup spare long-range jump slots per mcarea. */ static void asm_sparejump_setup(ASMState *as) { MCode *mxp = as->mcbot; /* Assumes sizeof(MCLink) == 8. */ if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == 8) { lua_assert(MIPSI_NOP == 0); memset(mxp+2, 0, MIPS_SPAREJUMP*8); mxp += MIPS_SPAREJUMP*2; lua_assert(mxp < as->mctop); lj_mcode_sync(as->mcbot, mxp); lj_mcode_commitbot(as->J, mxp); as->mcbot = mxp; as->mclim = as->mcbot + MCLIM_REDZONE; } } /* Setup exit stub after the end of each trace. */ static void asm_exitstub_setup(ASMState *as) { MCode *mxp = as->mctop; /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */ *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno; *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu); lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0); *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0; as->mctop = mxp; } /* Keep this in-sync with exitstub_trace_addr(). */ #define asm_exitstub_addr(as) ((as)->mctop) /* Emit conditional branch to exit for guard. */ static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt) { MCode *target = asm_exitstub_addr(as); MCode *p = as->mcp; if (LJ_UNLIKELY(p == as->invmcp)) { as->invmcp = NULL; as->loopinv = 1; as->mcp = p+1; mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */ target = p; /* Patch target later in asm_loop_fixup. */ } emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); emit_branch(as, mi, rs, rt, target); } /* -- Operand fusion ------------------------------------------------------ */ /* Limit linear search to this distance. Avoids O(n^2) behavior. */ #define CONFLICT_SEARCH_LIM 31 /* Check if there's no conflicting instruction between curins and ref. */ static int noconflict(ASMState *as, IRRef ref, IROp conflict) { IRIns *ir = as->ir; IRRef i = as->curins; if (i > ref + CONFLICT_SEARCH_LIM) return 0; /* Give up, ref is too far away. 
*/ while (--i > ref) if (ir[i].o == conflict) return 0; /* Conflict found. */ return 1; /* Ok, no conflict. */ } /* Fuse the array base of colocated arrays. */ static int32_t asm_fuseabase(ASMState *as, IRRef ref) { IRIns *ir = IR(ref); if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE && !neverfuse(as) && noconflict(as, ref, IR_NEWREF)) return (int32_t)sizeof(GCtab); return 0; } /* Fuse array/hash/upvalue reference into register+offset operand. */ static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow) { IRIns *ir = IR(ref); if (ra_noreg(ir->r)) { if (ir->o == IR_AREF) { if (mayfuse(as, ref)) { if (irref_isk(ir->op2)) { IRRef tab = IR(ir->op1)->op1; int32_t ofs = asm_fuseabase(as, tab); IRRef refa = ofs ? tab : ir->op1; ofs += 8*IR(ir->op2)->i; if (checki16(ofs)) { *ofsp = ofs; return ra_alloc1(as, refa, allow); } } } } else if (ir->o == IR_HREFK) { if (mayfuse(as, ref)) { int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node)); if (checki16(ofs)) { *ofsp = ofs; return ra_alloc1(as, ir->op1, allow); } } } else if (ir->o == IR_UREFC) { if (irref_isk(ir->op1)) { GCfunc *fn = ir_kfunc(IR(ir->op1)); intptr_t ofs = (intptr_t)&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv; intptr_t jgl = (intptr_t)J2G(as->J); if ((uintptr_t)(ofs-jgl) < 65536) { *ofsp = ofs-jgl-32768; return RID_JGL; } else { *ofsp = (int16_t)ofs; return ra_allock(as, ofs-(int16_t)ofs, allow); } } } } *ofsp = 0; return ra_alloc1(as, ref, allow); } /* Fuse XLOAD/XSTORE reference into load/store operand. 
*/ static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref, RegSet allow, int32_t ofs) { IRIns *ir = IR(ref); Reg base; if (ra_noreg(ir->r) && canfuse(as, ir)) { if (ir->o == IR_ADD) { intptr_t ofs2; if (irref_isk(ir->op2) && (ofs2 = ofs + get_kval(IR(ir->op2)), checki16(ofs2))) { ref = ir->op1; ofs = (int32_t)ofs2; } } else if (ir->o == IR_STRREF) { intptr_t ofs2 = 65536; lua_assert(ofs == 0); ofs = (int32_t)sizeof(GCstr); if (irref_isk(ir->op2)) { ofs2 = ofs + get_kval(IR(ir->op2)); ref = ir->op1; } else if (irref_isk(ir->op1)) { ofs2 = ofs + get_kval(IR(ir->op1)); ref = ir->op2; } if (!checki16(ofs2)) { /* NYI: Fuse ADD with constant. */ Reg right, left = ra_alloc2(as, ir, allow); right = (left >> 8); left &= 255; emit_hsi(as, mi, rt, RID_TMP, ofs); emit_dst(as, MIPSI_AADDU, RID_TMP, left, right); return; } ofs = ofs2; } } base = ra_alloc1(as, ref, allow); emit_hsi(as, mi, rt, base, ofs); } /* -- Calls --------------------------------------------------------------- */ /* Generate a call to a C function. */ static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args) { uint32_t n, nargs = CCI_XNARGS(ci); int32_t ofs = LJ_32 ? 16 : 0; #if LJ_SOFTFP Reg gpr = REGARG_FIRSTGPR; #else Reg gpr, fpr = REGARG_FIRSTFPR; #endif if ((void *)ci->func) emit_call(as, (void *)ci->func, 1); #if !LJ_SOFTFP for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++) as->cost[gpr] = REGCOST(~0u, ASMREF_L); gpr = REGARG_FIRSTGPR; #endif for (n = 0; n < nargs; n++) { /* Setup args. */ IRRef ref = args[n]; if (ref) { IRIns *ir = IR(ref); #if !LJ_SOFTFP if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR && !(ci->flags & CCI_VARARG)) { lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */ ra_leftov(as, fpr, ref); fpr += LJ_32 ? 2 : 1; gpr += (LJ_32 && irt_isnum(ir->t)) ? 
2 : 1; } else #endif { #if LJ_32 && !LJ_SOFTFP fpr = REGARG_LASTFPR+1; #endif if (LJ_32 && irt_isnum(ir->t)) gpr = (gpr+1) & ~1; if (gpr <= REGARG_LASTGPR) { lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */ #if !LJ_SOFTFP if (irt_isfp(ir->t)) { RegSet of = as->freeset; Reg r; /* Workaround to protect argument GPRs from being used for remat. */ as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1); r = ra_alloc1(as, ref, RSET_FPR); as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1)); if (irt_isnum(ir->t)) { #if LJ_32 emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1); emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r); lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */ gpr += 2; #else emit_tg(as, MIPSI_DMFC1, gpr, r); gpr++; fpr++; #endif } else if (irt_isfloat(ir->t)) { emit_tg(as, MIPSI_MFC1, gpr, r); gpr++; #if LJ_64 fpr++; #endif } } else #endif { ra_leftov(as, gpr, ref); gpr++; #if LJ_64 fpr++; #endif } } else { Reg r = ra_alloc1z(as, ref, !LJ_SOFTFP && irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); #if LJ_32 if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4; emit_spstore(as, ir, r, ofs); ofs += irt_isnum(ir->t) ? 8 : 4; #else emit_spstore(as, ir, r, ofs + ((LJ_BE && (LJ_SOFTFP || r < RID_MAX_GPR) && !irt_is64(ir->t)) ? 4 : 0)); ofs += 8; #endif } } } else { #if !LJ_SOFTFP fpr = REGARG_LASTFPR+1; #endif if (gpr <= REGARG_LASTGPR) { gpr++; #if LJ_64 fpr++; #endif } else { ofs += LJ_32 ? 4 : 8; } } checkmclim(as); } } /* Setup result reg/sp for call. Evict scratch regs. */ static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci) { RegSet drop = RSET_SCRATCH; #if LJ_32 int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t)); #endif #if !LJ_SOFTFP if ((ci->flags & CCI_NOFPRCLOBBER)) drop &= ~RSET_FPR; #endif if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); /* Dest reg handled below. */ #if LJ_32 if (hiop && ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r); /* Dest reg handled below. 
*/ #endif ra_evictset(as, drop); /* Evictions must be performed first. */ if (ra_used(ir)) { lua_assert(!irt_ispri(ir->t)); if (!LJ_SOFTFP && irt_isfp(ir->t)) { if ((ci->flags & CCI_CASTU64)) { int32_t ofs = sps_scale(ir->s); Reg dest = ir->r; if (ra_hasreg(dest)) { ra_free(as, dest); ra_modified(as, dest); #if LJ_32 emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1); emit_tg(as, MIPSI_MTC1, RID_RETLO, dest); #else emit_tg(as, MIPSI_DMTC1, RID_RET, dest); #endif } if (ofs) { #if LJ_32 emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0)); emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4)); #else emit_tsi(as, MIPSI_SD, RID_RET, RID_SP, ofs); #endif } } else { ra_destreg(as, ir, RID_FPRET); } #if LJ_32 } else if (hiop) { ra_destpair(as, ir); #endif } else { ra_destreg(as, ir, RID_RET); } } } static void asm_callx(ASMState *as, IRIns *ir) { IRRef args[CCI_NARGS_MAX*2]; CCallInfo ci; IRRef func; IRIns *irf; ci.flags = asm_callx_flags(as, ir); asm_collectargs(as, ir, &ci, args); asm_setupresult(as, ir, &ci); func = ir->op2; irf = IR(func); if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); } if (irref_isk(func)) { /* Call to constant address. */ ci.func = (ASMFunction)(void *)get_kval(irf); } else { /* Need specific register for indirect calls. */ Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR)); MCode *p = as->mcp; if (r == RID_CFUNCADDR) *--p = MIPSI_NOP; else *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r); *--p = MIPSI_JALR | MIPSF_S(r); as->mcp = p; ci.func = (ASMFunction)(void *)0; } asm_gencall(as, &ci, args); } #if !LJ_SOFTFP static void asm_callround(ASMState *as, IRIns *ir, IRCallID id) { /* The modified regs must match with the *.dasc implementation. 
*/ RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)| RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR); if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); ra_evictset(as, drop); ra_destreg(as, ir, RID_FPRET); emit_call(as, (void *)lj_ir_callinfo[id].func, 0); ra_leftov(as, REGARG_FIRSTFPR, ir->op1); } #endif /* -- Returns ------------------------------------------------------------- */ /* Return to lower frame. Guard that it goes to the right spot. */ static void asm_retf(ASMState *as, IRIns *ir) { Reg base = ra_alloc1(as, REF_BASE, RSET_GPR); void *pc = ir_kptr(IR(ir->op2)); int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1)); as->topslot -= (BCReg)delta; if ((int32_t)as->topslot < 0) as->topslot = 0; irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */ emit_setgl(as, base, jit_base); emit_addptr(as, base, -8*delta); asm_guard(as, MIPSI_BNE, RID_TMP, ra_allock(as, igcptr(pc), rset_exclude(RSET_GPR, base))); emit_tsi(as, MIPSI_AL, RID_TMP, base, -8); } /* -- Type conversions ---------------------------------------------------- */ #if !LJ_SOFTFP static void asm_tointg(ASMState *as, IRIns *ir, Reg left) { Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); Reg dest = ra_dest(as, ir, RSET_GPR); asm_guard(as, MIPSI_BC1F, 0, 0); emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left); emit_fg(as, MIPSI_CVT_D_W, tmp, tmp); emit_tg(as, MIPSI_MFC1, dest, tmp); emit_fg(as, MIPSI_CVT_W_D, tmp, left); } static void asm_tobit(ASMState *as, IRIns *ir) { RegSet allow = RSET_FPR; Reg dest = ra_dest(as, ir, RSET_GPR); Reg left = ra_alloc1(as, ir->op1, allow); Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left)); Reg tmp = ra_scratch(as, rset_clear(allow, right)); emit_tg(as, MIPSI_MFC1, dest, tmp); emit_fgh(as, MIPSI_ADD_D, tmp, left, right); } #endif static void asm_conv(ASMState *as, IRIns *ir) { IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK); #if !LJ_SOFTFP int stfp = (st == IRT_NUM || st == IRT_FLOAT); #endif #if LJ_64 int 
st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64); #endif IRRef lref = ir->op1; #if LJ_32 lua_assert(!(irt_isint64(ir->t) || (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */ #endif #if LJ_32 && LJ_SOFTFP /* FP conversions are handled by SPLIT. */ lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT)); /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */ #else lua_assert(irt_type(ir->t) != st); if (irt_isfp(ir->t)) { Reg dest = ra_dest(as, ir, RSET_FPR); if (stfp) { /* FP to FP conversion. */ emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S, dest, ra_alloc1(as, lref, RSET_FPR)); } else if (st == IRT_U32) { /* U32 to FP conversion. */ /* y = (x ^ 0x8000000) + 2147483648.0 */ Reg left = ra_alloc1(as, lref, RSET_GPR); Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest)); if (irt_isfloat(ir->t)) emit_fg(as, MIPSI_CVT_S_D, dest, dest); /* Must perform arithmetic with doubles to keep the precision. */ emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp); emit_fg(as, MIPSI_CVT_D_W, dest, dest); emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR); emit_tg(as, MIPSI_MTC1, RID_TMP, dest); emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left); emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); #if LJ_64 } else if(st == IRT_U64) { /* U64 to FP conversion. 
*/ /* if (x >= 1u<<63) y = (double)(int64_t)(x&(1u<<63)-1) + pow(2.0, 63) */ Reg left = ra_alloc1(as, lref, RSET_GPR); Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest)); MCLabel l_end = emit_label(as); if (irt_isfloat(ir->t)) { emit_fgh(as, MIPSI_ADD_S, dest, dest, tmp); emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P63], rset_exclude(RSET_GPR, left)); emit_fg(as, MIPSI_CVT_S_L, dest, dest); } else { emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp); emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P63], rset_exclude(RSET_GPR, left)); emit_fg(as, MIPSI_CVT_D_L, dest, dest); } emit_branch(as, MIPSI_BGEZ, left, RID_ZERO, l_end); emit_tg(as, MIPSI_DMTC1, RID_TMP, dest); emit_tsml(as, MIPSI_DEXTM, RID_TMP, left, 30, 0); #endif } else { /* Integer to FP conversion. */ Reg left = ra_alloc1(as, lref, RSET_GPR); #if LJ_32 emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W, dest, dest); emit_tg(as, MIPSI_MTC1, left, dest); #else MIPSIns mi = irt_isfloat(ir->t) ? (st64 ? MIPSI_CVT_S_L : MIPSI_CVT_S_W) : (st64 ? MIPSI_CVT_D_L : MIPSI_CVT_D_W); emit_fg(as, mi, dest, dest); emit_tg(as, st64 ? MIPSI_DMTC1 : MIPSI_MTC1, left, dest); #endif } } else if (stfp) { /* FP to integer conversion. */ if (irt_isguard(ir->t)) { /* Checked conversions are only supported from number to int. */ lua_assert(irt_isint(ir->t) && st == IRT_NUM); asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR)); } else { Reg dest = ra_dest(as, ir, RSET_GPR); Reg left = ra_alloc1(as, lref, RSET_FPR); Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left)); if (irt_isu32(ir->t)) { /* FP to U32 conversion. */ /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */ emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP); emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000); emit_tg(as, MIPSI_MFC1, dest, tmp); emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D, tmp, tmp); emit_fgh(as, st == IRT_FLOAT ? 
MIPSI_SUB_S : MIPSI_SUB_D, tmp, left, tmp); if (st == IRT_FLOAT) emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P31], RSET_GPR); else emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P31], RSET_GPR); #if LJ_64 } else if (irt_isu64(ir->t)) { /* FP to U64 conversion. */ MCLabel l_end; emit_tg(as, MIPSI_DMFC1, dest, tmp); l_end = emit_label(as); /* For inputs >= 2^63 add -2^64 and convert again. */ if (st == IRT_NUM) { emit_fg(as, MIPSI_TRUNC_L_D, tmp, tmp); emit_fgh(as, MIPSI_ADD_D, tmp, left, tmp); emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_M2P64], rset_exclude(RSET_GPR, dest)); emit_fg(as, MIPSI_TRUNC_L_D, tmp, left); /* Delay slot. */ emit_branch(as, MIPSI_BC1T, 0, 0, l_end); emit_fgh(as, MIPSI_C_OLT_D, 0, left, tmp); emit_lsptr(as, MIPSI_LDC1, (tmp & 31), (void *)&as->J->k64[LJ_K64_2P63], rset_exclude(RSET_GPR, dest)); } else { emit_fg(as, MIPSI_TRUNC_L_S, tmp, tmp); emit_fgh(as, MIPSI_ADD_S, tmp, left, tmp); emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_M2P64], rset_exclude(RSET_GPR, dest)); emit_fg(as, MIPSI_TRUNC_L_S, tmp, left); /* Delay slot. */ emit_branch(as, MIPSI_BC1T, 0, 0, l_end); emit_fgh(as, MIPSI_C_OLT_S, 0, left, tmp); emit_lsptr(as, MIPSI_LWC1, (tmp & 31), (void *)&as->J->k32[LJ_K32_2P63], rset_exclude(RSET_GPR, dest)); } #endif } else { #if LJ_32 emit_tg(as, MIPSI_MFC1, dest, tmp); emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D, tmp, left); #else MIPSIns mi = irt_is64(ir->t) ? (st == IRT_NUM ? MIPSI_TRUNC_L_D : MIPSI_TRUNC_L_S) : (st == IRT_NUM ? MIPSI_TRUNC_W_D : MIPSI_TRUNC_W_S); emit_tg(as, irt_is64(ir->t) ? MIPSI_DMFC1 : MIPSI_MFC1, dest, left); emit_fg(as, mi, left, left); #endif } } } else #endif { Reg dest = ra_dest(as, ir, RSET_GPR); if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. 
*/ Reg left = ra_alloc1(as, ir->op1, RSET_GPR); lua_assert(irt_isint(ir->t) || irt_isu32(ir->t)); if ((ir->op2 & IRCONV_SEXT)) { if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) { emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left); } else { uint32_t shift = st == IRT_I8 ? 24 : 16; emit_dta(as, MIPSI_SRA, dest, dest, shift); emit_dta(as, MIPSI_SLL, dest, left, shift); } } else { emit_tsi(as, MIPSI_ANDI, dest, left, (int32_t)(st == IRT_U8 ? 0xff : 0xffff)); } } else { /* 32/64 bit integer conversions. */ #if LJ_32 /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */ ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */ #else if (irt_is64(ir->t)) { if (st64) { /* 64/64 bit no-op (cast)*/ ra_leftov(as, dest, lref); } else { Reg left = ra_alloc1(as, lref, RSET_GPR); if ((ir->op2 & IRCONV_SEXT)) { /* 32 to 64 bit sign extension. */ emit_dta(as, MIPSI_SLL, dest, left, 0); } else { /* 32 to 64 bit zero extension. */ emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0); } } } else { if (st64) { /* This is either a 32 bit reg/reg mov which zeroes the hiword ** or a load of the loword from a 64 bit address. */ Reg left = ra_alloc1(as, lref, RSET_GPR); emit_tsml(as, MIPSI_DEXT, dest, left, 31, 0); } else { /* 32/32 bit no-op (cast). */ /* Do nothing, but may need to move regs. 
*/ ra_leftov(as, dest, lref); } } #endif } } } static void asm_strto(ASMState *as, IRIns *ir) { const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num]; IRRef args[2]; int32_t ofs = 0; #if LJ_SOFTFP ra_evictset(as, RSET_SCRATCH); if (ra_used(ir)) { if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) && (ir->s & 1) == LJ_BE && (ir->s ^ 1) == (ir+1)->s) { int i; for (i = 0; i < 2; i++) { Reg r = (ir+i)->r; if (ra_hasreg(r)) { ra_free(as, r); ra_modified(as, r); emit_spload(as, ir+i, r, sps_scale((ir+i)->s)); } } ofs = sps_scale(ir->s & ~1); } else { Reg rhi = ra_dest(as, ir+1, RSET_GPR); Reg rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi)); emit_tsi(as, MIPSI_LW, rhi, RID_SP, ofs+(LJ_BE?0:4)); emit_tsi(as, MIPSI_LW, rlo, RID_SP, ofs+(LJ_BE?4:0)); } } #else RegSet drop = RSET_SCRATCH; if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */ ra_evictset(as, drop); ofs = sps_scale(ir->s); #endif asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */ args[0] = ir->op1; /* GCstr *str */ args[1] = ASMREF_TMP1; /* TValue *n */ asm_gencall(as, ci, args); /* Store the result to the spill slot or temp slots. */ emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs); } /* -- Memory references --------------------------------------------------- */ #if LJ_64 /* Store tagged value for ref at base+ofs. 
*/ static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref) { RegSet allow = rset_exclude(RSET_GPR, base); IRIns *ir = IR(ref); lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); if (irref_isk(ref)) { TValue k; lj_ir_kvalue(as->J->L, &k, ir); emit_tsi(as, MIPSI_SD, ra_allock(as, (int64_t)k.u64, allow), base, ofs); } else { Reg src = ra_alloc1(as, ref, allow); Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, rset_exclude(allow, src)); emit_tsi(as, MIPSI_SD, RID_TMP, base, ofs); if (irt_isinteger(ir->t)) { emit_dst(as, MIPSI_DADDU, RID_TMP, RID_TMP, type); emit_tsml(as, MIPSI_DEXT, RID_TMP, src, 31, 0); } else { emit_dst(as, MIPSI_DADDU, RID_TMP, src, type); } } } #endif /* Get pointer to TValue. */ static void asm_tvptr(ASMState *as, Reg dest, IRRef ref) { IRIns *ir = IR(ref); if (irt_isnum(ir->t)) { if (irref_isk(ref)) /* Use the number constant itself as a TValue. */ ra_allockreg(as, igcptr(ir_knum(ir)), dest); else /* Otherwise force a spill and use the spill slot. */ emit_tsi(as, MIPSI_AADDIU, dest, RID_SP, ra_spill(as, ir)); } else { /* Otherwise use g->tmptv to hold the TValue. */ #if LJ_32 RegSet allow = rset_exclude(RSET_GPR, dest); Reg type; emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, (int32_t)(offsetof(global_State, tmptv)-32768)); if (!irt_ispri(ir->t)) { Reg src = ra_alloc1(as, ref, allow); emit_setgl(as, src, tmptv.gcr); } if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) type = ra_alloc1(as, ref+1, allow); else type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); emit_setgl(as, type, tmptv.it); #else asm_tvstore64(as, dest, 0, ref); emit_tsi(as, MIPSI_DADDIU, dest, RID_JGL, (int32_t)(offsetof(global_State, tmptv)-32768)); #endif } } static void asm_aref(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg idx, base; if (irref_isk(ir->op2)) { IRRef tab = IR(ir->op1)->op1; int32_t ofs = asm_fuseabase(as, tab); IRRef refa = ofs ? 
tab : ir->op1; ofs += 8*IR(ir->op2)->i; if (checki16(ofs)) { base = ra_alloc1(as, refa, RSET_GPR); emit_tsi(as, MIPSI_AADDIU, dest, base, ofs); return; } } base = ra_alloc1(as, ir->op1, RSET_GPR); idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base)); emit_dst(as, MIPSI_AADDU, dest, RID_TMP, base); emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3); } /* Inlined hash lookup. Specialized for key type and for const keys. ** The equivalent C code is: ** Node *n = hashkey(t, key); ** do { ** if (lj_obj_equal(&n->key, key)) return &n->val; ** } while ((n = nextnode(n))); ** return niltv(L); */ static void asm_href(ASMState *as, IRIns *ir, IROp merge) { RegSet allow = RSET_GPR; int destused = ra_used(ir); Reg dest = ra_dest(as, ir, allow); Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest)); Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2; IRRef refkey = ir->op2; IRIns *irkey = IR(refkey); int isk = irref_isk(refkey); IRType1 kt = irkey->t; uint32_t khash; MCLabel l_end, l_loop, l_next; rset_clear(allow, tab); #if LJ_32 && LJ_SOFTFP if (!isk) { key = ra_alloc1(as, refkey, allow); rset_clear(allow, key); if (irkey[1].o == IR_HIOP) { if (ra_hasreg((irkey+1)->r)) { type = tmpnum = (irkey+1)->r; tmp1 = ra_scratch(as, allow); rset_clear(allow, tmp1); ra_noweak(as, tmpnum); } else { type = tmpnum = ra_allocref(as, refkey+1, allow); } rset_clear(allow, tmpnum); } else { type = ra_allock(as, (int32_t)irt_toitype(irkey->t), allow); rset_clear(allow, type); } } #else if (irt_isnum(kt)) { key = ra_alloc1(as, refkey, RSET_FPR); tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key)); } else if (!irt_ispri(kt)) { key = ra_alloc1(as, refkey, allow); rset_clear(allow, key); #if LJ_32 type = ra_allock(as, (int32_t)irt_toitype(irkey->t), allow); rset_clear(allow, type); #endif } #endif tmp2 = ra_scratch(as, allow); rset_clear(allow, tmp2); /* Key not found in chain: jump to exit (if merged) or load niltv. 
*/ l_end = emit_label(as); as->invmcp = NULL; if (merge == IR_NE) asm_guard(as, MIPSI_B, RID_ZERO, RID_ZERO); else if (destused) emit_loada(as, dest, niltvg(J2G(as->J))); /* Follow hash chain until the end. */ emit_move(as, dest, tmp1); l_loop = --as->mcp; emit_tsi(as, MIPSI_AL, tmp1, dest, (int32_t)offsetof(Node, next)); l_next = emit_label(as); /* Type and value comparison. */ if (merge == IR_EQ) { /* Must match asm_guard(). */ emit_ti(as, MIPSI_LI, RID_TMP, as->snapno); l_end = asm_exitstub_addr(as); } if (!LJ_SOFTFP && irt_isnum(kt)) { emit_branch(as, MIPSI_BC1T, 0, 0, l_end); emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key); *--as->mcp = MIPSI_NOP; /* Avoid NaN comparison overhead. */ emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next); emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM); #if LJ_32 emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n)); } else { if (irt_ispri(kt)) { emit_branch(as, MIPSI_BEQ, tmp1, type, l_end); } else { emit_branch(as, MIPSI_BEQ, tmp2, key, l_end); emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr)); emit_branch(as, MIPSI_BNE, tmp1, type, l_next); } } emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it)); *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu); #else emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 15); emit_tg(as, MIPSI_DMTC1, tmp1, tmpnum); emit_tsi(as, MIPSI_LD, tmp1, dest, (int32_t)offsetof(Node, key.u64)); } else if (irt_isaddr(kt)) { Reg refk = tmp2; if (isk) { int64_t k = ((int64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64; refk = ra_allock(as, k, allow); rset_clear(allow, refk); } emit_branch(as, MIPSI_BEQ, tmp1, refk, l_end); emit_tsi(as, MIPSI_LD, tmp1, dest, offsetof(Node, key)); } else { Reg pri = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow); rset_clear(allow, pri); lua_assert(irt_ispri(kt) && !irt_isnil(kt)); emit_branch(as, MIPSI_BEQ, tmp1, pri, l_end); emit_tsi(as, MIPSI_LD, tmp1, dest, offsetof(Node, key)); } *l_loop = 
MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu); if (!isk && irt_isaddr(kt)) { type = ra_allock(as, (int64_t)irt_toitype(kt) << 47, allow); emit_dst(as, MIPSI_DADDU, tmp2, key, type); rset_clear(allow, type); } #endif /* Load main position relative to tab->node into dest. */ khash = isk ? ir_khash(irkey) : 1; if (khash == 0) { emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node)); } else { Reg tmphash = tmp1; if (isk) tmphash = ra_allock(as, khash, allow); emit_dst(as, MIPSI_AADDU, dest, dest, tmp1); lua_assert(sizeof(Node) == 24); emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1); emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3); emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5); emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash); emit_tsi(as, MIPSI_AL, dest, tab, (int32_t)offsetof(GCtab, node)); emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask)); if (isk) { /* Nothing to do. */ } else if (irt_isstr(kt)) { emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash)); } else { /* Must match with hash*() in lj_tab.c. */ emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2); emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31); emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2); emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31); emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest); #if LJ_32 if (LJ_SOFTFP ? 
(irkey[1].o == IR_HIOP) : irt_isnum(kt)) { emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1); if ((as->flags & JIT_F_MIPSXXR2)) { emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31); } else { emit_dst(as, MIPSI_OR, dest, dest, tmp1); emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1); emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31); } emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1); #if LJ_SOFTFP emit_ds(as, MIPSI_MOVE, tmp1, type); emit_ds(as, MIPSI_MOVE, tmp2, key); #else emit_tg(as, MIPSI_MFC1, tmp2, key); emit_tg(as, MIPSI_MFC1, tmp1, key+1); #endif } else { emit_dst(as, MIPSI_XOR, tmp2, key, tmp1); emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31); emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow)); } #else emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1); emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31); if (irt_isnum(kt)) { emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1); emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 0); emit_dta(as, MIPSI_SLL, tmp2, LJ_SOFTFP ? key : tmp1, 0); #if !LJ_SOFTFP emit_tg(as, MIPSI_DMFC1, tmp1, key); #endif } else { checkmclim(as); emit_dta(as, MIPSI_DSRA32, tmp1, tmp1, 0); emit_dta(as, MIPSI_SLL, tmp2, key, 0); emit_dst(as, MIPSI_DADDU, tmp1, key, type); } #endif } } } static void asm_hrefk(ASMState *as, IRIns *ir) { IRIns *kslot = IR(ir->op2); IRIns *irkey = IR(kslot->op1); int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node)); int32_t kofs = ofs + (int32_t)offsetof(Node, key); Reg dest = (ra_used(ir)||ofs > 32736) ? 
ra_dest(as, ir, RSET_GPR) : RID_NONE; Reg node = ra_alloc1(as, ir->op1, RSET_GPR); RegSet allow = rset_exclude(RSET_GPR, node); Reg idx = node; #if LJ_32 Reg key = RID_NONE, type = RID_TMP; int32_t lo, hi; #else Reg key = ra_scratch(as, allow); int64_t k; #endif lua_assert(ofs % sizeof(Node) == 0); if (ofs > 32736) { idx = dest; rset_clear(allow, dest); kofs = (int32_t)offsetof(Node, key); } else if (ra_hasreg(dest)) { emit_tsi(as, MIPSI_AADDIU, dest, node, ofs); } #if LJ_32 if (!irt_ispri(irkey->t)) { key = ra_scratch(as, allow); rset_clear(allow, key); } if (irt_isnum(irkey->t)) { lo = (int32_t)ir_knum(irkey)->u32.lo; hi = (int32_t)ir_knum(irkey)->u32.hi; } else { lo = irkey->i; hi = irt_toitype(irkey->t); if (!ra_hasreg(key)) goto nolo; } asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO); nolo: asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO); if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0)); emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4)); #else if (irt_ispri(irkey->t)) { lua_assert(!irt_isnil(irkey->t)); k = ~((int64_t)~irt_toitype(irkey->t) << 47); } else if (irt_isnum(irkey->t)) { k = (int64_t)ir_knum(irkey)->u64; } else { k = ((int64_t)irt_toitype(irkey->t) << 47) | (int64_t)ir_kgc(irkey); } asm_guard(as, MIPSI_BNE, key, ra_allock(as, k, allow)); emit_tsi(as, MIPSI_LD, key, idx, kofs); #endif if (ofs > 32736) emit_tsi(as, MIPSI_AADDU, dest, node, ra_allock(as, ofs, allow)); } static void asm_uref(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); if (irref_isk(ir->op1)) { GCfunc *fn = ir_kfunc(IR(ir->op1)); MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v; emit_lsptr(as, MIPSI_AL, dest, v, RSET_GPR); } else { Reg uv = ra_scratch(as, RSET_GPR); Reg func = ra_alloc1(as, ir->op1, RSET_GPR); if (ir->o == IR_UREFC) { asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); emit_tsi(as, MIPSI_AADDIU, dest, uv, (int32_t)offsetof(GCupval, tv)); emit_tsi(as, MIPSI_LBU, RID_TMP, uv, 
(int32_t)offsetof(GCupval, closed)); } else { emit_tsi(as, MIPSI_AL, dest, uv, (int32_t)offsetof(GCupval, v)); } emit_tsi(as, MIPSI_AL, uv, func, (int32_t)offsetof(GCfuncL, uvptr) + (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8)); } } static void asm_fref(ASMState *as, IRIns *ir) { UNUSED(as); UNUSED(ir); lua_assert(!ra_used(ir)); } static void asm_strref(ASMState *as, IRIns *ir) { #if LJ_32 Reg dest = ra_dest(as, ir, RSET_GPR); IRRef ref = ir->op2, refk = ir->op1; int32_t ofs = (int32_t)sizeof(GCstr); Reg r; if (irref_isk(ref)) { IRRef tmp = refk; refk = ref; ref = tmp; } else if (!irref_isk(refk)) { Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); IRIns *irr = IR(ir->op2); if (ra_hasreg(irr->r)) { ra_noweak(as, irr->r); right = irr->r; } else if (mayfuse(as, irr->op2) && irr->o == IR_ADD && irref_isk(irr->op2) && checki16(ofs + IR(irr->op2)->i)) { ofs += IR(irr->op2)->i; right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left)); } else { right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left)); } emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs); emit_dst(as, MIPSI_ADDU, dest, left, right); return; } r = ra_alloc1(as, ref, RSET_GPR); ofs += IR(refk)->i; if (checki16(ofs)) emit_tsi(as, MIPSI_ADDIU, dest, r, ofs); else emit_dst(as, MIPSI_ADDU, dest, r, ra_allock(as, ofs, rset_exclude(RSET_GPR, r))); #else RegSet allow = RSET_GPR; Reg dest = ra_dest(as, ir, allow); Reg base = ra_alloc1(as, ir->op1, allow); IRIns *irr = IR(ir->op2); int32_t ofs = sizeof(GCstr); rset_clear(allow, base); if (irref_isk(ir->op2) && checki16(ofs + irr->i)) { emit_tsi(as, MIPSI_DADDIU, dest, base, ofs + irr->i); } else { emit_tsi(as, MIPSI_DADDIU, dest, dest, ofs); emit_dst(as, MIPSI_DADDU, dest, base, ra_alloc1(as, ir->op2, allow)); } #endif } /* -- Loads and stores ---------------------------------------------------- */ static MIPSIns asm_fxloadins(IRIns *ir) { switch (irt_type(ir->t)) { case IRT_I8: return MIPSI_LB; case IRT_U8: return MIPSI_LBU; case IRT_I16: return MIPSI_LH; 
case IRT_U16: return MIPSI_LHU; case IRT_NUM: lua_assert(!LJ_SOFTFP); return MIPSI_LDC1; case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_LWC1; default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_LD : MIPSI_LW; } } static MIPSIns asm_fxstoreins(IRIns *ir) { switch (irt_type(ir->t)) { case IRT_I8: case IRT_U8: return MIPSI_SB; case IRT_I16: case IRT_U16: return MIPSI_SH; case IRT_NUM: lua_assert(!LJ_SOFTFP); return MIPSI_SDC1; case IRT_FLOAT: if (!LJ_SOFTFP) return MIPSI_SWC1; default: return (LJ_64 && irt_is64(ir->t)) ? MIPSI_SD : MIPSI_SW; } } static void asm_fload(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); MIPSIns mi = asm_fxloadins(ir); Reg idx; int32_t ofs; if (ir->op1 == REF_NIL) { idx = RID_JGL; ofs = (ir->op2 << 2) - 32768 - GG_OFS(g); } else { idx = ra_alloc1(as, ir->op1, RSET_GPR); if (ir->op2 == IRFL_TAB_ARRAY) { ofs = asm_fuseabase(as, ir->op1); if (ofs) { /* Turn the t->array load into an add for colocated arrays. */ emit_tsi(as, MIPSI_AADDIU, dest, idx, ofs); return; } } ofs = field_ofs[ir->op2]; } lua_assert(!irt_isfp(ir->t)); emit_tsi(as, mi, dest, idx, ofs); } static void asm_fstore(ASMState *as, IRIns *ir) { if (ir->r != RID_SINK) { Reg src = ra_alloc1z(as, ir->op2, RSET_GPR); IRIns *irf = IR(ir->op1); Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src)); int32_t ofs = field_ofs[irf->op2]; MIPSIns mi = asm_fxstoreins(ir); lua_assert(!irt_isfp(ir->t)); emit_tsi(as, mi, src, idx, ofs); } } static void asm_xload(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR); lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED)); asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0); } static void asm_xstore_(ASMState *as, IRIns *ir, int32_t ofs) { if (ir->r != RID_SINK) { Reg src = ra_alloc1z(as, ir->op2, (!LJ_SOFTFP && irt_isfp(ir->t)) ? 
RSET_FPR : RSET_GPR); asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1, rset_exclude(RSET_GPR, src), ofs); } } #define asm_xstore(as, ir) asm_xstore_(as, ir, 0) static void asm_ahuvload(ASMState *as, IRIns *ir) { int hiop = (LJ_32 && LJ_SOFTFP && (ir+1)->o == IR_HIOP); Reg dest = RID_NONE, type = RID_TMP, idx; RegSet allow = RSET_GPR; int32_t ofs = 0; IRType1 t = ir->t; if (hiop) { t.irt = IRT_NUM; if (ra_used(ir+1)) { type = ra_dest(as, ir+1, allow); rset_clear(allow, type); } } if (ra_used(ir)) { lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) || irt_isint(ir->t) || irt_isaddr(ir->t)); dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); rset_clear(allow, dest); #if LJ_64 if (irt_isaddr(t)) emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0); else if (irt_isint(t)) emit_dta(as, MIPSI_SLL, dest, dest, 0); #endif } idx = asm_fuseahuref(as, ir->op1, &ofs, allow); rset_clear(allow, idx); if (irt_isnum(t)) { asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM); } else { asm_guard(as, MIPSI_BNE, type, ra_allock(as, (int32_t)irt_toitype(t), allow)); } #if LJ_32 if (ra_hasreg(dest)) { if (!LJ_SOFTFP && irt_isnum(t)) emit_hsi(as, MIPSI_LDC1, dest, idx, ofs); else emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0)); } emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4)); #else if (ra_hasreg(dest)) { if (!LJ_SOFTFP && irt_isnum(t)) { emit_hsi(as, MIPSI_LDC1, dest, idx, ofs); dest = type; } } else { dest = type; } emit_dta(as, MIPSI_DSRA32, type, dest, 15); emit_tsi(as, MIPSI_LD, dest, idx, ofs); #endif } static void asm_ahustore(ASMState *as, IRIns *ir) { RegSet allow = RSET_GPR; Reg idx, src = RID_NONE, type = RID_NONE; int32_t ofs = 0; if (ir->r == RID_SINK) return; if (!LJ_SOFTFP && irt_isnum(ir->t)) { src = ra_alloc1(as, ir->op2, RSET_FPR); idx = asm_fuseahuref(as, ir->op1, &ofs, allow); emit_hsi(as, MIPSI_SDC1, src, idx, ofs); } else { #if LJ_32 if (!irt_ispri(ir->t)) { src = ra_alloc1(as, ir->op2, allow); 
rset_clear(allow, src); } if (LJ_SOFTFP && (ir+1)->o == IR_HIOP) type = ra_alloc1(as, (ir+1)->op2, allow); else type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); rset_clear(allow, type); idx = asm_fuseahuref(as, ir->op1, &ofs, allow); if (ra_hasreg(src)) emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0)); emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4)); #else Reg tmp = RID_TMP; if (irt_ispri(ir->t)) { tmp = ra_allock(as, ~((int64_t)~irt_toitype(ir->t) << 47), allow); rset_clear(allow, tmp); } else { src = ra_alloc1(as, ir->op2, allow); rset_clear(allow, src); type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow); rset_clear(allow, type); } idx = asm_fuseahuref(as, ir->op1, &ofs, allow); emit_tsi(as, MIPSI_SD, tmp, idx, ofs); if (ra_hasreg(src)) { if (irt_isinteger(ir->t)) { emit_dst(as, MIPSI_DADDU, tmp, tmp, type); emit_tsml(as, MIPSI_DEXT, tmp, src, 31, 0); } else { emit_dst(as, MIPSI_DADDU, tmp, src, type); } } #endif } } static void asm_sload(ASMState *as, IRIns *ir) { Reg dest = RID_NONE, type = RID_NONE, base; RegSet allow = RSET_GPR; IRType1 t = ir->t; #if LJ_32 int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); int hiop = (LJ_32 && LJ_SOFTFP && (ir+1)->o == IR_HIOP); if (hiop) t.irt = IRT_NUM; #else int32_t ofs = 8*((int32_t)ir->op1-2); #endif lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK)); #if LJ_32 && LJ_SOFTFP lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */ if (hiop && ra_used(ir+1)) { type = ra_dest(as, ir+1, allow); rset_clear(allow, type); } #else if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { dest = ra_scratch(as, RSET_FPR); asm_tointg(as, ir, dest); t.irt = IRT_NUM; /* Continue with a regular number type check. */ } else #endif if (ra_used(ir)) { lua_assert((LJ_SOFTFP ? 
0 : irt_isnum(ir->t)) || irt_isint(ir->t) || irt_isaddr(ir->t)); dest = ra_dest(as, ir, (!LJ_SOFTFP && irt_isnum(t)) ? RSET_FPR : allow); rset_clear(allow, dest); base = ra_alloc1(as, REF_BASE, allow); rset_clear(allow, base); if (!LJ_SOFTFP && (ir->op2 & IRSLOAD_CONVERT)) { if (irt_isint(t)) { Reg tmp = ra_scratch(as, RSET_FPR); emit_tg(as, MIPSI_MFC1, dest, tmp); emit_fg(as, MIPSI_TRUNC_W_D, tmp, tmp); dest = tmp; t.irt = IRT_NUM; /* Check for original type. */ } else { Reg tmp = ra_scratch(as, RSET_GPR); emit_fg(as, MIPSI_CVT_D_W, dest, dest); emit_tg(as, MIPSI_MTC1, tmp, dest); dest = tmp; t.irt = IRT_INT; /* Check for original type. */ } } #if LJ_64 else if (irt_isaddr(t)) { /* Clear type from pointers. */ emit_tsml(as, MIPSI_DEXTM, dest, dest, 14, 0); } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) { /* Sign-extend integers. */ emit_dta(as, MIPSI_SLL, dest, dest, 0); } #endif goto dotypecheck; } base = ra_alloc1(as, REF_BASE, allow); rset_clear(allow, base); dotypecheck: #if LJ_32 if ((ir->op2 & IRSLOAD_TYPECHECK)) { if (ra_noreg(type)) type = RID_TMP; if (irt_isnum(t)) { asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); emit_tsi(as, MIPSI_SLTIU, RID_TMP, type, (int32_t)LJ_TISNUM); } else { Reg ktype = ra_allock(as, irt_toitype(t), allow); asm_guard(as, MIPSI_BNE, type, ktype); } } if (ra_hasreg(dest)) { if (!LJ_SOFTFP && irt_isnum(t)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs); else emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0)); } if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4)); #else if ((ir->op2 & IRSLOAD_TYPECHECK)) { type = dest < RID_MAX_GPR ? 
dest : RID_TMP; if (irt_ispri(t)) { asm_guard(as, MIPSI_BNE, type, ra_allock(as, ~((int64_t)~irt_toitype(t) << 47) , allow)); } else { if (irt_isnum(t)) { asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO); emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM); if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs); } else { asm_guard(as, MIPSI_BNE, RID_TMP, ra_allock(as, (int32_t)irt_toitype(t), allow)); } emit_dta(as, MIPSI_DSRA32, RID_TMP, type, 15); } emit_tsi(as, MIPSI_LD, type, base, ofs); } else if (ra_hasreg(dest)) { if (irt_isnum(t)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs); else emit_tsi(as, irt_isint(t) ? MIPSI_LW : MIPSI_LD, dest, base, ofs ^ ((LJ_BE && irt_isint(t)) ? 4 : 0)); } #endif } /* -- Allocations --------------------------------------------------------- */ #if LJ_HASFFI static void asm_cnew(ASMState *as, IRIns *ir) { CTState *cts = ctype_ctsG(J2G(as->J)); CTypeID id = (CTypeID)IR(ir->op1)->i; CTSize sz; CTInfo info = lj_ctype_info(cts, id, &sz); const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; IRRef args[4]; RegSet drop = RSET_SCRATCH; lua_assert(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL)); as->gcsteps++; if (ra_hasreg(ir->r)) rset_clear(drop, ir->r); /* Dest reg handled below. */ ra_evictset(as, drop); if (ra_used(ir)) ra_destreg(as, ir, RID_RET); /* GCcdata * */ /* Initialize immutable cdata object. */ if (ir->o == IR_CNEWI) { RegSet allow = (RSET_GPR & ~RSET_SCRATCH); #if LJ_32 int32_t ofs = sizeof(GCcdata); if (sz == 8) { ofs += 4; lua_assert((ir+1)->o == IR_HIOP); if (LJ_LE) ir++; } for (;;) { Reg r = ra_alloc1z(as, ir->op2, allow); emit_tsi(as, MIPSI_SW, r, RID_RET, ofs); rset_clear(allow, r); if (ofs == sizeof(GCcdata)) break; ofs -= 4; if (LJ_BE) ir++; else ir--; } #else emit_tsi(as, MIPSI_SD, ra_alloc1(as, ir->op2, allow), RID_RET, sizeof(GCcdata)); #endif lua_assert(sz == 4 || sz == 8); } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. 
*/ ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; args[0] = ASMREF_L; /* lua_State *L */ args[1] = ir->op1; /* CTypeID id */ args[2] = ir->op2; /* CTSize sz */ args[3] = ASMREF_TMP1; /* CTSize align */ asm_gencall(as, ci, args); emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); return; } /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */ emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct)); emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid)); emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA); emit_ti(as, MIPSI_LI, RID_TMP, id); /* Lower 16 bit used. Sign-ext ok. */ args[0] = ASMREF_L; /* lua_State *L */ args[1] = ASMREF_TMP1; /* MSize size */ asm_gencall(as, ci, args); ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)), ra_releasetmp(as, ASMREF_TMP1)); } #else #define asm_cnew(as, ir) ((void)0) #endif /* -- Write barriers ------------------------------------------------------ */ static void asm_tbar(ASMState *as, IRIns *ir) { Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab)); Reg link = RID_TMP; MCLabel l_end = emit_label(as); emit_tsi(as, MIPSI_AS, link, tab, (int32_t)offsetof(GCtab, gclist)); emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked)); emit_setgl(as, tab, gc.grayagain); emit_getgl(as, link, gc.grayagain); emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. */ emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK); emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked)); } static void asm_obar(ASMState *as, IRIns *ir) { const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; IRRef args[2]; MCLabel l_end; Reg obj, val, tmp; /* No need for other object barriers (yet). 
*/ lua_assert(IR(ir->op1)->o == IR_UREFC); ra_evictset(as, RSET_SCRATCH); l_end = emit_label(as); args[0] = ASMREF_TMP1; /* global_State *g */ args[1] = ir->op1; /* TValue *tv */ asm_gencall(as, ci, args); emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); obj = IR(ir->op1)->r; tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj)); emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK); emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end); emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES); val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); emit_tsi(as, MIPSI_LBU, tmp, obj, (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked)); } /* -- Arithmetic and logic operations ------------------------------------- */ #if !LJ_SOFTFP static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi) { Reg dest = ra_dest(as, ir, RSET_FPR); Reg right, left = ra_alloc2(as, ir, RSET_FPR); right = (left >> 8); left &= 255; emit_fgh(as, mi, dest, left, right); } static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi) { Reg dest = ra_dest(as, ir, RSET_FPR); Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); emit_fg(as, mi, dest, left); } static void asm_fpmath(ASMState *as, IRIns *ir) { if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir)) return; if (ir->op2 <= IRFPM_TRUNC) asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2); else if (ir->op2 == IRFPM_SQRT) asm_fpunary(as, ir, MIPSI_SQRT_D); else asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2); } #endif static void asm_add(ASMState *as, IRIns *ir) { IRType1 t = ir->t; #if !LJ_SOFTFP if (irt_isnum(t)) { asm_fparith(as, ir, MIPSI_ADD_D); } else #endif { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); if (irref_isk(ir->op2)) { intptr_t k = get_kval(IR(ir->op2)); if (checki16(k)) { emit_tsi(as, (LJ_64 && irt_is64(t)) 
? MIPSI_DADDIU : MIPSI_ADDIU, dest, left, k); return; } } right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); emit_dst(as, (LJ_64 && irt_is64(t)) ? MIPSI_DADDU : MIPSI_ADDU, dest, left, right); } } static void asm_sub(ASMState *as, IRIns *ir) { #if !LJ_SOFTFP if (irt_isnum(ir->t)) { asm_fparith(as, ir, MIPSI_SUB_D); } else #endif { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? MIPSI_DSUBU : MIPSI_SUBU, dest, left, right); } } static void asm_mul(ASMState *as, IRIns *ir) { #if !LJ_SOFTFP if (irt_isnum(ir->t)) { asm_fparith(as, ir, MIPSI_MUL_D); } else #endif { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; if (LJ_64 && irt_is64(ir->t)) { emit_dst(as, MIPSI_MFLO, dest, 0, 0); emit_dst(as, MIPSI_DMULT, 0, left, right); } else { emit_dst(as, MIPSI_MUL, dest, left, right); } } } static void asm_mod(ASMState *as, IRIns *ir) { #if LJ_64 && LJ_HASFFI if (!irt_isint(ir->t)) asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : IRCALL_lj_carith_modu64); else #endif asm_callid(as, ir, IRCALL_lj_vm_modi); } #if !LJ_SOFTFP static void asm_pow(ASMState *as, IRIns *ir) { #if LJ_64 && LJ_HASFFI if (!irt_isnum(ir->t)) asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : IRCALL_lj_carith_powu64); else #endif asm_callid(as, ir, IRCALL_lj_vm_powi); } static void asm_div(ASMState *as, IRIns *ir) { #if LJ_64 && LJ_HASFFI if (!irt_isnum(ir->t)) asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : IRCALL_lj_carith_divu64); else #endif asm_fparith(as, ir, MIPSI_DIV_D); } #endif static void asm_neg(ASMState *as, IRIns *ir) { #if !LJ_SOFTFP if (irt_isnum(ir->t)) { asm_fpunary(as, ir, MIPSI_NEG_D); } else #endif { Reg dest = ra_dest(as, ir, RSET_GPR); Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); emit_dst(as, (LJ_64 && irt_is64(ir->t)) ? 
MIPSI_DSUBU : MIPSI_SUBU, dest, RID_ZERO, left); } } #define asm_abs(as, ir) asm_fpunary(as, ir, MIPSI_ABS_D) #define asm_atan2(as, ir) asm_callid(as, ir, IRCALL_atan2) #define asm_ldexp(as, ir) asm_callid(as, ir, IRCALL_ldexp) static void asm_arithov(ASMState *as, IRIns *ir) { Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR); lua_assert(!irt_is64(ir->t)); if (irref_isk(ir->op2)) { int k = IR(ir->op2)->i; if (ir->o == IR_SUBOV) k = -k; if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */ left = ra_alloc1(as, ir->op1, RSET_GPR); asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left); emit_tsi(as, MIPSI_ADDIU, dest, left, k); if (dest == left) emit_move(as, RID_TMP, left); return; } } left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left), right), dest)); asm_guard(as, MIPSI_BLTZ, RID_TMP, 0); emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp); if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */ emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right); } else { /* ((dest^left) & (dest^~right)) < 0 */ emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest); emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO); } emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left); emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right); if (dest == left || dest == right) emit_move(as, RID_TMP, dest == left ? 
left : right); } #define asm_addov(as, ir) asm_arithov(as, ir) #define asm_subov(as, ir) asm_arithov(as, ir) static void asm_mulov(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left), right), dest)); asm_guard(as, MIPSI_BNE, RID_TMP, tmp); emit_dta(as, MIPSI_SRA, RID_TMP, dest, 31); emit_dst(as, MIPSI_MFHI, tmp, 0, 0); emit_dst(as, MIPSI_MFLO, dest, 0, 0); emit_dst(as, MIPSI_MULT, 0, left, right); } #if LJ_32 && LJ_HASFFI static void asm_add64(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); if (irref_isk(ir->op2)) { int32_t k = IR(ir->op2)->i; if (k == 0) { emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP); goto loarith; } else if (checki16(k)) { emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP); emit_tsi(as, MIPSI_ADDIU, dest, left, k); goto loarith; } } emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP); right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); emit_dst(as, MIPSI_ADDU, dest, left, right); loarith: ir--; dest = ra_dest(as, ir, RSET_GPR); left = ra_alloc1(as, ir->op1, RSET_GPR); if (irref_isk(ir->op2)) { int32_t k = IR(ir->op2)->i; if (k == 0) { if (dest != left) emit_move(as, dest, left); return; } else if (checki16(k)) { if (dest == left) { Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left)); emit_move(as, dest, tmp); dest = tmp; } emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left); emit_tsi(as, MIPSI_ADDIU, dest, left, k); return; } } right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); if (dest == left && dest == right) { Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); emit_move(as, dest, tmp); dest = tmp; } emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? 
right : left); emit_dst(as, MIPSI_ADDU, dest, left, right); } static void asm_sub64(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP); emit_dst(as, MIPSI_SUBU, dest, left, right); ir--; dest = ra_dest(as, ir, RSET_GPR); left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; if (dest == left) { Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); emit_move(as, dest, tmp); dest = tmp; } emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest); emit_dst(as, MIPSI_SUBU, dest, left, right); } static void asm_neg64(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg left = ra_alloc1(as, ir->op1, RSET_GPR); emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP); emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); ir--; dest = ra_dest(as, ir, RSET_GPR); left = ra_alloc1(as, ir->op1, RSET_GPR); emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest); emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left); } #endif static void asm_bnot(ASMState *as, IRIns *ir) { Reg left, right, dest = ra_dest(as, ir, RSET_GPR); IRIns *irl = IR(ir->op1); if (mayfuse(as, ir->op1) && irl->o == IR_BOR) { left = ra_alloc2(as, irl, RSET_GPR); right = (left >> 8); left &= 255; } else { left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); right = RID_ZERO; } emit_dst(as, MIPSI_NOR, dest, left, right); } static void asm_bswap(ASMState *as, IRIns *ir) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg left = ra_alloc1(as, ir->op1, RSET_GPR); #if LJ_32 if ((as->flags & JIT_F_MIPSXXR2)) { emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16); emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left); } else { Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest)); emit_dst(as, MIPSI_OR, dest, dest, tmp); emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00); emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8); emit_dta(as, 
MIPSI_SRL, dest, left, 8); emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00); emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP); emit_dta(as, MIPSI_SRL, tmp, left, 24); emit_dta(as, MIPSI_SLL, RID_TMP, left, 24); } #else if (irt_is64(ir->t)) { emit_dst(as, MIPSI_DSHD, dest, 0, RID_TMP); emit_dst(as, MIPSI_DSBH, RID_TMP, 0, left); } else { emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16); emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left); } #endif } static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); if (irref_isk(ir->op2)) { intptr_t k = get_kval(IR(ir->op2)); if (checku16(k)) { emit_tsi(as, mik, dest, left, k); return; } } right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); emit_dst(as, mi, dest, left, right); } #define asm_band(as, ir) asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI) #define asm_bor(as, ir) asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI) #define asm_bxor(as, ir) asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI) static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik) { Reg dest = ra_dest(as, ir, RSET_GPR); if (irref_isk(ir->op2)) { /* Constant shifts. */ uint32_t shift = (uint32_t)IR(ir->op2)->i; if (LJ_64 && irt_is64(ir->t)) mik |= (shift & 32) ? MIPSI_D32 : MIPSI_D; emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), (shift & 31)); } else { Reg right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; if (LJ_64 && irt_is64(ir->t)) mi |= MIPSI_DV; emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. 
*/ } } #define asm_bshl(as, ir) asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL) #define asm_bshr(as, ir) asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL) #define asm_bsar(as, ir) asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA) #define asm_brol(as, ir) lua_assert(0) static void asm_bror(ASMState *as, IRIns *ir) { if (LJ_64 || (as->flags & JIT_F_MIPSXXR2)) { asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR); } else { Reg dest = ra_dest(as, ir, RSET_GPR); if (irref_isk(ir->op2)) { /* Constant shifts. */ uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31); Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR); emit_rotr(as, dest, left, RID_TMP, shift); } else { Reg right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; emit_dst(as, MIPSI_OR, dest, dest, RID_TMP); emit_dst(as, MIPSI_SRLV, dest, right, left); emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left); emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right); } } } #if LJ_32 && LJ_SOFTFP static void asm_sfpmin_max(ASMState *as, IRIns *ir) { CCallInfo ci = lj_ir_callinfo[(IROp)ir->o == IR_MIN ? IRCALL_lj_vm_sfmin : IRCALL_lj_vm_sfmax]; IRRef args[4]; args[0^LJ_BE] = ir->op1; args[1^LJ_BE] = (ir+1)->op1; args[2^LJ_BE] = ir->op2; args[3^LJ_BE] = (ir+1)->op2; asm_setupresult(as, ir, &ci); emit_call(as, (void *)ci.func, 0); ci.func = NULL; asm_gencall(as, &ci, args); } #endif static void asm_min_max(ASMState *as, IRIns *ir, int ismax) { if (!LJ_SOFTFP && irt_isnum(ir->t)) { Reg dest = ra_dest(as, ir, RSET_FPR); Reg right, left = ra_alloc2(as, ir, RSET_FPR); right = (left >> 8); left &= 255; if (dest == left) { emit_fg(as, MIPSI_MOVT_D, dest, right); } else { emit_fg(as, MIPSI_MOVF_D, dest, left); if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right); } emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? 
right : left); } else { Reg dest = ra_dest(as, ir, RSET_GPR); Reg right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; if (dest == left) { emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP); } else { emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP); if (dest != right) emit_move(as, dest, right); } emit_dst(as, MIPSI_SLT, RID_TMP, ismax ? left : right, ismax ? right : left); } } #define asm_min(as, ir) asm_min_max(as, ir, 0) #define asm_max(as, ir) asm_min_max(as, ir, 1) /* -- Comparisons --------------------------------------------------------- */ #if LJ_32 && LJ_SOFTFP /* SFP comparisons. */ static void asm_sfpcomp(ASMState *as, IRIns *ir) { const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp]; RegSet drop = RSET_SCRATCH; Reg r; IRRef args[4]; args[LJ_LE ? 0 : 1] = ir->op1; args[LJ_LE ? 1 : 0] = (ir+1)->op1; args[LJ_LE ? 2 : 3] = ir->op2; args[LJ_LE ? 3 : 2] = (ir+1)->op2; for (r = REGARG_FIRSTGPR; r <= REGARG_FIRSTGPR+3; r++) { if (!rset_test(as->freeset, r) && regcost_ref(as->cost[r]) == args[r-REGARG_FIRSTGPR]) rset_clear(drop, r); } ra_evictset(as, drop); asm_setupresult(as, ir, ci); switch ((IROp)ir->o) { case IR_LT: asm_guard(as, MIPSI_BGEZ, RID_RET, 0); break; case IR_ULT: asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP); emit_loadi(as, RID_TMP, 1); asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); break; case IR_GE: asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP); emit_loadi(as, RID_TMP, 2); asm_guard(as, MIPSI_BLTZ, RID_RET, 0); break; case IR_LE: asm_guard(as, MIPSI_BGTZ, RID_RET, 0); break; case IR_GT: asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP); emit_loadi(as, RID_TMP, 2); asm_guard(as, MIPSI_BLEZ, RID_RET, 0); break; case IR_UGE: asm_guard(as, MIPSI_BLTZ, RID_RET, 0); break; case IR_ULE: asm_guard(as, MIPSI_BEQ, RID_RET, RID_TMP); emit_loadi(as, RID_TMP, 1); break; case IR_UGT: case IR_ABC: asm_guard(as, MIPSI_BLEZ, RID_RET, 0); break; case IR_EQ: case IR_NE: asm_guard(as, (ir->o & 1) ? 
MIPSI_BEQ : MIPSI_BNE, RID_RET, RID_ZERO); default: break; } asm_gencall(as, ci, args); } #endif static void asm_comp(ASMState *as, IRIns *ir) { /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */ IROp op = ir->o; if (!LJ_SOFTFP && irt_isnum(ir->t)) { Reg right, left = ra_alloc2(as, ir, RSET_FPR); right = (left >> 8); left &= 255; asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0); emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right); } else { Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR); if (op == IR_ABC) op = IR_UGT; if ((op&4) == 0 && irref_isk(ir->op2) && get_kval(IR(ir->op2)) == 0) { MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) : ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ); asm_guard(as, mi, left, 0); } else { if (irref_isk(ir->op2)) { intptr_t k = get_kval(IR(ir->op2)); if ((op&2)) k++; if (checki16(k)) { asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI, RID_TMP, left, k); return; } } right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left)); asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP, (op&2) ? right : left, (op&2) ? left : right); } } } static void asm_equal(ASMState *as, IRIns *ir) { Reg right, left = ra_alloc2(as, ir, (!LJ_SOFTFP && irt_isnum(ir->t)) ? RSET_FPR : RSET_GPR); right = (left >> 8); left &= 255; if (!LJ_SOFTFP && irt_isnum(ir->t)) { asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0); emit_fgh(as, MIPSI_C_EQ_D, 0, left, right); } else { asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right); } } #if LJ_32 && LJ_HASFFI /* 64 bit integer comparisons. */ static void asm_comp64(ASMState *as, IRIns *ir) { /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. 
*/ IROp op = (ir-1)->o; MCLabel l_end; Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR); righthi = (lefthi >> 8); lefthi &= 255; leftlo = ra_alloc2(as, ir-1, rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi)); rightlo = (leftlo >> 8); leftlo &= 255; asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO); l_end = emit_label(as); if (lefthi != righthi) emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP, (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi); emit_dst(as, MIPSI_SLTU, RID_TMP, (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo); if (lefthi != righthi) emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end); } static void asm_comp64eq(ASMState *as, IRIns *ir) { Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR); right = (left >> 8); left &= 255; asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO); tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right)); emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp); emit_dst(as, MIPSI_XOR, tmp, left, right); left = ra_alloc2(as, ir-1, RSET_GPR); right = (left >> 8); left &= 255; emit_dst(as, MIPSI_XOR, RID_TMP, left, right); } #endif /* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ /* Hiword op of a split 64 bit op. Previous op must be the loword op. */ static void asm_hiop(ASMState *as, IRIns *ir) { #if LJ_32 && (LJ_HASFFI || LJ_SOFTFP) /* HIOP is marked as a store because it needs its own DCE logic. */ int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ as->curins--; /* Always skip the CONV. */ #if LJ_HASFFI && !LJ_SOFTFP if (usehi || uselo) asm_conv64(as, ir); return; #endif } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */ as->curins--; /* Always skip the loword comparison. 
*/ #if LJ_SOFTFP if (!irt_isint(ir->t)) { asm_sfpcomp(as, ir-1); return; } #endif #if LJ_HASFFI asm_comp64(as, ir); #endif return; } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ as->curins--; /* Always skip the loword comparison. */ #if LJ_SOFTFP if (!irt_isint(ir->t)) { asm_sfpcomp(as, ir-1); return; } #endif #if LJ_HASFFI asm_comp64eq(as, ir); #endif return; #if LJ_SOFTFP } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) { as->curins--; /* Always skip the loword min/max. */ if (uselo || usehi) asm_sfpmin_max(as, ir-1); return; #endif } else if ((ir-1)->o == IR_XSTORE) { as->curins--; /* Handle both stores here. */ if ((ir-1)->r != RID_SINK) { asm_xstore_(as, ir, LJ_LE ? 4 : 0); asm_xstore_(as, ir-1, LJ_LE ? 0 : 4); } return; } if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ switch ((ir-1)->o) { #if LJ_HASFFI case IR_ADD: as->curins--; asm_add64(as, ir); break; case IR_SUB: as->curins--; asm_sub64(as, ir); break; case IR_NEG: as->curins--; asm_neg64(as, ir); break; #endif #if LJ_SOFTFP case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: case IR_STRTO: if (!uselo) ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */ break; #endif case IR_CALLN: case IR_CALLS: case IR_CALLXS: if (!uselo) ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ break; #if LJ_SOFTFP case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR: #endif case IR_CNEWI: /* Nothing to do here. Handled by lo op itself. */ break; default: lua_assert(0); break; } #else UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. 
*/ #endif } /* -- Profiling ----------------------------------------------------------- */ static void asm_prof(ASMState *as, IRIns *ir) { UNUSED(ir); asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, HOOK_PROFILE); emit_lsglptr(as, MIPSI_LBU, RID_TMP, (int32_t)offsetof(global_State, hookmask)); } /* -- Stack handling ------------------------------------------------------ */ /* Check Lua stack size for overflow. Use exit handler as fallback. */ static void asm_stack_check(ASMState *as, BCReg topslot, IRIns *irp, RegSet allow, ExitNo exitno) { /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */ Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE; ExitNo oldsnap = as->snapno; rset_clear(allow, pbase); #if LJ_32 tmp = allow ? rset_pickbot(allow) : (pbase == RID_RETHI ? RID_RETLO : RID_RETHI); #else tmp = allow ? rset_pickbot(allow) : RID_RET; #endif as->snapno = exitno; asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO); as->snapno = oldsnap; if (allow == RSET_EMPTY) /* Restore temp. register. */ emit_tsi(as, MIPSI_AL, tmp, RID_SP, 0); else ra_modified(as, tmp); emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot)); emit_dst(as, MIPSI_ASUBU, RID_TMP, tmp, pbase); emit_tsi(as, MIPSI_AL, tmp, tmp, offsetof(lua_State, maxstack)); if (pbase == RID_TMP) emit_getgl(as, RID_TMP, jit_base); emit_getgl(as, tmp, cur_L); if (allow == RSET_EMPTY) /* Spill temp. register. */ emit_tsi(as, MIPSI_AS, tmp, RID_SP, 0); } /* Restore Lua stack from on-trace state. */ static void asm_stack_restore(ASMState *as, SnapShot *snap) { SnapEntry *map = &as->T->snapmap[snap->mapofs]; #if LJ_32 || defined(LUA_USE_ASSERT) SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2]; #endif MSize n, nent = snap->nent; /* Store the value of all modified slots to the Lua stack. 
*/ for (n = 0; n < nent; n++) { SnapEntry sn = map[n]; BCReg s = snap_slot(sn); int32_t ofs = 8*((int32_t)s-1-LJ_FR2); IRRef ref = snap_ref(sn); IRIns *ir = IR(ref); if ((sn & SNAP_NORESTORE)) continue; if (irt_isnum(ir->t)) { #if LJ_SOFTFP Reg tmp; RegSet allow = rset_exclude(RSET_GPR, RID_BASE); lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */ tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo, allow); emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?4:0)); if (rset_test(as->freeset, tmp+1)) allow = RID2RSET(tmp+1); tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, allow); emit_tsi(as, MIPSI_SW, tmp, RID_BASE, ofs+(LJ_BE?0:4)); #else Reg src = ra_alloc1(as, ref, RSET_FPR); emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs); #endif } else { #if LJ_32 RegSet allow = rset_exclude(RSET_GPR, RID_BASE); Reg type; lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t)); if (!irt_ispri(ir->t)) { Reg src = ra_alloc1(as, ref, allow); rset_clear(allow, src); emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0)); } if ((sn & (SNAP_CONT|SNAP_FRAME))) { if (s == 0) continue; /* Do not overwrite link to previous frame. */ type = ra_allock(as, (int32_t)(*flinks--), allow); #if LJ_SOFTFP } else if ((sn & SNAP_SOFTFPNUM)) { type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPR, RID_BASE)); #endif } else { type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow); } emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4)); #else asm_tvstore64(as, RID_BASE, ofs, ref); #endif } checkmclim(as); } lua_assert(map + nent == flinks); } /* -- GC handling --------------------------------------------------------- */ /* Check GC threshold and do one or more GC steps. */ static void asm_gc_check(ASMState *as) { const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; IRRef args[2]; MCLabel l_end; Reg tmp; ra_evictset(as, RSET_SCRATCH); l_end = emit_label(as); /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. 
*/ /* Assumes asm_snap_prep() already done. */ asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO); args[0] = ASMREF_TMP1; /* global_State *g */ args[1] = ASMREF_TMP2; /* MSize steps */ asm_gencall(as, ci, args); emit_tsi(as, MIPSI_AADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768); tmp = ra_releasetmp(as, ASMREF_TMP2); emit_loadi(as, tmp, as->gcsteps); /* Jump around GC step if GC total < GC threshold. */ emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end); emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp); emit_getgl(as, tmp, gc.threshold); emit_getgl(as, RID_TMP, gc.total); as->gcsteps = 0; checkmclim(as); } /* -- Loop handling ------------------------------------------------------- */ /* Fixup the loop branch. */ static void asm_loop_fixup(ASMState *as) { MCode *p = as->mctop; MCode *target = as->mcp; p[-1] = MIPSI_NOP; if (as->loopinv) { /* Inverted loop branch? */ /* asm_guard already inverted the cond branch. Only patch the target. */ p[-3] |= ((target-p+2) & 0x0000ffffu); } else { p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); } } /* -- Head of trace ------------------------------------------------------- */ /* Coalesce BASE register for a root trace. */ static void asm_head_root_base(ASMState *as) { IRIns *ir = IR(REF_BASE); Reg r = ir->r; if (as->loopinv) as->mctop--; if (ra_hasreg(r)) { ra_free(as, r); if (rset_test(as->modset, r) || irt_ismarked(ir->t)) ir->r = RID_INIT; /* No inheritance for modified BASE register. */ if (r != RID_BASE) emit_move(as, r, RID_BASE); } } /* Coalesce BASE register for a side trace. */ static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) { IRIns *ir = IR(REF_BASE); Reg r = ir->r; if (as->loopinv) as->mctop--; if (ra_hasreg(r)) { ra_free(as, r); if (rset_test(as->modset, r) || irt_ismarked(ir->t)) ir->r = RID_INIT; /* No inheritance for modified BASE register. */ if (irp->r == r) { rset_clear(allow, r); /* Mark same BASE register as coalesced. 
*/ } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { rset_clear(allow, irp->r); emit_move(as, r, irp->r); /* Move from coalesced parent reg. */ } else { emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ } } return allow; } /* -- Tail of trace ------------------------------------------------------- */ /* Fixup the tail code. */ static void asm_tail_fixup(ASMState *as, TraceNo lnk) { MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp; int32_t spadj = as->T->spadjust; MCode *p = as->mctop-1; *p = spadj ? (MIPSI_AADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP; p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); } /* Prepare tail of code. */ static void asm_tail_prep(ASMState *as) { as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */ as->invmcp = as->loopref ? as->mcp : NULL; } /* -- Trace setup --------------------------------------------------------- */ /* Ensure there are enough stack slots for call arguments. */ static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) { IRRef args[CCI_NARGS_MAX*2]; uint32_t i, nargs = CCI_XNARGS(ci); #if LJ_32 int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR; #else int nslots = 0, ngpr = REGARG_NUMGPR; #endif asm_collectargs(as, ir, ci, args); for (i = 0; i < nargs; i++) { #if LJ_32 if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t) && nfpr > 0 && !(ci->flags & CCI_VARARG)) { nfpr--; ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1; } else if (!LJ_SOFTFP && args[i] && irt_isnum(IR(args[i])->t)) { nfpr = 0; ngpr = ngpr & ~1; if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1; } else { nfpr = 0; if (ngpr > 0) ngpr--; else nslots++; } #else if (ngpr > 0) ngpr--; else nslots += 2; #endif } if (nslots > as->evenspill) /* Leave room for args in stack slots. */ as->evenspill = nslots; return irt_isfp(ir->t) ? 
REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); } static void asm_setup_target(ASMState *as) { asm_sparejump_setup(as); asm_exitstub_setup(as); } /* -- Trace patching ------------------------------------------------------ */ /* Patch exit jumps of existing machine code to a new target. */ void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) { MCode *p = T->mcode; MCode *pe = (MCode *)((char *)p + T->szmcode); MCode *px = exitstub_trace_addr(T, exitno); MCode *cstart = NULL, *cstop = NULL; MCode *mcarea = lj_mcode_patch(J, p, 0); MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno; MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu); for (p++; p < pe; p++) { if (*p == exitload) { /* Look for load of exit number. */ if (((p[-1] ^ (px-p)) & 0xffffu) == 0) { /* Look for exitstub branch. */ ptrdiff_t delta = target - p; if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */ patchbranch: p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu); *p = MIPSI_NOP; /* Replace the load of the exit number. */ cstop = p; if (!cstart) cstart = p-1; } else { /* Branch out of range. Use spare jump slot in mcarea. */ int i; for (i = 2; i < 2+MIPS_SPAREJUMP*2; i += 2) { if (mcarea[i] == tjump) { delta = mcarea+i - p; goto patchbranch; } else if (mcarea[i] == MIPSI_NOP) { mcarea[i] = tjump; cstart = mcarea+i; delta = mcarea+i - p; goto patchbranch; } } /* Ignore jump slot overflow. Child trace is simply not attached. */ } } else if (p+1 == pe) { /* Patch NOP after code for inverted loop branch. Use of J is ok. */ lua_assert(p[1] == MIPSI_NOP); p[1] = tjump; *p = MIPSI_NOP; /* Replace the load of the exit number. */ cstop = p+2; if (!cstart) cstart = p+1; } } } if (cstart) lj_mcode_sync(cstart, cstop); lj_mcode_patch(J, mcarea, 1); } tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lib_base.c0000644000000000000000000004050213306562377022535 0ustar rootroot/* ** Base and coroutine library. ** Copyright (C) 2005-2017 Mike Pall. 
See Copyright Notice in luajit.h ** ** Major portions taken verbatim or adapted from the Lua interpreter. ** Copyright (C) 1994-2011 Lua.org, PUC-Rio. See Copyright Notice in lua.h */ #include #define lib_base_c #define LUA_LIB #include "lua.h" #include "lauxlib.h" #include "lualib.h" #include "lj_obj.h" #include "lj_gc.h" #include "lj_err.h" #include "lj_debug.h" #include "lj_str.h" #include "lj_tab.h" #include "lj_meta.h" #include "lj_state.h" #include "lj_frame.h" #if LJ_HASFFI #include "lj_ctype.h" #include "lj_cconv.h" #endif #include "lj_bc.h" #include "lj_ff.h" #include "lj_dispatch.h" #include "lj_char.h" #include "lj_strscan.h" #include "lj_strfmt.h" #include "lj_lib.h" /* -- Base library: checks ------------------------------------------------ */ #define LJLIB_MODULE_base LJLIB_ASM(assert) LJLIB_REC(.) { GCstr *s; lj_lib_checkany(L, 1); s = lj_lib_optstr(L, 2); if (s) lj_err_callermsg(L, strdata(s)); else lj_err_caller(L, LJ_ERR_ASSERT); return FFH_UNREACHABLE; } /* ORDER LJ_T */ LJLIB_PUSH("nil") LJLIB_PUSH("boolean") LJLIB_PUSH(top-1) /* boolean */ LJLIB_PUSH("userdata") LJLIB_PUSH("string") LJLIB_PUSH("upval") LJLIB_PUSH("thread") LJLIB_PUSH("proto") LJLIB_PUSH("function") LJLIB_PUSH("trace") LJLIB_PUSH("cdata") LJLIB_PUSH("table") LJLIB_PUSH(top-9) /* userdata */ LJLIB_PUSH("number") LJLIB_ASM_(type) LJLIB_REC(.) /* Recycle the lj_lib_checkany(L, 1) from assert. */ /* -- Base library: iterators --------------------------------------------- */ /* This solves a circular dependency problem -- change FF_next_N as needed. */ LJ_STATIC_ASSERT((int)FF_next == FF_next_N); LJLIB_ASM(next) { lj_lib_checktab(L, 1); return FFH_UNREACHABLE; } #if LJ_52 || LJ_HASFFI static int ffh_pairs(lua_State *L, MMS mm) { TValue *o = lj_lib_checkany(L, 1); cTValue *mo = lj_meta_lookup(L, o, mm); if ((LJ_52 || tviscdata(o)) && !tvisnil(mo)) { L->top = o+1; /* Only keep one argument. */ copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. 
*/ return FFH_TAILCALL; } else { if (!tvistab(o)) lj_err_argt(L, 1, LUA_TTABLE); if (LJ_FR2) { copyTV(L, o-1, o); o--; } setfuncV(L, o-1, funcV(lj_lib_upvalue(L, 1))); if (mm == MM_pairs) setnilV(o+1); else setintV(o+1, 0); return FFH_RES(3); } } #else #define ffh_pairs(L, mm) (lj_lib_checktab(L, 1), FFH_UNREACHABLE) #endif LJLIB_PUSH(lastcl) LJLIB_ASM(pairs) LJLIB_REC(xpairs 0) { return ffh_pairs(L, MM_pairs); } LJLIB_NOREGUV LJLIB_ASM(ipairs_aux) LJLIB_REC(.) { lj_lib_checktab(L, 1); lj_lib_checkint(L, 2); return FFH_UNREACHABLE; } LJLIB_PUSH(lastcl) LJLIB_ASM(ipairs) LJLIB_REC(xpairs 1) { return ffh_pairs(L, MM_ipairs); } /* -- Base library: getters and setters ----------------------------------- */ LJLIB_ASM_(getmetatable) LJLIB_REC(.) /* Recycle the lj_lib_checkany(L, 1) from assert. */ LJLIB_ASM(setmetatable) LJLIB_REC(.) { GCtab *t = lj_lib_checktab(L, 1); GCtab *mt = lj_lib_checktabornil(L, 2); if (!tvisnil(lj_meta_lookup(L, L->base, MM_metatable))) lj_err_caller(L, LJ_ERR_PROTMT); setgcref(t->metatable, obj2gco(mt)); if (mt) { lj_gc_objbarriert(L, t, mt); } settabV(L, L->base-1-LJ_FR2, t); return FFH_RES(1); } LJLIB_CF(getfenv) LJLIB_REC(.) { GCfunc *fn; cTValue *o = L->base; if (!(o < L->top && tvisfunc(o))) { int level = lj_lib_optint(L, 1, 1); o = lj_debug_frame(L, level, &level); if (o == NULL) lj_err_arg(L, 1, LJ_ERR_INVLVL); if (LJ_FR2) o--; } fn = &gcval(o)->fn; settabV(L, L->top++, isluafunc(fn) ? tabref(fn->l.env) : tabref(L->env)); return 1; } LJLIB_CF(setfenv) { GCfunc *fn; GCtab *t = lj_lib_checktab(L, 2); cTValue *o = L->base; if (!(o < L->top && tvisfunc(o))) { int level = lj_lib_checkint(L, 1); if (level == 0) { /* NOBARRIER: A thread (i.e. L) is never black. 
*/ setgcref(L->env, obj2gco(t)); return 0; } o = lj_debug_frame(L, level, &level); if (o == NULL) lj_err_arg(L, 1, LJ_ERR_INVLVL); if (LJ_FR2) o--; } fn = &gcval(o)->fn; if (!isluafunc(fn)) lj_err_caller(L, LJ_ERR_SETFENV); setgcref(fn->l.env, obj2gco(t)); lj_gc_objbarrier(L, obj2gco(fn), t); setfuncV(L, L->top++, fn); return 1; } LJLIB_ASM(rawget) LJLIB_REC(.) { lj_lib_checktab(L, 1); lj_lib_checkany(L, 2); return FFH_UNREACHABLE; } LJLIB_CF(rawset) LJLIB_REC(.) { lj_lib_checktab(L, 1); lj_lib_checkany(L, 2); L->top = 1+lj_lib_checkany(L, 3); lua_rawset(L, 1); return 1; } LJLIB_CF(rawequal) LJLIB_REC(.) { cTValue *o1 = lj_lib_checkany(L, 1); cTValue *o2 = lj_lib_checkany(L, 2); setboolV(L->top-1, lj_obj_equal(o1, o2)); return 1; } #if LJ_52 LJLIB_CF(rawlen) LJLIB_REC(.) { cTValue *o = L->base; int32_t len; if (L->top > o && tvisstr(o)) len = (int32_t)strV(o)->len; else len = (int32_t)lj_tab_len(lj_lib_checktab(L, 1)); setintV(L->top-1, len); return 1; } #endif LJLIB_CF(unpack) { GCtab *t = lj_lib_checktab(L, 1); int32_t n, i = lj_lib_optint(L, 2, 1); int32_t e = (L->base+3-1 < L->top && !tvisnil(L->base+3-1)) ? lj_lib_checkint(L, 3) : (int32_t)lj_tab_len(t); if (i > e) return 0; n = e - i + 1; if (n <= 0 || !lua_checkstack(L, n)) lj_err_caller(L, LJ_ERR_UNPACK); do { cTValue *tv = lj_tab_getint(t, i); if (tv) { copyTV(L, L->top++, tv); } else { setnilV(L->top++); } } while (i++ < e); return n; } LJLIB_CF(select) LJLIB_REC(.) { int32_t n = (int32_t)(L->top - L->base); if (n >= 1 && tvisstr(L->base) && *strVdata(L->base) == '#') { setintV(L->top-1, n-1); return 1; } else { int32_t i = lj_lib_checkint(L, 1); if (i < 0) i = n + i; else if (i > n) i = n; if (i < 1) lj_err_arg(L, 1, LJ_ERR_IDXRNG); return n - i; } } /* -- Base library: conversions ------------------------------------------- */ LJLIB_ASM(tonumber) LJLIB_REC(.) 
{ int32_t base = lj_lib_optint(L, 2, 10); if (base == 10) { TValue *o = lj_lib_checkany(L, 1); if (lj_strscan_numberobj(o)) { copyTV(L, L->base-1-LJ_FR2, o); return FFH_RES(1); } #if LJ_HASFFI if (tviscdata(o)) { CTState *cts = ctype_cts(L); CType *ct = lj_ctype_rawref(cts, cdataV(o)->ctypeid); if (ctype_isenum(ct->info)) ct = ctype_child(cts, ct); if (ctype_isnum(ct->info) || ctype_iscomplex(ct->info)) { if (LJ_DUALNUM && ctype_isinteger_or_bool(ct->info) && ct->size <= 4 && !(ct->size == 4 && (ct->info & CTF_UNSIGNED))) { int32_t i; lj_cconv_ct_tv(cts, ctype_get(cts, CTID_INT32), (uint8_t *)&i, o, 0); setintV(L->base-1-LJ_FR2, i); return FFH_RES(1); } lj_cconv_ct_tv(cts, ctype_get(cts, CTID_DOUBLE), (uint8_t *)&(L->base-1-LJ_FR2)->n, o, 0); return FFH_RES(1); } } #endif } else { const char *p = strdata(lj_lib_checkstr(L, 1)); char *ep; unsigned long ul; if (base < 2 || base > 36) lj_err_arg(L, 2, LJ_ERR_BASERNG); ul = strtoul(p, &ep, base); if (p != ep) { while (lj_char_isspace((unsigned char)(*ep))) ep++; if (*ep == '\0') { if (LJ_DUALNUM && LJ_LIKELY(ul < 0x80000000u)) setintV(L->base-1-LJ_FR2, (int32_t)ul); else setnumV(L->base-1-LJ_FR2, (lua_Number)ul); return FFH_RES(1); } } } setnilV(L->base-1-LJ_FR2); return FFH_RES(1); } LJLIB_ASM(tostring) LJLIB_REC(.) { TValue *o = lj_lib_checkany(L, 1); cTValue *mo; L->top = o+1; /* Only keep one argument. */ if (!tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { copyTV(L, L->base-1-LJ_FR2, mo); /* Replace callable. */ return FFH_TAILCALL; } lj_gc_check(L); setstrV(L, L->base-1-LJ_FR2, lj_strfmt_obj(L, L->base)); return FFH_RES(1); } /* -- Base library: throw and catch errors -------------------------------- */ LJLIB_CF(error) { int32_t level = lj_lib_optint(L, 2, 1); lua_settop(L, 1); if (lua_isstring(L, 1) && level > 0) { luaL_where(L, level); lua_pushvalue(L, 1); lua_concat(L, 2); } return lua_error(L); } LJLIB_ASM(pcall) LJLIB_REC(.) { lj_lib_checkany(L, 1); lj_lib_checkfunc(L, 2); /* For xpcall only. 
*/ return FFH_UNREACHABLE; } LJLIB_ASM_(xpcall) LJLIB_REC(.) /* -- Base library: load Lua code ----------------------------------------- */ static int load_aux(lua_State *L, int status, int envarg) { if (status == LUA_OK) { if (tvistab(L->base+envarg-1)) { GCfunc *fn = funcV(L->top-1); GCtab *t = tabV(L->base+envarg-1); setgcref(fn->c.env, obj2gco(t)); lj_gc_objbarrier(L, fn, t); } return 1; } else { setnilV(L->top-2); return 2; } } LJLIB_CF(loadfile) { GCstr *fname = lj_lib_optstr(L, 1); GCstr *mode = lj_lib_optstr(L, 2); int status; lua_settop(L, 3); /* Ensure env arg exists. */ status = luaL_loadfilex(L, fname ? strdata(fname) : NULL, mode ? strdata(mode) : NULL); return load_aux(L, status, 3); } static const char *reader_func(lua_State *L, void *ud, size_t *size) { UNUSED(ud); luaL_checkstack(L, 2, "too many nested functions"); copyTV(L, L->top++, L->base); lua_call(L, 0, 1); /* Call user-supplied function. */ L->top--; if (tvisnil(L->top)) { *size = 0; return NULL; } else if (tvisstr(L->top) || tvisnumber(L->top)) { copyTV(L, L->base+4, L->top); /* Anchor string in reserved stack slot. */ return lua_tolstring(L, 5, size); } else { lj_err_caller(L, LJ_ERR_RDRSTR); return NULL; } } LJLIB_CF(load) { GCstr *name = lj_lib_optstr(L, 2); GCstr *mode = lj_lib_optstr(L, 3); int status; if (L->base < L->top && (tvisstr(L->base) || tvisnumber(L->base))) { GCstr *s = lj_lib_checkstr(L, 1); lua_settop(L, 4); /* Ensure env arg exists. */ status = luaL_loadbufferx(L, strdata(s), s->len, strdata(name ? name : s), mode ? strdata(mode) : NULL); } else { lj_lib_checkfunc(L, 1); lua_settop(L, 5); /* Reserve a slot for the string from the reader. */ status = lua_loadx(L, reader_func, NULL, name ? strdata(name) : "=(load)", mode ? strdata(mode) : NULL); } return load_aux(L, status, 4); } LJLIB_CF(loadstring) { return lj_cf_load(L); } LJLIB_CF(dofile) { GCstr *fname = lj_lib_optstr(L, 1); setnilV(L->top); L->top = L->base+1; if (luaL_loadfile(L, fname ? 
strdata(fname) : NULL) != LUA_OK) lua_error(L); lua_call(L, 0, LUA_MULTRET); return (int)(L->top - L->base) - 1; } /* -- Base library: GC control -------------------------------------------- */ LJLIB_CF(gcinfo) { setintV(L->top++, (int32_t)(G(L)->gc.total >> 10)); return 1; } LJLIB_CF(collectgarbage) { int opt = lj_lib_checkopt(L, 1, LUA_GCCOLLECT, /* ORDER LUA_GC* */ "\4stop\7restart\7collect\5count\1\377\4step\10setpause\12setstepmul\1\377\11isrunning"); int32_t data = lj_lib_optint(L, 2, 0); if (opt == LUA_GCCOUNT) { setnumV(L->top, (lua_Number)G(L)->gc.total/1024.0); } else { int res = lua_gc(L, opt, data); if (opt == LUA_GCSTEP || opt == LUA_GCISRUNNING) setboolV(L->top, res); else setintV(L->top, res); } L->top++; return 1; } /* -- Base library: miscellaneous functions ------------------------------- */ LJLIB_PUSH(top-2) /* Upvalue holds weak table. */ LJLIB_CF(newproxy) { lua_settop(L, 1); lua_newuserdata(L, 0); if (lua_toboolean(L, 1) == 0) { /* newproxy(): without metatable. */ return 1; } else if (lua_isboolean(L, 1)) { /* newproxy(true): with metatable. */ lua_newtable(L); lua_pushvalue(L, -1); lua_pushboolean(L, 1); lua_rawset(L, lua_upvalueindex(1)); /* Remember mt in weak table. */ } else { /* newproxy(proxy): inherit metatable. 
*/ int validproxy = 0; if (lua_getmetatable(L, 1)) { lua_rawget(L, lua_upvalueindex(1)); validproxy = lua_toboolean(L, -1); lua_pop(L, 1); } if (!validproxy) lj_err_arg(L, 1, LJ_ERR_NOPROXY); lua_getmetatable(L, 1); } lua_setmetatable(L, 2); return 1; } LJLIB_PUSH("tostring") LJLIB_CF(print) { ptrdiff_t i, nargs = L->top - L->base; cTValue *tv = lj_tab_getstr(tabref(L->env), strV(lj_lib_upvalue(L, 1))); int shortcut; if (tv && !tvisnil(tv)) { copyTV(L, L->top++, tv); } else { setstrV(L, L->top++, strV(lj_lib_upvalue(L, 1))); lua_gettable(L, LUA_GLOBALSINDEX); tv = L->top-1; } shortcut = (tvisfunc(tv) && funcV(tv)->c.ffid == FF_tostring); for (i = 0; i < nargs; i++) { cTValue *o = &L->base[i]; const char *str; size_t size; MSize len; if (shortcut && (str = lj_strfmt_wstrnum(L, o, &len)) != NULL) { size = len; } else { copyTV(L, L->top+1, o); copyTV(L, L->top, L->top-1); L->top += 2; lua_call(L, 1, 1); str = lua_tolstring(L, -1, &size); if (!str) lj_err_caller(L, LJ_ERR_PRTOSTR); L->top--; } if (i) putchar('\t'); fwrite(str, 1, size, stdout); } putchar('\n'); return 0; } LJLIB_PUSH(top-3) LJLIB_SET(_VERSION) #include "lj_libdef.h" /* -- Coroutine library --------------------------------------------------- */ #define LJLIB_MODULE_coroutine LJLIB_CF(coroutine_status) { const char *s; lua_State *co; if (!(L->top > L->base && tvisthread(L->base))) lj_err_arg(L, 1, LJ_ERR_NOCORO); co = threadV(L->base); if (co == L) s = "running"; else if (co->status == LUA_YIELD) s = "suspended"; else if (co->status != LUA_OK) s = "dead"; else if (co->base > tvref(co->stack)+1+LJ_FR2) s = "normal"; else if (co->top == co->base) s = "dead"; else s = "suspended"; lua_pushstring(L, s); return 1; } LJLIB_CF(coroutine_running) { #if LJ_52 int ismain = lua_pushthread(L); setboolV(L->top++, ismain); return 2; #else if (lua_pushthread(L)) setnilV(L->top++); return 1; #endif } LJLIB_CF(coroutine_isyieldable) { setboolV(L->top++, cframe_canyield(L->cframe)); return 1; } LJLIB_CF(coroutine_create) 
{ lua_State *L1; if (!(L->base < L->top && tvisfunc(L->base))) lj_err_argt(L, 1, LUA_TFUNCTION); L1 = lua_newthread(L); setfuncV(L, L1->top++, funcV(L->base)); return 1; } LJLIB_ASM(coroutine_yield) { lj_err_caller(L, LJ_ERR_CYIELD); return FFH_UNREACHABLE; } static int ffh_resume(lua_State *L, lua_State *co, int wrap) { if (co->cframe != NULL || co->status > LUA_YIELD || (co->status == LUA_OK && co->top == co->base)) { ErrMsg em = co->cframe ? LJ_ERR_CORUN : LJ_ERR_CODEAD; if (wrap) lj_err_caller(L, em); setboolV(L->base-1-LJ_FR2, 0); setstrV(L, L->base-LJ_FR2, lj_err_str(L, em)); return FFH_RES(2); } lj_state_growstack(co, (MSize)(L->top - L->base)); return FFH_RETRY; } LJLIB_ASM(coroutine_resume) { if (!(L->top > L->base && tvisthread(L->base))) lj_err_arg(L, 1, LJ_ERR_NOCORO); return ffh_resume(L, threadV(L->base), 0); } LJLIB_NOREG LJLIB_ASM(coroutine_wrap_aux) { return ffh_resume(L, threadV(lj_lib_upvalue(L, 1)), 1); } /* Inline declarations. */ LJ_ASMF void lj_ff_coroutine_wrap_aux(void); #if !(LJ_TARGET_MIPS && defined(ljamalg_c)) LJ_FUNCA_NORET void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co); #endif /* Error handler, called from assembler VM. */ void LJ_FASTCALL lj_ffh_coroutine_wrap_err(lua_State *L, lua_State *co) { co->top--; copyTV(L, L->top, co->top); L->top++; if (tvisstr(L->top-1)) lj_err_callermsg(L, strVdata(L->top-1)); else lj_err_run(L); } /* Forward declaration. */ static void setpc_wrap_aux(lua_State *L, GCfunc *fn); LJLIB_CF(coroutine_wrap) { GCfunc *fn; lj_cf_coroutine_create(L); fn = lj_lib_pushcc(L, lj_ffh_coroutine_wrap_aux, FF_coroutine_wrap_aux, 1); setpc_wrap_aux(L, fn); return 1; } #include "lj_libdef.h" /* Fix the PC of wrap_aux. Really ugly workaround. 
*/ static void setpc_wrap_aux(lua_State *L, GCfunc *fn) { setmref(fn->c.pc, &L2GG(L)->bcff[lj_lib_init_coroutine[1]+2]); } /* ------------------------------------------------------------------------ */ static void newproxy_weaktable(lua_State *L) { /* NOBARRIER: The table is new (marked white). */ GCtab *t = lj_tab_new(L, 0, 1); settabV(L, L->top++, t); setgcref(t->metatable, obj2gco(t)); setstrV(L, lj_tab_setstr(L, t, lj_str_newlit(L, "__mode")), lj_str_newlit(L, "kv")); t->nomm = (uint8_t)(~(1u<env); settabV(L, lj_tab_setstr(L, env, lj_str_newlit(L, "_G")), env); lua_pushliteral(L, LUA_VERSION); /* top-3. */ newproxy_weaktable(L); /* top-2. */ LJ_LIB_REG(L, "_G", base); LJ_LIB_REG(L, LUA_COLIBNAME, coroutine); return 2; } tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_parse.c0000644000000000000000000023354113306562377022603 0ustar rootroot/* ** Lua parser (source code -> bytecode). ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h ** ** Major portions taken verbatim or adapted from the Lua interpreter. ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h */ #define lj_parse_c #define LUA_CORE #include "lj_obj.h" #include "lj_gc.h" #include "lj_err.h" #include "lj_debug.h" #include "lj_buf.h" #include "lj_str.h" #include "lj_tab.h" #include "lj_func.h" #include "lj_state.h" #include "lj_bc.h" #if LJ_HASFFI #include "lj_ctype.h" #endif #include "lj_strfmt.h" #include "lj_lex.h" #include "lj_parse.h" #include "lj_vm.h" #include "lj_vmevent.h" /* -- Parser structures and definitions ----------------------------------- */ /* Expression kinds. 
*/ typedef enum { /* Constant expressions must be first and in this order: */ VKNIL, VKFALSE, VKTRUE, VKSTR, /* sval = string value */ VKNUM, /* nval = number value */ VKLAST = VKNUM, VKCDATA, /* nval = cdata value, not treated as a constant expression */ /* Non-constant expressions follow: */ VLOCAL, /* info = local register, aux = vstack index */ VUPVAL, /* info = upvalue index, aux = vstack index */ VGLOBAL, /* sval = string value */ VINDEXED, /* info = table register, aux = index reg/byte/string const */ VJMP, /* info = instruction PC */ VRELOCABLE, /* info = instruction PC */ VNONRELOC, /* info = result register */ VCALL, /* info = instruction PC, aux = base */ VVOID } ExpKind; /* Expression descriptor. */ typedef struct ExpDesc { union { struct { uint32_t info; /* Primary info. */ uint32_t aux; /* Secondary info. */ } s; TValue nval; /* Number value. */ GCstr *sval; /* String value. */ } u; ExpKind k; BCPos t; /* True condition jump list. */ BCPos f; /* False condition jump list. */ } ExpDesc; /* Macros for expressions. */ #define expr_hasjump(e) ((e)->t != (e)->f) #define expr_isk(e) ((e)->k <= VKLAST) #define expr_isk_nojump(e) (expr_isk(e) && !expr_hasjump(e)) #define expr_isnumk(e) ((e)->k == VKNUM) #define expr_isnumk_nojump(e) (expr_isnumk(e) && !expr_hasjump(e)) #define expr_isstrk(e) ((e)->k == VKSTR) #define expr_numtv(e) check_exp(expr_isnumk((e)), &(e)->u.nval) #define expr_numberV(e) numberVnum(expr_numtv((e))) /* Initialize expression. */ static LJ_AINLINE void expr_init(ExpDesc *e, ExpKind k, uint32_t info) { e->k = k; e->u.s.info = info; e->f = e->t = NO_JMP; } /* Check number constant for +-0. */ static int expr_numiszero(ExpDesc *e) { TValue *o = expr_numtv(e); return tvisint(o) ? (intV(o) == 0) : tviszero(o); } /* Per-function linked list of scope blocks. */ typedef struct FuncScope { struct FuncScope *prev; /* Link to outer scope. */ MSize vstart; /* Start of block-local variables. 
*/ uint8_t nactvar; /* Number of active vars outside the scope. */ uint8_t flags; /* Scope flags. */ } FuncScope; #define FSCOPE_LOOP 0x01 /* Scope is a (breakable) loop. */ #define FSCOPE_BREAK 0x02 /* Break used in scope. */ #define FSCOPE_GOLA 0x04 /* Goto or label used in scope. */ #define FSCOPE_UPVAL 0x08 /* Upvalue in scope. */ #define FSCOPE_NOCLOSE 0x10 /* Do not close upvalues. */ #define NAME_BREAK ((GCstr *)(uintptr_t)1) /* Index into variable stack. */ typedef uint16_t VarIndex; #define LJ_MAX_VSTACK (65536 - LJ_MAX_UPVAL) /* Variable/goto/label info. */ #define VSTACK_VAR_RW 0x01 /* R/W variable. */ #define VSTACK_GOTO 0x02 /* Pending goto. */ #define VSTACK_LABEL 0x04 /* Label. */ /* Per-function state. */ typedef struct FuncState { GCtab *kt; /* Hash table for constants. */ LexState *ls; /* Lexer state. */ lua_State *L; /* Lua state. */ FuncScope *bl; /* Current scope. */ struct FuncState *prev; /* Enclosing function. */ BCPos pc; /* Next bytecode position. */ BCPos lasttarget; /* Bytecode position of last jump target. */ BCPos jpc; /* Pending jump list to next bytecode. */ BCReg freereg; /* First free register. */ BCReg nactvar; /* Number of active local variables. */ BCReg nkn, nkgc; /* Number of lua_Number/GCobj constants */ BCLine linedefined; /* First line of the function definition. */ BCInsLine *bcbase; /* Base of bytecode stack. */ BCPos bclim; /* Limit of bytecode stack. */ MSize vbase; /* Base of variable stack for this function. */ uint8_t flags; /* Prototype flags. */ uint8_t numparams; /* Number of parameters. */ uint8_t framesize; /* Fixed frame size. */ uint8_t nuv; /* Number of upvalues */ VarIndex varmap[LJ_MAX_LOCVAR]; /* Map from register to variable idx. */ VarIndex uvmap[LJ_MAX_UPVAL]; /* Map from upvalue to variable idx. */ VarIndex uvtmp[LJ_MAX_UPVAL]; /* Temporary upvalue map. */ } FuncState; /* Binary and unary operators. 
ORDER OPR */ typedef enum BinOpr { OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, /* ORDER ARITH */ OPR_CONCAT, OPR_NE, OPR_EQ, OPR_LT, OPR_GE, OPR_LE, OPR_GT, OPR_AND, OPR_OR, OPR_NOBINOPR } BinOpr; LJ_STATIC_ASSERT((int)BC_ISGE-(int)BC_ISLT == (int)OPR_GE-(int)OPR_LT); LJ_STATIC_ASSERT((int)BC_ISLE-(int)BC_ISLT == (int)OPR_LE-(int)OPR_LT); LJ_STATIC_ASSERT((int)BC_ISGT-(int)BC_ISLT == (int)OPR_GT-(int)OPR_LT); LJ_STATIC_ASSERT((int)BC_SUBVV-(int)BC_ADDVV == (int)OPR_SUB-(int)OPR_ADD); LJ_STATIC_ASSERT((int)BC_MULVV-(int)BC_ADDVV == (int)OPR_MUL-(int)OPR_ADD); LJ_STATIC_ASSERT((int)BC_DIVVV-(int)BC_ADDVV == (int)OPR_DIV-(int)OPR_ADD); LJ_STATIC_ASSERT((int)BC_MODVV-(int)BC_ADDVV == (int)OPR_MOD-(int)OPR_ADD); /* -- Error handling ------------------------------------------------------ */ LJ_NORET LJ_NOINLINE static void err_syntax(LexState *ls, ErrMsg em) { lj_lex_error(ls, ls->tok, em); } LJ_NORET LJ_NOINLINE static void err_token(LexState *ls, LexToken tok) { lj_lex_error(ls, ls->tok, LJ_ERR_XTOKEN, lj_lex_token2str(ls, tok)); } LJ_NORET static void err_limit(FuncState *fs, uint32_t limit, const char *what) { if (fs->linedefined == 0) lj_lex_error(fs->ls, 0, LJ_ERR_XLIMM, limit, what); else lj_lex_error(fs->ls, 0, LJ_ERR_XLIMF, fs->linedefined, limit, what); } #define checklimit(fs, v, l, m) if ((v) >= (l)) err_limit(fs, l, m) #define checklimitgt(fs, v, l, m) if ((v) > (l)) err_limit(fs, l, m) #define checkcond(ls, c, em) { if (!(c)) err_syntax(ls, em); } /* -- Management of constants --------------------------------------------- */ /* Return bytecode encoding for primitive constant. */ #define const_pri(e) check_exp((e)->k <= VKTRUE, (e)->k) #define tvhaskslot(o) ((o)->u32.hi == 0) #define tvkslot(o) ((o)->u32.lo) /* Add a number constant. 
*/
static BCReg const_num(FuncState *fs, ExpDesc *e)
{
  lua_State *L = fs->L;
  TValue *o;
  lua_assert(expr_isnumk(e));
  /* Number constants are interned in fs->kt; reuse an existing slot index. */
  o = lj_tab_set(L, fs->kt, &e->u.nval);
  if (tvhaskslot(o)) return tvkslot(o);
  /* New constant: record the next free number-constant index in the slot. */
  o->u64 = fs->nkn;
  return fs->nkn++;
}

/* Add a GC object constant. Returns its index in the GC-constant array. */
static BCReg const_gc(FuncState *fs, GCobj *gc, uint32_t itype)
{
  lua_State *L = fs->L;
  TValue key, *o;
  setgcV(L, &key, gc, itype);
  /* NOBARRIER: the key is new or kept alive. */
  o = lj_tab_set(L, fs->kt, &key);
  if (tvhaskslot(o)) return tvkslot(o);
  o->u64 = fs->nkgc;
  return fs->nkgc++;
}

/* Add a string constant. */
static BCReg const_str(FuncState *fs, ExpDesc *e)
{
  lua_assert(expr_isstrk(e) || e->k == VGLOBAL);
  return const_gc(fs, obj2gco(e->u.sval), LJ_TSTR);
}

/* Anchor string constant to avoid GC. */
GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t len)
{
  /* NOBARRIER: the key is new or kept alive. */
  lua_State *L = ls->L;
  GCstr *s = lj_str_new(L, str, len);
  /* Using the string as a key in the constant table keeps it reachable. */
  TValue *tv = lj_tab_setstr(L, ls->fs->kt, s);
  if (tvisnil(tv)) setboolV(tv, 1);
  lj_gc_check(L);
  return s;
}

#if LJ_HASFFI
/* Anchor cdata to avoid GC. */
void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd)
{
  /* NOBARRIER: the key is new or kept alive. */
  lua_State *L = ls->L;
  setcdataV(L, tv, cd);
  setboolV(lj_tab_set(L, ls->fs->kt, tv), 1);
}
#endif

/* -- Jump list handling -------------------------------------------------- */

/* Get next element in jump list. Returns NO_JMP at the end of the list. */
static BCPos jmp_next(FuncState *fs, BCPos pc)
{
  /* The jump offset of each JMP doubles as the link to the next list entry. */
  ptrdiff_t delta = bc_j(fs->bcbase[pc].ins);
  if ((BCPos)delta == NO_JMP)
    return NO_JMP;  /* End of list. */
  else
    return (BCPos)(((ptrdiff_t)pc+1)+delta);
}

/* Check if any of the instructions on the jump list produce no value. */
static int jmp_novalue(FuncState *fs, BCPos list)
{
  for (; list != NO_JMP; list = jmp_next(fs, list)) {
    /* Inspect the instruction preceding the jump (the guarding test). */
    BCIns p = fs->bcbase[list >= 1 ? list-1 : list].ins;
    if (!(bc_op(p) == BC_ISTC || bc_op(p) == BC_ISFC || bc_a(p) == NO_REG))
      return 1;
  }
  return 0;
}

/* Patch register of test instructions.
*/
static int jmp_patchtestreg(FuncState *fs, BCPos pc, BCReg reg)
{
  /* The instruction preceding the jump at pc is the test it guards. */
  BCInsLine *ilp = &fs->bcbase[pc >= 1 ? pc-1 : pc];
  BCOp op = bc_op(ilp->ins);
  if (op == BC_ISTC || op == BC_ISFC) {
    if (reg != NO_REG && reg != bc_d(ilp->ins)) {
      setbc_a(&ilp->ins, reg);
    } else {  /* Nothing to store or already in the right register. */
      setbc_op(&ilp->ins, op+(BC_IST-BC_ISTC));
      setbc_a(&ilp->ins, 0);
    }
  } else if (bc_a(ilp->ins) == NO_REG) {
    if (reg == NO_REG) {
      /* Degrade to a plain jump, copying the A operand of the jump at pc. */
      ilp->ins = BCINS_AJ(BC_JMP, bc_a(fs->bcbase[pc].ins), 0);
    } else {
      setbc_a(&ilp->ins, reg);
      /* Keep the following jump's free-register base above reg. */
      if (reg >= bc_a(ilp[1].ins))
        setbc_a(&ilp[1].ins, reg+1);
    }
  } else {
    return 0;  /* Cannot patch other instructions. */
  }
  return 1;
}

/* Drop values for all instructions on jump list. */
static void jmp_dropval(FuncState *fs, BCPos list)
{
  for (; list != NO_JMP; list = jmp_next(fs, list))
    jmp_patchtestreg(fs, list, NO_REG);
}

/* Patch jump instruction to target. */
static void jmp_patchins(FuncState *fs, BCPos pc, BCPos dest)
{
  BCIns *jmp = &fs->bcbase[pc].ins;
  BCPos offset = dest-(pc+1)+BCBIAS_J;  /* Biased jump offset. */
  lua_assert(dest != NO_JMP);
  if (offset > BCMAX_D)
    err_syntax(fs->ls, LJ_ERR_XJUMP);  /* Jump target out of encodable range. */
  setbc_d(jmp, offset);
}

/* Append to jump list. */
static void jmp_append(FuncState *fs, BCPos *l1, BCPos l2)
{
  if (l2 == NO_JMP) {
    return;  /* Nothing to append. */
  } else if (*l1 == NO_JMP) {
    *l1 = l2;  /* List was empty: l2 becomes the whole list. */
  } else {
    BCPos list = *l1;
    BCPos next;
    while ((next = jmp_next(fs, list)) != NO_JMP)  /* Find last element. */
      list = next;
    jmp_patchins(fs, list, l2);  /* Chain l2 onto the tail. */
  }
}

/* Patch jump list and preserve produced values. */
static void jmp_patchval(FuncState *fs, BCPos list, BCPos vtarget, BCReg reg,
                         BCPos dtarget)
{
  while (list != NO_JMP) {
    BCPos next = jmp_next(fs, list);  /* Save link before patching over it. */
    if (jmp_patchtestreg(fs, list, reg))
      jmp_patchins(fs, list, vtarget);  /* Jump to target with value. */
    else
      jmp_patchins(fs, list, dtarget);  /* Jump to default target. */
    list = next;
  }
}

/* Jump to following instruction. Append to list of pending jumps.
*/
static void jmp_tohere(FuncState *fs, BCPos list)
{
  fs->lasttarget = fs->pc;
  /* Pending jumps (fs->jpc) are resolved when the next instruction is emitted. */
  jmp_append(fs, &fs->jpc, list);
}

/* Patch jump list to target. */
static void jmp_patch(FuncState *fs, BCPos list, BCPos target)
{
  if (target == fs->pc) {
    jmp_tohere(fs, list);  /* Forward jump to here: defer until next emit. */
  } else {
    lua_assert(target < fs->pc);
    jmp_patchval(fs, list, target, NO_REG, target);
  }
}

/* -- Bytecode register allocator ----------------------------------------- */

/* Bump frame size. */
static void bcreg_bump(FuncState *fs, BCReg n)
{
  BCReg sz = fs->freereg + n;
  if (sz > fs->framesize) {
    if (sz >= LJ_MAX_SLOTS)
      err_syntax(fs->ls, LJ_ERR_XSLOTS);  /* Function needs too many slots. */
    fs->framesize = (uint8_t)sz;
  }
}

/* Reserve registers. */
static void bcreg_reserve(FuncState *fs, BCReg n)
{
  bcreg_bump(fs, n);
  fs->freereg += n;
}

/* Free register. */
static void bcreg_free(FuncState *fs, BCReg reg)
{
  /* Registers holding active locals (below nactvar) are never freed here. */
  if (reg >= fs->nactvar) {
    fs->freereg--;
    lua_assert(reg == fs->freereg);  /* Temporaries are freed in LIFO order. */
  }
}

/* Free register for expression. */
static void expr_free(FuncState *fs, ExpDesc *e)
{
  if (e->k == VNONRELOC)
    bcreg_free(fs, e->u.s.info);
}

/* -- Bytecode emitter ---------------------------------------------------- */

/* Emit bytecode instruction.
*/
static BCPos bcemit_INS(FuncState *fs, BCIns ins)
{
  BCPos pc = fs->pc;
  LexState *ls = fs->ls;
  /* Resolve all pending jumps to the instruction about to be emitted. */
  jmp_patchval(fs, fs->jpc, pc, NO_REG, pc);
  fs->jpc = NO_JMP;
  if (LJ_UNLIKELY(pc >= fs->bclim)) {
    /* Grow the shared bytecode stack; rebase this function's view into it. */
    ptrdiff_t base = fs->bcbase - ls->bcstack;
    checklimit(fs, ls->sizebcstack, LJ_MAX_BCINS, "bytecode instructions");
    lj_mem_growvec(fs->L, ls->bcstack, ls->sizebcstack, LJ_MAX_BCINS,BCInsLine);
    fs->bclim = (BCPos)(ls->sizebcstack - base);
    fs->bcbase = ls->bcstack + base;
  }
  fs->bcbase[pc].ins = ins;
  fs->bcbase[pc].line = ls->lastline;
  fs->pc = pc+1;
  return pc;
}

#define bcemit_ABC(fs, o, a, b, c)	bcemit_INS(fs, BCINS_ABC(o, a, b, c))
#define bcemit_AD(fs, o, a, d)		bcemit_INS(fs, BCINS_AD(o, a, d))
#define bcemit_AJ(fs, o, a, j)		bcemit_INS(fs, BCINS_AJ(o, a, j))

/* Pointer to the emitted instruction an expression refers to. */
#define bcptr(fs, e)			(&(fs)->bcbase[(e)->u.s.info].ins)

/* -- Bytecode emitter for expressions ------------------------------------ */

/* Discharge non-constant expression to any register. */
static void expr_discharge(FuncState *fs, ExpDesc *e)
{
  BCIns ins;
  if (e->k == VUPVAL) {
    ins = BCINS_AD(BC_UGET, 0, e->u.s.info);
  } else if (e->k == VGLOBAL) {
    ins = BCINS_AD(BC_GGET, 0, const_str(fs, e));
  } else if (e->k == VINDEXED) {
    BCReg rc = e->u.s.aux;
    if ((int32_t)rc < 0) {  /* Negative aux encodes a string constant key. */
      ins = BCINS_ABC(BC_TGETS, 0, e->u.s.info, ~rc);
    } else if (rc > BCMAX_C) {  /* Out-of-range aux encodes a byte constant. */
      ins = BCINS_ABC(BC_TGETB, 0, e->u.s.info, rc-(BCMAX_C+1));
    } else {  /* Otherwise the key lives in register rc. */
      bcreg_free(fs, rc);
      ins = BCINS_ABC(BC_TGETV, 0, e->u.s.info, rc);
    }
    bcreg_free(fs, e->u.s.info);
  } else if (e->k == VCALL) {
    e->u.s.info = e->u.s.aux;  /* Result lands in the call's base register. */
    e->k = VNONRELOC;
    return;
  } else if (e->k == VLOCAL) {
    e->k = VNONRELOC;  /* A local already sits in a fixed register. */
    return;
  } else {
    return;  /* Constants and already-discharged kinds need no instruction. */
  }
  e->u.s.info = bcemit_INS(fs, ins);
  e->k = VRELOCABLE;
}

/* Emit bytecode to set a range of registers to nil. */
static void bcemit_nil(FuncState *fs, BCReg from, BCReg n)
{
  if (fs->pc > fs->lasttarget) {  /* No jumps to current position? */
    BCIns *ip = &fs->bcbase[fs->pc-1].ins;
    BCReg pto, pfrom = bc_a(*ip);
    switch (bc_op(*ip)) {  /* Try to merge with the previous instruction. */
    case BC_KPRI:
      if (bc_d(*ip) != ~LJ_TNIL) break;
      if (from == pfrom) {
        if (n == 1) return;  /* Same single register already set to nil. */
      } else if (from == pfrom+1) {
        from = pfrom;
        n++;
      } else {
        break;
      }
      *ip = BCINS_AD(BC_KNIL, from, from+n-1);  /* Replace KPRI. */
      return;
    case BC_KNIL:
      pto = bc_d(*ip);
      if (pfrom <= from && from <= pto+1) {  /* Can we connect both ranges? */
        if (from+n-1 > pto)
          setbc_d(ip, from+n-1);  /* Patch previous instruction range. */
        return;
      }
      break;
    default:
      break;
    }
  }
  /* Emit new instruction or replace old instruction. */
  bcemit_INS(fs, n == 1 ? BCINS_AD(BC_KPRI, from, VKNIL) :
                          BCINS_AD(BC_KNIL, from, from+n-1));
}

/* Discharge an expression to a specific register. Ignore branches. */
static void expr_toreg_nobranch(FuncState *fs, ExpDesc *e, BCReg reg)
{
  BCIns ins;
  expr_discharge(fs, e);
  if (e->k == VKSTR) {
    ins = BCINS_AD(BC_KSTR, reg, const_str(fs, e));
  } else if (e->k == VKNUM) {
#if LJ_DUALNUM
    cTValue *tv = expr_numtv(e);
    /* Prefer the compact KSHORT form for small integer values. */
    if (tvisint(tv) && checki16(intV(tv)))
      ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)intV(tv));
    else
#else
    lua_Number n = expr_numberV(e);
    int32_t k = lj_num2int(n);
    if (checki16(k) && n == (lua_Number)k)
      ins = BCINS_AD(BC_KSHORT, reg, (BCReg)(uint16_t)k);
    else
#endif
      ins = BCINS_AD(BC_KNUM, reg, const_num(fs, e));
#if LJ_HASFFI
  } else if (e->k == VKCDATA) {
    fs->flags |= PROTO_FFI;
    ins = BCINS_AD(BC_KCDATA, reg,
                   const_gc(fs, obj2gco(cdataV(&e->u.nval)), LJ_TCDATA));
#endif
  } else if (e->k == VRELOCABLE) {
    setbc_a(bcptr(fs, e), reg);  /* Just retarget the pending instruction. */
    goto noins;
  } else if (e->k == VNONRELOC) {
    if (reg == e->u.s.info)
      goto noins;  /* Already in the requested register. */
    ins = BCINS_AD(BC_MOV, reg, e->u.s.info);
  } else if (e->k == VKNIL) {
    bcemit_nil(fs, reg, 1);
    goto noins;
  } else if (e->k <= VKTRUE) {
    ins = BCINS_AD(BC_KPRI, reg, const_pri(e));
  } else {
    lua_assert(e->k == VVOID || e->k == VJMP);
    return;
  }
  bcemit_INS(fs, ins);
noins:
  e->u.s.info = reg;
  e->k = VNONRELOC;
}

/* Forward
declaration. */
static BCPos bcemit_jmp(FuncState *fs);

/* Discharge an expression to a specific register. */
static void expr_toreg(FuncState *fs, ExpDesc *e, BCReg reg)
{
  expr_toreg_nobranch(fs, e, reg);
  if (e->k == VJMP)
    jmp_append(fs, &e->t, e->u.s.info);  /* Add it to the true jump list. */
  if (expr_hasjump(e)) {  /* Discharge expression with branches. */
    BCPos jend, jfalse = NO_JMP, jtrue = NO_JMP;
    if (jmp_novalue(fs, e->t) || jmp_novalue(fs, e->f)) {
      /* Some branch produces no value: materialize false/true loads. */
      BCPos jval = (e->k == VJMP) ? NO_JMP : bcemit_jmp(fs);
      jfalse = bcemit_AD(fs, BC_KPRI, reg, VKFALSE);
      bcemit_AJ(fs, BC_JMP, fs->freereg, 1);  /* Skip over the true load. */
      jtrue = bcemit_AD(fs, BC_KPRI, reg, VKTRUE);
      jmp_tohere(fs, jval);
    }
    jend = fs->pc;
    fs->lasttarget = jend;
    jmp_patchval(fs, e->f, jend, reg, jfalse);
    jmp_patchval(fs, e->t, jend, reg, jtrue);
  }
  e->f = e->t = NO_JMP;
  e->u.s.info = reg;
  e->k = VNONRELOC;
}

/* Discharge an expression to the next free register. */
static void expr_tonextreg(FuncState *fs, ExpDesc *e)
{
  expr_discharge(fs, e);
  expr_free(fs, e);  /* Free first so the reserved register can be reused. */
  bcreg_reserve(fs, 1);
  expr_toreg(fs, e, fs->freereg - 1);
}

/* Discharge an expression to any register. Returns the register. */
static BCReg expr_toanyreg(FuncState *fs, ExpDesc *e)
{
  expr_discharge(fs, e);
  if (e->k == VNONRELOC) {
    if (!expr_hasjump(e)) return e->u.s.info;  /* Already in a register. */
    if (e->u.s.info >= fs->nactvar) {
      expr_toreg(fs, e, e->u.s.info);  /* Discharge to temp. register. */
      return e->u.s.info;
    }
  }
  expr_tonextreg(fs, e);  /* Discharge to next register. */
  return e->u.s.info;
}

/* Partially discharge expression to a value. */
static void expr_toval(FuncState *fs, ExpDesc *e)
{
  if (expr_hasjump(e))
    expr_toanyreg(fs, e);  /* Branches force a full register discharge. */
  else
    expr_discharge(fs, e);
}

/* Emit store for LHS expression.
*/ static void bcemit_store(FuncState *fs, ExpDesc *var, ExpDesc *e) { BCIns ins; if (var->k == VLOCAL) { fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW; expr_free(fs, e); expr_toreg(fs, e, var->u.s.info); return; } else if (var->k == VUPVAL) { fs->ls->vstack[var->u.s.aux].info |= VSTACK_VAR_RW; expr_toval(fs, e); if (e->k <= VKTRUE) ins = BCINS_AD(BC_USETP, var->u.s.info, const_pri(e)); else if (e->k == VKSTR) ins = BCINS_AD(BC_USETS, var->u.s.info, const_str(fs, e)); else if (e->k == VKNUM) ins = BCINS_AD(BC_USETN, var->u.s.info, const_num(fs, e)); else ins = BCINS_AD(BC_USETV, var->u.s.info, expr_toanyreg(fs, e)); } else if (var->k == VGLOBAL) { BCReg ra = expr_toanyreg(fs, e); ins = BCINS_AD(BC_GSET, ra, const_str(fs, var)); } else { BCReg ra, rc; lua_assert(var->k == VINDEXED); ra = expr_toanyreg(fs, e); rc = var->u.s.aux; if ((int32_t)rc < 0) { ins = BCINS_ABC(BC_TSETS, ra, var->u.s.info, ~rc); } else if (rc > BCMAX_C) { ins = BCINS_ABC(BC_TSETB, ra, var->u.s.info, rc-(BCMAX_C+1)); } else { /* Free late alloced key reg to avoid assert on free of value reg. */ /* This can only happen when called from expr_table(). */ lua_assert(e->k != VNONRELOC || ra < fs->nactvar || rc < ra || (bcreg_free(fs, rc),1)); ins = BCINS_ABC(BC_TSETV, ra, var->u.s.info, rc); } } bcemit_INS(fs, ins); expr_free(fs, e); } /* Emit method lookup expression. */ static void bcemit_method(FuncState *fs, ExpDesc *e, ExpDesc *key) { BCReg idx, func, obj = expr_toanyreg(fs, e); expr_free(fs, e); func = fs->freereg; bcemit_AD(fs, BC_MOV, func+1+LJ_FR2, obj); /* Copy object to 1st argument. 
*/ lua_assert(expr_isstrk(key)); idx = const_str(fs, key); if (idx <= BCMAX_C) { bcreg_reserve(fs, 2+LJ_FR2); bcemit_ABC(fs, BC_TGETS, func, obj, idx); } else { bcreg_reserve(fs, 3+LJ_FR2); bcemit_AD(fs, BC_KSTR, func+2+LJ_FR2, idx); bcemit_ABC(fs, BC_TGETV, func, obj, func+2+LJ_FR2); fs->freereg--; } e->u.s.info = func; e->k = VNONRELOC; } /* -- Bytecode emitter for branches --------------------------------------- */ /* Emit unconditional branch. */ static BCPos bcemit_jmp(FuncState *fs) { BCPos jpc = fs->jpc; BCPos j = fs->pc - 1; BCIns *ip = &fs->bcbase[j].ins; fs->jpc = NO_JMP; if ((int32_t)j >= (int32_t)fs->lasttarget && bc_op(*ip) == BC_UCLO) { setbc_j(ip, NO_JMP); fs->lasttarget = j+1; } else { j = bcemit_AJ(fs, BC_JMP, fs->freereg, NO_JMP); } jmp_append(fs, &j, jpc); return j; } /* Invert branch condition of bytecode instruction. */ static void invertcond(FuncState *fs, ExpDesc *e) { BCIns *ip = &fs->bcbase[e->u.s.info - 1].ins; setbc_op(ip, bc_op(*ip)^1); } /* Emit conditional branch. */ static BCPos bcemit_branch(FuncState *fs, ExpDesc *e, int cond) { BCPos pc; if (e->k == VRELOCABLE) { BCIns *ip = bcptr(fs, e); if (bc_op(*ip) == BC_NOT) { *ip = BCINS_AD(cond ? BC_ISF : BC_IST, 0, bc_d(*ip)); return bcemit_jmp(fs); } } if (e->k != VNONRELOC) { bcreg_reserve(fs, 1); expr_toreg_nobranch(fs, e, fs->freereg-1); } bcemit_AD(fs, cond ? BC_ISTC : BC_ISFC, NO_REG, e->u.s.info); pc = bcemit_jmp(fs); expr_free(fs, e); return pc; } /* Emit branch on true condition. */ static void bcemit_branch_t(FuncState *fs, ExpDesc *e) { BCPos pc; expr_discharge(fs, e); if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) pc = NO_JMP; /* Never jump. */ else if (e->k == VJMP) invertcond(fs, e), pc = e->u.s.info; else if (e->k == VKFALSE || e->k == VKNIL) expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); else pc = bcemit_branch(fs, e, 0); jmp_append(fs, &e->f, pc); jmp_tohere(fs, e->t); e->t = NO_JMP; } /* Emit branch on false condition. 
*/ static void bcemit_branch_f(FuncState *fs, ExpDesc *e) { BCPos pc; expr_discharge(fs, e); if (e->k == VKNIL || e->k == VKFALSE) pc = NO_JMP; /* Never jump. */ else if (e->k == VJMP) pc = e->u.s.info; else if (e->k == VKSTR || e->k == VKNUM || e->k == VKTRUE) expr_toreg_nobranch(fs, e, NO_REG), pc = bcemit_jmp(fs); else pc = bcemit_branch(fs, e, 1); jmp_append(fs, &e->t, pc); jmp_tohere(fs, e->f); e->f = NO_JMP; } /* -- Bytecode emitter for operators -------------------------------------- */ /* Try constant-folding of arithmetic operators. */ static int foldarith(BinOpr opr, ExpDesc *e1, ExpDesc *e2) { TValue o; lua_Number n; if (!expr_isnumk_nojump(e1) || !expr_isnumk_nojump(e2)) return 0; n = lj_vm_foldarith(expr_numberV(e1), expr_numberV(e2), (int)opr-OPR_ADD); setnumV(&o, n); if (tvisnan(&o) || tvismzero(&o)) return 0; /* Avoid NaN and -0 as consts. */ if (LJ_DUALNUM) { int32_t k = lj_num2int(n); if ((lua_Number)k == n) { setintV(&e1->u.nval, k); return 1; } } setnumV(&e1->u.nval, n); return 1; } /* Emit arithmetic operator. */ static void bcemit_arith(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) { BCReg rb, rc, t; uint32_t op; if (foldarith(opr, e1, e2)) return; if (opr == OPR_POW) { op = BC_POW; rc = expr_toanyreg(fs, e2); rb = expr_toanyreg(fs, e1); } else { op = opr-OPR_ADD+BC_ADDVV; /* Must discharge 2nd operand first since VINDEXED might free regs. */ expr_toval(fs, e2); if (expr_isnumk(e2) && (rc = const_num(fs, e2)) <= BCMAX_C) op -= BC_ADDVV-BC_ADDVN; else rc = expr_toanyreg(fs, e2); /* 1st operand discharged by bcemit_binop_left, but need KNUM/KSHORT. */ lua_assert(expr_isnumk(e1) || e1->k == VNONRELOC); expr_toval(fs, e1); /* Avoid two consts to satisfy bytecode constraints. */ if (expr_isnumk(e1) && !expr_isnumk(e2) && (t = const_num(fs, e1)) <= BCMAX_B) { rb = rc; rc = t; op -= BC_ADDVV-BC_ADDNV; } else { rb = expr_toanyreg(fs, e1); } } /* Using expr_free might cause asserts if the order is wrong. 
*/ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; e1->u.s.info = bcemit_ABC(fs, op, 0, rb, rc); e1->k = VRELOCABLE; } /* Emit comparison operator. */ static void bcemit_comp(FuncState *fs, BinOpr opr, ExpDesc *e1, ExpDesc *e2) { ExpDesc *eret = e1; BCIns ins; expr_toval(fs, e1); if (opr == OPR_EQ || opr == OPR_NE) { BCOp op = opr == OPR_EQ ? BC_ISEQV : BC_ISNEV; BCReg ra; if (expr_isk(e1)) { e1 = e2; e2 = eret; } /* Need constant in 2nd arg. */ ra = expr_toanyreg(fs, e1); /* First arg must be in a reg. */ expr_toval(fs, e2); switch (e2->k) { case VKNIL: case VKFALSE: case VKTRUE: ins = BCINS_AD(op+(BC_ISEQP-BC_ISEQV), ra, const_pri(e2)); break; case VKSTR: ins = BCINS_AD(op+(BC_ISEQS-BC_ISEQV), ra, const_str(fs, e2)); break; case VKNUM: ins = BCINS_AD(op+(BC_ISEQN-BC_ISEQV), ra, const_num(fs, e2)); break; default: ins = BCINS_AD(op, ra, expr_toanyreg(fs, e2)); break; } } else { uint32_t op = opr-OPR_LT+BC_ISLT; BCReg ra, rd; if ((op-BC_ISLT) & 1) { /* GT -> LT, GE -> LE */ e1 = e2; e2 = eret; /* Swap operands. */ op = ((op-BC_ISLT)^3)+BC_ISLT; expr_toval(fs, e1); } rd = expr_toanyreg(fs, e2); ra = expr_toanyreg(fs, e1); ins = BCINS_AD(op, ra, rd); } /* Using expr_free might cause asserts if the order is wrong. */ if (e1->k == VNONRELOC && e1->u.s.info >= fs->nactvar) fs->freereg--; if (e2->k == VNONRELOC && e2->u.s.info >= fs->nactvar) fs->freereg--; bcemit_INS(fs, ins); eret->u.s.info = bcemit_jmp(fs); eret->k = VJMP; } /* Fixup left side of binary operator. */ static void bcemit_binop_left(FuncState *fs, BinOpr op, ExpDesc *e) { if (op == OPR_AND) { bcemit_branch_t(fs, e); } else if (op == OPR_OR) { bcemit_branch_f(fs, e); } else if (op == OPR_CONCAT) { expr_tonextreg(fs, e); } else if (op == OPR_EQ || op == OPR_NE) { if (!expr_isk_nojump(e)) expr_toanyreg(fs, e); } else { if (!expr_isnumk_nojump(e)) expr_toanyreg(fs, e); } } /* Emit binary operator. 
*/ static void bcemit_binop(FuncState *fs, BinOpr op, ExpDesc *e1, ExpDesc *e2) { if (op <= OPR_POW) { bcemit_arith(fs, op, e1, e2); } else if (op == OPR_AND) { lua_assert(e1->t == NO_JMP); /* List must be closed. */ expr_discharge(fs, e2); jmp_append(fs, &e2->f, e1->f); *e1 = *e2; } else if (op == OPR_OR) { lua_assert(e1->f == NO_JMP); /* List must be closed. */ expr_discharge(fs, e2); jmp_append(fs, &e2->t, e1->t); *e1 = *e2; } else if (op == OPR_CONCAT) { expr_toval(fs, e2); if (e2->k == VRELOCABLE && bc_op(*bcptr(fs, e2)) == BC_CAT) { lua_assert(e1->u.s.info == bc_b(*bcptr(fs, e2))-1); expr_free(fs, e1); setbc_b(bcptr(fs, e2), e1->u.s.info); e1->u.s.info = e2->u.s.info; } else { expr_tonextreg(fs, e2); expr_free(fs, e2); expr_free(fs, e1); e1->u.s.info = bcemit_ABC(fs, BC_CAT, 0, e1->u.s.info, e2->u.s.info); } e1->k = VRELOCABLE; } else { lua_assert(op == OPR_NE || op == OPR_EQ || op == OPR_LT || op == OPR_GE || op == OPR_LE || op == OPR_GT); bcemit_comp(fs, op, e1, e2); } } /* Emit unary operator. */ static void bcemit_unop(FuncState *fs, BCOp op, ExpDesc *e) { if (op == BC_NOT) { /* Swap true and false lists. */ { BCPos temp = e->f; e->f = e->t; e->t = temp; } jmp_dropval(fs, e->f); jmp_dropval(fs, e->t); expr_discharge(fs, e); if (e->k == VKNIL || e->k == VKFALSE) { e->k = VKTRUE; return; } else if (expr_isk(e) || (LJ_HASFFI && e->k == VKCDATA)) { e->k = VKFALSE; return; } else if (e->k == VJMP) { invertcond(fs, e); return; } else if (e->k == VRELOCABLE) { bcreg_reserve(fs, 1); setbc_a(bcptr(fs, e), fs->freereg-1); e->u.s.info = fs->freereg-1; e->k = VNONRELOC; } else { lua_assert(e->k == VNONRELOC); } } else { lua_assert(op == BC_UNM || op == BC_LEN); if (op == BC_UNM && !expr_hasjump(e)) { /* Constant-fold negations. */ #if LJ_HASFFI if (e->k == VKCDATA) { /* Fold in-place since cdata is not interned. 
*/ GCcdata *cd = cdataV(&e->u.nval); int64_t *p = (int64_t *)cdataptr(cd); if (cd->ctypeid == CTID_COMPLEX_DOUBLE) p[1] ^= (int64_t)U64x(80000000,00000000); else *p = -*p; return; } else #endif if (expr_isnumk(e) && !expr_numiszero(e)) { /* Avoid folding to -0. */ TValue *o = expr_numtv(e); if (tvisint(o)) { int32_t k = intV(o); if (k == -k) setnumV(o, -(lua_Number)k); else setintV(o, -k); return; } else { o->u64 ^= U64x(80000000,00000000); return; } } } expr_toanyreg(fs, e); } expr_free(fs, e); e->u.s.info = bcemit_AD(fs, op, 0, e->u.s.info); e->k = VRELOCABLE; } /* -- Lexer support ------------------------------------------------------- */ /* Check and consume optional token. */ static int lex_opt(LexState *ls, LexToken tok) { if (ls->tok == tok) { lj_lex_next(ls); return 1; } return 0; } /* Check and consume token. */ static void lex_check(LexState *ls, LexToken tok) { if (ls->tok != tok) err_token(ls, tok); lj_lex_next(ls); } /* Check for matching token. */ static void lex_match(LexState *ls, LexToken what, LexToken who, BCLine line) { if (!lex_opt(ls, what)) { if (line == ls->linenumber) { err_token(ls, what); } else { const char *swhat = lj_lex_token2str(ls, what); const char *swho = lj_lex_token2str(ls, who); lj_lex_error(ls, ls->tok, LJ_ERR_XMATCH, swhat, swho, line); } } } /* Check for string token. */ static GCstr *lex_str(LexState *ls) { GCstr *s; if (ls->tok != TK_name && (LJ_52 || ls->tok != TK_goto)) err_token(ls, TK_name); s = strV(&ls->tokval); lj_lex_next(ls); return s; } /* -- Variable handling --------------------------------------------------- */ #define var_get(ls, fs, i) ((ls)->vstack[(fs)->varmap[(i)]]) /* Define a new local variable. 
*/ static void var_new(LexState *ls, BCReg n, GCstr *name) { FuncState *fs = ls->fs; MSize vtop = ls->vtop; checklimit(fs, fs->nactvar+n, LJ_MAX_LOCVAR, "local variables"); if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { if (ls->sizevstack >= LJ_MAX_VSTACK) lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); } lua_assert((uintptr_t)name < VARNAME__MAX || lj_tab_getstr(fs->kt, name) != NULL); /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ setgcref(ls->vstack[vtop].name, obj2gco(name)); fs->varmap[fs->nactvar+n] = (uint16_t)vtop; ls->vtop = vtop+1; } #define var_new_lit(ls, n, v) \ var_new(ls, (n), lj_parse_keepstr(ls, "" v, sizeof(v)-1)) #define var_new_fixed(ls, n, vn) \ var_new(ls, (n), (GCstr *)(uintptr_t)(vn)) /* Add local variables. */ static void var_add(LexState *ls, BCReg nvars) { FuncState *fs = ls->fs; BCReg nactvar = fs->nactvar; while (nvars--) { VarInfo *v = &var_get(ls, fs, nactvar); v->startpc = fs->pc; v->slot = nactvar++; v->info = 0; } fs->nactvar = nactvar; } /* Remove local variables. */ static void var_remove(LexState *ls, BCReg tolevel) { FuncState *fs = ls->fs; while (fs->nactvar > tolevel) var_get(ls, fs, --fs->nactvar).endpc = fs->pc; } /* Lookup local variable name. */ static BCReg var_lookup_local(FuncState *fs, GCstr *n) { int i; for (i = fs->nactvar-1; i >= 0; i--) { if (n == strref(var_get(fs->ls, fs, i).name)) return (BCReg)i; } return (BCReg)-1; /* Not found. */ } /* Lookup or add upvalue index. */ static MSize var_lookup_uv(FuncState *fs, MSize vidx, ExpDesc *e) { MSize i, n = fs->nuv; for (i = 0; i < n; i++) if (fs->uvmap[i] == vidx) return i; /* Already exists. */ /* Otherwise create a new one. */ checklimit(fs, fs->nuv, LJ_MAX_UPVAL, "upvalues"); lua_assert(e->k == VLOCAL || e->k == VUPVAL); fs->uvmap[n] = (uint16_t)vidx; fs->uvtmp[n] = (uint16_t)(e->k == VLOCAL ? 
vidx : LJ_MAX_VSTACK+e->u.s.info); fs->nuv = n+1; return n; } /* Forward declaration. */ static void fscope_uvmark(FuncState *fs, BCReg level); /* Recursively lookup variables in enclosing functions. */ static MSize var_lookup_(FuncState *fs, GCstr *name, ExpDesc *e, int first) { if (fs) { BCReg reg = var_lookup_local(fs, name); if ((int32_t)reg >= 0) { /* Local in this function? */ expr_init(e, VLOCAL, reg); if (!first) fscope_uvmark(fs, reg); /* Scope now has an upvalue. */ return (MSize)(e->u.s.aux = (uint32_t)fs->varmap[reg]); } else { MSize vidx = var_lookup_(fs->prev, name, e, 0); /* Var in outer func? */ if ((int32_t)vidx >= 0) { /* Yes, make it an upvalue here. */ e->u.s.info = (uint8_t)var_lookup_uv(fs, vidx, e); e->k = VUPVAL; return vidx; } } } else { /* Not found in any function, must be a global. */ expr_init(e, VGLOBAL, 0); e->u.sval = name; } return (MSize)-1; /* Global. */ } /* Lookup variable name. */ #define var_lookup(ls, e) \ var_lookup_((ls)->fs, lex_str(ls), (e), 1) /* -- Goto an label handling ---------------------------------------------- */ /* Add a new goto or label. */ static MSize gola_new(LexState *ls, GCstr *name, uint8_t info, BCPos pc) { FuncState *fs = ls->fs; MSize vtop = ls->vtop; if (LJ_UNLIKELY(vtop >= ls->sizevstack)) { if (ls->sizevstack >= LJ_MAX_VSTACK) lj_lex_error(ls, 0, LJ_ERR_XLIMC, LJ_MAX_VSTACK); lj_mem_growvec(ls->L, ls->vstack, ls->sizevstack, LJ_MAX_VSTACK, VarInfo); } lua_assert(name == NAME_BREAK || lj_tab_getstr(fs->kt, name) != NULL); /* NOBARRIER: name is anchored in fs->kt and ls->vstack is not a GCobj. */ setgcref(ls->vstack[vtop].name, obj2gco(name)); ls->vstack[vtop].startpc = pc; ls->vstack[vtop].slot = (uint8_t)fs->nactvar; ls->vstack[vtop].info = info; ls->vtop = vtop+1; return vtop; } #define gola_isgoto(v) ((v)->info & VSTACK_GOTO) #define gola_islabel(v) ((v)->info & VSTACK_LABEL) #define gola_isgotolabel(v) ((v)->info & (VSTACK_GOTO|VSTACK_LABEL)) /* Patch goto to jump to label. 
*/
static void gola_patch(LexState *ls, VarInfo *vg, VarInfo *vl)
{
  FuncState *fs = ls->fs;
  BCPos pc = vg->startpc;
  setgcrefnull(vg->name);  /* Invalidate pending goto. */
  setbc_a(&fs->bcbase[pc].ins, vl->slot);
  jmp_patch(fs, pc, vl->startpc);
}

/* Patch goto to close upvalues. */
static void gola_close(LexState *ls, VarInfo *vg)
{
  FuncState *fs = ls->fs;
  BCPos pc = vg->startpc;
  BCIns *ip = &fs->bcbase[pc].ins;
  lua_assert(gola_isgoto(vg));
  lua_assert(bc_op(*ip) == BC_JMP || bc_op(*ip) == BC_UCLO);
  setbc_a(ip, vg->slot);
  if (bc_op(*ip) == BC_JMP) {
    BCPos next = jmp_next(fs, pc);
    if (next != NO_JMP) jmp_patch(fs, next, pc);  /* Jump to UCLO. */
    setbc_op(ip, BC_UCLO);  /* Turn into UCLO. */
    setbc_j(ip, NO_JMP);
  }
}

/* Resolve pending forward gotos for label at vstack index idx. */
static void gola_resolve(LexState *ls, FuncScope *bl, MSize idx)
{
  VarInfo *vg = ls->vstack + bl->vstart;
  VarInfo *vl = ls->vstack + idx;
  for (; vg < vl; vg++)
    if (gcrefeq(vg->name, vl->name) && gola_isgoto(vg)) {
      if (vg->slot < vl->slot) {  /* Goto would enter a variable's scope. */
	GCstr *name = strref(var_get(ls, ls->fs, vg->slot).name);
	lua_assert((uintptr_t)name >= VARNAME__MAX);
	ls->linenumber = ls->fs->bcbase[vg->startpc].line;
	lua_assert(strref(vg->name) != NAME_BREAK);
	lj_lex_error(ls, 0, LJ_ERR_XGSCOPE,
		     strdata(strref(vg->name)), strdata(name));
      }
      gola_patch(ls, vg, vl);
    }
}

/* Fixup remaining gotos and labels for scope. */
static void gola_fixup(LexState *ls, FuncScope *bl)
{
  VarInfo *v = ls->vstack + bl->vstart;
  VarInfo *ve = ls->vstack + ls->vtop;
  for (; v < ve; v++) {
    GCstr *name = strref(v->name);
    if (name != NULL) {  /* Only consider remaining valid gotos/labels. */
      if (gola_islabel(v)) {
	VarInfo *vg;
	setgcrefnull(v->name);  /* Invalidate label that goes out of scope. */
	for (vg = v+1; vg < ve; vg++)  /* Resolve pending backward gotos. */
	  if (strref(vg->name) == name && gola_isgoto(vg)) {
	    if ((bl->flags&FSCOPE_UPVAL) && vg->slot > v->slot)
	      gola_close(ls, vg);
	    gola_patch(ls, vg, v);
	  }
      } else if (gola_isgoto(v)) {
	if (bl->prev) {  /* Propagate goto or break to outer scope. */
	  bl->prev->flags |= name == NAME_BREAK ? FSCOPE_BREAK : FSCOPE_GOLA;
	  v->slot = bl->nactvar;
	  if ((bl->flags & FSCOPE_UPVAL))
	    gola_close(ls, v);
	} else {  /* No outer scope: undefined goto label or no loop. */
	  ls->linenumber = ls->fs->bcbase[v->startpc].line;
	  if (name == NAME_BREAK)
	    lj_lex_error(ls, 0, LJ_ERR_XBREAK);
	  else
	    lj_lex_error(ls, 0, LJ_ERR_XLUNDEF, strdata(name));
	}
      }
    }
  }
}

/* Find existing label in the current function scope, or NULL. */
static VarInfo *gola_findlabel(LexState *ls, GCstr *name)
{
  VarInfo *v = ls->vstack + ls->fs->bl->vstart;
  VarInfo *ve = ls->vstack + ls->vtop;
  for (; v < ve; v++)
    if (strref(v->name) == name && gola_islabel(v))
      return v;
  return NULL;
}

/* -- Scope handling ------------------------------------------------------ */

/* Begin a scope. */
static void fscope_begin(FuncState *fs, FuncScope *bl, int flags)
{
  bl->nactvar = (uint8_t)fs->nactvar;
  bl->flags = flags;
  bl->vstart = fs->ls->vtop;
  bl->prev = fs->bl;
  fs->bl = bl;
  lua_assert(fs->freereg == fs->nactvar);
}

/* End a scope: drop its locals, close upvalues, resolve breaks/gotos. */
static void fscope_end(FuncState *fs)
{
  FuncScope *bl = fs->bl;
  LexState *ls = fs->ls;
  fs->bl = bl->prev;
  var_remove(ls, bl->nactvar);
  fs->freereg = fs->nactvar;
  lua_assert(bl->nactvar == fs->nactvar);
  if ((bl->flags & (FSCOPE_UPVAL|FSCOPE_NOCLOSE)) == FSCOPE_UPVAL)
    bcemit_AJ(fs, BC_UCLO, bl->nactvar, 0);
  if ((bl->flags & FSCOPE_BREAK)) {
    if ((bl->flags & FSCOPE_LOOP)) {
      MSize idx = gola_new(ls, NAME_BREAK, VSTACK_LABEL, fs->pc);
      ls->vtop = idx;  /* Drop break label immediately. */
      gola_resolve(ls, bl, idx);
    } else {  /* Need the fixup step to propagate the breaks. */
      gola_fixup(ls, bl);
      return;
    }
  }
  if ((bl->flags & FSCOPE_GOLA)) {
    gola_fixup(ls, bl);
  }
}

/* Mark scope as having an upvalue.
*/
/* Walks outward to the innermost scope that still contains the variable
** at 'level' and marks it, so scope exit emits a UCLO. */
static void fscope_uvmark(FuncState *fs, BCReg level)
{
  FuncScope *bl;
  for (bl = fs->bl; bl && bl->nactvar > level; bl = bl->prev)
    ;
  if (bl)
    bl->flags |= FSCOPE_UPVAL;
}

/* -- Function state management ------------------------------------------- */

/* Fixup bytecode for prototype: copy instructions and set the header op. */
static void fs_fixup_bc(FuncState *fs, GCproto *pt, BCIns *bc, MSize n)
{
  BCInsLine *base = fs->bcbase;
  MSize i;
  pt->sizebc = n;
  /* Slot 0 is the function header (FUNCV for vararg, FUNCF otherwise). */
  bc[0] = BCINS_AD((fs->flags & PROTO_VARARG) ? BC_FUNCV : BC_FUNCF,
		   fs->framesize, 0);
  for (i = 1; i < n; i++)
    bc[i] = base[i].ins;
}

/* Fixup upvalues for child prototype, step #2. */
static void fs_fixup_uv2(FuncState *fs, GCproto *pt)
{
  VarInfo *vstack = fs->ls->vstack;
  uint16_t *uv = proto_uv(pt);
  MSize i, n = pt->sizeuv;
  for (i = 0; i < n; i++) {
    VarIndex vidx = uv[i];
    if (vidx >= LJ_MAX_VSTACK)  /* Biased entry: upvalue of the parent. */
      uv[i] = vidx - LJ_MAX_VSTACK;
    else if ((vstack[vidx].info & VSTACK_VAR_RW))
      uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL;
    else  /* Never written: mark immutable for optimization. */
      uv[i] = vstack[vidx].slot | PROTO_UV_LOCAL | PROTO_UV_IMMUTABLE;
  }
}

/* Fixup constants for prototype.
*/
/* Copy the constants collected in fs->kt into the prototype's colocated
** constant arrays (numbers above kptr, GC objects below, bit-inverted index).
*/
static void fs_fixup_k(FuncState *fs, GCproto *pt, void *kptr)
{
  GCtab *kt;
  TValue *array;
  Node *node;
  MSize i, hmask;
  checklimitgt(fs, fs->nkn, BCMAX_D+1, "constants");
  checklimitgt(fs, fs->nkgc, BCMAX_D+1, "constants");
  setmref(pt->k, kptr);
  pt->sizekn = fs->nkn;
  pt->sizekgc = fs->nkgc;
  kt = fs->kt;
  array = tvref(kt->array);
  for (i = 0; i < kt->asize; i++)  /* Keys from the array part. */
    if (tvhaskslot(&array[i])) {
      TValue *tv = &((TValue *)kptr)[tvkslot(&array[i])];
      if (LJ_DUALNUM)
	setintV(tv, (int32_t)i);
      else
	setnumV(tv, (lua_Number)i);
    }
  node = noderef(kt->node);
  hmask = kt->hmask;
  for (i = 0; i <= hmask; i++) {  /* Keys from the hash part. */
    Node *n = &node[i];
    if (tvhaskslot(&n->val)) {
      ptrdiff_t kidx = (ptrdiff_t)tvkslot(&n->val);
      lua_assert(!tvisint(&n->key));
      if (tvisnum(&n->key)) {
	TValue *tv = &((TValue *)kptr)[kidx];
	if (LJ_DUALNUM) {
	  lua_Number nn = numV(&n->key);
	  int32_t k = lj_num2int(nn);
	  lua_assert(!tvismzero(&n->key));
	  if ((lua_Number)k == nn)
	    setintV(tv, k);
	  else
	    *tv = n->key;
	} else {
	  *tv = n->key;
	}
      } else {
	GCobj *o = gcV(&n->key);
	setgcref(((GCRef *)kptr)[~kidx], o);  /* ~kidx: GC consts grow down. */
	lj_gc_objbarrier(fs->L, pt, o);
	if (tvisproto(&n->key))
	  fs_fixup_uv2(fs, gco2pt(o));
      }
    }
  }
}

/* Fixup upvalues for prototype, step #1. */
static void fs_fixup_uv1(FuncState *fs, GCproto *pt, uint16_t *uv)
{
  setmref(pt->uv, uv);
  pt->sizeuv = fs->nuv;
  memcpy(uv, fs->uvtmp, fs->nuv*sizeof(VarIndex));
}

#ifndef LUAJIT_DISABLE_DEBUGINFO
/* Prepare lineinfo for prototype: size of the per-instruction delta array,
** element width chosen by the function's line span. */
static size_t fs_prep_line(FuncState *fs, BCLine numline)
{
  return (fs->pc-1) << (numline < 256 ? 0 : numline < 65536 ? 1 : 2);
}

/* Fixup lineinfo for prototype.
*/
/* Store per-instruction line deltas (relative to firstline) using the
** narrowest element width that fits numline. */
static void fs_fixup_line(FuncState *fs, GCproto *pt,
			  void *lineinfo, BCLine numline)
{
  BCInsLine *base = fs->bcbase + 1;
  BCLine first = fs->linedefined;
  MSize i = 0, n = fs->pc-1;
  pt->firstline = fs->linedefined;
  pt->numline = numline;
  setmref(pt->lineinfo, lineinfo);
  if (LJ_LIKELY(numline < 256)) {
    uint8_t *li = (uint8_t *)lineinfo;
    do {
      BCLine delta = base[i].line - first;
      lua_assert(delta >= 0 && delta < 256);
      li[i] = (uint8_t)delta;
    } while (++i < n);
  } else if (LJ_LIKELY(numline < 65536)) {
    uint16_t *li = (uint16_t *)lineinfo;
    do {
      BCLine delta = base[i].line - first;
      lua_assert(delta >= 0 && delta < 65536);
      li[i] = (uint16_t)delta;
    } while (++i < n);
  } else {
    uint32_t *li = (uint32_t *)lineinfo;
    do {
      BCLine delta = base[i].line - first;
      lua_assert(delta >= 0);
      li[i] = (uint32_t)delta;
    } while (++i < n);
  }
}

/* Prepare variable info for prototype. Returns total size; *ofsvar receives
** the offset of the local-variable section after the upvalue names. */
static size_t fs_prep_var(LexState *ls, FuncState *fs, size_t *ofsvar)
{
  VarInfo *vs = ls->vstack, *ve;
  MSize i, n;
  BCPos lastpc;
  lj_buf_reset(&ls->sb);  /* Copy to temp. string buffer. */
  /* Store upvalue names. */
  for (i = 0, n = fs->nuv; i < n; i++) {
    GCstr *s = strref(vs[fs->uvmap[i]].name);
    MSize len = s->len+1;
    char *p = lj_buf_more(&ls->sb, len);
    p = lj_buf_wmem(p, strdata(s), len);
    setsbufP(&ls->sb, p);
  }
  *ofsvar = sbuflen(&ls->sb);
  lastpc = 0;
  /* Store local variable names and compressed ranges. */
  for (ve = vs + ls->vtop, vs += fs->vbase; vs < ve; vs++) {
    if (!gola_isgotolabel(vs)) {
      GCstr *s = strref(vs->name);
      BCPos startpc;
      char *p;
      if ((uintptr_t)s < VARNAME__MAX) {  /* Internal variable marker. */
	p = lj_buf_more(&ls->sb, 1 + 2*5);
	*p++ = (char)(uintptr_t)s;
      } else {
	MSize len = s->len+1;
	p = lj_buf_more(&ls->sb, len + 2*5);
	p = lj_buf_wmem(p, strdata(s), len);
      }
      startpc = vs->startpc;
      p = lj_strfmt_wuleb128(p, startpc-lastpc);  /* Delta-encoded ranges. */
      p = lj_strfmt_wuleb128(p, vs->endpc-startpc);
      setsbufP(&ls->sb, p);
      lastpc = startpc;
    }
  }
  lj_buf_putb(&ls->sb, '\0');  /* Terminator for varinfo. */
  return sbuflen(&ls->sb);
}

/* Fixup variable info for prototype: copy the buffer built above. */
static void fs_fixup_var(LexState *ls, GCproto *pt, uint8_t *p, size_t ofsvar)
{
  setmref(pt->uvinfo, p);
  setmref(pt->varinfo, (char *)p + ofsvar);
  memcpy(p, sbufB(&ls->sb), sbuflen(&ls->sb));  /* Copy from temp. buffer. */
}

#else

/* Initialize with empty debug info, if disabled. */
#define fs_prep_line(fs, numline)		(UNUSED(numline), 0)
#define fs_fixup_line(fs, pt, li, numline) \
  pt->firstline = pt->numline = 0, setmref((pt)->lineinfo, NULL)
#define fs_prep_var(ls, fs, ofsvar)		(UNUSED(ofsvar), 0)
#define fs_fixup_var(ls, pt, p, ofsvar) \
  setmref((pt)->uvinfo, NULL), setmref((pt)->varinfo, NULL)

#endif

/* Check if bytecode op returns. */
static int bcopisret(BCOp op)
{
  switch (op) {
  case BC_CALLMT: case BC_CALLT:
  case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
    return 1;
  default:
    return 0;
  }
}

/* Fixup return instruction for prototype. */
static void fs_fixup_ret(FuncState *fs)
{
  BCPos lastpc = fs->pc;
  if (lastpc <= fs->lasttarget || !bcopisret(bc_op(fs->bcbase[lastpc-1].ins))) {
    if ((fs->bl->flags & FSCOPE_UPVAL))
      bcemit_AJ(fs, BC_UCLO, 0, 0);
    bcemit_AD(fs, BC_RET0, 0, 1);  /* Need final return. */
  }
  fs->bl->flags |= FSCOPE_NOCLOSE;  /* Handled above. */
  fscope_end(fs);
  lua_assert(fs->bl == NULL);
  /* May need to fixup returns encoded before first function was created. */
  if (fs->flags & PROTO_FIXUP_RETURN) {
    BCPos pc;
    for (pc = 1; pc < lastpc; pc++) {
      BCIns ins = fs->bcbase[pc].ins;
      BCPos offset;
      switch (bc_op(ins)) {
      case BC_CALLMT: case BC_CALLT:
      case BC_RETM: case BC_RET: case BC_RET0: case BC_RET1:
	offset = bcemit_INS(fs, ins);  /* Copy original instruction. */
	fs->bcbase[offset].line = fs->bcbase[pc].line;
	offset = offset-(pc+1)+BCBIAS_J;
	if (offset > BCMAX_D)
	  err_syntax(fs->ls, LJ_ERR_XFIXUP);
	/* Replace with UCLO plus branch. */
	fs->bcbase[pc].ins = BCINS_AD(BC_UCLO, 0, offset);
	break;
      case BC_UCLO:
	return;  /* We're done. */
      default:
	break;
      }
    }
  }
}

/* Finish a FuncState and return the new prototype. */
static GCproto *fs_finish(LexState *ls, BCLine line)
{
  lua_State *L = ls->L;
  FuncState *fs = ls->fs;
  BCLine numline = line - fs->linedefined;
  size_t sizept, ofsk, ofsuv, ofsli, ofsdbg, ofsvar;
  GCproto *pt;

  /* Apply final fixups. */
  fs_fixup_ret(fs);

  /* Calculate total size of prototype including all colocated arrays. */
  sizept = sizeof(GCproto) + fs->pc*sizeof(BCIns) + fs->nkgc*sizeof(GCRef);
  sizept = (sizept + sizeof(TValue)-1) & ~(sizeof(TValue)-1);
  ofsk = sizept; sizept += fs->nkn*sizeof(TValue);
  ofsuv = sizept; sizept += ((fs->nuv+1)&~1)*2;
  ofsli = sizept; sizept += fs_prep_line(fs, numline);
  ofsdbg = sizept; sizept += fs_prep_var(ls, fs, &ofsvar);

  /* Allocate prototype and initialize its fields. */
  pt = (GCproto *)lj_mem_newgco(L, (MSize)sizept);
  pt->gct = ~LJ_TPROTO;
  pt->sizept = (MSize)sizept;
  pt->trace = 0;
  pt->flags = (uint8_t)(fs->flags & ~(PROTO_HAS_RETURN|PROTO_FIXUP_RETURN));
  pt->numparams = fs->numparams;
  pt->framesize = fs->framesize;
  setgcref(pt->chunkname, obj2gco(ls->chunkname));

  /* Close potentially uninitialized gap between bc and kgc. */
  *(uint32_t *)((char *)pt + ofsk - sizeof(GCRef)*(fs->nkgc+1)) = 0;
  fs_fixup_bc(fs, pt, (BCIns *)((char *)pt + sizeof(GCproto)), fs->pc);
  fs_fixup_k(fs, pt, (void *)((char *)pt + ofsk));
  fs_fixup_uv1(fs, pt, (uint16_t *)((char *)pt + ofsuv));
  fs_fixup_line(fs, pt, (void *)((char *)pt + ofsli), numline);
  fs_fixup_var(ls, pt, (uint8_t *)((char *)pt + ofsdbg), ofsvar);

  lj_vmevent_send(L, BC,
    setprotoV(L, L->top++, pt);
  );

  L->top--;  /* Pop table of constants. */
  ls->vtop = fs->vbase;  /* Reset variable stack. */
  ls->fs = fs->prev;
  lua_assert(ls->fs != NULL || ls->tok == TK_eof);
  return pt;
}

/* Initialize a new FuncState. */
static void fs_init(LexState *ls, FuncState *fs)
{
  lua_State *L = ls->L;
  fs->prev = ls->fs; ls->fs = fs;  /* Append to list. */
  fs->ls = ls;
  fs->vbase = ls->vtop;
  fs->L = L;
  fs->pc = 0;
  fs->lasttarget = 0;
  fs->jpc = NO_JMP;
  fs->freereg = 0;
  fs->nkgc = 0;
  fs->nkn = 0;
  fs->nactvar = 0;
  fs->nuv = 0;
  fs->bl = NULL;
  fs->flags = 0;
  fs->framesize = 1;  /* Minimum frame size. */
  fs->kt = lj_tab_new(L, 0, 0);
  /* Anchor table of constants in stack to avoid being collected. */
  settabV(L, L->top, fs->kt);
  incr_top(L);
}

/* -- Expressions --------------------------------------------------------- */

/* Forward declaration. */
static void expr(LexState *ls, ExpDesc *v);

/* Return string expression. */
static void expr_str(LexState *ls, ExpDesc *e)
{
  expr_init(e, VKSTR, 0);
  e->u.sval = lex_str(ls);
}

/* Return index expression. Encodes the key into t->u.s.aux. */
static void expr_index(FuncState *fs, ExpDesc *t, ExpDesc *e)
{
  /* Already called: expr_toval(fs, e). */
  t->k = VINDEXED;
  if (expr_isnumk(e)) {
#if LJ_DUALNUM
    if (tvisint(expr_numtv(e))) {
      int32_t k = intV(expr_numtv(e));
      if (checku8(k)) {
	t->u.s.aux = BCMAX_C+1+(uint32_t)k;  /* 256..511: const byte key */
	return;
      }
    }
#else
    lua_Number n = expr_numberV(e);
    int32_t k = lj_num2int(n);
    if (checku8(k) && n == (lua_Number)k) {
      t->u.s.aux = BCMAX_C+1+(uint32_t)k;  /* 256..511: const byte key */
      return;
    }
#endif
  } else if (expr_isstrk(e)) {
    BCReg idx = const_str(fs, e);
    if (idx <= BCMAX_C) {
      t->u.s.aux = ~idx;  /* -256..-1: const string key */
      return;
    }
  }
  t->u.s.aux = expr_toanyreg(fs, e);  /* 0..255: register */
}

/* Parse index expression with named field. */
static void expr_field(LexState *ls, ExpDesc *v)
{
  FuncState *fs = ls->fs;
  ExpDesc key;
  expr_toanyreg(fs, v);
  lj_lex_next(ls);  /* Skip dot or colon. */
  expr_str(ls, &key);
  expr_index(fs, v, &key);
}

/* Parse index expression with brackets. */
static void expr_bracket(LexState *ls, ExpDesc *v)
{
  lj_lex_next(ls);  /* Skip '['. */
  expr(ls, v);
  expr_toval(ls->fs, v);
  lex_check(ls, ']');
}

/* Get value of constant expression.
*/
static void expr_kvalue(TValue *v, ExpDesc *e)
{
  if (e->k <= VKTRUE) {  /* nil/false/true: primitive tag. */
    setpriV(v, ~(uint32_t)e->k);
  } else if (e->k == VKSTR) {
    setgcVraw(v, obj2gco(e->u.sval), LJ_TSTR);
  } else {
    lua_assert(tvisnumber(expr_numtv(e)));
    *v = *expr_numtv(e);
  }
}

/* Parse table constructor expression. Constant-keyed entries are collected
** into a template table (emitted as TDUP); the rest get explicit stores. */
static void expr_table(LexState *ls, ExpDesc *e)
{
  FuncState *fs = ls->fs;
  BCLine line = ls->linenumber;
  GCtab *t = NULL;
  int vcall = 0, needarr = 0, fixt = 0;
  uint32_t narr = 1;  /* First array index. */
  uint32_t nhash = 0;  /* Number of hash entries. */
  BCReg freg = fs->freereg;
  BCPos pc = bcemit_AD(fs, BC_TNEW, freg, 0);
  expr_init(e, VNONRELOC, freg);
  bcreg_reserve(fs, 1);
  freg++;
  lex_check(ls, '{');
  while (ls->tok != '}') {
    ExpDesc key, val;
    vcall = 0;
    if (ls->tok == '[') {
      expr_bracket(ls, &key);  /* Already calls expr_toval. */
      if (!expr_isk(&key)) expr_index(fs, e, &key);
      if (expr_isnumk(&key) && expr_numiszero(&key)) needarr = 1; else nhash++;
      lex_check(ls, '=');
    } else if ((ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) &&
	       lj_lex_lookahead(ls) == '=') {
      expr_str(ls, &key);
      lex_check(ls, '=');
      nhash++;
    } else {  /* Positional value: implicit integer key. */
      expr_init(&key, VKNUM, 0);
      setintV(&key.u.nval, (int)narr);
      narr++;
      needarr = vcall = 1;
    }
    expr(ls, &val);
    if (expr_isk(&key) && key.k != VKNIL &&
	(key.k == VKSTR || expr_isk_nojump(&val))) {
      TValue k, *v;
      if (!t) {  /* Create template table on demand. */
	BCReg kidx;
	t = lj_tab_new(fs->L, needarr ? narr : 0, hsize2hbits(nhash));
	kidx = const_gc(fs, obj2gco(t), LJ_TTAB);
	fs->bcbase[pc].ins = BCINS_AD(BC_TDUP, freg-1, kidx);
      }
      vcall = 0;
      expr_kvalue(&k, &key);
      v = lj_tab_set(fs->L, t, &k);
      lj_gc_anybarriert(fs->L, t);
      if (expr_isk_nojump(&val)) {  /* Add const key/value to template table. */
	expr_kvalue(v, &val);
      } else {  /* Otherwise create dummy string key (avoids lj_tab_newkey). */
	settabV(fs->L, v, t);  /* Preserve key with table itself as value. */
	fixt = 1;  /* Fix this later, after all resizes. */
	goto nonconst;
      }
    } else {
    nonconst:
      if (val.k != VCALL) { expr_toanyreg(fs, &val); vcall = 0; }
      if (expr_isk(&key)) expr_index(fs, e, &key);
      bcemit_store(fs, e, &val);
    }
    fs->freereg = freg;
    if (!lex_opt(ls, ',') && !lex_opt(ls, ';')) break;
  }
  lex_match(ls, '}', '{', line);
  if (vcall) {  /* Last positional value was an open call/vararg: use TSETM. */
    BCInsLine *ilp = &fs->bcbase[fs->pc-1];
    ExpDesc en;
    lua_assert(bc_a(ilp->ins) == freg &&
	       bc_op(ilp->ins) == (narr > 256 ? BC_TSETV : BC_TSETB));
    expr_init(&en, VKNUM, 0);
    en.u.nval.u32.lo = narr-1;
    en.u.nval.u32.hi = 0x43300000;  /* Biased integer to avoid denormals. */
    if (narr > 256) { fs->pc--; ilp--; }
    ilp->ins = BCINS_AD(BC_TSETM, freg, const_num(fs, &en));
    setbc_b(&ilp[-1].ins, 0);
  }
  if (pc == fs->pc-1) {  /* Make expr relocable if possible. */
    e->u.s.info = pc;
    fs->freereg--;
    e->k = VRELOCABLE;
  } else {
    e->k = VNONRELOC;  /* May have been changed by expr_index. */
  }
  if (!t) {  /* Construct TNEW RD: hhhhhaaaaaaaaaaa. */
    BCIns *ip = &fs->bcbase[pc].ins;
    if (!needarr) narr = 0;
    else if (narr < 3) narr = 3;
    else if (narr > 0x7ff) narr = 0x7ff;
    setbc_d(ip, narr|(hsize2hbits(nhash)<<11));
  } else {
    if (needarr && t->asize < narr)
      lj_tab_reasize(fs->L, t, narr-1);
    if (fixt) {  /* Fix value for dummy keys in template table. */
      Node *node = noderef(t->node);
      uint32_t i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
	Node *n = &node[i];
	if (tvistab(&n->val)) {
	  lua_assert(tabV(&n->val) == t);
	  setnilV(&n->val);  /* Turn value into nil. */
	}
      }
    }
    lj_gc_check(fs->L);
  }
}

/* Parse function parameters.
*/
/* Parse function parameter list. Returns the number of fixed parameters;
** sets PROTO_VARARG if '...' is seen. */
static BCReg parse_params(LexState *ls, int needself)
{
  FuncState *fs = ls->fs;
  BCReg nparams = 0;
  lex_check(ls, '(');
  if (needself)  /* Method definition: implicit leading 'self'. */
    var_new_lit(ls, nparams++, "self");
  if (ls->tok != ')') {
    do {
      if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) {
	var_new(ls, nparams++, lex_str(ls));
      } else if (ls->tok == TK_dots) {
	lj_lex_next(ls);
	fs->flags |= PROTO_VARARG;
	break;
      } else {
	err_syntax(ls, LJ_ERR_XPARAM);
      }
    } while (lex_opt(ls, ','));
  }
  var_add(ls, nparams);
  lua_assert(fs->nactvar == nparams);
  bcreg_reserve(fs, nparams);
  lex_check(ls, ')');
  return nparams;
}

/* Forward declaration. */
static void parse_chunk(LexState *ls);

/* Parse body of a function. */
static void parse_body(LexState *ls, ExpDesc *e, int needself, BCLine line)
{
  FuncState fs, *pfs = ls->fs;
  FuncScope bl;
  GCproto *pt;
  ptrdiff_t oldbase = pfs->bcbase - ls->bcstack;
  fs_init(ls, &fs);
  fscope_begin(&fs, &bl, 0);
  fs.linedefined = line;
  fs.numparams = (uint8_t)parse_params(ls, needself);
  fs.bcbase = pfs->bcbase + pfs->pc;  /* Child shares the bytecode stack. */
  fs.bclim = pfs->bclim - pfs->pc;
  bcemit_AD(&fs, BC_FUNCF, 0, 0);  /* Placeholder. */
  parse_chunk(ls);
  if (ls->tok != TK_end) lex_match(ls, TK_end, TK_function, line);
  pt = fs_finish(ls, (ls->lastline = ls->linenumber));
  pfs->bcbase = ls->bcstack + oldbase;  /* May have been reallocated. */
  pfs->bclim = (BCPos)(ls->sizebcstack - oldbase);
  /* Store new prototype in the constant array of the parent. */
  expr_init(e, VRELOCABLE,
	    bcemit_AD(pfs, BC_FNEW, 0, const_gc(pfs, obj2gco(pt), LJ_TPROTO)));
#if LJ_HASFFI
  pfs->flags |= (fs.flags & PROTO_FFI);
#endif
  if (!(pfs->flags & PROTO_CHILD)) {
    if (pfs->flags & PROTO_HAS_RETURN)
      pfs->flags |= PROTO_FIXUP_RETURN;
    pfs->flags |= PROTO_CHILD;
  }
  lj_lex_next(ls);
}

/* Parse expression list. Last expression is left open. */
static BCReg expr_list(LexState *ls, ExpDesc *v)
{
  BCReg n = 1;
  expr(ls, v);
  while (lex_opt(ls, ',')) {
    expr_tonextreg(ls->fs, v);
    expr(ls, v);
    n++;
  }
  return n;
}

/* Parse function argument list.
*/
/* Parse function argument list: '(...)', a table constructor or a string. */
static void parse_args(LexState *ls, ExpDesc *e)
{
  FuncState *fs = ls->fs;
  ExpDesc args;
  BCIns ins;
  BCReg base;
  BCLine line = ls->linenumber;
  if (ls->tok == '(') {
#if !LJ_52
    if (line != ls->lastline)  /* Reject ambiguous 'f\n(...)' syntax. */
      err_syntax(ls, LJ_ERR_XAMBIG);
#endif
    lj_lex_next(ls);
    if (ls->tok == ')') {  /* f(). */
      args.k = VVOID;
    } else {
      expr_list(ls, &args);
      if (args.k == VCALL)  /* f(a, b, g()) or f(a, b, ...). */
	setbc_b(bcptr(fs, &args), 0);  /* Pass on multiple results. */
    }
    lex_match(ls, ')', '(', line);
  } else if (ls->tok == '{') {
    expr_table(ls, &args);
  } else if (ls->tok == TK_string) {
    expr_init(&args, VKSTR, 0);
    args.u.sval = strV(&ls->tokval);
    lj_lex_next(ls);
  } else {
    err_syntax(ls, LJ_ERR_XFUNARG);
    return;  /* Silence compiler. */
  }
  lua_assert(e->k == VNONRELOC);
  base = e->u.s.info;  /* Base register for call. */
  if (args.k == VCALL) {
    ins = BCINS_ABC(BC_CALLM, base, 2, args.u.s.aux - base - 1 - LJ_FR2);
  } else {
    if (args.k != VVOID)
      expr_tonextreg(fs, &args);
    ins = BCINS_ABC(BC_CALL, base, 2, fs->freereg - base - LJ_FR2);
  }
  expr_init(e, VCALL, bcemit_INS(fs, ins));
  e->u.s.aux = base;
  fs->bcbase[fs->pc - 1].line = line;
  fs->freereg = base+1;  /* Leave one result by default. */
}

/* Parse primary expression. */
static void expr_primary(LexState *ls, ExpDesc *v)
{
  FuncState *fs = ls->fs;
  /* Parse prefix expression. */
  if (ls->tok == '(') {
    BCLine line = ls->linenumber;
    lj_lex_next(ls);
    expr(ls, v);
    lex_match(ls, ')', '(', line);
    expr_discharge(ls->fs, v);
  } else if (ls->tok == TK_name || (!LJ_52 && ls->tok == TK_goto)) {
    var_lookup(ls, v);
  } else {
    err_syntax(ls, LJ_ERR_XSYMBOL);
  }
  for (;;) {  /* Parse multiple expression suffixes. */
    if (ls->tok == '.') {
      expr_field(ls, v);
    } else if (ls->tok == '[') {
      ExpDesc key;
      expr_toanyreg(fs, v);
      expr_bracket(ls, &key);
      expr_index(fs, v, &key);
    } else if (ls->tok == ':') {  /* Method call o:m(...). */
      ExpDesc key;
      lj_lex_next(ls);
      expr_str(ls, &key);
      bcemit_method(fs, v, &key);
      parse_args(ls, v);
    } else if (ls->tok == '(' || ls->tok == TK_string || ls->tok == '{') {
      expr_tonextreg(fs, v);
      if (LJ_FR2) bcreg_reserve(fs, 1);
      parse_args(ls, v);
    } else {
      break;
    }
  }
}

/* Parse simple expression. */
static void expr_simple(LexState *ls, ExpDesc *v)
{
  switch (ls->tok) {
  case TK_number:
    expr_init(v, (LJ_HASFFI && tviscdata(&ls->tokval)) ? VKCDATA : VKNUM, 0);
    copyTV(ls->L, &v->u.nval, &ls->tokval);
    break;
  case TK_string:
    expr_init(v, VKSTR, 0);
    v->u.sval = strV(&ls->tokval);
    break;
  case TK_nil:
    expr_init(v, VKNIL, 0);
    break;
  case TK_true:
    expr_init(v, VKTRUE, 0);
    break;
  case TK_false:
    expr_init(v, VKFALSE, 0);
    break;
  case TK_dots: {  /* Vararg. */
    FuncState *fs = ls->fs;
    BCReg base;
    checkcond(ls, fs->flags & PROTO_VARARG, LJ_ERR_XDOTS);
    bcreg_reserve(fs, 1);
    base = fs->freereg-1;
    expr_init(v, VCALL, bcemit_ABC(fs, BC_VARG, base, 2, fs->numparams));
    v->u.s.aux = base;
    break;
  }
  case '{':  /* Table constructor. */
    expr_table(ls, v);
    return;
  case TK_function:
    lj_lex_next(ls);
    parse_body(ls, v, 0, ls->linenumber);
    return;
  default:
    expr_primary(ls, v);
    return;
  }
  lj_lex_next(ls);
}

/* Manage syntactic levels to avoid blowing up the stack. */
static void synlevel_begin(LexState *ls)
{
  if (++ls->level >= LJ_MAX_XLEVEL)
    lj_lex_error(ls, 0, LJ_ERR_XLEVELS);
}

#define synlevel_end(ls)	((ls)->level--)

/* Convert token to binary operator.
*/
static BinOpr token2binop(LexToken tok)
{
  switch (tok) {
  case '+':	return OPR_ADD;
  case '-':	return OPR_SUB;
  case '*':	return OPR_MUL;
  case '/':	return OPR_DIV;
  case '%':	return OPR_MOD;
  case '^':	return OPR_POW;
  case TK_concat: return OPR_CONCAT;
  case TK_ne:	return OPR_NE;
  case TK_eq:	return OPR_EQ;
  case '<':	return OPR_LT;
  case TK_le:	return OPR_LE;
  case '>':	return OPR_GT;
  case TK_ge:	return OPR_GE;
  case TK_and:	return OPR_AND;
  case TK_or:	return OPR_OR;
  default:	return OPR_NOBINOPR;
  }
}

/* Priorities for each binary operator. ORDER OPR. */
static const struct {
  uint8_t left;		/* Left priority. */
  uint8_t right;	/* Right priority. */
} priority[] = {
  {6,6}, {6,6}, {7,7}, {7,7}, {7,7},	/* ADD SUB MUL DIV MOD */
  {10,9}, {5,4},			/* POW CONCAT (right associative) */
  {3,3}, {3,3},				/* EQ NE */
  {3,3}, {3,3}, {3,3}, {3,3},		/* LT GE GT LE */
  {2,2}, {1,1}				/* AND OR */
};

#define UNARY_PRIORITY	8  /* Priority for unary operators. */

/* Forward declaration. */
static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit);

/* Parse unary expression. */
static void expr_unop(LexState *ls, ExpDesc *v)
{
  BCOp op;
  if (ls->tok == TK_not) {
    op = BC_NOT;
  } else if (ls->tok == '-') {
    op = BC_UNM;
  } else if (ls->tok == '#') {
    op = BC_LEN;
  } else {
    expr_simple(ls, v);
    return;
  }
  lj_lex_next(ls);
  expr_binop(ls, v, UNARY_PRIORITY);
  bcemit_unop(ls->fs, op, v);
}

/* Parse binary expressions with priority higher than the limit.
** Classic operator-precedence climbing. */
static BinOpr expr_binop(LexState *ls, ExpDesc *v, uint32_t limit)
{
  BinOpr op;
  synlevel_begin(ls);
  expr_unop(ls, v);
  op = token2binop(ls->tok);
  while (op != OPR_NOBINOPR && priority[op].left > limit) {
    ExpDesc v2;
    BinOpr nextop;
    lj_lex_next(ls);
    bcemit_binop_left(ls->fs, op, v);
    /* Parse binary expression with higher priority. */
    nextop = expr_binop(ls, &v2, priority[op].right);
    bcemit_binop(ls->fs, op, v, &v2);
    op = nextop;
  }
  synlevel_end(ls);
  return op;  /* Return unconsumed binary operator (if any). */
}

/* Parse expression.
*/
static void expr(LexState *ls, ExpDesc *v)
{
  expr_binop(ls, v, 0);  /* Priority 0: parse whole expression. */
}

/* Assign expression to the next register. */
static void expr_next(LexState *ls)
{
  ExpDesc e;
  expr(ls, &e);
  expr_tonextreg(ls->fs, &e);
}

/* Parse conditional expression. Returns the false-branch jump list. */
static BCPos expr_cond(LexState *ls)
{
  ExpDesc v;
  expr(ls, &v);
  if (v.k == VKNIL) v.k = VKFALSE;
  bcemit_branch_t(ls->fs, &v);
  return v.f;
}

/* -- Assignments --------------------------------------------------------- */

/* List of LHS variables. */
typedef struct LHSVarList {
  ExpDesc v;			/* LHS variable. */
  struct LHSVarList *prev;	/* Link to previous LHS variable. */
} LHSVarList;

/* Eliminate write-after-read hazards for local variable assignment. */
static void assign_hazard(LexState *ls, LHSVarList *lh, const ExpDesc *v)
{
  FuncState *fs = ls->fs;
  BCReg reg = v->u.s.info;  /* Check against this variable. */
  BCReg tmp = fs->freereg;  /* Rename to this temp. register (if needed). */
  int hazard = 0;
  for (; lh; lh = lh->prev) {
    if (lh->v.k == VINDEXED) {
      if (lh->v.u.s.info == reg) {  /* t[i], t = 1, 2 */
	hazard = 1;
	lh->v.u.s.info = tmp;
      }
      if (lh->v.u.s.aux == reg) {  /* t[i], i = 1, 2 */
	hazard = 1;
	lh->v.u.s.aux = tmp;
      }
    }
  }
  if (hazard) {
    bcemit_AD(fs, BC_MOV, tmp, reg);  /* Rename conflicting variable. */
    bcreg_reserve(fs, 1);
  }
}

/* Adjust LHS/RHS of an assignment so the counts match. */
static void assign_adjust(LexState *ls, BCReg nvars, BCReg nexps, ExpDesc *e)
{
  FuncState *fs = ls->fs;
  int32_t extra = (int32_t)nvars - (int32_t)nexps;
  if (e->k == VCALL) {  /* Last expression is an open call/vararg. */
    extra++;  /* Compensate for the VCALL itself. */
    if (extra < 0) extra = 0;
    setbc_b(bcptr(fs, e), extra+1);  /* Fixup call results. */
    if (extra > 1) bcreg_reserve(fs, (BCReg)extra-1);
  } else {
    if (e->k != VVOID)
      expr_tonextreg(fs, e);  /* Close last expression. */
    if (extra > 0) {  /* Leftover LHS are set to nil. */
      BCReg reg = fs->freereg;
      bcreg_reserve(fs, (BCReg)extra);
      bcemit_nil(fs, reg, (BCReg)extra);
    }
  }
  if (nexps > nvars)
    ls->fs->freereg -= nexps - nvars;  /* Drop leftover regs. */
}

/* Recursively parse assignment statement. */
static void parse_assignment(LexState *ls, LHSVarList *lh, BCReg nvars)
{
  ExpDesc e;
  checkcond(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, LJ_ERR_XSYNTAX);
  if (lex_opt(ls, ',')) {  /* Collect LHS list and recurse upwards. */
    LHSVarList vl;
    vl.prev = lh;
    expr_primary(ls, &vl.v);
    if (vl.v.k == VLOCAL)
      assign_hazard(ls, lh, &vl.v);
    checklimit(ls->fs, ls->level + nvars, LJ_MAX_XLEVEL, "variable names");
    parse_assignment(ls, &vl, nvars+1);
  } else {  /* Parse RHS. */
    BCReg nexps;
    lex_check(ls, '=');
    nexps = expr_list(ls, &e);
    if (nexps == nvars) {
      if (e.k == VCALL) {
	if (bc_op(*bcptr(ls->fs, &e)) == BC_VARG) {  /* Vararg assignment. */
	  ls->fs->freereg--;
	  e.k = VRELOCABLE;
	} else {  /* Multiple call results. */
	  e.u.s.info = e.u.s.aux;  /* Base of call is not relocatable. */
	  e.k = VNONRELOC;
	}
      }
      bcemit_store(ls->fs, &lh->v, &e);
      return;
    }
    assign_adjust(ls, nvars, nexps, &e);
  }
  /* Assign RHS to LHS and recurse downwards. */
  expr_init(&e, VNONRELOC, ls->fs->freereg-1);
  bcemit_store(ls->fs, &lh->v, &e);
}

/* Parse call statement or assignment. */
static void parse_call_assign(LexState *ls)
{
  FuncState *fs = ls->fs;
  LHSVarList vl;
  expr_primary(ls, &vl.v);
  if (vl.v.k == VCALL) {  /* Function call statement. */
    setbc_b(bcptr(fs, &vl.v), 1);  /* No results. */
  } else {  /* Start of an assignment. */
    vl.prev = NULL;
    parse_assignment(ls, &vl, 1);
  }
}

/* Parse 'local' statement. */
static void parse_local(LexState *ls)
{
  if (lex_opt(ls, TK_function)) {  /* Local function declaration. */
    ExpDesc v, b;
    FuncState *fs = ls->fs;
    var_new(ls, 0, lex_str(ls));
    expr_init(&v, VLOCAL, fs->freereg);
    v.u.s.aux = fs->varmap[fs->freereg];
    bcreg_reserve(fs, 1);
    var_add(ls, 1);  /* Name is in scope inside the body (for recursion). */
    parse_body(ls, &b, 0, ls->linenumber);
    /* bcemit_store(fs, &v, &b) without setting VSTACK_VAR_RW. */
    expr_free(fs, &b);
    expr_toreg(fs, &b, v.u.s.info);
    /* The upvalue is in scope, but the local is only valid after the store. */
    var_get(ls, fs, fs->nactvar - 1).startpc = fs->pc;
  } else {  /* Local variable declaration. */
    ExpDesc e;
    BCReg nexps, nvars = 0;
    do {  /* Collect LHS. */
      var_new(ls, nvars++, lex_str(ls));
    } while (lex_opt(ls, ','));
    if (lex_opt(ls, '=')) {  /* Optional RHS. */
      nexps = expr_list(ls, &e);
    } else {  /* Or implicitly set to nil. */
      e.k = VVOID;
      nexps = 0;
    }
    assign_adjust(ls, nvars, nexps, &e);
    var_add(ls, nvars);  /* Variables visible only after the initializers. */
  }
}

/* Parse 'function' statement. */
static void parse_func(LexState *ls, BCLine line)
{
  FuncState *fs;
  ExpDesc v, b;
  int needself = 0;
  lj_lex_next(ls);  /* Skip 'function'. */
  /* Parse function name. */
  var_lookup(ls, &v);
  while (ls->tok == '.')  /* Multiple dot-separated fields. */
    expr_field(ls, &v);
  if (ls->tok == ':') {  /* Optional colon to signify method call. */
    needself = 1;
    expr_field(ls, &v);
  }
  parse_body(ls, &b, needself, line);
  fs = ls->fs;
  bcemit_store(fs, &v, &b);
  fs->bcbase[fs->pc - 1].line = line;  /* Set line for the store. */
}

/* -- Control transfer statements ----------------------------------------- */

/* Check for end of block. */
static int parse_isend(LexToken tok)
{
  switch (tok) {
  case TK_else: case TK_elseif: case TK_end: case TK_until: case TK_eof:
    return 1;
  default:
    return 0;
  }
}

/* Parse 'return' statement. */
static void parse_return(LexState *ls)
{
  BCIns ins;
  FuncState *fs = ls->fs;
  lj_lex_next(ls);  /* Skip 'return'. */
  fs->flags |= PROTO_HAS_RETURN;
  if (parse_isend(ls->tok) || ls->tok == ';') {  /* Bare return. */
    ins = BCINS_AD(BC_RET0, 0, 1);
  } else {  /* Return with one or more values. */
    ExpDesc e;  /* Receives the _last_ expression in the list. */
    BCReg nret = expr_list(ls, &e);
    if (nret == 1) {  /* Return one result. */
      if (e.k == VCALL) {  /* Check for tail call. */
	BCIns *ip = bcptr(fs, &e);
	/* It doesn't pay off to add BC_VARGT just for 'return ...'. */
	if (bc_op(*ip) == BC_VARG) goto notailcall;
	fs->pc--;
	ins = BCINS_AD(bc_op(*ip)-BC_CALL+BC_CALLT, bc_a(*ip), bc_c(*ip));
      } else {  /* Can return the result from any register. */
	ins = BCINS_AD(BC_RET1, expr_toanyreg(fs, &e), 2);
      }
    } else {
      if (e.k == VCALL) {  /* Append all results from a call. */
      notailcall:
	setbc_b(bcptr(fs, &e), 0);
	ins = BCINS_AD(BC_RETM, fs->nactvar, e.u.s.aux - fs->nactvar);
      } else {
	expr_tonextreg(fs, &e);  /* Force contiguous registers. */
	ins = BCINS_AD(BC_RET, fs->nactvar, nret+1);
      }
    }
  }
  if (fs->flags & PROTO_CHILD)
    bcemit_AJ(fs, BC_UCLO, 0, 0);  /* May need to close upvalues first. */
  bcemit_INS(fs, ins);
}

/* Parse 'break' statement. */
static void parse_break(LexState *ls)
{
  ls->fs->bl->flags |= FSCOPE_BREAK;
  gola_new(ls, NAME_BREAK, VSTACK_GOTO, bcemit_jmp(ls->fs));
}

/* Parse 'goto' statement. */
static void parse_goto(LexState *ls)
{
  FuncState *fs = ls->fs;
  GCstr *name = lex_str(ls);
  VarInfo *vl = gola_findlabel(ls, name);
  if (vl)  /* Treat backwards goto within same scope like a loop. */
    bcemit_AJ(fs, BC_LOOP, vl->slot, -1);  /* No BC range check. */
  fs->bl->flags |= FSCOPE_GOLA;
  gola_new(ls, name, VSTACK_GOTO, bcemit_jmp(fs));
}

/* Parse label. */
static void parse_label(LexState *ls)
{
  FuncState *fs = ls->fs;
  GCstr *name;
  MSize idx;
  fs->lasttarget = fs->pc;
  fs->bl->flags |= FSCOPE_GOLA;
  lj_lex_next(ls);  /* Skip '::'. */
  name = lex_str(ls);
  if (gola_findlabel(ls, name))
    lj_lex_error(ls, 0, LJ_ERR_XLDUP, strdata(name));
  idx = gola_new(ls, name, VSTACK_LABEL, fs->pc);
  lex_check(ls, TK_label);
  /* Recursively parse trailing statements: labels and ';' (Lua 5.2 only). */
  for (;;) {
    if (ls->tok == TK_label) {
      synlevel_begin(ls);
      parse_label(ls);
      synlevel_end(ls);
    } else if (LJ_52 && ls->tok == ';') {
      lj_lex_next(ls);
    } else {
      break;
    }
  }
  /* Trailing label is considered to be outside of scope. */
  if (parse_isend(ls->tok) && ls->tok != TK_until)
    ls->vstack[idx].slot = fs->bl->nactvar;
  gola_resolve(ls, fs->bl, idx);
}

/* -- Blocks, loops and conditional statements ---------------------------- */

/* Parse a block. */
static void parse_block(LexState *ls)
{
  FuncState *fs = ls->fs;
  FuncScope bl;
  fscope_begin(fs, &bl, 0);
  parse_chunk(ls);
  fscope_end(fs);
}

/* Parse 'while' statement. */
static void parse_while(LexState *ls, BCLine line)
{
  FuncState *fs = ls->fs;
  BCPos start, loop, condexit;
  FuncScope bl;
  lj_lex_next(ls);  /* Skip 'while'. */
  start = fs->lasttarget = fs->pc;
  condexit = expr_cond(ls);
  fscope_begin(fs, &bl, FSCOPE_LOOP);
  lex_check(ls, TK_do);
  loop = bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
  parse_block(ls);
  jmp_patch(fs, bcemit_jmp(fs), start);  /* Backedge to the condition. */
  lex_match(ls, TK_end, TK_while, line);
  fscope_end(fs);
  jmp_tohere(fs, condexit);
  jmp_patchins(fs, loop, fs->pc);
}

/* Parse 'repeat' statement. */
static void parse_repeat(LexState *ls, BCLine line)
{
  FuncState *fs = ls->fs;
  BCPos loop = fs->lasttarget = fs->pc;
  BCPos condexit;
  FuncScope bl1, bl2;
  fscope_begin(fs, &bl1, FSCOPE_LOOP);  /* Breakable loop scope. */
  fscope_begin(fs, &bl2, 0);  /* Inner scope. */
  lj_lex_next(ls);  /* Skip 'repeat'. */
  bcemit_AD(fs, BC_LOOP, fs->nactvar, 0);
  parse_chunk(ls);
  lex_match(ls, TK_until, TK_repeat, line);
  condexit = expr_cond(ls);  /* Parse condition (still inside inner scope). */
  if (!(bl2.flags & FSCOPE_UPVAL)) {  /* No upvalues? Just end inner scope. */
    fscope_end(fs);
  } else {  /* Otherwise generate: cond: UCLO+JMP out, !cond: UCLO+JMP loop. */
    parse_break(ls);  /* Break from loop and close upvalues. */
    jmp_tohere(fs, condexit);
    fscope_end(fs);  /* End inner scope and close upvalues. */
    condexit = bcemit_jmp(fs);
  }
  jmp_patch(fs, condexit, loop);  /* Jump backwards if !cond. */
  jmp_patchins(fs, loop, fs->pc);
  fscope_end(fs);  /* End loop scope. */
}

/* Parse numeric 'for'.
*/ static void parse_for_num(LexState *ls, GCstr *varname, BCLine line) { FuncState *fs = ls->fs; BCReg base = fs->freereg; FuncScope bl; BCPos loop, loopend; /* Hidden control variables. */ var_new_fixed(ls, FORL_IDX, VARNAME_FOR_IDX); var_new_fixed(ls, FORL_STOP, VARNAME_FOR_STOP); var_new_fixed(ls, FORL_STEP, VARNAME_FOR_STEP); /* Visible copy of index variable. */ var_new(ls, FORL_EXT, varname); lex_check(ls, '='); expr_next(ls); lex_check(ls, ','); expr_next(ls); if (lex_opt(ls, ',')) { expr_next(ls); } else { bcemit_AD(fs, BC_KSHORT, fs->freereg, 1); /* Default step is 1. */ bcreg_reserve(fs, 1); } var_add(ls, 3); /* Hidden control variables. */ lex_check(ls, TK_do); loop = bcemit_AJ(fs, BC_FORI, base, NO_JMP); fscope_begin(fs, &bl, 0); /* Scope for visible variables. */ var_add(ls, 1); bcreg_reserve(fs, 1); parse_block(ls); fscope_end(fs); /* Perform loop inversion. Loop control instructions are at the end. */ loopend = bcemit_AJ(fs, BC_FORL, base, NO_JMP); fs->bcbase[loopend].line = line; /* Fix line for control ins. */ jmp_patchins(fs, loopend, loop+1); jmp_patchins(fs, loop, fs->pc); } /* Try to predict whether the iterator is next() and specialize the bytecode. ** Detecting next() and pairs() by name is simplistic, but quite effective. ** The interpreter backs off if the check for the closure fails at runtime. */ static int predict_next(LexState *ls, FuncState *fs, BCPos pc) { BCIns ins = fs->bcbase[pc].ins; GCstr *name; cTValue *o; switch (bc_op(ins)) { case BC_MOV: name = gco2str(gcref(var_get(ls, fs, bc_d(ins)).name)); break; case BC_UGET: name = gco2str(gcref(ls->vstack[fs->uvmap[bc_d(ins)]].name)); break; case BC_GGET: /* There's no inverse index (yet), so lookup the strings. 
*/ o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "pairs")); if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) return 1; o = lj_tab_getstr(fs->kt, lj_str_newlit(ls->L, "next")); if (o && tvhaskslot(o) && tvkslot(o) == bc_d(ins)) return 1; return 0; default: return 0; } return (name->len == 5 && !strcmp(strdata(name), "pairs")) || (name->len == 4 && !strcmp(strdata(name), "next")); } /* Parse 'for' iterator. */ static void parse_for_iter(LexState *ls, GCstr *indexname) { FuncState *fs = ls->fs; ExpDesc e; BCReg nvars = 0; BCLine line; BCReg base = fs->freereg + 3; BCPos loop, loopend, exprpc = fs->pc; FuncScope bl; int isnext; /* Hidden control variables. */ var_new_fixed(ls, nvars++, VARNAME_FOR_GEN); var_new_fixed(ls, nvars++, VARNAME_FOR_STATE); var_new_fixed(ls, nvars++, VARNAME_FOR_CTL); /* Visible variables returned from iterator. */ var_new(ls, nvars++, indexname); while (lex_opt(ls, ',')) var_new(ls, nvars++, lex_str(ls)); lex_check(ls, TK_in); line = ls->linenumber; assign_adjust(ls, 3, expr_list(ls, &e), &e); /* The iterator needs another 3 [4] slots (func [pc] | state ctl). */ bcreg_bump(fs, 3+LJ_FR2); isnext = (nvars <= 5 && predict_next(ls, fs, exprpc)); var_add(ls, 3); /* Hidden control variables. */ lex_check(ls, TK_do); loop = bcemit_AJ(fs, isnext ? BC_ISNEXT : BC_JMP, base, NO_JMP); fscope_begin(fs, &bl, 0); /* Scope for visible variables. */ var_add(ls, nvars-3); bcreg_reserve(fs, nvars-3); parse_block(ls); fscope_end(fs); /* Perform loop inversion. Loop control instructions are at the end. */ jmp_patchins(fs, loop, fs->pc); bcemit_ABC(fs, isnext ? BC_ITERN : BC_ITERC, base, nvars-3+1, 2+1); loopend = bcemit_AJ(fs, BC_ITERL, base, NO_JMP); fs->bcbase[loopend-1].line = line; /* Fix line for control ins. */ fs->bcbase[loopend].line = line; jmp_patchins(fs, loopend, loop+1); } /* Parse 'for' statement. 
*/ static void parse_for(LexState *ls, BCLine line) { FuncState *fs = ls->fs; GCstr *varname; FuncScope bl; fscope_begin(fs, &bl, FSCOPE_LOOP); lj_lex_next(ls); /* Skip 'for'. */ varname = lex_str(ls); /* Get first variable name. */ if (ls->tok == '=') parse_for_num(ls, varname, line); else if (ls->tok == ',' || ls->tok == TK_in) parse_for_iter(ls, varname); else err_syntax(ls, LJ_ERR_XFOR); lex_match(ls, TK_end, TK_for, line); fscope_end(fs); /* Resolve break list. */ } /* Parse condition and 'then' block. */ static BCPos parse_then(LexState *ls) { BCPos condexit; lj_lex_next(ls); /* Skip 'if' or 'elseif'. */ condexit = expr_cond(ls); lex_check(ls, TK_then); parse_block(ls); return condexit; } /* Parse 'if' statement. */ static void parse_if(LexState *ls, BCLine line) { FuncState *fs = ls->fs; BCPos flist; BCPos escapelist = NO_JMP; flist = parse_then(ls); while (ls->tok == TK_elseif) { /* Parse multiple 'elseif' blocks. */ jmp_append(fs, &escapelist, bcemit_jmp(fs)); jmp_tohere(fs, flist); flist = parse_then(ls); } if (ls->tok == TK_else) { /* Parse optional 'else' block. */ jmp_append(fs, &escapelist, bcemit_jmp(fs)); jmp_tohere(fs, flist); lj_lex_next(ls); /* Skip 'else'. */ parse_block(ls); } else { jmp_append(fs, &escapelist, flist); } jmp_tohere(fs, escapelist); lex_match(ls, TK_end, TK_if, line); } /* -- Parse statements ---------------------------------------------------- */ /* Parse a statement. Returns 1 if it must be the last one in a chunk. */ static int parse_stmt(LexState *ls) { BCLine line = ls->linenumber; switch (ls->tok) { case TK_if: parse_if(ls, line); break; case TK_while: parse_while(ls, line); break; case TK_do: lj_lex_next(ls); parse_block(ls); lex_match(ls, TK_end, TK_do, line); break; case TK_for: parse_for(ls, line); break; case TK_repeat: parse_repeat(ls, line); break; case TK_function: parse_func(ls, line); break; case TK_local: lj_lex_next(ls); parse_local(ls); break; case TK_return: parse_return(ls); return 1; /* Must be last. 
*/ case TK_break: lj_lex_next(ls); parse_break(ls); return !LJ_52; /* Must be last in Lua 5.1. */ #if LJ_52 case ';': lj_lex_next(ls); break; #endif case TK_label: parse_label(ls); break; case TK_goto: if (LJ_52 || lj_lex_lookahead(ls) == TK_name) { lj_lex_next(ls); parse_goto(ls); break; } /* else: fallthrough */ default: parse_call_assign(ls); break; } return 0; } /* A chunk is a list of statements optionally separated by semicolons. */ static void parse_chunk(LexState *ls) { int islast = 0; synlevel_begin(ls); while (!islast && !parse_isend(ls->tok)) { islast = parse_stmt(ls); lex_opt(ls, ';'); lua_assert(ls->fs->framesize >= ls->fs->freereg && ls->fs->freereg >= ls->fs->nactvar); ls->fs->freereg = ls->fs->nactvar; /* Free registers after each stmt. */ } synlevel_end(ls); } /* Entry point of bytecode parser. */ GCproto *lj_parse(LexState *ls) { FuncState fs; FuncScope bl; GCproto *pt; lua_State *L = ls->L; #ifdef LUAJIT_DISABLE_DEBUGINFO ls->chunkname = lj_str_newlit(L, "="); #else ls->chunkname = lj_str_newz(L, ls->chunkarg); #endif setstrV(L, L->top, ls->chunkname); /* Anchor chunkname string. */ incr_top(L); ls->level = 0; fs_init(ls, &fs); fs.linedefined = 0; fs.numparams = 0; fs.bcbase = NULL; fs.bclim = 0; fs.flags |= PROTO_VARARG; /* Main chunk is always a vararg func. */ fscope_begin(&fs, &bl, 0); bcemit_AD(&fs, BC_FUNCV, 0, 0); /* Placeholder. */ lj_lex_next(ls); /* Read-ahead first token. */ parse_chunk(ls); if (ls->tok != TK_eof) err_token(ls, TK_eof); pt = fs_finish(ls, ls->linenumber); L->top--; /* Drop chunkname. */ lua_assert(fs.prev == NULL); lua_assert(ls->fs == NULL); lua_assert(pt->sizeuv == 0); return pt; } tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/xb1build.bat0000644000000000000000000000614513306562377023040 0ustar rootroot@rem Script to build LuaJIT with the Xbox One SDK. @rem Donated to the public domain. 
@rem @rem Open a "Visual Studio .NET Command Prompt" (64 bit host compiler) @rem Then cd to this directory and run this script. @if not defined INCLUDE goto :FAIL @if not defined DurangoXDK goto :FAIL @setlocal @echo ---- Host compiler ---- @set LJCOMPILE=cl /nologo /c /MD /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /DLUAJIT_ENABLE_GC64 @set LJLINK=link /nologo @set LJMT=mt /nologo @set DASMDIR=..\dynasm @set DASM=%DASMDIR%\dynasm.lua @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c %LJCOMPILE% host\minilua.c @if errorlevel 1 goto :BAD %LJLINK% /out:minilua.exe minilua.obj @if errorlevel 1 goto :BAD if exist minilua.exe.manifest^ %LJMT% -manifest minilua.exe.manifest -outputresource:minilua.exe @rem Error out for 64 bit host compiler @minilua @if not errorlevel 8 goto :FAIL @set DASMFLAGS=-D WIN -D FFI -D P64 minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h vm_x64.dasc @if errorlevel 1 goto :BAD %LJCOMPILE% /I "." 
/I %DASMDIR% /D_DURANGO host\buildvm*.c @if errorlevel 1 goto :BAD %LJLINK% /out:buildvm.exe buildvm*.obj @if errorlevel 1 goto :BAD if exist buildvm.exe.manifest^ %LJMT% -manifest buildvm.exe.manifest -outputresource:buildvm.exe buildvm -m peobj -o lj_vm.obj @if errorlevel 1 goto :BAD buildvm -m bcdef -o lj_bcdef.h %ALL_LIB% @if errorlevel 1 goto :BAD buildvm -m ffdef -o lj_ffdef.h %ALL_LIB% @if errorlevel 1 goto :BAD buildvm -m libdef -o lj_libdef.h %ALL_LIB% @if errorlevel 1 goto :BAD buildvm -m recdef -o lj_recdef.h %ALL_LIB% @if errorlevel 1 goto :BAD buildvm -m vmdef -o jit\vmdef.lua %ALL_LIB% @if errorlevel 1 goto :BAD buildvm -m folddef -o lj_folddef.h lj_opt_fold.c @if errorlevel 1 goto :BAD @echo ---- Cross compiler ---- @set CWD=%cd% @call "%DurangoXDK%\xdk\DurangoVars.cmd" XDK @cd /D "%CWD%" @shift @set LJCOMPILE="cl" /nologo /c /W3 /GF /Gm- /GR- /GS- /Gy /openmp- /D_CRT_SECURE_NO_DEPRECATE /D_LIB /D_UNICODE /D_DURANGO @set LJLIB="lib" /nologo @if "%1"=="debug" ( @shift @set LJCOMPILE=%LJCOMPILE% /Zi /MDd /Od @set LJLINK=%LJLINK% /debug ) else ( @set LJCOMPILE=%LJCOMPILE% /MD /O2 /DNDEBUG ) @if "%1"=="amalg" goto :AMALG %LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c @if errorlevel 1 goto :BAD %LJLIB% /OUT:luajit.lib lj_*.obj lib_*.obj @if errorlevel 1 goto :BAD @goto :NOAMALG :AMALG %LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c @if errorlevel 1 goto :BAD %LJLIB% /OUT:luajit.lib ljamalg.obj lj_vm.obj @if errorlevel 1 goto :BAD :NOAMALG @del *.obj *.manifest minilua.exe buildvm.exe @echo. @echo === Successfully built LuaJIT for Xbox One === @goto :END :BAD @echo. @echo ******************************************************* @echo *** Build FAILED -- Please check the error messages *** @echo ******************************************************* @goto :END :FAIL @echo To run this script you must open a "Visual Studio .NET Command Prompt" @echo (64 bit host compiler). The Xbox One SDK must be installed, too. 
:END tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_asm.h0000644000000000000000000000054413306562377022251 0ustar rootroot/* ** IR assembler (SSA IR -> machine code). ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h */ #ifndef _LJ_ASM_H #define _LJ_ASM_H #include "lj_jit.h" #if LJ_HASJIT LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T); LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target); #endif #endif tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_dispatch.h0000644000000000000000000001177513306562377023300 0ustar rootroot/* ** Instruction dispatch handling. ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h */ #ifndef _LJ_DISPATCH_H #define _LJ_DISPATCH_H #include "lj_obj.h" #include "lj_bc.h" #if LJ_HASJIT #include "lj_jit.h" #endif #if LJ_TARGET_MIPS /* Need our own global offset table for the dreaded MIPS calling conventions. */ #ifndef _LJ_VM_H LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t a, int32_t b); #endif #if LJ_SOFTFP #ifndef _LJ_IRCALL_H extern double __adddf3(double a, double b); extern double __subdf3(double a, double b); extern double __muldf3(double a, double b); extern double __divdf3(double a, double b); #endif #define SFGOTDEF(_) _(sqrt) _(__adddf3) _(__subdf3) _(__muldf3) _(__divdf3) #else #define SFGOTDEF(_) #endif #if LJ_HASJIT #define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot) #else #define JITGOTDEF(_) #endif #if LJ_HASFFI #define FFIGOTDEF(_) \ _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave) #else #define FFIGOTDEF(_) #endif #define GOTDEF(_) \ _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \ _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \ _(pow) _(fmod) _(ldexp) _(lj_vm_modi) \ _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_dispatch_stitch) \ _(lj_dispatch_profile) _(lj_err_throw) \ _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \ _(lj_gc_barrieruv) 
_(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \ _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \ _(lj_meta_for) _(lj_meta_istype) _(lj_meta_len) _(lj_meta_tget) \ _(lj_meta_tset) _(lj_state_growstack) _(lj_strfmt_number) \ _(lj_str_new) _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) \ _(lj_tab_new) _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \ _(lj_tab_setinth) _(lj_buf_putstr_reverse) _(lj_buf_putstr_lower) \ _(lj_buf_putstr_upper) _(lj_buf_tostr) \ JITGOTDEF(_) FFIGOTDEF(_) SFGOTDEF(_) enum { #define GOTENUM(name) LJ_GOT_##name, GOTDEF(GOTENUM) #undef GOTENUM LJ_GOT__MAX }; #endif /* Type of hot counter. Must match the code in the assembler VM. */ /* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */ typedef uint16_t HotCount; /* Number of hot counter hash table entries (must be a power of two). */ #define HOTCOUNT_SIZE 64 #define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount)) /* Hotcount decrements. */ #define HOTCOUNT_LOOP 2 #define HOTCOUNT_CALL 1 /* This solves a circular dependency problem -- bump as needed. Sigh. */ #define GG_NUM_ASMFF 57 #define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF) #define GG_LEN_SDISP BC_FUNCF #define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP) /* Global state, main thread and extra fields are allocated together. */ typedef struct GG_State { lua_State L; /* Main thread. */ global_State g; /* Global state. */ #if LJ_TARGET_MIPS ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */ #endif #if LJ_HASJIT jit_State J; /* JIT state. */ HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */ #endif ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */ BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. 
*/ } GG_State; #define GG_OFS(field) ((int)offsetof(GG_State, field)) #define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g))) #define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J))) #define L2GG(L) (G2GG(G(L))) #define J2G(J) (&J2GG(J)->g) #define G2J(gl) (&G2GG(gl)->J) #define L2J(L) (&L2GG(L)->J) #define GG_G2J (GG_OFS(J) - GG_OFS(g)) #define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g)) #define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch)) #define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch)) #define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch)) #define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction)) #define hotcount_get(gg, pc) \ (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)] #define hotcount_set(gg, pc, val) \ (hotcount_get((gg), (pc)) = (HotCount)(val)) /* Dispatch table management. */ LJ_FUNC void lj_dispatch_init(GG_State *GG); #if LJ_HASJIT LJ_FUNC void lj_dispatch_init_hotcount(global_State *g); #endif LJ_FUNC void lj_dispatch_update(global_State *g); /* Instruction dispatch callback for hooks or when recording. */ LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc); LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc); #if LJ_HASJIT LJ_FUNCA void LJ_FASTCALL lj_dispatch_stitch(jit_State *J, const BCIns *pc); #endif #if LJ_HASPROFILE LJ_FUNCA void LJ_FASTCALL lj_dispatch_profile(lua_State *L, const BCIns *pc); #endif #if LJ_HASFFI && !defined(_BUILDVM_H) /* Save/restore errno and GetLastError() around hooks, exits and recording. 
*/ #include #if LJ_TARGET_WINDOWS #define WIN32_LEAN_AND_MEAN #include #define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError(); #define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr); #else #define ERRNO_SAVE int olderr = errno; #define ERRNO_RESTORE errno = olderr; #endif #else #define ERRNO_SAVE #define ERRNO_RESTORE #endif #endif tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lua.h0000644000000000000000000003045613306562377021572 0ustar rootroot/* ** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $ ** Lua - An Extensible Extension Language ** Lua.org, PUC-Rio, Brazil (http://www.lua.org) ** See Copyright Notice at the end of this file */ #ifndef lua_h #define lua_h #include #include #include #include "luaconf.h" #define LUA_VERSION "Lua 5.1" #define LUA_RELEASE "Lua 5.1.4" #define LUA_VERSION_NUM 501 #define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio" #define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes" /* mark for precompiled code (`Lua') */ #define LUA_SIGNATURE "\033Lua" /* option for multiple returns in `lua_pcall' and `lua_call' */ #define LUA_MULTRET (-1) /* ** pseudo-indices */ #define LUA_REGISTRYINDEX (-10000) #define LUA_ENVIRONINDEX (-10001) #define LUA_GLOBALSINDEX (-10002) #define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) /* thread status */ #define LUA_OK 0 #define LUA_YIELD 1 #define LUA_ERRRUN 2 #define LUA_ERRSYNTAX 3 #define LUA_ERRMEM 4 #define LUA_ERRERR 5 typedef struct lua_State lua_State; typedef int (*lua_CFunction) (lua_State *L); /* ** functions that read/write blocks when loading/dumping Lua chunks */ typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); /* ** prototype for memory-allocation functions */ typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); /* ** basic types */ #define LUA_TNONE (-1) #define LUA_TNIL 0 #define LUA_TBOOLEAN 1 #define 
LUA_TLIGHTUSERDATA 2 #define LUA_TNUMBER 3 #define LUA_TSTRING 4 #define LUA_TTABLE 5 #define LUA_TFUNCTION 6 #define LUA_TUSERDATA 7 #define LUA_TTHREAD 8 /* minimum Lua stack available to a C function */ #define LUA_MINSTACK 20 /* ** generic extra include file */ #if defined(LUA_USER_H) #include LUA_USER_H #endif /* type of numbers in Lua */ typedef LUA_NUMBER lua_Number; /* type for integer functions */ typedef LUA_INTEGER lua_Integer; /* ** state manipulation */ LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); LUA_API void (lua_close) (lua_State *L); LUA_API lua_State *(lua_newthread) (lua_State *L); LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); /* ** basic stack manipulation */ LUA_API int (lua_gettop) (lua_State *L); LUA_API void (lua_settop) (lua_State *L, int idx); LUA_API void (lua_pushvalue) (lua_State *L, int idx); LUA_API void (lua_remove) (lua_State *L, int idx); LUA_API void (lua_insert) (lua_State *L, int idx); LUA_API void (lua_replace) (lua_State *L, int idx); LUA_API int (lua_checkstack) (lua_State *L, int sz); LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n); /* ** access functions (stack -> C) */ LUA_API int (lua_isnumber) (lua_State *L, int idx); LUA_API int (lua_isstring) (lua_State *L, int idx); LUA_API int (lua_iscfunction) (lua_State *L, int idx); LUA_API int (lua_isuserdata) (lua_State *L, int idx); LUA_API int (lua_type) (lua_State *L, int idx); LUA_API const char *(lua_typename) (lua_State *L, int tp); LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2); LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx); LUA_API int (lua_toboolean) (lua_State *L, int idx); LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); LUA_API uint32_t (lua_hashstring) (lua_State *L, int idx); 
LUA_API size_t (lua_objlen) (lua_State *L, int idx); LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); LUA_API void *(lua_touserdata) (lua_State *L, int idx); LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); LUA_API const void *(lua_topointer) (lua_State *L, int idx); /* ** push functions (C -> stack) */ LUA_API void (lua_pushnil) (lua_State *L); LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n); LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); LUA_API void (lua_pushstring) (lua_State *L, const char *s); LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, va_list argp); LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); LUA_API void (lua_pushboolean) (lua_State *L, int b); LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); LUA_API int (lua_pushthread) (lua_State *L); /* ** get functions (Lua -> stack) */ LUA_API void (lua_gettable) (lua_State *L, int idx); LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); LUA_API void (lua_rawget) (lua_State *L, int idx); LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); LUA_API int (lua_getmetatable) (lua_State *L, int objindex); LUA_API void (lua_getfenv) (lua_State *L, int idx); /* ** set functions (stack -> Lua) */ LUA_API void (lua_settable) (lua_State *L, int idx); LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); LUA_API void (lua_rawset) (lua_State *L, int idx); LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); LUA_API int (lua_setmetatable) (lua_State *L, int objindex); LUA_API int (lua_setfenv) (lua_State *L, int idx); /* ** `load' and `call' functions (load and run Lua code) */ LUA_API void (lua_call) 
(lua_State *L, int nargs, int nresults); LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, const char *chunkname); LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data); /* ** coroutine functions */ LUA_API int (lua_yield) (lua_State *L, int nresults); LUA_API int (lua_resume) (lua_State *L, int narg); LUA_API int (lua_status) (lua_State *L); /* ** garbage-collection function and options */ #define LUA_GCSTOP 0 #define LUA_GCRESTART 1 #define LUA_GCCOLLECT 2 #define LUA_GCCOUNT 3 #define LUA_GCCOUNTB 4 #define LUA_GCSTEP 5 #define LUA_GCSETPAUSE 6 #define LUA_GCSETSTEPMUL 7 #define LUA_GCISRUNNING 9 LUA_API int (lua_gc) (lua_State *L, int what, int data); /* ** miscellaneous functions */ LUA_API int (lua_error) (lua_State *L); LUA_API int (lua_next) (lua_State *L, int idx); LUA_API void (lua_concat) (lua_State *L, int n); LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); /* ** Calculate a hash for a specified string. Hash is the same as ** for luajit string objects (see lj_str_new()). 
*/ LUA_API uint32_t (lua_hash) (const char *str, uint32_t len); /* ** =============================================================== ** some useful macros ** =============================================================== */ #define lua_pop(L,n) lua_settop(L, -(n)-1) #define lua_newtable(L) lua_createtable(L, 0, 0) #define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) #define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) #define lua_strlen(L,i) lua_objlen(L, (i)) #define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) #define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) #define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) #define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) #define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN) #define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) #define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) #define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) #define lua_pushliteral(L, s) \ lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) #define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) #define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) #define lua_tostring(L,i) lua_tolstring(L, (i), NULL) /* ** compatibility macros and functions */ #define lua_open() luaL_newstate() #define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) #define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) #define lua_Chunkreader lua_Reader #define lua_Chunkwriter lua_Writer /* hack */ LUA_API void lua_setlevel (lua_State *from, lua_State *to); /* ** {====================================================================== ** Debug API ** ======================================================================= */ /* ** Event codes */ #define LUA_HOOKCALL 0 #define LUA_HOOKRET 1 #define LUA_HOOKLINE 2 #define LUA_HOOKCOUNT 3 #define LUA_HOOKTAILRET 4 /* ** Event masks */ #define LUA_MASKCALL (1 << LUA_HOOKCALL) #define LUA_MASKRET (1 << LUA_HOOKRET) 
#define LUA_MASKLINE (1 << LUA_HOOKLINE) #define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) typedef struct lua_Debug lua_Debug; /* activation record */ /* Functions to be called by the debuger in specific events */ typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n); LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); LUA_API lua_Hook lua_gethook (lua_State *L); LUA_API int lua_gethookmask (lua_State *L); LUA_API int lua_gethookcount (lua_State *L); /* From Lua 5.2. */ LUA_API void *lua_upvalueid (lua_State *L, int idx, int n); LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2); LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt, const char *chunkname, const char *mode); LUA_API const lua_Number *lua_version (lua_State *L); LUA_API void lua_copy (lua_State *L, int fromidx, int toidx); LUA_API lua_Number lua_tonumberx (lua_State *L, int idx, int *isnum); LUA_API lua_Integer lua_tointegerx (lua_State *L, int idx, int *isnum); /* From Lua 5.3. 
*/ LUA_API int lua_isyieldable (lua_State *L); struct lua_Debug { int event; const char *name; /* (n) */ const char *namewhat; /* (n) `global', `local', `field', `method' */ const char *what; /* (S) `Lua', `C', `main', `tail' */ const char *source; /* (S) */ int currentline; /* (l) */ int nups; /* (u) number of upvalues */ int linedefined; /* (S) */ int lastlinedefined; /* (S) */ char short_src[LUA_IDSIZE]; /* (S) */ /* private part */ int i_ci; /* active function */ }; /* }====================================================================== */ /****************************************************************************** * Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ******************************************************************************/ #endif tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_buf.h0000644000000000000000000000561113306562377022245 0ustar rootroot/* ** Buffer handling. 
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h */ #ifndef _LJ_BUF_H #define _LJ_BUF_H #include "lj_obj.h" #include "lj_gc.h" #include "lj_str.h" /* Resizable string buffers. Struct definition in lj_obj.h. */ #define sbufB(sb) (mref((sb)->b, char)) #define sbufP(sb) (mref((sb)->p, char)) #define sbufE(sb) (mref((sb)->e, char)) #define sbufL(sb) (mref((sb)->L, lua_State)) #define sbufsz(sb) ((MSize)(sbufE((sb)) - sbufB((sb)))) #define sbuflen(sb) ((MSize)(sbufP((sb)) - sbufB((sb)))) #define sbufleft(sb) ((MSize)(sbufE((sb)) - sbufP((sb)))) #define setsbufP(sb, q) (setmref((sb)->p, (q))) #define setsbufL(sb, l) (setmref((sb)->L, (l))) /* Buffer management */ LJ_FUNC char *LJ_FASTCALL lj_buf_need2(SBuf *sb, MSize sz); LJ_FUNC char *LJ_FASTCALL lj_buf_more2(SBuf *sb, MSize sz); LJ_FUNC void LJ_FASTCALL lj_buf_shrink(lua_State *L, SBuf *sb); LJ_FUNC char * LJ_FASTCALL lj_buf_tmp(lua_State *L, MSize sz); static LJ_AINLINE void lj_buf_init(lua_State *L, SBuf *sb) { setsbufL(sb, L); setmref(sb->p, NULL); setmref(sb->e, NULL); setmref(sb->b, NULL); } static LJ_AINLINE void lj_buf_reset(SBuf *sb) { setmrefr(sb->p, sb->b); } static LJ_AINLINE SBuf *lj_buf_tmp_(lua_State *L) { SBuf *sb = &G(L)->tmpbuf; setsbufL(sb, L); lj_buf_reset(sb); return sb; } static LJ_AINLINE void lj_buf_free(global_State *g, SBuf *sb) { lj_mem_free(g, sbufB(sb), sbufsz(sb)); } static LJ_AINLINE char *lj_buf_need(SBuf *sb, MSize sz) { if (LJ_UNLIKELY(sz > sbufsz(sb))) return lj_buf_need2(sb, sz); return sbufB(sb); } static LJ_AINLINE char *lj_buf_more(SBuf *sb, MSize sz) { if (LJ_UNLIKELY(sz > sbufleft(sb))) return lj_buf_more2(sb, sz); return sbufP(sb); } /* Low-level buffer put operations */ LJ_FUNC SBuf *lj_buf_putmem(SBuf *sb, const void *q, MSize len); LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putchar(SBuf *sb, int c); LJ_FUNC SBuf * LJ_FASTCALL lj_buf_putstr(SBuf *sb, GCstr *s); static LJ_AINLINE char *lj_buf_wmem(char *p, const void *q, MSize len) { return (char *)memcpy(p, q, 
len) + len; } static LJ_AINLINE void lj_buf_putb(SBuf *sb, int c) { char *p = lj_buf_more(sb, 1); *p++ = (char)c; setsbufP(sb, p); } /* High-level buffer put operations */ LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_reverse(SBuf *sb, GCstr *s); LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_lower(SBuf *sb, GCstr *s); LJ_FUNCA SBuf * LJ_FASTCALL lj_buf_putstr_upper(SBuf *sb, GCstr *s); LJ_FUNC SBuf *lj_buf_putstr_rep(SBuf *sb, GCstr *s, int32_t rep); LJ_FUNC SBuf *lj_buf_puttab(SBuf *sb, GCtab *t, GCstr *sep, int32_t i, int32_t e); /* Miscellaneous buffer operations */ LJ_FUNCA GCstr * LJ_FASTCALL lj_buf_tostr(SBuf *sb); LJ_FUNC GCstr *lj_buf_cat2str(lua_State *L, GCstr *s1, GCstr *s2); LJ_FUNC uint32_t LJ_FASTCALL lj_buf_ruleb128(const char **pp); static LJ_AINLINE GCstr *lj_buf_str(lua_State *L, SBuf *sb) { return lj_str_new(L, sbufB(sb), sbuflen(sb)); } #endif tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_mcode.h0000644000000000000000000000126313306562377022557 0ustar rootroot/* ** Machine code management. ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h */ #ifndef _LJ_MCODE_H #define _LJ_MCODE_H #include "lj_obj.h" #if LJ_HASJIT || LJ_HASFFI LJ_FUNC void lj_mcode_sync(void *start, void *end); #endif #if LJ_HASJIT #include "lj_jit.h" LJ_FUNC void lj_mcode_free(jit_State *J); LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim); LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m); LJ_FUNC void lj_mcode_abort(jit_State *J); LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish); LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need); #define lj_mcode_commitbot(J, m) (J->mcbot = (m)) #endif #endif tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lj_strfmt_num.c0000644000000000000000000004760213306562377023670 0ustar rootroot/* ** String formatting for floating-point numbers. ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h ** Contributed by Peter Cawley. 
*/ #include #define lj_strfmt_num_c #define LUA_CORE #include "lj_obj.h" #include "lj_buf.h" #include "lj_str.h" #include "lj_strfmt.h" /* -- Precomputed tables -------------------------------------------------- */ /* Rescale factors to push the exponent of a number towards zero. */ #define RESCALE_EXPONENTS(P, N) \ P(308), P(289), P(270), P(250), P(231), P(212), P(193), P(173), P(154), \ P(135), P(115), P(96), P(77), P(58), P(38), P(0), P(0), P(0), N(39), N(58), \ N(77), N(96), N(116), N(135), N(154), N(174), N(193), N(212), N(231), \ N(251), N(270), N(289) #define ONE_E_P(X) 1e+0 ## X #define ONE_E_N(X) 1e-0 ## X static const int16_t rescale_e[] = { RESCALE_EXPONENTS(-, +) }; static const double rescale_n[] = { RESCALE_EXPONENTS(ONE_E_P, ONE_E_N) }; #undef ONE_E_N #undef ONE_E_P /* ** For p in range -70 through 57, this table encodes pairs (m, e) such that ** 4*2^p <= (uint8_t)m*10^e, and is the smallest value for which this holds. */ static const int8_t four_ulp_m_e[] = { 34, -21, 68, -21, 14, -20, 28, -20, 55, -20, 2, -19, 3, -19, 5, -19, 9, -19, -82, -18, 35, -18, 7, -17, -117, -17, 28, -17, 56, -17, 112, -16, -33, -16, 45, -16, 89, -16, -78, -15, 36, -15, 72, -15, -113, -14, 29, -14, 57, -14, 114, -13, -28, -13, 46, -13, 91, -12, -74, -12, 37, -12, 73, -12, 15, -11, 3, -11, 59, -11, 2, -10, 3, -10, 5, -10, 1, -9, -69, -9, 38, -9, 75, -9, 15, -7, 3, -7, 6, -7, 12, -6, -17, -7, 48, -7, 96, -7, -65, -6, 39, -6, 77, -6, -103, -5, 31, -5, 62, -5, 123, -4, -11, -4, 49, -4, 98, -4, -60, -3, 4, -2, 79, -3, 16, -2, 32, -2, 63, -2, 2, -1, 25, 0, 5, 1, 1, 2, 2, 2, 4, 2, 8, 2, 16, 2, 32, 2, 64, 2, -128, 2, 26, 2, 52, 2, 103, 3, -51, 3, 41, 4, 82, 4, -92, 4, 33, 4, 66, 4, -124, 5, 27, 5, 53, 5, 105, 6, 21, 6, 42, 6, 84, 6, 17, 7, 34, 7, 68, 7, 2, 8, 3, 8, 6, 8, 108, 9, -41, 9, 43, 10, 86, 9, -84, 10, 35, 10, 69, 10, -118, 11, 28, 11, 55, 12, 11, 13, 22, 13, 44, 13, 88, 13, -80, 13, 36, 13, 71, 13, -115, 14, 29, 14, 57, 14, 113, 15, -30, 15, 46, 15, 91, 15, 19, 16, 37, 16, 
73, 16, 2, 17, 3, 17, 6, 17 }; /* min(2^32-1, 10^e-1) for e in range 0 through 10 */ static uint32_t ndigits_dec_threshold[] = { 0, 9U, 99U, 999U, 9999U, 99999U, 999999U, 9999999U, 99999999U, 999999999U, 0xffffffffU }; /* -- Helper functions ---------------------------------------------------- */ /* Compute the number of digits in the decimal representation of x. */ static MSize ndigits_dec(uint32_t x) { MSize t = ((lj_fls(x | 1) * 77) >> 8) + 1; /* 2^8/77 is roughly log2(10) */ return t + (x > ndigits_dec_threshold[t]); } #define WINT_R(x, sh, sc) \ { uint32_t d = (x*(((1<>sh; x -= d*sc; *p++ = (char)('0'+d); } /* Write 9-digit unsigned integer to buffer. */ static char *lj_strfmt_wuint9(char *p, uint32_t u) { uint32_t v = u / 10000, w; u -= v * 10000; w = v / 10000; v -= w * 10000; *p++ = (char)('0'+w); WINT_R(v, 23, 1000) WINT_R(v, 12, 100) WINT_R(v, 10, 10) *p++ = (char)('0'+v); WINT_R(u, 23, 1000) WINT_R(u, 12, 100) WINT_R(u, 10, 10) *p++ = (char)('0'+u); return p; } #undef WINT_R /* -- Extended precision arithmetic --------------------------------------- */ /* ** The "nd" format is a fixed-precision decimal representation for numbers. It ** consists of up to 64 uint32_t values, with each uint32_t storing a value ** in the range [0, 1e9). A number in "nd" format consists of three variables: ** ** uint32_t nd[64]; ** uint32_t ndlo; ** uint32_t ndhi; ** ** The integral part of the number is stored in nd[0 ... ndhi], the value of ** which is sum{i in [0, ndhi] | nd[i] * 10^(9*i)}. If the fractional part of ** the number is zero, ndlo is zero. Otherwise, the fractional part is stored ** in nd[ndlo ... 63], the value of which is taken to be ** sum{i in [ndlo, 63] | nd[i] * 10^(9*(i-64))}. ** ** If the array part had 128 elements rather than 64, then every double would ** have an exact representation in "nd" format. 
With 64 elements, all integral ** doubles have an exact representation, and all non-integral doubles have ** enough digits to make both %.99e and %.99f do the right thing. */ #if LJ_64 #define ND_MUL2K_MAX_SHIFT 29 #define ND_MUL2K_DIV1E9(val) ((uint32_t)((val) / 1000000000)) #else #define ND_MUL2K_MAX_SHIFT 11 #define ND_MUL2K_DIV1E9(val) ((uint32_t)((val) >> 9) / 1953125) #endif /* Multiply nd by 2^k and add carry_in (ndlo is assumed to be zero). */ static uint32_t nd_mul2k(uint32_t* nd, uint32_t ndhi, uint32_t k, uint32_t carry_in, SFormat sf) { uint32_t i, ndlo = 0, start = 1; /* Performance hacks. */ if (k > ND_MUL2K_MAX_SHIFT*2 && STRFMT_FP(sf) != STRFMT_FP(STRFMT_T_FP_F)) { start = ndhi - (STRFMT_PREC(sf) + 17) / 8; } /* Real logic. */ while (k >= ND_MUL2K_MAX_SHIFT) { for (i = ndlo; i <= ndhi; i++) { uint64_t val = ((uint64_t)nd[i] << ND_MUL2K_MAX_SHIFT) | carry_in; carry_in = ND_MUL2K_DIV1E9(val); nd[i] = (uint32_t)val - carry_in * 1000000000; } if (carry_in) { nd[++ndhi] = carry_in; carry_in = 0; if (start++ == ndlo) ++ndlo; } k -= ND_MUL2K_MAX_SHIFT; } if (k) { for (i = ndlo; i <= ndhi; i++) { uint64_t val = ((uint64_t)nd[i] << k) | carry_in; carry_in = ND_MUL2K_DIV1E9(val); nd[i] = (uint32_t)val - carry_in * 1000000000; } if (carry_in) nd[++ndhi] = carry_in; } return ndhi; } /* Divide nd by 2^k (ndlo is assumed to be zero). */ static uint32_t nd_div2k(uint32_t* nd, uint32_t ndhi, uint32_t k, SFormat sf) { uint32_t ndlo = 0, stop1 = ~0, stop2 = ~0; /* Performance hacks. 
*/ if (!ndhi) { if (!nd[0]) { return 0; } else { uint32_t s = lj_ffs(nd[0]); if (s >= k) { nd[0] >>= k; return 0; } nd[0] >>= s; k -= s; } } if (k > 18) { if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_F)) { stop1 = 63 - (int32_t)STRFMT_PREC(sf) / 9; } else { int32_t floorlog2 = ndhi * 29 + lj_fls(nd[ndhi]) - k; int32_t floorlog10 = (int32_t)(floorlog2 * 0.30102999566398114); stop1 = 62 + (floorlog10 - (int32_t)STRFMT_PREC(sf)) / 9; stop2 = 61 + ndhi - (int32_t)STRFMT_PREC(sf) / 8; } } /* Real logic. */ while (k >= 9) { uint32_t i = ndhi, carry = 0; for (;;) { uint32_t val = nd[i]; nd[i] = (val >> 9) + carry; carry = (val & 0x1ff) * 1953125; if (i == ndlo) break; i = (i - 1) & 0x3f; } if (ndlo != stop1 && ndlo != stop2) { if (carry) { ndlo = (ndlo - 1) & 0x3f; nd[ndlo] = carry; } if (!nd[ndhi]) { ndhi = (ndhi - 1) & 0x3f; stop2--; } } else if (!nd[ndhi]) { if (ndhi != ndlo) { ndhi = (ndhi - 1) & 0x3f; stop2--; } else return ndlo; } k -= 9; } if (k) { uint32_t mask = (1U << k) - 1, mul = 1000000000 >> k, i = ndhi, carry = 0; for (;;) { uint32_t val = nd[i]; nd[i] = (val >> k) + carry; carry = (val & mask) * mul; if (i == ndlo) break; i = (i - 1) & 0x3f; } if (carry) { ndlo = (ndlo - 1) & 0x3f; nd[ndlo] = carry; } } return ndlo; } /* Add m*10^e to nd (assumes ndlo <= e/9 <= ndhi and 0 <= m <= 9). */ static uint32_t nd_add_m10e(uint32_t* nd, uint32_t ndhi, uint8_t m, int32_t e) { uint32_t i, carry; if (e >= 0) { i = (uint32_t)e/9; carry = m * (ndigits_dec_threshold[e - (int32_t)i*9] + 1); } else { int32_t f = (e-8)/9; i = (uint32_t)(64 + f); carry = m * (ndigits_dec_threshold[e - f*9] + 1); } for (;;) { uint32_t val = nd[i] + carry; if (LJ_UNLIKELY(val >= 1000000000)) { val -= 1000000000; nd[i] = val; if (LJ_UNLIKELY(i == ndhi)) { ndhi = (ndhi + 1) & 0x3f; nd[ndhi] = 1; break; } carry = 1; i = (i + 1) & 0x3f; } else { nd[i] = val; break; } } return ndhi; } /* Test whether two "nd" values are equal in their most significant digits. 
*/ static int nd_similar(uint32_t* nd, uint32_t ndhi, uint32_t* ref, MSize hilen, MSize prec) { char nd9[9], ref9[9]; if (hilen <= prec) { if (LJ_UNLIKELY(nd[ndhi] != *ref)) return 0; prec -= hilen; ref--; ndhi = (ndhi - 1) & 0x3f; if (prec >= 9) { if (LJ_UNLIKELY(nd[ndhi] != *ref)) return 0; prec -= 9; ref--; ndhi = (ndhi - 1) & 0x3f; } } else { prec -= hilen - 9; } lua_assert(prec < 9); lj_strfmt_wuint9(nd9, nd[ndhi]); lj_strfmt_wuint9(ref9, *ref); return !memcmp(nd9, ref9, prec) && (nd9[prec] < '5') == (ref9[prec] < '5'); } /* -- Formatted conversions to buffer ------------------------------------- */ /* Write formatted floating-point number to either sb or p. */ static char *lj_strfmt_wfnum(SBuf *sb, SFormat sf, lua_Number n, char *p) { MSize width = STRFMT_WIDTH(sf), prec = STRFMT_PREC(sf), len; TValue t; t.n = n; if (LJ_UNLIKELY((t.u32.hi << 1) >= 0xffe00000)) { /* Handle non-finite values uniformly for %a, %e, %f, %g. */ int prefix = 0, ch = (sf & STRFMT_F_UPPER) ? 0x202020 : 0; if (((t.u32.hi & 0x000fffff) | t.u32.lo) != 0) { ch ^= ('n' << 16) | ('a' << 8) | 'n'; if ((sf & STRFMT_F_SPACE)) prefix = ' '; } else { ch ^= ('i' << 16) | ('n' << 8) | 'f'; if ((t.u32.hi & 0x80000000)) prefix = '-'; else if ((sf & STRFMT_F_PLUS)) prefix = '+'; else if ((sf & STRFMT_F_SPACE)) prefix = ' '; } len = 3 + (prefix != 0); if (!p) p = lj_buf_more(sb, width > len ? width : len); if (!(sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' '; if (prefix) *p++ = prefix; *p++ = (char)(ch >> 16); *p++ = (char)(ch >> 8); *p++ = (char)ch; } else if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_A)) { /* %a */ const char *hexdig = (sf & STRFMT_F_UPPER) ? 
"0123456789ABCDEFPX" : "0123456789abcdefpx"; int32_t e = (t.u32.hi >> 20) & 0x7ff; char prefix = 0, eprefix = '+'; if (t.u32.hi & 0x80000000) prefix = '-'; else if ((sf & STRFMT_F_PLUS)) prefix = '+'; else if ((sf & STRFMT_F_SPACE)) prefix = ' '; t.u32.hi &= 0xfffff; if (e) { t.u32.hi |= 0x100000; e -= 1023; } else if (t.u32.lo | t.u32.hi) { /* Non-zero denormal - normalise it. */ uint32_t shift = t.u32.hi ? 20-lj_fls(t.u32.hi) : 52-lj_fls(t.u32.lo); e = -1022 - shift; t.u64 <<= shift; } /* abs(n) == t.u64 * 2^(e - 52) */ /* If n != 0, bit 52 of t.u64 is set, and is the highest set bit. */ if ((int32_t)prec < 0) { /* Default precision: use smallest precision giving exact result. */ prec = t.u32.lo ? 13-lj_ffs(t.u32.lo)/4 : 5-lj_ffs(t.u32.hi|0x100000)/4; } else if (prec < 13) { /* Precision is sufficiently low as to maybe require rounding. */ t.u64 += (((uint64_t)1) << (51 - prec*4)); } if (e < 0) { eprefix = '-'; e = -e; } len = 5 + ndigits_dec((uint32_t)e) + prec + (prefix != 0) + ((prec | (sf & STRFMT_F_ALT)) != 0); if (!p) p = lj_buf_more(sb, width > len ? width : len); if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) { while (width-- > len) *p++ = ' '; } if (prefix) *p++ = prefix; *p++ = '0'; *p++ = hexdig[17]; /* x or X */ if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) { while (width-- > len) *p++ = '0'; } *p++ = '0' + (t.u32.hi >> 20); /* Usually '1', sometimes '0' or '2'. */ if ((prec | (sf & STRFMT_F_ALT))) { /* Emit fractional part. */ char *q = p + 1 + prec; *p = '.'; if (prec < 13) t.u64 >>= (52 - prec*4); else while (prec > 13) p[prec--] = '0'; while (prec) { p[prec--] = hexdig[t.u64 & 15]; t.u64 >>= 4; } p = q; } *p++ = hexdig[16]; /* p or P */ *p++ = eprefix; /* + or - */ p = lj_strfmt_wint(p, e); } else { /* %e or %f or %g - begin by converting n to "nd" format. 
*/ uint32_t nd[64]; uint32_t ndhi = 0, ndlo, i; int32_t e = (t.u32.hi >> 20) & 0x7ff, ndebias = 0; char prefix = 0, *q; if (t.u32.hi & 0x80000000) prefix = '-'; else if ((sf & STRFMT_F_PLUS)) prefix = '+'; else if ((sf & STRFMT_F_SPACE)) prefix = ' '; prec += ((int32_t)prec >> 31) & 7; /* Default precision is 6. */ if (STRFMT_FP(sf) == STRFMT_FP(STRFMT_T_FP_G)) { /* %g - decrement precision if non-zero (to make it like %e). */ prec--; prec ^= (uint32_t)((int32_t)prec >> 31); } if ((sf & STRFMT_T_FP_E) && prec < 14 && n != 0) { /* Precision is sufficiently low that rescaling will probably work. */ if ((ndebias = rescale_e[e >> 6])) { t.n = n * rescale_n[e >> 6]; if (LJ_UNLIKELY(!e)) t.n *= 1e10, ndebias -= 10; t.u64 -= 2; /* Convert 2ulp below (later we convert 2ulp above). */ nd[0] = 0x100000 | (t.u32.hi & 0xfffff); e = ((t.u32.hi >> 20) & 0x7ff) - 1075 - (ND_MUL2K_MAX_SHIFT < 29); goto load_t_lo; rescale_failed: t.n = n; e = (t.u32.hi >> 20) & 0x7ff; ndebias = ndhi = 0; } } nd[0] = t.u32.hi & 0xfffff; if (e == 0) e++; else nd[0] |= 0x100000; e -= 1043; if (t.u32.lo) { e -= 32 + (ND_MUL2K_MAX_SHIFT < 29); load_t_lo: #if ND_MUL2K_MAX_SHIFT >= 29 nd[0] = (nd[0] << 3) | (t.u32.lo >> 29); ndhi = nd_mul2k(nd, ndhi, 29, t.u32.lo & 0x1fffffff, sf); #elif ND_MUL2K_MAX_SHIFT >= 11 ndhi = nd_mul2k(nd, ndhi, 11, t.u32.lo >> 21, sf); ndhi = nd_mul2k(nd, ndhi, 11, (t.u32.lo >> 10) & 0x7ff, sf); ndhi = nd_mul2k(nd, ndhi, 11, (t.u32.lo << 1) & 0x7ff, sf); #else #error "ND_MUL2K_MAX_SHIFT too small" #endif } if (e >= 0) { ndhi = nd_mul2k(nd, ndhi, (uint32_t)e, 0, sf); ndlo = 0; } else { ndlo = nd_div2k(nd, ndhi, (uint32_t)-e, sf); if (ndhi && !nd[ndhi]) ndhi--; } /* abs(n) == nd * 10^ndebias (for slightly loose interpretation of ==) */ if ((sf & STRFMT_T_FP_E)) { /* %e or %g - assume %e and start by calculating nd's exponent (nde). 
*/ char eprefix = '+'; int32_t nde = -1; MSize hilen; if (ndlo && !nd[ndhi]) { ndhi = 64; do {} while (!nd[--ndhi]); nde -= 64 * 9; } hilen = ndigits_dec(nd[ndhi]); nde += ndhi * 9 + hilen; if (ndebias) { /* ** Rescaling was performed, but this introduced some error, and might ** have pushed us across a rounding boundary. We check whether this ** error affected the result by introducing even more error (2ulp in ** either direction), and seeing whether a roundary boundary was ** crossed. Having already converted the -2ulp case, we save off its ** most significant digits, convert the +2ulp case, and compare them. */ int32_t eidx = e + 70 + (ND_MUL2K_MAX_SHIFT < 29) + (t.u32.lo >= 0xfffffffe && !(~t.u32.hi << 12)); const int8_t *m_e = four_ulp_m_e + eidx * 2; lua_assert(0 <= eidx && eidx < 128); nd[33] = nd[ndhi]; nd[32] = nd[(ndhi - 1) & 0x3f]; nd[31] = nd[(ndhi - 2) & 0x3f]; nd_add_m10e(nd, ndhi, (uint8_t)*m_e, m_e[1]); if (LJ_UNLIKELY(!nd_similar(nd, ndhi, nd + 33, hilen, prec + 1))) { goto rescale_failed; } } if ((int32_t)(prec - nde) < (0x3f & -(int32_t)ndlo) * 9) { /* Precision is sufficiently low as to maybe require rounding. */ ndhi = nd_add_m10e(nd, ndhi, 5, nde - prec - 1); nde += (hilen != ndigits_dec(nd[ndhi])); } nde += ndebias; if ((sf & STRFMT_T_FP_F)) { /* %g */ if ((int32_t)prec >= nde && nde >= -4) { if (nde < 0) ndhi = 0; prec -= nde; goto g_format_like_f; } else if (!(sf & STRFMT_F_ALT) && prec && width > 5) { /* Decrease precision in order to strip trailing zeroes. */ char tail[9]; uint32_t maxprec = hilen - 1 + ((ndhi - ndlo) & 0x3f) * 9; if (prec >= maxprec) prec = maxprec; else ndlo = (ndhi - (((int32_t)(prec - hilen) + 9) / 9)) & 0x3f; i = prec - hilen - (((ndhi - ndlo) & 0x3f) * 9) + 10; lj_strfmt_wuint9(tail, nd[ndlo]); while (prec && tail[--i] == '0') { prec--; if (!i) { if (ndlo == ndhi) { prec = 0; break; } lj_strfmt_wuint9(tail, nd[++ndlo]); i = 9; } } } } if (nde < 0) { /* Make nde non-negative. 
*/ eprefix = '-'; nde = -nde; } len = 3 + prec + (prefix != 0) + ndigits_dec((uint32_t)nde) + (nde < 10) + ((prec | (sf & STRFMT_F_ALT)) != 0); if (!p) p = lj_buf_more(sb, (width > len ? width : len) + 5); if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) { while (width-- > len) *p++ = ' '; } if (prefix) *p++ = prefix; if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) { while (width-- > len) *p++ = '0'; } q = lj_strfmt_wint(p + 1, nd[ndhi]); p[0] = p[1]; /* Put leading digit in the correct place. */ if ((prec | (sf & STRFMT_F_ALT))) { /* Emit fractional part. */ p[1] = '.'; p += 2; prec -= (MSize)(q - p); p = q; /* Account for digits already emitted. */ /* Then emit chunks of 9 digits (this may emit 8 digits too many). */ for (i = ndhi; (int32_t)prec > 0 && i != ndlo; prec -= 9) { i = (i - 1) & 0x3f; p = lj_strfmt_wuint9(p, nd[i]); } if ((sf & STRFMT_T_FP_F) && !(sf & STRFMT_F_ALT)) { /* %g (and not %#g) - strip trailing zeroes. */ p += (int32_t)prec & ((int32_t)prec >> 31); while (p[-1] == '0') p--; if (p[-1] == '.') p--; } else { /* %e (or %#g) - emit trailing zeroes. */ while ((int32_t)prec > 0) { *p++ = '0'; prec--; } p += (int32_t)prec; } } else { p++; } *p++ = (sf & STRFMT_F_UPPER) ? 'E' : 'e'; *p++ = eprefix; /* + or - */ if (nde < 10) *p++ = '0'; /* Always at least two digits of exponent. */ p = lj_strfmt_wint(p, nde); } else { /* %f (or, shortly, %g in %f style) */ if (prec < (MSize)(0x3f & -(int32_t)ndlo) * 9) { /* Precision is sufficiently low as to maybe require rounding. */ ndhi = nd_add_m10e(nd, ndhi, 5, 0 - prec - 1); } g_format_like_f: if ((sf & STRFMT_T_FP_E) && !(sf & STRFMT_F_ALT) && prec && width) { /* Decrease precision in order to strip trailing zeroes. */ if (ndlo) { /* nd has a fractional part; we need to look at its digits. 
*/ char tail[9]; uint32_t maxprec = (64 - ndlo) * 9; if (prec >= maxprec) prec = maxprec; else ndlo = 64 - (prec + 8) / 9; i = prec - ((63 - ndlo) * 9); lj_strfmt_wuint9(tail, nd[ndlo]); while (prec && tail[--i] == '0') { prec--; if (!i) { if (ndlo == 63) { prec = 0; break; } lj_strfmt_wuint9(tail, nd[++ndlo]); i = 9; } } } else { /* nd has no fractional part, so precision goes straight to zero. */ prec = 0; } } len = ndhi * 9 + ndigits_dec(nd[ndhi]) + prec + (prefix != 0) + ((prec | (sf & STRFMT_F_ALT)) != 0); if (!p) p = lj_buf_more(sb, (width > len ? width : len) + 8); if (!(sf & (STRFMT_F_LEFT | STRFMT_F_ZERO))) { while (width-- > len) *p++ = ' '; } if (prefix) *p++ = prefix; if ((sf & (STRFMT_F_LEFT | STRFMT_F_ZERO)) == STRFMT_F_ZERO) { while (width-- > len) *p++ = '0'; } /* Emit integer part. */ p = lj_strfmt_wint(p, nd[ndhi]); i = ndhi; while (i) p = lj_strfmt_wuint9(p, nd[--i]); if ((prec | (sf & STRFMT_F_ALT))) { /* Emit fractional part. */ *p++ = '.'; /* Emit chunks of 9 digits (this may emit 8 digits too many). */ while ((int32_t)prec > 0 && i != ndlo) { i = (i - 1) & 0x3f; p = lj_strfmt_wuint9(p, nd[i]); prec -= 9; } if ((sf & STRFMT_T_FP_E) && !(sf & STRFMT_F_ALT)) { /* %g (and not %#g) - strip trailing zeroes. */ p += (int32_t)prec & ((int32_t)prec >> 31); while (p[-1] == '0') p--; if (p[-1] == '.') p--; } else { /* %f (or %#g) - emit trailing zeroes. */ while ((int32_t)prec > 0) { *p++ = '0'; prec--; } p += (int32_t)prec; } } } } if ((sf & STRFMT_F_LEFT)) while (width-- > len) *p++ = ' '; return p; } /* Add formatted floating-point number to buffer. */ SBuf *lj_strfmt_putfnum(SBuf *sb, SFormat sf, lua_Number n) { setsbufP(sb, lj_strfmt_wfnum(sb, sf, n, NULL)); return sb; } /* -- Conversions to strings ---------------------------------------------- */ /* Convert number to string. 
*/ GCstr * LJ_FASTCALL lj_strfmt_num(lua_State *L, cTValue *o) { char buf[STRFMT_MAXBUF_NUM]; MSize len = (MSize)(lj_strfmt_wfnum(NULL, STRFMT_G14, o->n, buf) - buf); return lj_str_new(L, buf, len); } tarantool_1.9.1.26.g63eb81e3c/third_party/luajit/src/lib_string.c0000644000000000000000000004642713306562377023145 0ustar rootroot/* ** String library. ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h ** ** Major portions taken verbatim or adapted from the Lua interpreter. ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h */ #define lib_string_c #define LUA_LIB #include "lua.h" #include "lauxlib.h" #include "lualib.h" #include "lj_obj.h" #include "lj_gc.h" #include "lj_err.h" #include "lj_buf.h" #include "lj_str.h" #include "lj_tab.h" #include "lj_meta.h" #include "lj_state.h" #include "lj_ff.h" #include "lj_bcdump.h" #include "lj_char.h" #include "lj_strfmt.h" #include "lj_lib.h" /* ------------------------------------------------------------------------ */ #define LJLIB_MODULE_string LJLIB_LUA(string_len) /* function(s) CHECK_str(s) return #s end */ LJLIB_ASM(string_byte) LJLIB_REC(string_range 0) { GCstr *s = lj_lib_checkstr(L, 1); int32_t len = (int32_t)s->len; int32_t start = lj_lib_optint(L, 2, 1); int32_t stop = lj_lib_optint(L, 3, start); int32_t n, i; const unsigned char *p; if (stop < 0) stop += len+1; if (start < 0) start += len+1; if (start <= 0) start = 1; if (stop > len) stop = len; if (start > stop) return FFH_RES(0); /* Empty interval: return no results. */ start--; n = stop - start; if ((uint32_t)n > LUAI_MAXCSTACK) lj_err_caller(L, LJ_ERR_STRSLC); lj_state_checkstack(L, (MSize)n); p = (const unsigned char *)strdata(s) + start; for (i = 0; i < n; i++) setintV(L->base + i-1-LJ_FR2, p[i]); return FFH_RES(n); } LJLIB_ASM(string_char) LJLIB_REC(.) 
{ int i, nargs = (int)(L->top - L->base); char *buf = lj_buf_tmp(L, (MSize)nargs); for (i = 1; i <= nargs; i++) { int32_t k = lj_lib_checkint(L, i); if (!checku8(k)) lj_err_arg(L, i, LJ_ERR_BADVAL); buf[i-1] = (char)k; } setstrV(L, L->base-1-LJ_FR2, lj_str_new(L, buf, (size_t)nargs)); return FFH_RES(1); } LJLIB_ASM(string_sub) LJLIB_REC(string_range 1) { lj_lib_checkstr(L, 1); lj_lib_checkint(L, 2); setintV(L->base+2, lj_lib_optint(L, 3, -1)); return FFH_RETRY; } LJLIB_CF(string_rep) LJLIB_REC(.) { GCstr *s = lj_lib_checkstr(L, 1); int32_t rep = lj_lib_checkint(L, 2); GCstr *sep = lj_lib_optstr(L, 3); SBuf *sb = lj_buf_tmp_(L); if (sep && rep > 1) { GCstr *s2 = lj_buf_cat2str(L, sep, s); lj_buf_reset(sb); lj_buf_putstr(sb, s); s = s2; rep--; } sb = lj_buf_putstr_rep(sb, s, rep); setstrV(L, L->top-1, lj_buf_str(L, sb)); lj_gc_check(L); return 1; } LJLIB_ASM(string_reverse) LJLIB_REC(string_op IRCALL_lj_buf_putstr_reverse) { lj_lib_checkstr(L, 1); return FFH_RETRY; } LJLIB_ASM_(string_lower) LJLIB_REC(string_op IRCALL_lj_buf_putstr_lower) LJLIB_ASM_(string_upper) LJLIB_REC(string_op IRCALL_lj_buf_putstr_upper) /* ------------------------------------------------------------------------ */ static int writer_buf(lua_State *L, const void *p, size_t size, void *sb) { lj_buf_putmem((SBuf *)sb, p, (MSize)size); UNUSED(L); return 0; } LJLIB_CF(string_dump) { GCfunc *fn = lj_lib_checkfunc(L, 1); int strip = L->base+1 < L->top && tvistruecond(L->base+1); SBuf *sb = lj_buf_tmp_(L); /* Assumes lj_bcwrite() doesn't use tmpbuf. 
*/ L->top = L->base+1; if (!isluafunc(fn) || lj_bcwrite(L, funcproto(fn), writer_buf, sb, strip)) lj_err_caller(L, LJ_ERR_STRDUMP); setstrV(L, L->top-1, lj_buf_str(L, sb)); lj_gc_check(L); return 1; } /* ------------------------------------------------------------------------ */ /* macro to `unsign' a character */ #define uchar(c) ((unsigned char)(c)) #define CAP_UNFINISHED (-1) #define CAP_POSITION (-2) typedef struct MatchState { const char *src_init; /* init of source string */ const char *src_end; /* end (`\0') of source string */ lua_State *L; int level; /* total number of captures (finished or unfinished) */ int depth; struct { const char *init; ptrdiff_t len; } capture[LUA_MAXCAPTURES]; } MatchState; #define L_ESC '%' static int check_capture(MatchState *ms, int l) { l -= '1'; if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPI); return l; } static int capture_to_close(MatchState *ms) { int level = ms->level; for (level--; level>=0; level--) if (ms->capture[level].len == CAP_UNFINISHED) return level; lj_err_caller(ms->L, LJ_ERR_STRPATC); return 0; /* unreachable */ } static const char *classend(MatchState *ms, const char *p) { switch (*p++) { case L_ESC: if (*p == '\0') lj_err_caller(ms->L, LJ_ERR_STRPATE); return p+1; case '[': if (*p == '^') p++; do { /* look for a `]' */ if (*p == '\0') lj_err_caller(ms->L, LJ_ERR_STRPATM); if (*(p++) == L_ESC && *p != '\0') p++; /* skip escapes (e.g. `%]') */ } while (*p != ']'); return p+1; default: return p; } } static const unsigned char match_class_map[32] = { 0,LJ_CHAR_ALPHA,0,LJ_CHAR_CNTRL,LJ_CHAR_DIGIT,0,0,LJ_CHAR_GRAPH,0,0,0,0, LJ_CHAR_LOWER,0,0,0,LJ_CHAR_PUNCT,0,0,LJ_CHAR_SPACE,0, LJ_CHAR_UPPER,0,LJ_CHAR_ALNUM,LJ_CHAR_XDIGIT,0,0,0,0,0,0,0 }; static int match_class(int c, int cl) { if ((cl & 0xc0) == 0x40) { int t = match_class_map[(cl&0x1f)]; if (t) { t = lj_char_isa(c, t); return (cl & 0x20) ? 
t : !t; } if (cl == 'z') return c == 0; if (cl == 'Z') return c != 0; } return (cl == c); } static int matchbracketclass(int c, const char *p, const char *ec) { int sig = 1; if (*(p+1) == '^') { sig = 0; p++; /* skip the `^' */ } while (++p < ec) { if (*p == L_ESC) { p++; if (match_class(c, uchar(*p))) return sig; } else if ((*(p+1) == '-') && (p+2 < ec)) { p+=2; if (uchar(*(p-2)) <= c && c <= uchar(*p)) return sig; } else if (uchar(*p) == c) return sig; } return !sig; } static int singlematch(int c, const char *p, const char *ep) { switch (*p) { case '.': return 1; /* matches any char */ case L_ESC: return match_class(c, uchar(*(p+1))); case '[': return matchbracketclass(c, p, ep-1); default: return (uchar(*p) == c); } } static const char *match(MatchState *ms, const char *s, const char *p); static const char *matchbalance(MatchState *ms, const char *s, const char *p) { if (*p == 0 || *(p+1) == 0) lj_err_caller(ms->L, LJ_ERR_STRPATU); if (*s != *p) { return NULL; } else { int b = *p; int e = *(p+1); int cont = 1; while (++s < ms->src_end) { if (*s == e) { if (--cont == 0) return s+1; } else if (*s == b) { cont++; } } } return NULL; /* string ends out of balance */ } static const char *max_expand(MatchState *ms, const char *s, const char *p, const char *ep) { ptrdiff_t i = 0; /* counts maximum expand for item */ while ((s+i)src_end && singlematch(uchar(*(s+i)), p, ep)) i++; /* keeps trying to match with the maximum repetitions */ while (i>=0) { const char *res = match(ms, (s+i), ep+1); if (res) return res; i--; /* else didn't match; reduce 1 repetition to try again */ } return NULL; } static const char *min_expand(MatchState *ms, const char *s, const char *p, const char *ep) { for (;;) { const char *res = match(ms, s, ep+1); if (res != NULL) return res; else if (ssrc_end && singlematch(uchar(*s), p, ep)) s++; /* try with one more repetition */ else return NULL; } } static const char *start_capture(MatchState *ms, const char *s, const char *p, int what) { const char 
*res; int level = ms->level; if (level >= LUA_MAXCAPTURES) lj_err_caller(ms->L, LJ_ERR_STRCAPN); ms->capture[level].init = s; ms->capture[level].len = what; ms->level = level+1; if ((res=match(ms, s, p)) == NULL) /* match failed? */ ms->level--; /* undo capture */ return res; } static const char *end_capture(MatchState *ms, const char *s, const char *p) { int l = capture_to_close(ms); const char *res; ms->capture[l].len = s - ms->capture[l].init; /* close capture */ if ((res = match(ms, s, p)) == NULL) /* match failed? */ ms->capture[l].len = CAP_UNFINISHED; /* undo capture */ return res; } static const char *match_capture(MatchState *ms, const char *s, int l) { size_t len; l = check_capture(ms, l); len = (size_t)ms->capture[l].len; if ((size_t)(ms->src_end-s) >= len && memcmp(ms->capture[l].init, s, len) == 0) return s+len; else return NULL; } static const char *match(MatchState *ms, const char *s, const char *p) { if (++ms->depth > LJ_MAX_XLEVEL) lj_err_caller(ms->L, LJ_ERR_STRPATX); init: /* using goto's to optimize tail recursion */ switch (*p) { case '(': /* start capture */ if (*(p+1) == ')') /* position capture? */ s = start_capture(ms, s, p+2, CAP_POSITION); else s = start_capture(ms, s, p+1, CAP_UNFINISHED); break; case ')': /* end capture */ s = end_capture(ms, s, p+1); break; case L_ESC: switch (*(p+1)) { case 'b': /* balanced string? */ s = matchbalance(ms, s, p+2); if (s == NULL) break; p+=4; goto init; /* else s = match(ms, s, p+4); */ case 'f': { /* frontier? */ const char *ep; char previous; p += 2; if (*p != '[') lj_err_caller(ms->L, LJ_ERR_STRPATB); ep = classend(ms, p); /* points to what is next */ previous = (s == ms->src_init) ? '\0' : *(s-1); if (matchbracketclass(uchar(previous), p, ep-1) || !matchbracketclass(uchar(*s), p, ep-1)) { s = NULL; break; } p=ep; goto init; /* else s = match(ms, s, ep); */ } default: if (lj_char_isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? 
*/ s = match_capture(ms, s, uchar(*(p+1))); if (s == NULL) break; p+=2; goto init; /* else s = match(ms, s, p+2) */ } goto dflt; /* case default */ } break; case '\0': /* end of pattern */ break; /* match succeeded */ case '$': /* is the `$' the last char in pattern? */ if (*(p+1) != '\0') goto dflt; if (s != ms->src_end) s = NULL; /* check end of string */ break; default: dflt: { /* it is a pattern item */ const char *ep = classend(ms, p); /* points to what is next */ int m = ssrc_end && singlematch(uchar(*s), p, ep); switch (*ep) { case '?': { /* optional */ const char *res; if (m && ((res=match(ms, s+1, ep+1)) != NULL)) { s = res; break; } p=ep+1; goto init; /* else s = match(ms, s, ep+1); */ } case '*': /* 0 or more repetitions */ s = max_expand(ms, s, p, ep); break; case '+': /* 1 or more repetitions */ s = (m ? max_expand(ms, s+1, p, ep) : NULL); break; case '-': /* 0 or more repetitions (minimum) */ s = min_expand(ms, s, p, ep); break; default: if (m) { s++; p=ep; goto init; } /* else s = match(ms, s+1, ep); */ s = NULL; break; } break; } } ms->depth--; return s; } static void push_onecapture(MatchState *ms, int i, const char *s, const char *e) { if (i >= ms->level) { if (i == 0) /* ms->level == 0, too */ lua_pushlstring(ms->L, s, (size_t)(e - s)); /* add whole match */ else lj_err_caller(ms->L, LJ_ERR_STRCAPI); } else { ptrdiff_t l = ms->capture[i].len; if (l == CAP_UNFINISHED) lj_err_caller(ms->L, LJ_ERR_STRCAPU); if (l == CAP_POSITION) lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1); else lua_pushlstring(ms->L, ms->capture[i].init, (size_t)l); } } static int push_captures(MatchState *ms, const char *s, const char *e) { int i; int nlevels = (ms->level == 0 && s) ? 
1 : ms->level; luaL_checkstack(ms->L, nlevels, "too many captures"); for (i = 0; i < nlevels; i++) push_onecapture(ms, i, s, e); return nlevels; /* number of strings pushed */ } static int str_find_aux(lua_State *L, int find) { GCstr *s = lj_lib_checkstr(L, 1); GCstr *p = lj_lib_checkstr(L, 2); int32_t start = lj_lib_optint(L, 3, 1); MSize st; if (start < 0) start += (int32_t)s->len; else start--; if (start < 0) start = 0; st = (MSize)start; if (st > s->len) { #if LJ_52 setnilV(L->top-1); return 1; #else st = s->len; #endif } if (find && ((L->base+3 < L->top && tvistruecond(L->base+3)) || !lj_str_haspattern(p))) { /* Search for fixed string. */ const char *q = lj_str_find(strdata(s)+st, strdata(p), s->len-st, p->len); if (q) { setintV(L->top-2, (int32_t)(q-strdata(s)) + 1); setintV(L->top-1, (int32_t)(q-strdata(s)) + (int32_t)p->len); return 2; } } else { /* Search for pattern. */ MatchState ms; const char *pstr = strdata(p); const char *sstr = strdata(s) + st; int anchor = 0; if (*pstr == '^') { pstr++; anchor = 1; } ms.L = L; ms.src_init = strdata(s); ms.src_end = strdata(s) + s->len; do { /* Loop through string and try to match the pattern. */ const char *q; ms.level = ms.depth = 0; q = match(&ms, sstr, pstr); if (q) { if (find) { setintV(L->top++, (int32_t)(sstr-(strdata(s)-1))); setintV(L->top++, (int32_t)(q-strdata(s))); return push_captures(&ms, NULL, NULL) + 2; } else { return push_captures(&ms, sstr, q); } } } while (sstr++ < ms.src_end && !anchor); } setnilV(L->top-1); /* Not found. */ return 1; } LJLIB_CF(string_find) LJLIB_REC(.) 
{ return str_find_aux(L, 1); } LJLIB_CF(string_match) { return str_find_aux(L, 0); } LJLIB_NOREG LJLIB_CF(string_gmatch_aux) { const char *p = strVdata(lj_lib_upvalue(L, 2)); GCstr *str = strV(lj_lib_upvalue(L, 1)); const char *s = strdata(str); TValue *tvpos = lj_lib_upvalue(L, 3); const char *src = s + tvpos->u32.lo; MatchState ms; ms.L = L; ms.src_init = s; ms.src_end = s + str->len; for (; src <= ms.src_end; src++) { const char *e; ms.level = ms.depth = 0; if ((e = match(&ms, src, p)) != NULL) { int32_t pos = (int32_t)(e - s); if (e == src) pos++; /* Ensure progress for empty match. */ tvpos->u32.lo = (uint32_t)pos; return push_captures(&ms, src, e); } } return 0; /* not found */ } LJLIB_CF(string_gmatch) { lj_lib_checkstr(L, 1); lj_lib_checkstr(L, 2); L->top = L->base+3; (L->top-1)->u64 = 0; lj_lib_pushcc(L, lj_cf_string_gmatch_aux, FF_string_gmatch_aux, 3); return 1; } static void add_s(MatchState *ms, luaL_Buffer *b, const char *s, const char *e) { size_t l, i; const char *news = lua_tolstring(ms->L, 3, &l); for (i = 0; i < l; i++) { if (news[i] != L_ESC) { luaL_addchar(b, news[i]); } else { i++; /* skip ESC */ if (!lj_char_isdigit(uchar(news[i]))) { luaL_addchar(b, news[i]); } else if (news[i] == '0') { luaL_addlstring(b, s, (size_t)(e - s)); } else { push_onecapture(ms, news[i] - '1', s, e); luaL_addvalue(b); /* add capture to accumulated result */ } } } } static void add_value(MatchState *ms, luaL_Buffer *b, const char *s, const char *e) { lua_State *L = ms->L; switch (lua_type(L, 3)) { case LUA_TNUMBER: case LUA_TSTRING: { add_s(ms, b, s, e); return; } case LUA_TFUNCTION: { int n; lua_pushvalue(L, 3); n = push_captures(ms, s, e); lua_call(L, n, 1); break; } case LUA_TTABLE: { push_onecapture(ms, 0, s, e); lua_gettable(L, 3); break; } } if (!lua_toboolean(L, -1)) { /* nil or false? 
*/ lua_pop(L, 1); lua_pushlstring(L, s, (size_t)(e - s)); /* keep original text */ } else if (!lua_isstring(L, -1)) { lj_err_callerv(L, LJ_ERR_STRGSRV, luaL_typename(L, -1)); } luaL_addvalue(b); /* add result to accumulator */ } LJLIB_CF(string_gsub) { size_t srcl; const char *src = luaL_checklstring(L, 1, &srcl); const char *p = luaL_checkstring(L, 2); int tr = lua_type(L, 3); int max_s = luaL_optint(L, 4, (int)(srcl+1)); int anchor = (*p == '^') ? (p++, 1) : 0; int n = 0; MatchState ms; luaL_Buffer b; if (!(tr == LUA_TNUMBER || tr == LUA_TSTRING || tr == LUA_TFUNCTION || tr == LUA_TTABLE)) lj_err_arg(L, 3, LJ_ERR_NOSFT); luaL_buffinit(L, &b); ms.L = L; ms.src_init = src; ms.src_end = src+srcl; while (n < max_s) { const char *e; ms.level = ms.depth = 0; e = match(&ms, src, p); if (e) { n++; add_value(&ms, &b, src, e); } if (e && e>src) /* non empty match? */ src = e; /* skip it */ else if (src < ms.src_end) luaL_addchar(&b, *src++); else break; if (anchor) break; } luaL_addlstring(&b, src, (size_t)(ms.src_end-src)); luaL_pushresult(&b); lua_pushinteger(L, n); /* number of substitutions */ return 2; } /* ------------------------------------------------------------------------ */ /* Emulate tostring() inline. */ static GCstr *string_fmt_tostring(lua_State *L, int arg, int retry) { TValue *o = L->base+arg-1; cTValue *mo; lua_assert(o < L->top); /* Caller already checks for existence. */ if (LJ_LIKELY(tvisstr(o))) return strV(o); if (retry != 2 && !tvisnil(mo = lj_meta_lookup(L, o, MM_tostring))) { copyTV(L, L->top++, mo); copyTV(L, L->top++, o); lua_call(L, 1, 1); copyTV(L, L->base+arg-1, --L->top); return NULL; /* Buffer may be overwritten, retry. */ } return lj_strfmt_obj(L, o); } LJLIB_CF(string_format) LJLIB_REC(.) 
{ int arg, top = (int)(L->top - L->base); GCstr *fmt; SBuf *sb; FormatState fs; SFormat sf; int retry = 0; again: arg = 1; sb = lj_buf_tmp_(L); fmt = lj_lib_checkstr(L, arg); lj_strfmt_init(&fs, strdata(fmt), fmt->len); while ((sf = lj_strfmt_parse(&fs)) != STRFMT_EOF) { if (sf == STRFMT_LIT) { lj_buf_putmem(sb, fs.str, fs.len); } else if (sf == STRFMT_ERR) { lj_err_callerv(L, LJ_ERR_STRFMT, strdata(lj_str_new(L, fs.str, fs.len))); } else { if (++arg > top) luaL_argerror(L, arg, lj_obj_typename[0]); switch (STRFMT_TYPE(sf)) { case STRFMT_INT: if (tvisint(L->base+arg-1)) { int32_t k = intV(L->base+arg-1); if (sf == STRFMT_INT) lj_strfmt_putint(sb, k); /* Shortcut for plain %d. */ else lj_strfmt_putfxint(sb, sf, k); } else { lj_strfmt_putfnum_int(sb, sf, lj_lib_checknum(L, arg)); } break; case STRFMT_UINT: if (tvisint(L->base+arg-1)) lj_strfmt_putfxint(sb, sf, intV(L->base+arg-1)); else lj_strfmt_putfnum_uint(sb, sf, lj_lib_checknum(L, arg)); break; case STRFMT_NUM: lj_strfmt_putfnum(sb, sf, lj_lib_checknum(L, arg)); break; case STRFMT_STR: { GCstr *str = string_fmt_tostring(L, arg, retry); if (str == NULL) retry = 1; else if ((sf & STRFMT_T_QUOTED)) lj_strfmt_putquoted(sb, str); /* No formatting. */ else lj_strfmt_putfstr(sb, sf, str); break; } case STRFMT_CHAR: lj_strfmt_putfchar(sb, sf, lj_lib_checkint(L, arg)); break; case STRFMT_PTR: /* No formatting. */ lj_strfmt_putptr(sb, lj_obj_ptr(L->base+arg-1)); break; default: lua_assert(0); break; } } } if (retry++ == 1) goto again; setstrV(L, L->top-1, lj_buf_str(L, sb)); lj_gc_check(L); return 1; } /* ------------------------------------------------------------------------ */ #include "lj_libdef.h" LUALIB_API int luaopen_string(lua_State *L) { GCtab *mt; global_State *g; LJ_LIB_REG(L, LUA_STRLIBNAME, string); mt = lj_tab_new(L, 0, 1); /* NOBARRIER: basemt is a GC root. 
*/ g = G(L); setgcref(basemt_it(g, LJ_TSTR), obj2gco(mt)); settabV(L, lj_tab_setstr(L, mt, mmname_str(g, MM_index)), tabV(L->top-1)); mt->nomm = (uint8_t)(~(1u<